Study time: 4-5 hours
Learning goals: master the Kafka message queue, learn stream processing, and understand the design of high-throughput messaging systems.
Detailed study checklist
✅ Part 1: Kafka Basics and Architecture (60 minutes)
1. Kafka Core Concepts
A walk-through of Kafka's architectural concepts
java
// KafkaArchitectureBasics.java
package com.example.demo.kafka;
import java.util.ArrayList;
import java.util.List;
public class KafkaArchitectureBasics {
public static class KafkaConcept {
private String name;
private String description;
private String role;
private String example;
public KafkaConcept(String name, String description, String role, String example) {
this.name = name;
this.description = description;
this.role = role;
this.example = example;
}
// Getters
public String getName() { return name; }
public String getDescription() { return description; }
public String getRole() { return role; }
public String getExample() { return example; }
}
public static void main(String[] args) {
List<KafkaConcept> concepts = new ArrayList<>();
// Broker
concepts.add(new KafkaConcept(
"Broker",
"A single server node in a Kafka cluster",
"Stores messages, serves client requests, and manages partition replicas",
"A Kafka cluster consists of several brokers, each managing partitions for multiple topics"
));
// Topic
concepts.add(new KafkaConcept(
"Topic",
"A logical category of messages, similar to a table in a database",
"Organizes messages and supports parallel processing across partitions",
"An order topic holds all order-related messages; a user topic holds all user-related messages"
));
// Partition
concepts.add(new KafkaConcept(
"Partition",
"A shard of a topic; each partition is an ordered sequence of messages",
"Enables parallelism, raises throughput, and supports horizontal scaling",
"An order topic split into 3 partitions can be consumed by 3 consumers in parallel"
));
// Consumer Group
concepts.add(new KafkaConcept(
"Consumer Group",
"A set of consumers that jointly consume a topic",
"Balances load across members and ensures each message is processed by only one member of the group",
"An order-processing group with 3 consumers shares the order topic's partitions among its members"
));
// Offset
concepts.add(new KafkaConcept(
"Offset",
"The position of a message within a partition",
"Tracks consumption progress so consumers can resume where they left off without losing messages",
"A consumer commits its current offset and, after a restart, continues from that position"
));
System.out.println("=== Kafka Core Concepts ===");
for (KafkaConcept concept : concepts) {
System.out.println("\nConcept: " + concept.getName());
System.out.println("Description: " + concept.getDescription());
System.out.println("Role: " + concept.getRole());
System.out.println("Example: " + concept.getExample());
}
}
}
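The concepts above map directly onto the client API. As a minimal sketch (assuming a local broker at localhost:9092 and an existing topic named demo-topic, both hypothetical), the plain kafka-clients consumer below pins itself to partition 0 and seeks to a chosen offset, showing how Partition and Offset drive consumption:
java
// OffsetSeekDemo.java -- illustrative sketch, not part of the lesson's project code.
package com.example.demo.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.List;
import java.util.Properties;

public class OffsetSeekDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("group.id", "offset-demo-group");
        props.put("key.deserializer", StringDeserializer.class.getName());
        props.put("value.deserializer", StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Manually assign partition 0 of the topic instead of subscribing:
            TopicPartition partition0 = new TopicPartition("demo-topic", 0);
            consumer.assign(List.of(partition0));
            // Resume from a known offset -- this is exactly what committed offsets automate.
            consumer.seek(partition0, 42L);
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition=%d offset=%d value=%s%n",
                        record.partition(), record.offset(), record.value());
            }
        }
    }
}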
2. Kafka vs RabbitMQ Comparison
Message queue comparison
java
// KafkaVsRabbitMQ.java
package com.example.demo.kafka;
import java.util.HashMap;
import java.util.Map;
public class KafkaVsRabbitMQ {
public static class ComparisonItem {
private String aspect;
private String kafka;
private String rabbitmq;
private String recommendation;
public ComparisonItem(String aspect, String kafka, String rabbitmq, String recommendation) {
this.aspect = aspect;
this.kafka = kafka;
this.rabbitmq = rabbitmq;
this.recommendation = recommendation;
}
// Getters
public String getAspect() { return aspect; }
public String getKafka() { return kafka; }
public String getRabbitmq() { return rabbitmq; }
public String getRecommendation() { return recommendation; }
}
public static void main(String[] args) {
Map<String, ComparisonItem> comparisons = new HashMap<>();
// Throughput
comparisons.put("Throughput", new ComparisonItem(
"Throughput",
"Very high; a single broker can reach on the order of a million messages per second",
"Moderate; a single node typically handles tens of thousands of messages per second",
"Prefer Kafka for high-concurrency pipelines; RabbitMQ is sufficient for typical workloads"
));
// Delivery guarantees
comparisons.put("Delivery guarantees", new ComparisonItem(
"Delivery guarantees",
"At-least-once by default, with exactly-once semantics available",
"Several acknowledgment modes and richer routing features",
"Choose RabbitMQ for complex routing and confirmation workflows; choose Kafka for high-throughput delivery"
));
// Latency
comparisons.put("Latency", new ComparisonItem(
"Latency",
"Millisecond-level latency; well suited to real-time stream processing",
"Sub-millisecond latency is achievable at low load; well suited to latency-sensitive messaging",
"Choose Kafka for real-time stream processing; choose RabbitMQ when per-message latency matters most"
));
// Scalability
comparisons.put("Scalability", new ComparisonItem(
"Scalability",
"Strong horizontal scalability with dynamic cluster expansion",
"Scales mostly vertically; clustering exists but is more complex to operate",
"Choose Kafka for large distributed deployments; RabbitMQ fits small to medium setups"
));
// Typical use cases
comparisons.put("Use cases", new ComparisonItem(
"Use cases",
"Log aggregation, stream processing, event sourcing",
"Task queues, RPC-style request/reply, notifications",
"Pick the queue that matches the concrete business scenario"
));
System.out.println("=== Kafka vs RabbitMQ ===");
for (Map.Entry<String, ComparisonItem> entry : comparisons.entrySet()) {
ComparisonItem item = entry.getValue();
System.out.println("\nDimension: " + item.getAspect());
System.out.println("Kafka: " + item.getKafka());
System.out.println("RabbitMQ: " + item.getRabbitmq());
System.out.println("Recommendation: " + item.getRecommendation());
}
}
}
✅ Part 2: Kafka Integration and Configuration (90 minutes)
1. Maven Dependencies and Configuration
Maven dependency configuration
xml
<!-- pom.xml -->
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.example</groupId>
<artifactId>kafka-demo</artifactId>
<version>1.0.0</version>
</parent>
<artifactId>kafka-service</artifactId>
<properties>
<!-- Spring Kafka 3.x builds on Spring Framework 6 and requires Java 17 -->
<maven.compiler.source>17</maven.compiler.source>
<maven.compiler.target>17</maven.compiler.target>
<kafka.version>3.4.0</kafka.version>
<spring.kafka.version>3.0.7</spring.kafka.version>
</properties>
<dependencies>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>${spring.kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-clients</artifactId>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>org.apache.kafka</groupId>
<artifactId>kafka-streams</artifactId>
<version>${kafka.version}</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka-test</artifactId>
<version>${spring.kafka.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
Kafka configuration
yaml
# application.yml
server:
  port: 8080

spring:
  application:
    name: kafka-service
  kafka:
    bootstrap-servers: localhost:9092
    # Producer settings
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Reliability
      acks: all
      retries: 3
      batch-size: 16384
      buffer-memory: 33554432
      # Compression
      compression-type: snappy
      # Settings without dedicated Spring Boot keys go under "properties"
      properties:
        linger.ms: 1
        enable.idempotence: true
    # Consumer settings
    consumer:
      group-id: kafka-demo-group
      auto-offset-reset: earliest
      enable-auto-commit: false
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      max-poll-records: 500
      heartbeat-interval: 10000
      properties:
        max.poll.interval.ms: 300000
        session.timeout.ms: 30000
    # Listener settings
    listener:
      ack-mode: manual
      concurrency: 3
      poll-timeout: 3000
      type: batch

# Custom Kafka settings used by this project
kafka:
  topic:
    order-events: order-events
    user-events: user-events
    payment-events: payment-events
    notification-events: notification-events
    order-statistics: order-statistics
    order-alerts: order-alerts
  partition:
    order-events: 3
    user-events: 2
    payment-events: 3
    notification-events: 1
  replication:
    factor: 1
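The custom kafka.topic / kafka.partition keys above are injected one by one with @Value in the configuration class that follows. As an alternative sketch (a design choice, not something the lesson requires), a @ConfigurationProperties class can bind the whole tree at once:
java
// KafkaTopicProperties.java -- optional alternative to repeated @Value injection.
package com.example.kafka.config;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

import java.util.Map;

@Component
@ConfigurationProperties(prefix = "kafka")
public class KafkaTopicProperties {
    /** Binds kafka.topic.*: logical name -> topic name. */
    private Map<String, String> topic;
    /** Binds kafka.partition.*: topic key -> partition count. */
    private Map<String, Integer> partition;
    private Replication replication = new Replication();

    public Map<String, String> getTopic() { return topic; }
    public void setTopic(Map<String, String> topic) { this.topic = topic; }
    public Map<String, Integer> getPartition() { return partition; }
    public void setPartition(Map<String, Integer> partition) { this.partition = partition; }
    public Replication getReplication() { return replication; }
    public void setReplication(Replication replication) { this.replication = replication; }

    /** Binds kafka.replication.factor. */
    public static class Replication {
        private int factor = 1;
        public int getFactor() { return factor; }
        public void setFactor(int factor) { this.factor = factor; }
    }
}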
2. Kafka Configuration Class
Kafka configuration class
java
// KafkaConfig.java
package com.example.kafka.config;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.TopicBuilder;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import java.util.HashMap;
import java.util.Map;
@Configuration
public class KafkaConfig {
@Value("${spring.kafka.producer.bootstrap-servers}")
private String bootstrapServers;
@Value("${kafka.topic.order-events}")
private String orderEventsTopic;
@Value("${kafka.topic.user-events}")
private String userEventsTopic;
@Value("${kafka.topic.payment-events}")
private String paymentEventsTopic;
@Value("${kafka.topic.notification-events}")
private String notificationEventsTopic;
@Value("${kafka.partition.order-events}")
private int orderEventsPartitions;
@Value("${kafka.partition.user-events}")
private int userEventsPartitions;
@Value("${kafka.partition.payment-events}")
private int paymentEventsPartitions;
@Value("${kafka.partition.notification-events}")
private int notificationEventsPartitions;
@Value("${kafka.replication.factor}")
private int replicationFactor;
/**
* Producer factory
*/
@Bean
public ProducerFactory<String, String> producerFactory() {
Map<String, Object> configProps = new HashMap<>();
configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
// Reliability settings
configProps.put(ProducerConfig.ACKS_CONFIG, "all");
configProps.put(ProducerConfig.RETRIES_CONFIG, 3);
configProps.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
configProps.put(ProducerConfig.LINGER_MS_CONFIG, 1);
configProps.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
// Idempotence
configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
// Compression
configProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
// Note: do not set TRANSACTIONAL_ID_CONFIG here unless every send runs inside a
// transaction (e.g. via kafkaTemplate.executeInTransaction); with a transactional
// id set, plain kafkaTemplate.send() calls fail with "No transaction is in process".
return new DefaultKafkaProducerFactory<>(configProps);
}
/**
* KafkaTemplate
*/
@Bean
public KafkaTemplate<String, String> kafkaTemplate() {
return new KafkaTemplate<>(producerFactory());
}
/**
* Order events topic
*/
@Bean
public NewTopic orderEventsTopic() {
return TopicBuilder.name(orderEventsTopic)
.partitions(orderEventsPartitions)
.replicas(replicationFactor)
.configs(Map.of(
"retention.ms", "604800000", // 7天
"cleanup.policy", "delete",
"compression.type", "snappy"
))
.build();
}
/**
* User events topic
*/
@Bean
public NewTopic userEventsTopic() {
return TopicBuilder.name(userEventsTopic)
.partitions(userEventsPartitions)
.replicas(replicationFactor)
.configs(Map.of(
"retention.ms", "2592000000", // 30天
"cleanup.policy", "delete",
"compression.type", "snappy"
))
.build();
}
/**
* Payment events topic
*/
@Bean
public NewTopic paymentEventsTopic() {
return TopicBuilder.name(paymentEventsTopic)
.partitions(paymentEventsPartitions)
.replicas(replicationFactor)
.configs(Map.of(
"retention.ms", "2592000000", // 30天
"cleanup.policy", "delete",
"compression.type", "snappy"
))
.build();
}
/**
* Notification events topic
*/
@Bean
public NewTopic notificationEventsTopic() {
return TopicBuilder.name(notificationEventsTopic)
.partitions(notificationEventsPartitions)
.replicas(replicationFactor)
.configs(Map.of(
"retention.ms", "86400000", // 1天
"cleanup.policy", "delete",
"compression.type", "snappy"
))
.build();
}
}
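The application.yml above sets listener.ack-mode: manual and listener.type: batch, and Spring Boot uses those values to auto-configure the default kafkaListenerContainerFactory that the listeners below reference. If you prefer to see it spelled out, here is a sketch of roughly what Boot wires up (defining this bean yourself replaces the auto-configured one; it is not required):
java
// KafkaListenerConfig.java -- explicit version of the auto-configured factory (sketch).
package com.example.kafka.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

@Configuration
public class KafkaListenerConfig {
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
            ConsumerFactory<String, String> consumerFactory) {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        factory.setConcurrency(3);      // listener.concurrency: 3
        factory.setBatchListener(true); // listener.type: batch
        // listener.ack-mode: manual -> the listener must call Acknowledgment.acknowledge()
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }
}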
3. Message Model Classes
Event base class
java
// BaseEvent.java
package com.example.kafka.model;
import lombok.Data;
import java.time.LocalDateTime;
import java.util.UUID;
@Data
public abstract class BaseEvent {
private String eventId;
private String eventType;
private String source;
private LocalDateTime timestamp;
private String version;
private String correlationId;
private String traceId;
public BaseEvent() {
this.eventId = UUID.randomUUID().toString();
this.timestamp = LocalDateTime.now();
this.version = "1.0";
}
public BaseEvent(String eventType, String source) {
this();
this.eventType = eventType;
this.source = source;
}
/**
* Returns the event payload. Note that @Data already generates a concrete
* getEventType() getter for the eventType field, so declaring an abstract
* getEventType() here would clash with the Lombok-generated method and break
* the subclasses' super calls.
*/
public abstract Object getEventData();
}
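BaseEvent carries a LocalDateTime timestamp, and the producer and consumer below serialize events with Jackson. A plain new ObjectMapper() cannot handle java.time types; Spring Boot's auto-configured ObjectMapper (the one injected via @Autowired) already registers the JSR-310 module, so the explicit bean below is only a sketch for cases where you construct the mapper yourself (jackson-datatype-jsr310 comes in via spring-boot-starter-web):
java
// JacksonConfig.java -- sketch; only needed if you build your own ObjectMapper.
package com.example.kafka.config;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class JacksonConfig {
    @Bean
    public ObjectMapper objectMapper() {
        ObjectMapper mapper = new ObjectMapper();
        mapper.registerModule(new JavaTimeModule());                    // LocalDateTime support
        mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); // ISO-8601 strings
        return mapper;
    }
}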
Order event message
java
// OrderEvent.java
package com.example.kafka.model;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.math.BigDecimal;
@Data
@EqualsAndHashCode(callSuper = true)
public class OrderEvent extends BaseEvent {
private String orderNumber;
private Long userId;
private Long productId;
private Integer quantity;
private BigDecimal amount;
private String status;
private String previousStatus;
private String reason;
public OrderEvent() {
super();
}
public OrderEvent(String eventType, String source, String orderNumber, Long userId) {
super(eventType, source);
this.orderNumber = orderNumber;
this.userId = userId;
}
@Override
public Object getEventData() {
return this;
}
/**
* Create an order-created event
*/
public static OrderEvent orderCreated(String source, String orderNumber, Long userId,
Long productId, Integer quantity, BigDecimal amount) {
OrderEvent event = new OrderEvent("ORDER_CREATED", source, orderNumber, userId);
event.setProductId(productId);
event.setQuantity(quantity);
event.setAmount(amount);
event.setStatus("CREATED");
return event;
}
/**
* Create an order-paid event
*/
public static OrderEvent orderPaid(String source, String orderNumber, Long userId,
BigDecimal amount) {
OrderEvent event = new OrderEvent("ORDER_PAID", source, orderNumber, userId);
event.setAmount(amount);
event.setPreviousStatus("CREATED");
event.setStatus("PAID");
return event;
}
/**
* Create an order-cancelled event
*/
public static OrderEvent orderCancelled(String source, String orderNumber, Long userId,
String reason) {
OrderEvent event = new OrderEvent("ORDER_CANCELLED", source, orderNumber, userId);
event.setPreviousStatus("CREATED");
event.setStatus("CANCELLED");
event.setReason(reason);
return event;
}
}
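A quick usage sketch of the static factories (all values are made up for illustration):
java
// FactoryMethodDemo.java -- illustrative only; values are hypothetical.
package com.example.kafka.model;

import java.math.BigDecimal;

public class FactoryMethodDemo {
    public static void main(String[] args) {
        // Build an order-created event via the static factory:
        OrderEvent created = OrderEvent.orderCreated(
                "order-service", "ORD-20240101-0001", 1001L, 2002L, 2, new BigDecimal("199.90"));
        // Later, a cancellation event for the same order:
        OrderEvent cancelled = OrderEvent.orderCancelled(
                "order-service", created.getOrderNumber(), 1001L, "user requested cancellation");
        System.out.println(created.getEventType() + " -> " + cancelled.getEventType());
    }
}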
User event message
java
// UserEvent.java
package com.example.kafka.model;
import lombok.Data;
import lombok.EqualsAndHashCode;
import java.time.LocalDateTime;
@Data
@EqualsAndHashCode(callSuper = true)
public class UserEvent extends BaseEvent {
private Long userId;
private String username;
private String email;
private String phone;
private String action; // REGISTER, LOGIN, LOGOUT, UPDATE, DELETE
private LocalDateTime actionTime;
private String ipAddress;
private String userAgent;
public UserEvent() {
super();
}
public UserEvent(String eventType, String source, Long userId, String action) {
super(eventType, source);
this.userId = userId;
this.action = action;
this.actionTime = LocalDateTime.now();
}
@Override
public Object getEventData() {
return this;
}
/**
* Create a user-registered event
*/
public static UserEvent userRegistered(String source, Long userId, String username,
String email, String phone) {
UserEvent event = new UserEvent("USER_REGISTERED", source, userId, "REGISTER");
event.setUsername(username);
event.setEmail(email);
event.setPhone(phone);
return event;
}
/**
* Create a user-logged-in event
*/
public static UserEvent userLoggedIn(String source, Long userId, String username,
String ipAddress, String userAgent) {
UserEvent event = new UserEvent("USER_LOGGED_IN", source, userId, "LOGIN");
event.setUsername(username);
event.setIpAddress(ipAddress);
event.setUserAgent(userAgent);
return event;
}
/**
* Create a user-updated event
*/
public static UserEvent userUpdated(String source, Long userId, String username) {
UserEvent event = new UserEvent("USER_UPDATED", source, userId, "UPDATE");
event.setUsername(username);
return event;
}
}
✅ Part 3: Kafka Producers and Consumers (90 minutes)
1. Kafka Message Producer
Event message producer
java
// EventMessageProducer.java
package com.example.kafka.producer;
import com.example.kafka.model.BaseEvent;
import com.example.kafka.model.OrderEvent;
import com.example.kafka.model.UserEvent;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import java.util.List;
import java.util.concurrent.CompletableFuture;
@Slf4j
@Service
public class EventMessageProducer {
@Autowired
private KafkaTemplate<String, String> kafkaTemplate;
@Autowired
private ObjectMapper objectMapper;
@Value("${kafka.topic.order-events}")
private String orderEventsTopic;
@Value("${kafka.topic.user-events}")
private String userEventsTopic;
@Value("${kafka.topic.payment-events}")
private String paymentEventsTopic;
@Value("${kafka.topic.notification-events}")
private String notificationEventsTopic;
/**
* Send an order event
*/
public CompletableFuture<SendResult<String, String>> sendOrderEvent(OrderEvent event) {
try {
String message = objectMapper.writeValueAsString(event);
String key = event.getOrderNumber();
log.info("Sending order event: {} -> {}", key, event.getEventType());
// With spring-kafka 3.x, send() already returns a CompletableFuture
return kafkaTemplate.send(orderEventsTopic, key, message)
.whenComplete((result, throwable) -> {
if (throwable != null) {
log.error("Failed to send order event: {}", key, throwable);
} else {
log.info("Order event sent: {} -> partition: {}, offset: {}",
key, result.getRecordMetadata().partition(),
result.getRecordMetadata().offset());
}
});
} catch (JsonProcessingException e) {
log.error("Failed to serialize order event: {}", event.getOrderNumber(), e);
return CompletableFuture.failedFuture(e);
}
}
/**
* Send a user event
*/
public CompletableFuture<SendResult<String, String>> sendUserEvent(UserEvent event) {
try {
String message = objectMapper.writeValueAsString(event);
String key = String.valueOf(event.getUserId());
log.info("Sending user event: {} -> {}", key, event.getEventType());
return kafkaTemplate.send(userEventsTopic, key, message)
.whenComplete((result, throwable) -> {
if (throwable != null) {
log.error("Failed to send user event: {}", key, throwable);
} else {
log.info("User event sent: {} -> partition: {}, offset: {}",
key, result.getRecordMetadata().partition(),
result.getRecordMetadata().offset());
}
});
} catch (JsonProcessingException e) {
log.error("Failed to serialize user event: {}", event.getUserId(), e);
return CompletableFuture.failedFuture(e);
}
}
/**
* Send an arbitrary event to a given topic
*/
public <T extends BaseEvent> CompletableFuture<SendResult<String, String>> sendEvent(
T event, String topic, String key) {
try {
String message = objectMapper.writeValueAsString(event);
log.info("Sending event: {} -> {}", key, event.getEventType());
return kafkaTemplate.send(topic, key, message)
.whenComplete((result, throwable) -> {
if (throwable != null) {
log.error("Failed to send event: {}", key, throwable);
} else {
log.info("Event sent: {} -> partition: {}, offset: {}",
key, result.getRecordMetadata().partition(),
result.getRecordMetadata().offset());
}
});
} catch (JsonProcessingException e) {
log.error("Failed to serialize event: {}", key, e);
return CompletableFuture.failedFuture(e);
}
}
/**
* Send a batch of events
*/
public void sendBatchEvents(List<BaseEvent> events, String topic) {
for (BaseEvent event : events) {
String key = generateKey(event);
sendEvent(event, topic, key);
}
log.info("Batch send submitted, count: {}", events.size());
}
/**
* Derive the message key from the event type
*/
private String generateKey(BaseEvent event) {
if (event instanceof OrderEvent) {
return ((OrderEvent) event).getOrderNumber();
} else if (event instanceof UserEvent) {
return String.valueOf(((UserEvent) event).getUserId());
} else {
return event.getEventId();
}
}
}
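A minimal REST endpoint sketch that drives the producer for manual testing (the mapping path and request shape are assumptions for demonstration, not part of the lesson's project):
java
// OrderEventController.java -- hypothetical entry point for manual testing.
package com.example.kafka.controller;

import com.example.kafka.model.OrderEvent;
import com.example.kafka.producer.EventMessageProducer;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.math.BigDecimal;

@RestController
public class OrderEventController {
    private final EventMessageProducer producer;

    public OrderEventController(EventMessageProducer producer) {
        this.producer = producer;
    }

    @PostMapping("/orders/{orderNumber}/created")
    public String publishOrderCreated(@PathVariable String orderNumber,
                                      @RequestParam Long userId,
                                      @RequestParam BigDecimal amount) {
        OrderEvent event = OrderEvent.orderCreated("order-service", orderNumber,
                userId, null, 1, amount);
        producer.sendOrderEvent(event); // fire-and-forget; the future logs the outcome
        return "accepted";
    }
}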
2. Kafka Message Consumer
Event message consumer
java
// EventMessageConsumer.java
package com.example.kafka.consumer;
import com.example.kafka.model.BaseEvent;
import com.example.kafka.model.OrderEvent;
import com.example.kafka.model.UserEvent;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Service;
import java.util.List;
@Slf4j
@Service
public class EventMessageConsumer {
@Autowired
private ObjectMapper objectMapper;
@Autowired
private OrderEventProcessor orderEventProcessor;
@Autowired
private UserEventProcessor userEventProcessor;
/**
* Consume order events (batch). In batch mode every header arrives as a list,
* one entry per record.
*/
@KafkaListener(
topics = "${kafka.topic.order-events}",
groupId = "order-events-group",
containerFactory = "kafkaListenerContainerFactory"
)
public void consumeOrderEvents(@Payload List<String> messages,
@Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics,
@Header(KafkaHeaders.RECEIVED_PARTITION) List<Integer> partitions,
@Header(KafkaHeaders.OFFSET) List<Long> offsets,
Acknowledgment acknowledgment) {
log.info("Received order events, count: {}, topic: {}", messages.size(), topics.get(0));
for (int i = 0; i < messages.size(); i++) {
try {
String message = messages.get(i);
Integer partition = partitions.get(i);
Long offset = offsets.get(i);
log.debug("Processing message: partition={}, offset={}, message={}", partition, offset, message);
// Deserialize the message
OrderEvent event = objectMapper.readValue(message, OrderEvent.class);
// Process the order event
orderEventProcessor.processOrderEvent(event);
} catch (Exception e) {
log.error("Failed to process order event: partition={}, offset={}",
partitions.get(i), offsets.get(i), e);
}
}
// Acknowledge manually
acknowledgment.acknowledge();
log.info("Order event batch processed, count: {}", messages.size());
}
/**
* Consume user events (batch)
*/
@KafkaListener(
topics = "${kafka.topic.user-events}",
groupId = "user-events-group",
containerFactory = "kafkaListenerContainerFactory"
)
public void consumeUserEvents(@Payload List<String> messages,
@Header(KafkaHeaders.RECEIVED_TOPIC) List<String> topics,
@Header(KafkaHeaders.RECEIVED_PARTITION) List<Integer> partitions,
@Header(KafkaHeaders.OFFSET) List<Long> offsets,
Acknowledgment acknowledgment) {
log.info("Received user events, count: {}, topic: {}", messages.size(), topics.get(0));
for (int i = 0; i < messages.size(); i++) {
try {
String message = messages.get(i);
Integer partition = partitions.get(i);
Long offset = offsets.get(i);
log.debug("Processing message: partition={}, offset={}, message={}", partition, offset, message);
// Deserialize the message
UserEvent event = objectMapper.readValue(message, UserEvent.class);
// Process the user event
userEventProcessor.processUserEvent(event);
} catch (Exception e) {
log.error("Failed to process user event: partition={}, offset={}",
partitions.get(i), offsets.get(i), e);
}
}
// Acknowledge manually
acknowledgment.acknowledge();
log.info("User event batch processed, count: {}", messages.size());
}
/**
* Consume a single message (for testing).
* Note: with spring.kafka.listener.type set to batch, this record-style listener
* needs its own non-batch container factory; it is shown here for illustration.
*/
@KafkaListener(
topics = "${kafka.topic.order-events}",
groupId = "order-events-single-group"
)
public void consumeSingleOrderEvent(@Payload String message,
@Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
@Header(KafkaHeaders.RECEIVED_PARTITION) Integer partition,
@Header(KafkaHeaders.OFFSET) Long offset) {
try {
log.info("Received single order event: topic={}, partition={}, offset={}", topic, partition, offset);
// Deserialize the message
OrderEvent event = objectMapper.readValue(message, OrderEvent.class);
// Process the order event
orderEventProcessor.processOrderEvent(event);
} catch (Exception e) {
log.error("Failed to process single order event: partition={}, offset={}", partition, offset, e);
}
}
}
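The listeners above log failures and still acknowledge the batch, so a poison message is effectively dropped. A common hardening step is a DefaultErrorHandler with a dead-letter recoverer; a sketch follows (spring-kafka's DeadLetterPublishingRecoverer forwards failed records to a topic named <original-topic>.DLT by default, and Spring Boot applies a single CommonErrorHandler bean to the auto-configured container factory):
java
// KafkaErrorHandlingConfig.java -- sketch: retry twice, then publish to <topic>.DLT.
package com.example.kafka.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.DeadLetterPublishingRecoverer;
import org.springframework.kafka.listener.DefaultErrorHandler;
import org.springframework.util.backoff.FixedBackOff;

@Configuration
public class KafkaErrorHandlingConfig {
    @Bean
    public DefaultErrorHandler kafkaErrorHandler(KafkaTemplate<String, String> kafkaTemplate) {
        // After retries are exhausted, the record is forwarded to "<original-topic>.DLT".
        DeadLetterPublishingRecoverer recoverer = new DeadLetterPublishingRecoverer(kafkaTemplate);
        // 1s pause between attempts, 2 retries after the initial failure.
        return new DefaultErrorHandler(recoverer, new FixedBackOff(1000L, 2L));
    }
}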
3. Event Processors
Order event processor
java
// OrderEventProcessor.java
package com.example.kafka.consumer;
import com.example.kafka.model.OrderEvent;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
@Slf4j
@Service
public class OrderEventProcessor {
/**
* Dispatch an order event by type
*/
public void processOrderEvent(OrderEvent event) {
try {
log.info("Processing order event: {} -> {}", event.getOrderNumber(), event.getEventType());
switch (event.getEventType()) {
case "ORDER_CREATED":
processOrderCreated(event);
break;
case "ORDER_PAID":
processOrderPaid(event);
break;
case "ORDER_CANCELLED":
processOrderCancelled(event);
break;
default:
log.warn("Unknown order event type: {}", event.getEventType());
}
log.info("Order event processed: {} -> {}", event.getOrderNumber(), event.getEventType());
} catch (Exception e) {
log.error("Failed to process order event: {}", event.getOrderNumber(), e);
throw new RuntimeException("Order event processing failed", e);
}
}
/**
* Handle an order-created event
*/
private void processOrderCreated(OrderEvent event) {
log.info("Handling order-created event: {}", event.getOrderNumber());
// 1. Update the order status
updateOrderStatus(event.getOrderNumber(), "CREATED");
// 2. Request an inventory deduction
sendInventoryDeductionRequest(event);
// 3. Send the order-created notification
sendOrderCreatedNotification(event);
// 4. Record the audit log
logOrderEvent(event, "Order created");
}
/**
* Handle an order-paid event
*/
private void processOrderPaid(OrderEvent event) {
log.info("Handling order-paid event: {}", event.getOrderNumber());
// 1. Update the order status
updateOrderStatus(event.getOrderNumber(), "PAID");
// 2. Confirm the inventory deduction
sendInventoryConfirmationRequest(event);
// 3. Send the payment-success notification
sendPaymentSuccessNotification(event);
// 4. Record the audit log
logOrderEvent(event, "Order paid");
}
/**
* Handle an order-cancelled event
*/
private void processOrderCancelled(OrderEvent event) {
log.info("Handling order-cancelled event: {}", event.getOrderNumber());
// 1. Update the order status
updateOrderStatus(event.getOrderNumber(), "CANCELLED");
// 2. Restore the reserved inventory
sendInventoryRestorationRequest(event);
// 3. Send the order-cancelled notification
sendOrderCancelledNotification(event);
// 4. Record the audit log
logOrderEvent(event, "Order cancelled, reason: " + event.getReason());
}
/**
* Update the order status
*/
private void updateOrderStatus(String orderNumber, String status) {
log.info("Updating order status: {} -> {}", orderNumber, status);
// In a real system this would call the order service
}
/**
* Request an inventory deduction
*/
private void sendInventoryDeductionRequest(OrderEvent event) {
log.info("Requesting inventory deduction: {}", event.getOrderNumber());
// In a real system this would call the inventory service
}
/**
* Confirm the inventory deduction
*/
private void sendInventoryConfirmationRequest(OrderEvent event) {
log.info("Confirming inventory deduction: {}", event.getOrderNumber());
// In a real system this would call the inventory service
}
/**
* Restore the reserved inventory
*/
private void sendInventoryRestorationRequest(OrderEvent event) {
log.info("Restoring inventory: {}", event.getOrderNumber());
// In a real system this would call the inventory service
}
/**
* Send the order-created notification
*/
private void sendOrderCreatedNotification(OrderEvent event) {
log.info("Sending order-created notification: {}", event.getOrderNumber());
// In a real system this would call the notification service
}
/**
* Send the payment-success notification
*/
private void sendPaymentSuccessNotification(OrderEvent event) {
log.info("Sending payment-success notification: {}", event.getOrderNumber());
// In a real system this would call the notification service
}
/**
* Send the order-cancelled notification
*/
private void sendOrderCancelledNotification(OrderEvent event) {
log.info("Sending order-cancelled notification: {}", event.getOrderNumber());
// In a real system this would call the notification service
}
/**
* Record an order event audit entry
*/
private void logOrderEvent(OrderEvent event, String message) {
log.info("Order event log: {} - {} - {}", event.getOrderNumber(), event.getEventType(), message);
// In a real system this would call the logging/audit service
}
}
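Because the consumer acknowledges manually and Kafka delivers at-least-once, processOrderEvent may see the same event twice (for example after a rebalance). A minimal dedupe sketch keyed on eventId is shown below; it is in-memory only, so a real deployment would use Redis or a database unique constraint instead:
java
// Sketch: add to OrderEventProcessor (requires java.util.Set and
// java.util.concurrent.ConcurrentHashMap imports). In-memory only,
// so the guard does not survive restarts.
private final Set<String> processedEventIds = ConcurrentHashMap.newKeySet();

public void processOrderEventIdempotently(OrderEvent event) {
    // Set.add returns false when the id is already present -> duplicate delivery.
    if (!processedEventIds.add(event.getEventId())) {
        log.info("Duplicate event skipped: {}", event.getEventId());
        return;
    }
    processOrderEvent(event);
}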
✅ Part 4: Kafka Stream Processing (60 minutes)
1. Kafka Streams Configuration
Kafka Streams configuration class
java
// KafkaStreamsConfig.java
package com.example.kafka.config;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafkaStreams;
import org.springframework.kafka.annotation.KafkaStreamsDefaultConfiguration;
import org.springframework.kafka.config.KafkaStreamsConfiguration;
import java.util.HashMap;
import java.util.Map;
@Configuration
@EnableKafkaStreams
public class KafkaStreamsConfig {
@Value("${spring.kafka.bootstrap-servers}")
private String bootstrapServers;
@Value("${spring.application.name}")
private String applicationName;
/**
* Default Kafka Streams configuration
*/
@Bean(name = KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME)
public KafkaStreamsConfiguration kStreamsConfig() {
Map<String, Object> props = new HashMap<>();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationName + "-streams");
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
// Processing guarantees
props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
props.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 3);
props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 1000);
// State store directory
props.put(StreamsConfig.STATE_DIR_CONFIG, "/tmp/kafka-streams");
// Record cache size
props.put(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, 10 * 1024 * 1024); // 10MB
// Replication factor for internal topics
props.put(StreamsConfig.REPLICATION_FACTOR_CONFIG, 1);
return new KafkaStreamsConfiguration(props);
}
}
2. Stream Processing Services
Order stream processor
java
// OrderStreamProcessor.java
package com.example.kafka.streams;
import com.example.kafka.model.OrderEvent;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.*;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.time.Duration;
@Slf4j
@Configuration
public class OrderStreamProcessor {
@Autowired
private ObjectMapper objectMapper;
@Value("${kafka.topic.order-events}")
private String orderEventsTopic;
@Value("${kafka.topic.order-statistics}")
private String orderStatisticsTopic;
@Value("${kafka.topic.order-alerts}")
private String orderAlertsTopic;
/**
* Build the order-processing topology. Note: a topic may only be registered as a
* source once per topology, so the statistics, alerting, and per-user branches
* all share the single stream created here.
*/
@Bean
public KStream<String, String> buildOrderStream(StreamsBuilder streamsBuilder) {
// 1. Read from the order events topic
KStream<String, String> orderStream = streamsBuilder.stream(orderEventsTopic);
// 2. Keep only order-created events
KStream<String, String> orderCreatedStream = orderStream
.filter((key, value) -> {
try {
OrderEvent event = objectMapper.readValue(value, OrderEvent.class);
return "ORDER_CREATED".equals(event.getEventType());
} catch (Exception e) {
log.error("Failed to parse order event", e);
return false;
}
});
// 3. Count orders per one-minute window
KTable<Windowed<String>, Long> orderCountTable = orderCreatedStream
.groupBy((key, value) -> "total", Grouped.with(Serdes.String(), Serdes.String()))
.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(1)))
.count();
// 4. Sum order amounts per one-minute window (note the Double serde for aggregation)
KTable<Windowed<String>, Double> orderAmountTable = orderCreatedStream
.mapValues(value -> {
try {
OrderEvent event = objectMapper.readValue(value, OrderEvent.class);
return event.getAmount().doubleValue();
} catch (Exception e) {
log.error("Failed to parse order amount", e);
return 0.0;
}
})
.groupBy((key, value) -> "total", Grouped.with(Serdes.String(), Serdes.Double()))
.windowedBy(TimeWindows.ofSizeWithNoGrace(Duration.ofMinutes(1)))
.reduce(Double::sum);
// 5. Flag anomalous orders (amount above the threshold)
KStream<String, String> alertStream = orderCreatedStream
.filter((key, value) -> {
try {
OrderEvent event = objectMapper.readValue(value, OrderEvent.class);
return event.getAmount().doubleValue() > 10000; // amount above 10,000
} catch (Exception e) {
log.error("Anomalous-order check failed", e);
return false;
}
})
.mapValues(value -> {
try {
OrderEvent event = objectMapper.readValue(value, OrderEvent.class);
return String.format("Anomalous order: %s, amount: %.2f",
event.getOrderNumber(), event.getAmount());
} catch (Exception e) {
return "Anomalous-order detection failed";
}
});
// 6. Emit the windowed statistics
orderCountTable.toStream()
.map((key, value) -> KeyValue.pair(key.key(),
String.format("Order count: %d, window: %s", value, key.window())))
.to(orderStatisticsTopic, Produced.with(Serdes.String(), Serdes.String()));
orderAmountTable.toStream()
.map((key, value) -> KeyValue.pair(key.key(),
String.format("Order amount: %.2f, window: %s", value, key.window())))
.to(orderStatisticsTopic, Produced.with(Serdes.String(), Serdes.String()));
// 7. Emit the alerts
alertStream.to(orderAlertsTopic, Produced.with(Serdes.String(), Serdes.String()));
// 8. Per-user order counts for behavior analysis, reusing the same source stream
KTable<String, Long> userOrderCountTable = orderCreatedStream
.groupBy((key, value) -> {
try {
OrderEvent event = objectMapper.readValue(value, OrderEvent.class);
return String.valueOf(event.getUserId());
} catch (Exception e) {
return "unknown";
}
}, Grouped.with(Serdes.String(), Serdes.String()))
.count();
userOrderCountTable.toStream()
.map((key, value) -> KeyValue.pair(key,
String.format("User ID: %s, order count: %d", key, value)))
.to("user-order-statistics", Produced.with(Serdes.String(), Serdes.String()));
log.info("Order stream topology built");
return orderStream;
}
}
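The windowed topology above is easiest to verify offline with kafka-streams-test-utils (add it as a test-scope dependency). Below is a hedged sketch of a JUnit 5 test; the topology it builds inline is a simplified stand-in for the alert branch, since wiring the real OrderStreamProcessor into a test depends on your Spring setup:
java
// OrderAlertTopologyTest.java -- sketch; assumes kafka-streams-test-utils and
// JUnit 5 are on the test classpath.
package com.example.kafka.streams;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.*;
import org.apache.kafka.streams.kstream.KStream;
import org.junit.jupiter.api.Test;

import java.util.Properties;

import static org.junit.jupiter.api.Assertions.assertTrue;

class OrderAlertTopologyTest {

    @Test
    void highAmountOrderIsRoutedToAlertTopic() {
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> orders = builder.stream("order-events");
        // Stand-in for the real JSON-parsing filter: flag any payload with a 20000 amount.
        orders.filter((key, value) -> value.contains("\"amount\":20000"))
              .to("order-alerts");

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "alert-test");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"); // never contacted
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        try (TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
            TestInputTopic<String, String> input = driver.createInputTopic(
                    "order-events", new StringSerializer(), new StringSerializer());
            TestOutputTopic<String, String> alerts = driver.createOutputTopic(
                    "order-alerts", new StringDeserializer(), new StringDeserializer());

            input.pipeInput("ORD-1", "{\"orderNumber\":\"ORD-1\",\"amount\":20000}");
            assertTrue(alerts.readValue().contains("ORD-1"));
        }
    }
}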
🎯 Summary of Today's Study
1. Core skills covered
- ✅ Kafka fundamentals and architecture
- ✅ Kafka vs RabbitMQ comparison
- ✅ Kafka integration and configuration
- ✅ Implementing message producers and consumers
- ✅ Stream processing with Kafka Streams
2. Kafka core concepts
- Broker: a server node in the Kafka cluster
- Topic: a logical category of messages
- Partition: a shard of a topic, enabling parallel processing
- Consumer Group: a group of consumers sharing the load
- Offset: the position marker that lets consumers resume where they left off
3. Key Kafka characteristics
- High throughput: on the order of a million messages per second on a single broker
- Horizontal scalability: supports dynamic scale-out