1. 基础 Maven 依赖
xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.8.0</version> <!-- 使用与 Spring Boot 版本匹配的版本 -->
</dependency>
<!-- 如果需要 JSON 序列化 -->
<dependency>
    <groupId>com.fasterxml.jackson.core</groupId>
    <artifactId>jackson-databind</artifactId>
</dependency>
2. application.yml/application.properties 配置
2.1 生产者配置
yaml
spring:
  kafka:
    # 基础配置
    bootstrap-servers: localhost:9092
    # 生产者配置
    producer:
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
      # 其他可选序列化器:
      # org.apache.kafka.common.serialization.StringSerializer
      # org.apache.kafka.common.serialization.ByteArraySerializer
      # 可靠性配置
      acks: all                    # 或 0, 1
      retries: 3
      batch-size: 16384
      buffer-memory: 33554432
      # 性能配置
      compression-type: snappy     # gzip, lz4, zstd, none
      # 事务配置(配合幂等性使用)
      transaction-id-prefix: tx-
      # 没有专用 Spring Boot 字段的原生 Kafka 属性放在 properties 下
      properties:
        linger.ms: 1
        enable.idempotence: true
        max.request.size: 1048576
        request.timeout.ms: 30000
        delivery.timeout.ms: 120000
2.2 消费者配置
yaml
spring:
  kafka:
    # 消费者配置
    consumer:
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.springframework.kafka.support.serializer.JsonDeserializer
      # 其他可选反序列化器:
      # org.apache.kafka.common.serialization.StringDeserializer
      # org.apache.kafka.common.serialization.ByteArrayDeserializer
      # 消费组配置
      group-id: my-group
      auto-offset-reset: earliest   # latest, none
      enable-auto-commit: false     # 建议设置为 false,手动提交
      # 消费控制
      max-poll-records: 500
      fetch-min-size: 1
      fetch-max-wait: 500
      heartbeat-interval: 3000
      # 其他原生 Kafka 属性与 JSON 反序列化配置
      properties:
        max.poll.interval.ms: 300000
        # 分区分配策略,可选:
        # org.apache.kafka.clients.consumer.RangeAssignor
        # org.apache.kafka.clients.consumer.RoundRobinAssignor
        # org.apache.kafka.clients.consumer.StickyAssignor
        # org.apache.kafka.clients.consumer.CooperativeStickyAssignor
        partition.assignment.strategy: org.apache.kafka.clients.consumer.CooperativeStickyAssignor
        # 连接配置
        session.timeout.ms: 10000
        connections.max.idle.ms: 540000
        # JSON 反序列化信任的包与类型映射
        spring.json.trusted.packages: "*"
        spring.json.type.mapping: "myEvent:com.example.MyEvent"
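上面 type.mapping 中引用的 com.example.MyEvent 在后文示例中也会反复用到。这里给出一个假设的最简事件类作为示意,字段完全可以按业务自行定义:
java
// 假设的事件类,对应上面的 spring.json.type.mapping: "myEvent:com.example.MyEvent"
// JsonSerializer/JsonDeserializer 基于 Jackson,需要无参构造器和 getter/setter
public class MyEvent {

    private String id;
    private String payload;

    public MyEvent() {
    }

    public MyEvent(String id, String payload) {
        this.id = id;
        this.payload = payload;
    }

    public String getId() { return id; }
    public void setId(String id) { this.id = id; }
    public String getPayload() { return payload; }
    public void setPayload(String payload) { this.payload = payload; }

    @Override
    public String toString() {
        return "MyEvent{id='" + id + "', payload='" + payload + "'}";
    }
}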
2.3 监听器配置
yaml
spring:
  kafka:
    listener:
      # 监听器类型
      type: batch                  # single 或 batch
      # 确认模式
      ack-mode: manual_immediate   # manual, manual_immediate, record, batch, time, count, count_time
      # 并发配置
      concurrency: 3
      # 批处理配置
      poll-timeout: 5000
      # 其他配置
      auto-startup: true
      idle-event-interval: 30000
      # 注意:max-poll-records 属于 consumer 配置;
      # 重试与退避(backoff)不通过 yml 配置,而是在监听器容器工厂上设置错误处理器,见 4.2
2.4 管理配置
yaml
spring:
  kafka:
    admin:
      fail-fast: true
      properties:
        request.timeout.ms: 5000
        retries: 3
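配合 admin 配置,可以用 NewTopic Bean 让应用启动时自动创建(或校验)主题。下面是一个示意,主题名、分区数和副本数均为假设值:
java
@Configuration
public class KafkaTopicConfig {

    // 应用启动时由 KafkaAdmin 自动创建该主题;主题已存在时不会重复创建
    @Bean
    public NewTopic myTopic() {
        return TopicBuilder.name("my-topic")
                .partitions(3)
                .replicas(1)
                .build();
    }
}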
3. Java 配置类
3.1 生产者配置类
java
@Configuration
public class KafkaProducerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public ProducerFactory<String, Object> producerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
        // 可靠性与幂等性配置
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");
        configProps.put(ProducerConfig.RETRIES_CONFIG, 3);
        configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        // 事务 ID 前缀:配置后该工厂创建的 KafkaTemplate 只能在事务内发送
        // (如不需要事务可去掉这一行,事务配置见 3.3)
        configProps.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "tx-");
        // 不在消息头中携带类型信息
        configProps.put(JsonSerializer.ADD_TYPE_INFO_HEADERS, false);
        return new DefaultKafkaProducerFactory<>(configProps);
    }

    @Bean
    public KafkaTemplate<String, Object> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    // 纯字符串消息的生产者工厂与模板
    @Bean
    public ProducerFactory<String, String> stringProducerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        configProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return new DefaultKafkaProducerFactory<>(configProps);
    }

    @Bean
    public KafkaTemplate<String, String> stringKafkaTemplate() {
        return new KafkaTemplate<>(stringProducerFactory());
    }
}
3.2 消费者配置类
java
@Configuration
public class KafkaConsumerConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Bean
    public ConsumerFactory<String, Object> consumerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500);
        // JSON 反序列化信任的包与类型映射
        props.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
        props.put(JsonDeserializer.TYPE_MAPPINGS, "myEvent:com.example.MyEvent");
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(3);
        factory.getContainerProperties().setPollTimeout(3000);
        // 手动提交位移,配合监听器方法中的 Acknowledgment 使用(见 4.1);
        // 批量消费请使用单独的批量容器工厂(见 4.1 末尾的补充示例)
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // 设置重试机制(RetryTemplate 需要 spring-retry 依赖;
        // spring-kafka 2.8 起更推荐使用错误处理器,见 4.2)
        factory.setRetryTemplate(retryTemplate());
        factory.setRecoveryCallback(context -> {
            // 重试失败后的处理逻辑
            return null;
        });
        return factory;
    }

    private RetryTemplate retryTemplate() {
        RetryTemplate retryTemplate = new RetryTemplate();
        FixedBackOffPolicy backOffPolicy = new FixedBackOffPolicy();
        backOffPolicy.setBackOffPeriod(1000);
        SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy();
        retryPolicy.setMaxAttempts(3);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        retryTemplate.setRetryPolicy(retryPolicy);
        return retryTemplate;
    }
}
3.3 事务配置
java
@Configuration
@EnableKafka
public class KafkaTransactionConfig {

    @Bean
    public ProducerFactory<String, Object> transactionalProducerFactory() {
        Map<String, Object> configProps = new HashMap<>();
        // ... 基础配置(bootstrap-servers、序列化器等)
        configProps.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        configProps.put(ProducerConfig.ACKS_CONFIG, "all");
        DefaultKafkaProducerFactory<String, Object> factory =
                new DefaultKafkaProducerFactory<>(configProps);
        // 事务 ID 前缀,实际事务 ID 为前缀 + 递增序号
        factory.setTransactionIdPrefix("tx-");
        return factory;
    }

    @Bean
    public KafkaTransactionManager<String, Object> kafkaTransactionManager() {
        return new KafkaTransactionManager<>(transactionalProducerFactory());
    }
}
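配合上面的事务化生产者工厂,这里补充一个本地事务发送的草稿示例:通过 KafkaTemplate.executeInTransaction 执行,不依赖 @Transactional。其中的 kafkaTemplate 假定由上述事务化工厂创建,主题名与消息内容均为示意:
java
// 回调内的所有发送在同一个 Kafka 事务中:正常返回则提交,抛出异常则中止
kafkaTemplate.executeInTransaction(operations -> {
    operations.send("my-topic", "key-1", new MyEvent("1", "first"));
    operations.send("my-topic", "key-2", new MyEvent("2", "second"));
    return true;
});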
4. 监听器使用示例
4.1 单条消息消费
java
@Component
public class KafkaConsumer {

    @KafkaListener(
        topics = "my-topic",
        groupId = "my-group",
        containerFactory = "kafkaListenerContainerFactory"
    )
    public void consumeMessage(
            ConsumerRecord<String, MyEvent> record,
            Acknowledgment acknowledgment) {
        try {
            // 处理消息
            MyEvent event = record.value();
            System.out.println("Received message: " + event);
            // 手动提交偏移量
            acknowledgment.acknowledge();
        } catch (Exception e) {
            // 处理异常:未确认的消息在消费者重启或发生再均衡后会被重新投递
            System.err.println("Error processing message: " + e.getMessage());
        }
    }

    // 批量消费(batchFactory 的定义见本节末尾的补充示例)
    @KafkaListener(
        topics = "my-batch-topic",
        containerFactory = "batchFactory"
    )
    public void consumeBatch(List<ConsumerRecord<String, MyEvent>> records) {
        for (ConsumerRecord<String, MyEvent> record : records) {
            // 逐条批量处理
        }
    }
}
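上面批量消费示例引用的 batchFactory 在前文中没有定义,这里给出一个示意性的定义草稿(复用 3.2 中的 consumerFactory,并发数为假设值):
java
// 批量消费的监听器容器工厂:一次 poll 到的记录作为 List 交给监听器方法
@Bean
public ConcurrentKafkaListenerContainerFactory<String, Object> batchFactory(
        ConsumerFactory<String, Object> consumerFactory) {
    ConcurrentKafkaListenerContainerFactory<String, Object> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory);
    factory.setBatchListener(true);
    factory.setConcurrency(3);
    return factory;
}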
4.2 异常处理器
java
@Configuration
public class KafkaErrorHandlerConfig {

    // 注意:与 3.2 中的 kafkaListenerContainerFactory 同名,实际项目中二者保留其一即可
    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, Object> kafkaListenerContainerFactory(
            ConsumerFactory<String, Object> consumerFactory,
            KafkaTemplate<String, Object> kafkaTemplate) {
        ConcurrentKafkaListenerContainerFactory<String, Object> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory);
        // 设置错误处理器:按退避策略重试 3 次后将消息发布到死信主题
        // (SeekToCurrentErrorHandler 自 spring-kafka 2.8 起已被 DefaultErrorHandler 取代,见下方补充示例)
        factory.setErrorHandler(new SeekToCurrentErrorHandler(
                new DeadLetterPublishingRecoverer(kafkaTemplate),
                new FixedBackOff(1000L, 3L)
        ));
        // 设置重试模板(指数退避,需要 spring-retry 依赖)
        RetryTemplate retryTemplate = new RetryTemplate();
        ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy();
        backOffPolicy.setInitialInterval(1000);
        backOffPolicy.setMultiplier(2.0);
        backOffPolicy.setMaxInterval(10000);
        retryTemplate.setBackOffPolicy(backOffPolicy);
        factory.setRetryTemplate(retryTemplate);
        return factory;
    }
}
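spring-kafka 2.8 起,SeekToCurrentErrorHandler 与 setRetryTemplate 均已标记为过时。下面是一个基于 DefaultErrorHandler 的等效写法草稿,重试次数与退避参数沿用上文数值,仅供参考:
java
// spring-kafka 2.8+ 推荐用 DefaultErrorHandler 统一处理重试与死信
@Bean
public DefaultErrorHandler defaultErrorHandler(KafkaTemplate<String, Object> kafkaTemplate) {
    // 指数退避:初始 1s,倍数 2,最大 10s,最多重试 3 次
    ExponentialBackOffWithMaxRetries backOff = new ExponentialBackOffWithMaxRetries(3);
    backOff.setInitialInterval(1000L);
    backOff.setMultiplier(2.0);
    backOff.setMaxInterval(10000L);
    // 重试耗尽后把消息发布到 <topic>.DLT 死信主题
    return new DefaultErrorHandler(
            new DeadLetterPublishingRecoverer(kafkaTemplate), backOff);
}
使用时通过 factory.setCommonErrorHandler(defaultErrorHandler) 挂到监听器容器工厂上即可,无需再配置 RetryTemplate。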
5. 生产者使用示例
java
@Service
public class KafkaProducerService {

    @Autowired
    private KafkaTemplate<String, Object> kafkaTemplate;

    // 发送简单消息
    public void sendMessage(String topic, String message) {
        kafkaTemplate.send(topic, message);
    }

    // 发送带 key 的消息(相同 key 会路由到同一分区)
    public void sendMessageWithKey(String topic, String key, Object value) {
        kafkaTemplate.send(topic, key, value);
    }

    // 发送到指定分区
    public void sendToPartition(String topic, int partition, String key, Object value) {
        kafkaTemplate.send(topic, partition, key, value);
    }

    // 使用 ProducerRecord
    public void sendWithProducerRecord(String topic, Object value) {
        ProducerRecord<String, Object> record = new ProducerRecord<>(topic, value);
        kafkaTemplate.send(record);
    }

    // 事务性发送:需要事务化的生产者工厂和 KafkaTransactionManager(见 3.3)
    @Transactional
    public void sendInTransaction(String topic, List<Object> messages) {
        for (Object message : messages) {
            kafkaTemplate.send(topic, message);
        }
        // 方法抛出异常时事务回滚,read_committed 隔离级别的消费者不会读到这些消息
    }

    // 异步发送带回调(spring-kafka 2.x 返回 ListenableFuture,3.x 起改为 CompletableFuture)
    public void sendAsync(String topic, Object value) {
        ListenableFuture<SendResult<String, Object>> future = kafkaTemplate.send(topic, value);
        future.addCallback(
            result -> {
                System.out.println("Sent message: " + result.getProducerRecord().value());
                System.out.println("Partition: " + result.getRecordMetadata().partition());
                System.out.println("Offset: " + result.getRecordMetadata().offset());
            },
            ex -> {
                System.err.println("Failed to send message: " + ex.getMessage());
                // 失败后的重试 / 补偿逻辑
            }
        );
    }
}
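补充一个同步发送的小草稿,可以作为上面服务类中的一个方法(超时时间为示意值):
java
// 同步发送:阻塞等待 broker 确认并拿到分区与位移信息,超时或失败会抛出异常
public SendResult<String, Object> sendSync(String topic, Object value) throws Exception {
    return kafkaTemplate.send(topic, value).get(10, TimeUnit.SECONDS);
}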
6. 高级配置
6.1 拦截器配置
yaml
spring:
  kafka:
    producer:
      properties:
        interceptor.classes: "com.example.MyProducerInterceptor"
    consumer:
      properties:
        interceptor.classes: "com.example.MyConsumerInterceptor"
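配置中引用的 com.example.MyProducerInterceptor 需要实现 Kafka 客户端的 ProducerInterceptor 接口,下面是一个最简的示意实现(类名与打印逻辑均为假设):
java
// 最简生产者拦截器示意:发送前打印消息,收到发送结果回调时记录失败
public class MyProducerInterceptor implements ProducerInterceptor<String, Object> {

    @Override
    public ProducerRecord<String, Object> onSend(ProducerRecord<String, Object> record) {
        System.out.println("About to send to " + record.topic() + ": " + record.value());
        return record; // 可在此修改或包装消息
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            System.err.println("Send failed: " + exception.getMessage());
        }
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}
消费者拦截器同理,实现 ConsumerInterceptor 接口即可。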
6.2 SSL 安全配置
yaml
spring:
  kafka:
    bootstrap-servers: localhost:9093
    security:
      protocol: SSL
    ssl:
      key-password: password
      key-store-location: classpath:keystore.jks
      key-store-password: password
      trust-store-location: classpath:truststore.jks
      trust-store-password: password
6.3 SASL 认证配置
yaml
spring:
  kafka:
    bootstrap-servers: localhost:9092
    properties:
      security.protocol: SASL_PLAINTEXT
      sasl.mechanism: PLAIN
      sasl.jaas.config: >
        org.apache.kafka.common.security.plain.PlainLoginModule required
        username="admin"
        password="admin-secret";
6.4 监控配置
yaml
management:
  endpoints:
    web:
      exposure:
        include: health,metrics
# Kafka 客户端指标会自动绑定到 Micrometer,
# 可通过 /actuator/metrics/kafka.producer.* 和 /actuator/metrics/kafka.consumer.* 查看
7. 常用工具类
java
@Component
public class KafkaAdminUtils {

    private final KafkaAdmin kafkaAdmin;
    private final AdminClient adminClient;

    public KafkaAdminUtils(KafkaAdmin kafkaAdmin) {
        this.kafkaAdmin = kafkaAdmin;
        // AdminClient 默认不是 Spring Bean,基于 KafkaAdmin 的配置手动创建
        this.adminClient = AdminClient.create(kafkaAdmin.getConfigurationProperties());
    }

    // 创建或更新主题
    public void createTopic(String topicName, int partitions, short replicationFactor) {
        NewTopic newTopic = new NewTopic(topicName, partitions, replicationFactor);
        kafkaAdmin.createOrModifyTopics(newTopic);
    }

    // 获取主题列表
    public List<String> listTopics() throws Exception {
        ListTopicsResult topics = adminClient.listTopics();
        return new ArrayList<>(topics.names().get());
    }

    // 获取消费者组信息
    public void describeConsumerGroups() throws Exception {
        DescribeConsumerGroupsResult groups =
                adminClient.describeConsumerGroups(Collections.singleton("my-group"));
        groups.all().get().forEach((groupId, description) -> {
            System.out.println("Group: " + groupId);
            System.out.println("Members: " + description.members().size());
        });
    }
}
8. 最佳实践配置建议
1. 生产环境推荐配置:
yaml
spring:
  kafka:
    producer:
      acks: all
      retries: 2147483647          # Integer.MAX_VALUE
      compression-type: snappy
      properties:
        enable.idempotence: true
        max.in.flight.requests.per.connection: 5
    consumer:
      enable-auto-commit: false
      auto-offset-reset: latest
      max-poll-records: 500
      properties:
        max.poll.interval.ms: 300000
    listener:
      ack-mode: manual_immediate
      concurrency: 3
2. 性能优化配置:
- 根据消息大小和吞吐要求调整 batch.size 和 linger.ms
- 根据网络延迟调整 request.timeout.ms
- 根据可用内存调整 buffer.memory
3. 监控指标:
- 监控生产者发送成功率
- 监控消费者延迟(lag),检查方式见下方示例
- 监控分区负载均衡
- 监控消费者组健康状态
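下面补充一个消费延迟(lag)检查的草稿示例:用 AdminClient 比较消费组已提交位移与分区末端位移,方法名以及复用第 7 节 adminClient 的方式均为示意,可按需调整:
java
// 估算某个消费组在各分区上的积压:lag = 末端位移 - 已提交位移
// 相关类型位于 org.apache.kafka.clients.admin 与 org.apache.kafka.clients.consumer 包
public Map<TopicPartition, Long> consumerLag(AdminClient adminClient, String groupId) throws Exception {
    // 消费组当前已提交的位移
    Map<TopicPartition, OffsetAndMetadata> committed =
            adminClient.listConsumerGroupOffsets(groupId)
                       .partitionsToOffsetAndMetadata().get();
    // 查询同一批分区的末端位移
    Map<TopicPartition, OffsetSpec> request = new HashMap<>();
    committed.keySet().forEach(tp -> request.put(tp, OffsetSpec.latest()));
    Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> endOffsets =
            adminClient.listOffsets(request).all().get();
    // 逐分区计算差值
    Map<TopicPartition, Long> lag = new HashMap<>();
    committed.forEach((tp, meta) ->
            lag.put(tp, endOffsets.get(tp).offset() - meta.offset()));
    return lag;
}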