First, install Docker on your own and run Kafka inside a Docker container.
Docker installation guide for CentOS
1. Configure pom.xml and application.properties (or application.yml)
XML
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
yaml
spring:
  kafka:
    bootstrap-servers: [kafka-address-1,kafka-address-2,....]
    # producer serialization settings
    producer:
      # key serializer, set to a JSON serializer (uncomment if needed)
      # key-serializer: org.springframework.kafka.support.serializer.JsonSerializer
      # value serializer, set to a JSON serializer so objects are sent as JSON
      value-serializer: org.springframework.kafka.support.serializer.JsonSerializer
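The configuration above only covers the producer. If the consumer should receive deserialized objects (like the Person used later) rather than the raw JSON string, the consumer side can be given a matching deserializer. A minimal sketch, assuming the default String key deserializer; spring.json.trusted.packages controls which packages the JsonDeserializer may instantiate ("*" trusts everything):
yaml
spring:
  kafka:
    consumer:
      # value deserializer matching the producer's JsonSerializer
      value-deserializer: org.springframework.kafka.support.serializer.JsonDeserializer
      properties:
        # packages the JsonDeserializer is allowed to create objects from ("*" trusts all)
        spring.json.trusted.packages: "*"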
2. The author has the Kafka UI plugin installed, so the topic is created directly there
The current cluster has only one server (i.e. a single node/broker), since the author only set up one machine

Create the topic


If Kafka UI is not installed, create the topic from the application itself when the project starts
java
package com.atguigu.boot3_08_kafka;

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.TopicBuilder;

@EnableKafka // scan Kafka annotations and enable the annotation-driven mode
@SpringBootApplication
public class Boot308KafkaApplication {

    public static void main(String[] args) {
        SpringApplication.run(Boot308KafkaApplication.class, args);
    }

    // Register the topic as a bean so KafkaAdmin creates it on startup
    // (building it inside main() after run() would not actually create anything)
    @Bean
    public NewTopic myNewTopic() {
        return TopicBuilder.name("my-new-topic") // topic name
                .partitions(3)                   // partitions
                .replicas(2)                     // replicas (must not exceed the broker count)
                .build();
    }
}
A replica is just a backup copy. A topic can have at most as many replicas as there are nodes (brokers) in the cluster; with a single node you can still use N partitions, but only 1 replica.
3. Add the @EnableKafka annotation to the main application class
java
package com.atguigu.boot3_08_kafka;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.kafka.annotation.EnableKafka;

@EnableKafka // scan Kafka annotations and enable the annotation-driven mode
@SpringBootApplication
public class Boot308KafkaApplication {

    public static void main(String[] args) {
        SpringApplication.run(Boot308KafkaApplication.class, args);
    }
}
4. Producer: sending messages
java
package com.atguigu.boot3_08_kafka.controller;

import com.atguigu.boot3_08_kafka.entity.Person;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class KafkaController {

    @Autowired
    private KafkaTemplate kafkaTemplate;

    @GetMapping("/jjj")
    public String hello() {
        kafkaTemplate.send("tach", 0, "hello", "急急急132"); // send("topic", partition, "key", "value")
        return "ok";
    }

    @GetMapping("/odj")
    public String odj() {
        // sending an object requires JSON serialization; configure it in application.yml
        // (as above) or make the object serialize itself
        kafkaTemplate.send("tach", 0, "hello", new Person(1L, "odj", 19));
        return "OK";
    }
}
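If you want to know whether a message actually reached the broker, the result of the send can be inspected. A minimal sketch, assuming Spring Kafka 3.x (where send() returns a CompletableFuture<SendResult>); the /callback endpoint and the message text are only illustrative:
java
package com.atguigu.boot3_08_kafka.controller;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class KafkaCallbackController {

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    @GetMapping("/callback")
    public String sendWithCallback() {
        kafkaTemplate.send("tach", 0, "hello", "message with callback")
                .whenComplete((result, ex) -> {
                    if (ex == null) {
                        // RecordMetadata tells us which partition and offset the broker assigned
                        System.out.println("sent to partition " + result.getRecordMetadata().partition()
                                + ", offset " + result.getRecordMetadata().offset());
                    } else {
                        System.out.println("send failed: " + ex.getMessage());
                    }
                });
        return "ok";
    }
}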
5. Consumer: listening for messages
java
package com.atguigu.boot3_08_kafka.listener;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.stereotype.Component;

@Component
public class MykafkaListener {

    /**
     * By default a listener starts from the latest offset: it only receives new
     * messages, never historical ones.
     * @KafkaListener(topics = "topic", groupId = "consumer group")
     * ConsumerRecord carries the message itself plus its metadata (topic, partition, offset, ...)
     * @param record
     */
    @KafkaListener(topics = "tach", groupId = "teach")
    public void listen(ConsumerRecord<?, ?> record) {
        Object key = record.key();
        Object val = record.value();
        System.out.println("received key: " + key + ", value: " + val);
    }

    /**
     * To read historical (or all) messages, the offset has to be set explicitly.
     * @KafkaListener(groupId = "consumer group", topicPartitions = {partition and offset settings})
     * @TopicPartition(topic = "topic", partitionOffsets = offset settings)
     * @PartitionOffset(partition = "which partition", initialOffset = "offset to start from")
     *
     * @param record
     */
    @KafkaListener(groupId = "teach", topicPartitions = {
            @TopicPartition(topic = "tach", partitionOffsets = {
                    @PartitionOffset(partition = "0", initialOffset = "0")
            })
    })
    public void listens(ConsumerRecord<?, ?> record) {
        Object key = record.key();
        Object val = record.value();
        System.out.println("received key: " + key + ", value: " + val);
    }
}
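If the consumer-side JsonDeserializer sketched earlier is configured, a listener can receive the Person object directly instead of the raw JSON text. A minimal sketch, assuming that configuration and that this topic only carries Person values; the group id "teach-json" is illustrative:
java
package com.atguigu.boot3_08_kafka.listener;

import com.atguigu.boot3_08_kafka.entity.Person;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class PersonListener {

    // assumes the consumer value-deserializer is the JsonDeserializer shown earlier
    @KafkaListener(topics = "tach", groupId = "teach-json")
    public void listenPerson(ConsumerRecord<String, Person> record) {
        Person person = record.value(); // already deserialized from JSON
        System.out.println("received person: " + person);
    }
}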
Finally, check the result

One last bit of extra knowledge
groupId = "consumer group"
Members of the same group compete: each message is handled by only one of them
Between different groups it is publish/subscribe: every group receives every message
Partition assignment within a group is handled by Kafka's group coordinator (older clients relied on ZooKeeper for this)
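A quick way to see this behaviour is two listeners on the same topic but in different groups: both print every message, whereas two listeners sharing one groupId would split the partitions and each message would be handled only once. A minimal sketch; the group ids "group-a" and "group-b" are illustrative:
java
package com.atguigu.boot3_08_kafka.listener;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class GroupDemoListener {

    // group-a and group-b each receive every message sent to "tach" (publish/subscribe between groups)
    @KafkaListener(topics = "tach", groupId = "group-a")
    public void groupA(ConsumerRecord<?, ?> record) {
        System.out.println("group-a got: " + record.value());
    }

    @KafkaListener(topics = "tach", groupId = "group-b")
    public void groupB(ConsumerRecord<?, ?> record) {
        System.out.println("group-b got: " + record.value());
    }
}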
That's it, now you can show off to your interviewer.
A side note
If you send an object, it has to be serialized to JSON. You could make the class serialize itself, but that is not recommended: it is primitive and, more importantly, wastes resources.
Since we already configured the JsonSerializer at the beginning, any object we send is serialized automatically.
java
package com.atguigu.boot3_08_kafka.entity;

import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;

import java.io.Serializable;

@AllArgsConstructor
@NoArgsConstructor
@Data
public class Person implements Serializable { // implements Serializable is not recommended; the configured JsonSerializer already handles serialization
    private Long id;
    private String name;
    private Integer age;
}