Message Sending Workflow
Kafka's Producer sends messages asynchronously. Two threads are involved, the main thread and the Sender thread, along with one buffer shared between them: the RecordAccumulator.
The main thread writes messages into the RecordAccumulator; the Sender thread pulls messages from the RecordAccumulator and sends them to the Kafka broker.
Send flow
main thread (Producer -> Interceptors -> Serializer -> Partitioner) -> RecordAccumulator -> Sender thread
Interceptors: receive the record while its key and value are still Strings
Serializer: takes the Strings and turns them into bytes
Partitioner: receives the serialized bytes and picks a partition (see the sketch below)
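Each of these stages is pluggable. As an illustration of the last stage, here is a minimal sketch of a custom Partitioner; the class name MyPartitioner and the key-hashing strategy are assumptions for the example, not part of the original notes. Note that the partitioner runs after the serializer, which is why it receives byte arrays:

```java
package com.atguigu.producer;

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

import java.util.Arrays;
import java.util.Map;

// Hypothetical partitioner: spread records over partitions by key hash.
public class MyPartitioner implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes,
                         Object value, byte[] valueBytes, Cluster cluster) {
        int numPartitions = cluster.partitionCountForTopic(topic);
        // By this stage the serializer has already run, hence the byte[] arguments
        if (keyBytes == null) {
            return 0; // keyless records all land on partition 0 in this sketch
        }
        return Math.floorMod(Arrays.hashCode(keyBytes), numPartitions);
    }

    @Override
    public void close() { }

    @Override
    public void configure(Map<String, ?> configs) { }
}
```

It would be registered on the producer with properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, MyPartitioner.class.getName()).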
```xml
<!-- Import the Kafka client dependency -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>2.2.0</version>
</dependency>
```
```java
package com.atguigu.producer;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.util.Properties;

public class MyProducer {
    public static void main(String[] args) {
        // 1. Build the producer configuration; the ProducerConfig constants
        //    can be used for IDE auto-completion of the keys
        Properties properties = new Properties();
        // 2. Kafka cluster to connect to
        properties.put("bootstrap.servers", "hadoop102:9092");
        // 3. Acknowledgement mechanism: wait for all in-sync replicas
        properties.put("acks", "all");
        // 4. Number of retries
        properties.put("retries", "3");
        // 5. Key/value serializers (required, or the producer fails to start)
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Many more parameters can be set here
        // Create the producer object
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // Send data
        producer.send(new ProducerRecord<>("first", "atguigu"));
        // Close the connection
        producer.close();
    }
}
```
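send() also returns a Future<RecordMetadata>, so the asynchronous send can be turned into a synchronous one by blocking on get(). A minimal sketch under the same assumptions as above (the class name SyncProducer is hypothetical):

```java
package com.atguigu.producer;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;

// Hypothetical variant of MyProducer: blocking on the Future returned by
// send() waits for the broker's acknowledgement before continuing.
public class SyncProducer {
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "hadoop102:9092");
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // get() blocks until the record is acknowledged or an error surfaces
        RecordMetadata metadata = producer.send(new ProducerRecord<>("first", "atguigu")).get();
        System.out.println("partition=" + metadata.partition() + ", offset=" + metadata.offset());
        producer.close();
    }
}
```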
```java
package com.atguigu.producer;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class CallBackProducer {
    public static void main(String[] args) {
        // Build the configuration
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Create the producer object
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // Send data; the callback fires once the broker acknowledges the record
        for (int i = 0; i < 10; i++) {
            producer.send(new ProducerRecord<>("first", "atguigu--" + i),
                    (recordMetadata, e) -> {
                        if (e == null) {
                            System.out.println(recordMetadata.partition() + "--" + recordMetadata.offset());
                        } else {
                            e.printStackTrace();
                        }
                    });
        }
        // Release resources
        producer.close();
    }
}
```
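ProducerRecord also has overloads that take an explicit partition and key, which pair naturally with the callback: the returned metadata confirms where the record actually landed. A short sketch (the class name PartitionProducer and the choice of partition 0 are assumptions):

```java
package com.atguigu.producer;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

// Hypothetical example: pin a record to partition 0 via the
// ProducerRecord(topic, partition, key, value) overload.
public class PartitionProducer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);
        // Partition 0 is requested explicitly; the callback confirms it
        producer.send(new ProducerRecord<>("first", 0, "key1", "atguigu"),
                (metadata, e) -> {
                    if (e == null) {
                        System.out.println("partition=" + metadata.partition()
                                + ", offset=" + metadata.offset());
                    } else {
                        e.printStackTrace();
                    }
                });
        producer.close();
    }
}
```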
```java
package com.atguigu.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

public class MyConsumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // Kafka cluster to connect to
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        // Enable automatic offset commits
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Auto-commit interval
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Key/value deserializers
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        // Consumer group
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "bigdata");
        // Create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        // Subscribe to topics
        consumer.subscribe(Arrays.asList("first", "second"));
        // Poll for data
        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println(consumerRecord.key() + "--" + consumerRecord.value());
            }
        }
        // Close the connection (unreachable while the loop runs forever)
        // consumer.close();
    }
}
```
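Because enable.auto.commit is set to true above, offsets are committed in the background every auto.commit.interval.ms, so a crash between a poll and the next commit can re-deliver or skip records. A minimal sketch of the manual alternative, committing after each processed batch (the class name ManualCommitConsumer is hypothetical; same cluster and group assumed):

```java
package com.atguigu.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

// Hypothetical variant of MyConsumer: auto-commit is disabled and
// offsets are committed only after the polled batch has been processed.
public class ManualCommitConsumer {
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "bigdata");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        consumer.subscribe(Arrays.asList("first"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.key() + "--" + record.value());
            }
            // Block until the offsets of this batch are committed
            consumer.commitSync();
        }
    }
}
```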