1. Flume Official Documentation
https://flume.apache.org/releases/content/1.11.0/FlumeUserGuide.html#data-flow-model
2. Basic Structure of a Flume Configuration
properties
# example.conf: single-node Flume configuration
# 1. Declare the framework components
# Name the components on this agent
# source r1
a1.sources = r1
# sink k1
a1.sinks = k1
# channel c1
a1.channels = c1
# Describe/configure the source
a1.sources.r1.type = netcat
a1.sources.r1.bind = localhost
a1.sources.r1.port = 44444
# Describe the sink
a1.sinks.k1.type = logger
# Use a channel that buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
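With this file saved as example.conf, the agent can be started with `bin/flume-ng agent --conf conf --conf-file example.conf --name a1 -Dflume.root.logger=INFO,console`; note that `--name` must match the agent name (a1) used as the property prefix in the file.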
3. Introduction to Sources
1. Taildir Source
Tails files; it supports resuming from where it left off after a restart and monitoring multiple directories, but it may produce duplicate data and cannot tail binary files. A minimal sketch follows.
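A minimal Taildir Source sketch; the log paths under /var/log are hypothetical examples. The position file is a JSON file in which Flume records the last read offset of every tailed file, which is what enables resuming after an agent restart:
properties
a1.sources = r1
a1.sources.r1.type = TAILDIR
# JSON file recording the last read position of each tailed file
# (path is an example; this enables resuming after a restart)
a1.sources.r1.positionFile = /var/log/flume/taildir_position.json
# two file groups: a single file and a filename pattern (example paths)
a1.sources.r1.filegroups = f1 f2
a1.sources.r1.filegroups.f1 = /var/log/app1/app.log
a1.sources.r1.filegroups.f2 = /var/log/app2/.*log
a1.sources.r1.channels = c1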
2. Kafka Source
properties
tier1.sources.source1.type = org.apache.flume.source.kafka.KafkaSource
tier1.sources.source1.channels = channel1
# maximum number of messages written to the channel in one batch
tier1.sources.source1.batchSize = 5000
tier1.sources.source1.batchDurationMillis = 2000
# Kafka broker list
tier1.sources.source1.kafka.bootstrap.servers = localhost:9092
# topics to consume from, comma-separated
tier1.sources.source1.kafka.topics = test1, test2
# consumer group ID; within one group, each partition is consumed
# by at most one consumer at a time (standard Kafka semantics)
tier1.sources.source1.kafka.consumer.group.id = custom.g.id
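If the topic set changes over time, the Kafka Source can also subscribe by pattern; kafka.topics.regex overrides kafka.topics when both are set. A sketch, with an example pattern:
properties
# subscribe to every topic matching this regex;
# overrides kafka.topics if both are configured
tier1.sources.source1.kafka.topics.regex = ^test[0-9]+$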
4. Sinks
1. HDFS Sink
properties
a1.channels = c1
a1.sinks = k1
a1.sinks.k1.type = hdfs
a1.sinks.k1.channel = c1
# path: where files are written in HDFS
# e.g. hdfs://namenode/flume/webdata/
a1.sinks.k1.hdfs.path = /flume/events/%Y-%m-%d/%H%M/%S
# filePrefix: prefix for the generated file names
a1.sinks.k1.hdfs.filePrefix = events-
# round the event timestamp down before resolving the path escapes
a1.sinks.k1.hdfs.round = true
a1.sinks.k1.hdfs.roundValue = 10
# rollInterval: seconds before rolling to a new file
# (i.e. how often a new file is started)
a1.sinks.k1.hdfs.rollInterval = 30
# roundUnit: unit for the timestamp rounding above
a1.sinks.k1.hdfs.roundUnit = minute
# rollSize: file size in bytes that triggers a roll
# (for comparison, a 128 MB HDFS block is 134,217,728 bytes)
a1.sinks.k1.hdfs.rollSize = 1024
# rollCount: number of events that triggers a roll
a1.sinks.k1.hdfs.rollCount = 10
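The three roll triggers work together: the file rolls as soon as any of rollInterval, rollSize, or rollCount fires, and setting a trigger to 0 disables it. A sketch of a size-only policy aligned to a 128 MB HDFS block; hdfs.useLocalTimeStamp is included because the %Y/%m/%d escapes in hdfs.path otherwise require a timestamp header on every event:
properties
# roll only on file size; 0 disables the time- and count-based triggers
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0
# resolve %Y-%m-%d etc. using the agent's local time
# instead of a timestamp header on each event
a1.sinks.k1.hdfs.useLocalTimeStamp = true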
2. Kafka Sink
properties
a1.sinks.k1.channel = c1
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = mytopic
# comma-separated broker list
a1.sinks.k1.kafka.bootstrap.servers = localhost:9092
a1.sinks.k1.kafka.flumeBatchSize = 20
# acks: 0 = fire and forget, 1 = the leader must acknowledge,
# -1 (all) = all in-sync replicas must acknowledge
a1.sinks.k1.kafka.producer.acks = 1
a1.sinks.k1.kafka.producer.linger.ms = 1
# use whichever compression codec your Kafka cluster supports
a1.sinks.k1.kafka.producer.compression.type = snappy
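Per-event routing is also possible: if an event carries a topic header, the Kafka Sink publishes it to that topic instead of kafka.topic, and a key header, if present, is used as the Kafka message key for partitioning.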