I. Server environment

| No. | Component      | Version                              |
|-----|----------------|--------------------------------------|
| 1   | OS             | CentOS Linux release 7.9.2009 (Core) |
| 2   | docker         | Docker version 20.10.6               |
| 3   | docker-compose | docker-compose version 1.28.2        |
II. Service plan

| No. | Service   | Container name | Ports            |
|-----|-----------|----------------|------------------|
| 1   | zookeeper | zookeeper-sasl | 2181, 2888, 3888 |
| 2   | kafka     | kafka          | 9092             |
III. Deploying Kafka

1. Create the /opt/beidousky/kafka-zk directory and add the following docker-compose.yaml file; a sketch of the host directory tree it expects follows the file.
version: "3"
services:
  zookeeper-sasl:
    image: zookeeper:3.6.3
    container_name: zookeeper-sasl
    user: root
    restart: always
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      ZOO_MY_ID: 1
      TZ: Asia/Shanghai
    volumes:
      - ./zk-conf/zoo.cfg:/conf/zoo.cfg
      - ./zk-conf/zookeeper_server_jaas.conf:/conf/zookeeper_server_jaas.conf
      - ./zk-conf/java.env:/conf/java.env
      - ./zk-data/data:/data
      - ./zk-data/datalog:/datalog
      - ./zk-data/logs:/logs
  kafka:
    image: wurstmeister/kafka:2.13-2.8.1
    container_name: kafka
    depends_on:
      - zookeeper-sasl
    ports:
      - 9092:9092
    volumes:
      - ./kafka-data:/kafka
      - ./kafka-conf:/opt/kafka/secrets/
    environment:
      KAFKA_BROKER_ID: 0
      KAFKA_ADVERTISED_PORT: 9092
      #KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://192.168.1.244:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.244:9092
      #KAFKA_LISTENERS: SASL_PLAINTEXT://0.0.0.0:9092
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092
      #KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT
      KAFKA_PORT: 9092
      #KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      #KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
      #KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      #KAFKA_SUPER_USERS: User:admin
      # true = the ACL acts as a blacklist (only blacklisted users are denied); the default false makes it a whitelist (only whitelisted users are allowed)
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
      KAFKA_ZOOKEEPER_CONNECT: 192.168.1.244:2181
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_HEAP_OPTS: "-Xmx512M -Xms16M"
      KAFKA_OPTS: -Djava.security.auth.login.config=/opt/kafka/secrets/server_jaas.conf
    restart: always
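The compose file bind-mounts several host paths under /opt/beidousky/kafka-zk. A minimal sketch of the directory layout it expects (paths taken from the volume entries above; the config files inside zk-conf and kafka-conf are added in the following steps):

mkdir -p /opt/beidousky/kafka-zk/zk-conf
mkdir -p /opt/beidousky/kafka-zk/zk-data/{data,datalog,logs}
mkdir -p /opt/beidousky/kafka-zk/kafka-conf
mkdir -p /opt/beidousky/kafka-zk/kafka-data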
2. Create the /opt/beidousky/kafka-zk/zk-conf directory and add the following zoo.cfg file:
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
quorumListenOnAllIPs=true
server.1=192.168.1.244:2888:3888;2181
#server.2=192.168.1.xxx:2888:3888;2181
#server.3=192.168.1.xxx:2888:3888;2181
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
sessionRequireClientSASLAuth=true
#requireClientAuthScheme=sasl
jaasLoginRenew=3600000
3. In the /opt/beidousky/kafka-zk/zk-conf directory, add the following java.env file:
# Location of the JAAS file
SERVER_JVMFLAGS="-Djava.security.auth.login.config=/conf/zookeeper_server_jaas.conf"
4. In the /opt/beidousky/kafka-zk/zk-conf directory, add the following zookeeper_server_jaas.conf file. Credentials are declared either as user_<username>="<password>" or as username="<username>" password="<password>":
Server {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    user_admin="admin123"
    user_kafka="kafka123";
};
Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="kafka"
    password="kafka123";
};
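Once the stack is running (step 6), the SASL setup can be checked by opening a ZooKeeper CLI session that authenticates as the kafka user. This sketch reuses the Client section of the file above by passing it through CLIENT_JVMFLAGS, and assumes the official zookeeper image keeps zkCli.sh on the PATH:

# Should list the root znodes; with sessionRequireClientSASLAuth=true an unauthenticated session would be rejected
docker exec -it zookeeper-sasl bash -c 'CLIENT_JVMFLAGS="-Djava.security.auth.login.config=/conf/zookeeper_server_jaas.conf" zkCli.sh -server localhost:2181 ls /'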
5. Create the /opt/beidousky/kafka-zk/kafka-conf directory and add the following server_jaas.conf file:
Server {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    user_admin="admin123"
    user_kafka="kafka123";
};
Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="kafka"
    password="kafka123";
};
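With the PLAINTEXT listener from step 1, the broker only uses the Client section of this file: it is the account the broker presents when connecting to ZooKeeper over SASL, so its username/password must match a user_<name> entry in the step 4 file. If you later enable the commented-out SASL_PLAINTEXT settings in the compose file, the broker would additionally need a KafkaServer section based on Kafka's PlainLoginModule; a rough sketch only (not part of the deployment above, reusing the example credentials from this guide):

KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="admin123"
    user_admin="admin123"
    user_kafka="kafka123";
};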
6. Start the Kafka service
cd /opt/beidousky/kafka-zk
docker-compose up -d
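As a quick sanity check (a sketch: the container names, paths, and the 192.168.1.244 address come from the files above, and the topic name test is arbitrary):

# Both containers should show as Up
docker-compose ps
# ZooKeeper should report "Mode: standalone"
docker exec zookeeper-sasl zkServer.sh status
# Create and list a test topic through the PLAINTEXT listener
docker exec kafka /opt/kafka/bin/kafka-topics.sh --create --topic test --partitions 1 --replication-factor 1 --bootstrap-server 192.168.1.244:9092
docker exec kafka /opt/kafka/bin/kafka-topics.sh --list --bootstrap-server 192.168.1.244:9092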