有zookeeper
yaml
version: "3"
services:
zookeeper:
image: wurstmeister/zookeeper
networks:
- flink-net
kafka:
image: bitnami/kafka:3.2.0
environment:
- KAFKA_CFG_NODE_ID=1
- KAFKA_CFG_BROKER_ID=1
- KAFKA_CFG_ZOOKEEPER_CONNECT=zookeeper:2181
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
networks:
- flink-net
# jupyter:
# image: flink:py
# privileged: true
# ports:
# - "9999:8888"
# command: jupyter notebook --ip 0.0.0.0 --port 8888 --allow-root --NotebookApp.password=sha1:6587feaef3b1:6b243404e4cfaafe611fdf494ee71fdaa8c4a563
# networks:
# - flink-net
networks:
flink-net:
external: false
无zookeeper
yaml
version: "3"
services:
kafka:
image: bitnami/kafka:3.2.0
environment:
- KAFKA_CFG_PROCESS_ROLES=broker,controller
- KAFKA_CFG_NODE_ID=1
- KAFKA_CFG_BROKER_ID=1
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@localhost:9093
- ALLOW_PLAINTEXT_LISTENER=yes
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092
- KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT
- KAFKA_CFG_INTER_BROKER_LISTENER_NAME=PLAINTEXT
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
volumes:
- /data/kafka:/bitnami/kafka:rw
# networks:
# - flink-net
# jupyter:
# image: flink:py
# privileged: true
# ports:
# - "9999:8888"
# command: jupyter notebook --ip 0.0.0.0 --port 8888 --allow-root --NotebookApp.password=sha1:6587feaef3b1:6b243404e4cfaafe611fdf494ee71fdaa8c4a563
# networks:
# - flink-net
# networks:
# flink-net:
# external: false
meta.properties
ini
# Broker metadata file kept in the Kafka data directory.
# broker.id / node.id must match KAFKA_CFG_BROKER_ID / KAFKA_CFG_NODE_ID
# in the compose files above.
broker.id=1
node.id=1
version=1
# NOTE(review): cluster.id is presumably fixed when the storage directory is
# first formatted — keep it stable across restarts; verify before editing.
cluster.id=HT0-P_hzQU2uMo1H1aBUmg
Disk error while locking directory /bitnami/kafka/data
删除数据目录下的 .lock 文件即可。注意：若错误是 AccessDeniedException（如下所示），通常并非残留锁文件，而是宿主机挂载目录的权限问题——bitnami 镜像以非 root 用户（一般为 UID 1001）运行，需先执行 chown -R 1001:1001 /data/kafka 再重启容器。
[2024-09-30 06:13:08,901] ERROR Disk error while locking directory /bitnami/kafka/data (kafka.server.LogDirFailureChannel)
java.nio.file.AccessDeniedException: /bitnami/kafka/data/.lock
at java.base/sun.nio.fs.UnixException.translateToIOException(UnixException.java:90)
at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:111)
at java.base/sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:116)
at java.base/sun.nio.fs.UnixFileSystemProvider.newFileChannel(UnixFileSystemProvider.java:182)
at java.base/java.nio.channels.FileChannel.open(FileChannel.java:292)
at java.base/java.nio.channels.FileChannel.open(FileChannel.java:345)
at kafka.utils.FileLock.<init>(FileLock.scala:31)
at kafka.log.LogManager.$anonfun$lockLogDirs$1(LogManager.scala:239)
at scala.collection.TraversableLike.$anonfun$flatMap$1(TraversableLike.scala:293)
at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
at scala.collection.TraversableLike.flatMap(TraversableLike.scala:293)
at scala.collection.TraversableLike.flatMap$(TraversableLike.scala:290)
at scala.collection.AbstractTraversable.flatMap(Traversable.scala:108)
at kafka.log.LogManager.lockLogDirs(LogManager.scala:237)
at kafka.log.LogManager.<init>(LogManager.scala:113)
at kafka.log.LogManager$.apply(LogManager.scala:1319)
at kafka.server.BrokerServer.startup(BrokerServer.scala:203)
at kafka.server.KafkaRaftServer.$anonfun$startup$2(KafkaRaftServer.scala:114)
at kafka.server.KafkaRaftServer.$anonfun$startup$2$adapted(KafkaRaftServer.scala:114)
at scala.Option.foreach(Option.scala:407)
at kafka.server.KafkaRaftServer.startup(KafkaRaftServer.scala:114)
at kafka.Kafka$.main(Kafka.scala:109)
at kafka.Kafka.main(Kafka.scala)