Maxwell: publishing messages to Kafka

1. kafka-producer

Maxwell's kafka-producer configuration is documented at https://maxwells-daemon.io/config/#kafka-producer
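
Everything configured on the command line below can also live in Maxwell's standard config.properties file, which is the usual approach outside of quick tests. A minimal sketch mirroring this article's setup (any key prefixed with kafka. is passed straight through to the underlying Kafka producer client; anything not listed keeps Maxwell's default):

```properties
# Minimal Maxwell config.properties, mirroring the CLI flags used below.
# MySQL connection; Maxwell also keeps its own state in a `maxwell` schema.
user=maxwell
password=maxwell
host=192.168.74.148
port=3306

# Producer settings: kafka.* keys go directly to the Kafka producer.
producer=kafka
kafka.bootstrap.servers=192.168.74.148:9092
kafka_topic=maxwell
```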

2. Run Maxwell in a Docker container. Zendesk's Maxwell captures MySQL database changes in real time and publishes them to Kafka or other message systems.

```sh
docker run -it --rm zendesk/maxwell bin/maxwell \
    --user=maxwell \
    --password=maxwell \
    --host=192.168.74.148 \
    --port=3306 \
    --producer=kafka \
    --kafka.bootstrap.servers=192.168.74.148:9092 \
    --kafka_topic=maxwell
```
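
Before involving Kafka, it can help to confirm that Maxwell can read the binlog at all. A sketch using Maxwell's stdout producer, which prints the same JSON events straight to the terminal:

```sh
# Sanity check: identical connection settings, but events go to stdout,
# so broker problems can be ruled out separately from MySQL problems.
docker run -it --rm zendesk/maxwell bin/maxwell \
    --user=maxwell \
    --password=maxwell \
    --host=192.168.74.148 \
    --port=3306 \
    --producer=stdout
```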
```sh
[root@localhost ~]# docker ps
CONTAINER ID   IMAGE                    COMMAND                   CREATED        STATUS         PORTS                                                                                  NAMES
89bb2276fc3d   elasticsearch:8.8.2      "/bin/tini -- /usr/l..."   3 weeks ago    Up 5 days      0.0.0.0:9200->9200/tcp, :::9200->9200/tcp, 0.0.0.0:9300->9300/tcp, :::9300->9300/tcp   elasticsearch
34891ac3e05a   wurstmeister/kafka       "start-kafka.sh"          4 weeks ago    Up 3 minutes   0.0.0.0:9092->9092/tcp, :::9092->9092/tcp                                              kafka
8c71efe9dca7   wurstmeister/zookeeper   "/bin/sh -c '/usr/sb..."   4 weeks ago    Up 5 days      22/tcp, 2888/tcp, 3888/tcp, 0.0.0.0:2181->2181/tcp, :::2181->2181/tcp                  zookeeper
c14772057ab8   redis                    "docker-entrypoint.s..."   8 months ago   Up 5 days      0.0.0.0:6379->6379/tcp, :::6379->6379/tcp                                              spzx-redis
ab66508d9441   mysql:8                  "docker-entrypoint.s..."   8 months ago   Up 2 hours     0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp                                   spzx-mysql
[root@localhost ~]# docker run -it --rm zendesk/maxwell bin/maxwell \
>     --user=maxwell \
>     --password=maxwell \
>     --host=192.168.74.148 \
>     --port=3306 \
>     --producer=kafka \
>     --kafka.bootstrap.servers=192.168.74.148:9092 \
>     --kafka_topic=maxwell
2024-09-19 11:00:53 INFO  Maxwell - Starting Maxwell. maxMemory: 1031798784 bufferMemoryUsage: 0.25
2024-09-19 11:00:54 INFO  ProducerConfig - ProducerConfig values: 
        acks = 1
        batch.size = 16384
        bootstrap.servers = [192.168.74.148:9092]
        buffer.memory = 33554432
        client.dns.lookup = use_all_dns_ips
        client.id = producer-1
        compression.type = none
        connections.max.idle.ms = 540000
        delivery.timeout.ms = 120000
        enable.idempotence = false
        interceptor.classes = []
        internal.auto.downgrade.txn.commit = false
        key.serializer = class org.apache.kafka.common.serialization.StringSerializer
        linger.ms = 0
        max.block.ms = 60000
        max.in.flight.requests.per.connection = 5
        max.request.size = 1048576
        metadata.max.age.ms = 300000
        metadata.max.idle.ms = 300000
        metric.reporters = []
        metrics.num.samples = 2
        metrics.recording.level = INFO
        metrics.sample.window.ms = 30000
        partitioner.class = class org.apache.kafka.clients.producer.internals.DefaultPartitioner
        receive.buffer.bytes = 32768
        reconnect.backoff.max.ms = 1000
        reconnect.backoff.ms = 50
        request.timeout.ms = 30000
        retries = 2147483647
        retry.backoff.ms = 100
        sasl.client.callback.handler.class = null
        sasl.jaas.config = null
        sasl.kerberos.kinit.cmd = /usr/bin/kinit
        sasl.kerberos.min.time.before.relogin = 60000
        sasl.kerberos.service.name = null
        sasl.kerberos.ticket.renew.jitter = 0.05
        sasl.kerberos.ticket.renew.window.factor = 0.8
        sasl.login.callback.handler.class = null
        sasl.login.class = null
        sasl.login.refresh.buffer.seconds = 300
        sasl.login.refresh.min.period.seconds = 60
        sasl.login.refresh.window.factor = 0.8
        sasl.login.refresh.window.jitter = 0.05
        sasl.mechanism = GSSAPI
        security.protocol = PLAINTEXT
        security.providers = null
        send.buffer.bytes = 131072
        socket.connection.setup.timeout.max.ms = 127000
        socket.connection.setup.timeout.ms = 10000
        ssl.cipher.suites = null
        ssl.enabled.protocols = [TLSv1.2, TLSv1.3]
        ssl.endpoint.identification.algorithm = https
        ssl.engine.factory.class = null
        ssl.key.password = null
        ssl.keymanager.algorithm = SunX509
        ssl.keystore.certificate.chain = null
        ssl.keystore.key = null
        ssl.keystore.location = null
        ssl.keystore.password = null
        ssl.keystore.type = JKS
        ssl.protocol = TLSv1.3
        ssl.provider = null
        ssl.secure.random.implementation = null
        ssl.trustmanager.algorithm = PKIX
        ssl.truststore.certificates = null
        ssl.truststore.location = null
        ssl.truststore.password = null
        ssl.truststore.type = JKS
        transaction.timeout.ms = 60000
        transactional.id = null
        value.serializer = class org.apache.kafka.common.serialization.StringSerializer

2024-09-19 11:00:54 INFO  AppInfoParser - Kafka version: 2.7.0
2024-09-19 11:00:54 INFO  AppInfoParser - Kafka commitId: 448719dc99a19793
2024-09-19 11:00:54 INFO  AppInfoParser - Kafka startTimeMs: 1726743654433
2024-09-19 11:00:54 INFO  Maxwell - Maxwell v1.41.2 is booting (MaxwellKafkaProducer), starting at Position[BinlogPosition[spzxbinlog.000003:7129], lastHeartbeat=0]
2024-09-19 11:00:54 INFO  MysqlSavedSchema - Restoring schema id 1 (last modified at Position[BinlogPosition[spzxbinlog.000003:156], lastHeartbeat=0])
2024-09-19 11:00:55 INFO  BinlogConnectorReplicator - Setting initial binlog pos to: spzxbinlog.000003:7129
2024-09-19 11:00:55 INFO  BinaryLogClient - Connected to 192.168.74.148:3306 at spzxbinlog.000003/7129 (sid:6379, cid:60)
2024-09-19 11:00:55 INFO  BinlogConnectorReplicator - Binlog connected.
2024-09-19 11:00:55 INFO  Metadata - [Producer clientId=producer-1] Cluster ID: 9o7eoaK4T1KnGZkMvElpkg
```
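
If startup fails with an access-denied or binlog error instead of reaching "Binlog connected.", the MySQL side usually lacks its prerequisites: Maxwell needs binlog_format=ROW and a user with replication privileges plus full rights on its own maxwell bookkeeping schema. A sketch of the standard grants, run against the spzx-mysql container from the docker ps output above (the interactive root password prompt is an assumption about your setup):

```sh
# Hedged setup sketch: create the user Maxwell connects as, grant the
# replication privileges it needs, and verify row-based binlogging.
docker exec -it spzx-mysql mysql -uroot -p -e "
  CREATE USER IF NOT EXISTS 'maxwell'@'%' IDENTIFIED BY 'maxwell';
  GRANT ALL ON maxwell.* TO 'maxwell'@'%';
  GRANT SELECT, REPLICATION CLIENT, REPLICATION SLAVE ON *.* TO 'maxwell'@'%';
  SHOW VARIABLES LIKE 'binlog_format';"
```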

3. Enter the Kafka container and start a console consumer

```sh
[root@localhost ~]# docker exec -it kafka /bin/bash 
bash-5.1# kafka-console-consumer.sh --bootstrap-server 192.168.74.148:9092 --topic maxwell
```
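
By default the console consumer only prints messages that arrive after it starts, so events produced earlier are invisible. When testing, it is often handier to replay the whole topic, as in this sketch:

```sh
# Replay every Maxwell event from the start of the topic; this form runs
# from the host, without keeping a shell open inside the container.
docker exec kafka kafka-console-consumer.sh \
    --bootstrap-server 192.168.74.148:9092 \
    --topic maxwell \
    --from-beginning
```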

4. Insert a row in the tingshu_album database

```sh
[root@localhost ~]# docker exec -it kafka /bin/bash
bash-5.1# kafka-console-consumer.sh --bootstrap-server 192.168.74.148:9092 --topic maxwell
{"database":"tingshu_album","table":"base_category1","type":"insert","ts":1726743970,"xid":10619,"commit":true,"data":{"id":17,"name":"kafka","order_num":0,"create_time":"2024-09-19 11:06:10","update_time":"2024-09-19 11:06:10","is_deleted":0}}
cpp 复制代码
{
    "database": "tingshu_album",
    "table": "base_category1",
    "type": "insert",
    "ts": 1726743970,
    "xid": 10619,
    "commit": true,
    "data": {
        "id": 17,
        "name": "kafka",
        "order_num": 0,
        "create_time": "2024-09-19 11:06:10",
        "update_time": "2024-09-19 11:06:10",
        "is_deleted": 0
    }
}
```
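
Each event names the source database and table, classifies the change in type, and carries the commit timestamp ts, the transaction id xid, and the full row image in data. Because Maxwell emits one JSON document per line, the stream pipes cleanly into jq; a sketch that keeps only inserts into this table (assumes jq is installed on the host):

```sh
# Print just the row data of insert events for base_category1.
docker exec kafka kafka-console-consumer.sh \
    --bootstrap-server 192.168.74.148:9092 --topic maxwell --from-beginning \
  | jq 'select(.type == "insert" and .table == "base_category1") | .data'
```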

5. Update a row in the tingshu_album database

```sh
[root@localhost ~]# docker exec -it kafka /bin/bash
bash-5.1# kafka-console-consumer.sh --bootstrap-server 192.168.74.148:9092 --topic maxwell
{"database":"tingshu_album","table":"base_category1","type":"insert","ts":1726743970,"xid":10619,"commit":true,"data":{"id":17,"name":"kafka","order_num":0,"create_time":"2024-09-19 11:06:10","update_time":"2024-09-19 11:06:10","is_deleted":0}}
{"database":"tingshu_album","table":"base_category1","type":"update","ts":1726744191,"xid":11128,"commit":true,"data":{"id":17,"name":"xxx","order_num":0,"create_time":"2024-09-19 11:06:10","update_time":"2024-09-19 11:09:51","is_deleted":0},"old":{"name":"kafka","update_time":"2024-09-19 11:06:10"}}
cpp 复制代码
{
    "database": "tingshu_album",
    "table": "base_category1",
    "type": "update",
    "ts": 1726744191,
    "xid": 11128,
    "commit": true,
    "data": {
        "id": 17,
        "name": "xxx",
        "order_num": 0,
        "create_time": "2024-09-19 11:06:10",
        "update_time": "2024-09-19 11:09:51",
        "is_deleted": 0
    },
    "old": {
        "name": "kafka",
        "update_time": "2024-09-19 11:06:10"
    }
}
```
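
Note that old holds only the columns the UPDATE actually changed (here name and update_time), not the whole previous row. Overlaying old onto data therefore reconstructs the row as it looked before the update; a jq sketch, with events.jsonl standing in as a hypothetical file of captured events:

```sh
# In ".data + .old" the right-hand object wins for the changed columns,
# yielding the pre-update row image.
jq 'select(.type == "update") | .data + .old' events.jsonl
```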

6. Delete a row in the tingshu_album database

```sh
[root@localhost ~]# docker exec -it kafka /bin/bash
bash-5.1# kafka-console-consumer.sh --bootstrap-server 192.168.74.148:9092 --topic maxwell
{"database":"tingshu_album","table":"base_category1","type":"insert","ts":1726743970,"xid":10619,"commit":true,"data":{"id":17,"name":"kafka","order_num":0,"create_time":"2024-09-19 11:06:10","update_time":"2024-09-19 11:06:10","is_deleted":0}}
{"database":"tingshu_album","table":"base_category1","type":"update","ts":1726744191,"xid":11128,"commit":true,"data":{"id":17,"name":"xxx","order_num":0,"create_time":"2024-09-19 11:06:10","update_time":"2024-09-19 11:09:51","is_deleted":0},"old":{"name":"kafka","update_time":"2024-09-19 11:06:10"}}
{"database":"tingshu_album","table":"base_category1","type":"delete","ts":1726744396,"xid":11623,"commit":true,"data":{"id":17,"name":"xxx","order_num":0,"create_time":"2024-09-19 11:06:10","update_time":"2024-09-19 11:09:51","is_deleted":0}}
cpp 复制代码
{
    "database": "tingshu_album",
    "table": "base_category1",
    "type": "delete",
    "ts": 1726744396,
    "xid": 11623,
    "commit": true,
    "data": {
        "id": 17,
        "name": "xxx",
        "order_num": 0,
        "create_time": "2024-09-19 11:06:10",
        "update_time": "2024-09-19 11:09:51",
        "is_deleted": 0
    }
}
```
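
For deletes, data carries the last known values of the removed row. Tying sections 4 through 6 together, the statements below are a hedged reconstruction, inferred from the captured events, of the SQL that produced this insert/update/delete sequence (the id of 17 came from AUTO_INCREMENT at insert time, and the root password prompt is again an assumption):

```sh
# Replaying the demo: each committed statement emits one Maxwell event.
docker exec -it spzx-mysql mysql -uroot -p -e "
  INSERT INTO tingshu_album.base_category1 (name, order_num) VALUES ('kafka', 0);
  UPDATE tingshu_album.base_category1 SET name = 'xxx' WHERE id = 17;
  DELETE FROM tingshu_album.base_category1 WHERE id = 17;"
```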

7. Summary

Maxwell connects to MySQL as a replication client, reads row changes from the binlog, and publishes each insert, update, and delete as a one-line JSON event to the configured Kafka topic (maxwell here). Insert and delete events carry the full row image in data; update events additionally carry an old object with the previous values of the changed columns, which is what downstream consumers need for tasks such as cache invalidation or search-index synchronization.
