1. Download

- canal.deployer-1.1.8.tar.gz: the core Canal server (required);
- canal.adapter-1.1.8.tar.gz: the data-sync adapter;
- canal.admin-1.1.8.tar.gz: the Canal admin console;
- canal.example-1.1.8.tar.gz: the example package.
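For a minimal setup only the deployer package is needed. A hedged example of downloading and starting it (the asset URL follows the naming on the Canal GitHub releases page; adjust the tag/asset name if yours differs):

```bash
# download and extract the Canal server (deployer); URL pattern assumed from the GitHub releases page
wget https://github.com/alibaba/canal/releases/download/canal-1.1.8/canal.deployer-1.1.8.tar.gz
mkdir -p canal-deployer && tar -xzf canal.deployer-1.1.8.tar.gz -C canal-deployer
# start/stop scripts shipped with the deployer
sh canal-deployer/bin/startup.sh
# sh canal-deployer/bin/stop.sh
```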
2. Configure instance.properties for CDC and point it at your MySQL database (conf/example/instance.properties under the deployer directory)
```properties
# 1. Configure your MySQL address, account and password
# MySQL address (host:port, default port 3306)
canal.instance.master.address=127.0.0.1:3306
# your MySQL username
canal.instance.dbUsername=root
# your MySQL password
canal.instance.dbPassword=607607
# the database to listen to (e.g. test_db)
canal.instance.defaultDatabaseName=test_db
# 2. Keep the settings below at their defaults
canal.instance.connectionCharset = UTF-8
canal.instance.parser.parallelThreadSize = 1
```
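If you only want to capture databases with a certain prefix (for example canal_ or cdc_), the usual place to express that is Canal's own table filter in the same instance.properties rather than in MySQL. A minimal sketch; the prefixes are just examples:

```properties
# only subscribe to tables in databases whose names start with canal_ or cdc_
# (comma-separated regular expressions over schema.table; the default is .*\\..*)
canal.instance.filter.regex=canal_.*\\..*,cdc_.*\\..*
```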
3. MySQL setup: enable binlog and grant permissions
Canal captures data changes by parsing MySQL's binlog, so MySQL must have ROW-format binlog enabled (the core of CDC) and the correct privileges and parameters configured. The steps below apply to both Windows and Linux.
Locate the MySQL configuration file first:
- Windows: my.ini in the MySQL install directory (e.g. C:\Program Files\MySQL\MySQL Server 8.0\my.ini);
- Linux: /etc/my.cnf or /etc/mysql/my.cnf;
- If you can't find it, run \system mysql --help | findstr "my.ini" (Windows) or \system mysql --help | grep my.cnf (Linux) from MySQL Shell to locate it.
Modify the configuration file (enable CDC-compatible mode).
Add or adjust the following under the [mysqld] section (this is the core part):
```ini
[mysqld]
# 1. Enable binlog and set the log file name prefix (required)
log-bin=mysql-bin
# 2. binlog format must be ROW (Canal only supports ROW mode)
binlog_format=ROW
# 3. Server ID (must be unique; Canal needs it)
server_id=1
# 4. Character set (keep it consistent with Canal)
character-set-server=utf8mb4
# 5. Optional: only log specific databases.
#    Note: binlog-do-db takes exact database names and does not support wildcards such as canal_*;
#    prefix-based filtering is better done on the Canal side via canal.instance.filter.regex (see step 2).
# binlog-do-db=canal_db
# binlog-do-db=cdc_db
# 6. Other optimizations (optional)
binlog_row_image=FULL   # log the full row image (Canal needs it)
expire_logs_days=7      # keep binlog for 7 days (on MySQL 8.0+ prefer binlog_expire_logs_seconds)
```
After saving, restart MySQL so the new configuration takes effect.
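After the restart, confirm that binlog is actually on and in ROW mode before going further; these are standard MySQL checks:

```sql
-- verify the binlog settings took effect
SHOW VARIABLES LIKE 'log_bin';          -- expect ON
SHOW VARIABLES LIKE 'binlog_format';    -- expect ROW
SHOW VARIABLES LIKE 'binlog_row_image'; -- expect FULL
```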
4. Create a dedicated Canal account and grant privileges (required!)
Set the MySQL username and password that Canal will use in instance.properties; make sure they match the account you create in the SQL below.
```properties
# username/password (must match the account created below)
canal.instance.dbUsername=canal
canal.instance.dbPassword=canal
canal.instance.connectionCharset = UTF-8
# enable Druid decryption of the database password
canal.instance.enableDruid=false
```
Then create the account and grant privileges in MySQL:

```sql
-- 1. Create the account (choose your own password, e.g. Canal@123456)
CREATE USER 'canal'@'%' IDENTIFIED BY 'Canal@123456';
-- 2. Grant privileges (the minimum set Canal needs)
GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%';
-- 3. Flush privileges
FLUSH PRIVILEGES;
```
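Before wiring the account into Canal, a quick sanity check helps:

```sql
-- confirm the grants took effect
SHOW GRANTS FOR 'canal'@'%';
-- then log in as the new account (mysql -ucanal -p) and check binlog visibility:
-- SHOW MASTER STATUS;
```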
5. Add the Maven dependencies
Canal client version 1.1.8 is used; declare it once and reference it from the dependencies:

```xml
<!-- Canal CDC: declare the version in <properties> -->
<canal.version>1.1.8</canal.version>

<!-- and add these to <dependencies> -->
<dependency>
    <groupId>com.alibaba.otter</groupId>
    <artifactId>canal.client</artifactId>
    <version>${canal.version}</version>
</dependency>
<dependency>
    <groupId>com.alibaba.otter</groupId>
    <artifactId>canal.protocol</artifactId>
    <version>${canal.version}</version>
</dependency>
<dependency>
    <groupId>com.alibaba.otter</groupId>
    <artifactId>canal.common</artifactId>
    <version>${canal.version}</version>
</dependency>
```
6. Demo code
Canal connection configuration (CanalConfig):
```java
package com.example.cdc.config;

import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import org.apache.skywalking.apm.toolkit.trace.Trace;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class CanalConfig {

    private static final Logger logger = LoggerFactory.getLogger(CanalConfig.class);

    @Value("${canal.server.host:localhost}")
    private String canalHost;

    @Value("${canal.server.port:11111}")
    private int canalPort;

    @Value("${canal.destination:example}")
    private String destination;

    @Value("${canal.username:canal}")
    private String username;

    @Value("${canal.password:canal}")
    private String password;

    @Bean
    @Trace
    public CanalConnector canalConnector() {
        logger.info("Initializing Canal connector {}:{}, destination: {}", canalHost, canalPort, destination);
        // Create a single-node Canal connector
        return CanalConnectors.newSingleConnector(
                new java.net.InetSocketAddress(canalHost, canalPort),
                destination,
                username,
                password
        );
    }
}
```
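The @Value placeholders above map to ordinary Spring configuration keys. A minimal application.properties sketch, assuming a Spring Boot application (the values shown are simply the defaults declared in the class above; adjust to your deployment):

```properties
# Canal server connection used by CanalConfig
canal.server.host=127.0.0.1
canal.server.port=11111
canal.destination=example
canal.username=canal
canal.password=canal
```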
Listen for and handle changed database rows:
```java
@Trace
private void processRowChange(CanalEntry.Entry entry) {
    try {
        CanalEntry.RowChange rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
        CanalEntry.EventType eventType = rowChange.getEventType();
        String tableName = entry.getHeader().getTableName();
        String database = entry.getHeader().getSchemaName();

        logger.info("Detected database change: {}.{}, event type: {}, traceId: {}",
                database, tableName, eventType, TraceContext.traceId());
        ActiveSpan.tag("db.database", database);
        ActiveSpan.tag("db.table", tableName);
        ActiveSpan.tag("db.event", eventType.name());

        // Handle each changed row
        for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
            if (eventType == CanalEntry.EventType.DELETE) {
                // Delete
                handleDelete(database, tableName, rowData.getBeforeColumnsList());
            } else if (eventType == CanalEntry.EventType.INSERT) {
                // Insert
                handleInsert(database, tableName, rowData.getAfterColumnsList());
            } else if (eventType == CanalEntry.EventType.UPDATE) {
                // Update
                handleUpdate(database, tableName, rowData.getAfterColumnsList());
            }
            // Send a data-change alert
            String dataInfo = buildDataInfo(eventType == CanalEntry.EventType.DELETE ?
                    rowData.getBeforeColumnsList() : rowData.getAfterColumnsList());
            unifiedAlertService.sendDataChangeAlert(database, tableName, eventType.name(), dataInfo);
        }
    } catch (Exception e) {
        logger.error("Failed to process row change", e);
        // Send a system-error alert
        unifiedAlertService.sendSystemErrorAlert("cdc-service", "Failed to process row change", e.toString());
    }
}

@Trace
private void handleInsert(String database, String tableName, List<CanalEntry.Column> columns) {
    logger.info("Handling INSERT: {}.{}", database, tableName);
    esSyncService.syncToEs(database, tableName, columns, "INSERT");
}

@Trace
private void handleUpdate(String database, String tableName, List<CanalEntry.Column> columns) {
    logger.info("Handling UPDATE: {}.{}", database, tableName);
    esSyncService.syncToEs(database, tableName, columns, "UPDATE");
}

@Trace
private void handleDelete(String database, String tableName, List<CanalEntry.Column> columns) {
    logger.info("Handling DELETE: {}.{}", database, tableName);
    esSyncService.deleteFromEs(database, tableName, columns);
}

/**
 * Build a readable string from the changed columns.
 */
private String buildDataInfo(List<CanalEntry.Column> columns) {
    StringBuilder sb = new StringBuilder();
    for (CanalEntry.Column column : columns) {
        if (sb.length() > 0) {
            sb.append(", ");
        }
        sb.append(column.getName()).append("=").append(column.getValue());
    }
    return sb.toString();
}
```
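What the snippet above does not show is the loop that pulls batches from the CanalConnector and hands each entry to processRowChange. A minimal sketch, assuming it lives in the same listener class (the connector, logger and running fields, the batch size of 100, and the startListening method name are illustrative, not from the original project):

```java
// Sketch of the fetch/ack loop; assumed to sit in the same class as processRowChange.
// Extra imports needed: com.alibaba.otter.canal.protocol.Message,
// com.alibaba.otter.canal.protocol.CanalEntry, java.util.concurrent.TimeUnit
@Trace
public void startListening() {
    connector.connect();
    connector.subscribe(".*\\..*");   // subscribe to all schemas/tables; narrow the regex if needed
    connector.rollback();             // redeliver anything fetched but never acked
    while (running) {
        // pull up to 100 entries, waiting at most 1 second for new data
        Message message = connector.getWithoutAck(100, 1000L, TimeUnit.MILLISECONDS);
        long batchId = message.getId();
        if (batchId == -1 || message.getEntries().isEmpty()) {
            continue;                 // nothing new yet
        }
        try {
            for (CanalEntry.Entry entry : message.getEntries()) {
                // only ROWDATA entries carry INSERT/UPDATE/DELETE payloads
                if (entry.getEntryType() == CanalEntry.EntryType.ROWDATA) {
                    processRowChange(entry);
                }
            }
            connector.ack(batchId);   // confirm the whole batch
        } catch (Exception e) {
            logger.error("Failed to process batch {}", batchId, e);
            connector.rollback(batchId);  // ask the Canal server to redeliver this batch
        }
    }
    connector.disconnect();
}
```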