Table of Contents

- [1. Build a ZooKeeper cluster locally](#1-build-a-zookeeper-cluster-locally)
- [2. Build the ZooKeeper image](#2-build-the-zookeeper-image)
- [3. Migrate the ZooKeeper cluster to Kubernetes](#3-migrate-the-zookeeper-cluster-to-kubernetes)
## 1. Build a ZooKeeper cluster locally

Prepare the environment: three machines (192.168.20.233/234/235 in this walkthrough).

- Install Java
```bash
[root@node01]# yum install java
```
- Download ZooKeeper
```bash
[root@node01]# wget https://dlcdn.apache.org/zookeeper/zookeeper-3.8.5/apache-zookeeper-3.8.5-bin.tar.gz
[root@node01]# tar xf apache-zookeeper-3.8.5-bin.tar.gz -C /opt/
[root@node01]# ln -s /opt/apache-zookeeper-3.8.5-bin /opt/zookeeper
```
- Edit the configuration file
```bash
[root@node01]# cat /opt/zookeeper/conf/zoo.cfg
# Heartbeat interval between servers, and between clients and servers, in milliseconds
tickTime=2000
# Max heartbeats (initLimit * tickTime) a follower (F) may take for its initial connection to the leader (L)
initLimit=10
# Max heartbeats (syncLimit * tickTime) tolerated between a follower's request and the leader's reply
syncLimit=5
# Data directory
dataDir=../data
# Transaction log directory
dataLogDir=../logs
# Client connection port
clientPort=2181
# Max client connections (default 60)
maxClientCnxns=60
# Four-letter-word commands for querying server state
#4lw.commands.whitelist=*
# Cluster members, format: server.<id>=<host>:<leader-follower port>:<election port>
server.1=192.168.20.233:2888:3888
server.2=192.168.20.234:2888:3888
server.3=192.168.20.235:2888:3888
```
- Create the data directory (on all three nodes)
```bash
[root@node01]# mkdir /opt/zookeeper/data/
```
- Create the node ID file (myid)
```bash
[root@node01]# echo "1" > /opt/zookeeper/data/myid
[root@node02]# echo "2" > /opt/zookeeper/data/myid
[root@node03]# echo "3" > /opt/zookeeper/data/myid
```
- Start ZooKeeper (on each node)
```bash
[root@node01]# cd /opt/zookeeper/bin
[root@node01]# ./zkServer.sh start
```
- Check the cluster status
```bash
[root@node01 conf]# ../bin/zkServer.sh status
Mode: leader
[root@node02 logs]# ../bin/zkServer.sh status
Mode: follower
[root@node03 logs]# ../bin/zkServer.sh status
Mode: follower
```
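One node reports leader and the other two follower, so the ensemble is up. To confirm the three nodes really replicate data (rather than running as three standalone servers), a quick smoke test is to write a znode on one member and read it back from another. A minimal sketch, assuming the same /opt/zookeeper layout on every machine:

```bash
# On node01: create a test znode
/opt/zookeeper/bin/zkCli.sh -server 192.168.20.233:2181 create /smoke ok

# On node02: the same znode should be visible through any member
/opt/zookeeper/bin/zkCli.sh -server 192.168.20.234:2181 get /smoke
# Expected output: ok
```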
## 2. Build the ZooKeeper image
- Build the JDK base image
```dockerfile
[root@harbor jdk1.8]# cat Dockerfile
FROM harbor.xxx.com/base/centos:7
# Set environment variables
ENV JAVA_HOME /jdk
ENV PATH $JAVA_HOME/bin:$PATH
# Copy the Oracle JDK tarball into the image
COPY jdk-8u291-linux-x64.tar.gz /tmp/oracle-jdk.tar.gz
# Unpack the Oracle JDK and remove unneeded files to slim the image
RUN cd /tmp && \
    tar -xzf oracle-jdk.tar.gz && \
    mv jdk*/ $JAVA_HOME && \
    rm -rf $JAVA_HOME/jre/lib/plugin.jar && \
    rm -rf $JAVA_HOME/jre/lib/ext/jfxrt.jar $JAVA_HOME/jre/bin/javaws && \
    rm -rf $JAVA_HOME/bin/*.bat && \
    rm -rf $JAVA_HOME/lib/*.dll && \
    rm -rf $JAVA_HOME/lib/src.zip && \
    rm -rf $JAVA_HOME/lib/amd64/jvm.cfg && \
    rm -rf /tmp/*
```
```bash
[root@harbor jdk1.8]# docker build -t harbor.xxx.com/base/jdk-centos7:v1.8 .
[root@harbor jdk1.8]# docker push harbor.xxx.com/base/jdk-centos7:v1.8
```
- Build the zk image

Dockerfile:
```dockerfile
[root@harbor zk]# cat Dockerfile
FROM harbor.xxx.com/base/jdk-centos7:v1.8
# Set the container timezone
ENV TZ=Asia/Shanghai
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
ENV VERSION=3.8.5
# Unpack ZooKeeper and drop in the templated config
ADD ./apache-zookeeper-${VERSION}-bin.tar.gz /
ADD ./zoo.cfg /apache-zookeeper-${VERSION}-bin/conf
RUN mv /apache-zookeeper-${VERSION}-bin/ /zookeeper
ADD ./entrypoint.sh /entrypoint.sh
EXPOSE 2181 2888 3888
CMD ["/bin/bash","/entrypoint.sh"]
```
zoo.cfg (the {PLACEHOLDER} tokens are rendered by entrypoint.sh at container start):

```bash
[root@harbor zk]# cat zoo.cfg
# Heartbeat interval between servers, and between clients and servers, in milliseconds
tickTime={ZOOK_TICK_TIME}
# Max heartbeats (initLimit * tickTime) a follower (F) may take for its initial connection to the leader (L)
initLimit={ZOOK_INIT_LIMIT}
# Max heartbeats (syncLimit * tickTime) tolerated between a follower's request and the leader's reply
syncLimit={ZOOK_SYNC_LIMIT}
# Data directory
dataDir={ZOOK_DATA_DIR}
# Transaction log directory
dataLogDir={ZOOK_LOG_DIR}
# Client connection port
clientPort={ZOOK_CLIENT_PORT}
# Max client connections (default 60)
maxClientCnxns={ZOOK_MAX_CLIENT_CNXNS}
# Allow four-letter-word commands for querying server state (used by the k8s probes)
4lw.commands.whitelist=*
# server.<id>=<host>:<leader-follower port>:<election port> entries are appended by entrypoint.sh
```
entrypoint.sh:

```bash
[root@harbor zk]# cat entrypoint.sh
#!/bin/bash
# 1. ZooKeeper paths
ZOOK_BIN_DIR=/zookeeper/bin
ZOOK_CONF_DIR=/zookeeper/conf/zoo.cfg
# 2. Render the config file; each value can be overridden via an env var in the k8s manifest
sed -i s#{ZOOK_TICK_TIME}#${ZOOK_TICK_TIME:-2000}#g ${ZOOK_CONF_DIR}
sed -i s#{ZOOK_INIT_LIMIT}#${ZOOK_INIT_LIMIT:-10}#g ${ZOOK_CONF_DIR}
sed -i s#{ZOOK_SYNC_LIMIT}#${ZOOK_SYNC_LIMIT:-5}#g ${ZOOK_CONF_DIR}
sed -i s#{ZOOK_DATA_DIR}#${ZOOK_DATA_DIR:-/data}#g ${ZOOK_CONF_DIR}
sed -i s#{ZOOK_LOG_DIR}#${ZOOK_LOG_DIR:-/logs}#g ${ZOOK_CONF_DIR}
sed -i s#{ZOOK_CLIENT_PORT}#${ZOOK_CLIENT_PORT:-2181}#g ${ZOOK_CONF_DIR}
sed -i s#{ZOOK_MAX_CLIENT_CNXNS}#${ZOOK_MAX_CLIENT_CNXNS:-60}#g ${ZOOK_CONF_DIR}
# 3. The server list is injected via the ZOOK_SERVERS env var; append each entry to the config
for i in ${ZOOK_SERVERS:-server.1=localhost:2888:3888}
do
    echo $i >> ${ZOOK_CONF_DIR}
done
# 4. Derive myid from the ordinal suffix of the pod hostname; this requires the pods to be
#    managed by a StatefulSet, otherwise the hostname carries no stable ordinal
ZOOK_MYID=$(( $(hostname | sed 's#.*-##g') + 1 ))
echo "${ZOOK_MYID:-99}" > ${ZOOK_DATA_DIR:-/data}/myid
# 5. Run ZooKeeper in the foreground so it stays PID 1 of the container
cd ${ZOOK_BIN_DIR}
./zkServer.sh start-foreground
```
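The myid logic in step 4 simply strips everything up to the last dash in the hostname and adds 1, so StatefulSet ordinals map to ZooKeeper ids 1..3. The expansion can be checked in any shell:

```bash
for h in zookeeper-0 zookeeper-1 zookeeper-2; do
  # same sed expression as entrypoint.sh, applied to sample pod hostnames
  echo "$h -> myid $(( $(echo $h | sed 's#.*-##g') + 1 ))"
done
# zookeeper-0 -> myid 1
# zookeeper-1 -> myid 2
# zookeeper-2 -> myid 3
```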
```bash
[root@harbor zk]# docker build -t harbor.xxx.com/base/zookeeper:3.8.5 .
[root@harbor zk]# docker push harbor.xxx.com/base/zookeeper:3.8.5
```
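Before touching Kubernetes, the image can be smoke-tested with plain docker. This is a hypothetical local check: --hostname zk-0 makes the entrypoint derive myid=1, matching the default server.1 entry, so the node comes up standalone; ZOOK_DATA_DIR is pointed at /tmp because the image itself contains no /data directory.

```bash
docker run -d --name zk-test --hostname zk-0 -e ZOOK_DATA_DIR=/tmp \
    harbor.xxx.com/base/zookeeper:3.8.5
# placeholders should be gone from the rendered config
docker exec zk-test cat /zookeeper/conf/zoo.cfg
# expect "Mode: standalone"
docker exec zk-test /zookeeper/bin/zkServer.sh status
docker rm -f zk-test
```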
## 3. Migrate the ZooKeeper cluster to Kubernetes

- Create the headless Service
```yaml
[root@master01 zk]# cat 01-zk-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-svc
spec:
  clusterIP: None
  selector:
    app: zk
  ports:
  - name: client
    port: 2181
    targetPort: 2181
  - name: leader-follower
    port: 2888
    targetPort: 2888
  - name: election
    port: 3888
    targetPort: 3888
```
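The headless Service (clusterIP: None) is what gives each StatefulSet pod its stable DNS record, <pod>.zk-svc.<namespace>.svc.cluster.local, which the ZOOK_SERVERS list below relies on. Once the pods exist, the records can be verified from a throwaway pod (a hypothetical check using a busybox image):

```bash
kubectl run dns-test --rm -it --image=busybox:1.36 --restart=Never -- \
    nslookup zookeeper-0.zk-svc.default.svc.cluster.local
```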
- Create the StatefulSet
```yaml
[root@master01 zk]# cat 02-zk-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
spec:
  serviceName: "zk-svc"
  replicas: 3
  selector:
    matchLabels:
      app: zk
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - { key: "app", operator: "In", values: ["zk"] }
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: zk
        image: harbor.xxx.com/base/zookeeper:3.8.5
        imagePullPolicy: Always
        ports:
        - name: client
          containerPort: 2181
        - name: leader-follower
          containerPort: 2888
        - name: election
          containerPort: 3888
        volumeMounts:
        - name: data
          mountPath: /data
        env:
        - name: ZOOK_SERVERS
          value: "server.1=zookeeper-0.zk-svc.default.svc.cluster.local:2888:3888 server.2=zookeeper-1.zk-svc.default.svc.cluster.local:2888:3888 server.3=zookeeper-2.zk-svc.default.svc.cluster.local:2888:3888"
        readinessProbe:
          exec:
            command:
            - "/bin/bash"
            - "-c"
            - '[[ "$(/zookeeper/bin/zkServer.sh status 2> /dev/null | grep 2181)" ]] && exit 0 || exit 1'
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - "/bin/bash"
            - "-c"
            - '[[ "$(/zookeeper/bin/zkServer.sh status 2> /dev/null | grep 2181)" ]] && exit 0 || exit 1'
          initialDelaySeconds: 10
          timeoutSeconds: 5
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteMany"]
      storageClassName: "nfs-provisioner-storage"
      resources:
        requests:
          storage: 50Gi
```
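Apply both manifests; with the default OrderedReady policy the StatefulSet creates zookeeper-0, zookeeper-1 and zookeeper-2 one after another:

```bash
[root@master01 zk]# kubectl apply -f 01-zk-headless.yaml
service/zk-svc created
[root@master01 zk]# kubectl apply -f 02-zk-sts.yaml
statefulset.apps/zookeeper created
```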
- Check the zk cluster

Check the Pods and the Service:
```bash
[root@master01 zk]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-client-provisioner-56b68b695-d7fph 1/1 Running 1 (14d ago) 14d 172.16.2.172 node02.xxx.com <none> <none>
zookeeper-0 1/1 Running 0 28m 172.16.1.20 node01.xxx.com <none> <none>
zookeeper-1 1/1 Running 0 28m 172.16.3.156 node03.xxx.com <none> <none>
zookeeper-2 1/1 Running 0 28m 172.16.2.202 node02.xxx.com <none> <none>
```
- Check the cluster status
```bash
[root@master01 zk]# kubectl exec -it zookeeper-0 -- /zookeeper/bin/zkServer.sh status
Mode: follower
[root@master01 zk]# kubectl exec -it zookeeper-1 -- /zookeeper/bin/zkServer.sh status
Mode: leader
[root@master01 zk]# kubectl exec -it zookeeper-2 -- /zookeeper/bin/zkServer.sh status
Mode: follower
```
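It is also worth confirming that the entrypoint rendered everything as intended inside each pod; the myid values and the appended server list should line up with the pod ordinals:

```bash
[root@master01 zk]# kubectl exec zookeeper-0 -- cat /data/myid
1
[root@master01 zk]# kubectl exec zookeeper-2 -- grep ^server /zookeeper/conf/zoo.cfg
server.1=zookeeper-0.zk-svc.default.svc.cluster.local:2888:3888
server.2=zookeeper-1.zk-svc.default.svc.cluster.local:2888:3888
server.3=zookeeper-2.zk-svc.default.svc.cluster.local:2888:3888
```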
- Connect to the zk cluster

```bash
[root@master01 zk]# kubectl exec -it zookeeper-0 -- /bin/bash
[root@zookeeper-0 /]# /zookeeper/bin/zkCli.sh -server zk-svc
[zk: zk-svc(CONNECTED) 0] create /test haha
Created /test
[zk: zk-svc(CONNECTED) 1] get /test
haha
```
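As a final resilience check, delete the current leader (zookeeper-1 above) and confirm that the remaining members elect a new leader while the StatefulSet recreates the pod, which then rejoins as a follower with the same myid and data:

```bash
[root@master01 zk]# kubectl delete pod zookeeper-1
pod "zookeeper-1" deleted
[root@master01 zk]# kubectl exec -it zookeeper-0 -- /zookeeper/bin/zkServer.sh status
[root@master01 zk]# kubectl exec -it zookeeper-2 -- /zookeeper/bin/zkServer.sh status
```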