ZooKeeper cluster deployment
Copy-paste ready: the manifests below can be applied as-is.
Create the zookeeper directory
bash
# Create the working directory
mkdir -p /opt/zookeeper
cd /opt/zookeeper
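Each manifest below is saved as its own file in this directory. Once you have created all of them, a quick listing should show the seven files used in this post:

bash
# Expected contents of /opt/zookeeper after creating all manifests below
ls /opt/zookeeper
# namespace.yaml  scripts-configmap.yaml  serviceaccount.yaml  statefulset.yaml
# svc-headless.yaml  svc.yaml  metrics-svc.yaml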
namespace.yaml
yaml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: zookeeper
  name: zookeeper
scripts-configmap.yaml
yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: zookeeper-scripts
  namespace: zookeeper
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/version: 3.9.2
    app.kubernetes.io/component: zookeeper
data:
  init-certs.sh: |-
    #!/bin/bash
  setup.sh: |-
    #!/bin/bash
    # Execute entrypoint as usual after obtaining ZOO_SERVER_ID
    # check ZOO_SERVER_ID in persistent volume via myid
    # if not present, set based on POD hostname
    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
      export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
    else
      HOSTNAME="$(hostname -s)"
      if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
        ORD=${BASH_REMATCH[2]}
        export ZOO_SERVER_ID="$((ORD + 1))"
      else
        echo "Failed to get index from hostname $HOSTNAME"
        exit 1
      fi
    fi
    exec /entrypoint.sh /run.sh
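The ID derivation in setup.sh simply takes the StatefulSet pod ordinal from the hostname and adds 1, since ZooKeeper server IDs start at 1. A standalone sketch of the same logic, runnable in any bash shell:

bash
# Illustration only: how setup.sh maps a pod hostname to ZOO_SERVER_ID
HOSTNAME="zookeeper-2"                      # e.g. the third pod of the StatefulSet
if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
  ORD=${BASH_REMATCH[2]}                    # ordinal 2
  echo "ZOO_SERVER_ID=$((ORD + 1))"         # prints ZOO_SERVER_ID=3
fi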
serviceaccount.yaml
yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: zookeeper
  namespace: zookeeper
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/version: 3.9.2
    app.kubernetes.io/component: zookeeper
    role: zookeeper
automountServiceAccountToken: false
statefulset.yaml
yaml
---
# Source: zookeeper/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
  namespace: zookeeper
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/version: 3.9.2
    app.kubernetes.io/component: zookeeper
    role: zookeeper
spec:
  replicas: 3
  revisionHistoryLimit: 10
  podManagementPolicy: Parallel
  selector:
    matchLabels:
      app.kubernetes.io/name: zookeeper
      app.kubernetes.io/component: zookeeper
  serviceName: zookeeper-headless
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      annotations:
      labels:
        app.kubernetes.io/name: zookeeper
        app.kubernetes.io/version: 3.9.2
        app.kubernetes.io/component: zookeeper
    spec:
      enableServiceLinks: true
      serviceAccountName: zookeeper
      # Do not auto-mount API credentials for the service account
      automountServiceAccountToken: false
      affinity:
        podAffinity:
        # Pod anti-affinity: spread the pods across different nodes for high availability
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: zookeeper
                    app.kubernetes.io/component: zookeeper
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      # Pod-level security context
      securityContext:
        fsGroup: 1001
        fsGroupChangePolicy: Always
        supplementalGroups: []
        sysctls: []
      initContainers:
      containers:
        - name: zookeeper
          image: docker.io/bitnami/zookeeper:3.9.2-debian-12-r2
          imagePullPolicy: "IfNotPresent"
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            privileged: false
            readOnlyRootFilesystem: true
            runAsGroup: 1001
            runAsNonRoot: true
            runAsUser: 1001
            seLinuxOptions: {}
            seccompProfile:
              type: RuntimeDefault
          command:
            - /scripts/setup.sh
          resources:
            limits:
              cpu: 375m
              ephemeral-storage: 1024Mi
              memory: 384Mi
            requests:
              cpu: 250m
              ephemeral-storage: 50Mi
              memory: 256Mi
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: ZOO_DATA_LOG_DIR
              value: ""
            - name: ZOO_PORT_NUMBER
              value: "2181"
            - name: ZOO_TICK_TIME
              value: "2000"
            - name: ZOO_INIT_LIMIT
              value: "10"
            - name: ZOO_SYNC_LIMIT
              value: "5"
            - name: ZOO_PRE_ALLOC_SIZE
              value: "65536"
            - name: ZOO_SNAPCOUNT
              value: "100000"
            - name: ZOO_MAX_CLIENT_CNXNS
              value: "60"
            - name: ZOO_4LW_COMMANDS_WHITELIST
              value: "srvr, mntr, ruok"
            - name: ZOO_LISTEN_ALLIPS_ENABLED
              value: "no"
            - name: ZOO_AUTOPURGE_INTERVAL
              value: "1"
            - name: ZOO_AUTOPURGE_RETAIN_COUNT
              value: "10"
            - name: ZOO_MAX_SESSION_TIMEOUT
              value: "40000"
            - name: ZOO_SERVERS
              value: zookeeper-0.zookeeper-headless.zookeeper.svc.cluster.local:2888:3888::1 zookeeper-1.zookeeper-headless.zookeeper.svc.cluster.local:2888:3888::2 zookeeper-2.zookeeper-headless.zookeeper.svc.cluster.local:2888:3888::3
            - name: ZOO_ENABLE_AUTH
              value: "no"
            - name: ZOO_ENABLE_QUORUM_AUTH
              value: "no"
            - name: ZOO_HEAP_SIZE
              value: "1024"
            - name: ZOO_LOG_LEVEL
              value: "ERROR"
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: ZOO_ENABLE_PROMETHEUS_METRICS
              value: "yes"
            - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER
              value: "9141"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: ZOO_ADMIN_SERVER_PORT_NUMBER
              value: "8080"
          ports:
            - name: client
              containerPort: 2181
            - name: follower
              containerPort: 2888
            - name: election
              containerPort: 3888
            # Metrics port for Prometheus scraping
            - name: metrics
              containerPort: 9141
            - name: http-admin
              containerPort: 8080
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bin/bash
                - -ec
                - ZOO_HC_TIMEOUT=2 /opt/bitnami/scripts/zookeeper/healthcheck.sh
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bin/bash
                - -ec
                - ZOO_HC_TIMEOUT=2 /opt/bitnami/scripts/zookeeper/healthcheck.sh
          volumeMounts:
            - name: empty-dir
              mountPath: /tmp
              subPath: tmp-dir
            - name: empty-dir
              mountPath: /opt/bitnami/zookeeper/conf
              subPath: app-conf-dir
            - name: empty-dir
              mountPath: /opt/bitnami/zookeeper/logs
              subPath: app-logs-dir
            - name: scripts
              mountPath: /scripts/setup.sh
              subPath: setup.sh
            - name: data
              mountPath: /bitnami/zookeeper
      volumes:
        - name: empty-dir
          emptyDir: {}
        - name: scripts
          configMap:
            name: zookeeper-scripts
            defaultMode: 493
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "10Gi"
        # Change to match the storage class available in your cluster
        storageClassName: nfs-client
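One value worth double-checking before applying: ZOO_HEAP_SIZE is specified in MB, so a 1024 MB heap does not fit inside the 384Mi memory limit above and could lead to OOM kills once the ensemble sees real load. If you keep these resource limits, a smaller heap is safer; this is only a suggested fragment to drop into the env list, sized to your own needs:

yaml
# Suggested adjustment: keep the JVM heap below the container memory limit
- name: ZOO_HEAP_SIZE
  value: "256"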
svc-headless.yaml
yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-headless
  namespace: zookeeper
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/version: 3.9.2
    app.kubernetes.io/component: zookeeper
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: tcp-client
      port: 2181
      targetPort: client
    - name: tcp-follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
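The headless Service provides the stable per-pod DNS names used in ZOO_SERVERS, and publishNotReadyAddresses: true makes those records resolvable even before the pods pass their readiness probes, so the ensemble can bootstrap. Once the pods are up, you can verify the records from a throwaway pod (a sketch; the busybox image and test pod name are arbitrary):

bash
# Resolve one per-pod record of the headless service from inside the cluster
kubectl run dns-test -n zookeeper --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup zookeeper-0.zookeeper-headless.zookeeper.svc.cluster.local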
svc.yaml
yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  namespace: zookeeper
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/version: 3.9.2
    app.kubernetes.io/component: zookeeper
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-client
      port: 2181
      targetPort: client
      nodePort: null
    - name: tcp-follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
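Applications inside the cluster connect through this Service at zookeeper.zookeeper.svc.cluster.local:2181. For a quick manual smoke test once the cluster is running, you can use the zkCli.sh bundled in the Bitnami image (a sketch; it assumes zkCli.sh is on the PATH in the container, and the znode path is arbitrary):

bash
# Create and read back a test znode through the client Service
kubectl exec -n zookeeper zookeeper-0 -- \
  zkCli.sh -server zookeeper.zookeeper.svc.cluster.local:2181 create /smoke-test "hello"
kubectl exec -n zookeeper zookeeper-0 -- \
  zkCli.sh -server zookeeper.zookeeper.svc.cluster.local:2181 get /smoke-test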
metrics-svc.yaml
yaml
apiVersion: v1
kind: Service
metadata:
  name: zookeeper-metrics
  namespace: zookeeper
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/version: 3.9.2
    app.kubernetes.io/component: metrics
  annotations:
    prometheus.io/path: /metrics
    prometheus.io/port: "9141"
    # Enable Prometheus scraping via annotation
    prometheus.io/scrape: "true"
spec:
  type: ClusterIP
  ports:
    - name: tcp-metrics
      port: 9141
      targetPort: metrics
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
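Once the pods are running (next step), you can confirm the exporter is actually serving metrics on port 9141 before wiring it into Prometheus (a sketch using a local port-forward; the local port choice is arbitrary):

bash
# Forward the metrics Service locally and fetch the first few lines
kubectl port-forward -n zookeeper svc/zookeeper-metrics 9141:9141 &
sleep 2
curl -s http://127.0.0.1:9141/metrics | head -n 20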
Deploy
bash
# Apply all manifests
kubectl apply -f .
# Check the deployment status
kubectl get pod,svc,pvc -n zookeeper
NAME READY STATUS RESTARTS AGE
pod/zookeeper-0 1/1 Running 0 16h
pod/zookeeper-1 1/1 Running 0 16h
pod/zookeeper-2 1/1 Running 0 16h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/zookeeper ClusterIP 10.0.6.237 <none> 2181/TCP,2888/TCP,3888/TCP 19h
service/zookeeper-headless ClusterIP None <none> 2181/TCP,2888/TCP,3888/TCP 19h
service/zookeeper-metrics ClusterIP 10.0.8.208 <none> 9141/TCP 19h
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
persistentvolumeclaim/data-zookeeper-0 Bound pvc-a6ac5f14-bd56-4d20-8e8b-64b8efe4ab52 10Gi RWO nfs-client 19h
persistentvolumeclaim/data-zookeeper-1 Bound pvc-7beccb60-202f-4be4-90e0-2178385055fb 10Gi RWO nfs-client 19h
persistentvolumeclaim/data-zookeeper-2 Bound pvc-7c5ed75d-0467-4007-b5f1-7a7701b91b92 10Gi RWO nfs-client 19h
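A quick way to check that the ensemble itself (not just the pods) is healthy is through ZooKeeper's AdminServer on port 8080, which the StatefulSet enables via ZOO_ADMIN_SERVER_PORT_NUMBER. A sketch using a local port-forward:

bash
# The AdminServer answers in JSON; /commands/srvr includes server_state (leader or follower)
kubectl port-forward -n zookeeper pod/zookeeper-0 8080:8080 &
sleep 2
curl -s http://127.0.0.1:8080/commands/ruok
curl -s http://127.0.0.1:8080/commands/srvr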
Integrate with Prometheus
For deploying Prometheus itself, see the earlier post: k8s Prometheus + Grafana + Alertmanager monitoring with email alerts.
Open the Prometheus UI to confirm that the zookeeper targets have been discovered and are up.
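The prometheus.io/* annotations on zookeeper-metrics only take effect if your Prometheus runs an annotation-based endpoints discovery job. If yours does not, a scrape job along these lines is the usual pattern (a sketch for prometheus.yml using the standard annotation relabeling convention; adjust the job name and details to your setup):

yaml
scrape_configs:
  - job_name: kubernetes-service-endpoints
    kubernetes_sd_configs:
      - role: endpoints
    relabel_configs:
      # Only scrape endpoints whose Service carries prometheus.io/scrape: "true"
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: "true"
      # Honour the prometheus.io/port annotation (9141 for zookeeper-metrics)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        regex: '([^:]+)(?::\d+)?;(\d+)'
        replacement: '$1:$2'
        target_label: __address__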
Import the ZooKeeper monitoring dashboard
Dashboard ID: 10465
Dashboard view
Note: immediately after deployment all metric values are 0; the screenshot shows data after application traffic was connected.