I. Problem Description
The etcd certificates of an Alibaba Cloud ACK cluster expired, and renewing them through the console reported an upgrade failure, so we decided to renew the etcd-related certificates with a script instead. A similar etcd certificate upgrade had previously failed and caused access problems for every workload in the cluster, so taking the corresponding backups before the upgrade is essential.
II. Preparation
1. The upgrade is performed on the master nodes and all of the certificate files that change live on them, so take disk snapshots of the three ACK master nodes first.
2. Back up the certificate directories involved on the three ACK master nodes and, if necessary, copy the backups to another server as well (a sketch follows below).
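A minimal sketch of step 2, assuming the default ACK certificate directories used throughout this article (/var/lib/etcd/cert and /etc/kubernetes/pki/etcd); backup-server is a placeholder for whatever remote machine you back up to:
bash
# Archive the etcd and Kubernetes etcd certificate directories on this master node
BACKUP_FILE=/root/master-certs-$(hostname)-$(date +%Y%m%d).tar.gz
tar -czf "$BACKUP_FILE" /var/lib/etcd/cert /etc/kubernetes/pki/etcd
# Optionally copy the archive off the node (backup-server is a placeholder host)
scp "$BACKUP_FILE" root@backup-server:/data/cert-backup/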
3. Back up the ACK etcd database:
bash
date;
# TLS certificates of the local etcd member
CACERT="/var/lib/etcd/cert/ca.pem"
CERT="/var/lib/etcd/cert/etcd-server.pem"
KEY="/var/lib/etcd/cert/etcd-server-key.pem"
ENDPOINTS="172.16.0.87:2379"
# make sure the backup directory exists
mkdir -p /data/etcd_backup_dir
ETCDCTL_API=3 etcdctl \
--cacert="${CACERT}" --cert="${CERT}" --key="${KEY}" \
--endpoints=${ENDPOINTS} \
snapshot save /data/etcd_backup_dir/etcd-snapshot-`date +%Y%m%d`.db
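After the snapshot completes, a quick integrity check is worthwhile; this reuses the backup path from the command above (snapshot status is part of the etcdctl v3 toolset):
bash
ETCDCTL_API=3 etcdctl snapshot status \
/data/etcd_backup_dir/etcd-snapshot-`date +%Y%m%d`.db -w table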
Back up the etcd data directory:
bash
cp -r /var/lib/etcd /var/lib/etcd-`date +%Y%m%d`.bak
4. Back up the YAML manifests of every resource deployed in the ACK cluster, covering service, deploy, ingress, configmap, secret, job, cronjob, daemonset, statefulset, pvc, and so on. The backup script is as follows:
bash
#!/bin/bash
#define variable
BACKUP_PATH=/data/k8s-backup
BACKUP_PATH_DATA=$BACKUP_PATH/yaml/`date +%Y%m%d%H%M%S`
BACKUP_PATH_LOG=$BACKUP_PATH/log
BACKUP_LOG_FILE=$BACKUP_PATH_LOG/k8s-backup-`date +%Y%m%d%H%M%S`.log
# base function
function printlog(){
echo "`date +'%Y-%m-%d %H:%M:%S'` $1"
echo "`date +'%Y-%m-%d %H:%M:%S'` $1" >> $BACKUP_LOG_FILE 2>&1
}
function printlogonly(){
echo "`date +'%Y-%m-%d %H:%M:%S'` $1" >> $BACKUP_LOG_FILE 2>&1
}
# set K8s resource types (adjust this list to match your cluster's resources)
CONFIG_TYPE="service deploy ingress configmap secret job cronjob daemonset statefulset pvc"
# make dir
mkdir -p $BACKUP_PATH_DATA
mkdir -p $BACKUP_PATH_LOG
cd $BACKUP_PATH_DATA
# set namespace list
ns_list=`kubectl get ns | awk '{print $1}' | grep -v NAME`
if [ $# -ge 1 ]; then
ns_list="$@"
fi
# define counters
COUNT_NS=0
COUNT_ITEM_IN_NS=0
COUNT_ITEM_IN_TYPE=0
COUNT_ITEM_ALL=0
# print hint
printlog "Backup kubernetes config in namespaces: ${ns_list}"
printlog "Backup kubernetes config for [type: ${CONFIG_TYPE}]."
printlog "If you want to read the record of backup, please input command ' tail -100f ${BACKUP_LOG_FILE} '"
# ask and answer
message="This will backup resources of kubernetes cluster to yaml files."
printlog "${message}"
# loop for namespaces
for ns in $ns_list;
do
COUNT_NS=`expr $COUNT_NS + 1`
printlog "Backup No.${COUNT_NS} namespace [namespace: ${ns}]."
COUNT_ITEM_IN_NS=0
## loop for types
for type in $CONFIG_TYPE;
do
printlogonly "Backup type [namespace: ${ns}, type: ${type}]."
item_list=`kubectl -n $ns get $type | awk '{print $1}' | grep -v NAME | grep -v "No "`
COUNT_ITEM_IN_TYPE=0
## loop for items
for item in $item_list;
do
file_name=$BACKUP_PATH_DATA/${ns}_${type}_${item}.yaml
printlogonly "Backup kubernetes config yaml [namespace: ${ns}, type: ${type}, item: ${item}] to file: ${file_name}"
kubectl -n $ns get $type $item -o yaml > $file_name
COUNT_ITEM_IN_NS=`expr $COUNT_ITEM_IN_NS + 1`
COUNT_ITEM_IN_TYPE=`expr $COUNT_ITEM_IN_TYPE + 1`
COUNT_ITEM_ALL=`expr $COUNT_ITEM_ALL + 1`
printlogonly "Backup No.$COUNT_ITEM_ALL file done."
done;
printlogonly "Backup $COUNT_ITEM_IN_TYPE files in [namespace: ${ns}, type: ${type}]."
done;
printlog "Backup ${COUNT_ITEM_IN_NS} files done in [namespace: ${ns}]."
done;
# show stats
printlog "Backup ${COUNT_ITEM_ALL} yaml files in all."
printlog "kubernetes Backup completed, all done."
exit 0
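Assuming the script above is saved as k8s-backup.sh (the file name itself is arbitrary), it backs up either every namespace or only the namespaces passed as arguments:
bash
# Back up every namespace in the cluster
bash k8s-backup.sh
# Back up only the listed namespaces
bash k8s-backup.sh default kube-system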
III. Certificate Upgrade Procedure
1. Confirm that passwordless SSH login for the root user is configured between the cluster's master nodes.
From one master node, log in to any other master node over SSH. If you are prompted for a password, configure passwordless login between the master nodes as follows:
bash
ssh-keygen -t rsa                               # Generate the key pair.
ssh-copy-id -i ~/.ssh/id_rsa.pub $(internal-ip) # Copy the public key to every other master node; $(internal-ip) is that node's internal IP.
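Before running the rotation script, it is worth confirming that the passwordless setup really works; the IP addresses below are placeholders for the internal IPs of the other master nodes:
bash
# BatchMode=yes makes ssh fail instead of prompting for a password,
# so a missing key shows up as an error here rather than a hang.
for ip in 172.16.0.88 172.16.0.89; do    # placeholder internal IPs, replace with your own
ssh -o BatchMode=yes -o StrictHostKeyChecking=no root@$ip hostname
done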
2. Copy the two scripts below, save them as restart-apiserver.sh and rotate-etcd.sh respectively, and place both files in the same directory.
restart-apiserver.sh:
bash
#! /bin/bash
function restart_apiserver() {
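# Stop the running kube-apiserver container and temporarily move its static pod
# manifest out of /etc/kubernetes/manifests so kubelet does not recreate it;
# the manifest is moved back further down to trigger a clean restart.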
apiserverID=$(/usr/bin/docker ps | grep kube-apiserver | grep -v NAME | awk '{print $1}')
/usr/bin/docker stop $apiserverID
rm -rf /root/kube-apiserver.yaml
mv /etc/kubernetes/manifests/kube-apiserver.yaml /root/kube-apiserver.yaml
while true; do
NUM=$(docker ps | grep kube-apiserver | wc -l)
if [[ $NUM == 0 ]]; then
break
fi
sleep 1
done
/usr/bin/docker ps -a | grep kube-apiserver | awk '{print $1}' | xargs docker stop
/usr/bin/docker ps -a | grep kube-apiserver | awk '{print $1}' | xargs docker rm
mv /root/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml
while true; do
NUM=$(docker ps | grep kube-apiserver | grep -v pause | wc -l)
if [[ $NUM == 1 ]]; then
break
fi
sleep 1
done
k8s::wait_apiserver_ready
}
k8s::wait_apiserver_ready() {
set -e
for i in $(seq 180); do
if kubectl get po &>/dev/null; then
return 0
else
echo "wait apiserver to be ready, retry ${i}th after 1s"
sleep 1
fi
done
echo "failed to wait apiserver to be ready"
return 1
}
function restart_container() {
crictl pods | grep kube-apiserver | awk '{print $1}' | xargs -I '{}' crictl stopp {} || true
}
if [[ -f /usr/bin/docker ]]; then
restart_apiserver
else
restart_container
k8s::wait_apiserver_ready
fi
echo "API Server restarted"
rotate-etcd.sh:
bash
#!/bin/bash
set -eo pipefail
dir=/tmp/etcdcert
KUBE_CERT_PATH=/etc/kubernetes/pki
ETCD_CERT_DIR=/var/lib/etcd/cert
ETCD_HOSTS=""
function get_etcdhosts() {
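# Derive the etcd member hostnames from the peer certificate file names
# (<hostname>-name-N.pem) that already exist in /var/lib/etcd/cert.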
name1=$(ls $ETCD_CERT_DIR | grep name | grep name-1.pem | sed 's/-name-1.pem//g')
name2=$(ls $ETCD_CERT_DIR | grep name | grep name-2.pem | sed 's/-name-2.pem//g')
name3=$(ls $ETCD_CERT_DIR | grep name | grep name-3.pem | sed 's/-name-3.pem//g')
echo "hosts: " $name1 $name2 $name3
# ETCD_HOSTS=($name1 $name2 $name3)
ETCD_HOSTS=$name1" "$name2" "$name3
}
function gencerts() {
echo "generate ssl cert ..."
rm -rf $dir
mkdir -p "$dir"
hosts=$(echo $ETCD_HOSTS | tr -s " " ",")
echo "-----generate ca"
echo '{"CN":"CA","key":{"algo":"rsa","size":2048}, "ca": {"expiry": "438000h"}}' |
cfssl gencert -initca - | cfssljson -bare $dir/ca -
echo '{"signing":{"default":{"expiry":"438000h","usages":["signing","key encipherment","server auth","client auth"]}}}' >$dir/ca-config.json
echo "-----generate etcdserver"
export ADDRESS=$hosts,ext1.example.com,coreos1.local,coreos1
export NAME=etcd-server
echo '{"CN":"'$NAME'","hosts":[""],"key":{"algo":"rsa","size":2048}}' |
cfssl gencert -config=$dir/ca-config.json -ca=$dir/ca.pem -ca-key=$dir/ca-key.pem -hostname="$ADDRESS" - | cfssljson -bare $dir/$NAME
export ADDRESS=
export NAME=etcd-client
echo '{"CN":"'$NAME'","hosts":[""],"key":{"algo":"rsa","size":2048}}' |
cfssl gencert -config=$dir/ca-config.json -ca=$dir/ca.pem -ca-key=$dir/ca-key.pem -hostname="$ADDRESS" - | cfssljson -bare $dir/$NAME
# gen peer-ca
echo "-----generate peer certificates"
echo '{"CN":"Peer-CA","key":{"algo":"rsa","size":2048}, "ca": {"expiry": "438000h"}}' | cfssl gencert -initca - | cfssljson -bare $dir/peer-ca -
echo '{"signing":{"default":{"expiry":"438000h","usages":["signing","key encipherment","server auth","client auth"]}}}' >$dir/peer-ca-config.json
i=0
for host in $ETCD_HOSTS; do
((i = i + 1))
export MEMBER=${host}-name-$i
echo '{"CN":"'${MEMBER}'","hosts":[""],"key":{"algo":"rsa","size":2048}}' |
cfssl gencert -ca=$dir/peer-ca.pem -ca-key=$dir/peer-ca-key.pem -config=$dir/peer-ca-config.json -profile=peer \
-hostname="$hosts,${MEMBER}.local,${MEMBER}" - | cfssljson -bare $dir/${MEMBER}
#-hostname="$host,${MEMBER}.local,${MEMBER}" - | cfssljson -bare $dir/${MEMBER}
done
## backup
TIMESTAMP=$(date "+%Y%m%d%H%M%S")
\cp -r $ETCD_CERT_DIR ${ETCD_CERT_DIR}_$TIMESTAMP
\cp -r $KUBE_CERT_PATH/etcd $KUBE_CERT_PATH/etcd_$TIMESTAMP
# build the bundle CA
cat $KUBE_CERT_PATH/etcd/ca.pem >>$dir/bundle_ca.pem
cat $ETCD_CERT_DIR/ca.pem >>$dir/bundle_ca.pem
cat $dir/ca.pem >>$dir/bundle_ca.pem
# build the bundle peer-CA
cat $ETCD_CERT_DIR/peer-ca.pem >$dir/bundle_peer-ca.pem
cat $dir/peer-ca.pem >>$dir/bundle_peer-ca.pem
# chown
chown -R etcd:etcd $dir
chmod 0644 $dir/*
}
function rotate_etcd_ca() {
# Update certs on etcd nodes.
for ADDR in $ETCD_HOSTS; do
TIMESTAMP=$(date "+%Y%m%d%H%M%S")
ssh -o StrictHostKeyChecking=no root@$ADDR cp -r $ETCD_CERT_DIR ${ETCD_CERT_DIR}_$TIMESTAMP
echo "update etcd CA on node $ADDR"
scp -o StrictHostKeyChecking=no $dir/bundle_ca.pem root@$ADDR:$ETCD_CERT_DIR/ca.pem
scp -o StrictHostKeyChecking=no $dir/bundle_ca.pem root@$ADDR:$KUBE_CERT_PATH/etcd/ca.pem
scp -o StrictHostKeyChecking=no $dir/etcd-client.pem root@$ADDR:$KUBE_CERT_PATH/etcd/etcd-client.pem
scp -o StrictHostKeyChecking=no $dir/etcd-client-key.pem root@$ADDR:$KUBE_CERT_PATH/etcd/etcd-client-key.pem
scp -o StrictHostKeyChecking=no $dir/bundle_peer-ca.pem root@$ADDR:$ETCD_CERT_DIR/peer-ca.pem
ssh -o StrictHostKeyChecking=no root@$ADDR chown -R etcd:etcd $ETCD_CERT_DIR
ssh -o StrictHostKeyChecking=no root@$ADDR chmod 0644 $ETCD_CERT_DIR/*
echo "restart etcd on node $ADDR"
ssh -o StrictHostKeyChecking=no root@$ADDR systemctl restart etcd
echo "etcd on node $ADDR restarted"
ssh -o StrictHostKeyChecking=no root@$ADDR /usr/bin/bash /tmp/restart-apiserver.sh
echo "apiserver on node $ADDR restarted"
sleep 10
done
}
function rotate_etcd_certs() {
for ADDR in $ETCD_HOSTS; do
echo "update etcd peer certs on node $ADDR"
scp -o StrictHostKeyChecking=no \
$dir/{peer-ca-key.pem,etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem,ca-key.pem,*-name*.pem} root@$ADDR:$ETCD_CERT_DIR/
ssh -o StrictHostKeyChecking=no root@$ADDR chown -R etcd:etcd $ETCD_CERT_DIR
ssh -o StrictHostKeyChecking=no root@$ADDR \
chmod 0400 $ETCD_CERT_DIR/{peer-ca-key.pem,etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem,ca-key.pem,*-name*.pem}
echo "restart etcd on node $ADDR"
ssh -o StrictHostKeyChecking=no root@$ADDR systemctl restart etcd
echo "etcd on node $ADDR restarted"
sleep 10
done
}
function recover_etcd_ca() {
# Update certs on etcd nodes.
for ADDR in $ETCD_HOSTS; do
echo "replace etcd CA on node $ADDR"
scp -o StrictHostKeyChecking=no $dir/ca.pem root@$ADDR:$ETCD_CERT_DIR/ca.pem
scp -o StrictHostKeyChecking=no $dir/ca.pem root@$ADDR:$KUBE_CERT_PATH/etcd/ca.pem
scp -o StrictHostKeyChecking=no $dir/peer-ca.pem root@$ADDR:$ETCD_CERT_DIR/peer-ca.pem
scp -o StrictHostKeyChecking=no $dir/ca.pem root@$ADDR:$KUBE_CERT_PATH/etcd/ca.pem
ssh -o StrictHostKeyChecking=no root@$ADDR chown -R etcd:etcd $ETCD_CERT_DIR
echo "restart apiserver on node $ADDR"
ssh -o StrictHostKeyChecking=no root@$ADDR bash /tmp/restart-apiserver.sh
echo "apiserver on node $ADDR restarted"
echo "restart etcd on node $ADDR"
sleep 5
ssh -o StrictHostKeyChecking=no root@$ADDR systemctl restart etcd
echo "etcd on node $ADDR restarted"
sleep 5
done
}
function renew_k8s_certs() {
# Renew the K8s certificates. Replace the default cn-hangzhou image region below with your cluster's region.
for ADDR in $ETCD_HOSTS; do
echo "renew k8s components cert on node $ADDR"
#compatible containerd
set +e
ssh -o StrictHostKeyChecking=no root@$ADDR docker run --privileged=true -v /:/alicoud-k8s-host --pid host --net host \
registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0 /renew/upgrade-k8s.sh --role master
ssh -o StrictHostKeyChecking=no root@$ADDR ctr image pull registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0
ssh -o StrictHostKeyChecking=no root@$ADDR ctr run --privileged=true --mount type=bind,src=/,dst=/alicoud-k8s-host,options=rbind:rw \
--net-host registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0 cert-rotate /renew/upgrade-k8s.sh --role master
set -e
echo "finished renew k8s components cert on $ADDR"
sleep 5
done
}
function generate_cm() {
echo "generate status configmap"
cat <<-"EOF" > /tmp/ack-rotate-etcd-ca-cm.yaml.tpl
apiVersion: v1
kind: ConfigMap
metadata:
name: ack-rotate-etcd-status
namespace: kube-system
data:
status: "success"
hosts: "$hosts"
EOF
sed -e "s#\$hosts#$ETCD_HOSTS#" /tmp/ack-rotate-etcd-ca-cm.yaml.tpl | kubectl apply -f -
}
get_etcdhosts
echo "${ETCD_HOSTS[@]}"
echo "---renew k8s components certs---"
renew_k8s_certs
echo "---end to renew k8s components certs---"
# Update certs on etcd nodes.
for ADDR in $ETCD_HOSTS; do
scp -o StrictHostKeyChecking=no restart-apiserver.sh root@$ADDR:/tmp/restart-apiserver.sh
ssh -o StrictHostKeyChecking=no root@$ADDR chmod +x /tmp/restart-apiserver.sh
done
gencerts
echo "---rotate etcd ca and etcd client ca---"
rotate_etcd_ca
echo "---end to rotate etcd ca and etcd client ca---"
echo
echo "---rotate etcd peer and certs---"
rotate_etcd_certs
echo "---end to rotate etcd peer and certs---"
echo
echo "---replace etcd ca---"
recover_etcd_ca
echo "---end to replace etcd ca---"
generate_cm
echo "etcd CA and certs have succesfully rotated!"
Pull the image registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0 in advance.
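For example, on each master node (replace cn-hangzhou with your cluster's region, as noted in the script comment above), using the command that matches the node's container runtime:
bash
# Docker runtime
docker pull registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0
# containerd runtime
ctr image pull registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0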
The /renew/upgrade-k8s.sh script inside the image:
bash
#!/bin/sh
set -xe
if [ -d "/alicoud-k8s-host" ]; then
rm -rf /alicoud-k8s-host/usr/local/k8s-upgrade
mkdir -p /alicoud-k8s-host/usr/local/k8s-upgrade
cp -r /renew/* /alicoud-k8s-host/usr/local/k8s-upgrade
ls -l /alicoud-k8s-host/usr/local/k8s-upgrade
chmod -R +x /alicoud-k8s-host/usr/local/k8s-upgrade/
chroot /alicoud-k8s-host /usr/local/k8s-upgrade/rotate.sh "$@"
fi
The /renew/rotate.sh script inside the image can be viewed with docker run:
bash
[root@master01 ~]# docker run -it registry.cn-hangzhou.aliyuncs.com/acs/etcd-rotate:v2.0.0 cat /renew/rotate.sh
#!/usr/bin/env bash
set -e -x
public::common::log() {
echo $(date +"[%Y%m%d %H:%M:%S]: ") $1
}
function retry() {
local n=0
local try=$1
local cmd="${@:2}"
[[ $# -le 1 ]] && {
echo "Usage $0 <retry_number> <Command>"
}
set +e
until
[[ $n -ge $try ]]
do
$cmd && break || {
echo "Command Fail.."
((n++))
echo "retry $n :: [$cmd]"
sleep 2
}
done
set -e
}
public::upgrade::backupmaster() {
local backup_dir=/etc/kubeadm/backup-rotate-$(date +%F)
if [ ! -f $backup_dir/kubelet.conf ]; then
mkdir -p $backup_dir $backup_dir/kubelet $backup_dir/etcd
cp -rf /etc/kubernetes/ $backup_dir/
cp /etc/kubeadm/kubeadm.cfg $backup_dir/
cp /etc/systemd/system/kubelet.service.d/10-kubeadm.conf $backup_dir
cp /etc/kubernetes/kubelet.conf $backup_dir
cp -rf /var/lib/kubelet/pki/* $backup_dir/kubelet
cp -rf /var/lib/etcd/cert/* $backup_dir/etcd
else
public::common::log "master configuration is already backup, skip."
fi
}
public::upgrade::backupnode() {
public::common::log "Begin the node backup working."
local backup_dir=/etc/kubeadm/backup-rotate-$(date +%F)
if [ ! -f $backup_dir/10-kubeadm.conf ]; then
mkdir -p $backup_dir $backup_dir/kubelet
cp -rf /etc/kubernetes/ $backup_dir/
cp /etc/kubernetes/kubelet.conf $backup_dir
cp /etc/systemd/system/kubelet.service.d/10-kubeadm.conf $backup_dir
cp -rf /var/lib/kubelet/pki/* $backup_dir/kubelet
else
public::common::log "node configuration is already backup, skip."
fi
}
public::main::master-rotate() {
ls -l /usr/local
pwd
local backup_dir=/etc/kubeadm/backup-rotate-$(date +%F)
echo "mode is $MODE"
if ! grep "rotate-certificates" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then
./usr/local/k8s-upgrade/cert-rotate -mode=$MODE -role=master -nodeip=$NODE_IP -auto-rotate=false >$backup_dir/renew.log
else
./usr/local/k8s-upgrade/cert-rotate -mode=$MODE -role=master -nodeip=$NODE_IP >$backup_dir/renew.log
fi
if [[ "$MODE" = "etcd" ]]; then
public::common::log "Successful update cert on $(hostname)"
exit 0
fi
sleep 1
#renew the dashboard certs
if [ -d /etc/kubernetes/pki/dashboard ]; then
cp -rf /etc/kubernetes/pki/apiserver.crt /etc/kubernetes/pki/dashboard/dashboard.crt
cp -rf /etc/kubernetes/pki/apiserver.key /etc/kubernetes/pki/dashboard/dashboard.key
cp -rf /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/dashboard/dashboard-ca.crt
cat /etc/kubernetes/pki/client-ca.crt >>/etc/kubernetes/pki/dashboard/dashboard-ca.crt
if [ -f /etc/kubernetes/pki/user-ca.crt ]; then
cat /etc/kubernetes/pki/user-ca.crt >>/etc/kubernetes/pki/dashboard/dashboard-ca.crt
fi
cp -rf /etc/kubernetes/pki/dashboard/dashboard-ca.crt /etc/kubernetes/pki/apiserver-ca.crt
else
cp -rf /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/apiserver-ca.crt
cat /etc/kubernetes/pki/client-ca.crt >>/etc/kubernetes/pki/apiserver-ca.crt
if [ -f /etc/kubernetes/pki/user-ca.crt ]; then
cat /etc/kubernetes/pki/user-ca.crt >>/etc/kubernetes/pki/apiserver-ca.crt
fi
fi
# /etc/kubernetes/manifests pod can not be pull up automatically. use this to workaround.
set +e
docker ps | grep kube-controller-manager | awk '{print $1}' | xargs -I '{}' docker restart {} || true
crictl pods | grep kube-controller-manager | awk '{print $1}' | xargs -I '{}' crictl stopp {} || true
set -e
sleep 1
#restart kubelet
service kubelet restart
sleep 1
# /etc/kubernetes/manifests pod can not be pull up automatically. use this to workaround.
set +e
docker ps | grep kube-apiserver | awk '{print $1}' | xargs -I '{}' docker restart {} || true
crictl pods | grep kube-apiserver | awk '{print $1}' | xargs -I '{}' crictl stopp {} || true
sleep 1
docker ps | grep kube-scheduler | awk '{print $1}' | xargs -I '{}' docker restart {} || true
crictl pods | grep kube-scheduler | awk '{print $1}' | xargs -I '{}' crictl stopp {} || true
set -e
public::common::log "Successful update cert on $(hostname)"
}
public::main::node-rotate() {
if [ -f /etc/kubernetes/pki/apiserver.crt ]; then
public::common::log "Skip node rotate on master node"
exit 0
fi
local backup_dir=/etc/kubeadm/backup-rotate-$(date +%F)
if ! grep "rotate-certificates" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf; then
./usr/local/k8s-upgrade/cert-rotate -mode=$MODE -role=worker -auto-rotate=false -key=$KEY >$backup_dir/renew.log
else
./usr/local/k8s-upgrade/cert-rotate -mode=$MODE -role=worker -key=$KEY >$backup_dir/renew.log
fi
sleep 1
#restart kubelet
service kubelet restart
sleep 1
public::common::log "Successful update cert on $(hostname)"
}
public::main::master() {
public::upgrade::backupmaster
public::main::master-rotate
}
public::main::node() {
public::upgrade::backupnode
public::main::node-rotate
}
main() {
#use renew mode in default
export MODE=renew
while
[[ $# -gt 0 ]]
do
key="$1"
case $key in
--role)
export ROLE=$2
shift
;;
--mode)
export MODE=$2
shift
;;
--nodeip)
export NODE_IP=$2
shift
;;
--rootkey)
export KEY=$2
shift
;;
*)
public::common::log "unkonw option [$key]"
exit 1
;;
esac
shift
done
mkdir -p /etc/kubeadm/backup-rotate-$(date +%F)
#public::upgrade::backup
######################################################
case $ROLE in
"source")
public::common::log "source scripts"
;;
"master")
public::main::master
;;
"node")
public::main::node
;;
*)
echo "
Usage:
$0 --role master|node --mode renew|rotate
./rotate.sh
"
;;
esac
}
main "$@"
3. Run bash rotate-etcd.sh on any one master node. When the command line prints etcd CA and certs have successfully rotated!, the etcd certificates and Kubernetes certificates on all master nodes have been rotated.
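Once the script finishes, a few quick checks confirm that the rotation took effect. This sketch reuses the etcd endpoint and certificate paths from the backup step in section II, plus the ack-rotate-etcd-status ConfigMap written by generate_cm:
bash
# etcd should answer health checks with the newly issued certificates
ETCDCTL_API=3 etcdctl \
--cacert=/var/lib/etcd/cert/ca.pem \
--cert=/var/lib/etcd/cert/etcd-server.pem \
--key=/var/lib/etcd/cert/etcd-server-key.pem \
--endpoints=172.16.0.87:2379 endpoint health
# the rotation script records its result in the kube-system namespace
kubectl -n kube-system get configmap ack-rotate-etcd-status -o yaml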
During execution, the script backs up the etcd server and client certificates to the following directories:
- /var/lib/etcd/cert_<timestamp>
- /etc/kubernetes/pki/etcd_<timestamp>
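If anything goes wrong, these backups are the starting point for a rollback. A rough sketch of restoring the backed-up certificates on one master node, assuming a concrete timestamp suffix (the value below is a placeholder) and the restart-apiserver.sh helper that rotate-etcd.sh copied to /tmp:
bash
TS=20240101000000    # placeholder - use the real timestamp suffix created on your node
\cp -rf /var/lib/etcd/cert_$TS/* /var/lib/etcd/cert/
\cp -rf /etc/kubernetes/pki/etcd_$TS/* /etc/kubernetes/pki/etcd/
chown -R etcd:etcd /var/lib/etcd/cert
systemctl restart etcd
bash /tmp/restart-apiserver.sh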
4. Check the certificate validity period
bash
openssl x509 -in cert.crt -noout -dates
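For example, against the certificates touched in this article (paths as used above):
bash
# etcd server certificate
openssl x509 -in /var/lib/etcd/cert/etcd-server.pem -noout -dates
# etcd client certificate used by kube-apiserver
openssl x509 -in /etc/kubernetes/pki/etcd/etcd-client.pem -noout -dates
# kube-apiserver serving certificate
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -dates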