I. Environment Initialization
Role | IP
---|---
master01 | 192.168.174.143
master02 | 192.168.174.144
master03 | 192.168.174.145
node01 | 192.168.174.146
node02 | 192.168.174.147

Role | Components
---|---
master | nginx, APIServer, Scheduler, ControllerManager, kubelet, kube-proxy, ETCD
node | nginx, kubelet, kube-proxy
The author has a package containing all of the images and yaml files used here; leave a comment if you need it.
Run the following on all machines.
bash
1. Install dependency packages
yum install -y conntrack ipvsadm ipset jq iptables sysstat libseccomp wget vim net-tools
yum -y install lrzsz yum-utils telnet unzip tar iptables-services
yum -y install psmisc nfs-utils device-mapper-persistent-data lvm2
2. Set hostnames and the /etc/hosts resolution file
hostnamectl set-hostname master-01
hostnamectl set-hostname master-02
hostnamectl set-hostname master-03
hostnamectl set-hostname node-01
hostnamectl set-hostname node-02
...
...
vim /etc/hosts
192.168.174.143 master-01 ms1
192.168.174.144 master-02 ms2
192.168.174.145 master-03 ms3
192.168.174.146 node-01 n1
192.168.174.147 node-02 n2
3. Set up passwordless SSH
ssh-keygen
for i in ms1 ms2 ms3 n1 n2
do
ssh-copy-id $i
done
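# Optional sanity check (not part of the original steps): each host should answer without a password prompt
for i in ms1 ms2 ms3 n1 n2; do ssh $i hostname; done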
4. Switch the Rocky system repos to a mirror
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
-i.bak \
/etc/yum.repos.d/[Rr]ocky*.repo
dnf makecache
5. Replace firewalld with iptables; disable SELinux
systemctl stop firewalld
systemctl disable firewalld
yum -y install iptables-services
systemctl start iptables
iptables -F
systemctl enable iptables
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
grubby --update-kernel ALL --args selinux=0
# Verify SELinux is disabled
grubby --info DEFAULT
# Roll back the kernel-level disable if needed
grubby --update-kernel ALL --remove-args selinux
# Set the timezone
timedatectl set-timezone Asia/Shanghai
# Disable the swap partition
swapoff -a
sed -i 's:/dev/mapper/rl-swap:#/dev/mapper/rl-swap:g' /etc/fstab
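# Optional check: confirm swap is fully off (swapon should print nothing)
swapon --show
free -h | grep -i swap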
# Install ipvsadm
yum install -y ipvsadm
# Enable IP forwarding
echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf
sysctl -p
# Time synchronization
# Server side: run on the three masters only
yum install chrony -y
cat > /etc/chrony.conf << EOF
pool ntp.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 192.168.174.0/24
local stratum 10 # use a different stratum value on each of the three masters
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl restart chronyd ; systemctl enable chronyd
# Client side: run on the two node machines only
yum install chrony -y
cat > /etc/chrony.conf << EOF
server 192.168.174.143 iburst
server 192.168.174.144 iburst
server 192.168.174.145 iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
keyfile /etc/chrony.keys
leapsectz right/UTC
logdir /var/log/chrony
EOF
systemctl restart chronyd && systemctl enable chronyd
# Verify from a client
chronyc sources -v
6. Configure ulimit (all machines)
ulimit -SHn 65535
cat >> /etc/security/limits.conf <<EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
7. Install ipvs
yum install ipvsadm ipset sysstat conntrack libseccomp -y
cat >> /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack
# Tune kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
# net.ipv4.ip_conntrack_max is the legacy alias of net.netfilter.nf_conntrack_max, already set above
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.ipv6.conf.all.disable_ipv6 = 0
net.ipv6.conf.default.disable_ipv6 = 0
net.ipv6.conf.lo.disable_ipv6 = 0
net.ipv6.conf.all.forwarding = 1
EOF
sysctl --system
# Install Docker
# Static binary downloads: https://download.docker.com/linux/static/stable/x86_64/
# wget https://mirrors.ustc.edu.cn/docker-ce/linux/static/stable/x86_64/docker-25.0.3.tgz
tar xf docker-*.tgz
cp docker/* /usr/bin/
# Create the containerd service unit and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service
# Create the docker service unit
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service cri-docker.service docker.socket containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
# Create the docker socket unit
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
# Configure registry mirrors
mkdir /etc/docker/ -pv
mkdir -pv /data/docker
cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://docker.mirrors.ustc.edu.cn",
    "https://docker.cloudmessage.top",
    "https://kfp63jaj.mirror.aliyuncs.com",
    "https://j47dskil.mirror.aliyuncs.com"
  ],
  "max-concurrent-downloads": 10,
  "log-driver": "json-file",
  "log-level": "warn",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  },
  "data-root": "/data/docker"
}
EOF
# Start Docker
groupadd docker
systemctl daemon-reload
systemctl enable --now docker.socket
systemctl enable --now docker.service
systemctl status docker.service
docker info
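# Optional smoke test (assumes one of the mirrors above is reachable from this host)
docker run --rm hello-world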
# Unpack cri-dockerd
# https://github.com/Mirantis/cri-dockerd/releases/
# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.16/cri-dockerd-0.3.16.amd64.tgz
tar xvf cri-dockerd-*.amd64.tgz
cd cri-dockerd
cp -r cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd
# Write the cri-docker service unit
cat > /usr/lib/systemd/system/cri-docker.service <<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
# Write the cri-docker socket unit
cat > /usr/lib/systemd/system/cri-docker.socket <<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
systemctl daemon-reload
systemctl enable --now cri-docker.service
systemctl status cri-docker.service
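# Optional check: the CRI socket should now exist
ls -l /run/cri-dockerd.sock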
II. Download and Install K8S and ETCD
Run on the master01 node
1. Install the K8S and ETCD binaries
bash
wget https://github.com/etcd-io/etcd/releases/download/v3.5.12/etcd-v3.5.12-linux-amd64.tar.gz
wget https://dl.k8s.io/v1.29.2/kubernetes-server-linux-amd64.tar.gz
# Unpack the k8s binaries
tar -xf kubernetes-server-linux-amd64.tar.gz --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}
# Unpack the etcd binaries
tar -xf etcd*.tar.gz && mv etcd-*/etcd /usr/local/bin/ && mv etcd-*/etcdctl /usr/local/bin/
# List /usr/local/bin
ls /usr/local/bin/
# Check versions
kubelet --version
Kubernetes v1.29.2
etcdctl version
etcdctl version: 3.5.12
API version: 3.5
# Copy the components to the other k8s nodes
Master='master-02 master-03'
Work='node-01 node-02'
# Copy the master components
for NODE in $Master
do
echo $NODE
scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} $NODE:/usr/local/bin/
scp /usr/local/bin/etcd* $NODE:/usr/local/bin/
done
# Copy the worker components
for NODE in $Work
do
echo $NODE
scp /usr/local/bin/kube{let,-proxy,ctl} $NODE:/usr/local/bin/
done
# Run on all nodes
mkdir -p /opt/cni/bin
#
Install ansible if you want to run such commands across hosts (optional):
ansible ms1-n1 -m shell -a 'mkdir -p /opt/cni/bin'   # optional
ansible installation guide
2. Generate the certificates
Generate the ETCD certificates
bash
# Install the certificate tooling
# Download the cert generation tools on the master01 node
wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssl_1.6.4_linux_amd64" -O /usr/local/bin/cfssl
wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.4/cfssljson_1.6.4_linux_amd64" -O /usr/local/bin/cfssljson
# Generate the ETCD certificates (the directory below is needed on all master nodes)
# Generate the certificates on master01 first, then copy them to the other two masters
mkdir /etc/etcd/ssl -p
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
cat > etcd-ca-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
# Use cfssl to generate a self-signed CA from etcd-ca-csr.json; the output files are saved under /etc/etcd/ssl/etcd-ca*.
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca
cat > etcd-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
EOF
# Use the specified CA certificate and key to sign the etcd certificate from the CSR JSON and config file.
cfssl gencert -ca=/etc/etcd/ssl/etcd-ca.pem -ca-key=/etc/etcd/ssl/etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,master-01,master-02,master-03,192.168.174.143,192.168.174.144,192.168.174.145 -profile=kubernetes etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd
# Copy the certificates to the other master nodes
Master='master-02 master-03'
for NODE in $Master
do
ssh $NODE "mkdir -p /etc/etcd/ssl"
for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem
do
scp /etc/etcd/ssl/${FILE} $NODE:/etc/etcd/ssl/${FILE}
done
done
Generate the K8S certificates
bash
# Generate the K8S certificates (unless noted otherwise, run on all master nodes)
# Create the target directory (all master nodes)
Master='master-01 master-02 master-03'
for NODE in $Master
do
ssh $NODE "mkdir -p /etc/kubernetes/pki"
done
# First generate the k8s certificates on master01 (then copy them to the other two)
# Write the config files needed for certificate generation
# This configuration is used to generate the Kubernetes certificates that secure communication within the cluster.
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
# Use cfssl to generate a CA certificate from ca-csr.json; the files are saved under /etc/kubernetes/pki/ca*.
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca
cat > apiserver-csr.json << EOF
{
"CN": "kube-apiserver",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "Kubernetes",
"OU": "Kubernetes-manual"
}
]
}
EOF
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
# Sign the Kubernetes API Server certificate with the specified CA, passing a config file and the full set of hostnames/IPs, producing the cert and key the API Server and other components use to communicate securely
cfssl gencert -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem -config=ca-config.json -hostname=10.96.0.1,192.168.10.16,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.174.143,192.168.174.144,192.168.174.145,192.168.174.146,192.168.174.147,192.168.174.148,192.168.174.149,192.168.174.150,192.168.174.151,192.168.174.152 -profile=kubernetes apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver
# A few extra hostnames are fine; they leave room for future additions
Generate the apiserver aggregation certificates
bash
cat > front-proxy-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"ca": {
"expiry": "876000h"
}
}
EOF
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca
cat > front-proxy-client-csr.json << EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem \
-config=ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client
Generate the controller-manager certificate
bash
cat > manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/etc/kubernetes/pki/controller-manager.pem \
--client-key=/etc/kubernetes/pki/controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig
Generate the kube-scheduler certificate
bash
cat > scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/etc/kubernetes/pki/scheduler.pem \
--client-key=/etc/kubernetes/pki/scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Generate the admin certificate and kubeconfig
bash
cat > admin-csr.json << EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-credentials kubernetes-admin \
--client-certificate=/etc/kubernetes/pki/admin.pem \
--client-key=/etc/kubernetes/pki/admin-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config set-context kubernetes-admin@kubernetes \
--cluster=kubernetes \
--user=kubernetes-admin \
--kubeconfig=/etc/kubernetes/admin.kubeconfig
kubectl config use-context kubernetes-admin@kubernetes --kubeconfig=/etc/kubernetes/admin.kubeconfig
Create the kube-proxy certificate
bash
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-proxy",
"OU": "Kubernetes-manual"
}
]
}
EOF
cfssl gencert \
-ca=/etc/kubernetes/pki/ca.pem \
-ca-key=/etc/kubernetes/pki/ca-key.pem \
-config=ca-config.json \
-profile=kubernetes \
kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/etc/kubernetes/pki/kube-proxy.pem \
--client-key=/etc/kubernetes/pki/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config set-context kube-proxy@kubernetes \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
kubectl config use-context kube-proxy@kubernetes --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
Create the ServiceAccount key pair (secret)
bash
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
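# Optional check: confirm the generated key is valid
openssl rsa -in /etc/kubernetes/pki/sa.key -check -noout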
Distribute the certificates to the other master nodes
bash
# Send the certificates to the other master nodes
for NODE in master-02 master-03
do
for FILE in $(ls /etc/kubernetes/pki | grep -v etcd)
do
scp /etc/kubernetes/pki/${FILE} $NODE:/etc/kubernetes/pki/${FILE}
done
for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig
do
scp /etc/kubernetes/${FILE} $NODE:/etc/kubernetes/${FILE}
done
done
# Inspect the certificates
[root@k8s-master01 ~]# ls /etc/kubernetes/pki/
[root@k8s-master01 ~]# ls /etc/kubernetes/pki/ | wc -l
26
III. K8S System Component Configuration
ETCD Configuration
etcd configuration on master-01
bash
cat > /etc/etcd/etcd.config.yml << EOF
name: 'master-01'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.174.143:2380'
listen-client-urls: 'https://192.168.174.143:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.174.143:2380'
advertise-client-urls: 'https://192.168.174.143:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'master-01=https://192.168.174.143:2380,master-02=https://192.168.174.144:2380,master-03=https://192.168.174.145:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
etcd configuration on master-02
bash
cat > /etc/etcd/etcd.config.yml << EOF
name: 'master-02'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.174.144:2380'
listen-client-urls: 'https://192.168.174.144:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.174.144:2380'
advertise-client-urls: 'https://192.168.174.144:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'master-01=https://192.168.174.143:2380,master-02=https://192.168.174.144:2380,master-03=https://192.168.174.145:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
etcd configuration on master-03
bash
cat > /etc/etcd/etcd.config.yml << EOF
name: 'master-03'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://192.168.174.145:2380'
listen-client-urls: 'https://192.168.174.145:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://192.168.174.145:2380'
advertise-client-urls: 'https://192.168.174.145:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'master-01=https://192.168.174.143:2380,master-02=https://192.168.174.144:2380,master-03=https://192.168.174.145:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
Create the service unit (all master nodes)
bash
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
Create the etcd certificate directory
bash
mkdir /etc/kubernetes/pki/etcd
ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
# Start etcd
systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd.service
# Check etcd status
# If you want IPv6, just replace the IPv4 addresses with IPv6 ones
export ETCDCTL_API=3
etcdctl --endpoints="192.168.174.143:2379,192.168.174.144:2379,192.168.174.145:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
+----------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|       ENDPOINT       |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+----------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.174.143:2379 | bbcbc20300419492 |  3.5.12 |   20 kB |      true |      false |         2 |          9 |                  9 |        |
| 192.168.174.144:2379 | ccb6e8489b090334 |  3.5.12 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| 192.168.174.145:2379 | e9fe5fb46961e1e1 |  3.5.12 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
+----------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
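# Optional: also check endpoint health with the same credentials
etcdctl --endpoints="192.168.174.143:2379,192.168.174.144:2379,192.168.174.145:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint health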
Nginx Configuration
Compile on one machine, then copy to all the others
bash
# Install the build environment
yum install gcc -y
# Download and unpack the nginx source tarball
# wget http://nginx.org/download/nginx-1.25.3.tar.gz
tar xvf nginx-*.tar.gz
cd nginx-1.25.3
# Compile
./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
make && make install
# Copy the compiled nginx
node='master-02 master-03 node-01 node-02'
for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done
# Write the config file (run on all hosts)
# Note: \$remote_addr is escaped so the shell does not expand it inside the heredoc
cat > /usr/local/nginx/conf/kube-nginx.conf <<EOF
worker_processes 1;
events {
    worker_connections 1024;
}
stream {
    upstream backend {
        least_conn;
        hash \$remote_addr consistent;
        server 192.168.174.143:6443 max_fails=3 fail_timeout=30s;
        server 192.168.174.144:6443 max_fails=3 fail_timeout=30s;
        server 192.168.174.145:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 127.0.0.1:8443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF
cat > /etc/systemd/system/kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx
ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/kube-nginx.conf -p /usr/local/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Copy to all hosts
for i in $node; do scp /usr/local/nginx/conf/kube-nginx.conf $i:/usr/local/nginx/conf/; scp /etc/systemd/system/kube-nginx.service $i:/etc/systemd/system/; done
# Run on all hosts
systemctl daemon-reload
systemctl enable --now kube-nginx.service
systemctl status kube-nginx.service
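# Optional check: the local proxy should now be listening on 127.0.0.1:8443
ss -lntp | grep 8443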
Other Component Configuration
bash
# Create the following directories on all k8s nodes
# On master01, run this:
ansible ms1-n1 -m shell -a 'mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes'
# Or create them one host at a time
for i in ms1 ms2 ms3 n1 n2
do
ssh $i "mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes"
done
Create the apiserver service (all master nodes)
master01 node configuration
bash
# master01 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=192.168.174.143 \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://192.168.174.143:2379,https://192.168.174.144:2379,https://192.168.174.145:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
master02 node configuration
bash
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=192.168.174.144 \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://192.168.174.143:2379,https://192.168.174.144:2379,https://192.168.174.145:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
master03 node configuration
bash
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
--v=2 \\
--allow-privileged=true \\
--bind-address=0.0.0.0 \\
--secure-port=6443 \\
--advertise-address=192.168.174.145 \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--service-node-port-range=30000-32767 \\
--etcd-servers=https://192.168.174.143:2379,https://192.168.174.144:2379,https://192.168.174.145:2379 \\
--etcd-cafile=/etc/etcd/ssl/etcd-ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User \\
--enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
Start the apiserver
bash
systemctl daemon-reload
systemctl enable --now kube-apiserver.service
systemctl status kube-apiserver.service
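# Optional check (uses the admin client certificate generated earlier); should print "ok"
curl -s --cacert /etc/kubernetes/pki/ca.pem --cert /etc/kubernetes/pki/admin.pem --key /etc/kubernetes/pki/admin-key.pem https://127.0.0.1:6443/healthz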
This unit file defines the systemd service for the Kubernetes API Server.
Configure the kube-controller-manager service
bash
# Configure on all master nodes; the configuration is identical
# 172.16.0.0/12 is the pod CIDR; set your own network range as needed
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
--v=2 \\
--bind-address=0.0.0.0 \\
--root-ca-file=/etc/kubernetes/pki/ca.pem \\
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
--kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
--leader-elect=true \\
--use-service-account-credentials=true \\
--node-monitor-grace-period=40s \\
--node-monitor-period=5s \\
--controllers=*,bootstrapsigner,tokencleaner \\
--allocate-node-cidrs=true \\
--service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
--cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\
--node-cidr-mask-size-ipv4=24 \\
--node-cidr-mask-size-ipv6=120 \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
for i in ms2 ms3
do
scp /usr/lib/systemd/system/kube-controller-manager.service $i:/usr/lib/systemd/system/
done
# Start kube-controller-manager
systemctl daemon-reload
systemctl enable --now kube-controller-manager.service
systemctl status kube-controller-manager.service
# Alternatively, use ansible to restart the service on all three machines
vim manage_kube_controller_manager.yml
---
- name: Manage kube-controller-manager service
  hosts: your_target_hosts  # replace with the host group you actually target
  become: yes  # run with superuser privileges
  tasks:
    - name: Reload systemd manager configuration
      systemd:
        daemon_reload: yes
    - name: Enable and start kube-controller-manager service
      systemd:
        name: kube-controller-manager.service
        enabled: yes
        state: started
    - name: Check status of kube-controller-manager service
      systemd:
        name: kube-controller-manager.service
        state: started
        enabled: yes
      register: kube_controller_manager_status
    - name: Show kube-controller-manager service status
      debug:
        var: kube_controller_manager_status
ansible-playbook manage_kube_controller_manager.yml
Configure the kube-scheduler service
bash
# Configure on all master nodes; the configuration is identical
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
--v=2 \\
--bind-address=0.0.0.0 \\
--leader-elect=true \\
--kubeconfig=/etc/kubernetes/scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
# Copy to the other two master servers
for i in ms2 ms3
do
scp /usr/lib/systemd/system/kube-scheduler.service $i:/usr/lib/systemd/system/
done
# Start kube-scheduler
systemctl daemon-reload
systemctl enable --now kube-scheduler.service
systemctl status kube-scheduler.service
IV. TLS Bootstrapping Configuration
Configure on master01
bash
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true --server=https://127.0.0.1:8443 \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
# Check the cluster status
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy ok
cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF
kubectl create -f bootstrap.secret.yaml
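# Optional check: the bootstrap token secret should now exist
kubectl get secret bootstrap-token-c8ad9c -n kube-system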
# Run the following on master02 and master03
# so that kubectl can be used on the other nodes as well
mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
V. Node Configuration
1. Copy the certificates to the nodes
bash
# On master01, copy the certificates to the node machines
cd /etc/kubernetes/
for NODE in master-02 master-03 node-01 node-02
do
ssh $NODE mkdir -p /etc/kubernetes/pki
for FILE in pki/ca.pem pki/ca-key.pem pki/front-proxy-ca.pem bootstrap-kubelet.kubeconfig kube-proxy.kubeconfig
do
scp /etc/kubernetes/$FILE $NODE:/etc/kubernetes/${FILE}
done
done
2. Configure the kubelet (all nodes)
bash
# kubelet configuration
# When using docker as the runtime
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=network-online.target firewalld.service cri-docker.service docker.socket containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
ExecStart=/usr/local/bin/kubelet \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-conf.yml \\
--container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
--node-labels=node.kubernetes.io/node=
[Install]
WantedBy=multi-user.target
EOF
# IPv6 example
# Ignore this if you are not using IPv6
# Replace --node-ip below with each node's own addresses
# cat > /usr/lib/systemd/system/kubelet.service << EOF
# [Unit]
# Description=Kubernetes Kubelet
# Documentation=https://github.com/kubernetes/kubernetes
# After=network-online.target firewalld.service cri-docker.service docker.socket # containerd.service
# Wants=network-online.target
# Requires=docker.socket containerd.service
# [Service]
# ExecStart=/usr/local/bin/kubelet \\
# --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
# --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
# --config=/etc/kubernetes/kubelet-conf.yml \\
# --container-runtime-endpoint=unix:///run/cri-dockerd.sock \\
# --node-labels=node.kubernetes.io/node= \\
# --node-ip=192.168.1.31,2408:822a:245:8c01::fab
# [Install]
# WantedBy=multi-user.target
# EOF
cat > /etc/kubernetes/kubelet-conf.yml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: systemd
cgroupsPerQOS: true
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebuggingHandlers: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kubeAPIBurst: 10
kubeAPIQPS: 5
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
registryBurst: 10
registryPullQPS: 5
resolvConf: /etc/resolv.conf
rotateCertificates: true
runtimeRequestTimeout: 2m0s
serializeImagePulls: true
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
volumeStatsAggPeriod: 1m0s
EOF
# Start the kubelet
systemctl daemon-reload
systemctl enable --now kubelet.service
systemctl status kubelet.service
for i in ms2 ms3 n1 n2
do
scp /usr/lib/systemd/system/kubelet.service $i:/usr/lib/systemd/system/
scp /etc/kubernetes/kubelet-conf.yml $i:/etc/kubernetes/
done
# Or distribute the files with ansible
ansible master-1,node -m copy -a "src=/etc/kubernetes/kubelet-conf.yml dest=/etc/kubernetes/"
ansible master-1,node -m copy -a "src=/usr/lib/systemd/system/kubelet.service dest=/usr/lib/systemd/system/"
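# Optional check (run on master01 once kubelet is up everywhere): nodes should begin registering
kubectl get nodes
kubectl get csr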
3. Configure kube-proxy (all nodes)
bash
# Send the kubeconfig to the other nodes
# Run on master-1
for NODE in ms2 ms3 n1 n2; do scp /etc/kubernetes/kube-proxy.kubeconfig $NODE:/etc/kubernetes/kube-proxy.kubeconfig; done
# Add the kube-proxy service file on all k8s nodes
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/local/bin/kube-proxy \\
--config=/etc/kubernetes/kube-proxy.yaml \\
--cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
# Add the kube-proxy configuration on all k8s nodes
cat > /etc/kubernetes/kube-proxy.yaml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 172.16.0.0/12,fc00:2222::/112
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
for NODE in ms2 ms3 n1 n2; do scp /usr/lib/systemd/system/kube-proxy.service $NODE:/usr/lib/systemd/system/kube-proxy.service; done
for NODE in ms2 ms3 n1 n2; do scp /etc/kubernetes/kube-proxy.yaml $NODE:/etc/kubernetes/kube-proxy.yaml; done
# Start kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy.service
systemctl status kube-proxy.service
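# Optional check: in ipvs mode, kube-proxy should have programmed virtual servers
ipvsadm -Ln | head -n 20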
VI. Install the Network Plugin
bash
# Either source works (the second is a domestic mirror); note the raw manifest is required
wget https://raw.githubusercontent.com/projectcalico/calico/master/manifests/calico-typha.yaml
wget https://mirrors.chenby.cn/https://github.com/projectcalico/calico/blob/master/manifests/calico-typha.yaml
cp calico-typha.yaml calico.yaml
vim calico.yaml
# Under ConfigMap.data.cni_network_config.plugins (no change needed by default)
"ipam": {
    "type": "calico-ipam"
},
# Under the env of DaemonSet.spec.template.spec initContainers/containers (no change needed by default)
- name: IP
  value: "autodetect"
- name: CALICO_IPV4POOL_CIDR
  value: "172.16.0.0/12" # set this to match the pod CIDR configured above
# vim calico-ipv6.yaml
# in the calico-config ConfigMap:
# "ipam": {
# "type": "calico-ipam",
# "assign_ipv4": "true",
# "assign_ipv6": "true"
# },
# - name: IP
# value: "autodetect"
#
# - name: IP6
# value: "autodetect"
#
# - name: CALICO_IPV4POOL_CIDR
# value: "172.16.0.0/12"
#
# - name: CALICO_IPV6POOL_CIDR
# value: "fc00:2222::/112"
#
# - name: FELIX_IPV6SUPPORT
# value: "true"
# If the docker images cannot be pulled, use a domestic mirror
sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico.yaml
sed -i "s#docker.io/calico/#m.daocloud.io/docker.io/calico/#g" calico-ipv6.yaml
# To revert to the official registry, reverse the substitution:
# sed -i "s#m.daocloud.io/docker.io/calico/#docker.io/calico/#g" calico.yaml
# sed -i "s#m.daocloud.io/docker.io/calico/#docker.io/calico/#g" calico-ipv6.yaml
# Without public IPv6 locally, use calico.yaml
kubectl apply -f calico.yaml
[root@master-01 soft]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-57758d645c-x6ntz 1/1 Running 0 2d16h
kube-system calico-node-5ttff 1/1 Running 0 2d16h
kube-system calico-node-c4mnd 1/1 Running 0 2d16h
kube-system calico-node-q8z7n 1/1 Running 0 2d16h
kube-system calico-typha-7f974b9776-jgpt9 1/1 Running 0 2d16h
# ./dockertools load -l images/
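# Optional cross-node connectivity test (hypothetical pod names; assumes busybox can be pulled)
kubectl run net-test-1 --image=busybox:1.28 -- sleep 3600
kubectl run net-test-2 --image=busybox:1.28 -- sleep 3600
kubectl get pod -o wide
# then ping one pod's IP from the other:
# kubectl exec net-test-1 -- ping -c 3 <IP of net-test-2>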
VII. Install CoreDNS (master01 only)
Install helm
If the script download fails, upload the script manually and run it
bash
# Official download:
[root@k8s-master 10-pod]# curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
[root@k8s-master 10-pod]# chmod 755 get_helm.sh
[root@k8s-master 10-pod]# ./get_helm.sh
Downloading https://get.helm.sh/helm-v3.16.2-linux-amd64.tar.gz
Verifying checksum... Done.
Preparing to install helm into /usr/local/bin
helm installed into /usr/local/bin/helm
[root@k8s-master 10-pod]# helm version
version.BuildInfo{Version:"v3.16.2", GitCommit:"13654a52f7c70a143b1dd51416d633e1071faffb", GitTreeState:"clean", GoVersion:"go1.22.7"}
[root@k8s-master 10-pod]# helm repo ls
Error: no repositories to show (empty because no chart repositories have been added yet ---- > they play the same role as the *.repo files under /etc/yum.repos.d/ do for yum)
# Initialize
# Once Helm is installed, you can add a chart repository; look up valid Helm chart repositories on Artifact Hub
# bitnami is the repository name; what follows is the repository address
helm repo add bitnami https://charts.bitnami.com/bitnami
# Once added, you can list the charts available to install:
helm repo ls
helm search repo bitnami
Install CoreDNS with helm
bash
# Pull the chart tgz
helm repo add coredns https://coredns.github.io/helm
helm pull coredns/coredns
tar xvf coredns-*.tgz
cd coredns/
# Change the IP address
vim values.yaml
cat values.yaml | grep clusterIP:
clusterIP: "10.96.0.10"
# Example
---
service:
  # clusterIP: ""
  # clusterIPs: []
  # loadBalancerIP: ""
  # externalIPs: []
  # externalTrafficPolicy: ""
  # ipFamilyPolicy: ""
  # The name of the Service
  # If not set, a name is generated using the fullname template
  clusterIP: "10.96.0.10"
  name: ""
  annotations: {}
---
# Switch to domestic registries (docker source, optional)
sed -i "s#coredns/#m.daocloud.io/docker.io/coredns/#g" values.yaml
sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" values.yaml
# Install with default values
helm install coredns ./coredns/ -n kube-system
[root@master-01 soft]# kubectl get pod -A
kube-system coredns-798ffd599-79v6v 1/1 Running 0 2d15h
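# Optional DNS check: resolve the kubernetes service from a throwaway pod
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default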
VIII. Install Metrics Server
bash
# Download
wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
wget https://mirrors.chenby.cn/https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# Edit the configuration
vim components.yaml
---
# 1
- args:
  - --cert-dir=/tmp
  - --secure-port=10250
  - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
  - --kubelet-use-node-status-port
  - --metric-resolution=15s
  # add the following:
  - --kubelet-insecure-tls
  - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
  - --requestheader-username-headers=X-Remote-User
  - --requestheader-group-headers=X-Remote-Group
  - --requestheader-extra-headers-prefix=X-Remote-Extra-
# 2
  volumeMounts:
  - mountPath: /tmp
    name: tmp-dir
  # add the following:
  - name: ca-ssl
    mountPath: /etc/kubernetes/pki
# 3
  volumes:
  - emptyDir: {}
    name: tmp-dir
  # add the following:
  - name: ca-ssl
    hostPath:
      path: /etc/kubernetes/pki
---
# Switch to domestic registries (docker source, optional)
sed -i "s#registry.k8s.io/#m.daocloud.io/registry.k8s.io/#g" *.yaml
# Deploy
kubectl apply -f components.yaml
--kubelet-insecure-tls: skip TLS certificate verification when talking to the kubelets.
--requestheader-client-ca-file: the CA file used to verify client certificates.
--requestheader-username-headers: the request headers that carry the username.
--requestheader-group-headers: the request headers that carry the user's groups.
--requestheader-extra-headers-prefix: the prefix of request headers that carry extra user information.
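Once the metrics-server pod is Running, resource metrics should be queryable (optional check):
bash
kubectl top node
kubectl top pod -A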
IX. Install Command Completion
bash
yum install bash-completion -y
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
X. High-Availability Verification
Install the etcdhelper command
etcd stores objects in protobuf format; the etcdhelper tool decodes that content
bash
# First install the Go toolchain
wget -c https://dl.google.com/go/go1.22.9.linux-amd64.tar.gz
tar -C /usr/local -zxvf go1.22.9.linux-amd64.tar.gz
vim /etc/profile
# Go environment
export GOROOT=/usr/local/go
export PATH=$PATH:$GOROOT/bin
#export GOPROXY=http://mirrors.aliyun.com/goproxy/
export GOPROXY=https://goproxy.io
source /etc/profile
go version
# Install the etcdhelper tool
[root@k8s-master01 ]# wget https://github.com/openshift/origin/archive/refs/heads/master.zip
[root@k8s-master01 ]# unzip master.zip
[root@k8s-master01 ]# cd origin-master/tools/etcdhelper
[root@k8s-master01 etcdhelper]# ls
etcdhelper.go OWNERS README.md
[root@k8s-master01 etcdhelper]# go build etcdhelper.go
[root@k8s-master01 etcdhelper]# ls
etcdhelper etcdhelper.go OWNERS README.md
[root@k8s-master01 etcdhelper]# cp etcdhelper /usr/local/bin/
# It is now ready to use; usage is shown below
bash
# Confirm etcd high availability
export ETCDCTL_API=3
etcdctl --endpoints="192.168.174.143:2379,192.168.174.144:2379,192.168.174.145:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem endpoint status --write-out=table
# Add an alias
# cat /root/.bashrc
alias ectl='etcdhelper -endpoint "https://192.168.174.143:2379" -key /etc/etcd/ssl/etcd-key.pem -cert /etc/etcd/ssl/etcd.pem -cacert /etc/etcd/ssl/etcd-ca.pem'
source /root/.bashrc
# Check the current kube-scheduler leader
ectl get /registry/leases/kube-system/kube-scheduler
# Check the current kube-controller-manager leader
ectl get /registry/leases/kube-system/kube-controller-manager
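# Optional failover test (assumes the current kube-scheduler lease holder is known from the command above):
# stop the scheduler on that leader, then watch the lease move to another master
systemctl stop kube-scheduler.service    # on the current leader
ectl get /registry/leases/kube-system/kube-scheduler   # holderIdentity should switch to another master
systemctl start kube-scheduler.service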
How etcd is read:
https://github.com/openshift/origin/tree/master/tools/etcdhelper
Official explanation:
https://github.com/kubernetes/kubernetes/issues/44670
bash
# If kubectl get nodes does not show some of the nodes
kubectl get nodes
[root@master-01 manifests]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-01 Ready <none> 2d22h v1.29.2
node-01 Ready <none> 2d22h v1.29.2
# Log in to the host that is missing from the list
systemctl stop kube-proxy.service
systemctl stop kubelet
systemctl stop kube-nginx
# Then run the following
cat > /usr/local/nginx/conf/kube-nginx.conf <<EOF
worker_processes 1;
events {
    worker_connections 1024;
}
stream {
    upstream backend {
        least_conn;
        hash \$remote_addr consistent;
        server 192.168.174.143:6443 max_fails=3 fail_timeout=30s;
        server 192.168.174.144:6443 max_fails=3 fail_timeout=30s;
        server 192.168.174.145:6443 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 127.0.0.1:8443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF
# Remove the broken state so it can be regenerated
rm -rf /var/lib/kubelet/*
systemctl start kube-nginx
systemctl start kube-proxy
systemctl start kubelet