Deploying a Kubernetes v1.22.2 Cluster with kubeadm

Environment:

Hostname    Host IP          Role     Installed software
VIP         192.168.12.210   VIP      virtual IP, held by keepalived (no software of its own)
master-01   192.168.12.211   master   kubeadm, kubelet, kubectl, docker, nginx, keepalived, kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy, coredns, etcd, pause
master-02   192.168.12.212   master   same as master-01
master-03   192.168.12.213   master   same as master-01
node-01     192.168.12.214   node     kubeadm, kubelet, kubectl, docker, kube-proxy, coredns, pause
node-02     192.168.12.215   node     kubeadm, kubelet, kubectl, docker, kube-proxy, coredns, pause

Part 1: Base environment setup

1. Add /etc/hosts entries on all machines so every host can resolve itself and the others by name.

2. Configure a static IP on all machines.

3. Disable the firewall and SELinux on all machines.

4. Synchronize time on all nodes.

5. Install docker on all nodes (a consolidated sketch of steps 1-5 follows after this list).

6. Disable the swap partition:

swapoff -a                          # disable swap immediately
sed -i 's/.*swap.*/#&/' /etc/fstab  # comment out swap entries so it stays off after reboot

7. Enable IPv4 forwarding:

# enable temporarily
sudo sysctl -w net.ipv4.ip_forward=1

# enable permanently
sudo vi /etc/sysctl.d/99-sysctl.conf
# add or modify:
net.ipv4.ip_forward = 1

sudo sysctl -p /etc/sysctl.d/99-sysctl.conf

# verify
sysctl net.ipv4.ip_forward
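
Steps 1-5 above are standard CentOS 7 preparation. A minimal consolidated sketch is shown below; it assumes CentOS 7, chrony for time sync, and the Aliyun docker-ce mirror, so adjust hostnames, repo URLs, and package versions to your environment.

# run on every machine; hostnames/IPs are this article's example values
cat >> /etc/hosts <<EOF
192.168.12.210 vip
192.168.12.211 master-01
192.168.12.212 master-02
192.168.12.213 master-03
192.168.12.214 node-01
192.168.12.215 node-02
EOF

# disable the firewall and SELinux
systemctl disable --now firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config

# time synchronization
yum install -y chrony
systemctl enable --now chronyd

# docker (Aliyun mirror assumed)
yum install -y yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
systemctl enable --now docker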

Part 2: Deploying Kubernetes with kubeadm

Install kubeadm and kubelet on all nodes:

# configure the Kubernetes yum repo (Aliyun mirror)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# install version 1.22.2
yum install -y kubelet-1.22.2-0.x86_64 kubeadm-1.22.2-0.x86_64 kubectl-1.22.2-0.x86_64 ipvsadm ipset

# load the ipvs kernel modules
# they must be reloaded after every reboot, so also append the modprobe lines
# below to /etc/rc.local for automatic loading at boot
vim /etc/rc.local
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4   # on kernels >= 4.19 the module is named nf_conntrack

chmod +x /etc/rc.local

# configure forwarding-related kernel parameters; kubeadm preflight checks fail without them
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF

# load br_netfilter before applying: the bridge-nf-call-* keys only exist once the module is loaded
modprobe br_netfilter
sysctl --system
sysctl -p /etc/sysctl.d/k8s.conf

Check the available kubeadm versions:

yum list --showduplicates kubeadm --disableexcludes=kubernetes

Configure and start kubelet (all nodes):

# read the cgroup driver docker is using
DOCKER_CGROUPS=`docker info |grep 'Cgroup' | awk ' NR==1 {print $3}'`
echo $DOCKER_CGROUPS

# point kubelet at the same cgroup driver and at the local pause image
cat >/etc/sysconfig/kubelet<<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=$DOCKER_CGROUPS --pod-infra-container-image=k8s.gcr.io/pause:3.5"
EOF

systemctl daemon-reload
systemctl enable kubelet   # kubeadm expects kubelet to be enabled; it starts it during init/join

Starting kubelet now would fail because kubeadm init has not yet generated the CA certificates; this resolves itself after init runs, so the error can be ignored for now.

Deploying the load balancer

Install Nginx and Keepalived on all master nodes:

# 1. configure the official nginx yum repo
[root@master-01 ~] vim /etc/yum.repos.d/nginx.repo
[nginx-stable]
name=nginx stable repo
baseurl=http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck=1
enabled=1
gpgkey=https://nginx.org/keys/nginx_signing.key
module_hotfixes=true
[root@master-01 ~] yum install -y nginx keepalived

# configure nginx as a layer-4 (stream) proxy in front of the masters' kube-apiservers
[root@master-01 ~] vim /etc/nginx/nginx.conf   # add the stream block below above the existing http block
...
stream {
    upstream apiserver {
        server 192.168.12.211:6443 weight=5 max_fails=3 fail_timeout=30s;
        server 192.168.12.212:6443 weight=5 max_fails=3 fail_timeout=30s;
        server 192.168.12.213:6443 weight=5 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 8443;
        proxy_pass apiserver;
    }
}
http {   # ...the existing http block follows, unchanged
[root@master-01 ~] mv /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf.bak
[root@master-01 ~] systemctl start nginx 
[root@master-01 ~] systemctl enable nginx
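
Before moving on, it is worth checking that nginx is actually listening on 8443 on each master. At this stage the apiserver backends do not exist yet, so only the listener can be verified:

ss -lntp | grep 8443
# after kubeadm init and keepalived are up, the full path can be probed through the
# VIP; /healthz is readable anonymously and should return "ok":
curl -k https://192.168.12.210:8443/healthz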

# 2. configure keepalived for high availability
[root@master-01 ~] vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.12.210/24
    }
}
[root@master-01 ~] systemctl start keepalived
[root@master-01 ~] systemctl enable keepalived


[root@master-02 ~] vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.12.210/24
    }
}
[root@master-02 ~] systemctl start keepalived
[root@master-02 ~] systemctl enable keepalived

[root@master-03 ~] vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 80
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.12.210/24
    }
}
[root@master-03 ~] systemctl start keepalived
[root@master-03 ~] systemctl enable keepalived
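
As configured, keepalived only moves the VIP when a whole node goes down, not when nginx itself dies. A common refinement, sketched below, is to track nginx with a check script; the path /etc/keepalived/check_nginx.sh is an assumption of this sketch, and the vrrp_script block goes into each master's keepalived.conf alongside the matching vrrp_instance:

# /etc/keepalived/check_nginx.sh  (create on every master, then chmod +x)
#!/bin/bash
ss -lnt | grep -q ':8443 ' || exit 1   # fail if nginx no longer listens on 8443

# add to keepalived.conf:
vrrp_script chk_nginx {
    script "/etc/keepalived/check_nginx.sh"
    interval 2        # run every 2 seconds
    weight -30        # drop priority by 30 on failure, forcing a VIP failover
}
# and inside vrrp_instance VI_1 { ... }:
    track_script {
        chk_nginx
    }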

Pull the images required for v1.22.2 on all nodes:

# pull the images
vim pull.sh 
#!/usr/bin/bash
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.8.4
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5

bash pull.sh

# after pulling, re-tag all the Aliyun images with their k8s.gcr.io names,
# e.g. k8s.gcr.io/kube-controller-manager:v1.22.2
vim tag.sh
#!/usr/bin/bash
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.22.2 k8s.gcr.io/kube-controller-manager:v1.22.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.22.2 k8s.gcr.io/kube-proxy:v1.22.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.22.2 k8s.gcr.io/kube-apiserver:v1.22.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.22.2 k8s.gcr.io/kube-scheduler:v1.22.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.8.4 k8s.gcr.io/coredns/coredns:v1.8.4
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.0-0 k8s.gcr.io/etcd:3.5.0-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.5 k8s.gcr.io/pause:3.5

bash tag.sh
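
Before running kubeadm init, it is worth confirming that every expected tag is present locally:

docker images | grep k8s.gcr.io
# expect kube-apiserver, kube-controller-manager, kube-scheduler and kube-proxy at v1.22.2,
# plus coredns/coredns v1.8.4, etcd 3.5.0-0 and pause 3.5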

Configure the first master node

Run this on one master node only (master-01 here):

kubeadm init --kubernetes-version=v1.22.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.12.211 --control-plane-endpoint=192.168.12.210:8443 --ignore-preflight-errors=Swap
#------ on success, output similar to the following appears ------#
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join 192.168.12.210:8443 --token 51rolq.qfofvjoz41t8ch1t \
        --discovery-token-ca-cert-hash sha256:2f8521a52b3c9e8a929effac7a6547d8b9c2db3a5ddc67f240b41b6ad16a339f \
        --control-plane   # used when joining additional masters to the HA control plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.12.210:8443 --token 51rolq.qfofvjoz41t8ch1t \
        --discovery-token-ca-cert-hash sha256:2f8521a52b3c9e8a929effac7a6547d8b9c2db3a5ddc67f240b41b6ad16a339f   # used when joining worker nodes
#------------------------------------#
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config        
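
The bootstrap token embedded in the join commands expires after 24 hours. If you join nodes later, fresh credentials can be generated with standard kubeadm subcommands; kubeadm's --upload-certs/--certificate-key mechanism is also an alternative to the manual certificate copy in the next step:

# print a fresh worker join command
kubeadm token create --print-join-command

# re-upload the control-plane certificates and print the key to pass
# to "kubeadm join ... --control-plane --certificate-key <key>"
kubeadm init phase upload-certs --upload-certs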

Join master-02 and master-03 to the cluster

# 1. on master-02 and master-03, create the directory that will hold the certificates
[root@master-02 ~] mkdir /etc/kubernetes/pki/etcd -p
[root@master-03 ~] mkdir /etc/kubernetes/pki/etcd -p

# 2. on master-01, copy master-01's certificates to master-02 and master-03
[root@master-01 ~] scp -rp /etc/kubernetes/pki/ca.*  master-02:/etc/kubernetes/pki/  
[root@master-01 ~] scp -rp /etc/kubernetes/pki/sa.*  master-02:/etc/kubernetes/pki/   
[root@master-01 ~] scp -rp /etc/kubernetes/pki/front-proxy-ca.*  master-02:/etc/kubernetes/pki/   
[root@master-01 ~] scp -rp /etc/kubernetes/pki/etcd/ca.*  master-02:/etc/kubernetes/pki/etcd/
[root@master-01 ~] scp -rp /etc/kubernetes/admin.conf  master-02:/etc/kubernetes/


[root@master-01 ~] scp -rp /etc/kubernetes/pki/ca.*  master-03:/etc/kubernetes/pki/ 
[root@master-01 ~] scp -rp /etc/kubernetes/pki/sa.*  master-03:/etc/kubernetes/pki/
[root@master-01 ~] scp -rp /etc/kubernetes/pki/front-proxy-ca.*  master-03:/etc/kubernetes/pki/
[root@master-01 ~] scp -rp /etc/kubernetes/pki/etcd/ca.*  master-03:/etc/kubernetes/pki/etcd/   
[root@master-01 ~] scp -rp /etc/kubernetes/admin.conf  master-03:/etc/kubernetes/
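
Equivalently, the scp commands above can be collapsed into a loop (assumes SSH access to both hosts; the mkdir from step 1 is repeated so the loop is self-contained):

for h in master-02 master-03; do
  ssh $h 'mkdir -p /etc/kubernetes/pki/etcd'
  scp /etc/kubernetes/pki/ca.* /etc/kubernetes/pki/sa.* \
      /etc/kubernetes/pki/front-proxy-ca.* $h:/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/ca.* $h:/etc/kubernetes/pki/etcd/
  scp /etc/kubernetes/admin.conf $h:/etc/kubernetes/
done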

# 3. on master-02 and master-03, paste the control-plane join command printed by kubeadm init above
# master-02:
[root@master-02 ~] kubeadm join 192.168.12.210:8443 --token 51rolq.qfofvjoz41t8ch1t \
>         --discovery-token-ca-cert-hash sha256:2f8521a52b3c9e8a929effac7a6547d8b9c2db3a5ddc67f240b41b6ad16a339f \
>         --control-plane

#------ on success, output similar to the following appears ------#
This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
#------------------------------------#

[root@master-02 ~] mkdir -p $HOME/.kube
[root@master-02 ~] cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-02 ~] chown $(id -u):$(id -g) $HOME/.kube/config


# master-03:
[root@master-03 ~] kubeadm join 192.168.12.210:8443 --token 51rolq.qfofvjoz41t8ch1t \
>         --discovery-token-ca-cert-hash sha256:2f8521a52b3c9e8a929effac7a6547d8b9c2db3a5ddc67f240b41b6ad16a339f \
>         --control-plane

#------ on success, output similar to the following appears ------#
This node has joined the cluster and a new control plane instance was created:

* Certificate signing request was sent to apiserver and approval was received.
* The Kubelet was informed of the new secure connection details.
* Control plane (master) label and taint were applied to the new node.
* The Kubernetes control plane instances scaled up.
* A new etcd member was added to the local/stacked etcd cluster.

To start administering your cluster from this node, you need to run the following as a regular user:

        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run 'kubectl get nodes' to see this node join the cluster.
#------------------------------------#

[root@master-03 ~] mkdir -p $HOME/.kube
[root@master-03 ~] cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-03 ~] chown $(id -u):$(id -g) $HOME/.kube/config


# on any master, run kubectl get nodes to verify the new control-plane nodes were added
kubectl get nodes

Add the worker nodes

# worker nodes need no extra configuration; just run the join command
# on node-01 and node-02:

[root@node-01 ~] kubeadm join 192.168.12.210:8443 --token 51rolq.qfofvjoz41t8ch1t \
>         --discovery-token-ca-cert-hash sha256:2f8521a52b3c9e8a929effac7a6547d8b9c2db3a5ddc67f240b41b6ad16a339f
......
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@node-02 ~] kubeadm join 192.168.12.210:8443 --token 51rolq.qfofvjoz41t8ch1t \
>         --discovery-token-ca-cert-hash sha256:2f8521a52b3c9e8a929effac7a6547d8b9c2db3a5ddc67f240b41b6ad16a339f
......
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
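
kubectl get nodes will list the workers with ROLES <none>; that is expected, since kubeadm only labels control-plane nodes. If you want a visible worker role, it can be added manually on any master:

kubectl label node node-1 node-role.kubernetes.io/worker=
kubectl label node node-2 node-role.kubernetes.io/worker=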

Check the cluster

[root@master-01 ~] kubectl get nodes
NAME       STATUS     ROLES                  AGE     VERSION
master01   NotReady   control-plane,master   20m     v1.22.2
master02   NotReady   control-plane,master   5m56s   v1.22.2
master03   NotReady   control-plane,master   5m9s    v1.22.2
node-1     NotReady   <none>                 2m10s   v1.22.2
node-2     NotReady   <none>                 117s    v1.22.2

[root@master-01 ~] kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.12.210:8443 # the cluster VIP and load-balancer port
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED

Configure the network plugin

Run on a master node:

cd ~ && mkdir flannel && cd flannel
curl -O https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# if the yaml cannot be downloaded, https://download.csdn.net/download/Dsl00403/91412090 contains a copy already modified to match this article
vim kube-flannel.yml
# the Network value below must match kubeadm's --pod-network-cidr; they already match, so no change is needed
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
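
If in doubt which pod CIDR the cluster was initialized with, it can be read back from the ConfigMap kubeadm stores in kube-system:

kubectl -n kube-system get cm kubeadm-config -o yaml | grep podSubnet
# expect: podSubnet: 10.244.0.0/16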

# note: the flannel image referenced in kube-flannel.yml is quay.io/coreos/flannel:v0.14.0 and needs to be pulled in advance.


# if a node has multiple NICs, see https://github.com/kubernetes/kubernetes/issues/39701:
# you currently need the --iface argument in kube-flannel.yml to name the host's internal NIC,
# otherwise DNS may fail to resolve and containers may be unable to communicate.
# download kube-flannel.yml locally and add --iface=<iface-name> to the flanneld args:
    containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0   # use whatever version your kube-flannel.yml references
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens33
        #- --iface=eth0
        
⚠️ The value of --iface=ens33 must be your node's actual NIC name; multiple interfaces can also be specified.
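
To find the NIC name to pass to --iface, list the IPv4 addresses on any node and pick the interface carrying the 192.168.12.0/24 address:

ip -4 -o addr show | awk '{print $2, $4}'
# e.g.  ens33 192.168.12.211/24  ->  --iface=ens33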

# Since kubeadm 1.12, nodes carry the taint node.kubernetes.io/not-ready:NoSchedule
# until they become Ready -- but a node cannot become Ready before the network
# plugin is deployed. kube-flannel.yml therefore tolerates that taint; make sure
# the DaemonSet pod spec contains a toleration like the following:
      hostNetwork: true
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel

The images required by the yaml must be pulled in advance; if they cannot be downloaded, the links below contain saved image tarballs that can be imported:

https://download.csdn.net/download/Dsl00403/91412095?spm=1001.2014.3001.5501
https://download.csdn.net/download/Dsl00403/91412098

docker load -i flannel.tar
docker load -i flannel-cni-plugin.tar
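
Confirm the load succeeded before applying the manifest:

docker images | grep flannel
# both the flannel and flannel-cni-plugin images should be listed,
# with the exact tags referenced in kube-flannel.yml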

Once the images are imported, deploy the network plugin:

# deploy:
[root@master-01 flannel] kubectl apply -f ~/flannel/kube-flannel.yml   # allow a short wait after applying

[root@master-01 flannel] kubectl get nodes
NAME       STATUS   ROLES                  AGE   VERSION
master01   Ready    control-plane,master   22h   v1.22.2
master02   Ready    control-plane,master   21h   v1.22.2
master03   Ready    control-plane,master   21h   v1.22.2
node-1     Ready    <none>                 21h   v1.22.2
node-2     Ready    <none>                 21h   v1.22.2
[root@master-01 flannel] kubectl get pod -n kube-system 
NAME                               READY   STATUS    RESTARTS      AGE
coredns-78fcd69978-q7t4c           1/1     Running   0             22h
coredns-78fcd69978-wzv22           1/1     Running   0             22h
etcd-master01                      1/1     Running   3 (20h ago)   22h
etcd-master02                      1/1     Running   1 (20h ago)   21h
etcd-master03                      1/1     Running   1 (20h ago)   21h
kube-apiserver-master01            1/1     Running   3 (20h ago)   22h
kube-apiserver-master02            1/1     Running   1 (20h ago)   21h
kube-apiserver-master03            1/1     Running   2 (20h ago)   21h
kube-controller-manager-master01   1/1     Running   4 (20h ago)   22h
kube-controller-manager-master02   1/1     Running   1 (20h ago)   21h
kube-controller-manager-master03   1/1     Running   1 (20h ago)   21h
kube-proxy-2g5pj                   1/1     Running   1 (20h ago)   22h
kube-proxy-2p579                   1/1     Running   3 (20h ago)   21h
kube-proxy-58g4q                   1/1     Running   1 (20h ago)   21h
kube-proxy-jr4nv                   1/1     Running   2 (20h ago)   21h
kube-proxy-z887s                   1/1     Running   1 (20h ago)   21h
kube-scheduler-master01            1/1     Running   4 (20h ago)   22h
kube-scheduler-master02            1/1     Running   1 (20h ago)   21h
kube-scheduler-master03            1/1     Running   1 (20h ago)   21h
[root@master-01 flannel] kubectl get pod -n kube-flannel 
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-96r5v   1/1     Running   0          29s
kube-flannel-ds-9x9sn   1/1     Running   0          29s
kube-flannel-ds-sp954   1/1     Running   0          29s
kube-flannel-ds-x68pp   1/1     Running   0          29s
kube-flannel-ds-zv9m9   1/1     Running   0          29s

# check:
kubectl get pods --namespace kube-system
kubectl get service
kubectl get svc --namespace kube-system
# nodes only show Ready after the network plugin is installed and running
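
As a final smoke test, a throwaway deployment plus an in-cluster DNS lookup exercises scheduling, pod networking, and CoreDNS end to end (the names web and dns-test are arbitrary examples):

kubectl create deployment web --image=nginx
kubectl expose deployment web --port=80
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup web.default
kubectl delete deployment web && kubectl delete service web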