A complete server setup walkthrough for a Kubernetes (k8s) cluster

1. Disable SELinux

#disable temporarily
setenforce 0
#disable permanently
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
#check whether SELinux is disabled
sestatus

2. Disable the swap partition

#disable temporarily from the command line
swapoff -a
#disable permanently
vim /etc/fstab
Comment out the line containing swap, then reboot (a non-interactive alternative is sketched below).
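If you prefer not to edit /etc/fstab by hand, a sed one-liner like the following does the same thing (a sketch; it comments out every uncommented fstab line whose fields include swap, so double-check the file afterwards):

sed -ri '/^[^#].*\sswap\s/s/^/#/' /etc/fstab
#verify no swap is active (after the swapoff -a above)
free -h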

3. Let iptables see bridged traffic and load the br_netfilter module

cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

sysctl --system
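The modules-load.d entry only takes effect at the next boot, so load br_netfilter right away and confirm the sysctls applied:

modprobe br_netfilter
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables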

#stop the firewall
systemctl stop firewalld
systemctl disable firewalld

4. Set a unique hostname on each server

hostnamectl set-hostname xxxxx
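Optionally, map every node's hostname to its internal IP in /etc/hosts on all machines so the nodes can resolve each other by name (the hostnames and the .179/.180 addresses below are placeholders):

cat >> /etc/hosts << EOF
10.1.3.178 k8s-master01
10.1.3.179 k8s-worker01
10.1.3.180 k8s-worker02
EOF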

5. Install Docker

CentOS

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast && yum -y install docker-ce
systemctl enable docker && systemctl start docker

Ubuntu

apt install -y  apt-transport-https ca-certificates
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt update && apt install -y docker-ce
systemctl enable docker && systemctl start docker

Debian

apt install -y  apt-transport-https ca-certificates
curl -fsSL http://mirrors.aliyun.com/docker-ce/linux/debian/gpg | apt-key add -
add-apt-repository "deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/debian $(lsb_release -cs) stable"
apt update && apt install -y docker-ce
systemctl enable docker && systemctl start docker

Modify the Docker daemon configuration

cat > /etc/docker/daemon.json << EOF
{
    "registry-mirrors": [
        "http://mirrors.ustc.edu.cn/",
        "http://docker.jx42.com",
        "https://0c105db5188026850f80c001def654a0.mirror.swr.myhuaweicloud.com",
        "https://5tqw56kt.mirror.aliyuncs.com",
        "https://docker.1panel.live",
        "http://mirror.azure.cn/",
        "https://hub.rat.dev/",
        "https://docker.ckyl.me/",
        "https://docker.chenby.cn",
        "https://docker.hpcloud.cloud"
    ],
    "exec-opts":["native.cgroupdriver=systemd"]
}
EOF

systemctl restart docker
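A quick check that Docker picked up the systemd cgroup driver (it must match the kubelet's):

docker info | grep -i cgroup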

Modify the containerd configuration

containerd config default > /etc/containerd/config.toml
sed -i 's/registry.k8s.io\/pause:3.8/registry.aliyuncs.com\/google_containers\/pause:3.9/g' /etc/containerd/config.toml 
or
sed -i 's/registry.k8s.io/registry.aliyuncs.com\/google_containers/g' /etc/containerd/config.toml 
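If containerd (rather than cri-dockerd) will serve as the CRI for the kubelet, the cgroup driver in the config should also be switched to systemd to match; assuming the stock config.toml generated above still has SystemdCgroup = false, a one-line change is:

sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml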

systemctl restart containerd

Configure containerd registry mirrors

vim /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
        endpoint = ["https://atomhub.openatom.cn"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io/library"]
        endpoint = ["https://atomhub.openatom.cn/library"]
    [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.k8s.io"]
        endpoint = ["https://registry.aliyuncs.com/google_containers"]

6. Install Docker offline (alternative to step 5)

wget https://download.docker.com/linux/static/stable/x86_64/docker-27.2.0.tgz
tar zxvf docker-27.2.0.tgz  -C ./
cp -p ./docker/* /usr/bin/
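The static tarball does not ship systemd unit files, so the enable/start commands below need a docker.service to exist first. A minimal sketch modeled on the upstream unit (adjust paths and options to your environment; the quoted 'EOF' keeps $MAINPID from being expanded by the shell):

cat > /usr/lib/systemd/system/docker.service << 'EOF'
[Unit]
Description=Docker Application Container Engine
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
Restart=always
Delegate=yes
KillMode=process
LimitNOFILE=infinity

[Install]
WantedBy=multi-user.target
EOF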

systemctl daemon-reload
systemctl enable docker && systemctl start docker

7. Install cri-dockerd

wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.15/cri-dockerd-0.3.15.amd64.tgz
tar -xf cri-dockerd-0.3.15.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/cri-dockerd
curl https://github.com/Mirantis/cri-dockerd/raw/master/packaging/systemd/cri-docker.service -L -o /usr/lib/systemd/system/cri-docker.service
curl https://raw.githubusercontent.com/Mirantis/cri-dockerd/master/packaging/systemd/cri-docker.socket -L -o /usr/lib/systemd/system/cri-docker.socket 

#modify the cri-docker configuration
vim /usr/lib/systemd/system/cri-docker.service
#add the pod-infra-container-image parameter to ExecStart
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9

systemctl daemon-reload
systemctl start cri-docker

#inspect the cri-docker runtime info
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock info

8. Install IPVS

#centos
yum -y install ipvsadm ipset
#ubuntu&debian
apt -y install ipvsadm ipset

#check whether the modules are loaded
lsmod | grep ip_vs

#if they are not loaded, load them manually
cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
#modprobe -- nf_conntrack_ipv4  #newer kernels no longer ship the _ipv4 module; use nf_conntrack instead
modprobe -- nf_conntrack
EOF
 
chmod 755 /etc/sysconfig/modules/ipvs.modules
source /etc/sysconfig/modules/ipvs.modules
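/etc/sysconfig/modules/ is a CentOS convention; on Ubuntu/Debian a modules-load.d file achieves the same persistence (a sketch):

cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load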

9. Install the Kubernetes components

CentOS

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum makecache
yum list --showduplicates kubectl
yum install -y kubelet-1.28.2-0 kubeadm-1.28.2-0 kubectl-1.28.2-0

Ubuntu

apt update && apt install -y apt-transport-https ca-certificates curl gnupg
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add - 
cat << EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt update
apt-cache madison kubectl
apt install -y kubelet=1.28.2-00 kubeadm=1.28.2-00 kubectl=1.28.2-00

Enable all components to start on boot

systemctl enable containerd
systemctl enable docker
systemctl enable cri-docker
systemctl enable kubelet

If IPVS was not selected when running kubeadm init, it can still be switched on afterwards

kubectl edit -n kube-system cm kube-proxy
Change mode to "ipvs"

#delete the kube-proxy pods; k8s recreates them automatically
kubectl get pod -n kube-system |grep kube-proxy|xargs kubectl delete pod -n kube-system

#then check the logs; a "Using ipvs Proxier" line means IPVS is in use
kubectl get pod -n kube-system | grep kube-proxy
kubectl logs -n kube-system kube-proxy-4c5xj

#inspect the resulting forwarding rules
ipvsadm -Ln

10. Initialize the master node

#pull the images first
kubeadm config images pull \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.28.2 \
--cri-socket=unix:///var/run/cri-dockerd.sock

*If pulling is slow or seems to hang, check the service logs.
View the cri-docker service logs:
journalctl -xefu cri-docker
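To see exactly which images that pull will fetch (handy for pre-staging them on offline machines), kubeadm can list them first:

kubeadm config images list \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.28.2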

#run the initialization
kubeadm init \
--apiserver-advertise-address=<server-internal-IP> \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.28.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--cri-socket=unix:///var/run/cri-dockerd.sock

#to skip cri-dockerd and have k8s talk to containerd directly, use this socket instead
--cri-socket=unix:///run/containerd/containerd.sock

#kubeadm init itself has no flag for selecting IPVS (which performs better than iptables); either pass a kubeadm config file containing a KubeProxyConfiguration with mode: ipvs (see the sketch below), or edit the kube-proxy ConfigMap after init as shown earlier.
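A minimal sketch of that config-file route, assuming the same version, image repository, CIDRs and cri-dockerd socket used above (the advertiseAddress is a placeholder; replace it with your master's internal IP):

cat > kubeadm-init.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 10.1.3.178
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.28.2
imageRepository: registry.aliyuncs.com/google_containers
networking:
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
EOF

kubeadm init --config kubeadm-init.yaml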

#reset the cluster back to an uninitialized state
kubeadm reset -f \
--cri-socket=unix:///var/run/cri-dockerd.sock

Configure the environment variables

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /etc/profile
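To make kubectl usable in the current shell right away and confirm the control plane is answering:

source /etc/profile
kubectl get nodes
kubectl get pods -n kube-system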

systemctl daemon-reload
systemctl restart kubelet

Check the two config files below; if either does not exist, create it with the following content.

/etc/cni/net.d/10-flannel.conflist

{
  "name": "cbr0",
  "cniVersion": "0.3.1",
  "plugins": [
    {
      "type": "flannel",
      "delegate": {
        "hairpinMode": true,
        "isDefaultGateway": true
      }
    },
    {
      "type": "portmap",
      "capabilities": {
        "portMappings": true
      }
    }
  ]
}

/etc/systemd/system/kubelet.service.d/10-kubeadm.conf

[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf"
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml"
# This is a file that "kubeadm init" and "kubeadm join" generates at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically
EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env
# This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, the user should use
# the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. KUBELET_EXTRA_ARGS should be sourced from this file.
EnvironmentFile=-/etc/default/kubelet
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS

If either of these files was newly created, reload systemd and restart the kubelet:

systemctl daemon-reload

systemctl restart kubelet

11. Join other nodes to the cluster

Add worker nodes

#on the worker node, check for these files; copy any missing ones over from the master
/etc/cni/net.d/10-flannel.conflist
/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
/etc/kubernetes/admin.conf

#configure the environment variables on workers too, so kubectl can be used on them as well
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /etc/profile
systemctl daemon-reload
systemctl restart kubelet

#run this command on the master
kubeadm token create --print-join-command

This prints a kubeadm join command; copy it and run it on the new worker node, which then joins the cluster as a worker.
*Note: the cri-socket parameter must be appended to the generated kubeadm join command, for example:

kubeadm join 10.1.3.178:6443 --token z994lz.s0ogba045j84195c --discovery-token-ca-cert-hash sha256:89d69bc4b7c03bc8328713794c7aa4af798b0e65a64021a329bb9bf1d7afd23e --cri-socket=unix:///var/run/cri-dockerd.sock

Add additional master nodes

The procedure is the same as for a worker, except the join command takes one extra flag, --control-plane, for example:
kubeadm join 10.1.3.178:6443 --token z994lz.s0ogba045j84195c --discovery-token-ca-cert-hash sha256:89d69bc4b7c03bc8328713794c7aa4af798b0e65a64021a329bb9bf1d7afd23e --cri-socket=unix:///var/run/cri-dockerd.sock --control-plane

*Note: to build a cluster with multiple master nodes, the certificates also need to be created and shared with every master node.
@todo

List all nodes that have joined the cluster

kubectl get nodes

The nodes will all show a NotReady status until a network plugin is installed.

First check that /opt/cni/bin/ contains the complete set of cni-plugins binaries (portmap, for instance, may be missing); if not, download them:
wget https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-amd64-v1.5.1.tgz
tar zxvf cni-plugins-linux-amd64-v1.5.1.tgz -C /opt/cni/bin/
systemctl restart kubelet

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
After applying it, give it a little while.

kubectl get nodes
The nodes will then be Ready.

Installing Calico, an alternative network plugin

  • Flannel vs Calico: the choice mainly depends on your needs. For a small cluster that does not require advanced networking features, Flannel is a good fit; if you need a powerful plugin to support large clusters and complex network policies, Calico is likely the better choice.

  • Best practice: for clusters that may later need to scale or integrate more devices and policies, prefer Calico for its scalability and richer feature set; for small clusters or test environments, Flannel is the simpler, easier option.

kubectl apply -f https://docs.tigera.io/archive/v3.25/manifests/calico.yaml

kubectl get pods --namespace=kube-system | grep calico-node
If the output shows the calico-node pods in Running status, Calico has been installed successfully.

12. Completely remove k8s

#stop the k8s services
systemctl stop kubelet
systemctl stop cri-docker
systemctl stop docker.socket
systemctl stop docker

#wipe the k8s cluster configuration
kubeadm reset -f --cri-socket=unix:///var/run/cri-dockerd.sock

#remove the k8s-related packages
yum -y remove kubelet kubeadm kubectl docker-ce docker-ce-cli containerd containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras

#manually delete all images, containers, and volumes
rm -rf /var/lib/docker
rm -rf /var/lib/containerd

#remove the remaining related files
rm -rf $HOME/.kube ~/.kube/ /etc/kubernetes/ /etc/systemd/system/kubelet.service.d /usr/lib/systemd/system/kubelet.service /usr/lib/systemd/system/cri-docker.service /usr/bin/kube* /etc/cni /opt/cni /var/lib/etcd /etc/docker/daemon.json

13. Some useful commands

#list images with the containerd CLI (ctr)
ctr image list

#list the images k8s pulled into containerd (they live in the k8s.io namespace)
ctr -n k8s.io image list

#list images through cri-dockerd with crictl
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock image

#force-delete a pod
kubectl delete pod <pod>  -n <namespace> --grace-period=0 --force

#inspect the iptables forwarding rules
iptables -L

#inspect the IPVS forwarding rules
ipvsadm -Ln