Setting up a Kubernetes cluster on CentOS 7.9

Environment preparation

CentOS 7.9 hosts, each with 4 vCPUs and 8 GB RAM

Preparation (run these steps on every node):

Disable firewalld and SELinux

Set the hostname

Configure /etc/hosts

bash
# Run the matching command on each node:
[root@localhost ~]# hostnamectl set-hostname master
[root@localhost ~]# hostnamectl set-hostname worker1
[root@localhost ~]# hostnamectl set-hostname worker2
[root@master ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.101 master
192.168.1.103 worker1
192.168.1.105 worker2
[root@master ~]# systemctl stop firewalld && systemctl disable firewalld && setenforce 0
[root@master ~]# sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

Disable swap

bash
[root@master ~]# swapoff -a
[root@master ~]# vim /etc/fstab
Comment out the swap entry:
#/dev/mapper/centos-swap swap                    swap    defaults        0 0
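
A non-interactive alternative (just a sketch; it assumes the swap entry is an uncommented fstab line containing the word "swap"):

bash
[root@master ~]# sed -ri 's/^([^#].*[[:space:]]swap[[:space:]])/#\1/' /etc/fstab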

Pass bridged IPv4 traffic to the iptables chains and enable IP forwarding

bash
[root@master ~]# modprobe br_netfilter
[root@master ~]# cat >/etc/sysctl.d/k8s.conf<<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@master ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
kernel.kptr_restrict = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /usr/lib/sysctl.d/60-libvirtd.conf ...
fs.aio-max-nr = 1048576
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
* Applying /etc/sysctl.conf ...
[root@master ~]# echo "net.ipv4.ip_forward = 1"  >> /etc/sysctl.conf
[root@master ~]# sysctl -p
net.ipv4.ip_forward = 1
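
The br_netfilter module loaded above with modprobe does not persist across reboots; a small sketch of making it (plus the overlay module containerd relies on) load automatically at boot:

bash
[root@master ~]# cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF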

Time synchronization

bash
[root@worker2 ~]# yum install -y chrony && systemctl start chronyd && systemctl enable chronyd
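
To verify the nodes are actually synchronizing, check the sources; adding an explicit upstream server is optional (ntp.aliyun.com below is only an example, any reachable NTP server works):

bash
[root@worker2 ~]# echo "server ntp.aliyun.com iburst" >> /etc/chrony.conf
[root@worker2 ~]# systemctl restart chronyd
[root@worker2 ~]# chronyc sources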

Configure containerd

bash
[root@worker2 ~]# yum install -y yum-utils && sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master ~]# yum install -y containerd.io && systemctl enable containerd --now
[root@master ~]# containerd config default > /etc/containerd/config.toml
[root@master ~]# vim /etc/containerd/config.toml
# Change these two settings in the generated file:
# sandbox_image under [plugins."io.containerd.grpc.v1.cri"], SystemdCgroup under the runc options
 sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10"
 SystemdCgroup = true
[root@master ~]# systemctl restart containerd
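
The same two edits can be made non-interactively; a sketch that assumes the default config.toml generated above (where SystemdCgroup starts out as false):

bash
[root@master ~]# sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.10"#' /etc/containerd/config.toml
[root@master ~]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
[root@master ~]# grep -E 'sandbox_image|SystemdCgroup' /etc/containerd/config.toml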

Configure the Kubernetes repository

bash
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.31/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
sudo systemctl enable --now kubelet
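
To keep every node on the exact same patch release rather than "newest in the v1.31 repo", the versions can be pinned; 1.31.2 below is only an example matching the cluster version shown later:

bash
sudo yum install -y kubelet-1.31.2 kubeadm-1.31.2 kubectl-1.31.2 --disableexcludes=kubernetes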

Configure crictl to talk to containerd

bash
[root@master ~]# crictl config --set runtime-endpoint=/run/containerd/containerd.sock
[root@master ~]# crictl images
IMAGE               TAG                 IMAGE ID            SIZE
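
The crictl config command writes /etc/crictl.yaml; an equivalent hand-written config might look like this (a sketch, endpoints assume the containerd socket used here):

bash
[root@master ~]# cat <<EOF | sudo tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
EOF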

Initialize the cluster with kubeadm

bash
[root@master ~]# kubeadm config print init-defaults > kubeadm-config.yaml
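# The printed defaults need a few edits before running init; the values below are
# assumptions matching this environment (adjust them to your own cluster):
#   localAPIEndpoint.advertiseAddress: 192.168.1.101   (the master's IP, not the 1.2.3.4 placeholder)
#   nodeRegistration.name: master
#   imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers   (optional, pulls control-plane images from the Aliyun mirror)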
[root@master ~]# kubeadm init --config kubeadm-config.yaml 
Configure kubectl on the master node:
[root@master ~]#  mkdir -p $HOME/.kube
[root@master ~]#   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]#   sudo chown $(id -u):$(id -g) $HOME/.kube/config
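# Alternatively, when working as root, pointing KUBECONFIG at admin.conf works as well:
[root@master ~]# export KUBECONFIG=/etc/kubernetes/admin.conf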
Worker nodes (copy admin.conf over from the master first):
[root@master ~]# scp /etc/kubernetes/admin.conf root@192.168.1.103:/root
[root@master ~]# scp /etc/kubernetes/admin.conf root@192.168.1.105:/root
[root@worker1 ~]# mkdir -p $HOME/.kube
[root@worker1 ~]# sudo cp -i admin.conf $HOME/.kube/config
[root@worker1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@worker1 ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   16m   v1.31.2
[root@worker2 ~]# mkdir -p $HOME/.kube
[root@worker2 ~]# sudo cp -i admin.conf $HOME/.kube/config
[root@worker2 ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@worker2 ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   17m   v1.31.2

Join the worker nodes to the cluster

bash
[root@worker1 ~]# kubeadm join 192.168.1.101:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:54ff174d318f6dd6b7e357ba40f6364cfa4394c5bf1ed74a9d8fdab907fe1b19
[root@worker2 ~]# kubeadm join 192.168.1.101:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:54ff174d318f6dd6b7e357ba40f6364cfa4394c5bf1ed74a9d8fdab907fe1b19
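# If the token printed by kubeadm init has expired (the default TTL is 24 hours),
# a fresh join command can be generated on the master:
[root@master ~]# kubeadm token create --print-join-command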
[root@master ~]# kubectl get nodes
NAME      STATUS     ROLES           AGE   VERSION
master    NotReady   control-plane   19m   v1.31.2
worker1   NotReady   <none>          59s   v1.31.2
worker2   NotReady   <none>          52s   v1.31.2

All nodes show NotReady because there is no pod network yet, so install the Calico network plugin next.

Install the Calico network plugin

The calico.yaml manifest is available at https://github.com/projectcalico/calico/blob/master/manifests/calico.yaml. I used an offline install here: the images Calico needs were downloaded in advance, uploaded to the nodes with Xftp, and then imported locally.

bash
[root@master ~]# mkdir calico
[root@master ~]# ls calico/
calico.yaml  cni.tar  controller.tar  node.tar
# Import the images on all three nodes
[root@master calico]# ctr -n=k8s.io images import cni.tar && ctr -n=k8s.io images import node.tar && ctr -n=k8s.io images import controller.tar
[root@master calico]# ctr images list
REF                                      TYPE                                       DIGEST                                                                  SIZE      PLATFORMS   LABELS 
docker.io/calico/cni:master              application/vnd.oci.image.manifest.v1+json sha256:857793c712dacf42f3d4b7c0223ba1b6497098ad8270029346d62adafb00bd5b 209.5 MiB linux/amd64 -      
docker.io/calico/kube-controllers:master application/vnd.oci.image.manifest.v1+json sha256:a01cd6d582c44775bb1b1053fe05501d44afd79be66bc770ac9218a1dfbf31ea 78.6 MiB  linux/amd64 -      
docker.io/calico/node:master             application/vnd.oci.image.manifest.v1+json sha256:5e3d9dddee857783469742393998a84ee2497bdba98dd44424e28699f58e40ad 325.5 MiB linux/amd64 -      
[root@master calico]# kubectl apply -f calico.yaml 
[root@master calico]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS       AGE
calico-kube-controllers-7bcf789c97-9jx9j   1/1     Running   0              16m
calico-node-d6n68                          1/1     Running   0              16m
calico-node-dlnpr                          1/1     Running   0              16m
calico-node-t4kzf                          1/1     Running   0              16m
coredns-fcd6c9c4-fz4kz                     1/1     Running   0              55m
coredns-fcd6c9c4-r7qgx                     1/1     Running   0              55m
etcd-master                                1/1     Running   0              56m
kube-apiserver-master                      1/1     Running   0              55m
kube-controller-manager-master             1/1     Running   4 (109s ago)   55m
kube-proxy-2pgkw                           1/1     Running   0              37m
kube-proxy-px64d                           1/1     Running   0              37m
kube-proxy-tvxds                           1/1     Running   0              55m
kube-scheduler-master                      1/1     Running   3 (99s ago)    55m
[root@master calico]# kubectl get nodes
NAME      STATUS   ROLES           AGE   VERSION
master    Ready    control-plane   61m   v1.31.2
worker1   Ready    <none>          42m   v1.31.2
worker2   Ready    <none>          42m   v1.31.2

kubectl command completion

bash
[root@master calico]# echo 'source /usr/share/bash-completion/bash_completion' >> ~/.bashrc
[root@master calico]# echo 'source  <(kubectl completion bash)' >> ~/.bashrc
[root@master calico]# source ~/.bashrc
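
Optionally, a short alias with completion wired up for it as well (taken from the kubectl completion docs):

bash
[root@master calico]# echo 'alias k=kubectl' >> ~/.bashrc
[root@master calico]# echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
[root@master calico]# source ~/.bashrc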

That completes the Kubernetes cluster setup.
