1 Server initialization (required on all servers)
1.1 APT source initialization
shell
root@k8smaster232:~# cat /etc/apt/sources.list.d/ubuntu.sources
Types: deb
URIs: http://192.168.1.12:8081/repository/Ubuntu-Proxy/
Suites: noble noble-updates noble-backports
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg
Types: deb
URIs: http://192.168.1.12:8081/repository/Ubuntu-Proxy/
Suites: noble-security
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg
1.2 Kubernetes APT source initialization
shell
# The upstream proxy mirrors Aliyun, so add the Aliyun repository signing key
root@k8smaster232:~# curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.34/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# Repository definition
root@master233:~# cat /etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb/ /
# Repository definition without a GPG key (usable when nginx serves as the APT repository)
root@master233:~# cat /etc/apt/sources.list.d/kubernetes.list
deb [trusted=yes] http://192.168.1.12:8081/repository/Ubuntu-K8s noble main
1.3 Verify the sources are ready
shell
root@k8smaster232:~# apt update
Get:2 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb InRelease [1,186 B]
Hit:3 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble InRelease
Hit:4 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble-updates InRelease
Hit:5 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble-backports InRelease
Hit:6 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble-security InRelease
Get:7 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages [4,405 B]
1.4 Update the hosts mappings
shell
root@k8smaster230:~# cat >> /etc/hosts << EOF
192.168.1.230 k8smaster230
192.168.1.231 k8sslave231
192.168.1.232 k8sslave232
EOF
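A quick optional check: confirm each name resolves to the address just added.
shell
# each hostname should map to the entry written above
getent hosts k8smaster230 k8sslave231 k8sslave232
ping -c 1 k8smaster230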
1.5 Time synchronization
shell
root@k8smaster230:~# timedatectl set-timezone Asia/Shanghai
root@k8smaster230:~# apt install ntpdate -y
root@k8smaster230:~# ntpdate time1.aliyun.com
root@k8smaster230:~# crontab -e
# add this entry to resync daily at midnight
0 0 * * * ntpdate time1.aliyun.com
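To confirm the timezone and current time afterwards, something like the following is enough.
shell
# Time zone should report Asia/Shanghai and the clock should be current
timedatectl
date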
1.6 Disable swap
shell
root@k8smaster230:~# swapoff -a && sudo sed -i '/swap/s/^/#/' /etc/fstab
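A quick sanity check that swap is really off (it must stay off for the kubelet):
shell
# swapon prints nothing and the Swap line in free shows 0B when swap is disabled
swapon --show
free -h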
1.7 System parameter tuning
shell
root@k8smaster230:~# vi /etc/security/limits.conf
# Maximum number of open file handles
* soft nofile 65535
* hard nofile 65535
# Maximum number of processes per user
* soft nproc 8192
* hard nproc 8192
# Maximum locked-in-memory address space; unlimited = no limit
* soft memlock unlimited
* hard memlock unlimited
root@k8smaster230:~# vim /etc/sysctl.conf
vm.max_map_count = 262144
root@k8smaster230:~# sysctl -p
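The limits.conf changes only apply to new login sessions. After logging in again, a minimal check could look like this:
shell
# open files, max processes, locked memory, and the new vm.max_map_count
ulimit -n
ulimit -u
ulimit -l
sysctl vm.max_map_count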
1.8 Load the required kernel modules
shell
root@k8smaster230:~# cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
overlay
br_netfilter
# Load the modules now
root@k8smaster230:~# modprobe overlay
root@k8smaster230:~# modprobe br_netfilter
# Verify that they loaded
root@k8smaster230:~# lsmod | egrep "overlay"
overlay 212992 0
root@k8smaster230:~# lsmod | egrep "br_netfilter"
br_netfilter 32768 0
bridge 421888 1 br_netfilter
1.9 Enable kernel forwarding
shell
root@k8smaster230:~# cat << EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
# vm.swappiness=0
EOF
# Apply the settings
root@k8smaster230:~# sysctl -p /etc/sysctl.d/k8s.conf
root@k8smaster230:~# sysctl --system
# Verify that they took effect
root@k8smaster230:~# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
1.10 Install ipset and ipvsadm
shell
root@k8smaster230:~# apt install ipset ipvsadm -y
# Declare the modules to be loaded at boot
root@k8smaster230:~# cat << EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# Create a script to load the modules immediately
root@k8smaster230:~# cat << EOF | tee ipvs.sh
#!/bin/sh
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Run the script
root@k8smaster230:~# bash ipvs.sh
# Check that the modules loaded
root@k8smaster230:~# lsmod | grep ip_vs
ip_vs_sh 12288 0
ip_vs_wrr 12288 0
ip_vs_rr 12288 0
ip_vs 221184 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 196608 4 xt_conntrack,nf_nat,xt_MASQUERADE,ip_vs
nf_defrag_ipv6 24576 2 nf_conntrack,ip_vs
libcrc32c 12288 6 nf_conntrack,nf_nat,btrfs,nf_tables,raid456,ip_vs
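Optionally, verify that ipvsadm itself works; the virtual server table stays empty until kube-proxy is actually switched to IPVS mode.
shell
# list the current IPVS virtual server table (expected to be empty at this point)
ipvsadm -Ln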
2 Install containerd (required on all servers)
2.1 containerd 2.0.x
shell
# Installation and configuration for the 2.0.x release
#root@k8smaster230:~# wget https://github.com/containerd/containerd/releases/download/v2.2.0/containerd-2.2.0-linux-amd64.tar.gz
root@k8smaster230:~# wget https://github.com/containerd/containerd/releases/download/v2.0.1/containerd-2.0.1-linux-amd64.tar.gz
root@k8smaster230:~# tar zxf containerd-2.0.1-linux-amd64.tar.gz
root@k8smaster230:~# mv bin/* /usr/bin/
#------------------- Configure systemd to manage containerd --------------------------#
root@k8smaster230:~# vi /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=999
[Install]
WantedBy=multi-user.target
#------------------- Generate the configuration file --------------------------#
root@k8smaster230:~# mkdir -p /etc/containerd
root@k8smaster230:~# containerd config default > /etc/containerd/config.toml
root@k8smaster230:~# vi /etc/containerd/config.toml
...
# Change the sandbox (pause) image repository
#[plugins.'io.containerd.cri.v1.images'.pinned_images]
#  sandbox = 'registry.k8s.io/pause:3.10'
[plugins.'io.containerd.cri.v1.images'.pinned_images]
  # use the upstream Kubernetes registry
  sandbox = 'registry.k8s.io/pause:3.10.1'
  # or the Aliyun mirror
  # sandbox = 'registry.aliyuncs.com/google_containers/pause:3.10.1'
  # or a local private Harbor registry
  # sandbox = 'private-registry-domain/google_containers/pause:3.10.1'
...
# In containerd 2.0, registry mirrors are no longer configured directly in the plugins
# section of config.toml. Instead, the config_path field points to an external directory
# (usually /etc/containerd/certs.d/), and each registry gets its own configuration file there.
[plugins.'io.containerd.cri.v1.images'.registry]
  config_path = '/etc/containerd/certs.d'
#------------------- Create registry mirror configuration files --------------------------#
# Under /etc/containerd/certs.d/, create a hosts.toml file for each registry that needs a mirror (e.g. docker.io).
root@k8smaster230:~# mkdir -p /etc/containerd/certs.d/docker.io
root@k8smaster230:~# vim /etc/containerd/certs.d/docker.io/hosts.toml
# Example contents (using the DaoCloud mirror):
server = "https://docker.io"
[host."https://docker.m.daocloud.io"]
capabilities = ["pull", "resolve"]
skip_verify = false # set to true if the mirror uses a self-signed certificate
# Create a mirror configuration file for the private registry as well
root@k8smaster230:~# mkdir -p /etc/containerd/certs.d/private-registry-domain
root@k8smaster230:~# vim /etc/containerd/certs.d/private-registry-domain/hosts.toml
server = "https://private-registry-domain" # registry address
[host."https://private-registry-domain"] # detailed settings for this registry
capabilities = ["pull", "resolve"]
skip_verify = true # set to true for self-signed certificates; must be true for insecure registries
# Insecure (plain HTTP) example
#server = "http://docker.io"
#[host."http://docker.m.daocloud.io"]
# capabilities = ["pull", "resolve"]
# skip_verify = false # set to true if the mirror uses a self-signed certificate
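Once containerd is running with this configuration (it is started in 2.3.2 below), the mirror setup can be exercised directly with ctr, assuming your ctr build supports the --hosts-dir flag; the busybox image below is only an example.
shell
# pull through the hosts.toml mirrors instead of the default registry endpoints
ctr -n k8s.io images pull --hosts-dir /etc/containerd/certs.d docker.io/library/busybox:latest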
2.3 Install runc (required on all servers)
2.3.1 Download runc
shell
#root@k8smaster230:~# wget https://github.com/opencontainers/runc/releases/download/v1.3.3/runc.amd64
root@k8smaster230:~# wget https://github.com/opencontainers/runc/releases/download/v1.2.2/runc.amd64
2.3.2 Install runc and start containerd
shell
root@k8smaster230:~# chmod +x runc.amd64
root@k8smaster230:~# mv runc.amd64 /usr/sbin/runc
root@k8smaster230:~# runc --version
runc version 1.2.2
commit: v1.2.2-0-g7cb36325
spec: 1.2.0
go: go1.22.9
libseccomp: 2.5.5
root@k8smaster230:~# systemctl enable --now containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /etc/systemd/system/containerd.service.
root@k8smaster230:~# ll /var/run/containerd/
total 0
drwx--x--x 4 root root 120 Nov 12 17:07 ./
drwxr-xr-x 32 root root 940 Nov 12 17:07 ../
srw-rw---- 1 root root 0 Nov 12 17:07 containerd.sock=
srw-rw---- 1 root root 0 Nov 12 17:07 containerd.sock.ttrpc=
drwx--x--x 2 root root 40 Nov 12 17:07 io.containerd.runtime.v2.task/
drwx--x--x 2 root root 40 Nov 12 17:07 io.containerd.sandbox.controller.v1.shim/
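The crictl warnings about deprecated default endpoints (visible later when listing images) can be avoided by telling crictl where the containerd socket lives, for example:
shell
# point crictl at the containerd CRI socket
cat << EOF | tee /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF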
2.4 Kubernetes deployment
2.4.1 Install kubectl, kubeadm, and kubelet (required on all servers)
shell
# Check the available package versions
root@k8smaster230:~# apt-cache policy kubeadm kubectl kubelet
kubeadm:
Installed: 1.34.1-1.1
Candidate: 1.34.1-1.1
Version table:
*** 1.34.1-1.1 500
500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages
100 /var/lib/dpkg/status
1.34.0-1.1 500
500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages
kubectl:
Installed: 1.34.1-1.1
Candidate: 1.34.1-1.1
Version table:
*** 1.34.1-1.1 500
500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages
100 /var/lib/dpkg/status
1.34.0-1.1 500
500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages
kubelet:
Installed: 1.34.1-1.1
Candidate: 1.34.1-1.1
Version table:
*** 1.34.1-1.1 500
500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages
100 /var/lib/dpkg/status
1.34.0-1.1 500
500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb Packages
root@k8smaster230:~# apt-get install -y kubelet=1.34.1-1.1 kubeadm=1.34.1-1.1 kubectl=1.34.1-1.1
# Pin the versions
root@k8smaster230:~# apt-mark hold kubelet kubeadm kubectl
# Unpin (only when an upgrade is intended)
root@k8smaster230:~# apt-mark unhold kubelet kubeadm kubectl
# Configure kubelet to use the systemd cgroup driver
root@k8smaster230:~# vi /etc/default/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
#KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/var/run/cri-dockerd.sock"
# Enable kubelet at boot
root@k8smaster230:~# systemctl enable kubelet
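A quick check that the pinned versions were actually installed:
shell
# all three should report v1.34.1
kubeadm version -o short
kubelet --version
kubectl version --client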
2.4.2 Generate and edit the kubeadm configuration (master only)
shell
root@k8smaster230:~# kubeadm config print init-defaults > kubeadm-config.yaml
root@k8smaster230:~# vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # change to the master's IP
  advertiseAddress: 192.168.1.230
  bindPort: 6443
nodeRegistration:
  # container runtime socket (containerd)
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  # master node hostname
  name: k8smaster230
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
# private image repository address
imageRepository: private-registry-domain/google_containers
kind: ClusterConfiguration
# Kubernetes version
kubernetesVersion: 1.34.1
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  # add the pod network CIDR
  podSubnet: 10.244.0.0/16
proxy: {}
scheduler: {}
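Before running the init, the edited file can be checked for typos; recent kubeadm releases include a validation subcommand:
shell
# reports unknown or invalid fields in the kubeadm configuration
kubeadm config validate --config kubeadm-config.yaml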
2.4.3 List and pull the images (master only)
shell
# Check that the images exist in the private registry
root@k8smaster230:~# kubeadm config images list --kubernetes-version=v1.34.1 --image-repository private-registry-domain/google_containers
private-registry-domain/google_containers/kube-apiserver:v1.34.1
private-registry-domain/google_containers/kube-controller-manager:v1.34.1
private-registry-domain/google_containers/kube-scheduler:v1.34.1
private-registry-domain/google_containers/kube-proxy:v1.34.1
private-registry-domain/google_containers/coredns:v1.12.1
private-registry-domain/google_containers/pause:3.10.1
private-registry-domain/google_containers/etcd:3.6.4-0
# Pull the images
root@k8smaster230:~# kubeadm config images pull --kubernetes-version=v1.34.1 --image-repository private-registry-domain/google_containers
root@k8smaster230:~# crictl images
WARN[0000] Config "/etc/crictl.yaml" does not exist, trying next: "/usr/bin/crictl.yaml"
WARN[0000] Image connect using default endpoints: [unix:///run/containerd/containerd.sock unix:///run/crio/crio.sock unix:///var/run/cri-dockerd.sock]. As the default settings are now deprecated, you should set the endpoint instead.
IMAGE TAG IMAGE ID SIZE
private-registry-domain/google_containers/coredns v1.12.1 52546a367cc9e 22.4MB
private-registry-domain/google_containers/etcd 3.6.4-0 5f1f5298c888d 74.3MB
private-registry-domain/google_containers/kube-apiserver v1.34.1 c3994bc696102 27.1MB
private-registry-domain/google_containers/kube-controller-manager v1.34.1 c80c8dbafe7dd 22.8MB
private-registry-domain/google_containers/kube-proxy v1.34.1 fc25172553d79 26MB
private-registry-domain/google_containers/kube-scheduler v1.34.1 7dd6aaa1717ab 17.4MB
private-registry-domain/google_containers/pause 3.10.1 cd073f4c5f6a8 318kB
2.4.4 Initialize the cluster (master only)
shell
# Initialize the control plane
root@k8smaster230:~# kubeadm init --config kubeadm-config.yaml --upload-certs --v=9
...
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.230:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:dd2cdf528b49b8465f27e4f040e5bfa3be3ab722e776c15a9c5c0d4265c412e8
...
# Run the kubeconfig setup commands from the output above
root@k8smaster230:~# mkdir -p $HOME/.kube
root@k8smaster230:~# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@k8smaster230:~# chown $(id -u):$(id -g) $HOME/.kube/config
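At this point the control plane is up, but the master stays NotReady and coredns typically stays Pending until a CNI plugin is installed (Calico, below). A quick look at the current state:
shell
# expect NotReady on the master until the network plugin is deployed
kubectl get nodes
kubectl get pods -n kube-system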
2.4.5 Join the worker nodes to the master (workers only)
shell
# Join the first worker node
root@k8sslave231:~# kubeadm join 192.168.1.230:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:dd2cdf528b49b8465f27e4f040e5bfa3be3ab722e776c15a9c5c0d4265c412e8
[preflight] Running pre-flight checks
....
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# Join the second worker node
root@k8sslave232:~# kubeadm join 192.168.1.230:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:dd2cdf528b49b8465f27e4f040e5bfa3be3ab722e776c15a9c5c0d4265c412e8
[preflight] Running pre-flight checks
....
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
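If the bootstrap token has expired by the time a node joins (the default TTL is 24h), a fresh join command can be generated on the master:
shell
# prints a complete kubeadm join command with a new token and the CA cert hash
kubeadm token create --print-join-command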
2.5 Deploy the Calico network plugin
2.5.1 Download the manifests
shell
root@k8smaster230:~# wget https://raw.githubusercontent.com/projectcalico/calico/v3.31.0/manifests/tigera-operator.yaml
root@k8smaster230:~# wget https://raw.githubusercontent.com/projectcalico/calico/v3.31.0/manifests/custom-resources.yaml
2.5.2 Edit tigera-operator.yaml
shell
root@k8smaster230:~# vi tigera-operator.yaml
···
imagePullSecrets:
- imagesets.operator.tigera.io
- imagesets
image: private-registry-domain/tigera/operator:v1.40.0
imagePullPolicy: IfNotPresent
···
2.5.3 Edit custom-resources.yaml
shell
root@k8smaster230:~# vi custom-resources.yaml
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
    - name: default-ipv4-ippool
      blockSize: 26
      cidr: 10.244.0.0/16 # must match the podSubnet used when initializing the cluster
      encapsulation: VXLANCrossSubnet
      natOutgoing: Enabled
      nodeSelector: all()
  registry: private-registry-domain # pull Calico images from the private registry
---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}
---
# Configures the Calico Goldmane flow aggregator.
apiVersion: operator.tigera.io/v1
kind: Goldmane
metadata:
  name: default
---
# Configures the Calico Whisker observability UI.
apiVersion: operator.tigera.io/v1
kind: Whisker
metadata:
  name: default
2.5.4 Deploy Calico
shell
root@k8smaster230:~# kubectl create -f tigera-operator.yaml
namespace/tigera-operator created
serviceaccount/tigera-operator created
clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created
clusterrole.rbac.authorization.k8s.io/tigera-operator created
clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created
rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created
deployment.apps/tigera-operator created
root@k8smaster230:~# kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
goldmane.operator.tigera.io/default created
whisker.operator.tigera.io/default created
root@k8smaster230:~# kubectl get pods -n calico-system
NAME READY STATUS RESTARTS AGE
calico-apiserver-64c9c9bfcb-5s5h2 1/1 Running 0 7m8s
calico-apiserver-64c9c9bfcb-x99pp 1/1 Running 0 7m8s
calico-kube-controllers-f976c8f55-l6nc7 1/1 Running 0 7m8s
calico-node-kkkwc 1/1 Running 0 7m8s
calico-node-rjcxg 1/1 Running 0 7m8s
calico-node-wf85m 1/1 Running 0 7m8s
calico-typha-7966b97589-lncnh 1/1 Running 0 7m2s
calico-typha-7966b97589-nnc5z 1/1 Running 0 7m8s
csi-node-driver-6h9hx 2/2 Running 0 7m8s
csi-node-driver-hndsc 2/2 Running 0 7m8s
csi-node-driver-trgtj 2/2 Running 0 7m8s
goldmane-84d5b8fbb5-wwnbl 1/1 Running 0 7m8s
whisker-547ff8b85b-hvqhk 2/2 Running 0 88s
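Once all Calico pods are Running, a final check that the nodes have gone Ready and that a pod can actually be scheduled; the nginx image below is only an example and also needs to be reachable through the configured registry mirror.
shell
# all three nodes should now report Ready
kubectl get nodes -o wide
# smoke test: run a throwaway pod, inspect it, then clean up
kubectl run test-nginx --image=nginx:alpine --restart=Never
kubectl get pod test-nginx -o wide
kubectl delete pod test-nginx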