Deploying Kubernetes on Ubuntu 24.04 with Docker (using a private registry and package mirror)

I have recently started learning and working with Kubernetes and have tested several installation methods. This walkthrough installs from offline images and a private package mirror, so the apt repositories and the image registry must be set up in advance.

1.1 Server initialization (required on every server)

1.1.1 Initialize the apt source

bash
root@k8smaster232:~# cat /etc/apt/sources.list.d/ubuntu.sources
Types: deb
URIs: http://192.168.1.12:8081/repository/Ubuntu-Proxy/
Suites: noble noble-updates noble-backports
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg

Types: deb
URIs: http://192.168.1.12:8081/repository/Ubuntu-Proxy/
Suites: noble-security
Components: main restricted universe multiverse
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg

1.1.2 Initialize the Docker apt source

bash
# The proxy repository's upstream is Aliyun, so import Aliyun's Docker repo signing key
root@k8smaster232:~# curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
# Repository definition
root@k8smaster232:~# cat /etc/apt/sources.list.d/docker.list
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] http://192.168.1.12:8081/repository/Ubuntu-Docker noble stable
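# Optional sanity check: gpg should be able to read the dearmored keyring and list the Docker release key
root@k8smaster232:~# gpg --show-keys /etc/apt/keyrings/docker.gpg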

1.1.3 Initialize the Kubernetes apt source

bash
# The proxy repository's upstream is Aliyun, so import Aliyun's Kubernetes repo signing key
root@k8smaster232:~# curl -fsSL https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.34/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
# Repository definition
root@master233:~# cat /etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb/ /

# Alternative definition (no GPG key; usable when nginx serves as the apt repository)
root@master233:~# cat /etc/apt/sources.list.d/kubernetes.list
deb [trusted=yes] http://192.168.1.12:8081/repository/Ubuntu-K8s noble main
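# Optional sanity check: the repo metadata should answer HTTP 200 (the Release path is an assumption based on the Nexus-style layout above)
root@master233:~# curl -fsI http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb/Release | head -n1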

1.1.4 Confirm the repositories are ready

bash
root@k8smaster232:~# apt update
Hit:1 http://192.168.1.12:8081/repository/Ubuntu-Docker noble InRelease
Get:2 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  InRelease [1,186 B]
Hit:3 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble InRelease
Hit:4 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble-updates InRelease
Hit:5 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble-backports InRelease
Hit:6 http://192.168.1.12:8081/repository/Ubuntu-Proxy noble-security InRelease
Get:7 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages [4,405 B]

1.1.5 Install Docker and adjust the configuration

bash
root@k8smaster232:~# apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin ca-certificates curl gnupg
root@k8smaster232:~# vi /etc/docker/daemon.json
# Configuration file; note that insecure-registries entries take host[:port] with no URL scheme
{
  "insecure-registries": ["<private-registry-domain>"]
}
root@k8smaster232:~# systemctl restart docker
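# Verify: on Ubuntu 24.04 (cgroup v2) the driver should report systemd,
# and the private registry should appear under Insecure Registries
root@k8smaster232:~# docker info --format '{{.CgroupDriver}}'
systemd
root@k8smaster232:~# docker info | grep -A1 'Insecure Registries'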

1.1.6 Add hosts mappings

bash
root@k8smaster230:~# cat >> /etc/hosts << EOF
192.168.1.230 k8smaster230
192.168.1.231 k8sslave231
192.168.1.232 k8sslave232
EOF
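# Verify the names resolve
root@k8smaster230:~# getent hosts k8smaster230 k8sslave231 k8sslave232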

1.1.7 Time synchronization

bash
root@k8smaster230:~# timedatectl set-timezone Asia/Shanghai

root@k8smaster230:~# apt install ntpdate -y
root@k8smaster230:~# ntpdate time1.aliyun.com

root@k8smaster230:~# crontab -e
# Re-sync against the Aliyun NTP server daily at midnight
0 0 * * * ntpdate time1.aliyun.com
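# Confirm the timezone change took effect (Time zone should read Asia/Shanghai)
root@k8smaster230:~# timedatectl | grep 'Time zone'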

1.1.8 Disable swap

bash
root@k8smaster230:~# swapoff -a && sed -i '/swap/s/^/#/' /etc/fstab
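# Verify: swapon prints nothing and free should report 0B of swap
root@k8smaster230:~# swapon --show
root@k8smaster230:~# free -h | grep -i swap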

1.1.9 System limits tuning

bash
root@k8smaster230:~# vi /etc/security/limits.conf
# Maximum number of open file handles
* soft nofile 65535
* hard nofile 65535
# Maximum number of processes per user
* soft nproc 8192
* hard nproc 8192
# Maximum locked-in-memory address space (unlimited = no limit)
* soft memlock unlimited
* hard memlock unlimited

root@k8smaster230:~# vim /etc/sysctl.conf
vm.max_map_count = 262144
root@k8smaster230:~# sysctl -p
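# Verify: limits.conf applies to new login sessions, so log in again first
root@k8smaster230:~# ulimit -n
65535
root@k8smaster230:~# sysctl vm.max_map_count
vm.max_map_count = 262144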

1.1.10 Load kernel modules

bash
root@k8smaster230:~# cat << EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
overlay
br_netfilter

# Load the modules now
root@k8smaster230:~# modprobe overlay
root@k8smaster230:~# modprobe br_netfilter

# Confirm they loaded
root@k8smaster230:~# lsmod | egrep "overlay"
overlay               212992  0

root@k8smaster230:~# lsmod | egrep "br_netfilter"
br_netfilter           32768  0
bridge                421888  1 br_netfilter

1.1.11 Enable kernel forwarding

bash
root@k8smaster230:~# cat << EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
# vm.swappiness=0
EOF
# Apply the settings
root@k8smaster230:~# sysctl -p /etc/sysctl.d/k8s.conf
root@k8smaster230:~# sysctl --system
# Verify the settings took effect
root@k8smaster230:~# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
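# Expected: all three values report 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1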

1.1.12 Install ipset and ipvsadm

bash
root@k8smaster230:~# apt install ipset ipvsadm -y

# Declare the modules to load at boot
root@k8smaster230:~# cat << EOF | tee /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

# Create a script to load the modules immediately
root@k8smaster230:~# cat << EOF | tee ipvs.sh
#!/bin/sh
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
# Run the script
root@k8smaster230:~# bash ipvs.sh

# Check that the modules loaded
root@k8smaster230:~# lsmod | grep ip_vs
ip_vs_sh               12288  0
ip_vs_wrr              12288  0
ip_vs_rr               12288  0
ip_vs                 221184  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          196608  4 xt_conntrack,nf_nat,xt_MASQUERADE,ip_vs
nf_defrag_ipv6         24576  2 nf_conntrack,ip_vs
libcrc32c              12288  6 nf_conntrack,nf_nat,btrfs,nf_tables,raid456,ip_vs
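# Optional: confirm ipvsadm can talk to the kernel (the virtual server table is empty at this stage)
root@k8smaster230:~# ipvsadm -Ln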

1.1.13 Install cri-dockerd (binary release; currently does not work on 24.04, use the method in 1.1.14)

bash
# Download the binary release
root@k8smaster230:~# wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.21/cri-dockerd-0.3.21.amd64.tgz
# Extract
root@k8smaster230:~# tar zxf cri-dockerd-0.3.21.amd64.tgz
# Install the binary
root@k8smaster230:~# install -o root -g root -m 0755 ./cri-dockerd/cri-dockerd /usr/local/bin/cri-dockerd
# Hand cri-dockerd over to systemd. Note the added option --pod-infra-container-image=registry.k8s.io/pause:3.9, which can be changed to the private registry address; the ExecStart path must also match the actual install location
root@k8smaster230:~# vi /etc/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify

ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image=<private-registry-domain>/google_containers/pause:3.10.1 --container-runtime-endpoint fd://
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

# The companion socket unit, also managed by systemd
root@k8smaster230:~# vi /etc/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

# Reload systemd and start the socket
root@k8smaster230:~# systemctl daemon-reload
root@k8smaster230:~# systemctl enable --now cri-docker.socket
# Confirm the sockets are present
root@k8smaster230:~# ll /var/run/*.sock
srw-rw---- 1 root docker 0 Nov 12 11:39 /var/run/cri-dockerd.sock=
srw-rw---- 1 root docker 0 Nov 11 16:57 /var/run/docker.sock=

1.1.14 Install cri-dockerd (built from source; works as expected)

bash
# Download and configure the Go toolchain
root@k8smaster230:~# wget https://go.dev/dl/go1.25.4.linux-amd64.tar.gz
root@k8smaster230:~# tar -C /usr/local -xzf go1.25.4.linux-amd64.tar.gz
root@k8smaster230:~# vim /etc/profile
export PATH=$PATH:/usr/local/go/bin
root@k8smaster230:~# source /etc/profile
root@k8smaster230:~# go version
go version go1.25.4 linux/amd64
# Build the project
root@k8smaster230:~# git clone https://github.com/Mirantis/cri-dockerd.git
root@k8smaster230:~# cd cri-dockerd
root@k8smaster230:~# apt install make
root@k8smaster230:~# make cri-dockerd
root@k8smaster230:~# install -o root -g root -m 0755 cri-dockerd /usr/local/bin/cri-dockerd
root@k8smaster230:~# install packaging/systemd/* /etc/systemd/system
root@k8smaster230:~# sed -i -e 's,/usr/bin/cri-dockerd,/usr/local/bin/cri-dockerd,' /etc/systemd/system/cri-docker.service
# Point the pause image in cri-docker.service at the private registry
root@k8smaster232:~/cri-dockerd# vi /etc/systemd/system/cri-docker.service
ExecStart=/usr/local/bin/cri-dockerd --pod-infra-container-image=<private-registry-domain>/google_containers/pause:3.10.1 --container-runtime-endpoint fd://
# install(1) copies files with mode 0755; clear the execute bit so systemd does not warn about executable unit files
root@k8smaster230:~# chmod -x /etc/systemd/system/cri-docker.service
root@k8smaster230:~# chmod -x /etc/systemd/system/cri-docker.socket
root@k8smaster230:~# systemctl daemon-reload
root@k8smaster230:~# systemctl enable --now cri-docker
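# Confirm the binary and the service are healthy
root@k8smaster230:~# cri-dockerd --version
root@k8smaster230:~# systemctl is-active cri-docker
active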

1.1.15 Install kubectl, kubeadm, and kubelet

bash
# Check the available package versions
root@k8smaster230:~# apt-cache policy kubeadm kubectl kubelet
kubeadm:
  Installed: 1.34.1-1.1
  Candidate: 1.34.1-1.1
  Version table:
 *** 1.34.1-1.1 500
        500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages
        100 /var/lib/dpkg/status
     1.34.0-1.1 500
        500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages
kubectl:
  Installed: 1.34.1-1.1
  Candidate: 1.34.1-1.1
  Version table:
 *** 1.34.1-1.1 500
        500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages
        100 /var/lib/dpkg/status
     1.34.0-1.1 500
        500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages
kubelet:
  Installed: 1.34.1-1.1
  Candidate: 1.34.1-1.1
  Version table:
 *** 1.34.1-1.1 500
        500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages
        100 /var/lib/dpkg/status
     1.34.0-1.1 500
        500 http://192.168.1.12:8081/repository/Ubuntu-K8s/v1.34/deb  Packages

root@k8smaster230:~# apt-get install -y kubelet=1.34.1-1.1 kubeadm=1.34.1-1.1 kubectl=1.34.1-1.1
# Pin the versions so apt upgrade does not move them
root@k8smaster230:~# apt-mark hold kubelet kubeadm kubectl
# To unpin later
root@k8smaster230:~# apt-mark unhold kubelet kubeadm kubectl
# Configure kubelet to use the systemd cgroup driver
root@k8smaster230:~# vi /etc/default/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
#KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=/var/run/cri-dockerd.sock"
# Enable kubelet at boot
root@k8smaster230:~# systemctl enable kubelet
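# Confirm the pinned versions are installed
root@k8smaster230:~# kubeadm version -o short
v1.34.1
root@k8smaster230:~# kubectl version --client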

1.2 Kubernetes deployment

1.2.1 Generate and edit the kubeadm config file (master only)

bash
root@k8smaster230:~# kubeadm config print init-defaults > kubeadm-config.yaml
root@k8smaster230:~# vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta4
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # Set to the master's IP
  advertiseAddress: 192.168.1.230
  bindPort: 6443
nodeRegistration:
  # Point the CRI socket at cri-dockerd
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  imagePullSerial: true
  # Hostname of the master node
  name: k8smaster230
  taints: null
timeouts:
  controlPlaneComponentHealthCheck: 4m0s
  discovery: 5m0s
  etcdAPICall: 2m0s
  kubeletHealthCheck: 4m0s
  kubernetesAPICall: 1m0s
  tlsBootstrap: 5m0s
  upgradeManifests: 5m0s
---
apiServer: {}
apiVersion: kubeadm.k8s.io/v1beta4
caCertificateValidityPeriod: 87600h0m0s
certificateValidityPeriod: 8760h0m0s
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
encryptionAlgorithm: RSA-2048
etcd:
  local:
    dataDir: /var/lib/etcd
# Image repository address
imageRepository: <private-registry-domain>/google_containers
kind: ClusterConfiguration
# Set the Kubernetes version
kubernetesVersion: 1.34.1
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  # Add the pod network CIDR
  podSubnet: 10.244.0.0/16
proxy: {}
scheduler: {}
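
Recent kubeadm releases can lint the edited file before it is used; this optional check catches typos early:

bash
root@k8smaster230:~# kubeadm config validate --config kubeadm-config.yaml
ok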

1.2.2 List and pull the images (master only)

bash
# Confirm the images the private registry must provide
root@k8smaster230:~# kubeadm config images list --kubernetes-version=v1.34.1 --image-repository <private-registry-domain>/google_containers
<private-registry-domain>/google_containers/kube-apiserver:v1.34.1
<private-registry-domain>/google_containers/kube-controller-manager:v1.34.1
<private-registry-domain>/google_containers/kube-scheduler:v1.34.1
<private-registry-domain>/google_containers/kube-proxy:v1.34.1
<private-registry-domain>/google_containers/coredns:v1.12.1
<private-registry-domain>/google_containers/pause:3.10.1
<private-registry-domain>/google_containers/etcd:3.6.4-0
# Pull the images
root@k8smaster230:~# kubeadm config images pull --kubernetes-version=v1.34.1 --image-repository harbor.muscledog.top/google_containers --cri-socket unix:///var/run/cri-dockerd.sock

1.2.3 Initialize Kubernetes (master only)

bash
# Initialize the control plane
root@k8smaster230:~# kubeadm init --config kubeadm-config.yaml --upload-certs --v=9
...
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.1.230:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:6938788da15217dffd8aa82c1f058da6681a323e26ccb3f7180679b56ad919ae
...
# Run the kubeconfig setup commands printed above
root@k8smaster232:~# mkdir -p $HOME/.kube
root@k8smaster232:~# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@k8smaster232:~# chown $(id -u):$(id -g) $HOME/.kube/config
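
The control plane is now reachable, but the node will show NotReady until a CNI plugin is deployed (section 1.3); the output will look roughly like this:

bash
root@k8smaster230:~# kubectl get nodes
NAME           STATUS     ROLES           AGE     VERSION
k8smaster230   NotReady   control-plane   2m13s   v1.34.1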

1.2.4 Join the workers to the master (workers only)

bash
# Join the first worker
root@k8sslave231:~# kubeadm join 192.168.1.230:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:6938788da15217dffd8aa82c1f058da6681a323e26ccb3f7180679b56ad919ae --cri-socket unix:///var/run/cri-dockerd.sock
[preflight] Running pre-flight checks
....
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
# Join the second worker
root@k8sslave232:~# kubeadm join 192.168.1.230:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:6938788da15217dffd8aa82c1f058da6681a323e26ccb3f7180679b56ad919ae --cri-socket unix:///var/run/cri-dockerd.sock
[preflight] Running pre-flight checks
....
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
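
Back on the master, all three nodes should now be registered, still NotReady until Calico is installed; roughly:

bash
root@k8smaster230:~# kubectl get nodes
NAME           STATUS     ROLES           AGE     VERSION
k8smaster230   NotReady   control-plane   20m     v1.34.1
k8sslave231    NotReady   <none>          3m12s   v1.34.1
k8sslave232    NotReady   <none>          2m40s   v1.34.1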

1.3 Deploy the Calico network plugin

1.3.1 Download the manifests

bash
root@k8smaster232:~# wget https://raw.githubusercontent.com/projectcalico/calico/v3.31.0/manifests/tigera-operator.yaml
root@k8smaster232:~# wget https://raw.githubusercontent.com/projectcalico/calico/v3.31.0/manifests/custom-resources.yaml

1.3.2 Edit tigera-operator.yaml

bash
root@k8smaster232:~# vi tigera-operator.yaml
···
# In the tigera-operator Deployment, point the operator image at the private registry
          image: <private-registry-domain>/tigera/operator:v1.40.0
          imagePullPolicy: IfNotPresent
···

1.3.3 Edit custom-resources.yaml

bash
root@k8smaster232:~# vi custom-resources.yaml
# This section includes base Calico installation configuration.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.Installation
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # Configures Calico networking.
  calicoNetwork:
    ipPools:
      - name: default-ipv4-ippool
        blockSize: 26
        cidr: 10.244.0.0/16 # Note: must match the podSubnet set in the kubeadm config
        encapsulation: VXLANCrossSubnet
        natOutgoing: Enabled
        nodeSelector: all()
  registry: <private-registry-domain> # pull Calico images from the private registry

---
# This section configures the Calico API server.
# For more information, see: https://docs.tigera.io/calico/latest/reference/installation/api#operator.tigera.io/v1.APIServer
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default
spec: {}

---
# Configures the Calico Goldmane flow aggregator.
apiVersion: operator.tigera.io/v1
kind: Goldmane
metadata:
  name: default

---
# Configures the Calico Whisker observability UI.
apiVersion: operator.tigera.io/v1
kind: Whisker
metadata:
  name: default

1.3.4 Deploy Calico

bash
root@k8smaster232:~# kubectl create -f tigera-operator.yaml
namespace/tigera-operator created
serviceaccount/tigera-operator created
clusterrole.rbac.authorization.k8s.io/tigera-operator-secrets created
clusterrole.rbac.authorization.k8s.io/tigera-operator created
clusterrolebinding.rbac.authorization.k8s.io/tigera-operator created
rolebinding.rbac.authorization.k8s.io/tigera-operator-secrets created
deployment.apps/tigera-operator created
root@k8smaster232:~# kubectl create -f custom-resources.yaml
installation.operator.tigera.io/default created
apiserver.operator.tigera.io/default created
goldmane.operator.tigera.io/default created
whisker.operator.tigera.io/default created

root@k8smaster232:~# kubectl get pods -n calico-system
NAME                                      READY   STATUS    RESTARTS   AGE
calico-apiserver-64c9c9bfcb-5s5h2         1/1     Running   0          7m8s
calico-apiserver-64c9c9bfcb-x99pp         1/1     Running   0          7m8s
calico-kube-controllers-f976c8f55-l6nc7   1/1     Running   0          7m8s
calico-node-kkkwc                         1/1     Running   0          7m8s
calico-node-rjcxg                         1/1     Running   0          7m8s
calico-node-wf85m                         1/1     Running   0          7m8s
calico-typha-7966b97589-lncnh             1/1     Running   0          7m2s
calico-typha-7966b97589-nnc5z             1/1     Running   0          7m8s
csi-node-driver-6h9hx                     2/2     Running   0          7m8s
csi-node-driver-hndsc                     2/2     Running   0          7m8s
csi-node-driver-trgtj                     2/2     Running   0          7m8s
goldmane-84d5b8fbb5-wwnbl                 1/1     Running   0          7m8s
whisker-547ff8b85b-hvqhk                  2/2     Running   0          88s
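
With every calico-system pod Running, the nodes should flip to Ready and the cluster is ready for workloads; the output will look roughly like:

bash
root@k8smaster232:~# kubectl get nodes
NAME           STATUS   ROLES           AGE   VERSION
k8smaster230   Ready    control-plane   45m   v1.34.1
k8sslave231    Ready    <none>          28m   v1.34.1
k8sslave232    Ready    <none>          28m   v1.34.1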