# 仅在Master节点执行
[root@k8s-master01 ~]# kubeadm init \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--apiserver-advertise-address=192.168.100.167 \
--image-repository=registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.29.0
[init] Using Kubernetes version: v1.29.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.100.167:6443 --token kl70ra.gfbd42c8rcj2n6rs \
--discovery-token-ca-cert-hash sha256:462449dca061c14dcc97b00f34c1611ad80ed5bd8e099ad4da647db6589714ba
[root@k8s-master01 ~]#
# 此处我是管理员所以用这种方式
[root@k8s-master01 ~]# echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' > /etc/profile.d/k8s.sh
[root@k8s-master01 ~]# source /etc/profile.d/k8s.sh
[root@k8s-master01 ~]# echo $KUBECONFIG
/etc/kubernetes/admin.conf
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 NotReady control-plane 2m56s v1.29.15
[root@k8s-master01 ~]#
# 普通用户的配置,配置kubectl
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# 保存join命令(后续Worker节点使用)
# 自动生成「工作节点(Worker Node)加入 Kubernetes 主节点(Master)」的完整命令,让你直接复制到工作节点执行,就能把节点加入集群。
kubeadm token create --print-join-command
# kubeadm token create:创建一个新的、有效的集群加入令牌(token)
# ✅ 主节点初始化时的默认 token 24 小时后会过期,过期就无法加入节点,必须重新生成。
# --print-join-command:直接打印出完整的加入命令(包含主节点地址、token、安全校验码)
3.4 部署网络插件(Calico)
bash
# 仅在Master节点执行
# 注意(review):Calico v3.26 官方兼容性列表最高支持到 Kubernetes v1.28,本集群为 v1.29,
# 建议确认兼容性或改用 Calico v3.27 及以上版本。
[root@k8s-master01 ~]# kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
serviceaccount/calico-cni-plugin created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpfilters.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrole.rbac.authorization.k8s.io/calico-cni-plugin created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-cni-plugin created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created
[root@k8s-master01 ~]#
# 验证Pod状态,状态必须都是 Running
[root@k8s-master01 ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-74d5f9d7bb-hj9qd 1/1 Running 0 3m32s 10.244.32.130 k8s-master01 <none> <none>
calico-node-lhpcq 1/1 Running 0 3m32s 192.168.100.167 k8s-master01 <none> <none>
coredns-857d9ff4c9-nzn7s 1/1 Running 0 8m52s 10.244.32.129 k8s-master01 <none> <none>
coredns-857d9ff4c9-swhdb 1/1 Running 0 8m52s 10.244.32.131 k8s-master01 <none> <none>
etcd-k8s-master01 1/1 Running 0 9m8s 192.168.100.167 k8s-master01 <none> <none>
kube-apiserver-k8s-master01 1/1 Running 0 9m8s 192.168.100.167 k8s-master01 <none> <none>
kube-controller-manager-k8s-master01 1/1 Running 0 9m8s 192.168.100.167 k8s-master01 <none> <none>
kube-proxy-76hvt 1/1 Running 0 8m53s 192.168.100.167 k8s-master01 <none> <none>
kube-scheduler-k8s-master01 1/1 Running 0 9m8s 192.168.100.167 k8s-master01 <none> <none>
[root@k8s-master01 ~]#
# 这时,你应该想到一个场景,如果在内网环境下,下载不了。只能找一台可以访问外网的机器,把所有需要的镜像下载下来,再传到内网环境里。
## 首先查看 kube-system 命名空间下运行的 Pod 所使用的镜像,这些镜像正是 worker 节点需要预加载的(尤其是 calico-node、kube-proxy、coredns 等)。
[root@k8s-master01 ~]# kubectl get pods -n kube-system -o jsonpath='{range .items[*]}{.spec.containers[*].image}{"\n"}{end}' | sort -u
docker.io/calico/kube-controllers:v3.26.0
docker.io/calico/node:v3.26.0
registry.aliyuncs.com/google_containers/coredns:v1.11.1
registry.aliyuncs.com/google_containers/etcd:3.5.16-0
registry.aliyuncs.com/google_containers/kube-apiserver:v1.29.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.29.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.29.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.29.0
## 进入 /calico 目录(需提前执行 mkdir /calico 创建),使用 ctr 将所有镜像导出到一个 tar 文件。
## 请确保使用完整的镜像名称(包含仓库前缀),否则 ctr 会提示 not found。
[root@k8s-master01 ~]# cd /calico
[root@k8s-master01 calico]# ctr -n k8s.io image export all-images.tar \
docker.io/calico/kube-controllers:v3.26.0 \
docker.io/calico/node:v3.26.0 \
registry.aliyuncs.com/google_containers/coredns:v1.11.1 \
registry.aliyuncs.com/google_containers/etcd:3.5.16-0 \
registry.aliyuncs.com/google_containers/kube-apiserver:v1.29.0 \
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.29.0 \
registry.aliyuncs.com/google_containers/kube-proxy:v1.29.0 \
registry.aliyuncs.com/google_containers/kube-scheduler:v1.29.0
[root@k8s-master01 calico]# ls -l
总计 302632
-rw-r--r-- 1 root root 309894144 3月28日 12:57 all-images.tar
[root@k8s-master01 calico]#
# 传到所需的节点
# 导入所有镜像到 containerd
ctr -n k8s.io image import /root/all-images.tar
# 验证。
ctr -n k8s.io image ls | grep -E "calico|coredns|kube-proxy"
3.5 Worker节点加入集群
bash
# 在Worker1和Worker2节点执行(使用Master生成的join命令)
[root@k8s-worker1 ~]# kubeadm join 192.168.100.167:6443 --token kl70ra.gfbd42c8rcj2n6rs \
--discovery-token-ca-cert-hash sha256:462449dca061c14dcc97b00f34c1611ad80ed5bd8e099ad4da647db6589714ba
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-worker1 ~]#
# 在Master节点验证
[root@k8s-master01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane 25m v1.29.15
k8s-worker1 Ready <none> 4m32s v1.29.15
k8s-worker2 Ready <none> 4m26s v1.29.15
[root@k8s-master01 ~]#
# 如 worker 节点也需要执行 kubectl get nodes,请在 master 执行以下命令
# 注意:admin.conf 拥有集群管理员(cluster-admin)权限,分发到 worker 节点存在安全风险,
# 生产环境建议为 worker 创建权限受限的专用 kubeconfig。
[root@k8s-master01 ~]# scp /etc/kubernetes/admin.conf k8s-worker1:/etc/kubernetes/
The authenticity of host 'k8s-worker1 (192.168.100.168)' can't be established.
ED25519 key fingerprint is SHA256:EdX1RSTbnmrAOzO+gVSS6cXdt0ty/8HykXTCxXAsPF8.
This key is not known by any other names.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added 'k8s-worker1' (ED25519) to the list of known hosts.
Authorized users only. All activities may be monitored and reported.
root@k8s-worker1's password:
admin.conf 100% 5659 2.4MB/s 00:00
[root@k8s-master01 ~]# scp /etc/kubernetes/admin.conf k8s-worker2:/etc/kubernetes/
The authenticity of host 'k8s-worker2 (192.168.100.169)' can't be established.
ED25519 key fingerprint is SHA256:R9clN3Zr9xVbGktc4L0jFVbTh03wrbspS3fESATKuKk.
This key is not known by any other names.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added 'k8s-worker2' (ED25519) to the list of known hosts.
Authorized users only. All activities may be monitored and reported.
root@k8s-worker2's password:
admin.conf 100% 5659 3.4MB/s 00:00
[root@k8s-master01 ~]# scp /etc/profile.d/k8s.sh k8s-worker1:/etc/profile.d/
Authorized users only. All activities may be monitored and reported.
root@k8s-worker1's password:
k8s.sh 100% 45 68.4KB/s 00:00
[root@k8s-master01 ~]# scp /etc/profile.d/k8s.sh k8s-worker2:/etc/profile.d/
Authorized users only. All activities may be monitored and reported.
root@k8s-worker2's password:
k8s.sh 100% 45 28.4KB/s 00:00
[root@k8s-master01 ~]#
# worker 节点
[root@k8s-worker1 ~]# bash
Welcome to 6.6.0-132.0.0.111.oe2403sp3.x86_64
System information as of time: 2026年 03月 28日 星期六 12:50:42 CST
System load: 0.00
Memory used: 13.0%
Swap used: 0.0%
Usage On: 5%
IP address: 192.168.100.168
IP address: 10.244.194.64
Users online: 1
[root@k8s-worker1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready control-plane 26m v1.29.15
k8s-worker1 Ready <none> 6m26s v1.29.15
k8s-worker2 Ready <none> 6m20s v1.29.15
[root@k8s-worker1 ~]#
# 在Master节点执行
kubectl create secret docker-registry harbor-secret \
--docker-server=192.168.100.167:8080 \
--docker-username=admin \
--docker-password=Harbor12345 \
--docker-email=admin@example.com \
-n default
# 查看密钥
[root@k8s-master01 ~]# kubectl get secret harbor-secret
NAME TYPE DATA AGE
harbor-secret kubernetes.io/dockerconfigjson 1 26s
[root@k8s-master01 ~]#
5.3 推送镜像到Harbor
bash
# 登录Harbor
[root@k8s-master01 ~]# docker login 192.168.100.167:8080 -u admin -p Harbor12345
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
[root@k8s-master01 ~]#
# 拉取测试镜像
[root@k8s-master01 ~]# docker pull nginx:latest
latest: Pulling from library/nginx
ec781dee3f47: Pull complete
bb3d0aa29654: Pull complete
510ddf6557d6: Pull complete
cde7a05ae428: Pull complete
587e3d84dbb5: Pull complete
3189680c601f: Pull complete
5e815e07e569: Pull complete
Digest: sha256:7150b3a39203cb5bee612ff4a9d18774f8c7caf6399d6e8985e97e28eb751c18
Status: Downloaded newer image for nginx:latest
docker.io/library/nginx:latest
[root@k8s-master01 ~]#
# 重新打标签
[root@k8s-master01 ~]# docker tag nginx:latest 192.168.100.167:8080/library/nginx:harbor-latest
[root@k8s-master01 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
nginx latest 0cf1d6af5ca7 3 days ago 161MB
192.168.100.167:8080/library/nginx harbor-latest 0cf1d6af5ca7 3 days ago 161MB
goharbor/redis-photon v2.11.0 184984d263c2 22 months ago 165MB
goharbor/harbor-registryctl v2.11.0 f1220f69df90 22 months ago 162MB
goharbor/registry-photon v2.11.0 95046ed33f52 22 months ago 84.5MB
goharbor/nginx-photon v2.11.0 681ba9915791 22 months ago 153MB
goharbor/harbor-log v2.11.0 a0a812a07568 22 months ago 163MB
goharbor/harbor-jobservice v2.11.0 bba862a3784a 22 months ago 159MB
goharbor/harbor-core v2.11.0 2cf11c05e0e2 22 months ago 185MB
goharbor/harbor-portal v2.11.0 ea8fda08df5b 22 months ago 162MB
goharbor/harbor-db v2.11.0 9bd788ea0df6 22 months ago 271MB
goharbor/prepare v2.11.0 2baf15fbf5e2 22 months ago 207MB
# 推送到Harbor
[root@k8s-master01 ~]# docker push 192.168.100.167:8080/library/nginx:harbor-latest
The push refers to repository [192.168.100.167:8080/library/nginx]
4e0a2a122e2f: Pushed
794b45c9a1a2: Pushed
190ba8fba6a7: Pushed
1e9759e65d38: Pushed
2c12d33655c1: Pushed
bcce8ea688d8: Pushed
188c9b34dfbe: Pushed
harbor-latest: digest: sha256:d5590adee87e29c44bc13bfae4492585c861b9893e60b48c86728bf179f5d096 size: 1778
[root@k8s-master01 ~]#
5.4 部署使用Harbor镜像的应用
bash
# 创建测试Deployment
cat > nginx-harbor.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-harbor
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: nginx-harbor
template:
metadata:
labels:
app: nginx-harbor
spec:
imagePullSecrets:
- name: harbor-secret
containers:
- name: nginx
image: 192.168.100.167:8080/library/nginx:harbor-latest
ports:
- containerPort: 80
EOF
# 部署应用
[root@k8s-master01 ~]# kubectl apply -f nginx-harbor.yaml
deployment.apps/nginx-harbor created
# 验证
[root@k8s-master01 ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-harbor-69b6479767-89gwh 1/1 Running 0 5s
nginx-harbor-69b6479767-t26w4 1/1 Running 0 5s
[root@k8s-master01 ~]#
[root@k8s-master01 ~]# kubectl describe pod nginx-harbor-69b6479767-89gwh
Name: nginx-harbor-69b6479767-89gwh
Namespace: default
Priority: 0
Service Account: default
Node: k8s-worker1/192.168.100.168
Start Time: Sat, 28 Mar 2026 15:23:48 +0800
Labels: app=nginx-harbor
pod-template-hash=69b6479767
Annotations: cni.projectcalico.org/containerID: 1322deb4a382703f13b9a97dc45f902afb93e3ef6757dfb7a3f5de8102b5cff5
cni.projectcalico.org/podIP: 10.244.194.69/32
cni.projectcalico.org/podIPs: 10.244.194.69/32
Status: Running
IP: 10.244.194.69
IPs:
IP: 10.244.194.69
...省略N
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 19s default-scheduler Successfully assigned default/nginx-harbor-69b6479767-89gwh to k8s-worker1
Normal Pulled 18s kubelet Container image "192.168.100.167:8080/library/nginx:harbor-latest" already present on machine
Normal Created 18s kubelet Created container: nginx
Normal Started 18s kubelet Started container nginx
[root@k8s-master01 ~]#
六、验证与测试
6.1 集群状态检查
bash
# 检查节点状态
kubectl get nodes
# 检查系统Pod
kubectl get pods -n kube-system
# 检查集群信息
[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.100.167:6443
CoreDNS is running at https://192.168.100.167:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
[root@k8s-master01 ~]#
# K8s 默认给 Master 节点加了污点,禁止调度普通 Pod,执行这条命令移除污点即可,pod就能运行在master 节点上。
kubectl taint nodes k8s-master01 node-role.kubernetes.io/control-plane-
6.2 Harbor功能测试
bash
# 检查Harbor容器状态
docker-compose ps -a
# 访问Harbor API
[root@k8s-master01 ~]# curl -u admin:Harbor12345 http://192.168.100.167:8080/api/v2.0/projects
[{"creation_time":"2026-03-28T05:55:17.609Z","current_user_role_id":1,"current_user_role_ids":[1],"cve_allowlist":{"creation_time":"0001-01-01T00:00:00.000Z","id":1,"items":[],"project_id":1,"update_time":"0001-01-01T00:00:00.000Z"},"metadata":{"public":"true"},"name":"library","owner_id":1,"owner_name":"admin","project_id":1,"repo_count":1,"update_time":"2026-03-28T05:55:17.609Z"}]
# 上面的输出太乱,不便阅读,安装 jq 格式化 JSON(另:curl 加 -s 参数可隐藏进度条输出)
[root@k8s-master01 ~]# yum install jq
[root@k8s-master01 ~]# curl -u admin:Harbor12345 http://192.168.100.167:8080/api/v2.0/projects | jq '.'
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 386 100 386 0 0 13367 0 --:--:-- --:--:-- --:--:-- 13785
[
{
"creation_time": "2026-03-28T05:55:17.609Z",
"current_user_role_id": 1,
"current_user_role_ids": [
1
],
"cve_allowlist": {
"creation_time": "0001-01-01T00:00:00.000Z",
"id": 1,
"items": [],
"project_id": 1,
"update_time": "0001-01-01T00:00:00.000Z"
},
"metadata": {
"public": "true"
},
"name": "library",
"owner_id": 1,
"owner_name": "admin",
"project_id": 1,
"repo_count": 1,
"update_time": "2026-03-28T05:55:17.609Z"
}
]
[root@k8s-master01 ~]#
# 或者用系统自带的 python3 格式化,优点:无需额外安装,适合快速查看
curl -u admin:Harbor12345 http://192.168.100.167:8080/api/v2.0/projects | python3 -m json.tool
# 查看镜像仓库
[root@k8s-master01 ~]# curl -u admin:Harbor12345 http://192.168.100.167:8080/api/v2.0/projects/library/repositories | python3 -m json.tool
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 167 100 167 0 0 12170 0 --:--:-- --:--:-- --:--:-- 12846
[
{
"artifact_count": 1,
"creation_time": "2026-03-28T06:57:28.416Z",
"id": 1,
"name": "library/nginx",
"project_id": 1,
"pull_count": 2,
"update_time": "2026-03-28T07:15:42.156Z"
}
]
[root@k8s-master01 ~]#
6.3 应用访问测试
bash
# 创建Service暴露应用
[root@k8s-master01 ~]# kubectl expose deployment nginx-harbor --port=80 --type=NodePort
service/nginx-harbor exposed
[root@k8s-master01 ~]# kubectl get deployments.apps
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-harbor 2/2 2 2 8m46s
[root@k8s-master01 ~]#
# 查看Service
[root@k8s-master01 ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h8m
nginx-harbor NodePort 10.102.145.147 <none> 80:31250/TCP 10s
[root@k8s-master01 ~]#
# 访问应用(使用任意Worker节点IP + NodePort)
curl http://<worker-node-ip>:<node-port>
# 也可到浏览器,使用系统IP访问。
http://192.168.100.167:31250/