Building a KubeSphere 3.4.1 Cluster on Ubuntu 22.04

OS version: Ubuntu 22.04
⚠️ The installation may fail if disk space is insufficient ⚠️

Cluster Plan

host           ip
k8s-master     192.168.8.81
k8s-slaver-01  192.168.8.82
k8s-slaver-02  192.168.8.83

Base Environment Setup

Run the following on all nodes.

bash
# Set the hostname (run the matching line on its own node)
hostnamectl set-hostname k8s-master      # on 192.168.8.81
hostnamectl set-hostname k8s-slaver-01   # on 192.168.8.82
hostnamectl set-hostname k8s-slaver-02   # on 192.168.8.83

# Add host entries (names must match the hostnames set above)
cat >> /etc/hosts <<EOF
192.168.8.81 k8s-master
192.168.8.82 k8s-slaver-01
192.168.8.83 k8s-slaver-02
EOF

# Install dependencies
apt-get install -y openssh-server openssh-client vim net-tools curl socat conntrack ebtables ipset ipvsadm policycoreutils lrzsz

# Disable the ufw firewall (--now also stops it immediately)
systemctl disable --now ufw
 
# Disable swap: comment out swap entries in /etc/fstab (permanent),
# and turn it off for the current boot as well
sed -ri 's/.*swap.*/#&/' /etc/fstab
swapoff -a
 
# Check that SELinux is disabled (Ubuntu does not enable SELinux by default)
sestatus
 
# Configure passwordless SSH for root
ssh-keygen -t rsa -P ""
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# To log in to the other nodes by key as well, also run, e.g.:
# ssh-copy-id root@k8s-slaver-01 && ssh-copy-id root@k8s-slaver-02

# Reboot to apply the changes
reboot
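
After the reboot, a quick sanity check (a minimal sketch; hostnames follow the cluster plan above) confirms the settings took effect:

bash
# Verify hostname, swap, and name resolution on each node
hostnamectl --static       # should print this node's planned hostname
swapon --show              # should print nothing once swap is off
ping -c 1 k8s-slaver-01    # /etc/hosts entries should resolve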

Installing Docker

Install

Run the following on all nodes.

bash
# Docker convenience script (test.docker.com tracks Docker's test channel)
curl -fsSL https://test.docker.com -o test-docker.sh && bash test-docker.sh
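
If you prefer Docker's stable channel, the equivalent convenience script (an alternative to the command above, not what the original used) is:

bash
# Stable-channel convenience script, then confirm the daemon is up
curl -fsSL https://get.docker.com -o get-docker.sh && bash get-docker.sh
systemctl enable --now docker
docker --version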

Configure Registry Mirrors

Run the following on all nodes.

On systems using systemd, write the following to /etc/docker/daemon.json (create the file if it does not exist):

json
{
  "registry-mirrors": [
    "https://r2szmn6d.mirror.aliyuncs.com",
    "https://registry.docker-cn.com",
    "https://docker.mirrors.ustc.edu.cn",
    "https://mirror.ccs.tencentyun.com",
    "http://hub-mirror.c.163.com"
  ]
}

Restart Docker

bash
systemctl restart docker
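
To confirm the mirrors were picked up after the restart:

bash
# The configured mirrors should be listed in the daemon info
docker info | grep -A 5 "Registry Mirrors"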

Installing Docker Compose

Run the following on all nodes.

bash
# Download
curl -SL https://github.com/docker/compose/releases/download/v2.26.1/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose

# Make it executable
chmod +x /usr/local/bin/docker-compose

# Create a symlink
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
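
A quick check that the binary is on the PATH and executable:

bash
docker-compose --version    # should report v2.26.1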

KubeSphere

Run the following on the k8s-master node.

KubeSphere official docs: Installation (Linux) | User Manual

Settings for Mainland China

bash
echo 'export KKZONE=cn' >> ~/.bashrc
source ~/.bashrc
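
Confirm the variable is visible in the current shell (kk reads it to switch to the CN download region):

bash
echo $KKZONE    # expected output: cn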

Download

bash
# Download kk (KubeKey)
curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.13 sh -

# Make it executable
chmod +x kk
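
A quick way to verify the download, assuming kk's version subcommand:

bash
./kk version    # should report v3.0.13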

Edit the Configuration File

bash
# Generate the configuration file (config-sample.yaml)
./kk create config --with-kubernetes v1.22.12 --with-kubesphere v3.4.1

# Edit the configuration file
vim config-sample.yaml

Adjust config-sample.yaml to match the cluster plan:

yaml
apiVersion: kubekey.kubesphere.io/v1alpha1
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: k8s-master, address: 192.168.8.81, internalAddress: 192.168.8.81, user: root, password: "123456"}
  - {name: k8s-slaver-01, address: 192.168.8.82, internalAddress: 192.168.8.82, user: root, password: "123456"}
  - {name: k8s-slaver-02, address: 192.168.8.83, internalAddress: 192.168.8.83, user: root, password: "123456"}
  roleGroups:
    etcd:
    - k8s-master
    master: 
    - k8s-master
    worker:
    - k8s-slaver-01
    - k8s-slaver-02
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.20.4
    imageRepo: kubesphere
    clusterName: cluster.local
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
  registry:
    registryMirrors: []
    insecureRegistries: []
  addons: []
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.1.1
spec:
  persistence:
    storageClass: ""       
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""        
  etcd:
    monitoring: false      
    endpointIps: localhost  
    port: 2379             
    tlsEnable: true
  common:
    redis:
      enabled: false
    redisVolumSize: 2Gi 
    openldap:
      enabled: false
    openldapVolumeSize: 2Gi  
    minioVolumeSize: 20Gi
    monitoring:
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
    es:  
      elasticsearchMasterVolumeSize: 4Gi   
      elasticsearchDataVolumeSize: 20Gi   
      logMaxAge: 7          
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchUrl: ""
      externalElasticsearchPort: ""  
  console:
    enableMultiLogin: true 
    port: 30880
  alerting:       
    enabled: false
    # thanosruler:
    #   replicas: 1
    #   resources: {}
  auditing:    
    enabled: false
  devops:           
    enabled: false
    jenkinsMemoryLim: 2Gi     
    jenkinsMemoryReq: 1500Mi 
    jenkinsVolumeSize: 8Gi   
    jenkinsJavaOpts_Xms: 512m  
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:          
    enabled: false
    ruler:
      enabled: true
      replicas: 2
  logging:         
    enabled: false
    logsidecar:
      enabled: true
      replicas: 2
  metrics_server:             
    enabled: false
  monitoring:
    storageClass: ""
    prometheusMemoryRequest: 400Mi  
    prometheusVolumeSize: 20Gi  
  multicluster:
    clusterRole: none 
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: false
  servicemesh:    
    enabled: false  
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress: 
          - ""           
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []

Install

This step takes a while, depending on your environment; please be patient!

bash
# Start the installation
./kk create cluster -f config-sample.yaml
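
If the run aborts partway (for example because of the disk-space warning at the top), a hedged cleanup before retrying is KubeKey's delete subcommand:

bash
# Tear down the partially installed cluster, then re-run create
./kk delete cluster -f config-sample.yaml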

The installation log looks like this:

bash
root@k8s-master:~# ./kk create cluster -f config-sample.yaml
+---------------+------+------+---------+----------+-------+-------+-----------+--------+------------+-------------+------------------+--------------+
| name          | sudo | curl | openssl | ebtables | socat | ipset | conntrack | docker | nfs client | ceph client | glusterfs client | time         |
+---------------+------+------+---------+----------+-------+-------+-----------+--------+------------+-------------+------------------+--------------+
| k8s-master    | y    | y    | y       | y        | y     | y     | y         | 26.1.3 |            |             |                  | CST 11:17:22 |
| k8s-slaver-02 | y    | y    | y       | y        | y     | y     | y         | 26.1.3 |            |             |                  | CST 11:17:22 |
| k8s-slaver-01 | y    | y    | y       | y        | y     | y     | y         | 26.1.3 |            |             |                  | CST 11:17:22 |
+---------------+------+------+---------+----------+-------+-------+-----------+--------+------------+-------------+------------------+--------------+

This is a simple check of your environment.
Before installation, you should ensure that your machines meet all requirements specified at
https://github.com/kubesphere/kubekey#requirements-and-recommendations

Continue this installation? [yes/no]: yes
INFO[11:18:35 CST] Downloading Installation Files
INFO[11:18:35 CST] Downloading kubeadm ...
INFO[11:18:35 CST] Downloading kubelet ...
INFO[11:18:37 CST] Downloading kubectl ...
INFO[11:18:38 CST] Downloading helm ...
INFO[11:18:38 CST] Downloading kubecni ...
INFO[11:18:38 CST] Configuring operating system ...
[k8s-slaver-02 192.168.8.83] MSG:
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
vm.swappiness = 1
fs.inotify.max_user_instances = 524288
[k8s-master 192.168.8.81] MSG:
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
vm.swappiness = 1
fs.inotify.max_user_instances = 524288
[k8s-slaver-01 192.168.8.82] MSG:
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-arptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_local_reserved_ports = 30000-32767
vm.max_map_count = 262144
vm.swappiness = 1
fs.inotify.max_user_instances = 524288
INFO[11:18:41 CST] Installing docker ...
INFO[11:18:41 CST] Start to download images on all nodes
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/etcd:v3.4.13
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.4
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.4
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pause:3.2
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-apiserver:v1.20.4
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controller-manager:v1.20.4
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.16.3
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-scheduler:v1.20.4
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-proxy:v1.20.4
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.16.3
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.16.3
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.16.3
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/coredns:1.6.9
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/k8s-dns-node-cache:1.15.12
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/kube-controllers:v3.16.3
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.16.3
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.16.3
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/cni:v3.16.3
[k8s-slaver-02] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.16.3
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/node:v3.16.3
[k8s-slaver-01] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.16.3
[k8s-master] Downloading image: registry.cn-beijing.aliyuncs.com/kubesphereio/pod2daemon-flexvol:v3.16.3
INFO[11:22:23 CST] Generating etcd certs
INFO[11:22:24 CST] Synchronizing etcd certs
INFO[11:22:24 CST] Creating etcd service
[k8s-master 192.168.8.81] MSG:
etcd will be installed
INFO[11:22:26 CST] Starting etcd cluster
[k8s-master 192.168.8.81] MSG:
Configuration file will be created
INFO[11:22:26 CST] Refreshing etcd configuration
[k8s-master 192.168.8.81] MSG:
Created symlink /etc/systemd/system/multi-user.target.wants/etcd.service → /etc/systemd/system/etcd.service.
Waiting for etcd to start
INFO[11:22:32 CST] Backup etcd data regularly
INFO[11:22:39 CST] Get cluster status
[k8s-master 192.168.8.81] MSG:
Cluster will be created.
INFO[11:22:39 CST] Installing kube binaries
Push /root/kubekey/v1.20.4/amd64/kubeadm to 192.168.8.81:/tmp/kubekey/kubeadm   Done
Push /root/kubekey/v1.20.4/amd64/kubeadm to 192.168.8.83:/tmp/kubekey/kubeadm   Done
Push /root/kubekey/v1.20.4/amd64/kubeadm to 192.168.8.82:/tmp/kubekey/kubeadm   Done
Push /root/kubekey/v1.20.4/amd64/kubelet to 192.168.8.81:/tmp/kubekey/kubelet   Done
Push /root/kubekey/v1.20.4/amd64/kubectl to 192.168.8.81:/tmp/kubekey/kubectl   Done
Push /root/kubekey/v1.20.4/amd64/helm to 192.168.8.81:/tmp/kubekey/helm   Done
Push /root/kubekey/v1.20.4/amd64/cni-plugins-linux-amd64-v0.8.6.tgz to 192.168.8.81:/tmp/kubekey/cni-plugins-linux-amd64-v0.8.6.tgz   Done
Push /root/kubekey/v1.20.4/amd64/kubelet to 192.168.8.83:/tmp/kubekey/kubelet   Done
Push /root/kubekey/v1.20.4/amd64/kubelet to 192.168.8.82:/tmp/kubekey/kubelet   Done
Push /root/kubekey/v1.20.4/amd64/kubectl to 192.168.8.83:/tmp/kubekey/kubectl   Done
Push /root/kubekey/v1.20.4/amd64/kubectl to 192.168.8.82:/tmp/kubekey/kubectl   Done
Push /root/kubekey/v1.20.4/amd64/helm to 192.168.8.83:/tmp/kubekey/helm   Done
Push /root/kubekey/v1.20.4/amd64/helm to 192.168.8.82:/tmp/kubekey/helm   Done
Push /root/kubekey/v1.20.4/amd64/cni-plugins-linux-amd64-v0.8.6.tgz to 192.168.8.83:/tmp/kubekey/cni-plugins-linux-amd64-v0.8.6.tgz   Done
Push /root/kubekey/v1.20.4/amd64/cni-plugins-linux-amd64-v0.8.6.tgz to 192.168.8.82:/tmp/kubekey/cni-plugins-linux-amd64-v0.8.6.tgz   Done
INFO[11:22:54 CST] Initializing kubernetes cluster
[k8s-master 192.168.8.81] MSG:
W0530 11:22:54.689097    7688 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[init] Using Kubernetes version: v1.20.4
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 26.1.3. Latest validated version: 19.03
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master k8s-master.cluster.local k8s-slaver-01 k8s-slaver-01.cluster.local k8s-slaver-02 k8s-slaver-02.cluster.local kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local lb.kubesphere.local localhost] and IPs [10.233.0.1 192.168.8.81 127.0.0.1 192.168.8.82 192.168.8.83]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] External etcd mode: Skipping etcd/ca certificate authority generation
[certs] External etcd mode: Skipping etcd/server certificate generation
[certs] External etcd mode: Skipping etcd/peer certificate generation
[certs] External etcd mode: Skipping etcd/healthcheck-client certificate generation
[certs] External etcd mode: Skipping apiserver-etcd-client certificate generation
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 54.002627 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.20" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the labels "node-role.kubernetes.io/master=''" and "node-role.kubernetes.io/control-plane='' (deprecated)"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: vjk7sq.fclgighxpol7y0jp
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:

  kubeadm join lb.kubesphere.local:6443 --token vjk7sq.fclgighxpol7y0jp \
    --discovery-token-ca-cert-hash sha256:a883cf70aff5075c1eaa81a73024e5e5e26db2e0f86ddecb1db9a9ca991cf879 \
    --control-plane

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join lb.kubesphere.local:6443 --token vjk7sq.fclgighxpol7y0jp \
    --discovery-token-ca-cert-hash sha256:a883cf70aff5075c1eaa81a73024e5e5e26db2e0f86ddecb1db9a9ca991cf879
[k8s-master 192.168.8.81] MSG:
service "kube-dns" deleted
[k8s-master 192.168.8.81] MSG:
service/coredns created
[k8s-master 192.168.8.81] MSG:
serviceaccount/nodelocaldns created
daemonset.apps/nodelocaldns created
[k8s-master 192.168.8.81] MSG:
configmap/nodelocaldns created
[k8s-master 192.168.8.81] MSG:
I0530 11:24:18.748859    9555 version.go:254] remote version is much newer: v1.30.1; falling back to: stable-1.20
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
a731ae68c896896eda6b3b5291fd0626d20caf72db09ccb7fab49606d4734b4e
[k8s-master 192.168.8.81] MSG:
secret/kubeadm-certs patched
[k8s-master 192.168.8.81] MSG:
secret/kubeadm-certs patched
[k8s-master 192.168.8.81] MSG:
secret/kubeadm-certs patched
[k8s-master 192.168.8.81] MSG:
kubeadm join lb.kubesphere.local:6443 --token t2f29w.scvsu83d9l2552ax     --discovery-token-ca-cert-hash sha256:a883cf70aff5075c1eaa81a73024e5e5e26db2e0f86ddecb1db9a9ca991cf879
[k8s-master 192.168.8.81] MSG:
k8s-master   v1.20.4   [map[address:192.168.8.81 type:InternalIP] map[address:k8s-master type:Hostname]]
INFO[11:24:20 CST] Joining nodes to cluster
[k8s-slaver-02 192.168.8.83] MSG:
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 26.1.3. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0530 11:24:21.705064    2253 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[k8s-slaver-02 192.168.8.83] MSG:
node/k8s-slaver-02 labeled
[k8s-slaver-01 192.168.8.82] MSG:
[preflight] Running pre-flight checks
        [WARNING SystemVerification]: this Docker version is not on the list of validated versions: 26.1.3. Latest validated version: 19.03
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0530 11:24:21.913659    2139 utils.go:69] The recommended value for "clusterDNS" in "KubeletConfiguration" is: [10.233.0.10]; the provided value is: [169.254.25.10]
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[k8s-slaver-01 192.168.8.82] MSG:
node/k8s-slaver-01 labeled
INFO[11:24:30 CST] Deploying network plugin ...
[k8s-master 192.168.8.81] MSG:
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
[k8s-master 192.168.8.81] MSG:
storageclass.storage.k8s.io/local created
serviceaccount/openebs-maya-operator created
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole
clusterrole.rbac.authorization.k8s.io/openebs-maya-operator created
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding
clusterrolebinding.rbac.authorization.k8s.io/openebs-maya-operator created
deployment.apps/openebs-localpv-provisioner created
INFO[11:24:33 CST] Deploying KubeSphere ...
v3.1.1
[k8s-master 192.168.8.81] MSG:
namespace/kubesphere-system created
namespace/kubesphere-monitoring-system created
[k8s-master 192.168.8.81] MSG:
secret/kube-etcd-client-certs created
[k8s-master 192.168.8.81] MSG:
namespace/kubesphere-system unchanged
serviceaccount/ks-installer unchanged
customresourcedefinition.apiextensions.k8s.io/clusterconfigurations.installer.kubesphere.io unchanged
clusterrole.rbac.authorization.k8s.io/ks-installer unchanged
clusterrolebinding.rbac.authorization.k8s.io/ks-installer unchanged
deployment.apps/ks-installer unchanged
clusterconfiguration.installer.kubesphere.io/ks-installer created
#####################################################
###              Welcome to KubeSphere!           ###
#####################################################

Console: http://192.168.8.81:30880
Account: admin
Password: P@88w0rd

NOTES:
  1. After you log into the console, please check the
     monitoring status of service components in
     "Cluster Management". If any service is not
     ready, please wait patiently until all components
     are up and running.
  2. Please change the default password after login.

#####################################################
https://kubesphere.io             2024-05-30 11:31:59
#####################################################
INFO[11:32:06 CST] Installation is complete.

Please check the result using the command:

       kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f

root@k8s-master:~#

Check the Installation Logs

bash
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
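
Once the installer reports completion, a few standard kubectl checks (not part of the original log) confirm the cluster is healthy:

bash
# All three nodes should be Ready, and pods should reach Running/Completed
kubectl get nodes -o wide
kubectl get pods -A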

Usage

Visit: http://192.168.8.81:30880/

Account: admin / P@88w0rd (change the default password after first login)
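
If you need to look up the console endpoint again later, it is exposed by the ks-console service (NodePort 30880 per the configuration above):

bash
kubectl -n kubesphere-system get svc ks-console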
