Installing Kubernetes with kubeadm and Deploying an Application

Installing Kubernetes

1. Prepare the machines

Prepare three machines:

192.168.136.104 master node
192.168.136.105 worker node
192.168.136.106 worker node
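
So that the nodes can reach one another by name, it also helps to add all three machines to /etc/hosts on every node. A minimal sketch, assuming the hostnames that appear later in this guide (docker104/105/106):

cat <<EOF >> /etc/hosts
192.168.136.104 docker104
192.168.136.105 docker105
192.168.136.106 docker106
EOF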

2. Pre-installation configuration

1. Base environment
#########################################################################
# Disable the firewall. On a cloud server, open the required ports in the
# security-group rules instead:
# https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#check-required-ports
systemctl stop firewalld
systemctl disable firewalld

# Change the hostname
hostnamectl set-hostname k8s-01
# Verify the change
hostnamectl status
# Add a hosts entry for the hostname
echo "127.0.0.1   $(hostname)" >> /etc/hosts

# Disable SELinux:
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

# Disable swap:
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Allow iptables to see bridged traffic
# https://kubernetes.io/zh/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#%E5%85%81%E8%AE%B8-iptables-%E6%A3%80%E6%9F%A5%E6%A1%A5%E6%8E%A5%E6%B5%81%E9%87%8F
## Enable br_netfilter:
## sudo modprobe br_netfilter
## and confirm it is loaded:
## lsmod | grep br_netfilter

## Update the sysctl configuration

##### Use this configuration (not the one from the course material)
# Pass bridged IPv4 traffic to iptables chains by editing /etc/sysctl.conf.
# If a key already exists, update it in place:
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g"  /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g"  /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g"  /etc/sysctl.conf
# If a key is missing, append it:
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.conf
# Apply the settings
sysctl -p
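
For the bridge sysctls to keep working after a reboot, the br_netfilter module itself must also be loaded persistently. A small sketch for a systemd-based system (the file name k8s.conf is an arbitrary choice):

# Load br_netfilter now and on every boot
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF
modprobe br_netfilter
# Confirm the module and the sysctl values took effect
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables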


#################################################################
2. Docker environment
sudo yum remove docker*
sudo yum install -y yum-utils
# Configure the Docker yum repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# List the available Docker versions
yum list docker-ce --showduplicates | sort -r

# Install Docker 3:26.1.4-1.el7
yum install -y docker-ce-3:26.1.4-1.el7.x86_64  docker-ce-cli-3:26.1.4-1.el7.x86_64 containerd.io

# Start Docker and enable it at boot
systemctl start docker
systemctl enable docker

# Configure registry mirrors (apply for your own accelerator addresses)
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
    "registry-mirrors" : [
        "https://#############.mirror.swr.myhuaweicloud.com",
        "https://#############.mirror.aliyuncs.com"
  ]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
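
Before moving on, it is worth a quick sanity check that Docker runs, and noting which cgroup driver it uses, since kubelet and Docker have to agree on that setting (this check is an addition, not part of the original steps):

docker --version
docker info | grep -i "cgroup driver"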
3. Install the Kubernetes core components (run on all nodes)
# Configure the Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Remove any old versions
yum remove -y kubelet kubeadm kubectl

# List the installable versions
yum list kubelet --showduplicates | sort -r

# Install pinned versions of kubelet, kubeadm, and kubectl
yum install -y kubelet-1.21.0 kubeadm-1.21.0 kubectl-1.21.0

# Enable kubelet at boot and start it
systemctl enable kubelet && systemctl start kubelet

# At this point a failed kubelet status is normal, because the other
# components are not fully installed yet
[root@docker104 ~]# systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /usr/lib/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: activating (auto-restart) (Result: exit-code) since Tue 2024-06-25 20:40:33 PDT; 9s ago
     Docs: https://kubernetes.io/docs/
  Process: 24068 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
 Main PID: 24068 (code=exited, status=1/FAILURE)

Jun 25 20:40:33 docker104 systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
Jun 25 20:40:33 docker104 systemd[1]: Unit kubelet.service entered failed state.
Jun 25 20:40:33 docker104 systemd[1]: kubelet.service failed.
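
To confirm the failure is the expected one (kubelet cannot find its cluster configuration yet) rather than something else, inspect the kubelet journal:

# Show the most recent kubelet log lines
journalctl -u kubelet --no-pager | tail -20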
4. Initialize the Kubernetes master (run on the master node only)
# First, list the images kubeadm depends on
[root@docker104 ~]# kubeadm config images list
I0625 20:47:43.820940   24427 version.go:254] remote version is much newer: v1.30.2; falling back to: stable-1.21
k8s.gcr.io/kube-apiserver:v1.21.14
k8s.gcr.io/kube-controller-manager:v1.21.14
k8s.gcr.io/kube-scheduler:v1.21.14
k8s.gcr.io/kube-proxy:v1.21.14
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0

# Wrap the pulls in an images.sh script
#!/bin/bash
images=(
  kube-apiserver:v1.21.0
  kube-proxy:v1.21.0
  kube-controller-manager:v1.21.0
  kube-scheduler:v1.21.0
  coredns:v1.8.0
  etcd:3.4.13-0
  pause:3.4.1
)
for imageName in ${images[@]} ; do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
# end of script


## Note: in Kubernetes 1.21.0 the coredns image lives under a nested path
## (coredns/coredns); when pulling from the Aliyun mirror, re-tag it to match:
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.0 registry.cn-hangzhou.aliyuncs.com/google_containers/coredns/coredns:v1.8.0

# Run the script
chmod +x images.sh && ./images.sh
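
Before running kubeadm init, it is worth confirming that all seven images actually landed locally, for example:

# Every image from the list above should appear here
docker images | grep google_containers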


######## kubeadm init on the single master ########
######## kubeadm join on each worker      ########
kubeadm init \
--apiserver-advertise-address=192.168.136.104 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.21.0 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=192.178.0.0/16
## Note: pod-network-cidr vs. service-cidr
# CIDR: Classless Inter-Domain Routing, i.e. a reachable network range.
# The pod subnet, the service subnet, and the host IP subnet must not overlap.
# --apiserver-advertise-address  address the API server advertises; use the master's IP
# --image-repository             image registry prefix
# --pod-network-cidr             pod network range
# --service-cidr                 service (load-balancing) network range
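
If you want to validate the flags before touching the node, kubeadm can do a dry run that walks through the steps without applying any changes (a sketch reusing the flags from above):

kubeadm init --dry-run \
--apiserver-advertise-address=192.168.136.104 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.21.0 \
--service-cidr=10.96.0.0/16 \
--pod-network-cidr=192.178.0.0/16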



############ After init, the output tells you what to run next ############
Your Kubernetes control-plane has initialized successfully!
# Copy the kubeconfig into place
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

# Export the kubeconfig environment variable
  export KUBECONFIG=/etc/kubernetes/admin.conf

# Deploy a pod network by installing a network plugin
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
  
############## Recommended: install Calico ##############
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml


### The Calico image pulls may fail; in that case, download the manifest and
### fetch the required release from the official site
wget https://docs.projectcalico.org/manifests/calico.yaml
# Check the image versions
[root@docker104 k8s]#  cat calico.yaml |grep 'image:'
          image: docker.io/calico/cni:v3.25.0
          image: docker.io/calico/cni:v3.25.0
          image: docker.io/calico/node:v3.25.0
          image: docker.io/calico/node:v3.25.0
          image: docker.io/calico/kube-controllers:v3.25.0
# Strip the docker.io/ registry prefix so the manifest matches the local images
[root@docker104 k8s]# sed -i 's#docker.io/##g' calico.yaml
# Check again
[root@docker104 k8s]#  cat calico.yaml |grep 'image:'
          image: calico/cni:v3.25.0
          image: calico/cni:v3.25.0
          image: calico/node:v3.25.0
          image: calico/node:v3.25.0
          image: calico/kube-controllers:v3.25.0
          
# Download the matching release from the official GitHub page and extract it
# https://github.com/projectcalico/calico/releases?page=2
tar -vzxf release-v3.25.0.tgz 
[root@docker104 release-v3.25.0]# ls
bin  images  manifests

# Go into the images directory and load the required images
docker load -i calico-kube-controllers.tar
docker load -i calico-cni.tar 
docker load -i calico-node.tar 

# Re-apply the manifest
kubectl apply -f calico.yaml

### Verification
kubectl get pod -A  ## list every pod deployed in the cluster
[root@docker104 images]# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-77959b97b9-pgl2g   1/1     Running   0          25m
kube-system   calico-node-h6bsk                          1/1     Running   0          25m
kube-system   coredns-57d4cbf879-s726m                   1/1     Running   0          17h
kube-system   coredns-57d4cbf879-trdbs                   1/1     Running   0          17h
kube-system   etcd-docker104                             1/1     Running   0          17h
kube-system   kube-apiserver-docker104                   1/1     Running   0          17h
kube-system   kube-controller-manager-docker104          1/1     Running   0          17h
kube-system   kube-proxy-q9g8p                           1/1     Running   0          17h
kube-system   kube-scheduler-docker104                   1/1     Running   0          17h

kubectl get nodes  ## check the status of every node in the cluster

5. Initialize the worker nodes

Then you can join any number of worker nodes by running the following on each as root:
# Run the following on each worker node as root to join it to the cluster; the
# exact command, including token and hash, is printed at the end of kubeadm init
kubeadm join 192.168.136.104:6443 --token tbjrly.cbmgi5g7nb366f1m \
        --discovery-token-ca-cert-hash sha256:a5f53bc7d06d595ae0e34fd92028e03bdbec8aa62b1041c9765f2739f59877fe 
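
# The token embedded in the join command expires (24 hours by default). If a
# worker joins later than that, generate a fresh command on the master:
kubeadm token create --print-join-command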

# After every worker has joined, run this on the master
kubectl get nodes  ## check the status of every node in the cluster
[root@docker104 k8s]# kubectl get nodes
NAME        STATUS     ROLES                  AGE   VERSION
docker104   Ready      control-plane,master   17h   v1.21.0
docker105   NotReady   <none>                 92s   v1.21.0
docker106   NotReady   <none>                 74s   v1.21.0


# A worker stuck in NotReady usually has no network plugin yet; load the Calico
# images on it just as on the master and the plugin will finish installing

# For details, inspect the logs: tail -100f /var/log/messages
## log excerpt begins
Jun 26 18:21:08 docker105 kubelet: E0626 18:21:08.870465   91095 pod_workers.go:190] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"upgrade-ipam\" with ImagePullBackOff: \"Back-off pulling image \\\"calico/cni:v3.25.0\\\"\"" pod="kube-system/calico-node-2zq8d" podUID=78164c8e-8ba0-477e-90d1-76be4fd6965f
Jun 26 18:21:13 docker105 kubelet: I0626 18:21:13.264027   91095 cni.go:239] "Unable to update cni config" err="no networks found in /etc/cni/net.d"
Jun 26 18:21:13 docker105 kubelet: E0626 18:21:13.281034   91095 kubelet.go:2218] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized"
## log excerpt ends
# The "cni config uninitialized" message above is the sign of a missing network plugin


docker load -i calico-cni.tar 
docker load -i calico-node.tar 
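
# If the tarballs only exist on the master, copying them to the workers is
# enough (paths and targets below are assumptions matching this guide):
scp images/calico-cni.tar images/calico-node.tar root@192.168.136.105:/root/
scp images/calico-cni.tar images/calico-node.tar root@192.168.136.106:/root/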


# After a short while, all nodes show Ready on the master
[root@docker104 k8s]# kubectl get nodes
NAME        STATUS   ROLES                  AGE   VERSION
docker104   Ready    control-plane,master   18h   v1.21.0
docker105   Ready    <none>                 75m   v1.21.0
docker106   Ready    <none>                 75m   v1.21.0

[root@docker104 k8s]# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-77959b97b9-pgl2g   1/1     Running   0          113m
kube-system   calico-node-2zq8d                          1/1     Running   0          80m
kube-system   calico-node-b94mp                          1/1     Running   0          80m
kube-system   calico-node-h6bsk                          1/1     Running   0          113m
kube-system   coredns-57d4cbf879-s726m                   1/1     Running   0          18h
kube-system   coredns-57d4cbf879-trdbs                   1/1     Running   0          18h
kube-system   etcd-docker104                             1/1     Running   0          18h
kube-system   kube-apiserver-docker104                   1/1     Running   0          18h
kube-system   kube-controller-manager-docker104          1/1     Running   0          18h
kube-system   kube-proxy-2djt9                           1/1     Running   0          80m
kube-system   kube-proxy-j5jkv                           1/1     Running   0          80m
kube-system   kube-proxy-q9g8p                           1/1     Running   0          18h
kube-system   kube-scheduler-docker104                   1/1     Running   0          18h
6. Configure IPVS mode

For service traffic across the cluster, Kubernetes uses iptables by default; performance degrades as the cluster grows, because kube-proxy keeps syncing the iptables rules on every node.

#1. Check which mode kube-proxy is currently using
kubectl logs -n kube-system kube-proxy-28xv4
#2. Edit the kube-proxy ConfigMap and set mode to ipvs. The default, iptables,
#   becomes slow once the cluster grows
kubectl edit cm kube-proxy -n kube-system
# Change the configuration as follows:
    ipvs:
      excludeCIDRs: null
      minSyncPeriod: 0s
      scheduler: ""
      strictARP: false
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"
### The config change only takes effect after the old kube-proxy pods are
### killed; the DaemonSet recreates them with the new configuration
kubectl get pod -A | grep kube-proxy
kubectl delete pod kube-proxy-pqgnt -n kube-system
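
Note that ipvs mode also requires the IPVS kernel modules; if kube-proxy cannot load them, it silently falls back to iptables. A sketch for CentOS 7 (on newer kernels, nf_conntrack replaces nf_conntrack_ipv4):

# Install the ipvs userspace tool and load the kernel modules (on every node)
yum install -y ipvsadm
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do
    modprobe $m
done
# After the kube-proxy pods have been recreated, confirm ipvs is in use
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs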
7. Deploy an application
# Deploy an Nginx application as a test
[root@docker104 k8s]# kubectl create deploy my-nginx --image=nginx
deployment.apps/my-nginx created

# Find the IP of the deployed pod
[root@docker104 k8s]# kubectl get pod -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE     IP                NODE        NOMINATED NODE   READINESS GATES
default       my-nginx-6b74b79f57-mg76v                  1/1     Running   0          66s     192.178.70.193    docker105   <none>           <none>

# Access the test Nginx pod
[root@docker104 k8s]# curl 192.178.70.193
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
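
The pod IP above is only routable inside the cluster. One way to reach Nginx from outside is a NodePort service; a short sketch (Kubernetes picks the node port from the 30000-32767 range):

# Expose the deployment on a NodePort
kubectl expose deploy my-nginx --port=80 --type=NodePort
kubectl get svc my-nginx
# Then access http://<any-node-ip>:<nodePort> from outside the cluster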