1. Preparation
1. Disable the firewall and SELinux
```bash
systemctl stop firewalld.service
systemctl disable firewalld.service
setenforce 0
vim /etc/selinux/config   # set SELINUX=disabled so SELinux stays off after a reboot
```
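The edit inside /etc/selinux/config is a single line; a non-interactive equivalent (assuming the default `SELINUX=enforcing` entry) would be:

```bash
# permanently disable SELinux; takes effect on next boot (setenforce 0 covers the current session)
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
```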
2. Disable swap
- If a swap partition exists, turn it off: kubelet refuses to start with swap enabled by default, and the partition only ties up disk space (see the sketch below).
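A minimal sketch of disabling swap both immediately and persistently (the sed pattern assumes a standard fstab swap entry):

```bash
swapoff -a                             # turn off all active swap now
sed -i '/ swap / s/^/#/' /etc/fstab    # comment out the swap entry so it stays off after reboot
```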
3. Set hostnames and passwordless SSH login
```bash
# on the first machine, which becomes the master
[root@localhost ~]# hostnamectl set-hostname master
[root@localhost ~]# bash
[root@master ~]#
# on the second machine, which becomes node1
[root@localhost ~]# hostnamectl set-hostname node1
[root@localhost ~]# bash
[root@node1 ~]#
[root@master ~]# echo "192.168.200.10 master" >> /etc/hosts
[root@master ~]# echo "192.168.200.11 node1" >> /etc/hosts
[root@master ~]# scp /etc/hosts root@node1:/etc/hosts
The authenticity of host 'node1 (192.168.200.11)' can't be established.
ECDSA key fingerprint is SHA256:ZJD5+SX9K5TIrhruYNsYjv8xCrndEXt4yrdCt077ejw.
ECDSA key fingerprint is MD5:bc:c9:43:fd:b0:19:f8:5e:15:a0:14:f2:ac:17:13:ff.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1,192.168.200.11' (ECDSA) to the list of known hosts.
root@node1's password:
hosts 100% 201 267.3KB/s 00:00
[root@master ~]# ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:I8nt/Be5ue9Rq2/u/zYvYCSgAQEHR15sYvwpIcFkqwE root@master
The key's randomart image is:
+---[RSA 2048]----+
|Eo***+. |
|..o==.+ . |
|. .o.= + . |
| o ..+o . . |
|. .+ S o . .|
| + . = ..|
| o . =.. |
| . + o+.|
| ...=B*O|
+----[SHA256]-----+
[root@master ~]# ssh-copy-id root@node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node1's password:
Number of key(s) added: 1
Now try logging into the machine, with: "ssh 'root@node1'"
and check to make sure that only the key(s) you wanted were added.
[root@master ~]# ssh root@node1
Last login: Sun Nov 17 13:33:50 2024 from 192.168.200.1
[root@node1 ~]#
```
4. Kernel parameters
```bash
[root@master yum.repos.d]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1  # run bridged IPv6 traffic through ip6tables rules
net.bridge.bridge-nf-call-iptables = 1   # run bridged IPv4 traffic through iptables rules
net.ipv4.ip_forward = 1                  # enable IPv4 forwarding; without it the node only handles packets addressed to itself and will not forward Pod traffic to other Pods or the outside world
[root@master yum.repos.d]# modprobe br_netfilter   # load the kernel module that exposes the bridge-nf sysctls
[root@master yum.repos.d]# sysctl -p /etc/sysctl.d/k8s.conf   # apply the values in this file immediately
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
```
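modprobe does not survive a reboot. One way to make the module load at boot, via systemd's modules-load mechanism (the file name is arbitrary):

```bash
# load br_netfilter automatically on every boot
echo br_netfilter > /etc/modules-load.d/k8s.conf
```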
5. Install Docker
- One important point: kubelet is configured for the systemd cgroup driver, while Docker defaults to cgroupfs; the mismatch breaks the kubelet, so Docker's driver is switched to systemd in daemon.json below.
```bash
yum -y install yum-utils
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce
# registry mirror Docker pulls images from, plus the systemd cgroup driver
# (JSON allows no comments, so the notes live here rather than inside the file)
[root@master /]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://registry.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
systemctl enable docker --now
```
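To confirm the driver change took effect once Docker is running:

```bash
# should print "Cgroup Driver: systemd"
docker info | grep -i 'cgroup driver'
```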
6. Add the Kubernetes yum repository
```bash
[root@master /]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
```
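A quick sanity check that the new repo is visible to yum:

```bash
# the kubernetes repo should appear in the output
yum repolist | grep -i kubernetes
```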
2. Installation
1. Install the Kubernetes packages
```bash
# install a pinned version
[root@master ~]# yum -y install kubelet-1.23.0 kubeadm-1.23.0 kubectl-1.23.0
# list the core images this installation needs
[root@master ~]# kubeadm config images list
I1117 14:17:57.019109 2908 version.go:255] remote version is much newer: v1.31.2; falling back to: stable-1.23
k8s.gcr.io/kube-apiserver:v1.23.17 # k8s.gcr.io is unreachable from mainland China; pull the same images from the Aliyun mirror instead
k8s.gcr.io/kube-controller-manager:v1.23.17
k8s.gcr.io/kube-scheduler:v1.23.17
k8s.gcr.io/kube-proxy:v1.23.17
k8s.gcr.io/pause:3.6
k8s.gcr.io/etcd:3.5.1-0
k8s.gcr.io/coredns/coredns:v1.8.6
# enable at boot; kubeadm will actually start the kubelet during init
[root@master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
# Aliyun mirror that hosts the control-plane images
https://registry.aliyuncs.com/google_containers
```
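Optionally, the images can be pre-pulled from the Aliyun mirror before init (kubeadm init with --image-repository pulls them anyway):

```bash
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.23.0
```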
2. Initialize the cluster
```bash
kubeadm init --apiserver-advertise-address=192.168.200.10 --kubernetes-version=v1.23.0 --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16 --image-repository=registry.aliyuncs.com/google_containers
--apiserver-advertise-address    # IP address of the control-plane node
--kubernetes-version=v1.23.0     # Kubernetes version to deploy
--service-cidr                   # address range Services (and the cluster DNS IP) are allocated from
--pod-network-cidr               # address range Pod IPs are allocated from
--image-repository               # registry to pull the control-plane images from
# init installs and configures the api-server, controller-manager, scheduler, etcd, and so on,
# writes an admin kubeconfig (/etc/kubernetes/admin.conf),
# and prints the kubeadm join command for adding worker nodes
```
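After a successful init, kubeadm prints the follow-up steps. A minimal sketch of what they look like (the token and CA hash are placeholders; copy the real values from your own init output):

```bash
# on the master: point kubectl at the admin kubeconfig
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# on node1: join the cluster (placeholder <token>/<hash>)
kubeadm join 192.168.200.10:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
```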
3. Install the Calico network plugin
```bash
wget https://docs.projectcalico.org/v3.21/manifests/calico.yaml
# optionally edit the manifest to silence a deprecation warning:
# change policy/v1beta1 to policy/v1
# (newer Kubernetes releases have deprecated the v1beta1 API)
kubectl apply -f calico.yaml
```
4. Verify
```bash
[root@master ~]# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS      AGE
default       busybox                                    1/1     Running   0             32m
kube-system   calico-kube-controllers-7f76d48f74-5zfg9   1/1     Running   0             44m
kube-system   calico-node-jnqkl                          1/1     Running   0             44m
kube-system   calico-node-mwhnh                          1/1     Running   0             44m
kube-system   coredns-6d8c4cb4d-kt7df                    1/1     Running   0             52m
kube-system   coredns-6d8c4cb4d-rsr6j                    1/1     Running   0             52m
kube-system   etcd-master                                1/1     Running   1 (41m ago)   52m
kube-system   kube-apiserver-master                      1/1     Running   1 (41m ago)   52m
kube-system   kube-controller-manager-master             1/1     Running   1 (41m ago)   52m
kube-system   kube-proxy-7cwv7                           1/1     Running   1 (41m ago)   51m
kube-system   kube-proxy-j5fph                           1/1     Running   1 (41m ago)   52m
kube-system   kube-scheduler-master                      1/1     Running   1 (41m ago)   52m
[root@master ~]# kubectl get node
NAME     STATUS   ROLES                  AGE   VERSION
master   Ready    control-plane,master   53m   v1.23.0
node1    Ready    <none>                 51m   v1.23.0
# optionally, launch a test Pod and check that it can reach the outside network (sketched below)
```
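A minimal sketch of such a test (the busybox image and the ping target are illustrative choices):

```bash
kubectl run busybox --image=busybox --restart=Never -- sleep 3600   # throwaway test Pod
kubectl exec busybox -- ping -c 3 223.5.5.5                         # outbound connectivity (Aliyun public DNS IP)
kubectl exec busybox -- nslookup kubernetes.default                 # cluster DNS resolution
kubectl delete pod busybox
```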