1 Host and service planning
| Host IP | Hostname | Node role | Services |
|---|---|---|---|
| 192.168.199.20 | k8s.master.vip vip | Virtual IP | |
| 192.168.199.21 | k8s01 | k8s-Master | Keepalived, HAProxy, Docker |
| 192.168.199.22 | k8s02 | k8s-Master | Keepalived, HAProxy, Docker |
| 192.168.199.23 | k8s03 | k8s-Node | Docker |
| 192.168.199.24 | k8s04 | k8s-Node | Docker |
| 192.168.199.25 | k8s05 | k8s-Node | Docker |
2 Host initialization
Reference: https://blog.csdn.net/lilinxi001/article/details/140184722
2.1 Configure the hostname
```shell
hostnamectl set-hostname [hostname]
```
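For example, on the first master the command would look like this; repeat on each node with its own name from the planning table:
```shell
# Run on 192.168.199.21; use k8s02 ... k8s05 on the other nodes
hostnamectl set-hostname k8s01
```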
2.2 Configure /etc/hosts
```shell
vi /etc/hosts
# append the following entries
192.168.199.20 k8s.master.vip vip
192.168.199.21 k8s01
192.168.199.22 k8s02
192.168.199.23 k8s03
192.168.199.24 k8s04
192.168.199.25 k8s05
```
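Once the same entries exist on every node, a quick sanity check is to resolve and ping a peer by name, for example:
```shell
# Confirm that the hostname maps to the planned address and is reachable
getent hosts k8s02
ping -c 2 k8s02
```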
2.3 Pass bridged IPv4 traffic to iptables
```shell
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system   # apply the settings
```
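These bridge sysctls only exist once the br_netfilter kernel module is loaded; if `sysctl --system` reports the keys as unknown, loading the module (and making it persistent) is one way to fix it:
```shell
# Load br_netfilter so the bridge-nf-call sysctls are available
modprobe br_netfilter
# Load it automatically on boot
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF
sysctl --system
```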
2.4 Deploy time synchronization
```shell
yum install ntpdate -y
ntpdate time.windows.com
```
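ntpdate only performs a one-off sync. To keep the clocks aligned afterwards you could, for example, add a cron entry; the hourly schedule below is just an assumption and can be adjusted:
```shell
# Re-sync the clock every hour (assumed schedule; change as needed)
echo "0 * * * * /usr/sbin/ntpdate time.windows.com > /dev/null 2>&1" >> /var/spool/cron/root
```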
3 Deploy Keepalived on both master nodes (k8s01 and k8s02)
3.1 Install Keepalived
```shell
yum -y install conntrack-tools
yum -y install libseccomp
yum -y install libtool-ltdl
yum -y install keepalived
```
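Optionally, confirm on both masters that the package landed and check its version:
```shell
# Verify the installed keepalived package and version
rpm -q keepalived
keepalived -v
```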
3.2 k8s01 configuration
```shell
vi /etc/keepalived/keepalived.conf
# replace the contents with the following
! Configuration File for keepalived
global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33          # set to the actual NIC name
    virtual_router_id 51
    priority 250
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.199.20
    }
    track_script {
        check_haproxy
    }
}
```
3.3 k8s02 configuration
```shell
vi /etc/keepalived/keepalived.conf
# replace the contents with the following
! Configuration File for keepalived
global_defs {
    router_id k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33          # set to the actual NIC name
    virtual_router_id 51
    priority 200             # lower than the MASTER's priority so the VIP stays on k8s01
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.199.20
    }
    track_script {
        check_haproxy
    }
}
```
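Starting the service is not shown above, so presumably something like the following runs on both masters once the configuration files are in place; afterwards the VIP should be attached to k8s01's NIC:
```shell
# Enable and start keepalived on k8s01 and k8s02
systemctl enable keepalived && systemctl start keepalived
# On k8s01, the virtual IP should now appear on ens33
ip addr show ens33 | grep 192.168.199.20
```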
4 Deploy HAProxy on both master nodes (k8s01 and k8s02)
```shell
yum install -y haproxy
```
4.1 HAProxy configuration
```shell
vi /etc/haproxy/haproxy.cfg
# replace the contents with the following
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.*    /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode            tcp
    bind            *:16443
    option          tcplog
    default_backend kubernetes-apiserver

#---------------------------------------------------------------------
# round robin balancing between the kubernetes apiserver backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode    tcp
    balance roundrobin
    server  k8s01 192.168.199.21:6443 check
    server  k8s02 192.168.199.22:6443 check

listen stats
    bind          *:1080
    stats auth    admin:awesomePassword
    stats refresh 5s
    stats realm   HAProxy\ Statistics
    stats uri     /admin?stats
```
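As with keepalived, starting the service is not shown; on both masters the usual sequence would be something like the one below, after which port 16443 should be listening (the backends will report DOWN until the API servers are up):
```shell
# Enable and start HAProxy on k8s01 and k8s02
systemctl enable haproxy && systemctl start haproxy
# Port 16443 should now be listening on both masters
ss -lnt | grep 16443
```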
5 Deploy Docker, kubeadm, and kubelet
5.1 Install Docker
```shell
yum -y install wget
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# the Docker version can be changed to suit your environment
yum install -y docker-ce-19.03.0 docker-ce-cli-19.03.0
systemctl enable docker && systemctl start docker
```
Configure the registry mirror and cgroup driver:
```shell
vi /etc/docker/daemon.json
# replace the contents with the following
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://5w5kf152.mirror.aliyuncs.com"]
}
```
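daemon.json is only read at startup, so Docker needs a restart for the cgroup driver and mirror settings to take effect; the driver can then be verified:
```shell
# Restart Docker so daemon.json takes effect
systemctl daemon-reload && systemctl restart docker
# Should report "Cgroup Driver: systemd"
docker info | grep -i cgroup
```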
5.2 Install kubeadm, kubelet, and kubectl
```shell
vi /etc/yum.repos.d/kubernetes.repo
# add the following
[Kubernetes]
name=kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
```
```shell
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
systemctl enable kubelet
```
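kubeadm's preflight checks fail when swap is enabled, and the host-initialization steps above do not disable it, so it is worth doing that on every node before continuing (shown here as a suggested extra step), then confirming the installed versions:
```shell
# kubeadm requires swap to be off
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab   # keep it off after reboot
# Confirm the installed versions
kubeadm version
kubelet --version
```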
5.3 Deploy the Kubernetes master (run on k8s01)
```shell
kubeadm init \
  --apiserver-advertise-address=192.168.199.21 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.18.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16
```
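Because the VIP and HAProxy from sections 3 and 4 front the API servers on port 16443, an HA-aware init would normally target that load-balanced endpoint rather than a single master's address. The variant below is only a sketch of that approach (the endpoint name comes from the /etc/hosts entry above), followed by the standard post-init kubectl setup that kubeadm itself prints:
```shell
# Alternative init that targets the VIP/HAProxy endpoint (port 16443), so the
# second master can later join the control plane through the load balancer
kubeadm init \
  --control-plane-endpoint "k8s.master.vip:16443" \
  --upload-certs \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.18.0 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16

# After init, set up kubectl access as suggested in kubeadm's output
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
```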