K8s 1.29.2 Binary Installation - Chapter 3 (Master Component Installation)

K8s binary installation: this chapter installs the control-plane components kube-apiserver, kube-controller-manager, and kube-scheduler, sets up nginx as a high-availability load balancer in front of the API servers, and configures TLS Bootstrapping so that client (kubelet) certificates can be issued automatically later. (The container images and tools used in this series will be shared once all the articles are finished.)
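
For reference, these are the hosts and addresses used throughout this chapter, collected from the configuration below (the names and roles reflect this lab environment and are partly inferred; adjust them to your own setup):

# Control-plane nodes running kube-apiserver (server170, server171, server172)
MASTERS="192.168.1.170 192.168.1.171 192.168.1.172"
# Local kube-nginx endpoint every node uses instead of a single apiserver address
KUBE_APISERVER_LB="https://127.0.0.1:8443"
# Hosts that receive the compiled nginx in section 1
NGINX_NODES="server171 server172 server173 server174"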

1. Nginx load balancing for the K8s API server

# Install the build toolchain
yum install gcc -y

# Download and extract the nginx source
wget http://nginx.org/download/nginx-1.25.3.tar.gz
tar xvf nginx-1.25.3.tar.gz
cd nginx-1.25.3

# Build with the stream module only (HTTP modules disabled)
./configure --with-stream --without-http --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
make && make install 

# Copy the compiled nginx to the other hosts
node='server171 server172 server173 server174'
for NODE in $node; do scp -r /usr/local/nginx/ $NODE:/usr/local/nginx/; done

# Write the configuration file (run on all hosts)
cat > /usr/local/nginx/conf/nginx.conf <<EOF
worker_processes 1;
events {
  worker_connections  1024;
}
stream {
  upstream backend {
    least_conn;
    server 192.168.1.170:6443     max_fails=3 fail_timeout=30s;
    server 192.168.1.171:6443     max_fails=3 fail_timeout=30s;
    server 192.168.1.172:6443     max_fails=3 fail_timeout=30s;
  }
  server {
    listen 127.0.0.1:8443;
    proxy_connect_timeout 1s;
    proxy_pass backend;
  }
}
EOF
# Alternatively, generate it here once and push it to the other hosts
for NODE in $node; do scp -r /usr/local/nginx/conf/nginx.conf $NODE:/usr/local/nginx/conf; done

# Manage the nginx service with systemd (do this on all hosts)
cat > /etc/systemd/system/kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf -p /usr/local/nginx -t
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf -p /usr/local/nginx
ExecReload=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf -p /usr/local/nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536
 
[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and start the service (on all hosts)
systemctl daemon-reload
systemctl enable --now kube-nginx.service
systemctl status kube-nginx.service
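
At this point only the local proxy can be verified, because the upstream API servers are installed in the next section. A minimal check that kube-nginx is listening (assuming the iproute2 `ss` tool is available; `netstat -lntp` works as well):

# kube-nginx should be listening on 127.0.0.1:8443 on every host
ss -lntp | grep 8443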

2. kube-apiserver installation

# Create the required directories (on all master nodes)
mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes

# Manage the kube-apiserver service with systemd (all master nodes)

# server170 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
   --v=2  \\
   --allow-privileged=true  \\
   --bind-address=0.0.0.0  \\
   --secure-port=6443  \\
   --advertise-address=192.168.1.170 \\
   --service-cluster-ip-range=10.96.0.0/12  \\
   --service-node-port-range=30000-32767  \\
   --etcd-servers=https://192.168.1.170:2379,https://192.168.1.171:2379,https://192.168.1.172:2379 \\
   --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
   --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
   --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
   --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
   --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
   --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
   --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
   --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
   --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
   --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
   --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
   --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
   --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
   --authorization-mode=Node,RBAC  \\
   --enable-bootstrap-token-auth=true  \\
   --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
   --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
   --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
   --requestheader-allowed-names=aggregator  \\
   --requestheader-group-headers=X-Remote-Group  \\
   --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
   --requestheader-username-headers=X-Remote-User \\
   --enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF


# server171 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
   --v=2  \\
   --allow-privileged=true  \\
   --bind-address=0.0.0.0  \\
   --secure-port=6443  \\
   --advertise-address=192.168.1.171 \\
   --service-cluster-ip-range=10.96.0.0/12  \\
   --service-node-port-range=30000-32767  \\
   --etcd-servers=https://192.168.1.170:2379,https://192.168.1.171:2379,https://192.168.1.172:2379 \\
   --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
   --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
   --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
   --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
   --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
   --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
   --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
   --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
   --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
   --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
   --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
   --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
   --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
   --authorization-mode=Node,RBAC  \\
   --enable-bootstrap-token-auth=true  \\
   --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
   --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
   --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
   --requestheader-allowed-names=aggregator  \\
   --requestheader-group-headers=X-Remote-Group  \\
   --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
   --requestheader-username-headers=X-Remote-User \\
   --enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF




# server172 node configuration
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
   --v=2  \\
   --allow-privileged=true  \\
   --bind-address=0.0.0.0  \\
   --secure-port=6443  \\
   --advertise-address=192.168.1.172 \\
   --service-cluster-ip-range=10.96.0.0/12  \\
   --service-node-port-range=30000-32767  \\
   --etcd-servers=https://192.168.1.170:2379,https://192.168.1.171:2379,https://192.168.1.172:2379 \\
   --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \\
   --etcd-certfile=/etc/etcd/ssl/etcd.pem  \\
   --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \\
   --client-ca-file=/etc/kubernetes/pki/ca.pem  \\
   --tls-cert-file=/etc/kubernetes/pki/apiserver.pem  \\
   --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem  \\
   --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem  \\
   --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem  \\
   --service-account-key-file=/etc/kubernetes/pki/sa.pub  \\
   --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \\
   --service-account-issuer=https://kubernetes.default.svc.cluster.local \\
   --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \\
   --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \\
   --authorization-mode=Node,RBAC  \\
   --enable-bootstrap-token-auth=true  \\
   --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \\
   --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \\
   --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \\
   --requestheader-allowed-names=aggregator  \\
   --requestheader-group-headers=X-Remote-Group  \\
   --requestheader-extra-headers-prefix=X-Remote-Extra-  \\
   --requestheader-username-headers=X-Remote-User \\
   --enable-aggregator-routing=true
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535

[Install]
WantedBy=multi-user.target

EOF
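
The three unit files above differ only in --advertise-address, so they can optionally be generated from a single template instead of being written by hand. A minimal sketch, run on server170, assuming the flag list above has been saved locally as kube-apiserver.service.tpl with the placeholder __ADVERTISE_IP__ in place of the node IP (the template name and placeholder are hypothetical) and that root SSH between the masters is already configured:

for ip in 192.168.1.170 192.168.1.171 192.168.1.172; do
  # Render the per-node unit file by substituting the advertise address
  sed "s/__ADVERTISE_IP__/${ip}/" kube-apiserver.service.tpl > /tmp/kube-apiserver.service.${ip}
  # Push it to the matching master
  scp /tmp/kube-apiserver.service.${ip} root@${ip}:/usr/lib/systemd/system/kube-apiserver.service
done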


# Start kube-apiserver (on all master nodes)
systemctl daemon-reload
systemctl enable --now kube-apiserver.service
systemctl status kube-apiserver.service
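
Once all three API servers are running, they can be spot-checked directly and through the kube-nginx proxy. A minimal sketch; with the default settings the unauthenticated health paths return "ok", while a 401/403 response would still show that the TLS listener is up:

# Each apiserver should answer /healthz on port 6443
for ip in 192.168.1.170 192.168.1.171 192.168.1.172; do
  echo -n "${ip}: "; curl -sk "https://${ip}:6443/healthz"; echo
done
# And through the local kube-nginx proxy from section 1
echo -n "127.0.0.1:8443: "; curl -sk https://127.0.0.1:8443/healthz; echo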

3. kube-controller-manager installation

# Configure the kube-controller-manager service
# All master nodes use the same configuration
# 172.16.0.0/12 is the Pod CIDR; set it to your own network as needed
# The IPv6 ranges (fd00:1111::/112, fc00:2222::/112) and --node-cidr-mask-size-ipv6 below are only needed for dual-stack; remove them if you do not use IPv6

cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
   --v=2 \\
   --bind-address=0.0.0.0 \\
   --root-ca-file=/etc/kubernetes/pki/ca.pem \\
   --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \\
   --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \\
   --service-account-private-key-file=/etc/kubernetes/pki/sa.key \\
   --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig \\
   --leader-elect=true \\
   --use-service-account-credentials=true \\
   --node-monitor-grace-period=40s \\
   --node-monitor-period=5s \\
   --controllers=*,bootstrapsigner,tokencleaner \\
   --allocate-node-cidrs=true \\
   --service-cluster-ip-range=10.96.0.0/12,fd00:1111::/112 \\
   --cluster-cidr=172.16.0.0/12,fc00:2222::/112 \\
   --node-cidr-mask-size-ipv4=24 \\
   --node-cidr-mask-size-ipv6=120 \\
   --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF


# Start kube-controller-manager
systemctl daemon-reload
systemctl enable --now kube-controller-manager.service
systemctl status kube-controller-manager.service
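
As a quick health check, kube-controller-manager serves /healthz on its secure port 10257, and the health paths are reachable without credentials by default; the command below should print "ok":

curl -sk https://127.0.0.1:10257/healthz; echo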

4. kube-scheduler installation

# Configure the kube-scheduler service
# All master nodes use the same configuration
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF

[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
  --v=2 \\
  --bind-address=0.0.0.0 \\
  --leader-elect=true \\
  --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target

EOF


# Start kube-scheduler
systemctl daemon-reload
systemctl enable --now kube-scheduler.service
systemctl status kube-scheduler.service
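
The same kind of check works for kube-scheduler, which serves its health endpoints on port 10259 by default:

curl -sk https://127.0.0.1:10259/healthz; echo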

5. TLS Bootstrapping configuration

# Lets node (kubelet) client certificates be issued automatically later
# Configure on master01 only
kubectl config set-cluster kubernetes   \
--certificate-authority=/etc/kubernetes/pki/ca.pem   \
--embed-certs=true   --server=https://127.0.0.1:8443   \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

# Set the bootstrap token
kubectl config set-credentials tls-bootstrap-token-user   \
--token=c8ad9c.2e4d610cf3e7426e \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config set-context tls-bootstrap-token-user@kubernetes   \
--cluster=kubernetes   \
--user=tls-bootstrap-token-user   \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig

kubectl config use-context tls-bootstrap-token-user@kubernetes   \
--kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig
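
To sanity-check the cluster, user, and context entries written above (optional; add --raw to see the embedded certificate and token data):

kubectl config view --kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig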

mkdir -p /root/.kube ; cp /etc/kubernetes/admin.kubeconfig /root/.kube/config

# Check the cluster component status
kubectl get cs

# Bootstrap token Secret and the RBAC objects it needs
cat > bootstrap.secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-c8ad9c
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: c8ad9c
  token-secret: 2e4d610cf3e7426e
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kube-apiserver
EOF
# Apply the manifest
kubectl create -f bootstrap.secret.yaml
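
To confirm the manifest was applied, list the objects it creates (names taken from the YAML above):

kubectl get secret bootstrap-token-c8ad9c -n kube-system
kubectl get clusterrolebinding kubelet-bootstrap node-autoapprove-bootstrap node-autoapprove-certificate-rotation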