Using LoadBalancer Services with Alibaba Cloud SLB on a Self-Hosted Kubernetes Cluster

I. Test Environment Overview

  • Alibaba Cloud ECS servers
  • Kubernetes v1.24.9, installed with kubeadm

II. Configuring the Alibaba Cloud CloudProvider

1. Edit kube-apiserver.yaml and kube-controller-manager.yaml, adding the --cloud-provider=external flag. Both are static pods, so the kubelet recreates them automatically once the manifests under /etc/kubernetes/manifests are saved; a verification sketch follows the transcript below.

[root@master manifests]# grep  -C1 'cloud-provider=external' kube-controller-manager.yaml
    - --use-service-account-credentials=true
    - --cloud-provider=external
    image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.24.9
[root@master manifests]# grep  -C1 'cloud-provider=external' kube-apiserver.yaml
    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
    - --cloud-provider=external
    image: registry.cn-hangz
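To confirm the flag took effect after the kubelet recreated the pods, here is a minimal sketch, assuming the kubeadm-default component labels on the static pods:

# Sketch: verify the restarted static pods picked up --cloud-provider=external
kubectl -n kube-system get pod -l component=kube-apiserver -o yaml | grep 'cloud-provider'
kubectl -n kube-system get pod -l component=kube-controller-manager -o yaml | grep 'cloud-provider'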

2. Set the provider ID (region ID plus instance ID) on every node

[root@master manifests]# kubectl get node
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   38m   v1.24.9
node1    Ready    <none>          38m   v1.24.9
[root@master manifests]# ssh node1
Last login: Fri Nov 17 10:00:48 2023 from 172.30.8.211

Welcome to Alibaba Cloud Elastic Compute Service !

[root@node1 ~]# META_EP=http://100.100.100.200/latest/meta-data
[root@node1 ~]# provider_id=`curl -s $META_EP/region-id`.`curl -s $META_EP/instance-id`
[root@node1 ~]# echo $provider_id
cn-chengdu.i-2vcb7xboqe2qd7arpjpt
[root@node1 ~]# logout
Connection to node1 closed.
[root@master manifests]# kubectl patch node node1 -p '{"spec":{"providerID": "cn-chengdu.i-2vcb7xboqe2qd7arpjpt"}}'
node/node1 patched
[root@master manifests]# META_EP=http://100.100.100.200/latest/meta-data
[root@master manifests]# provider_id=`curl -s $META_EP/region-id`.`curl -s $META_EP/instance-id`
[root@master manifests]# echo $provider_id
cn-chengdu.i-2vcb7xboqe2qd7arpjpu
[root@master manifests]# kubectl patch node master -p '{"spec":{"providerID": "cn-chengdu.i-2vcb7xboqe2qd7arpjpu"}}'
node/master patched
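Patching each node by hand does not scale beyond a couple of machines. Here is a minimal sketch that automates it, assuming passwordless SSH from the master to every node under its Kubernetes node name:

# Sketch: set providerID for all nodes in one pass
META_EP=http://100.100.100.200/latest/meta-data
for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}'); do
  provider_id=$(ssh "$node" "curl -s $META_EP/region-id").$(ssh "$node" "curl -s $META_EP/instance-id")
  kubectl patch node "$node" -p "{\"spec\":{\"providerID\": \"$provider_id\"}}"
done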

3. Create an AccessKey/SecretKey (AK/SK) account with SLB management permissions

  • Attach the SLB management permission to the account

4. Store the AK/SK of that account in a ConfigMap

echo -n "$AccessKeyID" |base64
echo -n "$AcceessKeySecret"|base64

cat <<'EOF' >cloud-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cloud-config
  namespace: kube-system
data:
  cloud-config.conf: |-
    {
        "Global": {
            "accessKeyID": "$your-AccessKeyID-base64",
            "accessKeySecret": "$your-AccessKeySecret-base64"
        }
    }
EOF

kubectl create -f cloud-config.yaml
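The quoted heredoc delimiter above keeps the $your-...-base64 placeholders literal; replace them with the base64 output before applying. Alternatively, a minimal sketch that renders the ConfigMap directly, assuming AccessKeyID and AccessKeySecret are set in the current shell:

# Sketch: generate cloud-config.yaml with the keys already base64-encoded
AK_B64=$(echo -n "$AccessKeyID" | base64 -w 0)
SK_B64=$(echo -n "$AccessKeySecret" | base64 -w 0)
cat <<EOF >cloud-config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cloud-config
  namespace: kube-system
data:
  cloud-config.conf: |-
    {
        "Global": {
            "accessKeyID": "$AK_B64",
            "accessKeySecret": "$SK_B64"
        }
    }
EOF
kubectl create -f cloud-config.yaml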
  • Verify
[root@master ~]# kubectl get cm  -n kube-system cloud-config
NAME           DATA   AGE
cloud-config   1      25s
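A quick sketch to confirm the stored JSON looks right (the dot in the key name is escaped for jsonpath):

# Sketch: print the rendered cloud-config.conf from the ConfigMap
kubectl -n kube-system get cm cloud-config -o jsonpath='{.data.cloud-config\.conf}'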

5. Create the kubeconfig for the Alibaba Cloud cloud-controller-manager

  • How to obtain CA_DATA
cat /etc/kubernetes/pki/ca.crt|base64 -w 0
  • Create the /etc/kubernetes/cloud-controller-manager.conf file
kind: Config
contexts:
- context:
    cluster: kubernetes
    user: system:cloud-controller-manager
  name: system:cloud-controller-manager@kubernetes
current-context: system:cloud-controller-manager@kubernetes
users:
- name: system:cloud-controller-manager
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: $CA_DATA
    server: https://192.168.1.76:6443 # kube-apiserver address
  name: kubernetes
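Both placeholders can also be filled in one step. A minimal sketch (the server address below is this cluster's 172.30.8.211; substitute your own):

# Sketch: render cloud-controller-manager.conf with CA_DATA substituted
CA_DATA=$(base64 -w 0 </etc/kubernetes/pki/ca.crt)
APISERVER=https://172.30.8.211:6443   # your kube-apiserver address
cat <<EOF >/etc/kubernetes/cloud-controller-manager.conf
kind: Config
contexts:
- context:
    cluster: kubernetes
    user: system:cloud-controller-manager
  name: system:cloud-controller-manager@kubernetes
current-context: system:cloud-controller-manager@kubernetes
users:
- name: system:cloud-controller-manager
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: $CA_DATA
    server: $APISERVER
  name: kubernetes
EOF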
  • Verify the file
[root@master ~]# cat /etc/kubernetes/cloud-controller-manager.conf
kind: Config
contexts:
- context:
    cluster: kubernetes
    user: system:cloud-controller-manager
  name: system:cloud-controller-manager@kubernetes
current-context: system:cloud-controller-manager@kubernetes
users:
- name: system:cloud-controller-manager
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1URXhOekF4TWpNd05sb1hEVE16TVRFeE5EQXhNak13Tmxvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTEpLClRkNThKR20rWTJ6b0ZKVTN4QWJrOEpuaTdleUtScW4yV045azhyZmRUTWZmM245OGxSQmxPK3hjTjRaeTl6QnIKMXRpOE16Z1RNTGhHZkNPbFZtZjE2ZGJmaGNzK3crU3Rtc21MT0ZhYVdwQVFnYmI1dzN6Qk1yVzhFbnBwMjNxWgo1Z3VUek5nSG9vOFhaZktybG16aURjZ3RXYmtrV3RZamhtZmdRZ0JUdWswQ1ZZY1BtRmVPQkxOMEZ5aWRpZERDCmhockFjMnE4TitFUUI4TUtrMXpGRkZZc0Fnenc4cnJ5dXd5dXM1ZC9KdlVzWm5CL3hhY1YxQ3J5VVVuQWNVUVEKaWpSblhLbXZBR1NCeFpEUXl4emxNc1NWbjZ6UWVjWnIwL3VrK05xM0J1N1h6T3VMSjBySWszZHA0ZGRrT0NtRgppUi9FQWdPdWtzb2pjOVdBOTBzQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZPeDU2eE1DUWlWYW4vS3Y2TmNXSU9vWnVwem5NQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBSzFSYU82YiticHVLU1QzVVFvMQo1SVpxOUw4Z0JuNVhuUlJRdkI2TXdhU2IxbnhqSnNQNXZmU2x3ektUUm1NMkFEL3hIYVBvTEVyVzVGY1lPalJoCmN5YlBKZjRYODFrczZaZzRTL1lYWGoyeXVsRXpSb3lLZEdicXFobnZYNk82b05Va0dhZzNzQjF1WC9jK3lpYVkKWklBbDhwdCt1T3BpOE9ZNmFRaEgwWEJGUlNYci9USS9KQUMvdkhjRXpHOWRJQzRJQlQvQzRuTjFFZm1TVWlPTAozRFBkT0EzSXlld3hMVHR0T2VjZmVab0p0b2lFV0dsbUdNRTUxLzE4VXcvYm5rakk0cW1Hditidng0Q0k4T0M5CjNUZE9sdVRxOFdYQkFjbU1RdlJOdXl5TVAvSWJSdm1mZUVmbmFMdWJORWlsRVJoZ0FROHJBOUpiTHRqM1JvWVkKclBNPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://172.30.8.211:6443
  name: kubernetes

6. Deploy the CloudProvider DaemonSet

  • Download the manifest
wget https://raw.githubusercontent.com/kubernetes/cloud-provider-alibaba-cloud/master/docs/examples/cloud-controller-manager.yml
  • Look up the cluster's service CIDR (the command below deliberately applies a Service with an out-of-range clusterIP; the API server rejects it with an error message naming the valid range, which sed then extracts)
SVCRANGE=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"tst"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
echo $SVCRANGE
  • Replace the ${CLUSTER_CIDR} placeholder in cloud-controller-manager.yml with the service CIDR obtained above
[root@master ~]# SVCRANGE=$(echo '{"apiVersion":"v1","kind":"Service","metadata":{"name":"tst"},"spec":{"clusterIP":"1.1.1.1","ports":[{"port":443}]}}' | kubectl apply -f - 2>&1 | sed 's/.*valid IPs is //')
[root@master ~]# echo $SVCRANGE
10.96.0.0/12
[root@master ~]# sed -i 's#cluster-cidr=${CLUSTER_CIDR}#cluster-cidr=10.96.0.0/12#g'  cloud-controller-manager.yml
  • Replace the ${ImageVersion} placeholder with the release to deploy
sed -i 's#${ImageVersion}#v2.1.0#g' cloud-controller-manager.yml
  • Change the nodeSelector, then label the node(s) that have the /etc/kubernetes/cloud-controller-manager.conf file
# Check the current selector
[root@master ~]# cat cloud-controller-manager.yml |grep -A1 nodeSelector
      nodeSelector:
        node-role.kubernetes.io/master: ""
# Change it to cloud-controller-manager: "true"
[root@master ~]#  sed -i 's#node-role.kubernetes.io/master: ""#cloud-controller-manager: "true"#g' cloud-controller-manager.yml
[root@master ~]# grep -A1 'nodeSelector'  cloud-controller-manager.yml
      nodeSelector:
        cloud-controller-manager: "true"
# Label the node(s) that have /etc/kubernetes/cloud-controller-manager.conf (here, the master)
[root@master ~]# kubectl label node master cloud-controller-manager="true"
node/master labeled
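If several nodes carry the file, here is a minimal sketch that labels them all, again assuming passwordless SSH by node name:

# Sketch: label every node that actually has the kubeconfig file
for node in $(kubectl get nodes -o jsonpath='{.items[*].metadata.name}'); do
  if ssh "$node" test -f /etc/kubernetes/cloud-controller-manager.conf; then
    kubectl label node "$node" cloud-controller-manager="true" --overwrite
  fi
done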
  • Apply the manifest (note: it contains its own cloud-config ConfigMap, so after applying it, re-apply the cloud-config created earlier)
[root@master ~]# kubectl apply -f  cloud-controller-manager.yml
clusterrole.rbac.authorization.k8s.io/system:cloud-controller-manager created
serviceaccount/cloud-controller-manager created
clusterrolebinding.rbac.authorization.k8s.io/system:cloud-controller-manager created
configmap/cloud-config configured
Warning: spec.template.metadata.annotations[scheduler.alpha.kubernetes.io/critical-pod]: non-functional in v1.16+; use the "priorityClassName" field instead
daemonset.apps/cloud-controller-manager created

# Re-apply the earlier cloud-config.yaml
[root@master ~]# kubectl apply -f  cloud-config.yaml
configmap/cloud-config unchanged

# Verify
[root@master ~]# kubectl get ds,pod -nkube-system|grep cloud-controller-manager
daemonset.apps/cloud-controller-manager   1         1         1       1            1           cloud-controller-manager=true   2m49s
pod/cloud-controller-manager-dv964             1/1     Running   0               67s
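If the pod does not reach Running, the controller's own logs are the first place to look; a quick sketch (kubectl picks one pod from the DaemonSet):

# Sketch: inspect cloud-controller-manager startup logs
kubectl -n kube-system logs ds/cloud-controller-manager --tail=50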

7. Testing with an SLB

1. An SLB instance has already been created in the console

2. Create a test nginx Deployment

[root@master ~]# cat example-app-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
      - name: example-app
        image: nginx:alpine
        ports:
        - name: web
          containerPort: 80
[root@master ~]# kubectl  apply -f example-app-deploy.yaml
deployment.apps/example-app created
[root@master ~]# kubectl get pod
NAME                                      READY   STATUS    RESTARTS        AGE
example-app-56c9b6d95b-rs47h              1/1     Running   0               6s

3. Create a Service of type LoadBalancer

apiVersion: v1
kind: Service
metadata:
  annotations:
    service.beta.kubernetes.io/alicloud-loadbalancer-id: lb-xxxxxxxxxxxx  # SLB instance ID
  name: slb
  namespace: default
spec:
  selector:
    app: example-app
  ports:
  - name: web
    port: 80
    protocol: TCP
  type: LoadBalancer
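When an existing SLB is reused like this, the CCM by default leaves the instance's listeners alone, which is why step 5 below creates the port-80 listener by hand. A hedged alternative: the alicloud provider documents a force-override-listeners annotation that lets the CCM manage the listeners itself; verify it against your CCM version before relying on it, and note that it overwrites any listeners already configured on the instance.

# Sketch (assumes the annotation is supported by this CCM version):
# let the CCM manage listeners on the reused SLB
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: slb
  namespace: default
  annotations:
    service.beta.kubernetes.io/alicloud-loadbalancer-id: lb-xxxxxxxxxxxx
    service.beta.kubernetes.io/alicloud-loadbalancer-force-override-listeners: "true"
spec:
  selector:
    app: example-app
  ports:
  - name: web
    port: 80
    protocol: TCP
  type: LoadBalancer
EOF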

4. Apply and verify

[root@master ~]# kubectl  apply -f slb-svc.yaml
service/slb created
[root@master ~]# kubectl get svc
NAME         TYPE           CLUSTER-IP     EXTERNAL-IP      PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1      <none>           443/TCP        6h22m
slb          LoadBalancer   10.107.85.64   47.108.213.197   80:30505/TCP   3s

# Access test
[root@master ~]# curl  47.108.213.197
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
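If EXTERNAL-IP stays in <pending> instead, the Service's events and the controller logs usually explain why; a quick sketch:

# Sketch: debug a LoadBalancer Service stuck in <pending>
kubectl describe svc slb
kubectl -n kube-system logs ds/cloud-controller-manager --tail=50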

5. Check the SLB configuration in the console

  • A new virtual server group has been created
  • The SLB is not yet reachable from the public network
  • Manually create a listener for port 80
  • Attach the automatically created virtual server group to the listener
  • Test again