K8s-1.29.2二进制安装-第二章(K8s及ETCD下载及安装)

K8s二进制安装:主要内容是安装k8s及安装etcd,并生成了K8s和etcd需要的证书文件(文章结束后会把使用到的容器镜像及工具一并共享)

1.安装 K8S 和 ETCD 二进制文件

shell 复制代码
# Download the release tarballs (etcd v3.5.12, Kubernetes v1.29.2).
wget https://github.com/etcd-io/etcd/releases/download/v3.5.12/etcd-v3.5.12-linux-amd64.tar.gz
wget https://cdn.dl.k8s.io/release/v1.29.2/kubernetes-server-linux-amd64.tar.gz

# Extract only the needed k8s server binaries straight into /usr/local/bin
# (--strip-components=3 drops the kubernetes/server/bin/ path prefix).
tar -xf kubernetes-server-linux-amd64.tar.gz  --strip-components=3 -C /usr/local/bin kubernetes/server/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy}

# Extract etcd and install etcd + etcdctl with a single mv (brace expansion
# replaces the original two separate mv commands).
tar -xf etcd*.tar.gz && mv etcd-*/etcd{,ctl} /usr/local/bin/

# 查看/usr/local/bin下内容
[root@server170 ~]# ll /usr/local/bin/
总用量 543048
-rwxr-xr-x 1 528287 89939  23543808  1月 31  2024 etcd
-rwxr-xr-x 1 528287 89939  17743872  1月 31  2024 etcdctl
-rwxr-xr-x 1 root   root  123719680  2月 14  2024 kube-apiserver
-rwxr-xr-x 1 root   root  118349824  2月 14  2024 kube-controller-manager
-rwxr-xr-x 1 root   root   49704960  2月 14  2024 kubectl
-rwxr-xr-x 1 root   root  111812608  2月 14  2024 kubelet
-rwxr-xr-x 1 root   root   55263232  2月 14  2024 kube-proxy
-rwxr-xr-x 1 root   root   55943168  2月 14  2024 kube-scheduler

# 查看版本
kubelet --version
 Kubernetes v1.29.2

etcdctl version
 etcdctl version: 3.5.12
 API version: 3.5
 
# Define the node lists as arrays so every expansion can be safely quoted
# (avoids accidental word-splitting/globbing — ShellCheck SC2086/SC2068).
Master=(server171 server172)
Work=(server173 server174)

# Copy the master components (control-plane binaries + etcd) to each master.
for NODE in "${Master[@]}"; do
  echo "$NODE"
  scp /usr/local/bin/kube{let,ctl,-apiserver,-controller-manager,-scheduler,-proxy} "$NODE":/usr/local/bin/
  scp /usr/local/bin/etcd* "$NODE":/usr/local/bin/
done

# Copy the worker components (kubelet + kube-proxy only).
for NODE in "${Work[@]}"; do
  echo "$NODE"
  scp /usr/local/bin/kube{let,-proxy} "$NODE":/usr/local/bin/
done

# Run on every node: directory for CNI plugin binaries.
mkdir -p /opt/cni/bin

2.相关证书生成

2.1 安装证书工具

shell 复制代码
# On master01: fetch the cfssl certificate tooling (v1.6.4) and make it
# executable. Both binaries come from the same release, so loop over the names.
for tool in cfssl cfssljson; do
  wget "https://github.com/cloudflare/cfssl/releases/download/v1.6.4/${tool}_1.6.4_linux_amd64" -O "/usr/local/bin/${tool}"
done

chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson

2.2 生成 ETCD 证书(证书在 master01 节点生成,随后分发至其它 master 节点)

shell 复制代码
# Create the etcd certificate directory and work from inside it.
mkdir /etc/etcd/ssl -p
cd /etc/etcd/ssl

# cfssl signing policy. The "kubernetes" profile issues certificates valid for
# both server auth and client auth, expiring in 876000h (~100 years).
# Generated on master01; the resulting certs are copied to the other masters below.
cat > ca-config.json << EOF 
{
 "signing": {
  "default": {
   "expiry": "876000h"
  },
  "profiles": {
   "kubernetes": {
    "usages": [
      "signing",
      "key encipherment",
      "server auth",
      "client auth"
    ],
    "expiry": "876000h"
   }
  }
 }
}
EOF

# CSR for the etcd CA itself; the CA certificate also expires in 876000h (~100 years).
cat > etcd-ca-csr.json  << EOF 
{
 "CN": "etcd",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "etcd",
   "OU": "Etcd Security"
  }
 ],
 "ca": {
  "expiry": "876000h"
 }
}
EOF

# Generate the self-signed etcd CA (produces etcd-ca.pem / etcd-ca-key.pem).
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare /etc/etcd/ssl/etcd-ca

# CSR for the etcd server/peer certificate. No "hosts" field here — the
# SANs are supplied via -hostname on the signing command below.
cat > etcd-csr.json << EOF 
{
 "CN": "etcd",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "etcd",
   "OU": "Etcd Security"
  }
 ]
}
EOF


# Sign the etcd certificate with the etcd CA generated above. -hostname lists
# every SAN the cert must cover: loopback plus the three members' hostnames and IPs.
cfssl gencert -ca=/etc/etcd/ssl/etcd-ca.pem -ca-key=/etc/etcd/ssl/etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,server170,server171,server172,192.168.1.170,192.168.1.171,192.168.1.172 -profile=kubernetes etcd-csr.json | cfssljson -bare /etc/etcd/ssl/etcd

# Copy the etcd certificates to the other master nodes.
# Uses an array and quoted expansions (ShellCheck SC2086) instead of the
# original space-separated string with unquoted $NODE/$FILE.
Master=(server171 server172)

for NODE in "${Master[@]}"; do
  ssh "$NODE" "mkdir -p /etc/etcd/ssl"
  for FILE in etcd-ca-key.pem etcd-ca.pem etcd-key.pem etcd.pem; do
    scp "/etc/etcd/ssl/${FILE}" "$NODE:/etc/etcd/ssl/${FILE}"
  done
done

2.3 生成 K8S 相关证书(特别说明除外,以下操作在所有 master 节点操作)

shell 复制代码
mkdir -p /etc/kubernetes/pki
cd /etc/kubernetes/pki
# Generate the Kubernetes cluster root CA on the master node.
# CSR for the cluster CA; the CA certificate expires in 876000h (~100 years).
cat > ca-csr.json  << EOF 
{
 "CN": "kubernetes",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "Kubernetes",
   "OU": "Kubernetes-manual"
  }
 ],
 "ca": {
  "expiry": "876000h"
 }
}
EOF

# Generate the self-signed cluster CA (produces ca.pem / ca-key.pem).
cfssl gencert -initca ca-csr.json | cfssljson -bare /etc/kubernetes/pki/ca

# CSR for the kube-apiserver serving certificate (SANs are passed via
# -hostname on the signing command below).
cat > apiserver-csr.json << EOF 
{
 "CN": "kube-apiserver",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "Kubernetes",
   "OU": "Kubernetes-manual"
  }
 ]
}
EOF

# Signing policy (same content as the one written for the etcd certs):
# "kubernetes" profile allows server+client auth, expiry 876000h (~100 years).
cat > ca-config.json << EOF 
{
 "signing": {
  "default": {
   "expiry": "876000h"
  },
  "profiles": {
   "kubernetes": {
    "usages": [
      "signing",
      "key encipherment",
      "server auth",
      "client auth"
    ],
    "expiry": "876000h"
   }
  }
 }
}
EOF


# Sign the apiserver certificate. SANs include 10.96.0.1 (the kubernetes
# service ClusterIP), the in-cluster DNS names, and node IPs 192.168.1.170-180.
# NOTE(review): 192.168.10.16 does not match this cluster's 192.168.1.x
# addressing, and xxx.top/xxx.com/xxx.cn look like placeholder domains —
# confirm or remove these SANs before production use.
cfssl gencert  -ca=/etc/kubernetes/pki/ca.pem -ca-key=/etc/kubernetes/pki/ca-key.pem  -config=ca-config.json -hostname=10.96.0.1,192.168.10.16,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,xxx.top,xxx.com,xxx.cn,192.168.1.170,192.168.1.171,192.168.1.172,192.168.1.173,192.168.1.174,192.168.1.175,192.168.1.176,192.168.1.177,192.168.1.178,192.168.1.179,192.168.1.180 -profile=kubernetes  apiserver-csr.json | cfssljson -bare /etc/kubernetes/pki/apiserver


# Generate the apiserver aggregation-layer (front-proxy) certificates.
# This CA is deliberately separate from the cluster CA; expiry 876000h (~100 years).
cat > front-proxy-ca-csr.json  << EOF 
{
 "CN": "kubernetes",
 "key": {
   "algo": "rsa",
   "size": 2048
 },
 "ca": {
  "expiry": "876000h"
 }
}
EOF

# Generate the self-signed front-proxy CA.
cfssl gencert  -initca front-proxy-ca-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-ca 

# CSR for the client cert the apiserver presents to aggregated API servers
# (referenced later by --proxy-client-cert-file / --proxy-client-key-file).
cat > front-proxy-client-csr.json  << EOF 
{
 "CN": "front-proxy-client",
 "key": {
   "algo": "rsa",
   "size": 2048
 }
}
EOF

# Sign the front-proxy client certificate with the front-proxy CA.
cfssl gencert  \
-ca=/etc/kubernetes/pki/front-proxy-ca.pem  \
-ca-key=/etc/kubernetes/pki/front-proxy-ca-key.pem  \
-config=ca-config.json  \
-profile=kubernetes  front-proxy-client-csr.json | cfssljson -bare /etc/kubernetes/pki/front-proxy-client

# Generate the kube-controller-manager certificate.
# CN/O "system:kube-controller-manager" matches the built-in RBAC
# user/group kube-apiserver authorizes for the controller manager.
cat > manager-csr.json << EOF 
{
 "CN": "system:kube-controller-manager",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "system:kube-controller-manager",
   "OU": "Kubernetes-manual"
  }
 ]
}
EOF

cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  manager-csr.json | cfssljson -bare /etc/kubernetes/pki/controller-manager

# Build controller-manager.kubeconfig (cluster, context, credentials, default context).
# NOTE(review): 127.0.0.1:8443 presumably points at a local HA proxy in front
# of the apiservers — confirm against the load-balancer setup chapter.
kubectl config set-cluster kubernetes \
   --certificate-authority=/etc/kubernetes/pki/ca.pem \
   --embed-certs=true \
   --server=https://127.0.0.1:8443 \
   --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# (Setting the context before the credentials is harmless: kubectl config
# writes each entry independently.)
kubectl config set-context system:kube-controller-manager@kubernetes \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=/etc/kubernetes/pki/controller-manager.pem \
  --client-key=/etc/kubernetes/pki/controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager@kubernetes \
   --kubeconfig=/etc/kubernetes/controller-manager.kubeconfig

# Generate the kube-scheduler certificate.
# CN/O "system:kube-scheduler" matches the built-in RBAC identity for the scheduler.
cat > scheduler-csr.json << EOF 
{
 "CN": "system:kube-scheduler",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "system:kube-scheduler",
   "OU": "Kubernetes-manual"
  }
 ]
}
EOF

cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  scheduler-csr.json | cfssljson -bare /etc/kubernetes/pki/scheduler

# Build scheduler.kubeconfig against the local apiserver endpoint.
kubectl config set-cluster kubernetes \
   --certificate-authority=/etc/kubernetes/pki/ca.pem \
   --embed-certs=true \
   --server=https://127.0.0.1:8443 \
   --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
   --client-certificate=/etc/kubernetes/pki/scheduler.pem \
   --client-key=/etc/kubernetes/pki/scheduler-key.pem \
   --embed-certs=true \
   --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config set-context system:kube-scheduler@kubernetes \
   --cluster=kubernetes \
   --user=system:kube-scheduler \
   --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

kubectl config use-context system:kube-scheduler@kubernetes \
   --kubeconfig=/etc/kubernetes/scheduler.kubeconfig

# Generate the cluster-admin client certificate.
# O "system:masters" maps to the super-user group bound to cluster-admin.
cat > admin-csr.json << EOF 
{
 "CN": "admin",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "system:masters",
   "OU": "Kubernetes-manual"
  }
 ]
}
EOF

cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare /etc/kubernetes/pki/admin

# Build admin.kubeconfig (used for kubectl access to the cluster).
kubectl config set-cluster kubernetes   \
 --certificate-authority=/etc/kubernetes/pki/ca.pem   \
 --embed-certs=true   \
 --server=https://127.0.0.1:8443   \
 --kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config set-credentials kubernetes-admin  \
 --client-certificate=/etc/kubernetes/pki/admin.pem   \
 --client-key=/etc/kubernetes/pki/admin-key.pem   \
 --embed-certs=true   \
 --kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config set-context kubernetes-admin@kubernetes   \
 --cluster=kubernetes   \
 --user=kubernetes-admin   \
 --kubeconfig=/etc/kubernetes/admin.kubeconfig

kubectl config use-context kubernetes-admin@kubernetes  --kubeconfig=/etc/kubernetes/admin.kubeconfig

# Create the kube-proxy certificate.
# CN/O "system:kube-proxy" matches the built-in RBAC identity for kube-proxy.
cat > kube-proxy-csr.json  << EOF 
{
 "CN": "system:kube-proxy",
 "key": {
  "algo": "rsa",
  "size": 2048
 },
 "names": [
  {
   "C": "CN",
   "ST": "Beijing",
   "L": "Beijing",
   "O": "system:kube-proxy",
   "OU": "Kubernetes-manual"
  }
 ]
}
EOF

cfssl gencert \
  -ca=/etc/kubernetes/pki/ca.pem \
  -ca-key=/etc/kubernetes/pki/ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-proxy-csr.json | cfssljson -bare /etc/kubernetes/pki/kube-proxy

# Build kube-proxy.kubeconfig against the local apiserver endpoint.
kubectl config set-cluster kubernetes   \
 --certificate-authority=/etc/kubernetes/pki/ca.pem   \
 --embed-certs=true   \
 --server=https://127.0.0.1:8443   \
 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy  \
 --client-certificate=/etc/kubernetes/pki/kube-proxy.pem   \
 --client-key=/etc/kubernetes/pki/kube-proxy-key.pem   \
 --embed-certs=true   \
 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config set-context kube-proxy@kubernetes   \
 --cluster=kubernetes   \
 --user=kube-proxy   \
 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

kubectl config use-context kube-proxy@kubernetes  --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig

# Create the ServiceAccount token signing key pair (consumed by
# kube-apiserver --service-account-key-file and kube-controller-manager
# --service-account-private-key-file).
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub

# Distribute certificates and kubeconfigs to the other master nodes.
# Fixes vs. original: node names match this cluster (server171/server172,
# not k8s-master02/03), the target directory is created over ssh, no
# `ls` parsing (ShellCheck SC2012), and all expansions are quoted (SC2086).
for NODE in server171 server172; do
  ssh "$NODE" "mkdir -p /etc/kubernetes/pki"
  for FILE in /etc/kubernetes/pki/*; do
    case "${FILE##*/}" in etcd*) continue ;; esac  # etcd certs are distributed separately
    scp "$FILE" "$NODE:$FILE"
  done
  for FILE in admin.kubeconfig controller-manager.kubeconfig scheduler.kubeconfig; do
    scp "/etc/kubernetes/${FILE}" "$NODE:/etc/kubernetes/${FILE}"
  done
done

# Verify the generated certificates
ls /etc/kubernetes/pki/

3.ETCD 配置

shell 复制代码
# etcd member configuration. The original kept three hand-edited copies of
# this 47-line file (Server170/171/172) that differ ONLY in member name and
# IP — a classic copy-paste hazard. Derive both from the local host instead
# and run this once on each of server170/server171/server172.
declare -A ETCD_IP=(
  [server170]=192.168.1.170
  [server171]=192.168.1.171
  [server172]=192.168.1.172
)
HOST=$(hostname -s)
IP=${ETCD_IP[$HOST]:?unknown etcd member "$HOST" - must be server170/171/172}

# NOTE: the cert paths below reference /etc/kubernetes/pki/etcd, which is
# symlinked to /etc/etcd/ssl later in this document — create that link
# before starting etcd.
cat > /etc/etcd/etcd.config.yml << EOF 
name: '$HOST'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://$IP:2380'
listen-client-urls: 'https://$IP:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://$IP:2380'
advertise-client-urls: 'https://$IP:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'server170=https://192.168.1.170:2380,server171=https://192.168.1.171:2380,server172=https://192.168.1.172:2380'
initial-cluster-token: 'etcd-k8s-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
  key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF


# Create the etcd systemd unit (run on all master nodes).
# Type=notify: etcd signals systemd readiness via sd_notify.
cat > /usr/lib/systemd/system/etcd.service << EOF

[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Alias=etcd3.service

EOF

# Create the etcd cert directory on all 3 masters and link the certs from
# /etc/etcd/ssl to the path referenced in etcd.config.yml.
mkdir -p /etc/kubernetes/pki/etcd     # -p: does not fail if the dir already exists (rerun-safe)

ln -sf /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/   # -f: replace existing links on rerun

systemctl daemon-reload
systemctl enable --now etcd.service
systemctl status etcd.service

# Check etcd cluster health.
# To use IPv6, simply replace the IPv4 addresses with IPv6 ones.
export ETCDCTL_API=3
etcdctl --endpoints="192.168.1.170:2379,192.168.1.171:2379,192.168.1.172:2379" --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem  endpoint status --write-out=table

# 输出结果
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|      ENDPOINT      |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| 192.168.1.170:2379 | c1621f2f11fc33f9 |  3.5.12 |   20 kB |      true |      false |         2 |          9 |                  9 |        |
| 192.168.1.171:2379 | 14f18e5e057e3164 |  3.5.12 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
| 192.168.1.172:2379 | 225a00eef92e8f19 |  3.5.12 |   20 kB |     false |      false |         2 |          9 |                  9 |        |
+--------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
相关推荐
Gold Steps.2 小时前
Alloy+Loki+Minio+Grafana云原生K8S日志收集方案
云原生·kubernetes·grafana
会飞的小蛮猪10 小时前
K8s-1.29.2二进制安装-第一章
云原生·容器·kubernetes
凯新生物13 小时前
mPEG-SS-PLGA-DTX:智能药物递送系统
eureka·flink·ffmpeg·etcd
树下水月15 小时前
docker 启动后 如何通过对应的进程 找docker-compose.yaml 编排文件
运维·docker·容器
平行云15 小时前
实时云渲染支持在网页上运行UE5开发的3A大作Lyra项目
unity·云原生·ue5·webgl·虚拟现实·实时云渲染·像素流送
凯子坚持 c15 小时前
Docker 网络管理深度解析与实践指南
运维·docker·容器
kevin_水滴石穿15 小时前
在镜像生成时从内网获取字体安装包并配置
linux·docker·容器
阿里云云原生16 小时前
AI 原生落地成果获认可,阿里云云原生多项案例入选信通院「AI 云」典型示范
云原生
LILR_16 小时前
简单学docker
运维·docker·容器