Operations in a Kubernetes (k8s) environment

9.12 k8s

Deploying Calico

# ls
  anaconda-ks.cfg  k8s-ha-install  kubeadm-config.yaml  new.yaml  token
  # switch the git branch
  [root@k8s-master ~]# cd k8s-ha-install/
  [root@k8s-master k8s-ha-install]# git checkout manual-installation-v1.28.x && cd calico/
  Branch 'manual-installation-v1.28.x' set up to track remote branch 'manual-installation-v1.28.x' from 'origin'.
  Switched to a new branch 'manual-installation-v1.28.x'
  [root@k8s-master calico]# ls
  calico.yaml
  [root@k8s-master calico]# pwd
  /root/k8s-ha-install/calico
  [root@k8s-master calico]# cat ~/new.yaml | grep Sub
    podSubnet: 172.16.0.0/16
    serviceSubnet: 10.96.0.0/16
    
  [root@k8s-master calico]# vim calico.yaml 
  # Edit the config file: replace POD_CIDR with 172.16.0.0/16 (around line 4801)
  4801               value: "172.16.0.0/16"
  [root@k8s-master calico]# kubectl get po -A
  NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
  kube-system   coredns-6554b8b87f-m5wnb             0/1     Pending   0          94m
  kube-system   coredns-6554b8b87f-zz9cb             0/1     Pending   0          94m
  kube-system   etcd-k8s-master                      1/1     Running   0          94m
  kube-system   kube-apiserver-k8s-master            1/1     Running   0          94m
  kube-system   kube-controller-manager-k8s-master   1/1     Running   0          94m
  kube-system   kube-proxy-gtt6v                     1/1     Running   0          94m
  kube-system   kube-proxy-snr8v                     1/1     Running   0          59m
  kube-system   kube-proxy-z5hrs                     1/1     Running   0          59m
  kube-system   kube-scheduler-k8s-master            1/1     Running   0          94m
  # create the Calico pods
  [root@k8s-master calico]# kubectl apply -f calico.yaml
  # view the calico-node logs
  [root@k8s-master calico]# kubectl logs calico-node-9jp9m -n kube-system
  # if a pod has problems, check the system log on its node
  [root@k8s-node01 ~]# vim /var/log/messages
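
The POD_CIDR edit can also be scripted instead of done by hand in vim, and the rollout watched until every calico-node pod is ready. A minimal sketch, using the same subnet as above:

  # Confirm the placeholder, substitute the pod subnet, apply, and watch the rollout.
  grep -n POD_CIDR calico.yaml
  sed -i 's#POD_CIDR#172.16.0.0/16#g' calico.yaml
  kubectl apply -f calico.yaml
  kubectl rollout status daemonset calico-node -n kube-system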

Update and reboot all three machines

# yum -y update
# reboot

Check the containers and nodes; once everything reports Running and Ready, the cluster is basically in good shape

# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   19h   v1.28.2
node1    Ready    <none>          19h   v1.28.2
node2    Ready    <none>          19h   v1.28.2
# kubectl get po -A
NAMESPACE     NAME                                       READY   STATUS
kube-system   calico-kube-controllers-6d48795585-hm9q7   1/1     Running
kube-system   calico-node-jcg6z                          1/1     Running
kube-system   calico-node-kpjnw                          1/1     Running
kube-system   calico-node-wkkcb                          1/1     Running
kube-system   coredns-6554b8b87f-5lt5x                   1/1     Running
kube-system   coredns-6554b8b87f-dqx6t                   1/1     Running
kube-system   etcd-master                                1/1     Running
kube-system   kube-apiserver-master                      1/1     Running
kube-system   kube-controller-manager-master             1/1     Running
kube-system   kube-proxy-5rwvt                           1/1     Running
kube-system   kube-proxy-5x555                           1/1     Running
kube-system   kube-proxy-g79tw                           1/1     Running
kube-system   kube-scheduler-master                      1/1     Running

Creating a pod

 # add a new pod
  [root@k8s-master calico]# kubectl run nginx0 --image=nginx
  pod/nginx0 created
  [root@k8s-master calico]# kubectl get po -Aowide|grep nginx
  # view the logs
  [root@k8s-master calico]# kubectl logs nginx0
  Error from server (BadRequest): container "nginx0" in pod "nginx0" is waiting to start: trying and failing to pull image
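
When a pod is stuck waiting on an image pull like this, kubectl describe usually shows the concrete pull error (registry, tag, or network) in the events at the bottom:

  # The Events section explains why the image pull keeps failing.
  [root@k8s-master calico]# kubectl describe pod nginx0 | tail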

Deleting a pod

 [root@k8s-master calico]# kubectl delete pod nginx0
  pod "nginx0" deleted
  [root@k8s-master calico]# kubectl get po -Aowide|grep nginx

Deploying the metrics server

Copy the certificate to all nodes

# scp /etc/kubernetes/pki/front-proxy-ca.crt  node1:/etc/kubernetes
The authenticity of host 'node1 (192.168.1.12)' can't be established.
ECDSA key fingerprint is SHA256:donghBpnwWMN6JmjNdCNwYJP179r2qC20tk
ECDSA key fingerprint is MD5:ec:83:ce:f2:5b:6c:ee:2a:04:80:86:48:ad
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node1' (ECDSA) to the list of known hosts.
front-proxy-ca.crt
# scp /etc/kubernetes/pki/front-proxy-ca.crt  node2:
The authenticity of host 'node2 (192.168.1.13)' can't be established.
ECDSA key fingerprint is SHA256:donghBpnwWMN6JmjNdCNwYJP179r2qC20tk
ECDSA key fingerprint is MD5:ec:83:ce:f2:5b:6c:ee:2a:04:80:86:48:ad
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node2' (ECDSA) to the list of known hosts.
front-proxy-ca.crt
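
With more nodes the copy is easy to loop; a minimal sketch, assuming the node names resolve over SSH and the same destination path:

  # Copy the front-proxy CA certificate to every node in one loop.
  for node in node1 node2; do
      scp /etc/kubernetes/pki/front-proxy-ca.crt ${node}:/etc/kubernetes
  done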

Installing the metrics server

 [root@k8s-master ~]# ls components.yaml 
  components.yaml
  [root@k8s-master ~]# mkdir pods
  [root@k8s-master ~]# mv components.yaml pods/
  [root@k8s-master ~]# cd pods/
  [root@k8s-master pods]# ls
  components.yaml
  [root@k8s-master pods]# cat components.yaml | wc -l
  202
  # create the metrics server pod resources
  [root@k8s-master pods]# kubectl create -f components.yaml 
  # check the metrics server pod status in the kube-system namespace
  [root@k8s-master pods]# kubectl get po -A|grep metrics
  kube-system   metrics-server-79776b6d54-dmwk6            1/1     Running   0             2m26s

Viewing node resource usage

 # view the nodes' system resource usage
  [root@k8s-master pods]# kubectl top nodes
  NAME         CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
  k8s-master   151m         7%     1099Mi          63%       
  k8s-node01   40m          4%     467Mi           53%       
  k8s-node02   39m          3%     483Mi           55%       
  [root@k8s-master pods]# kubectl top pods -A
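
On busier clusters kubectl top can also sort its output, which makes the heaviest consumers easy to spot; for example:

  # Sort pods by CPU usage across all namespaces (--sort-by accepts cpu or memory).
  kubectl top pods -A --sort-by=cpu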

Setting up the dashboard

1. Install the dashboard

--cd /root/k8s-ha-install/dashboard

--ls

dashboard-user.yaml dashboard.yaml

--kubectl create -f .

2. Set the Service type

--kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

..

type: NodePort

..

--kubectl get svc kubernetes-dashboard -n kubernetes-dashboard
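
If only the assigned NodePort is needed for the browser URL below, jsonpath can extract it; an illustrative one-liner:

  # Print just the NodePort assigned to the dashboard Service.
  kubectl get svc kubernetes-dashboard -n kubernetes-dashboard -o jsonpath='{.spec.ports[0].nodePort}'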

Then open the dashboard in a browser at https://<node-ip>:<NodePort>.

3. Get a token

--kubectl create token admin-user -n kube-system

eyJhbGciOiJSUzI1NiIsImtpZCI6ImhvcW5UMVFUQzhtamNrcHEyWnFVV3R0aGMtTFRfOF9GeEFOdVVOeS11c2MifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiXSwiZXhwIjoxNzI2MTI1Mjk5LCJpYXQiOjE3MjYxMjE2OTksImlzcyI6Imh0dHBzOi8va3ViZXJuZXRlcy5kZWZhdWx0LnN2Yy5jbHVzdGVyLmxvY2FsIiwia3ViZXJuZXRlcy5pbyI6eyJuYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhZG1pbi11c2VyIiwidWlkIjoiYzJlYWI4ZTgtYTMyMC00NTI4LTgyOGYtMzk5NmNmZjkxODU1In19LCJuYmYiOjE3MjYxMjE2OTksInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphZG1pbi11c2VyIn0.NpPA6L4XeXIDIZbm8aPVYYXLDSwEZvjhaz_urNbW-12y9CqHc4h66PDOhUPr1v0qqIXPOHA9jHF25EwGDk3QtNmtV5-MR8Te-n7rV-K_oM1QZNFvsQiit9nFlbvu7FuxxkyY_YjfW1IhWf1KuEsln_XOHGRHTMwxKN8xKUqFNjZTAc8UMKTp0hLEsf9Mi0oxxfHnd93tjxjyDhUDGxdFZOd2YNZGA-EWaPMuRcc5PdW3-5FIXUK12HZB7XT-X7R8uxhpboZuoO60Rxh-HPcz_mhNElAr0pDlzBcQeISVbqS5RaAtnKKuNEF5oouCifcMwCvtD137Hsuysn3379vZQg
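
The token printed above is relatively short-lived. kubectl create token accepts a --duration flag if a longer-lived one is wanted; a sketch:

  # Request a token valid for 24 hours instead of the default lifetime.
  kubectl create token admin-user -n kube-system --duration=24h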

Apply a rolling update to kube-proxy

--kubectl patch daemonset kube-proxy -p "{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"date\":\"$(date +'%s')\"}}}}}" -n kube-system
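
Changing an annotation on the pod template makes the DaemonSet roll every kube-proxy pod; the restart can be watched with the standard rollout commands:

  # Wait for the kube-proxy DaemonSet to finish rolling out, then list its pods.
  kubectl rollout status daemonset kube-proxy -n kube-system
  kubectl get po -n kube-system -l k8s-app=kube-proxy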

Access test

--curl 127.0.0.1:10249/proxyMode

ipvs

Verify the nodes

--kubectl get node

NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   23h   v1.28.2
node1    Ready    <none>          22h   v1.28.2
node2    Ready    <none>          22h   v1.28.2

View the Service network segment

--kubectl get svc

NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   23h

View the Service and pod/host network segments

--kubectl get svc

--kubectl get po -Aowide

Test creating a deployment with arguments

--kubectl create deploy cluster-test --image=registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools -- sleep 3066

Access port 443 (the kubernetes Service) and port 53 (the DNS Service)

--curl -k https://10.96.0.1:443

--curl http://10.96.0.10:53

# test creating a deployment with arguments
[root@master ~]# kubectl create deploy cluster-test1 --image=registry.cn-beijing.aliyuncs.com/dotbalo/debug-tools -- sleep 3600
deployment.apps/cluster-test1 created
[root@master ~]# kubectl get po -A|grep cluster-test1
default                cluster-test1-54575cf56c-92grp               1/1     Running            0                7s
# exec into the created pod
[root@master ~]# kubectl  exec -it cluster-test1-54575cf56c-92grp -- bash
(07:29 cluster-test1-54575cf56c-92grp:/) ifconfig
eth0      Link encap:Ethernet  HWaddr f6:21:45:f6:45:29  
          inet addr:172.16.104.8  Bcast:0.0.0.0  Mask:255.255.255.255
          inet6 addr: fe80::f421:45ff:fef6:4529/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1480  Metric:1
          RX packets:5 errors:0 dropped:0 overruns:0 frame:0
          TX packets:8 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:446 (446.0 B)  TX bytes:656 (656.0 B)
lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1000 
          RX bytes:0 (0.0 B)  TX bytes:0 (0.0 B)
(1 07:29 cluster-test1-54575cf56c-92grp:/) nslookup kubernetes
Server:     10.96.0.10
Address:    10.96.0.10#53
Name:   kubernetes.default.svc.cluster.local
Address: 10.96.0.1
(07:30 cluster-test1-54575cf56c-92grp:/) nslookup kube-dns.kube-system
Server:     10.96.0.10
Address:    10.96.0.10#53
Name:   kube-dns.kube-system.svc.cluster.local
Address: 10.96.0.10
(07:30 cluster-test1-54575cf56c-92grp:/) exit
exit
# access port 443 of the API server Service and port 53 of the DNS Service
[root@master ~]# curl -k https://10.96.0.1:443
{
  "kind": "Status",
  "apiVersion": "v1",
  "metadata": {},
  "status": "Failure",
  "message": "forbidden: User \"system:anonymous\" cannot get path \"/\"",
  "reason": "Forbidden",
  "details": {},
  "code": 403
}
[root@master ~]# curl  http://10.96.0.10:53
curl: (52) Empty reply from server

kubectl auto-completion

Common commands

1. Auto-completion

--yum -y install bash-completion //install bash completion

--source <(kubectl completion bash)

Create a pod:

[root@k8s-master ~]# kubectl run nginx1 --image nginx
pod/nginx1 created
[root@k8s-master ~]# kubectl get po -A

Delete the pod:

[root@k8s-master ~]# kubectl delete pod nginx1
pod "nginx1" deleted

--echo "source <(kubectl completion bash)" >>~/.bashrc //设置开机自启

2. Basic commands

 # delete the pod
  [root@k8s-master ~]# kubectl delete pod cluster-test-64b7b9cbf-jjmmh
  pod "cluster-test-64b7b9cbf-jjmmh" deleted
  # the pod is still there: its Deployment recreated it
  [root@k8s-master ~]# kubectl get po -A|grep cluster-test
  default                cluster-test-64b7b9cbf-dnn2m                 0/1     ContainerCreating   0               20s
  default                cluster-test0-58689d5d5d-qr4mv               1/1     Running             0               34m
  # delete it via the Deployment instead
  [root@k8s-master ~]# kubectl delete deployment cluster-test
  deployment.apps "cluster-test" deleted
  # now it is gone
  [root@k8s-master ~]# kubectl get po -A|grep cluster-test
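
Deleting a single pod owned by a Deployment never sticks, because its ReplicaSet restores the replica count. Besides deleting the Deployment, scaling it to zero also stops the pods while keeping the object; illustrated with the cluster-test0 Deployment from above:

  # Scale to zero: the pods are removed but the Deployment itself is kept.
  kubectl scale deployment cluster-test0 --replicas=0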

Writing a YAML file to create a pod

# vim pods/abc.yaml
  apiVersion: v1
  kind: Pod
  metadata:
    name: busybox-sleep
  spec:
    containers:
    - name: busybox
      image: busybox:1.28
      args:
      - sleep
      - "1000"
  [root@k8s-master ~]# cd pods/
  [root@k8s-master pods]# ls
  abc.yaml  components.yaml
  [root@k8s-master pods]# kubectl create -f abc.yaml
  pod/busybox-sleep created
  [root@k8s-master pods]# kubectl get po -A|grep busybox-sleep
  default                busybox-sleep                                1/1     Running   0               3s
  [root@k8s-master pods]# kubectl delete pod busybox-sleep
  pod "busybox-sleep" deleted
  [root@k8s-master pods]# kubectl get po -A|grep busy
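
The same manifest can also be managed declaratively: kubectl apply creates the pod if it is absent and updates it after later edits, and delete -f removes exactly what the file defines:

  # Create or update from the manifest, then delete what it declared.
  kubectl apply -f abc.yaml
  kubectl delete -f abc.yaml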

Writing a JSON file

vim pods/abc.json

{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {
    "name": "busybox-sleep000"
  },
  "spec": {
    "containers": [
      {
        "name": "busybox000",
        "image": "busybox:1.28",
        "args": [
          "sleep",
          "1000"
        ]
      }
    ]
  }
}
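
kubectl accepts JSON manifests the same way as YAML, so the file is used identically:

  # Create the pod from the JSON manifest and verify it is running.
  [root@k8s-master ~]# kubectl create -f pods/abc.json
  [root@k8s-master ~]# kubectl get po busybox-sleep000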
