一、rancher 图形化界面
图形化界面进行k8s集群的管理
rancher自带监控----普罗米修斯
[root@master01 opt]# docker load -i rancher.tar ##所有节点
[root@master01 opt]# docker pull rancher/rancher:v2.5.7 ##主节点
[root@master01 opt]# vim /etc/docker/daemon.json
{
"registry-mirrors": [
"https://hub-mirror.c.163.com",
"https://docker.m.daocloud.io",
"https://ghcr.io",
"https://mirror.baidubce.com",
"https://docker.nju.edu.cn",
"https://hub.littlediary.cn/",
"https://dockerproxy.cn"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
~
[root@master01 opt]# systemctl daemon-reload
[root@master01 opt]# systemctl restart docker
[root@master01 opt]# docker pull rancher/rancher:v2.5.7
[root@master01 opt]# docker run -d --restart=unless-stopped -p 80:80 -p 443:443 --privileged --name rancher rancher/rancher:v2.5.7
95247bfb683e8008b249d237603ef520527b2ec00cc261b6d2ebfc6e51321207
[root@master01 opt]# curl --insecure -sfL https://192.168.168.81/v3/import/6pmp9w44gs678l24ln45r84frtcs2pl7txl6hcz57cqlclstlpsx6s_c-2bhzq.yaml | kubectl apply -f -
error: no objects passed to apply
[root@master01 opt]# curl --insecure -sfL https://192.168.168.81/v3/import/6pmp9w44gs678l24ln45r84frtcs2pl7txl6hcz57cqlclstlpsx6s_c-2bhzq.yaml | kubectl apply -f -
clusterrole.rbac.authorization.k8s.io/proxy-clusterrole-kubeapiserver created
clusterrolebinding.rbac.authorization.k8s.io/proxy-role-binding-kubernetes-master created
namespace/cattle-system created
serviceaccount/cattle created
clusterrolebinding.rbac.authorization.k8s.io/cattle-admin-binding created
secret/cattle-credentials-4134134 created
clusterrole.rbac.authorization.k8s.io/cattle-admin created
deployment.apps/cattle-cluster-agent created
[root@master01 opt]# kubectl get ns
NAME STATUS AGE
cattle-system Active 119s
default Active 18d
ingress-nginx Active 41h
kube-node-lease Active 18d
kube-public Active 18d
kube-system Active 18d
lucky-cloud Active 22h
xy102 Active 47h
[root@master01 opt]# kubectl get pod -n cattle-system
NAME READY STATUS RESTARTS AGE
cattle-cluster-agent-cf684455-bd8bs 1/1 Running 0 2m36s
nfs进行挂载----hostPath
1、声明挂载卷
2、健康检查
3、调度策略
4、扩缩容以及升级策略
5、
调度
Last login: Sat Sep 14 08:55:59 2024 from 192.168.168.168
[root@master01 ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
hpa-test2-0 1/1 Running 16 2d
nfs1-76f66b958-68wpl 1/1 Running 12 8d
test1-11-785f6847f9-mtdnw 1/1 Running 0 3m49s
[root@master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 18d
[root@master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 18d
nginx1-11 NodePort 10.96.48.229 <none> 80:31000/TCP 41s
访问(http://192.168.168.81:31000/)
[root@k8s5 k8s]# rm -rf *
[root@k8s5 k8s]# ls
[root@k8s5 k8s]# echo 123 > index.html
[root@k8s5 k8s]#
[root@master01 k8s-yaml]# kubectl get pod -n cattle-prometheus
NAME READY STATUS RESTARTS AGE
exporter-kube-state-cluster-monitoring-79c667fdc9-ds9jr 0/1 ErrImagePull 0 5m13s
exporter-node-cluster-monitoring-2xrgh 0/1 ImagePullBackOff 0 5m13s
exporter-node-cluster-monitoring-8knvz 1/1 Running 0 5m13s
exporter-node-cluster-monitoring-92rkf 0/1 ImagePullBackOff 0 5m13s
grafana-cluster-monitoring-575d64fcf-2c4xg 0/2 Init:ImagePullBackOff 0 5m13s
operator-init-cluster-monitoring-2pv7x 0/1 ContainerCreating 0 5m13s
operator-init-monitoring-operator-tw9zm 0/1 ImagePullBackOff 0 5m15s
prometheus-operator-monitoring-operator-6dd84ddd49-b9scc 0/1 ImagePullBackOff 0 5m15s
[root@master01 k8s-yaml]# kubectl describe pod -n cattle-prometheus exporter-node-cluster-monitoring-92rkf
##查看镜像拉不下来的原因,注意在哪一个节点,然后去节点上拉取
[root@node01 ~]# vim /etc/docker/daemon.json
{
"registry-mirrors": [
"https://hub-mirror.c.163.com",
"https://docker.m.daocloud.io",
"https://ghcr.io",
"https://mirror.baidubce.com",
"https://docker.nju.edu.cn",
"https://hub.littlediary.cn/",
"https://dockerproxy.cn"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
[root@node01 ~]# systemctl daemon-reload
[root@node01 ~]# systemctl restart docker
[root@node01 ~]# docker pull rancher/prom-node-exporter:v1.0.1
v1.0.1: Pulling from rancher/prom-node-exporter
86fa074c6765: Pull complete
ed1cd1c6cd7a: Pull complete
ff1bb132ce7b: Pull complete
Digest: sha256:ae4d849cc4b14c15dcd7dc47621b577ae14767444ed62add3fe0d9b3b9e032f8
Status: Downloaded newer image for rancher/prom-node-exporter:v1.0.1
docker.io/rancher/prom-node-exporter:v1.0.1
[root@master01 k8s-yaml]# kubectl get pod -n cattle-prometheus
NAME READY STATUS RESTARTS AGE
exporter-kube-state-cluster-monitoring-79c667fdc9-ds9jr 0/1 ImagePullBackOff 0 6m21s
exporter-node-cluster-monitoring-2xrgh 0/1 ImagePullBackOff 0 6m21s
exporter-node-cluster-monitoring-8knvz 1/1 Running 0 6m21s
exporter-node-cluster-monitoring-92rkf 0/1 ImagePullBackOff 0 6m21s
grafana-cluster-monitoring-575d64fcf-2c4xg 0/2 Init:ImagePullBackOff 0 6m21s
operator-init-cluster-monitoring-xgsn9 1/1 Running 0 46s
[root@master01 opt]# kubectl describe pod -n cattle-prometheus operator-init-monitoring-operator-tw9zm
Name: operator-init-monitoring-operator-tw9zm
Namespace: cattle-prometheus
Priority: 0
Node: node01/192.168.168.82
[root@node01 ~]# docker pull rancher/istio-kubectl:1.5.10
[root@master01 k8s-yaml]# kubectl describe pod -n cattle-prometheus grafana-cluster-monitoring-575d64fcf-2c4xg
[root@node01 ~]# docker pull rancher/grafana-grafana:7.1.5
[root@master01 opt]# kubectl describe pod -n cattle-prometheus exporter-node-cluster-monitoring-2xrgh
Name: exporter-node-cluster-monitoring-2xrgh
Namespace: cattle-prometheus
Priority: 0
Node: node02/192.168.168.83
[root@node02 ~]# docker pull rancher/prom-node-exporter:v1.0.1
[root@node02 ~]# vim /etc/docker/daemon.json
{
"registry-mirrors": [
"https://hub-mirror.c.163.com",
"https://docker.m.daocloud.io",
"https://ghcr.io",
"https://mirror.baidubce.com",
"https://docker.nju.edu.cn",
"https://hub.littlediary.cn/",
"https://dockerproxy.cn"
],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
[root@node02 ~]# systemctl daemon-reload
[root@node02 ~]# systemctl restart docker
[root@node02 ~]# docker pull rancher/prom-node-exporter:v1.0.1
[root@master01 opt]# kubectl get pod -n cattle-prometheus
NAME READY STATUS RESTARTS AGE
exporter-kube-state-cluster-monitoring-79c667fdc9-ds9jr 1/1 Running 0 49m
exporter-node-cluster-monitoring-2xrgh 1/1 Running 0 49m
exporter-node-cluster-monitoring-8knvz 1/1 Running 0 49m
exporter-node-cluster-monitoring-92rkf 1/1 Running 0 49m
grafana-cluster-monitoring-575d64fcf-2c4xg 2/2 Running 0 49m
prometheus-cluster-monitoring-0 5/5 Running 1 33m
prometheus-operator-monitoring-operator-6dd84ddd49-b9scc 1/1 Running 0 49m