Node affinity: nodeName, nodeSelector
Soft (preferred) and hard (required) policies
Pod affinity
Pod anti-affinity
Labels: the node's own labels take priority over a pod label such as app:nginx1 (a minimal sketch recapping these fields follows below)
------------------------------- Summary of the above ------------------------------------------
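As a quick recap of the scheduling fields summarized above, here is a minimal, illustrative Pod sketch (not part of the original lab transcript): the label test1=a matches node01's labels shown later in these notes, and the weights and label values are only placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: nginx-affinity-demo
  labels:
    app: nginx1
spec:
  # nodeSelector: the simplest hard constraint, schedule only on nodes carrying this label
  # (alternatively, nodeName: node01 would bypass the scheduler entirely)
  nodeSelector:
    test1: a
  affinity:
    nodeAffinity:
      # hard (required) policy: the node MUST match, otherwise the pod stays Pending
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: test1
            operator: In
            values: ["a"]
      # soft (preferred) policy: prefer matching nodes, fall back to others if none match
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: memory
            operator: Exists
    podAntiAffinity:
      # soft pod anti-affinity: try not to share a node with other app=nginx1 pods
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              app: nginx1
  containers:
  - name: nginx
    image: nginx:1.22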
1. Taints
View the taints on the master01 node:
[root@master01 ~]# kubectl describe nodes master01
Taints: node-role.kubernetes.io/master:NoSchedule
Add a taint to the master01 node:
[root@master01 ~]#kubectl taint node master01 node-role.kubernetes.io/master:NoSchedule
1.1 What taints do
Taint: once a node carries a taint, the scheduler avoids placing pods on it when scheduling (unless the pod tolerates that taint).
1.2 Taint format
key:effect
key=value:effect
Examples (the <effect> placeholder is one of the three types described below):
[root@master01 ~]# kubectl taint node node01 test1=1:<effect>
[root@master01 ~]# kubectl taint node node01 test1:<effect>
1.3 Taint effects
There are three types of effect:
1. NoSchedule: once a node has this taint, the scheduler will not place new pods on it.
2. PreferNoSchedule: the scheduler tries to avoid placing pods on the node, but may still do so if there is no better choice (a soft NoSchedule).
3. NoExecute: the scheduler will not place new pods on the node, and pods already running on it are evicted to other nodes.
1. Remove the taint from master01
[root@master01 ~]# kubectl taint node master01 node-role.kubernetes.io/master:NoSchedule-
2. Add taints
[root@master01 ~]# kubectl taint node master01 test1=1:NoSchedule      ## do not schedule pods on this node
node/master01 tainted
[root@master01 ~]# kubectl taint node node01 test1=2:PreferNoSchedule  ## try not to schedule pods on this node
node/node01 tainted
[root@master01 ~]# kubectl taint node node02 test1=3:NoExecute         ## do not schedule here, and evict the pods already on this node
node/node02 tainted
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
Apply the file and check the placement: all pods end up on node01 (master01 has a NoSchedule taint, node02 has NoExecute, and node01's PreferNoSchedule is only a soft constraint).
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 created
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-654cb56c4-5nxrp 1/1 Running 0 9s 10.244.1.124 node01 <none> <none>
nginx1-654cb56c4-ml2hn 1/1 Running 0 9s 10.244.1.125 node01 <none> <none>
nginx1-654cb56c4-qt8bm 1/1 Running 0 9s 10.244.1.126 node01 <none> <none>
3. Modify a taint on a node (overwrite)
[root@master01 k8s-yaml]# kubectl taint node node01 test1=3:PreferNoSchedule --overwrite
node/node01 modified
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=3:PreferNoSchedule
4. Remove a taint from a node
[root@master01 k8s-yaml]# kubectl taint node node01 test1=3:PreferNoSchedule-
5. View the taints on a node
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=3:PreferNoSchedule
6. Change the taint on node01 to NoSchedule (not schedulable)
[root@master01 k8s-yaml]# kubectl taint node node01 test1=2:NoSchedule --overwrite
node/node01 modified
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 unchanged
A NoSchedule taint does not evict pods that are already running, so delete and re-create the Deployment to see the new scheduling result:
[root@master01 k8s-yaml]# kubectl delete deployments.apps nginx1
deployment.apps "nginx1" deleted
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 created
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-654cb56c4-dhwrm 0/1 Pending 0 10s <none> <none> <none> <none>
nginx1-654cb56c4-t5jfl 0/1 Pending 0 10s <none> <none> <none> <none>
nginx1-654cb56c4-zxtpw 0/1 Pending 0 10s <none> <none> <none> <none>
With taints on all three nodes (master01 test1=1:NoSchedule, node01 test1=2:NoSchedule, node02 test1=3:NoExecute) there is no schedulable node, so the pods stay in Pending.
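To confirm why a pod is Pending, describing it should show a FailedScheduling event listing the untolerated taints (a sketch; the pod name is taken from the output above and the exact event wording may differ):
kubectl describe pod nginx1-654cb56c4-dhwrm | grep -A 5 Events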
2. Tolerations
Even if a node carries taints, a pod with matching tolerations can still be scheduled onto it; the toleration effectively tells the scheduler to ignore that taint.
2.1 Equal matching: the toleration must match the taint's key, value, and effect exactly to take effect.
Example: tolerate the taint with key test1, value 2, effect NoSchedule; all three fields must match.
kubectl taint node node01 test1=2:NoSchedule
tolerations:
- key: "test1"
  operator: "Equal"
  value: "2"
  effect: "NoSchedule"
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Equal"
        value: "2"
        effect: "NoSchedule"
#operator accepts only two values: Equal (exact match) and Exists (containment)
#Equal: the taint's key, value, and effect must all match exactly for the toleration to apply
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=2:NoSchedule
test1=2:PreferNoSchedule
[root@master01 k8s-yaml]# kubectl taint nodes node01 test1=2:PreferNoSchedule-
node/node01 untainted
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=2:NoSchedule
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 created
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-7689b876fb-5sr8g 1/1 Running 0 3s 10.244.1.129 node01 <none> <none>
nginx1-7689b876fb-dkhn8 1/1 Running 0 3s 10.244.1.128 node01 <none> <none>
nginx1-7689b876fb-f6545 1/1 Running 0 3s 10.244.1.127 node01 <none> <none>
Equal matching: tolerate the taint with key test1, value 3, effect NoExecute; all three fields must match exactly.
kubectl taint node node01 test1=2:PreferNoSchedule
tolerations:
- key: "test1"
  operator: "Equal"
  value: "3"
  effect: "NoExecute"
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Equal"
        value: "3"
        effect: "NoExecute"
#operator accepts only two values: Equal (exact match) and Exists (containment)
[root@master01 k8s-yaml]# kubectl delete deployments.apps nginx1
deployment.apps "nginx1" deleted
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 created
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-746b4c957c-5l7qq 1/1 Running 0 2s 10.244.2.188 node02 <none> <none>
nginx1-746b4c957c-c7wkx 1/1 Running 0 2s 10.244.2.189 node02 <none> <none>
nginx1-746b4c957c-mn8lv 1/1 Running 0 2s 10.244.2.187 node02 <none> <none>
[root@master01 k8s-yaml]# kubectl delete -f test11.yaml
deployment.apps "nginx1" deleted
[root@master01 k8s-yaml]# kubectl taint node node01 test1=2:NoSchedule-
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-667b66f9df-zxj5t 0/1 Terminating 0 14m <none> node02 <none> <none>
nginx1-746b4c957c-8j7lt 1/1 Running 0 5s 10.244.1.132 node01 <none> <none>
nginx1-746b4c957c-cgm6j 1/1 Running 0 6s 10.244.2.194 node02 <none> <none>
nginx1-746b4c957c-hj2mt 1/1 Running 0 7s 10.244.1.131 node01 <none> <none>
Again tolerate the taint with key test1, value 3, effect NoExecute (exact match), this time with tolerationSeconds added.
#tolerationSeconds specifies how long, in seconds, the pod may keep running on the tainted node after being scheduled there before it is evicted
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Equal"
        value: "3"
        effect: "NoExecute"
        tolerationSeconds: 10
#tolerationSeconds: how long (in seconds) the pod may run on the tainted node before it is evicted
[root@master01 k8s-yaml]# kubectl delete -f test11.yaml
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5bd6779d95-7lf99 0/1 ContainerCreating 0 1s <none> node01 <none> <none>
nginx1-5bd6779d95-xvmvc 1/1 Running 0 2s 10.244.2.195 node02 <none> <none>
nginx1-746b4c957c-8j7lt 1/1 Terminating 0 2m6s 10.244.1.132 node01 <none> <none>
nginx1-746b4c957c-cgm6j 1/1 Running 0 2m7s 10.244.2.194 node02 <none> <none>
nginx1-746b4c957c-hj2mt 1/1 Running 0 2m8s 10.244.1.131 node01 <none> <none>
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5bd6779d95-7lf99 1/1 Running 0 37s 10.244.1.133 node01 <none> <none>
nginx1-5bd6779d95-878pr 1/1 Running 0 18s 10.244.1.134 node01 <none> <none>
nginx1-5bd6779d95-xlvh5 1/1 Running 0 6s 10.244.2.200 node02 <none> <none>
Because the NoExecute taint is tolerated only for tolerationSeconds: 10, each pod is evicted 10 seconds after it starts and the Deployment keeps creating replacements, which is the churn visible in the two outputs above.
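To observe the 10-second eviction loop directly, the pods can be watched as they cycle through Running and Terminating (a sketch; press Ctrl-C to stop watching):
kubectl get pod -o wide -w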
2.2 Exists matching: it is enough for the toleration to contain the taint's key, or its effect, or both.
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Exists"
        effect: "NoExecute"
        tolerationSeconds: 10
#tolerationSeconds: how long (in seconds) the pod may run on the tainted node before it is evicted
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5c7757865d-ck5lf 1/1 Running 0 29s 10.244.1.135 node01 <none> <none>
nginx1-5c7757865d-skzmc 1/1 Running 0 17s 10.244.1.136 node01 <none> <none>
nginx1-5c7757865d-txqzd 1/1 Running 0 10s 10.244.2.244 node02 <none> <none>
Exists matching on the key "test1" only (no effect given):
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Exists"
        tolerationSeconds: 10
#tolerationSeconds: how long (in seconds) the pod may run on the tainted node before it is evicted
[root@master01 k8s-yaml]# kubectl apply -f test10.yaml
The Deployment "nginx1" is invalid: spec.template.spec.tolerations[0].effect: Invalid value: "": effect must be 'NoExecute' when `tolerationSeconds` is set
As the error message says, effect: "NoExecute" must be set whenever tolerationSeconds is used:
[root@master01 k8s-yaml]# vim test10.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Exists"
        effect: "NoExecute"
        tolerationSeconds: 10
[root@master01 k8s-yaml]# kubectl apply -f test10.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5c7757865d-tfg4n 1/1 Running 0 2s 10.244.2.47 node02 <none> <none>
nginx1-5c7757865d-zwlh5 1/1 Running 0 3m11s 10.244.1.173 node01 <none> <none>
Exists matching on the effect "NoExecute" only (no key given):
[root@master01 k8s-yaml]# vim test10.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - operator: "Exists"
        effect: "NoExecute"
#With Exists and no key, any taint whose effect is NoExecute is tolerated, whatever its key and value
#e.g. taints such as test1=1:NoExecute, test1=2:NoExecute, test1=3:NoExecute would all be tolerated
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=2:NoSchedule
[root@master01 k8s-yaml]# kubectl describe nodes master01
Taints: node-role.kubernetes.io/master:NoSchedule
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
[root@master01 k8s-yaml]# kubectl apply -f test10.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-667b66f9df-6xwk6 1/1 Running 0 7s 10.244.2.88 node02 <none> <none>
nginx1-667b66f9df-hgrnc 1/1 Running 0 7s 10.244.2.90 node02 <none> <none>
nginx1-667b66f9df-mfndg 1/1 Running 0 7s 10.244.2.89 node02 <none> <none>
Exists matching on the key test1 only (any value, any effect is tolerated):
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Exists"
[root@master01 k8s-yaml]# kubectl get nodes --show-labels
NAME STATUS ROLES AGE VERSION LABELS
master01 Ready control-plane,master 9d v1.20.15 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master01,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=
node01 Ready <none> 9d v1.20.15 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node01,kubernetes.io/os=linux,memory=1000,test1=a,test3=b
node02 Ready <none> 9d v1.20.15 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node02,kubernetes.io/os=linux,test2=b,xy102=98
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=2:NoSchedule
[root@master01 k8s-yaml]# kubectl describe nodes master01
Taints: node-role.kubernetes.io/master:NoSchedule
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 created
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-57b55d9bf4-7zb52 1/1 Running 0 13s 10.244.1.175 node01 <none> <none>
nginx1-57b55d9bf4-ws52x 1/1 Running 0 11s 10.244.1.176 node01 <none> <none>
nginx1-57b55d9bf4-xb4kj 1/1 Running 0 12s 10.244.2.91 node02 <none> <none>
Tolerate via Exists matching on effect: "NoExecute" only (regardless of key):
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - operator: "Exists"
        effect: "NoExecute"
[root@master01 k8s-yaml]# kubectl describe nodes node01
Taints: test1=2:NoSchedule
[root@master01 k8s-yaml]# kubectl describe nodes master01
Taints: node-role.kubernetes.io/master:NoSchedule
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-667b66f9df-758ps 1/1 Running 0 10s 10.244.2.192 node02 <none> <none>
nginx1-667b66f9df-p4jsk 1/1 Running 0 9s 10.244.2.193 node02 <none> <none>
nginx1-667b66f9df-zxj5t 1/1 Running 0 11s 10.244.2.191 node02 <none> <none>
Exists matching on the effect "NoSchedule" only:
[root@master01 k8s-yaml]# vim test10.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - operator: "Exists"
        effect: "NoSchedule"
[root@master01 k8s-yaml]# kubectl apply -f test10.yaml
deployment.apps/nginx1 created
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-7bb454c589-kntvb 1/1 Running 0 3s 10.244.0.18 master01 <none> <none>
nginx1-7bb454c589-mrftz 1/1 Running 0 3s 10.244.1.178 node01 <none> <none>
nginx1-7bb454c589-qd748 1/1 Running 0 3s 10.244.1.177 node01 <none> <none>
3. Cordon and drain
3.1 cordon
cordon directly marks a node as unschedulable; the scheduler will not place new pods on it (pods already running there are left alone).
Here the scheduler will no longer place pods on node02:
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
[root@master01 k8s-yaml]# kubectl cordon node02
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
node.kubernetes.io/unschedulable:NoSchedule
uncordon: undo the cordon so the scheduler may place pods on node02 again.
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
node.kubernetes.io/unschedulable:NoSchedule
[root@master01 k8s-yaml]# kubectl uncordon node02
Taints: test1=3:NoExecute
3.2 Drain (use with caution)
drain
Marks the node unschedulable (cordons it) and evicts the pods running on it to other nodes.
[root@master01 k8s-yaml]# kubectl drain node02 --ignore-daemonsets --delete-local-data --force
--ignore-daemonsets: ignore pods managed by a DaemonSet
Before draining node02 (matching the example below): master01 has 1 pod, node01 has 1 pod, node02 has 1 pod.
Draining node02 evicts its pod, and the Deployment controller recreates the missing replica on one of the remaining schedulable nodes.
After the drain: master01 has 1 pod, node01 has 2 pods, node02 has none.
--delete-local-data: if the drained node has pods using local (emptyDir) mounts, those pods are killed anyway (deprecated in favor of --delete-emptydir-data, as the warning below shows)
--force: pods that were not created by a controller are deleted as well
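Since --delete-local-data is deprecated (the warning in the transcript below says to use --delete-emptydir-data instead), the equivalent up-to-date invocation would presumably be:
kubectl drain node02 --ignore-daemonsets --delete-emptydir-data --force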
[root@master01 k8s-yaml]# vim test10.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx
        image: nginx:1.22
      tolerations:
      - key: "test1"
        operator: "Equal"
        value: "3"
        effect: "NoExecute"
        tolerationSeconds: 10
[root@master01 k8s-yaml]# kubectl apply -f test10.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5c7757865d-b4k96 1/1 Running 0 7m9s 10.244.1.139 node01 <none> <none>
nginx1-5c7757865d-hc2zp 1/1 Running 0 7m9s 10.244.0.14 master01 <none> <none>
nginx1-5c7757865d-lj6pr 1/1 Running 0 7m9s 10.244.2.49 node02 <none> <none>
[root@master01 k8s-yaml]# kubectl drain node02 --ignore-daemonsets --delete-local-data --force
Flag --delete-local-data has been deprecated, This option is deprecated and will be deleted. Use --delete-emptydir-data.
node/node02 cordoned
WARNING: ignoring DaemonSet-managed Pods: kube-system/kube-flannel-ds-w985p, kube-system/kube-proxy-kpvs2
evicting pod default/nginx1-5c7757865d-lj6pr
pod/nginx1-5c7757865d-lj6pr evicted
node/node02 evicted
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5c7757865d-b4k96 1/1 Running 0 14m 10.244.1.139 node01 <none> <none>
nginx1-5c7757865d-hc2zp 1/1 Running 0 14m 10.244.0.14 master01 <none> <none>
nginx1-5c7757865d-qk8dm 1/1 Running 0 29s 10.244.1.140 node01 <none> <none>
Undo the drain
[root@master01 k8s-yaml]# kubectl uncordon node02   ## a drained (cordoned) node can only be made schedulable again with uncordon
node/node02 uncordoned
The unschedulable taint cannot be removed directly with kubectl taint, as the attempt below shows:
-------------------------------------
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
node.kubernetes.io/unschedulable:NoSchedule
[root@master01 k8s-yaml]# kubectl taint node node02 node.kubernetes.io/unschedulable:NoSchedule-
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
node.kubernetes.io/unschedulable:NoSchedule
[root@master01 k8s-yaml]# kubectl uncordon node02
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
------------------------------------------------------------
[root@master01 k8s-yaml]# kubectl describe nodes node02
Taints: test1=3:NoExecute
[root@master01 k8s-yaml]# kubectl delete deployments.apps nginx1
deployment.apps "nginx1" deleted
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5c7757865d-9cddh 1/1 Running 0 18s 10.244.1.141 node01 <none> <none>
nginx1-5c7757865d-dt5qf 1/1 Running 0 18s 10.244.0.15 master01 <none> <none>
nginx1-5c7757865d-hp64d 1/1 Running 0 18s 10.244.2.50 node02 <none> <none>
3.3 The special case of the master node
The master normally acts as the cluster's control plane and scheduler, so pods are usually kept off it; but to make full use of its resources it can also run pods.
1. Its taint can be changed to PreferNoSchedule, so that pods land on the master only when no other node fits (a command sketch follows below).
2. If the cluster is small, the master can simply be used as an ordinary worker node for pod deployment.
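A minimal command sketch of option 1, assuming the default kubeadm master taint shown earlier in these notes:
# remove the default hard taint from the master
kubectl taint node master01 node-role.kubernetes.io/master:NoSchedule-
# replace it with a soft taint so pods land on the master only as a last resort
kubectl taint node master01 node-role.kubernetes.io/master:PreferNoSchedule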
After a node has been drained or its pods evicted, how do the pods get back onto it?
1. Any eviction (NoExecute) taint must be removed.
2. Run kubectl uncordon node02 to mark the node schedulable again.
3. Recreate or restart the pods so the scheduler can place them on the node again (see the sketch below).
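Putting the three steps together for node02 as used in these notes (a sketch; the taint and the manifest name come from the examples above):
# 1) remove the NoExecute taint that keeps evicting pods
kubectl taint node node02 test1=3:NoExecute-
# 2) make the node schedulable again after cordon/drain
kubectl uncordon node02
# 3) recreate the pods so the scheduler can place them on node02 again
kubectl delete -f test11.yaml && kubectl apply -f test11.yaml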
4. Volumes
The lifetime of a container or pod is limited; once it restarts or crashes, the data inside it is lost.
To keep data intact, we mount storage into the pod's containers (volumes).
volume
1. emptyDir volume
An emptyDir volume is created just before the pod is assigned to a node and exists for as long as the pod runs on that node.
It is not shared with the host; it is shared only among the containers inside the pod. Once the pod is deleted or recreated, the emptyDir data is deleted with it.
It is mainly used for communication between the components inside a pod and should not hold sensitive or persistent data.
2. hostPath volume
hostPath volumes are backed by the host (node) filesystem.
A hostPath volume binds a node directory into the pod: when the pod is scheduled to a node, the specified directory on that node is mounted into the container.
pod on node01:  /opt/test1 (node) <----> /opt/test (container)
With a Deployment of 3 pods spread across the nodes, each node provides its own copy of the directory:
master01  /opt/test1 <----> /opt/test
node01    /opt/test1 <----> /opt/test
node02    /opt/test1 <----> /opt/test
The data persists on the node, but if the node is reformatted the data is lost as well.
Use case: each pod runs a service that keeps its own data, and that data must stay separate per node; this is when hostPath is used,
for example by kafka or redis.
3. NFS shared volume
With NFS shared storage, the pods in the cluster act as NFS clients:
node01    /opt/test1
node02    /opt/test1   -------------> all three nodes mount the same export, so all data lives in that single shared directory
master01  /opt/test1
The nginx service data of every pod is therefore identical,
e.g. a shared /usr/share/nginx/html/index.html.
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx1
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
          # path inside the container
      - name: nginx2
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /data
        command: ["/bin/bash","-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]
      volumes:
      - name: html
        emptyDir: {}
#Container nginx1's /usr/share/nginx/html and container nginx2's /data share the same emptyDir volume; once the pod is gone, the data is gone with it
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
[root@master01 k8s-yaml]# kubectl exec -it nginx1-6b677dd6bc-cmc84 -c nginx1 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx1-6b677dd6bc-cmc84:/# cd /usr/share/nginx/html/
root@nginx1-6b677dd6bc-cmc84:/usr/share/nginx/html# ls
index.html
root@nginx1-6b677dd6bc-cmc84:/usr/share/nginx/html# tail -f index.html
Wed Sep 4 05:57:58 UTC 2024
Wed Sep 4 05:58:00 UTC 2024
Wed Sep 4 05:58:02 UTC 2024
Wed Sep 4 05:58:04 UTC 2024
Wed Sep 4 05:58:06 UTC 2024
Wed Sep 4 05:58:08 UTC 2024
[root@master01 k8s-yaml]# kubectl exec -it nginx1-6b677dd6bc-cmc84 -c nginx2 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx1-6b677dd6bc-cmc84:/# ls
bin data docker-entrypoint.d etc lib media opt root sbin sys usr
boot dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var
root@nginx1-6b677dd6bc-cmc84:/# cd data/
root@nginx1-6b677dd6bc-cmc84:/data# ls
index.html
Mount the node's host directory /opt/xy102 into the containers (hostPath):
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx1
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
          # path inside the container
      - name: nginx2
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /data
        command: ["/bin/bash","-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]
      volumes:
      - name: html
        hostPath:
          path: /opt/xy102
          type: DirectoryOrCreate
The two containers in each pod share the same mounted volume and the same pod IP.
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 configured
[root@master01 k8s-yaml]# kubectl get pod -o wide
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-6c574c856d-5xvvt 2/2 Running 0 93s 10.244.1.146 node01 <none> <none>
nginx1-6c574c856d-j97cs 2/2 Running 0 95s 10.244.1.145 node01 <none> <none>
nginx1-6c574c856d-xnq5h 2/2 Running 0 91s 10.244.1.147 node01 <none> <none>
[root@node01 xy102]# cd /opt/xy102/
[root@node01 xy102]# ls
index.html
[root@node01 xy102]# cat index.html
Wed Sep 4 06:04:47 UTC 2024
Wed Sep 4 06:04:49 UTC 2024
Wed Sep 4 06:04:49 UTC 2024
NFS: share /opt/data1 on the server 192.168.168.81 and mount it into the containers.
[root@master01 k8s-yaml]# mkdir /opt/data1
[root@master01 k8s-yaml]# chmod 777 /opt/data1/
[root@master01 k8s-yaml]# vim /etc/exports
/opt/data1 192.168.168.0/24(rw,no_root_squash)
[root@master01 k8s-yaml]# systemctl restart rpcbind
[root@master01 k8s-yaml]# systemctl restart nfs
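Before pointing the pods at the export, it can be verified from any node with the same showmount check these notes use later for k8s5 (a sketch; requires the NFS client utilities on that host):
showmount -e 192.168.168.81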
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx1
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
          # path inside the container
      - name: nginx2
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /data
        command: ["/bin/bash","-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]
      volumes:
      - name: html
        nfs:
          path: /opt/data1
          server: 192.168.168.81
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 configured
[root@master01 k8s-yaml]# kubectl get pod -o wide
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-679d6cfc9d-g869l 2/2 Running 0 3m1s 10.244.2.187 node02 <none> <none>
nginx1-679d6cfc9d-t86mz 2/2 Running 0 3m5s 10.244.2.186 node02 <none> <none>
nginx1-679d6cfc9d-z9ss4 2/2 Running 0 3m2s 10.244.1.148 node01 <none> <none>
[root@master01 opt]# cd data1/
[root@master01 data1]# ls
index.html
[root@master01 data1]# touch 123 > 123.txt
[root@master01 data1]# ls
123 123.txt index.html
[root@master01 data1]# cat 123
[root@master01 data1]# cat 123.txt
[root@master01 data1]# cat 123.txt
[root@master01 data1]# echo 123 > 234.txt
[root@master01 k8s-yaml]# kubectl exec -it nginx1-679d6cfc9d-g869l bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulting container name to nginx1.
Use 'kubectl describe pod/nginx1-679d6cfc9d-g869l -n default' to see all of the containers in this pod.
root@nginx1-679d6cfc9d-g869l:/# cd /usr/share/nginx/html/
root@nginx1-679d6cfc9d-g869l:/usr/share/nginx/html# ls
index.html
root@nginx1-679d6cfc9d-g869l:/usr/share/nginx/html# ls
123 123.txt index.html
root@nginx1-679d6cfc9d-g869l:/usr/share/nginx/html# ls
123 123.txt 234.txt index.html
root@nginx1-679d6cfc9d-g869l:/usr/share/nginx/html# cat 234.txt
123
In the nginx2 container the shared directory is mounted at /data:
[root@master01 k8s-yaml]# kubectl exec -it nginx1-679d6cfc9d-g869l -c nginx2 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx1-679d6cfc9d-g869l:/# cd /usr/share/nginx/html/
root@nginx1-679d6cfc9d-g869l:/usr/share/nginx/html# ls
50x.html index.html
root@nginx1-679d6cfc9d-g869l:/usr/share/nginx/html# cd /data/
root@nginx1-679d6cfc9d-g869l:/data# ls
123 123.txt 234.txt index.html
NFS: mount /opt/xy103 exported by k8s5 (192.168.168.85) into the containers.
[root@k8s5 ~]# cd /opt/
[root@k8s5 opt]# ls
jenkins-2.396-1.1.noarch.rpm test
[root@k8s5 opt]# mkdir xy103
[root@k8s5 opt]# chmod 777 xy103/
[root@k8s5 opt]# vim /etc/exports
/opt/xy103 192.168.168.0/24(rw,no_root_squash)
[root@k8s5 opt]# systemctl restart rpcbind
[root@k8s5 opt]# systemctl restart nfs
[root@k8s5 opt]# showmount -e
Export list for k8s5:
/opt/xy103 192.168.168.0/24
----------- on the three k8s nodes and on k8s5, add the host mapping to /etc/hosts -----------
192.168.168.85 k8s5
------------------
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx1
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
          # path inside the container
      - name: nginx2
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /data
        command: ["/bin/bash","-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]
      volumes:
      - name: html
        nfs:
          path: /opt/xy103
          server: k8s5
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
deployment.apps/nginx1 configured
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5db8b56b4-27jg2 2/2 Running 0 98s 10.244.2.190 node02 <none> <none>
nginx1-5db8b56b4-h2d9s 2/2 Running 0 99s 10.244.1.151 node01 <none> <none>
nginx1-5db8b56b4-h6gqg 2/2 Running 0 101s 10.244.2.189 node02 <none> <none>
[root@master01 k8s-yaml]# kubectl describe nodes master01
Taints: node-role.kubernetes.io/master:NoSchedule
[root@master01 k8s-yaml]# kubectl exec -it nginx1-5db8b56b4-27jg2 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Defaulting container name to nginx1.
Use 'kubectl describe pod/nginx1-5db8b56b4-27jg2 -n default' to see all of the containers in this pod.
root@nginx1-5db8b56b4-27jg2:/# cd /usr/share/nginx/html/
root@nginx1-5db8b56b4-27jg2:/usr/share/nginx/html# ls
index.html
[root@k8s5 opt]# cd xy103/
[root@k8s5 xy103]# ls
index.html
[root@k8s5 xy103]# echo 123 > 123.txt
root@nginx1-5db8b56b4-27jg2:/usr/share/nginx/html# cat 123.txt
123
[root@master01 k8s-yaml]# kubectl exec -it nginx1-5db8b56b4-27jg2 -c nginx2 bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx1-5db8b56b4-27jg2:/# cd /data/
root@nginx1-5db8b56b4-27jg2:/data# ls
123.txt index.html
root@nginx1-5db8b56b4-27jg2:/data# cat 123.txt
123
NFS: mount /opt/xy103 on k8s5 (192.168.168.85) into the containers, and expose the Deployment with a NodePort Service.
(The NFS export /opt/xy103 on k8s5 and the /etc/hosts entry for k8s5 are already in place from the previous example.)
[root@master01 k8s-yaml]# vim test11.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx1
  name: nginx1
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx1
  template:
    metadata:
      labels:
        app: nginx1
    spec:
      containers:
      - name: nginx1
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
          # path inside the container
      - name: nginx2
        image: nginx:1.22
        volumeMounts:
        - name: html
          mountPath: /data
        command: ["/bin/bash","-c","while true; do echo $(date) >> /data/index.html; sleep 2; done"]
      volumes:
      - name: html
        nfs:
          path: /opt/xy103
          server: k8s5
---
# "---" marks a document separator: the previous YAML document ends and a new one begins
apiVersion: v1
kind: Service
metadata:
  name: nginx1
  # namespace (defaults to "default" when omitted)
  labels:
    app: nginx1
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30000
  selector:
    app: nginx1
[root@master01 k8s-yaml]# kubectl apply -f test11.yaml
[root@master01 k8s-yaml]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx1-5db8b56b4-4bg5l 2/2 Running 0 9m52s 10.244.2.191 node02 <none> <none>
nginx1-5db8b56b4-hrg7c 2/2 Running 0 9m52s 10.244.1.152 node01 <none> <none>
nginx1-5db8b56b4-mx5cq 2/2 Running 0 9m52s 10.244.1.153 node01 <none> <none>
[root@master01 k8s-yaml]# curl 192.168.168.81:30000
If everything is wired up correctly, the NodePort Service forwards to the nginx1 pods, so this should return the timestamp lines that nginx2 keeps appending to index.html on the NFS share.