k8s的Pod管理

一.资源使用的方法

1.命令式

复制代码
[root@master ~]# kubectl run  webpod --image nginx:1.26 --port 80

[root@master ~]# kubectl run  webpod --image nginx:latest --port 80   #这是老师的版本

[root@master ~]# kubectl get pods
webpod   1/1     Running   0          33s

[root@master ~]# kubectl describe pods webpod

[root@master ~]# kubectl get pods  -o wide
NAME     READY   STATUS    RESTARTS   AGE     IP           NODE    NOMINATED NODE   READINESS GATES
webpod   1/1     Running   0          2m46s   10.244.2.2   node2   <none>           <none>

[root@master ~]# kubectl delete pods webpod
pod "webpod" deleted from default namespace

2.yaml文件方式

复制代码
[root@master ~]# kubectl create deployment test --image nginx --replicas 1  --dry-run=client -o yaml  > test.yml     //老师的

[root@master ~]# kubectl create deployment test --image nginx:1.26 --replicas 1  --dry-run=client -o yaml  > test.yml    //我们的文件要写这个

[root@master ~]# vim test.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test
  name: test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: test
    spec:
      containers:
      - image: nginx:1.26
        name: nginx

#建立式
[root@master ~]# kubectl create  -f test.yml
deployment.apps/test created
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-h2sct   1/1     Running   0          8s

[root@master ~]# kubectl delete -f test.yml
deployment.apps "test" deleted from default namespace
[root@master ~]# kubectl get pods
No resources found in default namespace.



#声明式
[root@master ~]# kubectl apply -f test.yml
deployment.apps/test created
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-cxtnp   1/1     Running   0          1s


#注意:create(建立式)只能创建资源,不能更新已存在的资源;apply(声明式)既能创建也能更新
[root@master ~]# vim test.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: test
  name: test
spec:
  replicas: 2			#只修改pod数量
。。。。。。。。。。。。。。。。。。


[root@master ~]# kubectl create  -f test.yml
Error from server (AlreadyExists): error when creating "test.yml": deployments.apps "test" already exists

[root@master ~]# kubectl apply  -f test.yml
deployment.apps/test configured
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-9sw95   1/1     Running   0          8s
test-56848fd9dc-cxtnp   1/1     Running   0          2m42s

二.资源类型

1.node

复制代码
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   18h   v1.35.3
node1    Ready    <none>          17h   v1.35.3
node2    Ready    <none>          17h   v1.35.3

[root@master ~]#  kubeadm token create --print-join-command

2.namespace

复制代码
[root@master ~]# kubectl get namespaces
NAME              STATUS   AGE
default           Active   18h
kube-flannel      Active   17h
kube-node-lease   Active   18h
kube-public       Active   18h
kube-system       Active   18h
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-9sw95   1/1     Running   0          25m
test-56848fd9dc-cxtnp   1/1     Running   0          27m
[root@master ~]# kubectl -n kube-flannel get pods
NAME                    READY   STATUS    RESTARTS      AGE
kube-flannel-ds-hc8gt   1/1     Running   1 (17h ago)   17h
kube-flannel-ds-rvzng   1/1     Running   1 (17h ago)   17h
kube-flannel-ds-s29g5   1/1     Running   1 (17h ago)   17h


[root@master ~]# kubectl create namespace timinglee
namespace/timinglee created
[root@master ~]# kubectl get namespaces
NAME              STATUS   AGE
default           Active   18h
kube-flannel      Active   17h
kube-node-lease   Active   18h
kube-public       Active   18h
kube-system       Active   18h
timinglee         Active   6s



[root@master ~]# kubectl -n timinglee  run testpod --image  nginx:latest
pod/testpod created
----- kubectl -n timinglee  run testpod --image  nginx:1.26   //这个

[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-9sw95   1/1     Running   0          28m
test-56848fd9dc-cxtnp   1/1     Running   0          30m
[root@master ~]# kubectl -n timinglee  get pods
NAME      READY   STATUS    RESTARTS   AGE
testpod   1/1     Running   0          17s
[root@master ~]# kubectl -n timinglee  run testpod --image  nginx:latest
Error from server (AlreadyExists): pods "testpod" already exists
---------kubectl -n timinglee  run testpod --image  nginx:1.26   //这个
[root@master ~]# kubectl  run testpod --image  nginx:latest
pod/testpod created
----------kubectl  run testpod --image  nginx:1.26   //这个

三.kubectl命令

复制代码
[root@master ~]# kubectl get deployments.apps
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-9sw95   1/1     Running   0          37m
test-56848fd9dc-cxtnp   1/1     Running   0          40m

[root@master ~]# kubectl edit deployments.apps test
.....
replicas: 4
.....

[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-9sw95   1/1     Running   0          39m
test-56848fd9dc-cxtnp   1/1     Running   0          41m
test-56848fd9dc-kdm4h   1/1     Running   0          28s
test-56848fd9dc-lmpdt   1/1     Running   0          28s


[root@master ~]# kubectl patch  deployments.apps test  -p '{"spec":{"replicas":1}}'
deployment.apps/test patched
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
test-56848fd9dc-9sw95   1/1     Running   0          42m

#端口暴露

复制代码
[root@master ~]# kubectl expose  deployment test --port  80 --target-port 80
service/test exposed
[root@master ~]# kubectl describe service test
Name:                     test
Namespace:                default
Labels:                   app=test
Annotations:              <none>
Selector:                 app=test
Type:                     ClusterIP
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.104.29.36
IPs:                      10.104.29.36
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                10.244.1.2:80,10.244.2.11:80,10.244.1.5:80 + 1 more...
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>

[root@master ~]# curl 10.98.111.43
![](https://i-blog.csdnimg.cn/direct/ca205303612244d8acfcdc0452faa6eb.png)

复制代码
[root@master ~]# kubectl logs testpod -n timinglee    

#删除    kubectl delete pod testpod
#attach
[root@master ~]# kubectl run testpod  -it --image busybox
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
/ #
/ #  ctrl+pq
/ # Session ended, resume using 'kubectl attach testpod -c testpod -i -t' command when the pod is running

[root@master ~]# kubectl attach pods/testpod -it
All commands and output from this session will be recorded in container logs, including credentials and sensitive information passed through the command prompt.
If you don't see a command prompt, try pressing enter.
/ #
/ #
/ #


[root@master ~]# kubectl exec -it pods/testpod -c testpod -- /bin/sh
/ #
/ #


[root@master ~]# kubectl cp testpod.yml testpod:/ -c testpod
[root@master ~]# kubectl ^C
[root@master ~]# kubectl exec -it pods/testpod -c testpod -- /bin/sh
/ #
/ # ls
bin          etc          lib          proc         sys          tmp          var
dev          home         lib64        root         testpod.yml  usr

[root@master ~]# vim testpod.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: testpod
  name: testpod
spec:
  containers:
  - image: nginx:1.26
    name: web1
    ports:
    - containerPort: 80
  - image: busybox:latest
    name: busybox
    command:
    - /bin/sh
    - -c
    - sleep 3000

#扩容
[root@master ~]# kubectl get pods
NAME                   READY   STATUS    RESTARTS   AGE
test-68d8574cb-8xjdv   1/1     Running   0          14m
test-68d8574cb-b9p9x   1/1     Running   0          14m
test-68d8574cb-lb9gq   1/1     Running   0          14m
test-68d8574cb-xkd56   1/1     Running   0          14m
[root@master ~]# kubectl scale deployment test --replicas 6
deployment.apps/test scaled
[root@master ~]# kubectl get pods
NAME                   READY   STATUS    RESTARTS   AGE
test-68d8574cb-5kbgs   1/1     Running   0          2s
test-68d8574cb-7nnmn   1/1     Running   0          2s
test-68d8574cb-8xjdv   1/1     Running   0          15m
test-68d8574cb-b9p9x   1/1     Running   0          15m
test-68d8574cb-lb9gq   1/1     Running   0          15m
test-68d8574cb-xkd56   1/1     Running   0          15m
[root@master ~]# kubectl scale deployment test --replicas 1
deployment.apps/test scaled
[root@master ~]# kubectl get pods
NAME                   READY   STATUS      RESTARTS        AGE
test-68d8574cb-5kbgs   0/1     Completed   0               26s
test-68d8574cb-8xjdv   0/1     Completed   0               15m
test-68d8574cb-lb9gq   1/1     Running     0               15m
test-68d8574cb-xkd56   0/1     Completed   0               15m
testpod                1/1     Running     1 (6m54s ago)   8m12s
[root@master ~]# kubectl get pods
NAME                   READY   STATUS    RESTARTS        AGE
test-68d8574cb-lb9gq   1/1     Running   0               15m
testpod                1/1     Running   1 (6m56s ago)   8m14s
[root@master ~]# kubectl get pods --show-labels
NAME                   READY   STATUS    RESTARTS        AGE     LABELS
test-68d8574cb-lb9gq   1/1     Running   0               16m     app=test,pod-template-hash=68d8574cb
testpod                1/1     Running   1 (7m54s ago)   9m12s   run=testpod
[root@master ~]# kubectl label pods testpod name=lee
pod/testpod labeled
[root@master ~]# kubectl get pods --show-labels
NAME                   READY   STATUS    RESTARTS        AGE     LABELS
test-68d8574cb-lb9gq   1/1     Running   0               17m     app=test,pod-template-hash=68d8574cb
testpod                1/1     Running   1 (8m23s ago)   9m41s   name=lee,run=testpod
[root@master ~]# kubectl label pods testpod name-
pod/testpod unlabeled
[root@master ~]# kubectl get pods --show-labels
NAME                   READY   STATUS    RESTARTS        AGE   LABELS
test-68d8574cb-lb9gq   1/1     Running   0               18m   app=test,pod-template-hash=68d8574cb
testpod                1/1     Running   1 (9m10s ago)   10m   run=testpod

1.自助式管理pod

#清除所有的

kubectl delete pod --all

[root@harbor ~]# docker load -i myapp
[root@harbor ~]# docker tag timinglee/myapp:v1 reg.timinglee.org/library/myapp:v1
[root@harbor ~]# docker push reg.timinglee.org/library/myapp:v1
[root@harbor ~]# docker tag timinglee/myapp:v2 reg.timinglee.org/library/myapp:v2
[root@harbor ~]# docker push reg.timinglee.org/library/myapp:v2
[root@master ~]# docker pull reg.timinglee.org/library/myapp:v1
[root@master ~]# docker pull reg.timinglee.org/library/myapp:v2

复制代码
[root@master pod]# kubectl run  myappv2 --image  myapp:v2  --port 80
pod/myappv2 created
[root@master pod]# kubectl get pods
NAME      READY   STATUS              RESTARTS   AGE
myappv2   0/1     ContainerCreating   0          8s				#创建中
[root@master pod]# kubectl get pods
NAME      READY   STATUS         RESTARTS   AGE
myappv2   0/1     ErrImagePull   0          20s					#镜像拉取失败

[root@master pod]# kubectl get pods
NAME      READY   STATUS             RESTARTS   AGE
myappv2   0/1     ImagePullBackOff   0          3m48s			#尝试重新拉取镜像

[root@master pod]# kubectl get pods
NAME      READY   STATUS    RESTARTS   AGE
myappv2   1/1     Running   0          4m20s

[root@master pod]# kubectl delete pods myappv2
pod "myappv2" deleted from default namespace
[root@master pod]# kubectl get pods
No resources found in default namespace.	

2.利用控制器管理pod

复制代码
[root@master pod]# kubectl create deployment webcluster --image myapp:v2 --replicas 1
deployment.apps/webcluster created
[root@master pod]# kubectl get deployments.apps -o wide
NAME         READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES     SELECTOR
webcluster   1/1     1            1           14s   myapp        myapp:v2   app=webcluster


[root@master pod]# kubectl scale deployment webcluster --replicas 2
deployment.apps/webcluster scaled
[root@master pod]# kubectl scale deployment webcluster --replicas 1


[root@master pod]# kubectl label pods webcluster-6c8b4bb9d7-jsjws app-
pod/webcluster-6c8b4bb9d7-jsjws unlabeled
[root@master pod]# kubectl label pods webcluster-6c8b4bb9d7-jsjws app=webcluster
pod/webcluster-6c8b4bb9d7-jsjws labeled


#暴露控制器(设定访问pod的vip)
[root@master pod]# kubectl expose deployment webcluster --port 80 --target-port 80
[root@master pod]# kubectl describe svc webcluster | tail -n 10 
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.98.36.168
IPs:                      10.98.36.168
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                10.244.1.12:80
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>


[root@master pod]# curl  10.98.36.168
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>


#更新版本
[root@master pod]# kubectl set image deployments webcluster myapp=myapp:v1
deployment.apps/webcluster image updated

[root@master pod]# curl 10.98.36.168
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

[root@master pod]# kubectl rollout history deployment webcluster
deployment.apps/webcluster
REVISION  CHANGE-CAUSE
1         <none>
2         <none>

[root@master pod]# kubectl rollout undo deployment webcluster --to-revision 1
deployment.apps/webcluster rolled back
[root@master pod]# curl 10.98.36.168
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>

3.利用yaml文件部署应用

运行单个容器

复制代码
#运行单个容器
[root@master pod]# kubectl run  lee1 --image myapp:v1  --dry-run=client -o yaml  > 1test.yml
[root@master pod]# vim 1test.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    name: lee1
  name: lee1
spec:
  containers:
  - image: myapp:v1
    name: myappv1
    
[root@master pod]# kubectl apply -f 1test.yml
pod/lee1 created
[root@master pod]# kubectl get pods
NAME   READY   STATUS    RESTARTS   AGE
lee1   1/1     Running   0          2s
[root@master pod]# kubectl describe  pods
Name:             lee1
Namespace:        default
Priority:         0
Service Account:  default
Node:             node2/172.25.254.20
Start Time:       Sun, 29 Mar 2026 15:15:50 +0800
Labels:           run=lee1
Annotations:      <none>
Status:           Running
IP:               10.244.2.23
IPs:
  IP:  10.244.2.23


[root@master pod]# kubectl get pods
NAME   READY   STATUS    RESTARTS   AGE
lee1   1/1     Running   0          78s
[root@master pod]# kubectl get pods  -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
lee1   1/1     Running   0          82s   10.244.2.23   node2   <none>           <none>


[root@master pod]# kubectl delete -f 1test.yml
pod "lee1" deleted from default namespace

运行多个容器

复制代码
[root@master pod]# cp 1test.yml  2test.yml
[root@master pod]# vim 2test.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: lee1
  name: lee1
spec:
  containers:
  - image: myapp:v1
    name: myappv1
  - image: busybox:latest
    name: busybox
    command:
      - /bin/sh
      - -c
      - sleep 20000
      
[root@master pod]# kubectl apply -f 2test.yml
pod/lee1 created
[root@master pod]# kubectl get pods
NAME   READY   STATUS    RESTARTS   AGE
lee1   2/2     Running   0          4s
[root@master pod]# kubectl delete -f 2test.yml  --force

理解pod间的网络整合

复制代码
[root@master pod]# cp 2test.yml 3test.yml
[root@master pod]# vim 3test.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: lee1
  name: lee1
spec:
  containers:
  - image: myapp:v1
    name: myappv1
  - image: busyboxplus:latest
    name: busybox
    command:
      - /bin/sh
      - -c
      - sleep 20000

[root@master pod]# kubectl apply -f 3test.yml
pod/lee1 created
[root@master pod]# kubectl get pods
NAME   READY   STATUS    RESTARTS   AGE
lee1   2/2     Running   0          17s

[root@master pod]# kubectl exec -it pods/lee1 -c busybox -- /bin/sh
[ root@lee1:/ ]$ curl localhost
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

端口映射

复制代码
[root@master pod]# cp 1test.yml  4test.yml
[root@master pod]# vim 4test.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: lee1
  name: lee1
spec:
  containers:
  - image: myapp:v1
    name: myappv1
    ports:
    - name: webport
      containerPort: 80
      hostPort: 80
      protocol: TCP
      
[root@master pod]#  kubectl delete pod lee1
[root@master pod]# kubectl apply -f 4test.yml
pod/lee1 created
[root@master pod]# kubectl get pods  -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
lee1   1/1     Running   0          7s    10.244.2.31   node2   <none>           <none>


[root@master pod]# curl  172.25.254.20
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

选择运行节点

复制代码
[root@master pod]# cp 4test.yml 5test.yml
[root@master pod]# vim 5test.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: lee1
  name: lee1
spec:
  nodeSelector:
    kubernetes.io/hostname: node1
  containers:
  - image: myapp:v1
    name: myappv1
    ports:
    - name: webport
      containerPort: 80
      hostPort: 80
      protocol: TCP
[root@master pod]# kubectl apply -f 5test.yml
pod/lee1 created
[root@master pod]# kubectl get pods  -o wide
NAME   READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
lee1   1/1     Running   0          5s    10.244.1.14   node1   <none>           <none>

共享宿主机网络

复制代码
[root@master pod]# cp  5test.yml  6test.yml
[root@master pod]# vim 6test.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: lee1
  name: lee1
spec:
  hostNetwork: true
  nodeSelector:
    kubernetes.io/hostname: node1
  containers:
  - image: busybox:latest
    name:  busybox
    command:
      - /bin/sh
      - -c
      - sleep 1000

[root@master pod]# kubectl apply -f 6test.yml
pod/lee1 created
[root@master pod]# kubectl exec -it pods/lee1 -c  busybox -- /bin/sh
/ #
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq qlen 1000
    link/ether 00:0c:29:67:23:76 brd ff:ff:ff:ff:ff:ff
    inet 172.25.254.10/24 brd 172.25.254.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::7a35:2bf3:8ff4:9419/64 scope link noprefixroute
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue
    link/ether 3a:0c:33:9f:d9:36 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
       valid_lft forever preferred_lft forever
4: flannel.1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1450 qdisc noqueue
    link/ether 06:c7:70:fe:6f:e6 brd ff:ff:ff:ff:ff:ff
    inet 10.244.1.0/32 scope global flannel.1
       valid_lft forever preferred_lft forever
    inet6 fe80::4c7:70ff:fefe:6fe6/64 scope link
       valid_lft forever preferred_lft forever
5: cni0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue qlen 1000
    link/ether 7a:22:fc:84:3f:e2 brd ff:ff:ff:ff:ff:ff
    inet 10.244.1.1/24 brd 10.244.1.255 scope global cni0
       valid_lft forever preferred_lft forever
    inet6 fe80::7822:fcff:fe84:3fe2/64 scope link
       valid_lft forever preferred_lft forever
/ #

五.pod的生命周期

1.init 容器

复制代码
[root@master pod]# cp 1test.yml  init.yml
[root@master pod]# vim init.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: lee1
  name: lee1
spec:
  initContainers:
  - name: init-myservice
    image: busybox
    command: ["sh","-c","until test -e /testfile;do echo wating for myservice; sleep 2;done"]
  containers:
  - image: myapp:v1
    name: myappv1

[root@master pod]# kubectl apply -f init.yml
pod/lee1 created

[root@master pod]# watch -n 1 kubectl get pods		#监控命令
NAME   READY   STATUS     RESTARTS   AGE
lee1   0/1     Init:0/1   0          3s

	
[root@master pod]# kubectl exec -it pods/lee1 -c init-myservice -- /bin/sh
/ #
/ #
[root@master pod]# kubectl exec -it pods/lee1 -c init-myservice -- /bin/sh
/ #
/ # touch  /testfile
/ # command terminated with exit code 137
[root@master pod]# kubectl get pods
NAME   READY   STATUS    RESTARTS   AGE
lee1   1/1     Running   0          2m32s

2.livenessprobe(存活探针)

复制代码
[root@master pod]# kubectl create deployment webcluster --image myapp:v1 --replicas 1 --
[root@master pod]# kubectl create deployment webcluster --image myapp:v1 --replicas 1 --dry-run=client -o yaml > liveness.yml

[root@master pod]# kubectl expose deployment webcluster --port 80 --target-port 80  --dry-run=client -o yaml >> liveness.yml


[root@master pod]# kubectl delete -f  liveness.yml
deployment.apps "webcluster" deleted from default namespace
Error from server (NotFound): error when deleting "liveness.yml": services "webcluster" not found

[root@master pod]# kubectl apply -f liveness.yml
deployment.apps/webcluster created
service/webcluster created

#测试liveness
[root@master pod]# watch -n 1 "kubectl get pods  ;kubectl describe svc webcluster | tail -n 10"
NAME                          READY   STATUS    RESTARTS     AGE
webcluster-584fddd575-4ttz9   1/1     Running   3 (2s ago)   92s
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.105.47.234
IPs:                      10.105.47.234
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:                10.244.2.36:80
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>

[root@master pod]# kubectl exec -it pods/webcluster-7fd94cc55b-pgdx6  -c myapp -- /bin/sh
/ # nginx -s stop
2026/03/29 08:50:02 [notice] 59#59: signal process started
/ # command terminated with exit code 137

3.ReadinessProbe

复制代码
[root@master pod]# cp liveness.yml ReadinessProbe.yml
[root@master pod]# vim ReadinessProbe.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webcluster
  name: webcluster
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webcluster
  template:
    metadata:
      labels:
        app: webcluster
    spec:
      containers:
      - image: myapp:v1
        name: myapp
        readinessProbe:
          httpGet:
            path: /test.html
            port: 80
          initialDelaySeconds: 1
          periodSeconds: 3
          timeoutSeconds: 1

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webcluster
  name: webcluster
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webcluster


[root@master pod]# kubectl apply -f ReadinessProbe.yml
deployment.apps/webcluster configured
service/webcluster unchanged

#监控
[root@master pod]# watch -n 1 "kubectl get pods  ;kubectl describe svc webcluster | tail -n 10"
6

NAME                          READY   STATUS    RESTARTS   AGE
webcluster-6bc85dfc84-zk4mn   0/1     Running   0          7s
IP Family Policy:         SingleStack
IP Families:              IPv4
IP:                       10.107.142.60
IPs:                      10.107.142.60
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
Endpoints:
Session Affinity:         None
Internal Traffic Policy:  Cluster
Events:                   <none>



[root@master pod]# kubectl exec -it pods/webcluster-6bc85dfc84-zk4mn   -c myapp -- /bin/sh
/ # echo timinglee > /usr/share/nginx/html/test.html
/ # rm -fr /usr/share/nginx/html/test.html
/ # echo timinglee > /usr/share/nginx/html/test.html
/ #

kubectl exec -it pods/webcluster-77c87d9946-crqf8  -c myapp -- /bin/sh
相关推荐
IMPYLH2 小时前
Linux 的 env 命令
linux·运维·服务器·数据库
桌面运维家2 小时前
Nginx服务器安全:高级访问控制与流量清洗实战
服务器·nginx·安全
抠脚学代码2 小时前
Linux开发--> UBoot学习
linux·学习·uboot
奇妙之二进制2 小时前
后端常见分层模型
linux·服务器
拾贰_C2 小时前
【Ubuntu | Nvidia 】nvidia 驱动安装
linux·运维·ubuntu
zzzsde2 小时前
【Linux】EXT文件系统(2)
linux·运维·服务器
艾莉丝努力练剑2 小时前
【QT】QT快捷键整理
linux·运维·服务器·开发语言·图像处理·人工智能·qt
硅基导游2 小时前
bpf监控某个应用里各线程锁的申请得到及释放时间
服务器·互斥锁·性能监控
IMPYLH2 小时前
Linux 的 expand 命令
linux·运维·服务器