k8s Controllers

1. Deployment

A Harbor registry (bwmis.org) is already set up.

scp ca.crt root@172.25.250.100:/etc/docker/certs.d/bwmis.org/

scp ca.crt root@172.25.250.110:/etc/docker/certs.d/bwmis.org/

scp ca.crt root@172.25.250.120:/etc/docker/certs.d/bwmis.org/ ## distribute the registry certificate to all nodes

## Disable swap on all nodes
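The swap commands themselves are not shown here; a typical way to disable swap on each node looks like this:

swapoff -a                          # turn swap off immediately
sed -i '/swap/s/^/#/' /etc/fstab    # comment out the swap entry so it stays off after reboot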

[root@k8s-master ~]# cd /etc/yum.repos.d/

[root@k8s-master yum.repos.d]# cat k8s.repo
[k8s]
name=k8s
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.30/rpm/
gpgcheck=0

[root@k8s-master yum.repos.d]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
172.25.250.200 node1 bwmis.org harbor
172.25.250.120 k8s-node2
172.25.250.110 k8s-node1
172.25.250.100 k8s-master

Install Docker on all nodes:

[root@k8s-master yum.repos.d]# cat docker.repo
[docker]
name=docker-ce
baseurl=https://mirrors.aliyun.com/docker-ce/linux/rhel/9/x86_64/stable/
gpgcheck=0

Configure Docker's cgroup driver and registry settings on all nodes:

[root@k8s-master ~]# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["https://bwmis.org"],
  "insecure-registries" : ["172.25.250.100:5000"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
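After editing daemon.json, Docker must be restarted on every node for the systemd cgroup driver to take effect (a standard step, not shown in the original):

[root@k8s-master ~]# systemctl restart docker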
On all three k8s nodes, install the Kubernetes packages:

[root@k8s-master yum.repos.d]# dnf install kubelet-1.30.0 kubeadm-1.30.0 kubectl-1.30.0 -y

[root@k8s-master yum.repos.d]# echo "source <(kubectl completion bash)" >> ~/.bashrc

[root@k8s-master yum.repos.d]# source ~/.bashrc

Install cri-dockerd on all nodes. Download the packages:

cri-dockerd-0.3.14-3.el8.x86_64.rpm libcgroup-0.41-19.el8.x86_64.rpm

[root@k8s-master yum.repos.d]# vim /lib/systemd/system/cri-docker.service #point the pause image at the local registry
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=bwmis.org/k8s/pause:3.9

[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl start cri-docker
[root@k8s-master ~]# ll /var/run/cri-dockerd.sock
[root@k8s-master ~]# kubeadm config images pull \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.30.0 \
  --cri-socket=unix:///var/run/cri-dockerd.sock
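To preview which images kubeadm will pull before mirroring them into Harbor (an added check, not in the original):

[root@k8s-master ~]# kubeadm config images list --kubernetes-version v1.30.0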

# Tag the images and push them to the Harbor registry

[root@k8s-master ~]# docker images | awk '/google/{ print $1":"$2}' \
  | awk -F "/" '{system("docker tag "$0" bwmis.org/k8s/"$3)}'

[root@k8s-master ~]# docker images | awk '/k8s/{system("docker push "$1":"$2)}'

Cluster initialization

# Run the init command

[root@k8s-master ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 \
  --image-repository bwmis.org/k8s \
  --kubernetes-version v1.30.0 \
  --cri-socket=unix:///var/run/cri-dockerd.sock

[root@k8s-master ~]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile

[root@k8s-master ~]# source ~/.bash_profile

[root@k8s-master ~]# kubectl get node

Install the flannel network plugin:

[root@k8s-master ~]# wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

[root@k8s-master ~]# docker tag flannel/flannel-cni-plugin:v1.5.1-flannel1 bwmis.org/flannel/flannel-cni-plugin:v1.5.1-flannel1

[root@k8s-master ~]# docker tag flannel/flannel:v0.25.5 bwmis.org/flannel/flannel:v0.25.5

[root@k8s-master ~]# docker push bwmis.org/flannel/flannel:v0.25.5

[root@k8s-master ~]# docker push bwmis.org/flannel/flannel-cni-plugin:v1.5.1-flannel1

[root@k8s-master ~]# vim kube-flannel.yml #point the image references at the local registry

[root@k8s-master ~]# grep -n image kube-flannel.yml

146: image: bwmis.org/flannel/flannel:v0.25.5

173: image: bwmis.org/flannel/flannel-cni-plugin:v1.5.1-flannel1

184: image: bwmis.org/flannel/flannel:v0.25.5

[root@k8s-master ~]# kubectl apply -f kube-flannel.yml

Both of the following steps must be performed on node1 and node2:

[root@k8s-node1 ~]#  kubeadm join 172.25.250.129:6443 --token fzvf0z.62m59mtnthvokxrb         --discovery-token-ca-cert-hash sha256:6e49a5cbed089f961eefd8da3e03fd126f59e21b2d5e616901336b5a71c5ffd9 --cri-socket=unix:///var/run/cri-dockerd.sock
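If the join token has expired (tokens are valid for 24 hours by default), a fresh join command can be generated on the master:

[root@k8s-master ~]# kubeadm token create --print-join-command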

[root@k8s-node2 yum.repos.d]# vim /lib/systemd/system/cri-docker.service

ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=bwmis.org/k8s/pause:3.9

 [root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/pause:3.9

[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/coredns:v1.11.1
[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/etcd:3.5.12-0
[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/kube-proxy:v1.30.0
[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/kube-controller-manager:v1.30.0
[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/kube-scheduler:v1.30.0
[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/k8s/kube-apiserver:v1.30.0
[root@k8s-node2 yum.repos.d]# docker image pull bwmis.org/flannel/flannel:v0.25.5
[root@k8s-node2 yum.repos.d]# docker image pull  bwmis.org/flannel/flannel-cni-plugin:v1.5.1-flannel1

[root@k8s-node2 yum.repos.d]# systemctl restart cri-docker.service

[root@k8s-node2 yum.repos.d]# systemctl restart kubelet.service

Result: kubectl get node on the master should now show all three nodes as Ready.

Kubernetes Resource Management

[root@k8s-master ~]# kubectl version #show the client and server versions

Client Version: v1.30.0

Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3

Server Version: v1.30.0

[root@k8s-master ~]# kubectl cluster-info #show cluster info

Kubernetes control plane is running at https://172.25.250.129:6443

CoreDNS is running at https://172.25.250.129:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

[root@k8s-master ~]# kubectl create deployment web --image nginx --replicas 2 #create a deployment named web that runs 2 pods

deployment.apps/web created

[root@k8s-master ~]# kubectl get deployments.apps #list deployments

NAME READY UP-TO-DATE AVAILABLE AGE

web 2/2 2 2 69s

[root@k8s-master ~]# kubectl explain deployment #show documentation for the deployment resource

[root@k8s-master ~]# kubectl explain deployment.spec #show documentation for its spec fields

[root@k8s-master ~]# kubectl edit deployments.apps web #edit the deployment in place; replicas was changed to 3

deployment.apps/web edited

[root@k8s-master ~]# kubectl get deployments.apps

NAME READY UP-TO-DATE AVAILABLE AGE

web 3/3 3 3 5m26s

[root@k8s-master ~]# kubectl patch deployments.apps web -p '{"spec":{"replicas":4}}' #change the configuration with a patch

deployment.apps/web patched

[root@k8s-master ~]# kubectl get deployments.apps

NAME READY UP-TO-DATE AVAILABLE AGE

web 4/4 4 4 10m

[root@k8s-master ~]# kubectl delete deployments.apps web #delete the resource

deployment.apps "web" deleted

[root@k8s-master ~]# kubectl get deployments.apps

No resources found in default namespace.

[root@k8s-master ~]# kubectl run testpod --image nginx #run a pod

pod/testpod created

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

testpod 1/1 Running 0 10s

# Expose a port

[root@k8s-master ~]# kubectl expose pod testpod --port 80 --target-port 80

[root@k8s-master ~]# kubectl get service

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE

kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 23h

testpod ClusterIP 10.101.57.250 <none> 80/TCP 10h

[root@k8s-master ~]# kubectl describe pods testpod #show detailed information about the resource

[root@k8s-master ~]# kubectl logs pods/testpod #show the pod's logs

/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration

/docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/

/docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh

10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf

10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf

/docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh

/docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh

/docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh

/docker-entrypoint.sh: Configuration complete; ready for start up

2024/09/04 02:05:34 [notice] 1#1: using the "epoll" event method

2024/09/04 02:05:34 [notice] 1#1: nginx/1.27.1

2024/09/04 02:05:34 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14)

2024/09/04 02:05:34 [notice] 1#1: OS: Linux 5.14.0-427.13.1.el9_4.x86_64

2024/09/04 02:05:34 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1073741816:1073741816

2024/09/04 02:05:34 [notice] 1#1: start worker processes

2024/09/04 02:05:34 [notice] 1#1: start worker process 29

2024/09/04 02:05:34 [notice] 1#1: start worker process 30

10.244.0.0 - - [04/Sep/2024:02:06:33 +0000] "GET / HTTP/1.1" 200 615 "-" "curl/7.76.1" "-"

Run an interactive pod:

[root@k8s-master ~]# kubectl run -it testpod --image bwmis.org/library/busybox:latest

If you don't see a command prompt, try pressing enter.

/ #

/ # #Ctrl+P then Ctrl+Q detaches without stopping the pod

/ #

Run a non-interactive pod:

[root@k8s-master ~]# kubectl run nginx --image nginx

pod/nginx created

Attach to an already running container that has an interactive shell:

[root@k8s-master ~]# kubectl attach pods/testpod -it

If you don't see a command prompt, try pressing enter.

/ #

/ #

Run a command inside a running pod:

[root@k8s-master ~]# kubectl exec -it pods/nginx /bin/bash

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@nginx:/#

root@nginx:/#

# Copy a file from the host into the pod

[root@k8s-master ~]# kubectl cp anaconda-ks.cfg nginx:/

[root@k8s-master ~]# kubectl exec -it pods/nginx /bin/bash

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@nginx:/# ls

anaconda-ks.cfg bin boot dev docker-entrypoint.d docker-entrypoint.sh etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var

root@nginx:/#

# Copy a file from the pod back to the local host

[root@k8s-master ~]# kubectl cp nginx:/anaconda-ks.cfg anaconda-ks.cfg

tar: Removing leading `/' from member names

Advanced command demo

[root@k8s-master ~]# kubectl create deployment --image nginx web --dry-run=client -o yaml > web.yml

[root@k8s-master ~]# vim web.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - image: nginx
        name: nginx

[root@k8s-master ~]# kubectl apply -f web.yml

deployment.apps/web created

[root@k8s-master ~]# kubectl get deployments.apps

NAME READY UP-TO-DATE AVAILABLE AGE

web 2/2 2 2 9s

[root@k8s-master ~]# kubectl run nginx --image nginx

pod/nginx created

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

nginx 1/1 Running 0 11s run=nginx

testpod 1/1 Running 2 (9m54s ago) 13m run=testpod

[root@k8s-master ~]# kubectl label pods nginx app=lee

pod/nginx labeled

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

nginx 1/1 Running 0 67s app=lee,run=nginx

testpod 1/1 Running 2 (10m ago) 14m run=testpod

[root@k8s-master ~]# kubectl label pods nginx app=webcluster --overwrite #overwrite the label

pod/nginx labeled

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

nginx 1/1 Running 0 117s app=webcluster,run=nginx

testpod 1/1 Running 2 (11m ago) 15m run=testpod

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

web-7c56dcdb9b-4wpdd 1/1 Running 0 56m app=web,pod-template-hash=7c56dcdb9b

web-7c56dcdb9b-pbtb6 1/1 Running 0 56m app=web,pod-template-hash=7c56dcdb9b

[root@k8s-master ~]# kubectl label pods web-7c56dcdb9b-4wpdd app- #remove the app label; the deployment then creates a replacement pod matching its selector

pod/web-7c56dcdb9b-4wpdd unlabeled

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

nginx 1/1 Running 0 115m app=web,run=nginx

testpod 1/1 Running 2 (125m ago) 128m run=testpod

web-7c56dcdb9b-4wpdd 1/1 Running 0 58m pod-template-hash=7c56dcdb9b

web-7c56dcdb9b-pbtb6 1/1 Running 0 58m app=web,pod-template-hash=7c56dcdb9b

web-7c56dcdb9b-sj4tt 1/1 Running 0 84s app=web,pod-template-hash=7c56dcdb9b

2.1 Creating a standalone pod

[root@k8s-master ~]# kubectl get pods #list all pods

No resources found in default namespace

[root@k8s-master ~]# kubectl run bwmis --image nginx #create a pod named bwmis

pod/bwmis created

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

bwmis 1/1 Running 0 12s

[root@k8s-master ~]# kubectl get pods -o wide #show more detailed pod information

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

bwmis 1/1 Running 0 23s 10.244.2.215 k8s-node2

2.2 Managing pods with a controller (recommended)

[root@k8s-master ~]# kubectl create deployment bwmis --image nginx #create a deployment, which runs the pod automatically

deployment.apps/bwmis created

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

bwmis-758c4fcfd4-tjtrw 1/1 Running 0 8s

[root@k8s-master ~]# kubectl scale deployment bwmis --replicas 6 #scale bwmis up

deployment.apps/bwmis scaled

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

bwmis-758c4fcfd4-c7xdv 0/1 ContainerCreating 0 5s

bwmis-758c4fcfd4-dlb6c 1/1 Running 0 5s

bwmis-758c4fcfd4-q9k5q 0/1 ContainerCreating 0 5s

bwmis-758c4fcfd4-tjtrw 1/1 Running 0 35s

bwmis-758c4fcfd4-xr5ph 0/1 ContainerCreating 0 5s

bwmis-758c4fcfd4-zr84x 1/1 Running 0 5s

[root@k8s-master ~]# kubectl scale deployment bwmis --replicas 2 #scale bwmis down

deployment.apps/bwmis scaled

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

bwmis-758c4fcfd4-tjtrw 1/1 Running 0 2m19s

bwmis-758c4fcfd4-zr84x 1/1 Running 0 109s

2.3 Updating application versions

[root@k8s-master ~]# kubectl create deployment bwmis --image timinglee/myapp:v1 --replicas 2 #create pods via a deployment

deployment.apps/bwmis created

[root@k8s-master ~]# kubectl expose deployment bwmis --port 80 --target-port 80 #expose the port

service/bwmis exposed

[root@k8s-master ~]# kubectl get pods --selector=app=bwmis

NAME READY STATUS RESTARTS AGE

bwmis-c49d689bd-jhkd8 1/1 Running 0 85s

bwmis-c49d689bd-kt9ph 1/1 Running 0 85s

[root@k8s-master ~]# kubectl get services

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE

bwmis ClusterIP 10.108.154.100 <none> 80/TCP 3m1s

kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 33h

[root@k8s-master ~]# curl 10.108.154.100 #access the service

Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

[root@k8s-master ~]# curl 10.108.154.100

Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

[root@k8s-master ~]# kubectl rollout history deployment bwmis #view rollout history

deployment.apps/bwmis

REVISION CHANGE-CAUSE

1 <none>
[root@k8s-master ~]# kubectl get deployment bwmis -o yaml

[root@k8s-master ~]# kubectl set image deployments/bwmis myapp=timinglee/myapp:v2 #update the deployment's image

deployment.apps/bwmis image updated

[root@k8s-master ~]# kubectl rollout history deployment bwmis #view rollout history again

deployment.apps/bwmis

REVISION CHANGE-CAUSE

1 <none>

2 <none>

[root@k8s-master ~]# curl 10.108.154.100 #test the served content

Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>

[root@k8s-master ~]# kubectl rollout undo deployment bwmis --to-revision 1 #roll back to revision 1

deployment.apps/bwmis rolled back

[root@k8s-master ~]# curl 10.108.154.100

Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

2.4 Deploying applications with YAML files

[root@k8s-master ~]# kubectl explain pod.spec.containers #get field documentation

[root@k8s-master ~]# kubectl run bwmis --image timinglee/myapp:v1 --dry-run=client -o yaml > pod.yml #generate a YAML template

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: bwmis                  # pod label
  name: bwmis                   # pod name
spec:
  containers:
  - image: timinglee/myapp:v1   # container image
    name: bwmis                 # container name
    resources: {}
  dnsPolicy: ClusterFirst
  restartPolicy: Always
status: {}

Running a pod with multiple containers

[root@k8s-master ~]# kubectl run bwmis --image nginx:latest --dry-run=client -o yaml > pod.yml

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  containers:
  - image: nginx:latest
    name: bwmis
  - image: nginx:latest
    name: bwmis2

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis configured

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

bwmis 1/2 CrashLoopBackOff 3 (27s ago) 93s #both containers run nginx and try to bind port 80 in the pod's shared network namespace, so the second keeps crashing
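To confirm the cause, check the second container's log (the container name comes from the manifest above):

[root@k8s-master ~]# kubectl logs bwmis -c bwmis2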

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  containers:
  - image: nginx:latest
    name: bwmis
  - image: busybox:latest
    name: busybox
    command: ["/bin/sh","-c","sleep 100000"]

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis created

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

Port mapping (hostPort)

[root@k8s-master ~]# kubectl run bwmis --image timinglee/myapp:v1 --dry-run=client -o yaml > pod.yml

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  containers:
  - image: timinglee/myapp:v1
    name: bwmis
    ports:
    - name: http
      containerPort: 80
      hostPort: 80
      protocol: TCP

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis created

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

bwmis 1/1 Running 0 9s 10.244.1.96 k8s-node1 <none> <none>

[root@k8s-master ~]# curl k8s-node1

Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

Setting environment variables

[root@k8s-master ~]# kubectl run bwmis --image busybox:latest --dry-run=client -o yaml >pod.yml

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  containers:
  - image: busybox:latest
    name: bwmis
    command: ["/bin/sh","-c","echo $NAME;sleep 300000"]
    env:
    - name: NAME
      value: bwmis

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis created

[root@k8s-master ~]# kubectl logs pods/bwmis bwmis

bwmis

Resource limits

[root@k8s-master ~]# kubectl run bwmis --image timinglee/myapp:v1 --dry-run=client -o yaml >pod.yml

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  containers:
  - image: timinglee/myapp:v1
    name: bwmis
    resources:
      limits:
        cpu: 500m
        memory: 100M
      requests:
        cpu: 500m
        memory: 100M

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis created

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

bwmis 1/1 Running 0 9s

[root@k8s-master ~]# kubectl describe pods bwmis
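Because requests and limits are equal for every container, this pod is assigned the Guaranteed QoS class; this can be verified directly (an added check, not in the original):

[root@k8s-master ~]# kubectl get pod bwmis -o jsonpath='{.status.qosClass}'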

Container restart policy

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  restartPolicy: Always
  containers:
  - image: timinglee/myapp:v1
    name: bwmis

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis created

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

bwmis 1/1 Running 0 20s 10.244.2.230 k8s-node2 <none> <none>

[root@k8s-node2 ~]# docker rm -f d4a5e0eaa84f #try to force-remove the pod's container so kubelet recreates it; this ID was stale

Error response from daemon: No such container: d4a5e0eaa84f
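A sketch of the intended test, looking up the live container ID on the node first (the grep pattern is illustrative):

[root@k8s-node2 ~]# docker ps | grep bwmis     # find the running container's ID
[root@k8s-node2 ~]# docker rm -f <container-id>

Because restartPolicy is Always, kubelet immediately starts a replacement container and the pod's RESTARTS count goes up.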

Scheduling a pod to a specific node (nodeSelector):

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: bwmis
  name: bwmis
spec:
  nodeSelector:
    kubernetes.io/hostname: k8s-node1
  restartPolicy: Always
  containers:
  - image: timinglee/myapp:v1
    name: bwmis

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/bwmis created

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

bwmis 1/1 Running 0 12s 10.244.1.98 k8s-node1 <none> <none>
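The kubernetes.io/hostname label used above is set automatically on every node; all node labels can be listed with:

[root@k8s-master ~]# kubectl get nodes --show-labels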

hostNetwork example

[root@k8s-master ~]# kubectl run test --image busybox:latest --dry-run=client -o yaml > pod.yml

[root@k8s-master ~]# vim pod.yml

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test
  name: test
spec:
  hostNetwork: true        # share the node's network namespace
  restartPolicy: Always
  containers:
  - image: busybox:latest
    name: test
    command: ["/bin/sh","-c","sleep 10000"]

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/test created

[root@k8s-master ~]# kubectl exec -it pods/test -c test -- /bin/sh

/ #

/ # ifconfig

br-85ce9c0aa817 Link encap:Ethernet HWaddr 02:42:1D:50:33:65

inet addr:172.19.0.1 Bcast:172.19.255.255 Mask:255.255.0.0

UP BROADCAST MULTICAST MTU:1500 Metric:1

RX packets:0 errors:0 dropped:0 overruns:0 frame:0

TX packets:0 errors:0 dropped:0 overruns:0 carrier:0

collisions:0 txqueuelen:0

RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)

br-c5749edada50 Link encap:Ethernet HWaddr 02:42:D9:24:80:04

inet addr:172.18.0.1 Bcast:172.18.255.255 Mask:255.255.0.0

UP BROADCAST MULTICAST MTU:1500 Metric:1

RX packets:0 errors:0 dropped:0 overruns:0 frame:0

TX packets:0 errors:0 dropped:0 overruns:0 carrier:0

collisions:0 txqueuelen:0

RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)

cni0 Link encap:Ethernet HWaddr 42:91:DF:79:E6:07

inet addr:10.244.1.1 Bcast:10.244.1.255 Mask:255.255.255.0

inet6 addr: fe80::4091:dfff:fe79:e607/64 Scope:Link

UP BROADCAST MULTICAST MTU:1500 Metric:1

RX packets:310 errors:0 dropped:0 overruns:0 frame:0

TX packets:67 errors:0 dropped:0 overruns:0 carrier:0

collisions:0 txqueuelen:1000

RX bytes:20030 (19.5 KiB) TX bytes:6513 (6.3 KiB)

docker0 Link encap:Ethernet HWaddr 02:42:13:35:EC:C9

inet addr:172.17.0.1 Bcast:172.17.255.255 Mask:255.255.0.0

UP BROADCAST MULTICAST MTU:1500 Metric:1

RX packets:0 errors:0 dropped:0 overruns:0 frame:0

TX packets:0 errors:0 dropped:0 overruns:0 carrier:0

collisions:0 txqueuelen:0

RX bytes:0 (0.0 B) TX bytes:0 (0.0 B)

ens160 Link encap:Ethernet HWaddr 00:0C:29:1F:C2:F0

inet addr:172.25.250.131 Bcast:172.25.250.255 Mask:255.255.255.0

inet6 addr: fe80::20c:29ff:fe1f:c2f0/64 Scope:Link

UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1

RX packets:98774 errors:0 dropped:0 overruns:0 frame:0

TX packets:71709 errors:0 dropped:0 overruns:0 carrier:0

collisions:0 txqueuelen:1000

RX bytes:74282198 (70.8 MiB) TX bytes:9029221 (8.6 MiB)

flannel.1 Link encap:Ethernet HWaddr 22:6C:24:0E:85:2E

inet addr:10.244.1.0 Bcast:0.0.0.0 Mask:255.255.255.255

inet6 addr: fe80::206c:24ff:fe0e:852e/64 Scope:Link

UP BROADCAST RUNNING MULTICAST MTU:1450 Metric:1

RX packets:7 errors:0 dropped:0 overruns:0 frame:0

TX packets:5 errors:0 dropped:26 overruns:0 carrier:0

collisions:0 txqueuelen:0

RX bytes:450 (450.0 B) TX bytes:569 (569.0 B)

lo Link encap:Local Loopback

inet addr:127.0.0.1 Mask:255.0.0.0

inet6 addr: ::1/128 Scope:Host

UP LOOPBACK RUNNING MTU:65536 Metric:1

RX packets:772 errors:0 dropped:0 overruns:0 frame:0

TX packets:772 errors:0 dropped:0 overruns:0 carrier:0

collisions:0 txqueuelen:1000

RX bytes:75572 (73.8 KiB) TX bytes:75572 (73.8 KiB)

/ #

Because hostNetwork is true, the pod sees the node's real interfaces (ens160, flannel.1, cni0, docker0, and so on) instead of a pod-private eth0.

Init container example

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    run: test
  name: test
spec:
  containers:
  - image: timinglee/myapp:v1
    name: test
  initContainers:
  - name: init-myservice
    image: busybox:latest
    command: ["sh","-c","until test -e /testfile;do echo waiting for myservice;sleep 2;done"]

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/test created

[root@k8s-master ~]# kubectl logs pods/test init-myservice

waiting for myservice

waiting for myservice

waiting for myservice

waiting for myservice

[root@k8s-master ~]# kubectl exec pods/test -c init-myservice -- /bin/sh -c "echo hello"

hello
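The init container loops until /testfile exists, so the main container never starts as shown. A sketch of unblocking it by creating the file inside the init container (the path comes from the command above):

[root@k8s-master ~]# kubectl exec pods/test -c init-myservice -- touch /testfile
[root@k8s-master ~]# kubectl get pods    # test should now reach 1/1 Running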

Liveness probe example

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test
  name: test
spec:
  containers:
  - image: timinglee/myapp:v1
    name: test
    livenessProbe:
      tcpSocket:               # check that the port accepts TCP connections
        port: 8080
      initialDelaySeconds: 3   # seconds to wait after container start before probing (default 0)
      periodSeconds: 1         # interval between probes (default 10s)
      timeoutSeconds: 1        # how long to wait for a response (default 1s)

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/test created

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

test 1/1 Running 2 (2s ago) 12s

[root@k8s-master ~]# kubectl describe pods

Warning Unhealthy 7s (x9 over 19s) kubelet Liveness probe failed: dial tcp 10.244.1.99:8080: connect: connection refused
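myapp listens on port 80, so the probe against 8080 keeps failing and kubelet restarts the container. A fixed probe would simply point at the serving port:

    livenessProbe:
      tcpSocket:
        port: 80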

Readiness probe example

apiVersion: v1
kind: Pod
metadata:
  labels:
    run: test
  name: test
spec:
  containers:
  - image: timinglee/myapp:v1
    name: test
    readinessProbe:
      httpGet:
        path: /test.html
        port: 80
      initialDelaySeconds: 3
      periodSeconds: 1
      timeoutSeconds: 1

[root@k8s-master ~]# kubectl apply -f pod.yml

pod/test created

[root@k8s-master ~]# kubectl expose pod test --port 80 --target-port 80

service/test exposed

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

test 0/1 Running 0 36s

[root@k8s-master ~]# kubectl describe pods test

Warning Unhealthy 22s (x22 over 43s) kubelet Readiness probe failed: HTTP probe failed with statuscode: 404

[root@k8s-master ~]# kubectl describe services test

[root@k8s-master ~]# kubectl exec pods/test -c test -- /bin/sh -c "echo 'test' > /usr/share/nginx/html/test.html"

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

test 1/1 Running 0 106s

The pod is added to the service's endpoints only once the readiness probe succeeds, so the port is effectively exposed only when the condition is met.

ReplicaSet controller

[root@k8s-master ~]# kubectl create deployment replicaset --image timinglee/myapp:v1 --dry-run=client -o yaml > replicaset.yml #generate the yaml file

[root@k8s-master ~]# vim replicaset.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: replicaset
  name: replicaset       # resource name; must be lowercase, uppercase causes an error
spec:
  replicas: 2            # maintain 2 pod replicas
  selector:              # how pods are matched
    matchLabels:         # match by labels
      app: replicaset    # match pods labeled app=replicaset
  template:              # template used to create replacement pods when replicas are missing
    metadata:
      labels:
        app: replicaset
    spec:
      containers:
      - image: timinglee/myapp:v1
        name: myapp

[root@k8s-master ~]# kubectl apply -f replicaset.yml

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

replicaset-79fc74b7db-dmsbj 1/1 Running 0 14s app=replicaset,pod-template-hash=79fc74b7db

replicaset-79fc74b7db-l225z 1/1 Running 0 14s app=replicaset,pod-template-hash=79fc74b7db

# The ReplicaSet matches pods by label

[root@k8s-master ~]# kubectl label pod replicaset-79fc74b7db-dmsbj app=bwmis --overwrite

pod/replicaset-79fc74b7db-dmsbj labeled

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

replicaset-79fc74b7db-2xks2 1/1 Running 0 2s app=replicaset,pod-template-hash=79fc74b7db

replicaset-79fc74b7db-dmsbj 1/1 Running 0 62s app=bwmis,pod-template-hash=79fc74b7db

replicaset-79fc74b7db-l225z 1/1 Running 0 62s app=replicaset,pod-template-hash=79fc74b7db

# Now remove the app label entirely

[root@k8s-master ~]# kubectl label pod replicaset-79fc74b7db-dmsbj app-

pod/replicaset-79fc74b7db-dmsbj unlabeled

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

replicaset-79fc74b7db-2xks2 1/1 Running 0 4m47s app=replicaset,pod-template-hash=79fc74b7db

replicaset-79fc74b7db-dmsbj 1/1 Running 0 5m47s pod-template-hash=79fc74b7db

replicaset-79fc74b7db-l225z 1/1 Running 0 5m47s app=replicaset,pod-template-hash=79fc74b7db

[root@k8s-master ~]# kubectl delete pods replicaset-79fc74b7db-dmsbj

pod "replicaset-79fc74b7db-dmsbj" deleted

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

replicaset-79fc74b7db-2xks2 1/1 Running 0 20m app=replicaset,pod-template-hash=79fc74b7db

replicaset-79fc74b7db-l225z 1/1 Running 0 21m app=replicaset,pod-template-hash=79fc74b7db

Deployment controller example

[root@k8s-master ~]# kubectl create deployment deployment --image timinglee/myapp:v1 --dry-run=client -o yaml > deployment.yml

[root@k8s-master ~]# vim deployment.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: deployment
  name: deployment
spec:
  replicas: 4
  selector:
    matchLabels:
      app: deployment
  template:
    metadata:
      labels:
        app: deployment
    spec:
      containers:
      - image: timinglee/myapp:v1
        name: myapp

[root@k8s-master ~]# kubectl apply -f deployment.yml

deployment.apps/deployment created

[root@k8s-master ~]# kubectl get pods --show-labels

NAME READY STATUS RESTARTS AGE LABELS

deployment-54cf4cbbff-46cc6 1/1 Running 0 12s app=deployment,pod-template-hash=54cf4cbbff

deployment-54cf4cbbff-49xx4 1/1 Running 0 12s app=deployment,pod-template-hash=54cf4cbbff

deployment-54cf4cbbff-964k6 1/1 Running 0 12s app=deployment,pod-template-hash=54cf4cbbff

deployment-54cf4cbbff-htgzz 1/1 Running 0 12s app=deployment,pod-template-hash=54cf4cbbff

[root@k8s-master ~]#

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

deployment-54cf4cbbff-46cc6 1/1 Running 0 14m 10.244.2.246 k8s-node2 <none> <none>

deployment-54cf4cbbff-49xx4 1/1 Running 0 14m 10.244.2.247 k8s-node2 <none> <none>

deployment-54cf4cbbff-964k6 1/1 Running 0 14m 10.244.1.114 k8s-node1 <none> <none>

deployment-54cf4cbbff-htgzz 1/1 Running 0 14m 10.244.1.115 k8s-node1 <none> <none>

[root@k8s-master ~]# curl 10.244.2.246

Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

[root@k8s-master ~]# kubectl describe deployments.apps deployment

Name: deployment

Namespace: default

CreationTimestamp: Thu, 05 Sep 2024 15:24:48 +0800

Labels: app=deployment

Annotations: deployment.kubernetes.io/revision: 1

Selector: app=deployment

Replicas: 4 desired | 4 updated | 4 total | 4 available | 0 unavailable

StrategyType: RollingUpdate

MinReadySeconds: 0

RollingUpdateStrategy: 25% max unavailable, 25% max surge

[root@k8s-master ~]# kubectl get pods -w #watch the rolling update (after the image in deployment.yml was changed to timinglee/myapp:v2 and re-applied)

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

deployment-6c9b5767d-4hht7 1/1 Running 0 10m 10.244.2.249 k8s-node2 <none> <none>

deployment-6c9b5767d-lhpl4 1/1 Running 0 10m 10.244.2.248 k8s-node2 <none> <none>

deployment-6c9b5767d-pxcqm 1/1 Running 0 10m 10.244.1.116 k8s-node1 <none> <none>

deployment-6c9b5767d-tpr8r 1/1 Running 0 10m 10.244.1.117 k8s-node1 <none> <none>

[root@k8s-master ~]# curl 10.244.2.249

Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>

Version rollback

[root@k8s-master ~]# vim deployment.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: deployment
  name: deployment
spec:
  replicas: 4
  selector:
    matchLabels:
      app: deployment
  template:
    metadata:
      labels:
        app: deployment
    spec:
      containers:
      - image: timinglee/myapp:v1
        name: myapp

[root@k8s-master ~]# kubectl apply -f deployment.yml

deployment.apps/deployment configured

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

deployment-54cf4cbbff-28s49 1/1 Running 0 34s 10.244.1.118 k8s-node1 <none> <none>

deployment-54cf4cbbff-bhzng 1/1 Running 0 34s 10.244.2.250 k8s-node2 <none> <none>

deployment-54cf4cbbff-hwv98 1/1 Running 0 32s 10.244.1.119 k8s-node1 <none> <none>

deployment-54cf4cbbff-lgzdd 1/1 Running 0 32s 10.244.2.251 k8s-node2 <none> <none>

[root@k8s-master ~]# curl 10.244.1.118

Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

[root@k8s-master ~]# vim deployment.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: deployment
  name: deployment
spec:
  minReadySeconds: 5        # a new pod must stay ready 5s before it counts as available
  replicas: 4
  strategy:
    rollingUpdate:
      maxSurge: 1           # at most one extra pod during the update
      maxUnavailable: 0     # never remove a running pod before its replacement is ready
  selector:
    matchLabels:
      app: deployment
  template:
    metadata:
      labels:
        app: deployment
    spec:
      containers:
      - image: timinglee/myapp:v1
        name: myapp
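A rollout under this strategy can be watched to completion with the standard status command (an added check, not in the original):

[root@k8s-master ~]# kubectl rollout status deployment deployment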

4.2.4 Pause and resume

[root@k8s-master ~]# kubectl rollout pause deployment deployment

deployment.apps/deployment paused

[root@k8s-master ~]# vim deployment.yml #while paused, add resource requests/limits (cpu: 500m, memory: 200Mi) to the pod template

[root@k8s-master ~]# kubectl apply -f deployment.yml

[root@k8s-master ~]# kubectl get pods

NAME READY STATUS RESTARTS AGE

deployment-57b889b99f-4pktj 1/1 Running 0 47s

deployment-57b889b99f-ds58w 1/1 Running 0 47s

deployment-57b889b99f-fhg5j 1/1 Running 0 47s

deployment-57b889b99f-rwwq5 0/1 Pending 0 47s

[root@k8s-master ~]# kubectl describe pod deployment-57b889b99f-4pktj

Name: deployment-57b889b99f-4pktj

Namespace: default

Priority: 0

Service Account: default

Node: k8s-node2/172.25.250.132

Start Time: Thu, 05 Sep 2024 18:38:16 +0800

Labels: app=deployment

pod-template-hash=57b889b99f

Annotations: <none>

Status: Running

IP: 10.244.2.5

IPs:

IP: 10.244.2.5

Controlled By: ReplicaSet/deployment-57b889b99f

Containers:

myapp:

Container ID: docker://a4d1f75c00f566a0225d3d4cba32f9b25bdf1bfb800e06cf5305e74f1342c434

Image: timinglee/myapp:v1

Image ID: docker://sha256:d4a5e0eaa84f28550cb9dd1bde4bfe63a93e3cf88886aa5dad52c9a75dd0e6a9

Port: <none>

Host Port: <none>

State: Running

Started: Thu, 05 Sep 2024 18:38:17 +0800

Ready: True

Restart Count: 0

Limits:

cpu: 500m

memory: 200Mi

Requests:

cpu: 500m

memory: 200Mi

Environment: <none>

Mounts:

/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-h5gmf (ro)

Conditions:

Type Status

PodReadyToStartContainers True

Initialized True

Ready True

ContainersReady True

PodScheduled True

Volumes:

kube-api-access-h5gmf:

Type: Projected (a volume that contains injected data from multiple sources)

TokenExpirationSeconds: 3607

ConfigMapName: kube-root-ca.crt

ConfigMapOptional: <nil>

DownwardAPI: true

QoS Class: Guaranteed

Node-Selectors: <none>

Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s

node.kubernetes.io/unreachable:NoExecute op=Exists for 300s

Events:

Type Reason Age From Message


Warning FailedScheduling 66s (x2 over 90s) default-scheduler 0/3 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/control-plane: }, 2 Insufficient cpu. preemption: 0/3 nodes are available: 1 Preemption is not helpful for scheduling, 2 No preemption victims found for incoming pod.

Normal Scheduled 64s default-scheduler Successfully assigned default/deployment-57b889b99f-4pktj to k8s-node2

Normal Pulled 63s kubelet Container image "timinglee/myapp:v1" already present on machine

Normal Created 63s kubelet Created container myapp

Normal Started 63s kubelet Started container myapp

[root@k8s-master ~]# kubectl rollout resume deployment deployment

deployment.apps/deployment resumed

[root@k8s-master ~]# kubectl rollout history deployment deployment

deployment.apps/deployment

REVISION CHANGE-CAUSE

1 <none>

[root@k8s-master ~]# kubectl delete -f deployment.yml #clean up the resources

deployment.apps "deployment" deleted

DaemonSet controller

[root@k8s-master ~]# vim daemonset-example.yml

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: daemonset-example
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      tolerations:           # tolerate the control-plane taint so the pod also runs on the master
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: nginx
        image: nginx:latest

[root@k8s-master ~]# kubectl apply -f daemonset-example.yml

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

daemonset-example-df666 1/1 Running 0 5m35s 10.244.2.8 k8s-node2 <none> <none>

daemonset-example-dqxbs 1/1 Running 0 6m57s 10.244.1.126 k8s-node1 <none> <none>

daemonset-example-kqldl 0/1 ImagePullBackOff 0 5m12s 10.244.0.27 k8s-master <none> <none>

[root@k8s-master ~]# vim /etc/docker/ #the master's pull of nginx:latest is failing; its Docker daemon configuration still needs adjusting

[root@k8s-master ~]# kubectl get pods -o wide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

daemonset-example-df666 1/1 Running 0 5m44s 10.244.2.8 k8s-node2 <none> <none>

daemonset-example-dqxbs 1/1 Running 0 7m6s 10.244.1.126 k8s-node1 <none> <none>

daemonset-example-kqldl 0/1 ImagePullBackOff 0 5m21s 10.244.0.27 k8s-master <none> <none>

Job controller

[root@k8s-master ~]# vim job.yml

apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 6        # run until 6 pods have completed successfully
  parallelism: 2        # run 2 pods at a time
  template:
    spec:
      containers:
      - name: pi
        image: perl:5.34.0
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4       # retry failed pods at most 4 times

[root@k8s-master ~]# kubectl apply -f job.yml

job.batch/pi created

[root@k8s-master ~]# kubectl describe jobs.batch

Name: pi

Namespace: default

Selector: batch.kubernetes.io/controller-uid=1bf2441c-ff55-47de-bab0-30ed2f4cfd7c

Labels: batch.kubernetes.io/controller-uid=1bf2441c-ff55-47de-bab0-30ed2f4cfd7c

batch.kubernetes.io/job-name=pi

controller-uid=1bf2441c-ff55-47de-bab0-30ed2f4cfd7c

job-name=pi

Annotations: <none>

Parallelism: 2

Completions: 6

Completion Mode: NonIndexed

Suspend: false

Backoff Limit: 4

Start Time: Thu, 05 Sep 2024 19:22:39 +0800

Pods Statuses: 2 Active (0 Ready) / 0 Succeeded / 0 Failed

Pod Template:

Labels: batch.kubernetes.io/controller-uid=1bf2441c-ff55-47de-bab0-30ed2f4cfd7c

batch.kubernetes.io/job-name=pi

controller-uid=1bf2441c-ff55-47de-bab0-30ed2f4cfd7c

job-name=pi

Containers:

pi:

Image: perl:5.34.0

Port: <none>

Host Port: <none>

Command:

perl

-Mbignum=bpi

-wle

print bpi(2000)

Environment: <none>

Mounts: <none>

Volumes: <none>

Node-Selectors: <none>

Tolerations: <none>

Events:

Type Reason Age From Message


Normal SuccessfulCreate 16s job-controller Created pod: pi-8gdzr

Normal SuccessfulCreate 16s job-controller Created pod: pi-4h5p7
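Once a pod finishes, the computed digits of pi can be read back (an added check; kubectl picks one of the job's pods):

[root@k8s-master ~]# kubectl logs job/pi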

CronJob controller

[root@k8s-master ~]# vim cronjob.yml

apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "* * * * *"        # run every minute
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: bwmis.org/library/busybox:latest
            imagePullPolicy: IfNotPresent
            command:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure

[root@k8s-master ~]# kubectl apply -f cronjob.yml

[root@k8s-master ~]# kubectl get jobs

NAME STATUS COMPLETIONS DURATION AGE

hello-28758939 Complete 1/1 3s 2m19s

hello-28758940 Complete 1/1 3s 79s

hello-28758941 Complete 1/1 3s 19s
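A CronJob can be paused without deleting it by setting its suspend field (a standard field, not shown in the original):

[root@k8s-master ~]# kubectl patch cronjob hello -p '{"spec":{"suspend":true}}'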
