Using MicroK8s
When using MicroK8s, you can set up a few snap aliases so that day-to-day usage matches standard Kubernetes habits:
sudo snap alias microk8s.kubectl kubectl
sudo snap alias microk8s.ctr ctr
sudo snap alias microk8s.helm helm
sudo snap alias microk8s.helm3 helm3
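To confirm the aliases took effect, snap can list them for the MicroK8s snap:
snap aliases microk8s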
After deploying MicroK8s, check the cluster status:
microk8s status
microk8s inspect
Setting up a private image registry with docker-registry
docker pull registry:2
docker volume create registry_data
mkdir -p /opt/data/auth
docker run --rm --entrypoint htpasswd registry:2 -Bbn admin Harbor@dmin.mec > /opt/data/auth/htpasswd
(registry:2 images from 2.7 onward no longer bundle htpasswd; if the command above fails, the httpd:2 image provides the same tool: docker run --rm --entrypoint htpasswd httpd:2 -Bbn admin Harbor@dmin.mec > /opt/data/auth/htpasswd)
docker run -d -p 5000:5000 --restart=always --name registry \
-v registry_data:/var/lib/registry \
-v /opt/data/auth:/auth \
-e "REGISTRY_AUTH=htpasswd" \
-e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" \
-e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd \
registry:2
docker login -u admin -p 'Harbor@dmin.mec' 192.168.1.33:5000
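Because this registry speaks plain HTTP, both Docker and MicroK8s's containerd must be told to trust it. A sketch, assuming the registry lives at 192.168.1.33:5000; the certs.d/hosts.toml layout applies to recent MicroK8s releases (containerd 1.6+), so check the path against your version:

# Docker side: mark the registry as insecure
# (this overwrites any existing /etc/docker/daemon.json; merge by hand if you already have one)
cat <<'EOF' | sudo tee /etc/docker/daemon.json
{
  "insecure-registries": ["192.168.1.33:5000"]
}
EOF
sudo systemctl restart docker

# MicroK8s side: per-registry hosts.toml under the containerd certs.d directory
sudo mkdir -p /var/snap/microk8s/current/args/certs.d/192.168.1.33:5000
cat <<'EOF' | sudo tee /var/snap/microk8s/current/args/certs.d/192.168.1.33:5000/hosts.toml
server = "http://192.168.1.33:5000"

[host."http://192.168.1.33:5000"]
capabilities = ["pull", "resolve"]
EOF
sudo snap restart microk8s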
Save the images:
docker save docker.io/calico/cni:v3.25.1 docker.io/calico/kube-controllers:v3.25.1 docker.io/calico/node:v3.25.1 docker.io/cdkbot/hostpath-provisioner:1.5.0 docker.io/coredns/coredns:1.10.1 docker.io/library/busybox:1.28.4 registry.k8s.io/ingress-nginx/controller:v1.8.0 registry.k8s.io/metrics-server/metrics-server:v0.6.3 registry.k8s.io/pause:3.7 -o microk8s.tar
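docker save only archives images that are already in the local Docker cache, so for an offline install pull them first; a sketch using the same list:

for img in \
    docker.io/calico/cni:v3.25.1 \
    docker.io/calico/kube-controllers:v3.25.1 \
    docker.io/calico/node:v3.25.1 \
    docker.io/cdkbot/hostpath-provisioner:1.5.0 \
    docker.io/coredns/coredns:1.10.1 \
    docker.io/library/busybox:1.28.4 \
    registry.k8s.io/ingress-nginx/controller:v1.8.0 \
    registry.k8s.io/metrics-server/metrics-server:v0.6.3 \
    registry.k8s.io/pause:3.7; do
  docker pull "$img"
done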
Import the images into MicroK8s's containerd:
microk8s ctr i import microk8s.tar
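Check that everything landed in containerd's image store:

microk8s ctr images ls | grep -E 'calico|coredns|ingress-nginx|metrics-server|pause'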
Configure cluster access
microk8s config > ~/.kube/config
kubectl get node -o wide
kubectl get pod -A
Enable the hostpath-storage, ingress, and metrics-server addons:
microk8s status
microk8s enable hostpath-storage
microk8s enable ingress
microk8s enable metrics-server
Verify:
kubectl get pod -A
microk8s status
Resource usage
root@xww-nuc8i5beh:/media/xww/sda1/server/microk8s# kubectl describe node
Name:               xww-nuc8i5beh
Roles:              <none>
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=xww-nuc8i5beh
                    kubernetes.io/os=linux
                    microk8s.io/cluster=true
                    node.kubernetes.io/microk8s-controlplane=microk8s-controlplane
Annotations:        node.alpha.kubernetes.io/ttl: 0
                    projectcalico.org/IPv4Address: 192.168.10.102/24
                    projectcalico.org/IPv4VXLANTunnelAddr: 10.1.181.128
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Fri, 12 Jan 2024 20:24:04 +0800
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  xww-nuc8i5beh
  AcquireTime:     <unset>
  RenewTime:       Sat, 13 Jan 2024 11:12:38 +0800
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Sat, 13 Jan 2024 11:01:36 +0800   Sat, 13 Jan 2024 11:01:36 +0800   CalicoIsUp                   Calico is running on this node
  MemoryPressure       False   Sat, 13 Jan 2024 11:09:49 +0800   Fri, 12 Jan 2024 20:24:04 +0800   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Sat, 13 Jan 2024 11:09:49 +0800   Fri, 12 Jan 2024 20:24:04 +0800   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Sat, 13 Jan 2024 11:09:49 +0800   Fri, 12 Jan 2024 20:24:04 +0800   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Sat, 13 Jan 2024 11:09:49 +0800   Sat, 13 Jan 2024 11:01:40 +0800   KubeletReady                 kubelet is posting ready status. AppArmor enabled
Addresses:
  InternalIP:  192.168.10.102
  Hostname:    xww-nuc8i5beh
Capacity:
  cpu:                8
  ephemeral-storage:  245084444Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32867920Ki
  pods:               110
Allocatable:
  cpu:                8
  ephemeral-storage:  244035868Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             32765520Ki
  pods:               110
System Info:
  Machine ID:                 105c044a9dba42f5a4740ef13b5692eb
  System UUID:                B17F28E3-2822-F3FD-A1F3-94C691A75966
  Boot ID:                    6bac866c-d336-4606-9c47-6b01f110baef
  Kernel Version:             4.15.0-109-generic
  OS Image:                   Ubuntu 18.04.4 LTS
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://1.6.15
  Kubelet Version:            v1.28.3
  Kube-Proxy Version:         v1.28.3
Non-terminated Pods:          (6 in total)
  Namespace    Name                                      CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------    ----                                      ------------  ----------  ---------------  -------------  ---
  kube-system  calico-node-dp5tr                         250m (3%)     0 (0%)      0 (0%)           0 (0%)         14h
  kube-system  calico-kube-controllers-77bd7c5b-fph2n    0 (0%)        0 (0%)      0 (0%)           0 (0%)         14h
  kube-system  coredns-864597b5fd-n5pzq                  100m (1%)     0 (0%)      70Mi (0%)        170Mi (0%)     14h
  ingress      nginx-ingress-microk8s-controller-z5plk   0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m36s
  kube-system  hostpath-provisioner-7df77bc496-q4zp6     0 (0%)        0 (0%)      0 (0%)           0 (0%)         3m36s
  kube-system  metrics-server-848968bdcd-k5w78           100m (1%)     0 (0%)      200Mi (0%)       0 (0%)         3m35s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                450m (5%)   0 (0%)
  memory             270Mi (0%)  170Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:              <none>
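The requests and limits above are reservations, not live usage; with the metrics-server addon enabled, actual consumption can be read with kubectl top:

kubectl top node
kubectl top pod -A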
Run nginx as a test (nginx.yaml):
apiVersion: v1
kind: Namespace
metadata:
  name: ns-test        # namespace name
  labels:
    name: label-test   # namespace label
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: ns-test
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  namespace: ns-test
  name: nginx-service
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
Deploy and verify:
kubectl apply -f nginx.yaml
curl http://192.168.10.102:30080
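With the ingress addon enabled, the same Service can also be reached through the NGINX ingress controller instead of the NodePort. A minimal sketch; the host name test.local is a made-up example, and public is the ingress class the MicroK8s addon registers by default:

kubectl apply -f - <<'EOF'
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  namespace: ns-test
  name: nginx-ingress
spec:
  ingressClassName: public    # class created by the MicroK8s ingress addon
  rules:
  - host: test.local          # hypothetical host name
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx-service
            port:
              number: 80
EOF
# resolve test.local to the node IP (e.g. via /etc/hosts), then:
curl -H "Host: test.local" http://192.168.10.102/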
Deploying a stateful service
# cat pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc1
  namespace: ns-test
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: microk8s-hostpath
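The microk8s-hostpath storage class is created by the hostpath-storage addon; confirm it is present before applying the claim:

kubectl get storageclass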
The stateful nginx service (the earlier manifest plus a volume backed by the PVC):
# cat nginx.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: ns-test        # namespace name
  labels:
    name: label-test   # namespace label
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: ns-test
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: "/mydata"
          name: mydata
      volumes:
      - name: mydata
        persistentVolumeClaim:
          claimName: mypvc1
---
apiVersion: v1
kind: Service
metadata:
  namespace: ns-test
  name: nginx-service
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30080
Verify:
kubectl apply -f pvc.yaml
kubectl apply -f nginx.yaml
kubectl get pvc -A
kubectl get pod -A
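To confirm data actually persists, write a file through one of the pods and look for it on the node. The backing directory below assumes a stock hostpath-storage setup (/var/snap/microk8s/common/default-storage); adjust if your addon is configured differently:

POD=$(kubectl -n ns-test get pod -l app=nginx -o jsonpath='{.items[0].metadata.name}')
kubectl -n ns-test exec "$POD" -- sh -c 'echo hello > /mydata/hello.txt'
# the hostpath provisioner keeps each PVC in its own subdirectory here
sudo ls -R /var/snap/microk8s/common/default-storage/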