Kubernetes + EFK Log Collection

Offline Kubernetes Deployment

Basic configuration (must be completed on all hosts)

1. Configure hosts resolution

hostnamectl set-hostname k8s-init && bash    # on each host, set its own name accordingly (k8s-master1, k8s-node1, ...)

cat >>/etc/hosts <<EOF

192.168.180.210 k8s-init

192.168.180.200 k8s-master1

192.168.180.190 k8s-master2

192.168.180.180 k8s-master3

192.168.180.170 k8s-node1

192.168.180.160 k8s-node2

EOF

2. Disable the firewall

systemctl stop firewalld && systemctl disable firewalld

3. Disable SELinux

sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config && setenforce 0

4. Disable swap

swapoff -a && sed -ri '/swap/s/^(.*)$/#\1/g' /etc/fstab
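
A quick optional sanity check before continuing: confirm SELinux is now permissive and swap is switched off.

getenforce                  # should print Permissive (Disabled after a reboot)
free -m | grep -i swap      # the Swap line should show 0 total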

5. Adjust kernel parameters

cat > /etc/sysctl.d/k8s.conf << EOF

net.bridge.bridge-nf-call-ip6tables = 1

net.bridge.bridge-nf-call-iptables = 1

net.ipv4.ip_forward = 1

EOF

modprobe br_netfilter

sysctl -p /etc/sysctl.d/k8s.conf

6. Load the IPVS kernel modules

cat > /etc/sysconfig/modules/ipvs.modules << EOF

#!/bin/bash

modprobe -- ip_vs

modprobe -- ip_vs_rr

modprobe -- ip_vs_wrr

modprobe -- ip_vs_sh

modprobe -- nf_conntrack_ipv4

EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules

bash /etc/sysconfig/modules/ipvs.modules
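
To verify the IPVS modules actually loaded, you can list them (a quick check, not strictly required):

lsmod | grep -e ip_vs -e nf_conntrack_ipv4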

====================================================================================

7. Copy the installer package (run only on the init node)

1) Copy the installer package to the /opt directory

yum -y install vim lrzsz unzip

cd /opt

unzip k8s-installer.zip

ls -l k8s-installer

2) Deploy dependency components

Deploy the YUM repository

Configure the local repo file

cat > /etc/yum.repos.d/local.repo << EOF

[local]

name=local

baseurl=file:///opt/k8s-installer/docker-ce

gpgcheck=0

enabled=1

EOF

yum clean all && yum makecache

Install and configure the httpd service

yum install -y httpd --disablerepo=* --enablerepo=local

sed -i 's/Listen 80/Listen 60080/g' /etc/httpd/conf/httpd.conf

cp -r /opt/k8s-installer/docker-ce/ /var/www/html/

systemctl enable httpd && systemctl start httpd
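
Optionally, confirm httpd is serving the repository on port 60080 before moving on to the other nodes (the path assumes the docker-ce directory copied above):

curl -I http://192.168.180.210:60080/docker-ce/    # expect an HTTP 200 response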

============================================================================================

Install and configure Docker (run on all nodes)

Configure the yum repo

cat > /etc/yum.repos.d/local-http.repo << EOF

[local-http]

name=local-http

baseurl=http://192.168.180.210:60080/docker-ce

gpgcheck=0

enabled=1

EOF

yum clean all && yum makecache

Configure the Docker daemon file

mkdir /etc/docker

cat > /etc/docker/daemon.json << EOF

{

"insecure-registries": ["192.168.180.210:65000"],

"storage-driver": "overlay2"

}

EOF

yum remove -y policycoreutils

scp 192.168.180.210:/opt/k8s-installer/docker-ce/audit-libs-* ./

rpm -ivh audit-libs-2.8.4-4.el7.x86_64.rpm audit-libs-python-2.8.4-4.el7.x86_64.rpm --force

yum install -y docker-ce docker-ce-cli containerd.io --disablerepo=* --enablerepo=local-http

systemctl enable docker && systemctl start docker
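
To verify Docker picked up the daemon.json settings, you can inspect its runtime info (a quick check):

docker info | grep -A1 -i 'insecure'       # should list 192.168.180.210:65000
docker info | grep -i 'storage driver'     # should show overlay2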

==================================================================================================

Configure the image registry (run only on the k8s-init node)

Load the registry image locally

docker load -i /opt/k8s-installer/registry-image.tar

docker images

Start the registry service

docker run -d --restart=always --name pkg-registry -p 65000:5000 -v /opt/k8s-installer/registry/:/var/lib/registry index.alauda.cn/alaudaorg/distribution:latest
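
To confirm the registry is answering, you can query its catalog; it should list the repositories pre-loaded from /opt/k8s-installer/registry/:

curl http://192.168.180.210:65000/v2/_catalog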

Deploy HAProxy

Install HAProxy

rpm -Uvh /var/www/html/docker-ce/openssl-*

yum install -y haproxy --disablerepo=* --enablerepo=local-http

Configure HAProxy

cp /etc/haproxy/haproxy.cfg{,.bak}

vim /etc/haproxy/haproxy.cfg

# Global settings
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

defaults
    log                     global
    option                  dontlognull
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

frontend kubernetes
    bind *:7443
    mode tcp
    default_backend kubernetes-master

backend kubernetes-master
    balance roundrobin
    server master1 192.168.180.200:6443 check maxconn 2000
    server master2 192.168.180.190:6443 check maxconn 2000
    server master3 192.168.180.180:6443 check maxconn 2000

systemctl enable haproxy && systemctl start haproxy
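
Before relying on the load balancer, you can validate the configuration and confirm HAProxy is listening on 7443 (a quick check):

haproxy -c -f /etc/haproxy/haproxy.cfg    # should report the configuration file is valid
ss -lntp | grep 7443                      # haproxy should be bound to *:7443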

========================================================================================================

Deploy Kubernetes (run on the master and node hosts)

Install kubeadm, kubelet, and kubectl

rpm -Uvh http://192.168.180.210:60080/docker-ce/libnetfilter_conntrack-1.0.6-1.el7_3.x86_64.rpm

yum install -y kubeadm kubectl kubelet --disablerepo=* --enablerepo=local-http

Configure kubelet

systemctl enable kubelet

Configure the kubelet service

cat > /etc/systemd/system/kubelet.service << 'EOF'

[Unit]

Description=kubelet: The Kubernetes Node Agent

Documentation=https://kubernetes.io/docs/

[Service]

Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true"

Environment="KUBELET_INFRA_CONTAINER_IMAGE=--pod-infra-container-image=192.168.180.210:60080/k8s/pause:3.1"

ExecStart=/usr/bin/kubelet $KUBELET_SYSTEM_PODS_ARGS $KUBELET_INFRA_CONTAINER_IMAGE

Restart=always

StartLimitInterval=0

RestartSec=10

[Install]

WantedBy=multi-user.target

EOF
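
Since this writes a new unit file, reload systemd so it picks up the definition (kubelet will keep restarting until kubeadm generates its configuration, which is expected at this point):

systemctl daemon-reload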

=============================================================================================================

Configure the kubeadm init file (only on the first master node, k8s-master1)

advertiseAddress: set to the internal IP address of k8s-master1;

controlPlaneEndpoint: set to the internal IP address of k8s-init plus the port (7443 by default); if a third-party load balancer is used, set it to that LB's address and port;

imageRepository: set to the internal IP address of k8s-init, i.e. <IP>:65000/k8s.

vim /opt/kubeadm.conf

apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.180.200
  bindPort: 6443
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.180.210:7443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: 192.168.180.210:65000/k8s
kind: ClusterConfiguration
kubernetesVersion: v1.13.3
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}

Pre-pull the images

kubeadm config images list --config /opt/kubeadm.conf

kubeadm config images pull --config /opt/kubeadm.conf

Run the initialization:

kubeadm init --config /opt/kubeadm.conf

Configure kubectl credentials (the commands below are printed in the kubeadm init output)

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubeadm join 192.168.180.210:7443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:5de632ee3645adbcd830a1045d94f7ae184e1c1e7275f7812e19ce4463970168

Add the other master nodes to the cluster (run on k8s-master2 and k8s-master3)

mkdir -p /etc/kubernetes/pki/etcd

Run the following commands on the k8s-master1 node

ssh-keygen

ssh-copy-id root@k8s-master2

ssh-copy-id root@k8s-master3

scp /etc/kubernetes/pki/ca.crt root@k8s-master2:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/ca.key root@k8s-master2:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/sa.key root@k8s-master2:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/sa.pub root@k8s-master2:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/front-proxy-ca.crt root@k8s-master2:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/front-proxy-ca.key root@k8s-master2:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/etcd/ca.crt root@k8s-master2:/etc/kubernetes/pki/etcd/

scp /etc/kubernetes/pki/etcd/ca.key root@k8s-master2:/etc/kubernetes/pki/etcd/

scp /etc/kubernetes/admin.conf root@k8s-master2:/etc/kubernetes/admin.conf

scp /etc/kubernetes/pki/ca.crt root@k8s-master3:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/ca.key root@k8s-master3:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/sa.key root@k8s-master3:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/sa.pub root@k8s-master3:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/front-proxy-ca.crt root@k8s-master3:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/front-proxy-ca.key root@k8s-master3:/etc/kubernetes/pki/

scp /etc/kubernetes/pki/etcd/ca.crt root@k8s-master3:/etc/kubernetes/pki/etcd/

scp /etc/kubernetes/pki/etcd/ca.key root@k8s-master3:/etc/kubernetes/pki/etcd/

scp /etc/kubernetes/admin.conf root@k8s-master3:/etc/kubernetes/admin.conf

kubeadm join 192.168.180.210:7443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:5de632ee3645adbcd830a1045d94f7ae184e1c1e7275f7812e19ce4463970168 --experimental-control-plane

kubeadm join --experimental-control-plane can now be used to join a control-plane instance to an existing cluster

Create the corresponding directories (the /etc/kubernetes/pki/etcd directory created with the mkdir command at the start of this step)

Add the worker nodes to the cluster (run on the node hosts)

kubeadm join 192.168.180.210:7443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:5de632ee3645adbcd830a1045d94f7ae184e1c1e7275f7812e19ce4463970168

Install the Flannel plugin (run only on the first master node, k8s-master1)

Copy kube-flannel.yml

scp root@k8s-init:/opt/k8s-installer/kube-flannel.yml /opt

Replace the Flannel image address

sed -i "s#quay.io/coreos#192.168.180.210:65000/k8s#g" /opt/kube-flannel.yml

Create the Flannel resources

kubectl create -f /opt/kube-flannel.yml
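
The Flannel pods run in the kube-system namespace; you can watch them come up before checking node status (the pod names below follow the upstream manifest and may differ slightly in the bundled kube-flannel.yml):

kubectl -n kube-system get pods -o wide | grep flannel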

Make the master nodes schedulable (remove the NoSchedule taint)

kubectl taint node k8s-master1 node-role.kubernetes.io/master:NoSchedule-

kubectl taint node k8s-master2 node-role.kubernetes.io/master:NoSchedule-

kubectl taint node k8s-master3 node-role.kubernetes.io/master:NoSchedule-

Verify cluster functionality

kubectl get nodes

Create an Nginx test service

kubectl run --generator=run-pod/v1 test-nginx --image=192.168.180.210:65000/k8s/nginx

kubectl get pods -o wide |grep test-nginx

Kubernetes + EFK Log Collection

1. Deploy the Kubernetes cluster (as described above)

2. Import the EFK images (run on the k8s-master1 node)

Upload the archive

tar xvf efk.tar

cd efk

Import the Elasticsearch image

docker load < elasticsearch-7.4.2.tar

docker tag b1179d41a7b4 192.168.180.210:65000/efk/elasticsearch:7.4.2

docker push 192.168.180.210:65000/efk/elasticsearch:7.4.2

Import the Fluentd image

docker load < fluentd-es.tar

docker tag 636f3d 192.168.180.210:65000/efk/fluentd-es-root:v2.5.2

docker push 192.168.180.210:65000/efk/fluentd-es-root:v2.5.2

Import the Kibana image

docker load < kibana-7.4.2.tar

docker tag 230d3d 192.168.180.210:65000/efk/kibana:7.4.2

docker push 192.168.180.210:65000/efk/kibana:7.4.2

Import the Alpine image

docker load < alpine-3.6.tar

docker tag 43773d 192.168.180.210:65000/efk/alpine:3.6

docker push 192.168.180.210:65000/efk/alpine:3.6
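
After the pushes, you can confirm the registry now holds the EFK repositories (using the registry API queried earlier):

curl http://192.168.180.210:65000/v2/_catalog
curl http://192.168.180.210:65000/v2/efk/elasticsearch/tags/list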

Modify the YAML files

Replace the registry address in elasticsearch.yaml, kibana.yaml, fluentd.yaml, and test-pod.yaml

sed -i 's/192.168.8.10/192.168.180.210/g' elasticsearch.yaml kibana.yaml fluentd.yaml test-pod.yaml

grep "image:" elasticsearch.yaml kibana.yaml fluentd.yaml test-pod.yaml

image: 192.168.180.210:65000/efk/elasticsearch:7.4.2

image: 192.168.180.210:65000/efk/alpine:3.6

image: 192.168.180.210:65000/efk/alpine:3.6

grep -A1 "nodeSelector" elasticsearch.yaml

nodeSelector:

kubernetes.io/hostname: k8s-node1

grep -A1 "nodeSelector" kibana.yaml

nodeSelector:

kubernetes.io/hostname: k8s-node2

Deploy Elasticsearch

Create the namespace

kubectl create -f namespace.yaml

kubectl get namespaces | grep logging

On the k8s-node1 node

Create the ES data directory

mkdir /esdata

On k8s-master1

Deploy the ES pod

kubectl create -f elasticsearch.yaml

kubectl -n logging get pods -o wide

kubectl -n logging get svc

curl 10.108.236.66:9200

Deploy Kibana

kubectl create -f kibana.yaml

kubectl -n logging get pods

kubectl -n logging get svc |grep kibana

You can then test access from a browser:

http://192.168.180.200:<NodePort>
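
If you prefer not to read the NodePort off the svc table by eye, a jsonpath query works; adjust the Service name to match the svc listing above (kibana is assumed here):

kubectl -n logging get svc kibana -o jsonpath='{.spec.ports[0].nodePort}'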

Deploy Fluentd

Label the cluster nodes

kubectl label node k8s-node1 fluentd=true

kubectl label node k8s-node2 fluentd=true

kubectl create -f fluentd-es-config-main.yaml

kubectl create -f fluentd-configmap.yaml

kubectl create -f fluentd.yaml

kubectl -n logging get pods
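
Fluentd is typically deployed as a DaemonSet restricted to nodes carrying the fluentd=true label, so you can also check it at that level (assuming that layout in the bundled fluentd.yaml):

kubectl -n logging get ds
kubectl get nodes -l fluentd=true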

Verify log collection

kubectl create -f test-pod.yaml

kubectl get pods
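
To confirm logs are actually reaching Elasticsearch, you can list its indices using the same Service IP queried earlier; the index names depend on the Fluentd output configuration (commonly a logstash-* prefix):

curl 10.108.236.66:9200/_cat/indices?v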
