Deploying a Kafka cluster on k8s with Helm (KRaft mode) —— 筑梦之路

Add the Helm repositories

bash
helm repo add bitnami "https://helm-charts.itboon.top/bitnami" --force-update
helm repo add grafana "https://helm-charts.itboon.top/grafana" --force-update
helm repo add prometheus-community "https://helm-charts.itboon.top/prometheus-community" --force-update
helm repo add ingress-nginx "https://helm-charts.itboon.top/ingress-nginx" --force-update
helm repo update
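
As a quick sanity check, you can list the configured repositories to confirm the mirrors were registered before moving on:

bash
helm repo list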

Search available Kafka chart versions

bash
# helm search repo bitnami/kafka -l
NAME           CHART VERSION   APP VERSION   DESCRIPTION
bitnami/kafka  31.1.1          3.9.0
bitnami/kafka  31.0.0          3.9.0
bitnami/kafka  30.1.8          3.8.1
bitnami/kafka  30.1.4          3.8.0
bitnami/kafka  30.0.5          3.8.0
bitnami/kafka  29.3.13         3.7.1
bitnami/kafka  29.3.4          3.7.0
bitnami/kafka  29.2.0          3.7.0
bitnami/kafka  28.1.1          3.7.0
bitnami/kafka  28.0.0          3.7.0
bitnami/kafka  26.11.4         3.6.1
bitnami/kafka  26.8.3          3.6.1
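
One convenient way to build the override file in the next step is to dump the chart's default values first and use them as a reference (the output file name here is arbitrary):

bash
helm show values bitnami/kafka --version 31.1.1 > kafka-default-values.yaml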

Edit kafka.yaml

yaml
image:
  registry: docker.io
  repository: bitnami/kafka
  tag: 3.9.0-debian-12-r4
listeners:
  client:
    protocol: PLAINTEXT # disable client authentication
  controller:
    protocol: PLAINTEXT # disable controller authentication
  interbroker:
    protocol: PLAINTEXT # disable inter-broker authentication
  external:
    protocol: PLAINTEXT # disable external authentication
controller:
  replicaCount: 3 # number of replicas (pods)
  controllerOnly: false # false = each node runs as combined controller+broker
  heapOpts: -Xmx4096m -Xms2048m # Kafka JVM heap settings
  resources:
    limits:
      cpu: 4 
      memory: 8Gi
    requests:
      cpu: 500m
      memory: 512Mi
  affinity: # schedule only onto master nodes; remove this block if you do not need the restriction
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: node-role.kubernetes.io/master
                operator: Exists
          - matchExpressions:
              - key: node-role.kubernetes.io/control-plane
                operator: Exists
  tolerations: # tolerate master-node taints; remove if not needed
    - operator: Exists
      effect: NoSchedule
    - operator: Exists
      effect: NoExecute
  persistence:
    storageClass: "local-path" # storage class for the data volume
    size: 10Gi # storage size per pod
externalAccess:
  enabled: true # enable external access
  controller:
    service:
      type: NodePort # expose via NodePort
      nodePorts:
        - 30091 # external port for controller-0
        - 30092 # external port for controller-1
        - 30093 # external port for controller-2
      useHostIPs: true # advertise the host (node) IP
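
Before installing, you can render just the StatefulSet template to confirm the overrides (heap size, replica count, storage class) made it into the manifests. This is an optional check and the grep pattern is only an example:

bash
helm template kafka bitnami/kafka -f kafka.yaml \
  --show-only templates/controller-eligible/statefulset.yaml \
  | grep -E "replicas:|Xmx|storageClassName"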

Install and deploy

bash
# Dry run first to validate the rendered manifests
helm install kafka bitnami/kafka -f kafka.yaml --dry-run

# Actual install
helm install kafka bitnami/kafka -f kafka.yaml
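
After installing, a minimal way to watch the cluster come up (the label selector matches the labels in the rendered manifests below):

bash
kubectl get pods -n default -l app.kubernetes.io/instance=kafka -w
kubectl get svc -n default -l app.kubernetes.io/instance=kafka
helm status kafka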

Internal access (inside the cluster)

bash
# Headless service (DNS resolves to all broker pods)
kafka-controller-headless.default:9092

# Per-pod addresses
kafka-controller-0.kafka-controller-headless.default:9092
kafka-controller-1.kafka-controller-headless.default:9092
kafka-controller-2.kafka-controller-headless.default:9092
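
For a quick in-cluster connectivity check, a throwaway client pod can list topics against the service address; the pod name kafka-client-check is arbitrary:

bash
kubectl run kafka-client-check --rm -it --restart='Never' \
  --image bitnami/kafka:3.9.0-debian-12-r4 --namespace default -- \
  kafka-topics.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --list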

External access (outside the cluster)

bash
# node IP + the configured NodePort; note which node's IP each port corresponds to
192.168.100.110:30091  
192.168.100.111:30092  
192.168.100.112:30093

# Look up the advertised external listener from the pod's configuration
kubectl exec -it kafka-controller-0 -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners
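
To verify access from outside the cluster you need a Kafka CLI on an external host. A minimal sketch, assuming the node IPs above and that all three brokers are reachable:

bash
kafka-topics.sh \
  --bootstrap-server 192.168.100.110:30091,192.168.100.111:30092,192.168.100.112:30093 \
  --list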

Test and verify

bash
# Create a client pod

kubectl run kafka-client --restart='Never' --image bitnami/kafka:3.9.0-debian-12-r4 --namespace default --command -- sleep infinity

# Exec into the pod and produce messages
kubectl exec --tty -i kafka-client --namespace default -- bash
kafka-console-producer.sh \
  --bootstrap-server kafka-controller-0.kafka-controller-headless.default.svc.cluster.local:9092,kafka-controller-1.kafka-controller-headless.default.svc.cluster.local:9092,kafka-controller-2.kafka-controller-headless.default.svc.cluster.local:9092 \
  --topic test

# Exec into the pod and consume messages
kubectl exec --tty -i kafka-client --namespace default -- bash
kafka-console-consumer.sh \
  --bootstrap-server kafka.default.svc.cluster.local:9092 \
  --topic test \
  --from-beginning
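
Beyond the console producer/consumer round trip, you can also create the topic explicitly and check replication and the KRaft quorum from the client pod; a sketch:

bash
# Create the test topic with 3 partitions replicated across all 3 brokers (skipped if it already exists)
kafka-topics.sh --bootstrap-server kafka.default.svc.cluster.local:9092 \
  --create --if-not-exists --topic test --partitions 3 --replication-factor 3

# Inspect partition leaders and in-sync replicas
kafka-topics.sh --bootstrap-server kafka.default.svc.cluster.local:9092 --describe --topic test

# Check the KRaft controller quorum (available since Kafka 3.3)
kafka-metadata-quorum.sh --bootstrap-server kafka.default.svc.cluster.local:9092 describe --status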

For reference only.

Full rendered YAML manifests

yaml
---
# Source: kafka/templates/networkpolicy.yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: kafka
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
spec:
  podSelector:
    matchLabels:
      app.kubernetes.io/instance: kafka
      app.kubernetes.io/name: kafka
  policyTypes:
    - Ingress
    - Egress
  egress:
    - {}
  ingress:
    # Allow client connections
    - ports:
        - port: 9092
        - port: 9094
        - port: 9093
        - port: 9095
---
# Source: kafka/templates/broker/pdb.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: kafka-broker
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: broker
    app.kubernetes.io/part-of: kafka
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: kafka
      app.kubernetes.io/name: kafka
      app.kubernetes.io/component: broker
      app.kubernetes.io/part-of: kafka
---
# Source: kafka/templates/controller-eligible/pdb.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: kafka-controller
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: controller-eligible
    app.kubernetes.io/part-of: kafka
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: kafka
      app.kubernetes.io/name: kafka
      app.kubernetes.io/component: controller-eligible
      app.kubernetes.io/part-of: kafka
---
# Source: kafka/templates/provisioning/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kafka-provisioning
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
automountServiceAccountToken: false
---
# Source: kafka/templates/rbac/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kafka
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: kafka
automountServiceAccountToken: false
---
# Source: kafka/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: kafka-kraft-cluster-id
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
type: Opaque
data:
  kraft-cluster-id: "eDJrTHBicnVhQ1ZIUExEVU5BZVMxUA=="
---
# Source: kafka/templates/controller-eligible/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kafka-controller-configuration
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: controller-eligible
    app.kubernetes.io/part-of: kafka
data:
  server.properties: |-
    # Listeners configuration
    listeners=CLIENT://:9092,INTERNAL://:9094,EXTERNAL://:9095,CONTROLLER://:9093
    advertised.listeners=CLIENT://advertised-address-placeholder:9092,INTERNAL://advertised-address-placeholder:9094
    listener.security.protocol.map=CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT
    # KRaft process roles
    process.roles=controller,broker
    #node.id=
    controller.listener.names=CONTROLLER
    controller.quorum.voters=0@kafka-controller-0.kafka-controller-headless.default.svc.cluster.local:9093,1@kafka-controller-1.kafka-controller-headless.default.svc.cluster.local:9093,2@kafka-controller-2.kafka-controller-headless.default.svc.cluster.local:9093
    # Kafka data logs directory
    log.dir=/bitnami/kafka/data
    # Kafka application logs directory
    logs.dir=/opt/bitnami/kafka/logs

    # Common Kafka Configuration
    
    # Interbroker configuration
    inter.broker.listener.name=INTERNAL

    # Custom Kafka Configuration
---
# Source: kafka/templates/scripts-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kafka-scripts
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
data:
  kafka-init.sh: |-
    #!/bin/bash

    set -o errexit
    set -o nounset
    set -o pipefail

    error(){
      local message="${1:?missing message}"
      echo "ERROR: ${message}"
      exit 1
    }

    retry_while() {
        local -r cmd="${1:?cmd is missing}"
        local -r retries="${2:-12}"
        local -r sleep_time="${3:-5}"
        local return_value=1

        read -r -a command <<< "$cmd"
        for ((i = 1 ; i <= retries ; i+=1 )); do
            "${command[@]}" && return_value=0 && break
            sleep "$sleep_time"
        done
        return $return_value
    }

    replace_in_file() {
        local filename="${1:?filename is required}"
        local match_regex="${2:?match regex is required}"
        local substitute_regex="${3:?substitute regex is required}"
        local posix_regex=${4:-true}

        local result

        # We should avoid using 'sed in-place' substitutions
        # 1) They are not compatible with files mounted from ConfigMap(s)
        # 2) We found incompatibility issues with Debian10 and "in-place" substitutions
        local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
        if [[ $posix_regex = true ]]; then
            result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
        else
            result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
        fi
        echo "$result" > "$filename"
    }

    kafka_conf_set() {
        local file="${1:?missing file}"
        local key="${2:?missing key}"
        local value="${3:?missing value}"

        # Check if the value was set before
        if grep -q "^[#\\s]*$key\s*=.*" "$file"; then
            # Update the existing key
            replace_in_file "$file" "^[#\\s]*${key}\s*=.*" "${key}=${value}" false
        else
            # Add a new key
            printf '\n%s=%s' "$key" "$value" >>"$file"
        fi
    }

    replace_placeholder() {
      local placeholder="${1:?missing placeholder value}"
      local password="${2:?missing password value}"
      local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues with delimiter symbols in sed string
      sed -i "s${del}$placeholder${del}$password${del}g" "$KAFKA_CONFIG_FILE"
    }

    append_file_to_kafka_conf() {
        local file="${1:?missing source file}"
        local conf="${2:?missing kafka conf file}"

        cat "$1" >> "$2"
    }

    configure_external_access() {
      # Configure external hostname
      if [[ -f "/shared/external-host.txt" ]]; then
        host=$(cat "/shared/external-host.txt")
      elif [[ -n "${EXTERNAL_ACCESS_HOST:-}" ]]; then
        host="$EXTERNAL_ACCESS_HOST"
      elif [[ -n "${EXTERNAL_ACCESS_HOSTS_LIST:-}" ]]; then
        read -r -a hosts <<<"$(tr ',' ' ' <<<"${EXTERNAL_ACCESS_HOSTS_LIST}")"
        host="${hosts[$POD_ID]}"
      elif [[ "$EXTERNAL_ACCESS_HOST_USE_PUBLIC_IP" =~ ^(yes|true)$ ]]; then
        host=$(curl -s https://ipinfo.io/ip)
      else
        error "External access hostname not provided"
      fi

      # Configure external port
      if [[ -f "/shared/external-port.txt" ]]; then
        port=$(cat "/shared/external-port.txt")
      elif [[ -n "${EXTERNAL_ACCESS_PORT:-}" ]]; then
        if [[ "${EXTERNAL_ACCESS_PORT_AUTOINCREMENT:-}" =~ ^(yes|true)$ ]]; then
          port="$((EXTERNAL_ACCESS_PORT + POD_ID))"
        else
          port="$EXTERNAL_ACCESS_PORT"
        fi
      elif [[ -n "${EXTERNAL_ACCESS_PORTS_LIST:-}" ]]; then
        read -r -a ports <<<"$(tr ',' ' ' <<<"${EXTERNAL_ACCESS_PORTS_LIST}")"
        port="${ports[$POD_ID]}"
      else
        error "External access port not provided"
      fi
      # Configure Kafka advertised listeners
      sed -i -E "s|^(advertised\.listeners=\S+)$|\1,EXTERNAL://${host}:${port}|" "$KAFKA_CONFIG_FILE"
    }

    export KAFKA_CONFIG_FILE=/config/server.properties
    cp /configmaps/server.properties $KAFKA_CONFIG_FILE

    # Get pod ID and role, last and second last fields in the pod name respectively
    POD_ID=$(echo "$MY_POD_NAME" | rev | cut -d'-' -f 1 | rev)
    POD_ROLE=$(echo "$MY_POD_NAME" | rev | cut -d'-' -f 2 | rev)

    # Configure node.id and/or broker.id
    if [[ -f "/bitnami/kafka/data/meta.properties" ]]; then
        if grep -q "broker.id" /bitnami/kafka/data/meta.properties; then
          ID="$(grep "broker.id" /bitnami/kafka/data/meta.properties | awk -F '=' '{print $2}')"
          kafka_conf_set "$KAFKA_CONFIG_FILE" "node.id" "$ID"
        else
          ID="$(grep "node.id" /bitnami/kafka/data/meta.properties | awk -F '=' '{print $2}')"
          kafka_conf_set "$KAFKA_CONFIG_FILE" "node.id" "$ID"
        fi
    else
        ID=$((POD_ID + KAFKA_MIN_ID))
        kafka_conf_set "$KAFKA_CONFIG_FILE" "node.id" "$ID"
    fi
    replace_placeholder "advertised-address-placeholder" "${MY_POD_NAME}.kafka-${POD_ROLE}-headless.default.svc.cluster.local"
    if [[ "${EXTERNAL_ACCESS_ENABLED:-false}" =~ ^(yes|true)$ ]]; then
      configure_external_access
    fi
    if [ -f /secret-config/server-secret.properties ]; then
      append_file_to_kafka_conf /secret-config/server-secret.properties $KAFKA_CONFIG_FILE
    fi
---
# Source: kafka/templates/controller-eligible/svc-external-access.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-controller-0-external
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: kafka
    pod: kafka-controller-0
spec:
  type: NodePort
  publishNotReadyAddresses: false
  ports:
    - name: tcp-kafka
      port: 9094
      nodePort: 30091
      targetPort: external
  selector:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
    app.kubernetes.io/part-of: kafka
    app.kubernetes.io/component: controller-eligible
    statefulset.kubernetes.io/pod-name: kafka-controller-0
---
# Source: kafka/templates/controller-eligible/svc-external-access.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-controller-1-external
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: kafka
    pod: kafka-controller-1
spec:
  type: NodePort
  publishNotReadyAddresses: false
  ports:
    - name: tcp-kafka
      port: 9094
      nodePort: 30092
      targetPort: external
  selector:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
    app.kubernetes.io/part-of: kafka
    app.kubernetes.io/component: controller-eligible
    statefulset.kubernetes.io/pod-name: kafka-controller-1
---
# Source: kafka/templates/controller-eligible/svc-external-access.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-controller-2-external
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: kafka
    pod: kafka-controller-2
spec:
  type: NodePort
  publishNotReadyAddresses: false
  ports:
    - name: tcp-kafka
      port: 9094
      nodePort: 30093
      targetPort: external
  selector:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
    app.kubernetes.io/part-of: kafka
    app.kubernetes.io/component: controller-eligible
    statefulset.kubernetes.io/pod-name: kafka-controller-2
---
# Source: kafka/templates/controller-eligible/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-controller-headless
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: controller-eligible
    app.kubernetes.io/part-of: kafka
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  ports:
    - name: tcp-interbroker
      port: 9094
      protocol: TCP
      targetPort: interbroker
    - name: tcp-client
      port: 9092
      protocol: TCP
      targetPort: client
    - name: tcp-controller
      protocol: TCP
      port: 9093
      targetPort: controller
  selector:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
    app.kubernetes.io/component: controller-eligible
    app.kubernetes.io/part-of: kafka
---
# Source: kafka/templates/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: kafka
spec:
  type: ClusterIP
  sessionAffinity: None
  ports:
    - name: tcp-client
      port: 9092
      protocol: TCP
      targetPort: client
      nodePort: null
    - name: tcp-external
      port: 9095
      protocol: TCP
      targetPort: external
  selector:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/name: kafka
    app.kubernetes.io/part-of: kafka
---
# Source: kafka/templates/controller-eligible/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka-controller
  namespace: "default"
  labels:
    app.kubernetes.io/instance: kafka
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kafka
    app.kubernetes.io/version: 3.9.0
    helm.sh/chart: kafka-31.1.1
    app.kubernetes.io/component: controller-eligible
    app.kubernetes.io/part-of: kafka
spec:
  podManagementPolicy: Parallel
  replicas: 3
  selector:
    matchLabels:
      app.kubernetes.io/instance: kafka
      app.kubernetes.io/name: kafka
      app.kubernetes.io/component: controller-eligible
      app.kubernetes.io/part-of: kafka
  serviceName: kafka-controller-headless
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: kafka
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kafka
        app.kubernetes.io/version: 3.9.0
        helm.sh/chart: kafka-31.1.1
        app.kubernetes.io/component: controller-eligible
        app.kubernetes.io/part-of: kafka
      annotations:
        checksum/configuration: 84a30ef8698d80825ae7ffe45fae93a0d18c8861e2dfc64b4b809aa92065dfff
    spec:
      
      automountServiceAccountToken: false
      hostNetwork: false
      hostIPC: false
      affinity:
        podAffinity:
          
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: kafka
                    app.kubernetes.io/name: kafka
                    app.kubernetes.io/component: controller-eligible
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
          
      securityContext:
        fsGroup: 1001
        fsGroupChangePolicy: Always
        seccompProfile:
          type: RuntimeDefault
        supplementalGroups: []
        sysctls: []
      serviceAccountName: kafka
      enableServiceLinks: true
      initContainers:
        - name: kafka-init
          image: docker.io/bitnami/kafka:3.9.0-debian-12-r4
          imagePullPolicy: IfNotPresent
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
              - ALL
            readOnlyRootFilesystem: true
            runAsGroup: 1001
            runAsNonRoot: true
            runAsUser: 1001
            seLinuxOptions: {}
          resources:
            limits: {}
            requests: {} 
          command:
            - /bin/bash
          args:
            - -ec
            - |
              /scripts/kafka-init.sh
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                    fieldPath: metadata.name
            - name: KAFKA_VOLUME_DIR
              value: "/bitnami/kafka"
            - name: KAFKA_MIN_ID
              value: "0"
            - name: EXTERNAL_ACCESS_ENABLED
              value: "true"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: EXTERNAL_ACCESS_HOST
              value: "$(HOST_IP)"
            - name: EXTERNAL_ACCESS_PORTS_LIST
              value: "30091,30092,30093"
          volumeMounts:
            - name: data
              mountPath: /bitnami/kafka
            - name: kafka-config
              mountPath: /config
            - name: kafka-configmaps
              mountPath: /configmaps
            - name: kafka-secret-config
              mountPath: /secret-config
            - name: scripts
              mountPath: /scripts
            - name: tmp
              mountPath: /tmp
      containers:
        - name: kafka
          image: docker.io/bitnami/kafka:3.9.0-debian-12-r4
          imagePullPolicy: "IfNotPresent"
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
              - ALL
            readOnlyRootFilesystem: true
            runAsGroup: 1001
            runAsNonRoot: true
            runAsUser: 1001
            seLinuxOptions: {}
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: KAFKA_HEAP_OPTS
              value: "-Xmx4096m -Xms2048m"
            - name: KAFKA_KRAFT_CLUSTER_ID
              valueFrom:
                secretKeyRef:
                  name: kafka-kraft-cluster-id
                  key: kraft-cluster-id
          ports:
            - name: controller
              containerPort: 9093
            - name: client
              containerPort: 9092
            - name: interbroker
              containerPort: 9094
            - name: external
              containerPort: 9095
          livenessProbe:
            failureThreshold: 3
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - pgrep
                - -f
                - kafka
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            tcpSocket:
              port: "controller"
          resources:
            limits:
              cpu: 4
              memory: 8Gi
            requests:
              cpu: 500m
              memory: 512Mi
          volumeMounts:
            - name: data
              mountPath: /bitnami/kafka
            - name: logs
              mountPath: /opt/bitnami/kafka/logs
            - name: kafka-config
              mountPath: /opt/bitnami/kafka/config/server.properties
              subPath: server.properties
            - name: tmp
              mountPath: /tmp
      volumes:
        - name: kafka-configmaps
          configMap:
            name: kafka-controller-configuration
        - name: kafka-secret-config
          emptyDir: {}
        - name: kafka-config
          emptyDir: {}
        - name: tmp
          emptyDir: {}
        - name: scripts
          configMap:
            name: kafka-scripts
            defaultMode: 493
        - name: logs
          emptyDir: {}
  volumeClaimTemplates:
    - apiVersion: v1
      kind: PersistentVolumeClaim
      metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "10Gi"
        storageClassName: "local-path"
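
These manifests can be reproduced locally by rendering the chart with the same values file, or pulled from an already installed release; the output file name is arbitrary:

bash
# Render without installing
helm template kafka bitnami/kafka -f kafka.yaml > kafka-rendered.yaml

# Or dump the manifests of the installed release
helm get manifest kafka > kafka-rendered.yaml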