Milvus vector database on Kubernetes: Helm deployment wired to external components

Background: this walkthrough came out of a long run of pitfalls, from trivial to painful. The goal of this article is to surface as many of the traps in the install and deployment process as possible.
If you are reading this around 2024-07-15, you will find that pulling images from Docker Hub fails (from mainland China). There is no clean workaround; route your Docker daemon through a proxy.
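A minimal sketch of pointing the Docker daemon at a proxy on a systemd host; the proxy address http://127.0.0.1:7890 is a placeholder, substitute your own:

# Route dockerd through a proxy via a systemd drop-in
# (http://127.0.0.1:7890 is a placeholder; replace it with your proxy).
sudo mkdir -p /etc/systemd/system/docker.service.d
sudo tee /etc/systemd/system/docker.service.d/http-proxy.conf <<'EOF'
[Service]
Environment="HTTP_PROXY=http://127.0.0.1:7890"
Environment="HTTPS_PROXY=http://127.0.0.1:7890"
Environment="NO_PROXY=localhost,127.0.0.1,10.0.0.0/8"
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker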

Official deployment guide: https://milvus.io/docs/install_cluster-helm.md

1. If you just want a direct deployment without wiring in external components, use the online install as-is, but mind the issue above: pull the required images through a proxy first (a pre-pull sketch follows the image list below).

The required images (tags match the values.yaml below):

milvusdb/milvus:v2.4.5
milvusdb/milvus-config-tool:v0.1.2
docker.io/milvusdb/etcd:3.5.5-r4
zilliz/attu:v2.3.10
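A minimal pre-pull sketch: fetch the images on a machine that can reach Docker Hub (through the proxy), then load them onto the cluster nodes. It assumes the nodes run containerd (hence the k8s.io namespace for ctr); on Docker-based nodes, use docker load instead.

# On a machine with working (proxied) access to Docker Hub:
docker pull milvusdb/milvus:v2.4.5
docker pull milvusdb/milvus-config-tool:v0.1.2
docker pull milvusdb/etcd:3.5.5-r4
docker pull zilliz/attu:v2.3.10
docker save milvusdb/milvus:v2.4.5 milvusdb/milvus-config-tool:v0.1.2 \
            milvusdb/etcd:3.5.5-r4 zilliz/attu:v2.3.10 -o milvus-images.tar

# On each Kubernetes node (containerd assumed):
ctr -n k8s.io images import milvus-images.tar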

values.yaml

## Enable or disable Milvus Cluster mode
cluster:
  enabled: true

image:
  all:
    repository: milvusdb/milvus
    tag: v2.4.5
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  tools:
    repository: milvusdb/milvus-config-tool
    tag: v0.1.2
    pullPolicy: IfNotPresent

# Global node selector
# If set, this will apply to all milvus components
# Individual components can be set to a different node selector
nodeSelector: {}

# Global tolerations
# If set, this will apply to all milvus components
# Individual components can be set to a different tolerations
tolerations: []

# Global affinity
# If set, this will apply to all milvus components
# Individual components can be set to a different affinity
affinity: {}

# Global labels and annotations
# If set, this will apply to all milvus components
labels: {}
annotations: {}

# Extra configs for milvus.yaml
# If set, this config will merge into milvus.yaml
# Please follow the config structure in the milvus.yaml
# at https://github.com/milvus-io/milvus/blob/master/configs/milvus.yaml
# Note: this config will be the top priority which will override the config
# in the image and helm chart.
extraConfigFiles:
  user.yaml: |+
    #    For example enable rest http for milvus proxy
    #    proxy:
    #      http:
    #        enabled: true
    #      maxUserNum: 100
    #      maxRoleNum: 10
    ##  Enable tlsMode and set the tls cert and key
    #  tls:
    #    serverPemPath: /etc/milvus/certs/tls.crt
    #    serverKeyPath: /etc/milvus/certs/tls.key
    #   common:
    #     security:
    #       tlsMode: 1

## Expose the Milvus service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  type: NodePort
  port: 19530
  portName: milvus
  nodePort: ""
  annotations: {}
  labels: {}

  ## List of IP addresses at which the Milvus service is available
  ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  ##
  externalIPs: []
  #   - externalIp1

  # loadBalancerSourceRanges is a list of allowed CIDR values, which are combined with ServicePort to
  # set allowed inbound rules on the security group assigned to the master load balancer
  loadBalancerSourceRanges:
  - 0.0.0.0/0
  # Optionally assign a known public LB IP
  # loadBalancerIP: 1.2.3.4

ingress:
  enabled: false
  annotations:
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: GRPC
    nginx.ingress.kubernetes.io/listen-ports-ssl: '[19530]'
    nginx.ingress.kubernetes.io/proxy-body-size: 4m
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  labels: {}
  rules:
    - host: "milvus-example.local"
      path: "/"
      pathType: "Prefix"
    # - host: "milvus-example2.local"
    #   path: "/otherpath"
    #   pathType: "Prefix"
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - milvus-example.local

serviceAccount:
  create: false
  name:
  annotations:
  labels:

metrics:
  enabled: true

  serviceMonitor:
    # Set this to `true` to create ServiceMonitor for Prometheus operator
    enabled: false
    interval: "30s"
    scrapeTimeout: "10s"
    # Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    additionalLabels: {}

livenessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 30
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

readinessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 10
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

log:
  level: "info"
  file:
    maxSize: 300    # MB
    maxAge: 10    # day
    maxBackups: 20
  format: "text"    # text/json

  persistence:
    mountPath: "/milvus/logs"
    ## If true, create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: false
    annotations:
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Logs Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ## ReadWriteMany access mode required for milvus cluster.
      ##
      storageClass:
      accessModes: ReadWriteMany
      size: 10Gi
      subPath: ""

## Heaptrack traces all memory allocations and annotates these events with stack traces.
## See more: https://github.com/KDE/heaptrack
## Enabling heaptrack in production is not recommended.
heaptrack:
  image:
    repository: milvusdb/heaptrack
    tag: v0.1.0
    pullPolicy: IfNotPresent

standalone:
  replicas: 1  # Run standalone mode with replication disabled
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling

  ## Default message queue for milvus standalone
  ## Supported values: rocksmq, natsmq, pulsar and kafka
  messageQueue: rocksmq
  persistence:
    mountPath: "/var/lib/milvus"
    ## If true, Milvus will create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: true
    annotations:
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ##
      storageClass: "csi-driver-s3"
      accessModes: ReadWriteOnce
      size: 50Gi
      subPath: ""

proxy:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  http:
    enabled: true  # whether to enable http rest server
    debugMode:
      enabled: false
  # Mount a TLS secret into proxy pod
  tls:
    enabled: false
## when enabling proxy.tls, all items below should be uncommented and the key and crt values should be populated.
#    enabled: true
#    secretName: milvus-tls
## expecting base64 encoded values here: i.e. $(cat tls.crt | base64 -w 0) and $(cat tls.key | base64 -w 0)
#    key: LS0tLS1CRUdJTiBQU--REDUCT
#    crt: LS0tLS1CRUdJTiBDR--REDUCT
#  volumes:
#  - secret:
#      secretName: milvus-tls
#    name: milvus-tls
#  volumeMounts:
#  - mountPath: /etc/milvus/certs/
#    name: milvus-tls
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

rootCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active-standby is enabled
  replicas: 1  # Run Root Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for root coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 53100
    annotations: {}
    labels: {}
    clusterIP: ""

queryCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active-standby is enabled
  replicas: 1  # Run Query Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for query coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 19531
    annotations: {}
    labels: {}
    clusterIP: ""

queryNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # resources:
  #   limits:
  #     ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true  # Enable querynode load disk index, and search on disk index
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

indexCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active-standby is enabled
  replicas: 1   # Run Index Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for index coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 31000
    annotations: {}
    labels: {}
    clusterIP: ""

indexNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  # Set local storage size in resources
  # limits:
  #    ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  disk:
    enabled: true  # Enable index node build disk vector index
    size:
      enabled: false  # Enable local storage size limit
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

dataCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active-standby is enabled
  replicas: 1           # Run Data Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for data coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    port: 13333
    annotations: {}
    labels: {}
    clusterIP: ""

dataNode:
  enabled: true
  # You can set the number of replicas to -1 to remove the replicas field in case you want to use HPA
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

## mixCoordinator contains all coordinators
## If you want to use mixcoord, enable this and disable all the other coordinators
mixCoordinator:
  enabled: false
  # You can set the number of replicas greater than 1 only if active-standby is enabled
  replicas: 1           # Run Mixture Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: true  # Enable active-standby when you set multiple replicas for Mixture coordinator
  # Deployment strategy, default is RollingUpdate
  # Ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#rolling-update-deployment
  strategy: {}

  service:
    annotations: {}
    labels: {}
    clusterIP: ""

attu:
  enabled: true
  name: attu
  image:
    repository: zilliz/attu
    tag: v2.3.10
    pullPolicy: IfNotPresent
  service:
    annotations: {}
    labels: {}
    type: NodePort
    port: 3000
    # loadBalancerIP: ""
  resources: {}
  podLabels: {}
  ingress:
    enabled: false
    annotations: {}
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    labels: {}
    hosts:
      - milvus-attu.local
    tls: []
    #  - secretName: chart-attu-tls
    #    hosts:
    #      - milvus-attu.local


## Configuration values for the minio dependency
## ref: https://github.com/zilliztech/milvus-helm/blob/master/charts/minio/README.md
##

minio:
  enabled: false
  name: minio
  mode: distributed
  image:
    tag: "RELEASE.2023-03-20T20-16-18Z"
    pullPolicy: IfNotPresent
  accessKey: minioadmin
  secretKey: minioadmin
  existingSecret: ""
  bucketName: "milvus-bucket"
  rootPath: file
  useIAM: false
  iamEndpoint: ""
  region: ""
  useVirtualHost: false
  podDisruptionBudget:
    enabled: false
  resources:
    requests:
      memory: 2Gi

  service:
    type: ClusterIP
    port: 9000

  persistence:
    enabled: true
    existingClaim: ""
    storageClass: "csi-driver-s3"
    accessMode: ReadWriteOnce
    size: 500Gi

  livenessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 5

  readinessProbe:
    enabled: true
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 1
    successThreshold: 1
    failureThreshold: 5

  startupProbe:
    enabled: true
    initialDelaySeconds: 0
    periodSeconds: 10
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 60

## Configuration values for the etcd dependency
## ref: https://artifacthub.io/packages/helm/bitnami/etcd
##

etcd:
  enabled: false
  name: etcd
  replicaCount: 3
  pdb:
    create: false
  image:
    repository: "milvusdb/etcd"
    tag: "3.5.5-r4"
    pullPolicy: IfNotPresent

  service:
    type: ClusterIP
    port: 2379
    peerPort: 2380

  auth:
    rbac:
      enabled: false

  persistence:
    enabled: true
    storageClass: "csi-driver-s3"
    accessMode: ReadWriteOnce
    size: 10Gi

  ## Change default timeout periods to mitigate zombie probe processes
  livenessProbe:
    enabled: true
    timeoutSeconds: 10

  readinessProbe:
    enabled: true
    periodSeconds: 20
    timeoutSeconds: 10

  ## Enable auto compaction
  ## compaction every 1000 revisions
  ##
  autoCompactionMode: revision
  autoCompactionRetention: "1000"

  ## Increase default quota to 4G
  ##
  extraEnvVars:
  - name: ETCD_QUOTA_BACKEND_BYTES
    value: "4294967296"
  - name: ETCD_HEARTBEAT_INTERVAL
    value: "500"
  - name: ETCD_ELECTION_TIMEOUT
    value: "2500"

## Configuration values for the pulsar dependency
## ref: https://github.com/apache/pulsar-helm-chart
##

pulsar:
  enabled: false
  name: pulsar

  fullnameOverride: ""
  persistence: true

  maxMessageSize: "5242880"  # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.

  rbac:
    enabled: false
    psp: false
    limit_to_namespace: true

  affinity:
    anti_affinity: false

## enableAntiAffinity: no

  components:
    zookeeper: true
    bookkeeper: true
    # bookkeeper - autorecovery
    autorecovery: true
    broker: true
    functions: false
    proxy: true
    toolset: false
    pulsar_manager: false

  monitoring:
    prometheus: false
    grafana: false
    node_exporter: false
    alert_manager: false

  images:
    broker:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    autorecovery:
      repository: apachepulsar/pulsar
      tag: 2.8.2
      pullPolicy: IfNotPresent
    zookeeper:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    bookie:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    proxy:
      repository: apachepulsar/pulsar
      pullPolicy: IfNotPresent
      tag: 2.8.2
    pulsar_manager:
      repository: apachepulsar/pulsar-manager
      pullPolicy: IfNotPresent
      tag: v0.1.0

  zookeeper:
    resources:
      requests:
        memory: 1024Mi
        cpu: 0.3
    configData:
      PULSAR_MEM: >
        -Xms1024m
        -Xmx1024m
      PULSAR_GC: >
         -Dcom.sun.management.jmxremote
         -Djute.maxbuffer=10485760
         -XX:+ParallelRefProcEnabled
         -XX:+UnlockExperimentalVMOptions
         -XX:+DoEscapeAnalysis
         -XX:+DisableExplicitGC
         -XX:+PerfDisableSharedMem
         -Dzookeeper.forceSync=no
    pdb:
      usePolicy: false

  bookkeeper:
    replicaCount: 3
    volumes:
      journal:
        name: journal
        size: 100Gi
      ledgers:
        name: ledgers
        size: 200Gi
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+UseG1GC -XX:MaxGCPauseMillis=10
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
        -XX:+PerfDisableSharedMem
        -XX:+PrintGCDetails
      nettyMaxFrameSizeBytes: "104867840"
    pdb:
      usePolicy: false

  broker:
    component: broker
    podMonitor:
      enabled: false
    replicaCount: 1
    resources:
      requests:
        memory: 4096Mi
        cpu: 1.5
    configData:
      PULSAR_MEM: >
        -Xms4096m
        -Xmx4096m
        -XX:MaxDirectMemorySize=8192m
      PULSAR_GC: >
        -Dio.netty.leakDetectionLevel=disabled
        -Dio.netty.recycler.linkCapacity=1024
        -XX:+ParallelRefProcEnabled
        -XX:+UnlockExperimentalVMOptions
        -XX:+DoEscapeAnalysis
        -XX:ParallelGCThreads=32
        -XX:ConcGCThreads=32
        -XX:G1NewSizePercent=50
        -XX:+DisableExplicitGC
        -XX:-ResizePLAB
        -XX:+ExitOnOutOfMemoryError
      maxMessageSize: "104857600"
      defaultRetentionTimeInMinutes: "10080"
      defaultRetentionSizeInMB: "-1"
      backlogQuotaDefaultLimitGB: "8"
      ttlDurationDefaultInSeconds: "259200"
      subscriptionExpirationTimeMinutes: "3"
      backlogQuotaDefaultRetentionPolicy: producer_exception
    pdb:
      usePolicy: false

  autorecovery:
    resources:
      requests:
        memory: 512Mi
        cpu: 1

  proxy:
    replicaCount: 1
    podMonitor:
      enabled: false
    resources:
      requests:
        memory: 2048Mi
        cpu: 1
    service:
      type: ClusterIP
    ports:
      pulsar: 6650
    configData:
      PULSAR_MEM: >
        -Xms2048m -Xmx2048m
      PULSAR_GC: >
        -XX:MaxDirectMemorySize=2048m
      httpNumThreads: "100"
    pdb:
      usePolicy: false

  pulsar_manager:
    service:
      type: ClusterIP

  pulsar_metadata:
    component: pulsar-init
    image:
      # the image used for running `pulsar-cluster-initialize` job
      repository: apachepulsar/pulsar
      tag: 2.8.2


## Configuration values for the kafka dependency
## ref: https://artifacthub.io/packages/helm/bitnami/kafka
##

kafka:
  enabled: false
  name: kafka
  replicaCount: 3
  image:
    repository: bitnami/kafka
    tag: 3.1.0-debian-10-r52
  ## Increase graceful termination for kafka graceful shutdown
  terminationGracePeriodSeconds: "90"
  pdb:
    create: false

  ## Enable startup probe to prevent pod restarts during recovery
  startupProbe:
    enabled: true

  ## Kafka Java Heap size
  heapOpts: "-Xmx4096m -Xms4096m"
  maxMessageBytes: _10485760
  defaultReplicationFactor: 3
  offsetsTopicReplicationFactor: 3
  ## Only enable time based log retention
  logRetentionHours: 168
  logRetentionBytes: _-1
  extraEnvVars:
  - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
    value: "5242880"
  - name: KAFKA_CFG_MAX_REQUEST_SIZE
    value: "5242880"
  - name: KAFKA_CFG_REPLICA_FETCH_MAX_BYTES
    value: "10485760"
  - name: KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES
    value: "5242880"
  - name: KAFKA_CFG_LOG_ROLL_HOURS
    value: "24"

  persistence:
    enabled: true
    storageClass:
    accessMode: ReadWriteOnce
    size: 300Gi

  metrics:
    ## Prometheus Kafka exporter: exposes complementary metrics to the JMX exporter
    kafka:
      enabled: false
      image:
        repository: bitnami/kafka-exporter
        tag: 1.4.2-debian-10-r182

    ## Prometheus JMX exporter: exposes the majority of Kafka's metrics
    jmx:
      enabled: false
      image:
        repository: bitnami/jmx-exporter
        tag: 0.16.1-debian-10-r245

    ## To enable serviceMonitor, you must enable either the Kafka exporter or the JMX exporter,
    ## and you can enable them both
    serviceMonitor:
      enabled: false

  service:
    type: ClusterIP
    ports:
      client: 9092

  zookeeper:
    enabled: true
    replicaCount: 3

###################################
# External S3
# - these configs are only used when `externalS3.enabled` is true
###################################
externalS3:
  enabled: true
  host: "172.20.1.124"
  port: "9000"
  accessKey: "minioadmin"
  secretKey: "minioadmin"
  useSSL: false
  bucketName: "milvus-dev"
  rootPath: ""
  useIAM: false
  cloudProvider: "aws"
  iamEndpoint: ""
  region: ""
  useVirtualHost: false

###################################
# GCS Gateway
# - these configs are only used when `minio.gcsgateway.enabled` is true
###################################
externalGcs:
  bucketName: ""

###################################
# External etcd
# - these configs are only used when `externalEtcd.enabled` is true
###################################
externalEtcd:
  enabled: true

  ## the endpoints of the external etcd
  ##
  endpoints:
    - xxxx:23790

###################################
# External pulsar
# - these configs are only used when `externalPulsar.enabled` is true
###################################
externalPulsar:
  enabled: true
  host: "xxx"
  port: 30012
  maxMessageSize: "5242880"  # 5 * 1024 * 1024 Bytes, Maximum size of each message in pulsar.
  tenant: "xx"
  namespace: "xxx"
  authPlugin: "org.apache.pulsar.client.impl.auth.AuthenticationToken"
  authParams: token:"xxx"

###################################
# External kafka
# - these configs are only used when `externalKafka.enabled` is true
# - note that the following are just examples, you should confirm the
#   value of brokerList and mechanisms according to the actual external
#   Kafka configuration. E.g. If you select the AWS MSK, the configuration
#   should look something like this:
#   externalKafka:
#     enabled: true
#     brokerList: "xxxx:9096"
#     securityProtocol: SASL_SSL
#     sasl:
#       mechanisms: SCRAM-SHA-512
#       password: "xxx"
#       username: "xxx"
###################################
externalKafka:
  enabled: false
  brokerList: localhost:9092
  securityProtocol: SASL_SSL
  sasl:
    mechanisms: PLAIN
    username: ""
    password: ""

The rendered Kubernetes manifest, milvus_manifest.yaml, follows; first, a sketch of how it is generated.
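A minimal sketch, assuming the same chart repo as above; the release name my-release and chart version 4.1.34 match the labels in the manifest:

helm template my-release milvus/milvus --version 4.1.34 -f values.yaml > milvus_manifest.yaml
kubectl apply -f milvus_manifest.yaml

The full milvus_manifest.yaml: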

---
# Source: milvus/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-release-milvus
data:
  default.yaml: |+
    # Copyright (C) 2019-2021 Zilliz. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
    # with the License. You may obtain a copy of the License at
    #
    # http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software distributed under the License
    # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
    # or implied. See the License for the specific language governing permissions and limitations under the License.
    
    etcd:
      endpoints:
        - xxxx:23790
    
    metastore:
      type: etcd
    
    minio:
      address: xxxx
      port: 9000
      accessKeyID: minioadmin
      secretAccessKey: minioadmin
      useSSL: false
      bucketName: milvus-dev
      rootPath:
      useIAM: false
      cloudProvider: aws
      iamEndpoint:
      region:
      useVirtualHost: false

    mq:
      type: pulsar

    messageQueue: pulsar

    pulsar:
      address: xxx
      port: 6650
      maxMessageSize: 5242880
      tenant: "my-tenant"
      namespace: my-namespace

    
    rootCoord:
      address: my-release-milvus-rootcoord
      port: 53100
      enableActiveStandby: true  # Enable rootcoord active-standby
    
    proxy:
      port: 19530
      internalPort: 19529
    
    queryCoord:
      address: my-release-milvus-querycoord
      port: 19531
    
      enableActiveStandby: true  # Enable querycoord active-standby
    
    queryNode:
      port: 21123
      enableDisk: true # Enable querynode load disk index, and search on disk index
    
    indexCoord:
      address: my-release-milvus-indexcoord
      port: 31000
      enableActiveStandby: true  # Enable indexcoord active-standby
    
    indexNode:
      port: 21121
      enableDisk: true # Enable index node build disk vector index
    
    dataCoord:
      address: my-release-milvus-datacoord
      port: 13333
      enableActiveStandby: true  # Enable datacoord active-standby
    
    dataNode:
      port: 21124
    
    log:
      level: info
      file:
        rootPath: ""
        maxSize: 300
        maxAge: 10
        maxBackups: 20
      format: text
  user.yaml: |-
    #    For example enable rest http for milvus proxy
    #    proxy:
    #      http:
    #        enabled: true
    #      maxUserNum: 100
    #      maxRoleNum: 10
    ##  Enable tlsMode and set the tls cert and key
    #  tls:
    #    serverPemPath: /etc/milvus/certs/tls.crt
    #    serverKeyPath: /etc/milvus/certs/tls.key
    #   common:
    #     security:
    #       tlsMode: 1
---
# Source: milvus/templates/attu-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-attu
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "attu"
spec:
  type: NodePort
  ports:
    - name: attu
      protocol: TCP
      port: 3000
      targetPort: 3000
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "attu"
---
# Source: milvus/templates/datacoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-datacoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datacoord"
spec:
  type: ClusterIP
  ports:
    - name: datacoord
      port: 13333
      protocol: TCP
      targetPort: datacoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "datacoord"
---
# Source: milvus/templates/datanode-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-datanode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datanode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "datanode"
---
# Source: milvus/templates/indexcoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-indexcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexcoord"
spec:
  type: ClusterIP
  ports:
    - name: indexcoord
      port: 31000
      protocol: TCP
      targetPort: indexcoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "indexcoord"
---
# Source: milvus/templates/indexnode-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-indexnode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexnode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "indexnode"
---
# Source: milvus/templates/querycoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-querycoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querycoord"
spec:
  type: ClusterIP
  ports:
    - name: querycoord
      port: 19531
      protocol: TCP
      targetPort: querycoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "querycoord"
---
# Source: milvus/templates/querynode-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-querynode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querynode"
spec:
  type: ClusterIP
  clusterIP: None
  ports:
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "querynode"
---
# Source: milvus/templates/rootcoord-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus-rootcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "rootcoord"
spec:
  type: ClusterIP
  ports:
    - name: rootcoord
      port: 53100
      protocol: TCP
      targetPort: rootcoord
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "rootcoord"
---
# Source: milvus/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: my-release-milvus
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"
spec:
  type: NodePort
  ports:
    - name: milvus
      port: 19530
      protocol: TCP
      targetPort: milvus
    - name: metrics
      protocol: TCP
      port: 9091
      targetPort: metrics
  selector:
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    component: "proxy"
---
# Source: milvus/templates/attu-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-attu
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "attu"
    
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "attu"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "attu"
        
    spec:
      containers:
      - name: attu
        image: zilliz/attu:v2.3.10
        imagePullPolicy: IfNotPresent
        ports:
        - name: attu
          containerPort: 3000
          protocol: TCP
        env:
        - name: MILVUS_URL
          value: http://my-release-milvus:19530
        resources:
          {}
---
# Source: milvus/templates/datacoord-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-datacoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datacoord"
    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "datacoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "datacoord"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: datacoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "datacoord" ]
        env:
        ports:
          - name: datacoord
            containerPort: 13333
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/datanode-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-datanode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "datanode"

    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "datanode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "datanode"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: datanode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "datanode" ]
        env:
        ports:
          - name: datanode
            containerPort: 21124
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/indexcoord-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-indexcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexcoord"
    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "indexcoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "indexcoord"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: indexcoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "indexcoord" ]
        env:
        ports:
          - name: indexcoord
            containerPort: 31000
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/indexnode-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-indexnode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "indexnode"
    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "indexnode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "indexnode"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: indexnode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "indexnode" ]
        env:
        ports:
          - name: indexnode
            containerPort: 21121
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
        - mountPath: /var/lib/milvus/data
          name: disk

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
      - name: disk
        emptyDir: {}
---
# Source: milvus/templates/proxy-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-proxy
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "proxy"

    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "proxy"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "proxy"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: proxy
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "proxy" ]
        env:
        ports:
          - name: milvus
            containerPort: 19530
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/querycoord-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-querycoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querycoord"
    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "querycoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        
        component: "querycoord"
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: querycoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "querycoord" ]
        env:
        ports:
          - name: querycoord
            containerPort: 19531
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
---
# Source: milvus/templates/querynode-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-querynode
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "querynode"
    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "querynode"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "querynode"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: querynode
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "querynode" ]
        env:
        ports:
          - name: querynode
            containerPort: 21123
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools
        - mountPath: /var/lib/milvus/data
          name: disk

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
      - name: disk
        emptyDir: {}
---
# Source: milvus/templates/rootcoord-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-release-milvus-rootcoord
  labels:
    helm.sh/chart: milvus-4.1.34
    app.kubernetes.io/name: milvus
    app.kubernetes.io/instance: my-release
    app.kubernetes.io/version: "2.4.5"
    app.kubernetes.io/managed-by: Helm
    component: "rootcoord"
    
  annotations:
    

spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: milvus
      app.kubernetes.io/instance: my-release
      component: "rootcoord"
  template:
    metadata:
      labels:
        app.kubernetes.io/name: milvus
        app.kubernetes.io/instance: my-release
        component: "rootcoord"
        
      annotations:
        checksum/config: 4d919a6f7279f31d3f04198e9626ab7a0dec59a9e2d63b9b0758840233e77b8f
        
    spec:
      serviceAccountName: default
      initContainers:
      - name: config
        command:
        - /cp
        - /run-helm.sh,/merge
        - /milvus/tools/run-helm.sh,/milvus/tools/merge
        image: "milvusdb/milvus-config-tool:v0.1.2"
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /milvus/tools
          name: tools
      containers:
      - name: rootcoord
        image: "milvusdb/milvus:v2.4.5"
        imagePullPolicy: IfNotPresent
        args: [ "/milvus/tools/run-helm.sh", "milvus", "run", "rootcoord" ]
        env:
        ports:
          - name: rootcoord
            containerPort: 53100
            protocol: TCP
          - name: metrics
            containerPort: 9091
            protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 30
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /healthz
            port: metrics
          initialDelaySeconds: 90
          periodSeconds: 10
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          {}
        volumeMounts:
        - name: milvus-config
          mountPath: /milvus/configs/default.yaml
          subPath: default.yaml
          readOnly: true
        - name: milvus-config
          mountPath: /milvus/configs/user.yaml
          subPath: user.yaml
          readOnly: true
        - mountPath: /milvus/tools
          name: tools

      volumes:
      - name: milvus-config
        configMap:
          name: my-release-milvus
      - name: tools
        emptyDir: {}
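After applying the manifest, a quick way to verify the rollout and reach the services; the NodePorts are assigned by Kubernetes, so look them up rather than assuming them:

kubectl get pods -l app.kubernetes.io/instance=my-release
# NodePorts for the proxy (port 19530) and Attu (port 3000):
kubectl get svc my-release-milvus my-release-milvus-attu
# Every component serves /healthz on its metrics port (9091):
kubectl port-forward svc/my-release-milvus 9091:9091 &
curl http://127.0.0.1:9091/healthz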