github.com/verrazzano/verrazzano@v1.7.0/platform-operator/thirdparty/charts/prometheus-community/kube-prometheus-stack/values.yaml (about)

     1  # Default values for kube-prometheus-stack.
     2  # This is a YAML-formatted file.
     3  # Declare variables to be passed into your templates.
     4  
     5  ## Provide a name in place of kube-prometheus-stack for `app:` labels
     6  ##
     7  nameOverride: ""
     8  
     9  ## Override the deployment namespace
    10  ##
    11  namespaceOverride: ""
    12  
    13  ## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
    14  ##
    15  kubeTargetVersionOverride: ""
    16  
    17  ## Allow kubeVersion to be overridden while creating the ingress
    18  ##
    19  kubeVersionOverride: ""
    20  
    21  ## Provide a name to substitute for the full names of resources
    22  ##
    23  fullnameOverride: ""
    24  
    25  ## Labels to apply to all resources
    26  ##
    27  commonLabels: {}
    28  # scmhash: abc123
    29  # myLabel: aakkmd
    30  
    31  ## Create default rules for monitoring the cluster
    32  ##
    33  defaultRules:
    34    create: true
    35    rules:
    36      alertmanager: true
    37      etcd: true
    38      configReloaders: true
    39      general: true
    40      k8s: true
    41      kubeApiserverAvailability: true
    42      kubeApiserverBurnrate: true
    43      kubeApiserverHistogram: true
    44      kubeApiserverSlos: true
    45      kubeControllerManager: true
    46      kubelet: true
    47      kubeProxy: true
    48      kubePrometheusGeneral: true
    49      kubePrometheusNodeRecording: true
    50      kubernetesApps: true
    51      kubernetesResources: true
    52      kubernetesStorage: true
    53      kubernetesSystem: true
    54      kubeSchedulerAlerting: true
    55      kubeSchedulerRecording: true
    56      kubeStateMetrics: true
    57      network: true
    58      node: true
    59      nodeExporterAlerting: true
    60      nodeExporterRecording: true
    61      prometheus: true
    62      prometheusOperator: true
    63      verrazzanoPlatformOperator: true
    64      verrazzanoApplicationOperator: true
    65      verrazzanoClusterOperator: true
    66      verrazzanoMonitoringOperator: true
    67  
    68    ## Reduce app namespace alert scope
    69    appNamespacesTarget: ".*"
    70  
    71    ## Labels for default rules
    72    labels: {}
    73    ## Annotations for default rules
    74    annotations: {}
    75  
    76    ## Additional labels for PrometheusRule alerts
    77    additionalRuleLabels: {}
    78  
    79    ## Additional annotations for PrometheusRule alerts
    80    additionalRuleAnnotations: {}
    81  
    82    ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
    83    runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
    84  
    85    ## Disabled PrometheusRule alerts
    86    ## @extra defaultRules.disabled.VerrazzanoPlatformOperatorReconcileErrorRate Disable VerrazzanoPlatformOperatorReconcileErrorRate rule when defaultRules.rules.verrazzanoPlatformOperator is true
    87    ## @extra defaultRules.disabled.VerrazzanoPlatformOperatorNotReady Disable VerrazzanoPlatformOperatorNotReady rule when defaultRules.rules.verrazzanoPlatformOperator is true
    88    ## @extra defaultRules.disabled.VerrazzanoPlatformOperatorWebhookNotReady Disable VerrazzanoPlatformOperatorWebhookNotReady rule when defaultRules.rules.verrazzanoPlatformOperator is true
    89    ## @extra defaultRules.disabled.VerrazzanoPlatformOperatorNotRunning Disable VerrazzanoPlatformOperatorNotRunning rule when defaultRules.rules.verrazzanoPlatformOperator is true
    90    ## @extra defaultRules.disabled.VerrazzanoPlatformOperatorWebhookNotRunning Disable VerrazzanoPlatformOperatorWebhookNotRunning rule when defaultRules.rules.verrazzanoPlatformOperator is true
    91    ## @extra defaultRules.disabled.VerrazzanoComponentsNotReady Disable VerrazzanoComponentsNotReady rule when defaultRules.rules.verrazzanoPlatformOperator is true
    92    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorAppConfigReconcileErrorRate Disable VerrazzanoApplicationOperatorAppConfigReconcileErrorRate rule when defaultRules.rules.verrazzanoApplicationOperator is true
    93    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorCoherenceWorkloadReconcileErrorRate Disable VerrazzanoApplicationOperatorCoherenceWorkloadReconcileErrorRate rule when defaultRules.rules.verrazzanoApplicationOperator is true
    94    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorHelidonWorkloadReconcileErrorRate Disable VerrazzanoApplicationOperatorHelidonWorkloadReconcileErrorRate rule when defaultRules.rules.verrazzanoApplicationOperator is true
    95    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorIngressTraitReconcileErrorRate Disable VerrazzanoApplicationOperatorIngressTraitReconcileErrorRate rule when defaultRules.rules.verrazzanoApplicationOperator is true
    96    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorNotReady Disable VerrazzanoApplicationOperatorNotReady rule when defaultRules.rules.verrazzanoApplicationOperator is true
    97    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorWebhookNotReady Disable VerrazzanoApplicationOperatorWebhookNotReady rule when defaultRules.rules.verrazzanoApplicationOperator is true
    98    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorNotRunning Disable VerrazzanoApplicationOperatorNotRunning rule when defaultRules.rules.verrazzanoApplicationOperator is true
    99    ## @extra defaultRules.disabled.VerrazzanoApplicationOperatorWebhookNotRunning Disable VerrazzanoApplicationOperatorWebhookNotRunning rule when defaultRules.rules.verrazzanoApplicationOperator is true
   100    ## @extra defaultRules.disabled.VerrazzanoClusterOperatorVMCReconcileErrorRate Disable VerrazzanoClusterOperatorVMCReconcileErrorRate rule when defaultRules.rules.verrazzanoClusterOperator is true
   101    ## @extra defaultRules.disabled.VerrazzanoClusterOperatorNotReady Disable VerrazzanoClusterOperatorNotReady rule when defaultRules.rules.verrazzanoClusterOperator is true
   102    ## @extra defaultRules.disabled.VerrazzanoClusterOperatorWebhookNotReady Disable VerrazzanoClusterOperatorWebhookNotReady rule when defaultRules.rules.verrazzanoClusterOperator is true
   103    ## @extra defaultRules.disabled.VerrazzanoClusterOperatorNotRunning Disable VerrazzanoClusterOperatorNotRunning rule when defaultRules.rules.verrazzanoClusterOperator is true
   104    ## @extra defaultRules.disabled.VerrazzanoClusterOperatorWebhookNotRunning Disable VerrazzanoClusterOperatorWebhookNotRunning rule when defaultRules.rules.verrazzanoClusterOperator is true
   105    ## @extra defaultRules.disabled.VerrazzanoMonitoringOperatorReconcileErrorRate Disable VerrazzanoMonitoringOperatorReconcileErrorRate rule when defaultRules.rules.verrazzanoMonitoringOperator is true
   106    ## @extra defaultRules.disabled.VerrazzanoMonitoringOperatorNotReady Disable VerrazzanoMonitoringOperatorNotReady rule when defaultRules.rules.verrazzanoMonitoringOperator is true
   107    ## @extra defaultRules.disabled.VerrazzanoMonitoringOperatorNotRunning Disable VerrazzanoMonitoringOperatorNotRunning rule when defaultRules.rules.verrazzanoMonitoringOperator is true
   108    disabled: {}
   109    # KubeAPIDown: true
   110    # NodeRAIDDegraded: true
   111  
   112  ## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
   113  ##
   114  # additionalPrometheusRules: []
   115  #  - name: my-rule-file
   116  #    groups:
   117  #      - name: my_group
   118  #        rules:
   119  #        - record: my_record
   120  #          expr: 100 * my_record
   121  
   122  ## Provide custom recording or alerting rules to be deployed into the cluster.
   123  ##
   124  additionalPrometheusRulesMap: {}
   125  #  rule-name:
   126  #    groups:
   127  #    - name: my_group
   128  #      rules:
   129  #      - record: my_record
   130  #        expr: 100 * my_record
   131  
   132  ##
   133  global:
   134    rbac:
   135      create: true
   136  
   137      ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
   138      ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
   139      createAggregateClusterRoles: false
   140      pspEnabled: false
   141      pspAnnotations: {}
   142        ## Specify pod annotations
   143        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
   144        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
   145        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
   146        ##
   147        # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
   148        # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
   149        # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
   150  
   151  ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
   152    ##
   153    imageRegistry: ""
   154  
   155    ## Reference to one or more secrets to be used when pulling images
   156    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
   157    ##
   158    imagePullSecrets: []
   159    # - name: "image-pull-secret"
   160    # or
   161    # - "image-pull-secret"
   162  
   163  ## Configuration for alertmanager
   164  ## ref: https://prometheus.io/docs/alerting/alertmanager/
   165  ##
   166  alertmanager:
   167  
   168    ## Deploy alertmanager
   169    ##
   170    enabled: true
   171  
   172    ## Annotations for Alertmanager
   173    ##
   174    annotations: {}
   175  
   176    ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2
   177    ##
   178    apiVersion: v2
   179  
   180    ## Service account for Alertmanager to use.
   181    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
   182    ##
   183    serviceAccount:
   184      create: true
   185      name: ""
   186      annotations: {}
   187      automountServiceAccountToken: true
   188  
   189    ## Configure pod disruption budgets for Alertmanager
   190    ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
   191    ## This configuration is immutable once created and will require the PDB to be deleted to be changed
   192    ## https://github.com/kubernetes/kubernetes/issues/45398
   193    ##
   194    podDisruptionBudget:
   195      enabled: false
   196      minAvailable: 1
   197      maxUnavailable: ""
   198  
   199    ## Alertmanager configuration directives
   200    ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
   201    ##      https://prometheus.io/webtools/alerting/routing-tree-editor/
   202    ##
   203    config:
   204      global:
   205        resolve_timeout: 5m
   206      inhibit_rules:
   207        - source_matchers:
   208            - 'severity = critical'
   209          target_matchers:
   210            - 'severity =~ warning|info'
   211          equal:
   212            - 'namespace'
   213            - 'alertname'
   214        - source_matchers:
   215            - 'severity = warning'
   216          target_matchers:
   217            - 'severity = info'
   218          equal:
   219            - 'namespace'
   220            - 'alertname'
   221        - source_matchers:
   222            - 'alertname = InfoInhibitor'
   223          target_matchers:
   224            - 'severity = info'
   225          equal:
   226            - 'namespace'
   227      route:
   228        group_by: ['namespace']
   229        group_wait: 30s
   230        group_interval: 5m
   231        repeat_interval: 12h
   232        receiver: 'null'
   233        routes:
   234        - receiver: 'null'
   235          matchers:
   236            - alertname =~ "InfoInhibitor|Watchdog"
   237      receivers:
   238      - name: 'null'
   239      templates:
   240      - '/etc/alertmanager/config/*.tmpl'
   241  
   242    ## Alertmanager configuration directives (as string type, preferred over the config hash map)
   243    ## stringConfig will be used only, if tplConfig is true
   244    ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
   245    ##      https://prometheus.io/webtools/alerting/routing-tree-editor/
   246    ##
   247    stringConfig: ""
   248  
   249    ## Pass the Alertmanager configuration directives through Helm's templating
   250    ## engine. If the Alertmanager configuration contains Alertmanager templates,
   251    ## they'll need to be properly escaped so that they are not interpreted by
   252    ## Helm
   253    ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
   254    ##      https://prometheus.io/docs/alerting/configuration/#tmpl_string
   255    ##      https://prometheus.io/docs/alerting/notifications/
   256    ##      https://prometheus.io/docs/alerting/notification_examples/
   257    tplConfig: false
   258  
   259    ## Alertmanager template files to format alerts
   260    ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
   261    ## they have a .tmpl file suffix will be loaded. See config.templates above
   262    ## to change, add other suffixes. If adding other suffixes, be sure to update
   263    ## config.templates above to include those suffixes.
   264    ## ref: https://prometheus.io/docs/alerting/notifications/
   265    ##      https://prometheus.io/docs/alerting/notification_examples/
   266    ##
   267    templateFiles: {}
   268    #
   269    ## An example template:
   270    #   template_1.tmpl: |-
   271    #       {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
   272    #
   273    #       {{ define "slack.myorg.text" }}
   274    #       {{- $root := . -}}
   275    #       {{ range .Alerts }}
   276    #         *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
   277    #         *Cluster:* {{ template "cluster" $root }}
   278    #         *Description:* {{ .Annotations.description }}
   279    #         *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
   280    #         *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
   281    #         *Details:*
   282    #           {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
   283    #           {{ end }}
   284    #       {{ end }}
   285    #       {{ end }}
   286  
   287    ingress:
   288      enabled: false
   289  
   290      # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
   291      # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
   292      # ingressClassName: nginx
   293  
   294      annotations: {}
   295  
   296      labels: {}
   297  
   298      ## Redirect ingress to an additional defined port on the service
   299      # servicePort: 8081
   300  
   301      ## Hosts must be provided if Ingress is enabled.
   302      ##
   303      hosts: []
   304        # - alertmanager.domain.com
   305  
   306      ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
   307      ##
   308      paths: []
   309      # - /
   310  
   311      ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
   312      ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
   313      # pathType: ImplementationSpecific
   314  
   315      ## TLS configuration for Alertmanager Ingress
   316      ## Secret must be manually created in the namespace
   317      ##
   318      tls: []
   319      # - secretName: alertmanager-general-tls
   320      #   hosts:
   321      #   - alertmanager.example.com
   322  
   323    ## Configuration for Alertmanager secret
   324    ##
   325    secret:
   326      annotations: {}
   327  
   328    ## Configuration for creating an Ingress that will map to each Alertmanager replica service
   329    ## alertmanager.servicePerReplica must be enabled
   330    ##
   331    ingressPerReplica:
   332      enabled: false
   333  
   334      # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
   335      # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
   336      # ingressClassName: nginx
   337  
   338      annotations: {}
   339      labels: {}
   340  
   341      ## Final form of the hostname for each per replica ingress is
   342      ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
   343      ##
   344      ## Prefix for the per replica ingress that will have `-$replicaNumber`
   345      ## appended to the end
   346      hostPrefix: ""
   347      ## Domain that will be used for the per replica ingress
   348      hostDomain: ""
   349  
   350      ## Paths to use for ingress rules
   351      ##
   352      paths: []
   353      # - /
   354  
   355      ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
   356      ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
   357      # pathType: ImplementationSpecific
   358  
   359      ## Secret name containing the TLS certificate for alertmanager per replica ingress
   360      ## Secret must be manually created in the namespace
   361      tlsSecretName: ""
   362  
   363      ## Separated secret for each per replica Ingress. Can be used together with cert-manager
   364      ##
   365      tlsSecretPerReplica:
   366        enabled: false
   367        ## Final form of the secret for each per replica ingress is
   368        ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
   369        ##
   370        prefix: "alertmanager"
   371  
   372    ## Configuration for Alertmanager service
   373    ##
   374    service:
   375      annotations: {}
   376      labels: {}
   377      clusterIP: ""
   378  
   379      ## Port for Alertmanager Service to listen on
   380      ##
   381      port: 9093
   382      ## To be used with a proxy extraContainer port
   383      ##
   384      targetPort: 9093
   385      ## Port to expose on each node
   386      ## Only used if service.type is 'NodePort'
   387      ##
   388      nodePort: 30903
   389      ## List of IP addresses at which the Prometheus server service is available
   390      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
   391      ##
   392  
   393      ## Additional ports to open for Alertmanager service
   394      additionalPorts: []
   395      # additionalPorts:
   396      # - name: authenticated
   397      #   port: 8081
   398      #   targetPort: 8081
   399  
   400      externalIPs: []
   401      loadBalancerIP: ""
   402      loadBalancerSourceRanges: []
   403  
   404      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
   405      ##
   406      externalTrafficPolicy: Cluster
   407  
   408      ## Service type
   409      ##
   410      type: ClusterIP
   411  
   412    ## Configuration for creating a separate Service for each statefulset Alertmanager replica
   413    ##
   414    servicePerReplica:
   415      enabled: false
   416      annotations: {}
   417  
   418      ## Port for Alertmanager Service per replica to listen on
   419      ##
   420      port: 9093
   421  
   422      ## To be used with a proxy extraContainer port
   423      targetPort: 9093
   424  
   425      ## Port to expose on each node
   426      ## Only used if servicePerReplica.type is 'NodePort'
   427      ##
   428      nodePort: 30904
   429  
   430      ## Loadbalancer source IP ranges
   431      ## Only used if servicePerReplica.type is "LoadBalancer"
   432      loadBalancerSourceRanges: []
   433  
   434      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
   435      ##
   436      externalTrafficPolicy: Cluster
   437  
   438      ## Service type
   439      ##
   440      type: ClusterIP
   441  
   442    ## If true, create a serviceMonitor for alertmanager
   443    ##
   444    serviceMonitor:
   445      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
   446      ##
   447      interval: ""
   448      selfMonitor: true
   449  
   450      ## Additional labels
   451      ##
   452      additionalLabels: {}
   453  
   454      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
   455      ##
   456      sampleLimit: 0
   457  
   458      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
   459      ##
   460      targetLimit: 0
   461  
   462      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
   463      ##
   464      labelLimit: 0
   465  
   466      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
   467      ##
   468      labelNameLengthLimit: 0
   469  
   470      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
   471      ##
   472      labelValueLengthLimit: 0
   473  
   474      ## proxyUrl: URL of a proxy that should be used for scraping.
   475      ##
   476      proxyUrl: ""
   477  
   478      ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
   479      scheme: ""
   480  
   481      ## enableHttp2: Whether to enable HTTP2.
   482      ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
   483      enableHttp2: true
   484  
   485      ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
   486      ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
   487      tlsConfig: {}
   488  
   489      bearerTokenFile:
   490  
   491      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
   492      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
   493      ##
   494      metricRelabelings: []
   495      # - action: keep
   496      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
   497      #   sourceLabels: [__name__]
   498  
   499      ## RelabelConfigs to apply to samples before scraping
   500      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
   501      ##
   502      relabelings: []
   503      # - sourceLabels: [__meta_kubernetes_pod_node_name]
   504      #   separator: ;
   505      #   regex: ^(.*)$
   506      #   targetLabel: nodename
   507      #   replacement: $1
   508      #   action: replace
   509  
   510    ## Settings affecting alertmanagerSpec
   511    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec
   512    ##
   513    alertmanagerSpec:
   514      ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
   515      ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
   516      ##
   517      podMetadata: {}
   518  
   519      ## Image of Alertmanager
   520      ##
   521      image:
   522        registry: quay.io
   523        repository: prometheus/alertmanager
   524        tag: v0.25.0
   525        sha: ""
   526  
   527      ## If true then the user will be responsible to provide a secret with alertmanager configuration
   528      ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
   529      ##
   530      useExistingSecret: false
   531  
   532      ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
   533      ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
   534      ##
   535      secrets: []
   536  
   537      ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
   538      ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
   539      ##
   540      configMaps: []
   541  
   542      ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
   543      ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config.
   544      ##
   545      # configSecret:
   546  
   547      ## WebTLSConfig defines the TLS parameters for HTTPS
   548      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec
   549      web: {}
   550  
   551      ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
   552      ##
   553      alertmanagerConfigSelector: {}
   554      ## Example which selects all alertmanagerConfig resources
   555      ## with label "alertconfig" with values any of "example-config" or "example-config-2"
   556      # alertmanagerConfigSelector:
   557      #   matchExpressions:
   558      #     - key: alertconfig
   559      #       operator: In
   560      #       values:
   561      #         - example-config
   562      #         - example-config-2
   563      #
   564      ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
   565      # alertmanagerConfigSelector:
   566      #   matchLabels:
   567      #     role: example-config
   568  
   569      ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
   570      ##
   571      alertmanagerConfigNamespaceSelector: {}
   572      ## Example which selects all namespaces
   573      ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
   574      # alertmanagerConfigNamespaceSelector:
   575      #   matchExpressions:
   576      #     - key: alertmanagerconfig
   577      #       operator: In
   578      #       values:
   579      #         - example-namespace
   580      #         - example-namespace-2
   581  
   582      ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
   583      # alertmanagerConfigNamespaceSelector:
   584      #   matchLabels:
   585      #     alertmanagerconfig: enabled
   586  
   587      ## AlertmanagerConfig to be used as top level configuration
   588      ##
   589      alertmanagerConfiguration: {}
   590      ## Example selecting a global AlertmanagerConfig
   591      # alertmanagerConfiguration:
   592      #   name: global-alertmanager-Configuration
   593  
   594      ## Defines the strategy used by AlertmanagerConfig objects to match alerts, e.g. OnNamespace
   595      ##
   596      alertmanagerConfigMatcherStrategy: {}
   597      ## Example using the OnNamespace strategy
   598      # alertmanagerConfigMatcherStrategy:
   599      #   type: OnNamespace
   600  
   601      ## Define Log Format
   602      # Use logfmt (default) or json logging
   603      logFormat: logfmt
   604  
   605      ## Log level for Alertmanager to be configured with.
   606      ##
   607      logLevel: info
   608  
   609      ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
   610      ## running cluster equal to the expected size.
   611      replicas: 1
   612  
   613      ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
   614      ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
   615      ##
   616      retention: 120h
   617  
   618      ## Storage is the definition of how storage will be used by the Alertmanager instances.
   619      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
   620      ##
   621      storage: {}
   622      # volumeClaimTemplate:
   623      #   spec:
   624      #     storageClassName: gluster
   625      #     accessModes: ["ReadWriteOnce"]
   626      #     resources:
   627      #       requests:
   628      #         storage: 50Gi
   629      #     selector: {}
   630  
   631  
   632      ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs, and is required if Alertmanager is not served from the root of a DNS name.
   633      ##
   634      externalUrl:
   635  
   636      ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
   637      ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
   638      ##
   639      routePrefix: /
   640  
   641      ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
   642      ##
   643      paused: false
   644  
   645      ## Define which Nodes the Pods are scheduled on.
   646      ## ref: https://kubernetes.io/docs/user-guide/node-selection/
   647      ##
   648      nodeSelector: {}
   649  
   650      ## Define resources requests and limits for single Pods.
   651      ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
   652      ##
   653      resources: {}
   654      # requests:
   655      #   memory: 400Mi
   656  
   657      ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
   658      ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
   659      ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
   660      ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
   661      ##
   662      podAntiAffinity: ""
   663  
   664      ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
   665      ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
   666      ##
   667      podAntiAffinityTopologyKey: kubernetes.io/hostname
   668  
   669      ## Assign custom affinity rules to the alertmanager instance
   670      ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   671      ##
   672      affinity: {}
   673      # nodeAffinity:
   674      #   requiredDuringSchedulingIgnoredDuringExecution:
   675      #     nodeSelectorTerms:
   676      #     - matchExpressions:
   677      #       - key: kubernetes.io/e2e-az-name
   678      #         operator: In
   679      #         values:
   680      #         - e2e-az1
   681      #         - e2e-az2
   682  
   683      ## If specified, the pod's tolerations.
   684      ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
   685      ##
   686      tolerations: []
   687      # - key: "key"
   688      #   operator: "Equal"
   689      #   value: "value"
   690      #   effect: "NoSchedule"
   691  
   692      ## If specified, the pod's topology spread constraints.
   693      ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
   694      ##
   695      topologySpreadConstraints: []
   696      # - maxSkew: 1
   697      #   topologyKey: topology.kubernetes.io/zone
   698      #   whenUnsatisfiable: DoNotSchedule
   699      #   labelSelector:
   700      #     matchLabels:
   701      #       app: alertmanager
   702  
   703      ## SecurityContext holds pod-level security attributes and common container settings.
   704      ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext  false
   705      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
   706      ##
   707      securityContext:
   708        runAsGroup: 2000
   709        runAsNonRoot: true
   710        runAsUser: 1000
   711        fsGroup: 2000
   712  
   713      ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
   714      ## Note this is only for the Alertmanager UI, not the gossip communication.
   715      ##
   716      listenLocal: false
   717  
   718      ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
   719      ##
   720      containers: []
   721      # containers:
   722      # - name: oauth-proxy
   723      #   image: quay.io/oauth2-proxy/oauth2-proxy:v7.3.0
   724      #   args:
   725      #   - --upstream=http://127.0.0.1:9093
   726      #   - --http-address=0.0.0.0:8081
   727      #   - ...
   728      #   ports:
   729      #   - containerPort: 8081
   730      #     name: oauth-proxy
   731      #     protocol: TCP
   732      #   resources: {}
   733  
   734      # Additional volumes on the output StatefulSet definition.
   735      volumes: []
   736  
   737      # Additional VolumeMounts on the output StatefulSet definition.
   738      volumeMounts: []
   739  
   740      ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
   741      ## (permissions, dir tree) on mounted volumes before starting prometheus
   742      initContainers: []
   743  
   744      ## Priority class assigned to the Pods
   745      ##
   746      priorityClassName: ""
   747  
   748      ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
   749      ##
   750      additionalPeers: []
   751  
   752      ## PortName to use for Alert Manager.
   753      ##
   754      portName: "http-web"
   755  
   756      ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
   757      ##
   758      clusterAdvertiseAddress: false
   759  
   760      ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
   761      ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
   762      forceEnableClusterMode: false
   763  
   764      ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
   765      ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
   766      minReadySeconds: 0
   767  
   768    ## ExtraSecret can be used to store various data in an extra secret
   769    ## (use it for example to store hashed basic auth credentials)
   770    extraSecret:
   771      ## if not set, name will be auto generated
   772      # name: ""
   773      annotations: {}
   774      data: {}
   775    #   auth: |
   776    #     foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
   777    #     someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
   778  
   779  ## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
   780  ##
   781  grafana:
   782    enabled: true
   783    namespaceOverride: ""
   784  
   785    ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
   786    ##
   787    forceDeployDatasources: false
   788  
   789    ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled
   790    ##
   791    forceDeployDashboards: false
   792  
   793    ## Deploy default dashboards
   794    ##
   795    defaultDashboardsEnabled: true
   796  
   797    ## Timezone for the default dashboards
   798    ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg
   799    ##
   800    defaultDashboardsTimezone: utc
   801  
   802    adminPassword: prom-operator
   803  
   804    rbac:
   805      ## If true, Grafana PSPs will be created
   806      ##
   807      pspEnabled: false
   808  
   809    ingress:
   810      ## If true, Grafana Ingress will be created
   811      ##
   812      enabled: false
   813  
   814      ## IngressClassName for Grafana Ingress.
    ## Should be provided if Ingress is enabled.
   816      ##
   817      # ingressClassName: nginx
   818  
   819      ## Annotations for Grafana Ingress
   820      ##
   821      annotations: {}
   822        # kubernetes.io/ingress.class: nginx
   823        # kubernetes.io/tls-acme: "true"
   824  
   825      ## Labels to be added to the Ingress
   826      ##
   827      labels: {}
   828  
   829      ## Hostnames.
    ## Must be provided if Ingress is enabled.
   831      ##
   832      # hosts:
   833      #   - grafana.domain.com
   834      hosts: []
   835  
   836      ## Path for grafana ingress
   837      path: /
   838  
   839      ## TLS configuration for grafana Ingress
   840      ## Secret must be manually created in the namespace
   841      ##
   842      tls: []
   843      # - secretName: grafana-general-tls
   844      #   hosts:
   845      #   - grafana.example.com
   846  
   847    sidecar:
   848      dashboards:
   849        enabled: true
   850        label: grafana_dashboard
   851        labelValue: "1"
   852        # Allow discovery in all namespaces for dashboards
   853        searchNamespace: ALL
   854  
   855        ## Annotations for Grafana dashboard configmaps
   856        ##
   857        annotations: {}
   858        multicluster:
   859          global:
   860            enabled: false
   861          etcd:
   862            enabled: false
   863        provider:
   864          allowUiUpdates: false
   865      datasources:
   866        enabled: true
   867        defaultDatasourceEnabled: true
   868        isDefaultDatasource: true
   869  
   870        uid: prometheus
   871  
   872        ## URL of prometheus datasource
   873        ##
   874        # url: http://prometheus-stack-prometheus:9090/
   875  
   876        ## Prometheus request timeout in seconds
   877        # timeout: 30
   878  
   879        # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
   880        # defaultDatasourceScrapeInterval: 15s
   881  
   882        ## Annotations for Grafana datasource configmaps
   883        ##
   884        annotations: {}
   885  
   886        ## Set method for HTTP to send query to datasource
   887        httpMethod: POST
   888  
   889        ## Create datasource for each Pod of Prometheus StatefulSet;
   890        ## this uses headless service `prometheus-operated` which is
   891        ## created by Prometheus Operator
   892        ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
   893        createPrometheusReplicasDatasources: false
   894        label: grafana_datasource
   895        labelValue: "1"
   896  
   897        ## Field with internal link pointing to existing data source in Grafana.
   898        ## Can be provisioned via additionalDataSources
   899        exemplarTraceIdDestinations: {}
   900          # datasourceUid: Jaeger
   901          # traceIdLabelName: trace_id
   902  
   903    extraConfigmapMounts: []
   904    # - name: certs-configmap
   905    #   mountPath: /etc/grafana/ssl/
   906    #   configMap: certs-configmap
   907    #   readOnly: true
   908  
   909    deleteDatasources: []
   910    # - name: example-datasource
   911    #   orgId: 1
   912  
   913    ## Configure additional grafana datasources (passed through tpl)
   914    ## ref: http://docs.grafana.org/administration/provisioning/#datasources
   915    additionalDataSources: []
   916    # - name: prometheus-sample
   917    #   access: proxy
   918    #   basicAuth: true
   919    #   basicAuthPassword: pass
   920    #   basicAuthUser: daco
   921    #   editable: false
   922    #   jsonData:
   923    #       tlsSkipVerify: true
   924    #   orgId: 1
   925    #   type: prometheus
   926    #   url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
   927    #   version: 1
   928  
   929    ## Passed to grafana subchart and used by servicemonitor below
   930    ##
   931    service:
   932      portName: http-web
   933  
   934    serviceMonitor:
   935      # If true, a ServiceMonitor CRD is created for a prometheus operator
   936      # https://github.com/coreos/prometheus-operator
   937      #
   938      enabled: true
   939  
   940      # Path to use for scraping metrics. Might be different if server.root_url is set
   941      # in grafana.ini
   942      path: "/metrics"
   943  
   944      #  namespace: monitoring  (defaults to use the namespace this chart is deployed to)
   945  
   946      # labels for the ServiceMonitor
   947      labels: {}
   948  
   949      # Scrape interval. If not set, the Prometheus default scrape interval is used.
   950      #
   951      interval: ""
   952      scheme: http
   953      tlsConfig: {}
   954      scrapeTimeout: 30s
   955  
   956      ## RelabelConfigs to apply to samples before scraping
   957      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
   958      ##
   959      relabelings: []
   960      # - sourceLabels: [__meta_kubernetes_pod_node_name]
   961      #   separator: ;
   962      #   regex: ^(.*)$
   963      #   targetLabel: nodename
   964      #   replacement: $1
   965      #   action: replace
   966  
   967  ## Flag to disable all the kubernetes component scrapers
   968  ##
   969  kubernetesServiceMonitors:
   970    enabled: true
   971  
   972  ## Component scraping the kube api server
   973  ##
   974  kubeApiServer:
   975    enabled: true
   976    tlsConfig:
   977      serverName: kubernetes
   978      insecureSkipVerify: false
   979    serviceMonitor:
   980      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
   981      ##
   982      interval: ""
   983  
   984      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
   985      ##
   986      sampleLimit: 0
   987  
   988      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
   989      ##
   990      targetLimit: 0
   991  
   992      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
   993      ##
   994      labelLimit: 0
   995  
   996      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
   997      ##
   998      labelNameLengthLimit: 0
   999  
  1000      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1001      ##
  1002      labelValueLengthLimit: 0
  1003  
  1004      ## proxyUrl: URL of a proxy that should be used for scraping.
  1005      ##
  1006      proxyUrl: ""
  1007  
  1008      jobLabel: component
  1009      selector:
  1010        matchLabels:
  1011          component: apiserver
  1012          provider: kubernetes
  1013  
  1014      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1015      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1016      ##
  1017      metricRelabelings:
  1018        # Drop excessively noisy apiserver buckets.
  1019        - action: drop
  1020          regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50)
  1021          sourceLabels:
  1022            - __name__
  1023            - le
  1024      # - action: keep
  1025      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1026      #   sourceLabels: [__name__]
  1027  
  1028      ## RelabelConfigs to apply to samples before scraping
  1029      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1030      ##
  1031      relabelings: []
  1032      # - sourceLabels:
  1033      #     - __meta_kubernetes_namespace
  1034      #     - __meta_kubernetes_service_name
  1035      #     - __meta_kubernetes_endpoint_port_name
  1036      #   action: keep
  1037      #   regex: default;kubernetes;https
  1038      # - targetLabel: __address__
  1039      #   replacement: kubernetes.default.svc:443
  1040  
  1041      ## Additional labels
  1042      ##
  1043      additionalLabels: {}
  1044      #  foo: bar
  1045  
  1046  ## Component scraping the kubelet and kubelet-hosted cAdvisor
  1047  ##
  1048  kubelet:
  1049    enabled: true
  1050    namespace: kube-system
  1051  
  1052    serviceMonitor:
  1053      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1054      ##
  1055      interval: ""
  1056  
  1057      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1058      ##
  1059      sampleLimit: 0
  1060  
  1061      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1062      ##
  1063      targetLimit: 0
  1064  
  1065      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1066      ##
  1067      labelLimit: 0
  1068  
  1069      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1070      ##
  1071      labelNameLengthLimit: 0
  1072  
  1073      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1074      ##
  1075      labelValueLengthLimit: 0
  1076  
  1077      ## proxyUrl: URL of a proxy that should be used for scraping.
  1078      ##
  1079      proxyUrl: ""
  1080  
  1081      ## Enable scraping the kubelet over https. For requirements to enable this see
  1082      ## https://github.com/prometheus-operator/prometheus-operator/issues/926
  1083      ##
  1084      https: true
  1085  
  1086      ## Enable scraping /metrics/cadvisor from kubelet's service
  1087      ##
  1088      cAdvisor: true
  1089  
  1090      ## Enable scraping /metrics/probes from kubelet's service
  1091      ##
  1092      probes: true
  1093  
  1094      ## Enable scraping /metrics/resource from kubelet's service
  1095      ## This is disabled by default because container metrics are already exposed by cAdvisor
  1096      ##
  1097      resource: false
  1098      # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
  1099      resourcePath: "/metrics/resource/v1alpha1"
  1100  
  1101      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1102      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1103      ##
  1104      cAdvisorMetricRelabelings:
  1105        # Drop less useful container CPU metrics.
  1106        - sourceLabels: [__name__]
  1107          action: drop
  1108          regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
  1109        # Drop less useful container / always zero filesystem metrics.
  1110        - sourceLabels: [__name__]
  1111          action: drop
  1112          regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
  1113        # Drop less useful / always zero container memory metrics.
  1114        - sourceLabels: [__name__]
  1115          action: drop
  1116          regex: 'container_memory_(mapped_file|swap)'
  1117        # Drop less useful container process metrics.
  1118        - sourceLabels: [__name__]
  1119          action: drop
  1120          regex: 'container_(file_descriptors|tasks_state|threads_max)'
  1121        # Drop container spec metrics that overlap with kube-state-metrics.
  1122        - sourceLabels: [__name__]
  1123          action: drop
  1124          regex: 'container_spec.*'
  1125        # Drop cgroup metrics with no pod.
  1126        - sourceLabels: [id, pod]
  1127          action: drop
  1128          regex: '.+;'
  1129      # - sourceLabels: [__name__, image]
  1130      #   separator: ;
  1131      #   regex: container_([a-z_]+);
  1132      #   replacement: $1
  1133      #   action: drop
  1134      # - sourceLabels: [__name__]
  1135      #   separator: ;
  1136      #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
  1137      #   replacement: $1
  1138      #   action: drop
  1139  
  1140      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1141      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1142      ##
  1143      probesMetricRelabelings: []
  1144      # - sourceLabels: [__name__, image]
  1145      #   separator: ;
  1146      #   regex: container_([a-z_]+);
  1147      #   replacement: $1
  1148      #   action: drop
  1149      # - sourceLabels: [__name__]
  1150      #   separator: ;
  1151      #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
  1152      #   replacement: $1
  1153      #   action: drop
  1154  
  1155      ## RelabelConfigs to apply to samples before scraping
  1156      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1157      ##
  1158      ## metrics_path is required to match upstream rules and charts
  1159      cAdvisorRelabelings:
  1160        - action: replace
  1161          sourceLabels: [__metrics_path__]
  1162          targetLabel: metrics_path
  1163      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1164      #   separator: ;
  1165      #   regex: ^(.*)$
  1166      #   targetLabel: nodename
  1167      #   replacement: $1
  1168      #   action: replace
  1169  
  1170      ## RelabelConfigs to apply to samples before scraping
  1171      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1172      ##
  1173      probesRelabelings:
  1174        - action: replace
  1175          sourceLabels: [__metrics_path__]
  1176          targetLabel: metrics_path
  1177      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1178      #   separator: ;
  1179      #   regex: ^(.*)$
  1180      #   targetLabel: nodename
  1181      #   replacement: $1
  1182      #   action: replace
  1183  
  1184      ## RelabelConfigs to apply to samples before scraping
  1185      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1186      ##
  1187      resourceRelabelings:
  1188        - action: replace
  1189          sourceLabels: [__metrics_path__]
  1190          targetLabel: metrics_path
  1191      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1192      #   separator: ;
  1193      #   regex: ^(.*)$
  1194      #   targetLabel: nodename
  1195      #   replacement: $1
  1196      #   action: replace
  1197  
  1198      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1199      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1200      ##
  1201      metricRelabelings: []
  1202      # - sourceLabels: [__name__, image]
  1203      #   separator: ;
  1204      #   regex: container_([a-z_]+);
  1205      #   replacement: $1
  1206      #   action: drop
  1207      # - sourceLabels: [__name__]
  1208      #   separator: ;
  1209      #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
  1210      #   replacement: $1
  1211      #   action: drop
  1212  
  1213      ## RelabelConfigs to apply to samples before scraping
  1214      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1215      ##
  1216      ## metrics_path is required to match upstream rules and charts
  1217      relabelings:
  1218        - action: replace
  1219          sourceLabels: [__metrics_path__]
  1220          targetLabel: metrics_path
  1221      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1222      #   separator: ;
  1223      #   regex: ^(.*)$
  1224      #   targetLabel: nodename
  1225      #   replacement: $1
  1226      #   action: replace
  1227  
  1228      ## Additional labels
  1229      ##
  1230      additionalLabels: {}
  1231      #  foo: bar
  1232  
  1233  ## Component scraping the kube controller manager
  1234  ##
  1235  kubeControllerManager:
  1236    enabled: true
  1237  
  1238    ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  1239    ##
  1240    endpoints: []
  1241    # - 10.141.4.22
  1242    # - 10.141.4.23
  1243    # - 10.141.4.24
  1244  
  1245    ## If using kubeControllerManager.endpoints only the port and targetPort are used
  1246    ##
  1247    service:
  1248      enabled: true
  1249      ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
  1250      ## of default port in Kubernetes 1.22.
  1251      ##
  1252      port: null
  1253      targetPort: null
  1254      # selector:
  1255      #   component: kube-controller-manager
  1256  
  1257    serviceMonitor:
  1258      enabled: true
  1259      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1260      ##
  1261      interval: ""
  1262  
  1263      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1264      ##
  1265      sampleLimit: 0
  1266  
  1267      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1268      ##
  1269      targetLimit: 0
  1270  
  1271      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1272      ##
  1273      labelLimit: 0
  1274  
  1275      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1276      ##
  1277      labelNameLengthLimit: 0
  1278  
  1279      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1280      ##
  1281      labelValueLengthLimit: 0
  1282  
  1283      ## proxyUrl: URL of a proxy that should be used for scraping.
  1284      ##
  1285      proxyUrl: ""
  1286  
  1287      ## Enable scraping kube-controller-manager over https.
  1288      ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
  1289      ## If null or unset, the value is determined dynamically based on target Kubernetes version.
  1290      ##
  1291      https: null
  1292  
  1293      # Skip TLS certificate validation when scraping
  1294      insecureSkipVerify: null
  1295  
  1296      # Name of the server to use when validating TLS certificate
  1297      serverName: null
  1298  
  1299      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1300      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1301      ##
  1302      metricRelabelings: []
  1303      # - action: keep
  1304      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1305      #   sourceLabels: [__name__]
  1306  
  1307      ## RelabelConfigs to apply to samples before scraping
  1308      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1309      ##
  1310      relabelings: []
  1311      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1312      #   separator: ;
  1313      #   regex: ^(.*)$
  1314      #   targetLabel: nodename
  1315      #   replacement: $1
  1316      #   action: replace
  1317  
  1318      ## Additional labels
  1319      ##
  1320      additionalLabels: {}
  1321      #  foo: bar
  1322  
  1323  ## Component scraping coreDns. Use either this or kubeDns
  1324  ##
  1325  coreDns:
  1326    enabled: true
  1327    service:
  1328      port: 9153
  1329      targetPort: 9153
  1330      # selector:
  1331      #   k8s-app: kube-dns
  1332    serviceMonitor:
  1333      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1334      ##
  1335      interval: ""
  1336  
  1337      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1338      ##
  1339      sampleLimit: 0
  1340  
  1341      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1342      ##
  1343      targetLimit: 0
  1344  
  1345      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1346      ##
  1347      labelLimit: 0
  1348  
  1349      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1350      ##
  1351      labelNameLengthLimit: 0
  1352  
  1353      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1354      ##
  1355      labelValueLengthLimit: 0
  1356  
  1357      ## proxyUrl: URL of a proxy that should be used for scraping.
  1358      ##
  1359      proxyUrl: ""
  1360  
  1361      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1362      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1363      ##
  1364      metricRelabelings: []
  1365      # - action: keep
  1366      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1367      #   sourceLabels: [__name__]
  1368  
  1369      ## RelabelConfigs to apply to samples before scraping
  1370      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1371      ##
  1372      relabelings: []
  1373      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1374      #   separator: ;
  1375      #   regex: ^(.*)$
  1376      #   targetLabel: nodename
  1377      #   replacement: $1
  1378      #   action: replace
  1379  
  1380      ## Additional labels
  1381      ##
  1382      additionalLabels: {}
  1383      #  foo: bar
  1384  
  1385  ## Component scraping kubeDns. Use either this or coreDns
  1386  ##
  1387  kubeDns:
  1388    enabled: false
  1389    service:
  1390      dnsmasq:
  1391        port: 10054
  1392        targetPort: 10054
  1393      skydns:
  1394        port: 10055
  1395        targetPort: 10055
  1396      # selector:
  1397      #   k8s-app: kube-dns
  1398    serviceMonitor:
  1399      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1400      ##
  1401      interval: ""
  1402  
  1403      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1404      ##
  1405      sampleLimit: 0
  1406  
  1407      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1408      ##
  1409      targetLimit: 0
  1410  
  1411      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1412      ##
  1413      labelLimit: 0
  1414  
  1415      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1416      ##
  1417      labelNameLengthLimit: 0
  1418  
  1419      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1420      ##
  1421      labelValueLengthLimit: 0
  1422  
  1423      ## proxyUrl: URL of a proxy that should be used for scraping.
  1424      ##
  1425      proxyUrl: ""
  1426  
  1427      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1428      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1429      ##
  1430      metricRelabelings: []
  1431      # - action: keep
  1432      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1433      #   sourceLabels: [__name__]
  1434  
  1435      ## RelabelConfigs to apply to samples before scraping
  1436      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1437      ##
  1438      relabelings: []
  1439      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1440      #   separator: ;
  1441      #   regex: ^(.*)$
  1442      #   targetLabel: nodename
  1443      #   replacement: $1
  1444      #   action: replace
  1445  
  1446      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1447      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1448      ##
  1449      dnsmasqMetricRelabelings: []
  1450      # - action: keep
  1451      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1452      #   sourceLabels: [__name__]
  1453  
  1454      ## RelabelConfigs to apply to samples before scraping
  1455      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1456      ##
  1457      dnsmasqRelabelings: []
  1458      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1459      #   separator: ;
  1460      #   regex: ^(.*)$
  1461      #   targetLabel: nodename
  1462      #   replacement: $1
  1463      #   action: replace
  1464  
  1465      ## Additional labels
  1466      ##
  1467      additionalLabels: {}
  1468      #  foo: bar
  1469  
  1470  ## Component scraping etcd
  1471  ##
  1472  kubeEtcd:
  1473    enabled: true
  1474  
  1475    ## If your etcd is not deployed as a pod, specify IPs it can be found on
  1476    ##
  1477    endpoints: []
  1478    # - 10.141.4.22
  1479    # - 10.141.4.23
  1480    # - 10.141.4.24
  1481  
  1482    ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
  1483    ##
  1484    service:
  1485      enabled: true
  1486      port: 2381
  1487      targetPort: 2381
  1488      # selector:
  1489      #   component: etcd
  1490  
  1491    ## Configure secure access to the etcd cluster by loading a secret into prometheus and
  1492    ## specifying security configuration below. For example, with a secret named etcd-client-cert
  1493    ##
  1494    ## serviceMonitor:
  1495    ##   scheme: https
  1496    ##   insecureSkipVerify: false
  1497    ##   serverName: localhost
  1498    ##   caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
  1499    ##   certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
  1500    ##   keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
  1501    ##
  1502    serviceMonitor:
  1503      enabled: true
  1504      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1505      ##
  1506      interval: ""
  1507  
  1508      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1509      ##
  1510      sampleLimit: 0
  1511  
  1512      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1513      ##
  1514      targetLimit: 0
  1515  
  1516      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1517      ##
  1518      labelLimit: 0
  1519  
  1520      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1521      ##
  1522      labelNameLengthLimit: 0
  1523  
  1524      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1525      ##
  1526      labelValueLengthLimit: 0
  1527  
  1528      ## proxyUrl: URL of a proxy that should be used for scraping.
  1529      ##
  1530      proxyUrl: ""
  1531      scheme: http
  1532      insecureSkipVerify: false
  1533      serverName: ""
  1534      caFile: ""
  1535      certFile: ""
  1536      keyFile: ""
  1537  
  1538      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1539      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1540      ##
  1541      metricRelabelings: []
  1542      # - action: keep
  1543      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1544      #   sourceLabels: [__name__]
  1545  
  1546      ## RelabelConfigs to apply to samples before scraping
  1547      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1548      ##
  1549      relabelings: []
  1550      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1551      #   separator: ;
  1552      #   regex: ^(.*)$
  1553      #   targetLabel: nodename
  1554      #   replacement: $1
  1555      #   action: replace
  1556  
  1557      ## Additional labels
  1558      ##
  1559      additionalLabels: {}
  1560      #  foo: bar
  1561  
  1562  ## Component scraping kube scheduler
  1563  ##
  1564  kubeScheduler:
  1565    enabled: true
  1566  
  1567    ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  1568    ##
  1569    endpoints: []
  1570    # - 10.141.4.22
  1571    # - 10.141.4.23
  1572    # - 10.141.4.24
  1573  
  1574    ## If using kubeScheduler.endpoints only the port and targetPort are used
  1575    ##
  1576    service:
  1577      enabled: true
  1578      ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
  1579      ## of default port in Kubernetes 1.23.
  1580      ##
  1581      port: null
  1582      targetPort: null
  1583      # selector:
  1584      #   component: kube-scheduler
  1585  
  1586    serviceMonitor:
  1587      enabled: true
  1588      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1589      ##
  1590      interval: ""
  1591  
  1592      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1593      ##
  1594      sampleLimit: 0
  1595  
  1596      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1597      ##
  1598      targetLimit: 0
  1599  
  1600      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1601      ##
  1602      labelLimit: 0
  1603  
  1604      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1605      ##
  1606      labelNameLengthLimit: 0
  1607  
  1608      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1609      ##
  1610      labelValueLengthLimit: 0
  1611  
  1612      ## proxyUrl: URL of a proxy that should be used for scraping.
  1613      ##
  1614      proxyUrl: ""
  1615      ## Enable scraping kube-scheduler over https.
  1616      ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
  1617      ## If null or unset, the value is determined dynamically based on target Kubernetes version.
  1618      ##
  1619      https: null
  1620  
  1621      ## Skip TLS certificate validation when scraping
  1622      insecureSkipVerify: null
  1623  
  1624      ## Name of the server to use when validating TLS certificate
  1625      serverName: null
  1626  
  1627      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1628      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1629      ##
  1630      metricRelabelings: []
  1631      # - action: keep
  1632      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1633      #   sourceLabels: [__name__]
  1634  
  1635      ## RelabelConfigs to apply to samples before scraping
  1636      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1637      ##
  1638      relabelings: []
  1639      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1640      #   separator: ;
  1641      #   regex: ^(.*)$
  1642      #   targetLabel: nodename
  1643      #   replacement: $1
  1644      #   action: replace
  1645  
  1646      ## Additional labels
  1647      ##
  1648      additionalLabels: {}
  1649      #  foo: bar
  1650  
  1651  ## Component scraping kube proxy
  1652  ##
  1653  kubeProxy:
  1654    enabled: true
  1655  
  1656    ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
  1657    ##
  1658    endpoints: []
  1659    # - 10.141.4.22
  1660    # - 10.141.4.23
  1661    # - 10.141.4.24
  1662  
  1663    service:
  1664      enabled: true
  1665      port: 10249
  1666      targetPort: 10249
  1667      # selector:
  1668      #   k8s-app: kube-proxy
  1669  
  1670    serviceMonitor:
  1671      enabled: true
  1672      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1673      ##
  1674      interval: ""
  1675  
  1676      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1677      ##
  1678      sampleLimit: 0
  1679  
  1680      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1681      ##
  1682      targetLimit: 0
  1683  
  1684      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1685      ##
  1686      labelLimit: 0
  1687  
  1688      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1689      ##
  1690      labelNameLengthLimit: 0
  1691  
  1692      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1693      ##
  1694      labelValueLengthLimit: 0
  1695  
  1696      ## proxyUrl: URL of a proxy that should be used for scraping.
  1697      ##
  1698      proxyUrl: ""
  1699  
  1700      ## Enable scraping kube-proxy over https.
  1701      ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
  1702      ##
  1703      https: false
  1704  
  1705      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1706      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1707      ##
  1708      metricRelabelings: []
  1709      # - action: keep
  1710      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1711      #   sourceLabels: [__name__]
  1712  
  1713      ## RelabelConfigs to apply to samples before scraping
  1714      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1715      ##
  1716      relabelings: []
  1717      # - action: keep
  1718      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1719      #   sourceLabels: [__name__]
  1720  
  1721      ## Additional labels
  1722      ##
  1723      additionalLabels: {}
  1724      #  foo: bar
  1725  
  1726  ## Component scraping kube state metrics
  1727  ##
  1728  kubeStateMetrics:
  1729    enabled: true
  1730  
  1731  ## Configuration for kube-state-metrics subchart
  1732  ##
  1733  kube-state-metrics:
  1734    namespaceOverride: ""
  1735    rbac:
  1736      create: true
  1737    releaseLabel: true
  1738    prometheus:
  1739      monitor:
  1740        enabled: true
  1741  
  1742        ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1743        ##
  1744        interval: ""
  1745  
  1746        ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1747        ##
  1748        sampleLimit: 0
  1749  
  1750        ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1751        ##
  1752        targetLimit: 0
  1753  
  1754        ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1755        ##
  1756        labelLimit: 0
  1757  
  1758        ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1759        ##
  1760        labelNameLengthLimit: 0
  1761  
  1762        ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1763        ##
  1764        labelValueLengthLimit: 0
  1765  
  1766        ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used.
  1767        ##
  1768        scrapeTimeout: ""
  1769  
  1770        ## proxyUrl: URL of a proxy that should be used for scraping.
  1771        ##
  1772        proxyUrl: ""
  1773  
  1774        # Keep labels from scraped data, overriding server-side labels
  1775        ##
  1776        honorLabels: true
  1777  
  1778        ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1779        ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1780        ##
  1781        metricRelabelings: []
  1782        # - action: keep
  1783        #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  1784        #   sourceLabels: [__name__]
  1785  
  1786        ## RelabelConfigs to apply to samples before scraping
  1787        ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1788        ##
  1789        relabelings: []
  1790        # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1791        #   separator: ;
  1792        #   regex: ^(.*)$
  1793        #   targetLabel: nodename
  1794        #   replacement: $1
  1795        #   action: replace
  1796  
  1797    selfMonitor:
  1798      enabled: false
  1799  
  1800  ## Deploy node exporter as a daemonset to all nodes
  1801  ##
  1802  nodeExporter:
  1803    enabled: true
  1804  
  1805  ## Configuration for prometheus-node-exporter subchart
  1806  ##
  1807  prometheus-node-exporter:
  1808    namespaceOverride: ""
  1809    podLabels:
  1810      ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards
  1811      ##
  1812      jobLabel: node-exporter
  1813    releaseLabel: true
  1814    extraArgs:
  1815      - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
  1816      - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
  1817    service:
  1818      portName: http-metrics
  1819    prometheus:
  1820      monitor:
  1821        enabled: true
  1822  
  1823        jobLabel: jobLabel
  1824  
  1825        ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  1826        ##
  1827        interval: ""
  1828  
  1829        ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  1830        ##
  1831        sampleLimit: 0
  1832  
  1833        ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  1834        ##
  1835        targetLimit: 0
  1836  
  1837        ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1838        ##
  1839        labelLimit: 0
  1840  
  1841        ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1842        ##
  1843        labelNameLengthLimit: 0
  1844  
  1845        ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  1846        ##
  1847        labelValueLengthLimit: 0
  1848  
      ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
  1850        ##
  1851        scrapeTimeout: ""
  1852  
  1853        ## proxyUrl: URL of a proxy that should be used for scraping.
  1854        ##
  1855        proxyUrl: ""
  1856  
  1857        ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  1858        ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1859        ##
  1860        metricRelabelings: []
  1861        # - sourceLabels: [__name__]
  1862        #   separator: ;
  1863        #   regex: ^node_mountstats_nfs_(event|operations|transport)_.+
  1864        #   replacement: $1
  1865        #   action: drop
  1866  
  1867        ## RelabelConfigs to apply to samples before scraping
  1868        ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  1869        ##
  1870        relabelings: []
  1871        # - sourceLabels: [__meta_kubernetes_pod_node_name]
  1872        #   separator: ;
  1873        #   regex: ^(.*)$
  1874        #   targetLabel: nodename
  1875        #   replacement: $1
  1876        #   action: replace
  1877    rbac:
  1878      ## If true, create PSPs for node-exporter
  1879      ##
  1880      pspEnabled: false
  1881  
  1882  ## Manages Prometheus and Alertmanager components
  1883  ##
  1884  prometheusOperator:
  1885    enabled: true
  1886  
  1887    ## Prometheus-Operator v0.39.0 and later support TLS natively.
  1888    ##
  1889    tls:
  1890      enabled: true
  1891      # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
  1892      tlsMinVersion: VersionTLS13
  1893      # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
  1894      internalPort: 10250
  1895  
  1896    ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
  1897    ## rules from making their way into prometheus and potentially preventing the container from starting
  1898    admissionWebhooks:
  1899      failurePolicy:
  1900      ## The default timeoutSeconds is 10 and the maximum value is 30.
  1901      timeoutSeconds: 10
  1902      enabled: true
  1903      ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
  1904      ## If unspecified, system trust roots on the apiserver are used.
  1905      caBundle: ""
  1906      ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
  1907      ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
  1908      ## certs ahead of time if you wish.
  1909      ##
  1910      annotations: {}
  1911      #   argocd.argoproj.io/hook: PreSync
  1912      #   argocd.argoproj.io/hook-delete-policy: HookSucceeded
  1913      patch:
  1914        enabled: true
  1915        image:
  1916          registry: registry.k8s.io
  1917          repository: ingress-nginx/kube-webhook-certgen
  1918          tag: v20221220-controller-v1.5.1-58-g787ea74b6
  1919          sha: ""
  1920          pullPolicy: IfNotPresent
  1921        resources: {}
  1922        ## Provide a priority class name to the webhook patching job
  1923        ##
  1924        priorityClassName: ""
  1925        annotations: {}
  1926        #   argocd.argoproj.io/hook: PreSync
  1927        #   argocd.argoproj.io/hook-delete-policy: HookSucceeded
  1928        podAnnotations: {}
  1929        nodeSelector: {}
  1930        affinity: {}
  1931        tolerations: []
  1932  
  1933        ## SecurityContext holds pod-level security attributes and common container settings.
  1934        ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext  false
  1935        ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  1936        ##
  1937        securityContext:
  1938          runAsGroup: 2000
  1939          runAsNonRoot: true
  1940          runAsUser: 2000
  1941  
  1942      # Security context for create job container
  1943      createSecretJob:
  1944        securityContext: {}
  1945  
    # Security context for patch job container
  1947      patchWebhookJob:
  1948        securityContext: {}
  1949  
  1950      # Use certmanager to generate webhook certs
  1951      certManager:
  1952        enabled: false
  1953        # self-signed root certificate
  1954        rootCert:
        duration: ""  # defaults to 5y
  1956        admissionCert:
        duration: ""  # defaults to 1y
  1958        # issuerRef:
  1959        #   name: "issuer"
  1960        #   kind: "ClusterIssuer"
  1961  
  1962    ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
  1963    ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
  1964    ##
  1965    namespaces: {}
  1966      # releaseNamespace: true
  1967      # additional:
  1968      # - kube-system
  1969  
  1970    ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
  1971    ##
  1972    denyNamespaces: []
  1973  
  1974    ## Filter namespaces to look for prometheus-operator custom resources
  1975    ##
  1976    alertmanagerInstanceNamespaces: []
  1977    alertmanagerConfigNamespaces: []
  1978    prometheusInstanceNamespaces: []
  1979    thanosRulerInstanceNamespaces: []
  1980  
  1981    ## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
  1982    ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value)
  1983    ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
  1984    ##
  1985    # clusterDomain: "cluster.local"
  1986  
  1987    networkPolicy:
  1988      ## Enable creation of NetworkPolicy resources.
  1989      ##
  1990      enabled: false
  1991  
  1992      ## Flavor of the network policy to use.
  1993      #  Can be:
  1994      #  * kubernetes for networking.k8s.io/v1/NetworkPolicy
  1995      #  * cilium     for cilium.io/v2/CiliumNetworkPolicy
  1996      flavor: kubernetes
  1997  
  1998      # cilium:
  1999      #   egress:
  2000  
  2001    ## Service account for Alertmanager to use.
  2002    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  2003    ##
  2004    serviceAccount:
  2005      create: true
  2006      name: ""
  2007  
  2008    ## Configuration for Prometheus operator service
  2009    ##
  2010    service:
  2011      annotations: {}
  2012      labels: {}
  2013      clusterIP: ""
  2014  
    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
  2018      nodePort: 30080
  2019  
  2020      nodePortTls: 30443
  2021  
    ## Additional ports to open for Prometheus service
    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
    ##
  2025      additionalPorts: []
  2026  
    ## Loadbalancer IP
    ## Only use if service.type is "LoadBalancer"
    ##
  2030      loadBalancerIP: ""
  2031      loadBalancerSourceRanges: []
  2032  
  2033      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  2034      ##
  2035      externalTrafficPolicy: Cluster
  2036  
    ## Service type
    ## NodePort, ClusterIP, LoadBalancer
    ##
  2040      type: ClusterIP
  2041  
  2042      ## List of IP addresses at which the Prometheus server service is available
  2043      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  2044      ##
  2045      externalIPs: []
  2046  
  ## Labels to add to the operator deployment
  ##
  2049    labels: {}
  2050  
  2051    ## Annotations to add to the operator deployment
  2052    ##
  2053    annotations: {}
  2054  
  2055    ## Labels to add to the operator pod
  2056    ##
  2057    podLabels: {}
  2058  
  2059    ## Annotations to add to the operator pod
  2060    ##
  2061    podAnnotations: {}
  2062  
  2063    ## Assign a PriorityClassName to pods if set
  2064    # priorityClassName: ""
  2065  
  2066    ## Define Log Format
  2067    # Use logfmt (default) or json logging
  2068    # logFormat: logfmt
  2069  
  2070    ## Decrease log verbosity to errors only
  2071    # logLevel: error
  2072  
  2073    ## If true, the operator will create and maintain a service for scraping kubelets
  2074    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
  2075    ##
  2076    kubeletService:
  2077      enabled: true
  2078      namespace: kube-system
  2079      ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
  2080      name: ""
  2081  
  2082    ## Create a servicemonitor for the operator
  2083    ##
  2084    serviceMonitor:
  2085      ## Labels for ServiceMonitor
  2086      additionalLabels: {}
  2087  
  2088      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  2089      ##
  2090      interval: ""
  2091  
  2092      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  2093      ##
  2094      sampleLimit: 0
  2095  
  2096      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  2097      ##
  2098      targetLimit: 0
  2099  
  2100      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  2101      ##
  2102      labelLimit: 0
  2103  
  2104      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  2105      ##
  2106      labelNameLengthLimit: 0
  2107  
  2108      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  2109      ##
  2110      labelValueLengthLimit: 0
  2111  
  2112      ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
  2113      scrapeTimeout: ""
  2114      selfMonitor: true
  2115  
  2116      ## Metric relabel configs to apply to samples before ingestion.
  2117      ##
  2118      metricRelabelings: []
  2119      # - action: keep
  2120      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  2121      #   sourceLabels: [__name__]
  2122  
    ## RelabelConfigs to apply to samples before scraping
  2124      ##
  2125      relabelings: []
  2126      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  2127      #   separator: ;
  2128      #   regex: ^(.*)$
  2129      #   targetLabel: nodename
  2130      #   replacement: $1
  2131      #   action: replace
  2132  
  2133    ## Resource limits & requests
  2134    ##
  2135    resources: {}
  2136    # limits:
  2137    #   cpu: 200m
  2138    #   memory: 200Mi
  2139    # requests:
  2140    #   cpu: 100m
  2141    #   memory: 100Mi
  2142  
  2143    # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
  2144    # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
  2145    ##
  2146    hostNetwork: false
  2147  
  2148    ## Define which Nodes the Pods are scheduled on.
  2149    ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  2150    ##
  2151    nodeSelector: {}
  2152  
  2153    ## Tolerations for use with node taints
  2154    ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  2155    ##
  2156    tolerations: []
  2157    # - key: "key"
  2158    #   operator: "Equal"
  2159    #   value: "value"
  2160    #   effect: "NoSchedule"
  2161  
  2162    ## Assign custom affinity rules to the prometheus operator
  2163    ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  2164    ##
  2165    affinity: {}
  2166      # nodeAffinity:
  2167      #   requiredDuringSchedulingIgnoredDuringExecution:
  2168      #     nodeSelectorTerms:
  2169      #     - matchExpressions:
  2170      #       - key: kubernetes.io/e2e-az-name
  2171      #         operator: In
  2172      #         values:
  2173      #         - e2e-az1
  2174      #         - e2e-az2
  2175    dnsConfig: {}
  2176      # nameservers:
  2177      #   - 1.2.3.4
  2178      # searches:
  2179      #   - ns1.svc.cluster-domain.example
  2180      #   - my.dns.search.suffix
  2181      # options:
  2182      #   - name: ndots
  2183      #     value: "2"
    #   - name: edns0
  2185    securityContext:
  2186      fsGroup: 65534
  2187      runAsGroup: 65534
  2188      runAsNonRoot: true
  2189      runAsUser: 65534
  2190  
  2191    ## Container-specific security context configuration
  2192    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  2193    ##
  2194    containerSecurityContext:
  2195      allowPrivilegeEscalation: false
  2196      readOnlyRootFilesystem: true
  2197  
  2198    # Enable vertical pod autoscaler support for prometheus-operator
  2199    verticalPodAutoscaler:
  2200      enabled: false
  2201      # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
  2202      controlledResources: []
  2203  
  2204      # Define the max allowed resources for the pod
  2205      maxAllowed: {}
  2206      # cpu: 200m
  2207      # memory: 100Mi
  2208      # Define the min allowed resources for the pod
  2209      minAllowed: {}
  2210      # cpu: 200m
  2211      # memory: 100Mi
  2212  
  2213      updatePolicy:
  2214        # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
  2215        # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
  2216        updateMode: Auto
  2217  
  2218    ## Prometheus-operator image
  2219    ##
  2220    image:
  2221      registry: quay.io
  2222      repository: prometheus-operator/prometheus-operator
  2223      # if not set appVersion field from Chart.yaml is used
  2224      tag: ""
  2225      sha: ""
  2226      pullPolicy: IfNotPresent
  2227  
  2228    ## Prometheus image to use for prometheuses managed by the operator
  2229    ##
  2230    # prometheusDefaultBaseImage: prometheus/prometheus
  2231  
  2232    ## Prometheus image registry to use for prometheuses managed by the operator
  2233    ##
  2234    # prometheusDefaultBaseImageRegistry: quay.io
  2235  
  2236    ## Alertmanager image to use for alertmanagers managed by the operator
  2237    ##
  2238    # alertmanagerDefaultBaseImage: prometheus/alertmanager
  2239  
  2240    ## Alertmanager image registry to use for alertmanagers managed by the operator
  2241    ##
  2242    # alertmanagerDefaultBaseImageRegistry: quay.io
  2243  
  2244    ## Prometheus-config-reloader
  2245    ##
  2246    prometheusConfigReloader:
  2247      image:
  2248        registry: quay.io
  2249        repository: prometheus-operator/prometheus-config-reloader
  2250        # if not set appVersion field from Chart.yaml is used
  2251        tag: ""
  2252        sha: ""
  2253  
  2254      # resource config for prometheusConfigReloader
  2255      resources:
  2256        requests:
  2257          cpu: 200m
  2258          memory: 50Mi
  2259        limits:
  2260          cpu: 200m
  2261          memory: 50Mi
  2262  
  2263    ## Thanos side-car image when configured
  2264    ##
  2265    thanosImage:
  2266      registry: quay.io
  2267      repository: thanos/thanos
  2268      tag: v0.30.2
  2269      sha: ""
  2270  
  2271    ## Set a Label Selector to filter watched prometheus and prometheusAgent
  2272    ##
  2273    prometheusInstanceSelector: ""
  2274  
  2275    ## Set a Label Selector to filter watched alertmanager
  2276    ##
  2277    alertmanagerInstanceSelector: ""
  2278  
  2279    ## Set a Label Selector to filter watched thanosRuler
  2280    thanosRulerInstanceSelector: ""
  2281  
  2282    ## Set a Field Selector to filter watched secrets
  2283    ##
  2284    secretFieldSelector: ""
  2285  
  2286  ## Deploy a Prometheus instance
  2287  ##
  2288  prometheus:
  2289    enabled: true
  2290  
  2291    ## Annotations for Prometheus
  2292    ##
  2293    annotations: {}
  2294  
  2295    ## Configure network policy for the prometheus
  2296    networkPolicy:
  2297      enabled: false
  2298  
  2299      ## Flavor of the network policy to use.
  2300      #  Can be:
  2301      #  * kubernetes for networking.k8s.io/v1/NetworkPolicy
  2302      #  * cilium     for cilium.io/v2/CiliumNetworkPolicy
  2303      flavor: kubernetes
  2304  
  2305      # cilium:
  2306      #   endpointSelector:
  2307      #   egress:
  2308      #   ingress:
  2309  
  2310      # egress:
  2311      # - {}
  2312      # ingress:
  2313      # - {}
  2314      # podSelector:
  2315      #   matchLabels:
  2316      #     app: prometheus
  2317  
  2318    ## Service account for Prometheuses to use.
  2319    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  2320    ##
  2321    serviceAccount:
  2322      create: true
  2323      name: ""
  2324      annotations: {}
  2325  
  2326    # Service for thanos service discovery on sidecar
  # Enabling this allows Thanos Query to use
  # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover
  # the Thanos sidecar on prometheus nodes
  # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Do not just copy and paste!)
  2331    thanosService:
  2332      enabled: false
  2333      annotations: {}
  2334      labels: {}
  2335  
  2336      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  2337      ##
  2338      externalTrafficPolicy: Cluster
  2339  
  2340      ## Service type
  2341      ##
  2342      type: ClusterIP
  2343  
  2344      ## gRPC port config
  2345      portName: grpc
  2346      port: 10901
  2347      targetPort: "grpc"
  2348  
  2349      ## HTTP port config (for metrics)
  2350      httpPortName: http
  2351      httpPort: 10902
  2352      targetHttpPort: "http"
  2353  
  2354      ## ClusterIP to assign
  2355      # Default is to make this a headless service ("None")
  2356      clusterIP: "None"
  2357  
  2358      ## Port to expose on each node, if service type is NodePort
  2359      ##
  2360      nodePort: 30901
  2361      httpNodePort: 30902
  2362  
  2363    # ServiceMonitor to scrape Sidecar metrics
  2364    # Needs thanosService to be enabled as well
  2365    thanosServiceMonitor:
  2366      enabled: false
  2367      interval: ""
  2368  
  2369      ## Additional labels
  2370      ##
  2371      additionalLabels: {}
  2372  
  2373      ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  2374      scheme: ""
  2375  
  2376      ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  2377      ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
  2378      tlsConfig: {}
  2379  
  2380      bearerTokenFile:
  2381  
  2382      ## Metric relabel configs to apply to samples before ingestion.
  2383      metricRelabelings: []
  2384  
  2385      ## relabel configs to apply to samples before ingestion.
  2386      relabelings: []
  2387  
  2388    # Service for external access to sidecar
  2389    # Enabling this creates a service to expose thanos-sidecar outside the cluster.
  2390    thanosServiceExternal:
  2391      enabled: false
  2392      annotations: {}
  2393      labels: {}
  2394      loadBalancerIP: ""
  2395      loadBalancerSourceRanges: []
  2396  
  2397      ## gRPC port config
  2398      portName: grpc
  2399      port: 10901
  2400      targetPort: "grpc"
  2401  
  2402      ## HTTP port config (for metrics)
  2403      httpPortName: http
  2404      httpPort: 10902
  2405      targetHttpPort: "http"
  2406  
  2407      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  2408      ##
  2409      externalTrafficPolicy: Cluster
  2410  
  2411      ## Service type
  2412      ##
  2413      type: LoadBalancer
  2414  
  2415      ## Port to expose on each node
  2416      ##
  2417      nodePort: 30901
  2418      httpNodePort: 30902
  2419  
  ## Enum for picking the Thanos integration type.
  ## The supported integration types are `disabled` and `sidecar`.
  2422    ##
  2423    thanos:
  2424      integration: disabled
  2425  
  2426    ## Configuration for Prometheus service
  2427    ##
  2428    service:
  2429      annotations: {}
  2430      labels: {}
  2431      clusterIP: ""
  2432  
  2433      ## Port for Prometheus Service to listen on
  2434      ##
  2435      port: 9090
  2436  
  2437      ## To be used with a proxy extraContainer port
  2438      targetPort: 9090
  2439  
  2440      ## List of IP addresses at which the Prometheus server service is available
  2441      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  2442      ##
  2443      externalIPs: []
  2444  
  2445      ## Port to expose on each node
  2446      ## Only used if service.type is 'NodePort'
  2447      ##
  2448      nodePort: 30090
  2449  
  2450      ## Loadbalancer IP
  2451      ## Only use if service.type is "LoadBalancer"
  2452      loadBalancerIP: ""
  2453      loadBalancerSourceRanges: []
  2454  
  2455      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  2456      ##
  2457      externalTrafficPolicy: Cluster
  2458  
  2459      ## Service type
  2460      ##
  2461      type: ClusterIP
  2462  
  2463      ## Additional port to define in the Service
  2464      additionalPorts: []
  2465      # additionalPorts:
  2466      # - name: authenticated
  2467      #   port: 8081
  2468      #   targetPort: 8081
  2469  
    ## Consider all endpoints to be "ready", even if the Pods themselves are not
  2471      ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  2472      publishNotReadyAddresses: false
  2473  
  2474      sessionAffinity: ""
  2475  
  2476    ## Configuration for creating a separate Service for each statefulset Prometheus replica
  2477    ##
  2478    servicePerReplica:
  2479      enabled: false
  2480      annotations: {}
  2481  
  2482      ## Port for Prometheus Service per replica to listen on
  2483      ##
  2484      port: 9090
  2485  
  2486      ## To be used with a proxy extraContainer port
  2487      targetPort: 9090
  2488  
  2489      ## Port to expose on each node
  2490      ## Only used if servicePerReplica.type is 'NodePort'
  2491      ##
  2492      nodePort: 30091
  2493  
  2494      ## Loadbalancer source IP ranges
  2495      ## Only used if servicePerReplica.type is "LoadBalancer"
  2496      loadBalancerSourceRanges: []
  2497  
  2498      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  2499      ##
  2500      externalTrafficPolicy: Cluster
  2501  
  2502      ## Service type
  2503      ##
  2504      type: ClusterIP
  2505  
  2506    ## Configure pod disruption budgets for Prometheus
  2507    ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
  2508    ## This configuration is immutable once created and will require the PDB to be deleted to be changed
  2509    ## https://github.com/kubernetes/kubernetes/issues/45398
  2510    ##
  2511    podDisruptionBudget:
  2512      enabled: false
  2513      minAvailable: 1
  2514      maxUnavailable: ""
  2515  
  2516    # Ingress exposes thanos sidecar outside the cluster
  2517    thanosIngress:
  2518      enabled: false
  2519  
  2520      # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  2521      # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  2522      # ingressClassName: nginx
  2523  
  2524      annotations: {}
  2525      labels: {}
  2526      servicePort: 10901
  2527  
  2528      ## Port to expose on each node
  2529      ## Only used if service.type is 'NodePort'
  2530      ##
  2531      nodePort: 30901
  2532  
  2533      ## Hosts must be provided if Ingress is enabled.
  2534      ##
  2535      hosts: []
  2536        # - thanos-gateway.domain.com
  2537  
  2538      ## Paths to use for ingress rules
  2539      ##
  2540      paths: []
  2541      # - /
  2542  
  2543      ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  2544      ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  2545      # pathType: ImplementationSpecific
  2546  
  2547      ## TLS configuration for Thanos Ingress
  2548      ## Secret must be manually created in the namespace
  2549      ##
  2550      tls: []
  2551      # - secretName: thanos-gateway-tls
  2552      #   hosts:
  2553      #   - thanos-gateway.domain.com
  2554      #
  2555  
  2556    ## ExtraSecret can be used to store various data in an extra secret
  2557    ## (use it for example to store hashed basic auth credentials)
  2558    extraSecret:
  2559      ## if not set, name will be auto generated
  2560      # name: ""
  2561      annotations: {}
  2562      data: {}
  2563    #   auth: |
  2564    #     foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
  2565    #     someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
  2566  
  2567    ingress:
  2568      enabled: false
  2569  
  2570      # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  2571      # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  2572      # ingressClassName: nginx
  2573  
  2574      annotations: {}
  2575      labels: {}
  2576  
  2577      ## Redirect ingress to an additional defined port on the service
  2578      # servicePort: 8081
  2579  
  2580      ## Hostnames.
  2581      ## Must be provided if Ingress is enabled.
  2582      ##
  2583      # hosts:
  2584      #   - prometheus.domain.com
  2585      hosts: []
  2586  
  2587      ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
  2588      ##
  2589      paths: []
  2590      # - /
  2591  
  2592      ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  2593      ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  2594      # pathType: ImplementationSpecific
  2595  
  2596      ## TLS configuration for Prometheus Ingress
  2597      ## Secret must be manually created in the namespace
  2598      ##
  2599      tls: []
  2600        # - secretName: prometheus-general-tls
  2601        #   hosts:
  2602        #     - prometheus.example.com
  2603  
  2604    ## Configuration for creating an Ingress that will map to each Prometheus replica service
  2605    ## prometheus.servicePerReplica must be enabled
  2606    ##
  2607    ingressPerReplica:
  2608      enabled: false
  2609  
  2610      # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  2611      # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  2612      # ingressClassName: nginx
  2613  
  2614      annotations: {}
  2615      labels: {}
  2616  
  2617      ## Final form of the hostname for each per replica ingress is
  2618      ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
  2619      ##
  2620      ## Prefix for the per replica ingress that will have `-$replicaNumber`
  2621      ## appended to the end
  2622      hostPrefix: ""
  2623      ## Domain that will be used for the per replica ingress
  2624      hostDomain: ""
  2625  
  2626      ## Paths to use for ingress rules
  2627      ##
  2628      paths: []
  2629      # - /
  2630  
  2631      ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  2632      ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  2633      # pathType: ImplementationSpecific
  2634  
  2635      ## Secret name containing the TLS certificate for Prometheus per replica ingress
  2636      ## Secret must be manually created in the namespace
  2637      tlsSecretName: ""
  2638  
  2639      ## Separated secret for each per replica Ingress. Can be used together with cert-manager
  2640      ##
  2641      tlsSecretPerReplica:
  2642        enabled: false
  2643        ## Final form of the secret for each per replica ingress is
  2644        ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
  2645        ##
  2646        prefix: "prometheus"
  2647  
  2648    ## Configure additional options for default pod security policy for Prometheus
  2649    ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
  2650    podSecurityPolicy:
  2651      allowedCapabilities: []
  2652      allowedHostPaths: []
  2653      volumes: []
  2654  
  2655    serviceMonitor:
  2656      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  2657      ##
  2658      interval: ""
  2659      selfMonitor: true
  2660  
  2661      ## Additional labels
  2662      ##
  2663      additionalLabels: {}
  2664  
  2665      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  2666      ##
  2667      sampleLimit: 0
  2668  
  2669      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  2670      ##
  2671      targetLimit: 0
  2672  
  2673      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  2674      ##
  2675      labelLimit: 0
  2676  
  2677      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  2678      ##
  2679      labelNameLengthLimit: 0
  2680  
  2681      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  2682      ##
  2683      labelValueLengthLimit: 0
  2684  
  2685      ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  2686      scheme: ""
  2687  
  2688      ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  2689      ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
  2690      tlsConfig: {}
  2691  
  2692      bearerTokenFile:
  2693  
  2694      ## Metric relabel configs to apply to samples before ingestion.
  2695      ##
  2696      metricRelabelings: []
  2697      # - action: keep
  2698      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  2699      #   sourceLabels: [__name__]
  2700  
    ## relabel configs to apply to samples before ingestion.
  2702      ##
  2703      relabelings: []
  2704      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  2705      #   separator: ;
  2706      #   regex: ^(.*)$
  2707      #   targetLabel: nodename
  2708      #   replacement: $1
  2709      #   action: replace
  2710  
  2711    ## Settings affecting prometheusSpec
  2712    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec
  2713    ##
  2714    prometheusSpec:
  2715      ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
  2716      ##
  2717      disableCompaction: false
  2718      ## APIServerConfig
  2719      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig
  2720      ##
  2721      apiserverConfig: {}
  2722  
  2723      ## Allows setting additional arguments for the Prometheus container
  2724      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
  2725      additionalArgs: []
  2726  
  2727      ## Interval between consecutive scrapes.
  2728      ## Defaults to 30s.
  2729      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
  2730      ##
  2731      scrapeInterval: ""
  2732  
  2733      ## Number of seconds to wait for target to respond before erroring
  2734      ##
  2735      scrapeTimeout: ""
  2736  
  2737      ## Interval between consecutive evaluations.
  2738      ##
  2739      evaluationInterval: ""
  2740  
  2741      ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
  2742      ##
  2743      listenLocal: false
  2744  
  2745      ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series.
  2746      ## This is disabled by default.
  2747      ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
  2748      ##
  2749      enableAdminAPI: false
  2750  
  2751      ## Sets version of Prometheus overriding the Prometheus version as derived
  2752      ## from the image tag. Useful in cases where the tag does not follow semver v2.
  2753      version: ""
  2754  
  2755      ## WebTLSConfig defines the TLS parameters for HTTPS
  2756      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig
  2757      web: {}
  2758  
    ## Exemplar-related settings that are runtime reloadable.
    ## The exemplar storage feature must be enabled for these to take effect.
  2761      exemplars: ""
  2762        ## Maximum number of exemplars stored in memory for all series.
  2763        ## If not set, Prometheus uses its default value.
  2764        ## A value of zero or less than zero disables the storage.
  2765        # maxSize: 100000
  2766  
  2767      # EnableFeatures API enables access to Prometheus disabled features.
  2768      # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/
  2769      enableFeatures: []
  2770      # - exemplar-storage
  2771  
  2772      ## Image of Prometheus.
  2773      ##
  2774      image:
  2775        registry: quay.io
  2776        repository: prometheus/prometheus
  2777        tag: v2.42.0
  2778        sha: ""
  2779  
  2780      ## Tolerations for use with node taints
  2781      ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  2782      ##
  2783      tolerations: []
  2784      #  - key: "key"
  2785      #    operator: "Equal"
  2786      #    value: "value"
  2787      #    effect: "NoSchedule"
  2788  
  2789      ## If specified, the pod's topology spread constraints.
  2790      ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  2791      ##
  2792      topologySpreadConstraints: []
  2793      # - maxSkew: 1
  2794      #   topologyKey: topology.kubernetes.io/zone
  2795      #   whenUnsatisfiable: DoNotSchedule
  2796      #   labelSelector:
  2797      #     matchLabels:
  2798      #       app: prometheus
  2799  
  2800      ## Alertmanagers to which alerts will be sent
  2801      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints
  2802      ##
  2803      ## Default configuration will connect to the alertmanager deployed as part of this release
  2804      ##
  2805      alertingEndpoints: []
  2806      # - name: ""
  2807      #   namespace: ""
  2808      #   port: http
  2809      #   scheme: http
  2810      #   pathPrefix: ""
  2811      #   tlsConfig: {}
  2812      #   bearerTokenFile: ""
  2813      #   apiVersion: v2
  2814  
  2815      ## External labels to add to any time series or alerts when communicating with external systems
  2816      ##
  2817      externalLabels: {}
  2818  
  2819      ## enable --web.enable-remote-write-receiver flag on prometheus-server
  2820      ##
  2821      enableRemoteWriteReceiver: false
  2822  
  2823      ## Name of the external label used to denote replica name
  2824      ##
  2825      replicaExternalLabelName: ""
  2826  
  2827      ## If true, the Operator won't add the external label used to denote replica name
  2828      ##
  2829      replicaExternalLabelNameClear: false
  2830  
  2831      ## Name of the external label used to denote Prometheus instance name
  2832      ##
  2833      prometheusExternalLabelName: ""
  2834  
  2835      ## If true, the Operator won't add the external label used to denote Prometheus instance name
  2836      ##
  2837      prometheusExternalLabelNameClear: false
  2838  
  2839      ## External URL at which Prometheus will be reachable.
  2840      ##
  2841      externalUrl: ""
  2842  
  2843      ## Define which Nodes the Pods are scheduled on.
  2844      ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  2845      ##
  2846      nodeSelector: {}
  2847  
  2848      ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
  2849      ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
  2850      ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
  2851      ## with the new list of secrets.
  2852      ##
  2853      secrets: []
  2854  
  2855      ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
  2856      ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
  2857      ##
  2858      configMaps: []
  2859  
  2860      ## QuerySpec defines the query command line flags when starting Prometheus.
  2861      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec
  2862      ##
  2863      query: {}
  2864  
  2865      ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
  2866      ruleNamespaceSelector: {}
  2867      ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
  2868      # ruleNamespaceSelector:
  2869      #   matchLabels:
  2870      #     prometheus: somelabel
  2871  
  2872      ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
  2873      ## prometheus resource to be created with selectors based on values in the helm deployment,
  2874      ## which will also match the PrometheusRule resources created
  2875      ##
  2876      ruleSelectorNilUsesHelmValues: true
  2877  
  2878      ## PrometheusRules to be selected for target discovery.
  2879      ## If {}, select all PrometheusRules
  2880      ##
  2881      ruleSelector: {}
  2882      ## Example which select all PrometheusRules resources
  2883      ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
  2884      # ruleSelector:
  2885      #   matchExpressions:
  2886      #     - key: prometheus
  2887      #       operator: In
  2888      #       values:
  2889      #         - example-rules
  2890      #         - example-rules-2
  2891      #
  2892      ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
  2893      # ruleSelector:
  2894      #   matchLabels:
  2895      #     role: example-rules
  2896  
  2897      ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
  2898      ## prometheus resource to be created with selectors based on values in the helm deployment,
  2899      ## which will also match the servicemonitors created
  2900      ##
  2901      serviceMonitorSelectorNilUsesHelmValues: true
  2902  
  2903      ## ServiceMonitors to be selected for target discovery.
  2904      ## If {}, select all ServiceMonitors
  2905      ##
  2906      serviceMonitorSelector: {}
  2907      ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
  2908      # serviceMonitorSelector:
  2909      #   matchLabels:
  2910      #     prometheus: somelabel
  2911  
  2912      ## Namespaces to be selected for ServiceMonitor discovery.
  2913      ##
  2914      serviceMonitorNamespaceSelector: {}
  2915      ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
  2916      # serviceMonitorNamespaceSelector:
  2917      #   matchLabels:
  2918      #     prometheus: somelabel
  2919  
  2920      ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
  2921      ## prometheus resource to be created with selectors based on values in the helm deployment,
  2922      ## which will also match the podmonitors created
  2923      ##
  2924      podMonitorSelectorNilUsesHelmValues: true
  2925  
  2926      ## PodMonitors to be selected for target discovery.
  2927      ## If {}, select all PodMonitors
  2928      ##
  2929      podMonitorSelector: {}
  2930      ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
  2931      # podMonitorSelector:
  2932      #   matchLabels:
  2933      #     prometheus: somelabel
  2934  
  2935      ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
  2936      podMonitorNamespaceSelector: {}
  2937      ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
  2938      # podMonitorNamespaceSelector:
  2939      #   matchLabels:
  2940      #     prometheus: somelabel
  2941  
  2942      ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
  2943      ## prometheus resource to be created with selectors based on values in the helm deployment,
  2944      ## which will also match the probes created
  2945      ##
  2946      probeSelectorNilUsesHelmValues: true
  2947  
  2948      ## Probes to be selected for target discovery.
  2949      ## If {}, select all Probes
  2950      ##
  2951      probeSelector: {}
  2952      ## Example which selects Probes with label "prometheus" set to "somelabel"
  2953      # probeSelector:
  2954      #   matchLabels:
  2955      #     prometheus: somelabel
  2956  
  2957      ## If nil, select own namespace. Namespaces to be selected for Probe discovery.
  2958      probeNamespaceSelector: {}
  2959      ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
  2960      # probeNamespaceSelector:
  2961      #   matchLabels:
  2962      #     prometheus: somelabel
  2963  
  2964      ## How long to retain metrics
  2965      ##
  2966      retention: 10d
  2967  
  2968      ## Maximum size of metrics
  2969      ##
  2970      retentionSize: ""
  2971  
  2972      ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
  2973      ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
  2974      tsdb:
  2975        outOfOrderTimeWindow: 0s
  2976  
  2977      ## Enable compression of the write-ahead log using Snappy.
  2978      ##
  2979      walCompression: true
  2980  
  2981      ## If true, the Operator won't process any Prometheus configuration changes
  2982      ##
  2983      paused: false
  2984  
  2985      ## Number of replicas of each shard to deploy for a Prometheus deployment.
  2986      ## Number of replicas multiplied by shards is the total number of Pods created.
  2987      ##
  2988      replicas: 1
  2989  
  2990      ## EXPERIMENTAL: Number of shards to distribute targets onto.
  2991      ## Number of replicas multiplied by shards is the total number of Pods created.
  2992      ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
  2993      ## Increasing shards will not reshard data either but it will continue to be available from the same instances.
  2994      ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
  2995      ## Sharding is done on the content of the `__address__` target meta-label.
  2996      ##
  2997      shards: 1
  2998  
    ## Log level for Prometheus to be configured with
  3000      ##
  3001      logLevel: info
  3002  
    ## Log format for Prometheus to be configured with
  3004      ##
  3005      logFormat: logfmt
  3006  
  3007      ## Prefix used to register routes, overriding externalUrl route.
  3008      ## Useful for proxies that rewrite URLs.
  3009      ##
  3010      routePrefix: /
  3011  
  3012      ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
  3013      ## Metadata Labels and Annotations gets propagated to the prometheus pods.
  3014      ##
  3015      podMetadata: {}
  3016      # labels:
  3017      #   app: prometheus
  3018      #   k8s-app: prometheus
  3019  
  3020      ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
  3021      ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
  3022      ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
  3023      ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
  3024      podAntiAffinity: ""
  3025  
  3026      ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
  3027      ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
  3028      ##
  3029      podAntiAffinityTopologyKey: kubernetes.io/hostname
  3030  
  3031      ## Assign custom affinity rules to the prometheus instance
  3032      ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  3033      ##
  3034      affinity: {}
  3035      # nodeAffinity:
  3036      #   requiredDuringSchedulingIgnoredDuringExecution:
  3037      #     nodeSelectorTerms:
  3038      #     - matchExpressions:
  3039      #       - key: kubernetes.io/e2e-az-name
  3040      #         operator: In
  3041      #         values:
  3042      #         - e2e-az1
  3043      #         - e2e-az2
  3044  
  3045      ## The remote_read spec configuration for Prometheus.
  3046      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec
  3047      remoteRead: []
  3048      # - url: http://remote1/read
  3049      ## additionalRemoteRead is appended to remoteRead
  3050      additionalRemoteRead: []
  3051  
  3052      ## The remote_write spec configuration for Prometheus.
  3053      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec
  3054      remoteWrite: []
  3055      # - url: http://remote1/push
  3056      ## additionalRemoteWrite is appended to remoteWrite
  3057      additionalRemoteWrite: []
  3058  
  3059      ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
  3060      remoteWriteDashboards: false
  3061  
  3062      ## Resource limits & requests
  3063      ##
  3064      resources: {}
  3065      # requests:
  3066      #   memory: 400Mi
  3067  
  3068      ## Prometheus StorageSpec for persistent data
  3069      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
  3070      ##
  3071      storageSpec: {}
  3072      ## Using PersistentVolumeClaim
  3073      ##
  3074      #  volumeClaimTemplate:
  3075      #    spec:
  3076      #      storageClassName: gluster
  3077      #      accessModes: ["ReadWriteOnce"]
  3078      #      resources:
  3079      #        requests:
  3080      #          storage: 50Gi
  3081      #    selector: {}
  3082  
  3083      ## Using tmpfs volume
  3084      ##
  3085      #  emptyDir:
  3086      #    medium: Memory
  3087  
  3088      # Additional volumes on the output StatefulSet definition.
  3089      volumes: []
  3090  
  3091      # Additional VolumeMounts on the output StatefulSet definition.
  3092      volumeMounts: []
  3093  
  3094      ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
  3095      ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
  3096      ## as specified in the official Prometheus documentation:
  3097      ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
  3098      ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility
  3099      ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
  3100      ## scrape configs are going to break Prometheus after the upgrade.
  3101      ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
  3102      ##
  3103      ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
  3104      ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
  3105      ##
  3106      additionalScrapeConfigs: []
  3107      # - job_name: kube-etcd
  3108      #   kubernetes_sd_configs:
  3109      #     - role: node
  3110      #   scheme: https
  3111      #   tls_config:
  3112      #     ca_file:   /etc/prometheus/secrets/etcd-client-cert/etcd-ca
  3113      #     cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
  3114      #     key_file:  /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
  3115      #   relabel_configs:
  3116      #   - action: labelmap
  3117      #     regex: __meta_kubernetes_node_label_(.+)
  3118      #   - source_labels: [__address__]
  3119      #     action: replace
  3120      #     targetLabel: __address__
  3121      #     regex: ([^:;]+):(\d+)
  3122      #     replacement: ${1}:2379
  3123      #   - source_labels: [__meta_kubernetes_node_name]
  3124      #     action: keep
  3125      #     regex: .*mst.*
  3126      #   - source_labels: [__meta_kubernetes_node_name]
  3127      #     action: replace
  3128      #     targetLabel: node
  3129      #     regex: (.*)
  3130      #     replacement: ${1}
  3131      #   metric_relabel_configs:
  3132      #   - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
  3133      #     action: labeldrop
  3134      #
  3135      ## If scrape config contains a repetitive section, you may want to use a template.
  3136      ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
  3137      # additionalScrapeConfigs: |
  3138      #  - job_name: "node-exporter"
  3139      #    gce_sd_configs:
  3140      #    {{range $zone := .Values.gcp_zones}}
  3141      #    - project: "project1"
  3142      #      zone: "{{$zone}}"
  3143      #      port: 9100
  3144      #    {{end}}
  3145      #    relabel_configs:
  3146      #    ...
  3147  
  3148  
  3149      ## If additional scrape configurations are already deployed in a single secret file you can use this section.
  3150      ## Expected values are the secret name and key
  3151      ## Cannot be used with additionalScrapeConfigs
  3152      additionalScrapeConfigsSecret: {}
  3153        # enabled: false
  3154        # name:
  3155        # key:
  3156  
  3157      ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
  3158      ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
  3159      additionalPrometheusSecretsAnnotations: {}
  3160  
  3161      ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
  3162      ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<alertmanager_config>.
  3163      ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
  3164      ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this
  3165      ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
  3166      ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
  3167      ##
  3168      additionalAlertManagerConfigs: []
  3169      # - consul_sd_configs:
  3170      #   - server: consul.dev.test:8500
  3171      #     scheme: http
  3172      #     datacenter: dev
  3173      #     tag_separator: ','
  3174      #     services:
  3175      #       - metrics-prometheus-alertmanager
  3176  
  3177      ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
  3178      ## them separately from the helm deployment, you can use this section.
  3179      ## Expected values are the secret name and key
  3180      ## Cannot be used with additionalAlertManagerConfigs
  3181      additionalAlertManagerConfigsSecret: {}
  3182        # name:
  3183        # key:
  3184        # optional: false
  3185  
  3186      ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
  3187      ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
  3188      ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
  3189      ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the
  3190      ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
  3191      ## configs are going to break Prometheus after the upgrade.
  3192      ##
  3193      additionalAlertRelabelConfigs: []
  3194      # - separator: ;
  3195      #   regex: prometheus_replica
  3196      #   replacement: $1
  3197      #   action: labeldrop
  3198  
  3199      ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
  3200      ## them separately from the helm deployment, you can use this section.
  3201      ## Expected values are the secret name and key
  3202      ## Cannot be used with additionalAlertRelabelConfigs
  3203      additionalAlertRelabelConfigsSecret: {}
  3204        # name:
  3205        # key:
  3206  
  3207      ## SecurityContext holds pod-level security attributes and common container settings.
  3208      ## This defaults to non root user with uid 1000 and gid 2000.
  3209      ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md
  3210      ##
  3211      securityContext:
  3212        runAsGroup: 2000
  3213        runAsNonRoot: true
  3214        runAsUser: 1000
  3215        fsGroup: 2000
  3216  
  3217      ## Priority class assigned to the Pods
  3218      ##
  3219      priorityClassName: ""
  3220  
  3221      ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
  3222      ## This section is experimental, it may change significantly without deprecation notice in any release.
  3223      ## This is experimental and may change significantly without backward compatibility in any release.
  3224      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec
  3225      ##
  3226      thanos: {}
  3227        # secretProviderClass:
  3228        #   provider: gcp
  3229        #   parameters:
  3230        #     secrets: |
  3231        #       - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
  3232        #         fileName: "objstore.yaml"
  3233        # objectStorageConfigFile: /var/secrets/object-store.yaml
  3234  
  3235      ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
  3236      ## if using proxy extraContainer update targetPort with proxy container port
  3237      containers: []
  3238      # containers:
  3239      # - name: oauth-proxy
  3240      #   image: quay.io/oauth2-proxy/oauth2-proxy:v7.3.0
  3241      #   args:
  3242      #   - --upstream=http://127.0.0.1:9093
  3243      #   - --http-address=0.0.0.0:8081
  3244      #   - ...
  3245      #   ports:
  3246      #   - containerPort: 8081
  3247      #     name: oauth-proxy
  3248      #     protocol: TCP
  3249      #   resources: {}
  3250  
  3251      ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
  3252      ## (permissions, dir tree) on mounted volumes before starting prometheus
  3253      initContainers: []
  3254  
  3255      ## PortName to use for Prometheus.
  3256      ##
  3257      portName: "http-web"
  3258  
  3259      ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
  3260      ## on the file system of the Prometheus container e.g. bearer token files.
  3261      arbitraryFSAccessThroughSMs: false
  3262  
  3263      ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
  3264      ## or PodMonitor to true, this overrides honor_labels to false.
  3265      overrideHonorLabels: false
  3266  
  3267      ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
  3268      overrideHonorTimestamps: false
  3269  
  3270      ## IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor
  3271      ## configs, and they will only discover endpoints within their current namespace. Defaults to false.
  3272      ignoreNamespaceSelectors: false
  3273  
  3274      ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
  3275      ## The label value will always be the namespace of the object that is being created.
  3276      ## Disabled by default
  3277      enforcedNamespaceLabel: ""
  3278  
  3279      ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
  3280      ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair
  3281      ## Deprecated, use `excludedFromEnforcement` instead
  3282      prometheusRulesExcludedFromEnforce: []
  3283  
  3284      ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
  3285      ## to be excluded from enforcing a namespace label of origin.
  3286      ## Works only if enforcedNamespaceLabel set to true.
  3287      ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
  3288      excludedFromEnforcement: []
  3289  
  3290      ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
  3291      ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
  3292      ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
  3293      ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
  3294      queryLogFile: false
  3295  
  3296      ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
  3297      ## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall
  3298      ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
  3299      enforcedSampleLimit: false
  3300  
  3301      ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
  3302      ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
  3303      ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
  3304      ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
  3305      enforcedTargetLimit: false
  3306  
  3307  
  3308      ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
  3309      ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
  3310      ## 2.27.0 and newer.
  3311      enforcedLabelLimit: false
  3312  
  3313      ## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number
  3314      ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
  3315      ## 2.27.0 and newer.
  3316      enforcedLabelNameLengthLimit: false
  3317  
  3318      ## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this
  3319      ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
  3320      ## versions 2.27.0 and newer.
  3321      enforcedLabelValueLengthLimit: false
  3322  
  3323      ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
  3324      ## in Prometheus so it may change in any upcoming release.
  3325      allowOverlappingBlocks: false
  3326  
  3327      ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
  3328      ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
  3329      minReadySeconds: 0
  3330  
  3331      # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
  3332      # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
  3333      # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
  3334      # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
  3335      hostNetwork: false
  3336  
  3337      # HostAlias holds the mapping between IP and hostnames that will be injected
  3338      # as an entry in the pod’s hosts file.
  3339      hostAliases: []
  3340      #  - ip: 10.10.0.100
  3341      #    hostnames:
  3342      #      - a1.app.local
  3343      #      - b1.app.local
  3344  
  3345    additionalRulesForClusterRole: []
  3346    #  - apiGroups: [ "" ]
  3347    #    resources:
  3348    #      - nodes/proxy
  3349    #    verbs: [ "get", "list", "watch" ]
  3350  
  3351    additionalServiceMonitors: []
  3352    ## Name of the ServiceMonitor to create
  3353    ##
  3354    # - name: ""
  3355  
  3356      ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
  3357      ## the chart
  3358      ##
  3359      # additionalLabels: {}
  3360  
  3361      ## Service label for use in assembling a job name of the form <label value>-<port>
  3362      ## If no label is specified, the service name is used.
  3363      ##
  3364      # jobLabel: ""
  3365  
  3366      ## labels to transfer from the kubernetes service to the target
  3367      ##
  3368      # targetLabels: []
  3369  
  3370      ## labels to transfer from the kubernetes pods to the target
  3371      ##
  3372      # podTargetLabels: []
  3373  
  3374      ## Label selector for services to which this ServiceMonitor applies
  3375      ##
  3376      # selector: {}
  3377  
  3378      ## Namespaces from which services are selected
  3379      ##
  3380      # namespaceSelector:
  3381        ## Match any namespace
  3382        ##
  3383        # any: false
  3384  
  3385        ## Explicit list of namespace names to select
  3386        ##
  3387        # matchNames: []
  3388  
  3389      ## Endpoints of the selected service to be monitored
  3390      ##
  3391      # endpoints: []
  3392        ## Name of the endpoint's service port
  3393        ## Mutually exclusive with targetPort
  3394        # - port: ""
  3395  
  3396        ## Name or number of the endpoint's target port
  3397        ## Mutually exclusive with port
  3398        # - targetPort: ""
  3399  
  3400        ## File containing bearer token to be used when scraping targets
  3401        ##
  3402        #   bearerTokenFile: ""
  3403  
  3404        ## Interval at which metrics should be scraped
  3405        ##
  3406        #   interval: 30s
  3407  
  3408        ## HTTP path to scrape for metrics
  3409        ##
  3410        #   path: /metrics
  3411  
  3412        ## HTTP scheme to use for scraping
  3413        ##
  3414        #   scheme: http
  3415  
  3416        ## TLS configuration to use when scraping the endpoint
  3417        ##
  3418        #   tlsConfig:
  3419  
  3420            ## Path to the CA file
  3421            ##
  3422            # caFile: ""
  3423  
  3424            ## Path to client certificate file
  3425            ##
  3426            # certFile: ""
  3427  
  3428            ## Skip certificate verification
  3429            ##
  3430            # insecureSkipVerify: false
  3431  
  3432            ## Path to client key file
  3433            ##
  3434            # keyFile: ""
  3435  
  3436            ## Server name used to verify host name
  3437            ##
  3438            # serverName: ""
  3439  
  3440    additionalPodMonitors: []
  3441    ## Name of the PodMonitor to create
  3442    ##
  3443    # - name: ""
  3444  
  3445      ## Additional labels to set used for the PodMonitorSelector. Together with standard labels from
  3446      ## the chart
  3447      ##
  3448      # additionalLabels: {}
  3449  
  3450      ## Pod label for use in assembling a job name of the form <label value>-<port>
  3451      ## If no label is specified, the pod endpoint name is used.
  3452      ##
  3453      # jobLabel: ""
  3454  
  3455      ## Label selector for pods to which this PodMonitor applies
  3456      ##
  3457      # selector: {}
  3458  
  3459      ## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
  3460      ##
  3461      # podTargetLabels: {}
  3462  
  3463      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  3464      ##
  3465      # sampleLimit: 0
  3466  
  3467      ## Namespaces from which pods are selected
  3468      ##
  3469      # namespaceSelector:
  3470        ## Match any namespace
  3471        ##
  3472        # any: false
  3473  
  3474        ## Explicit list of namespace names to select
  3475        ##
  3476        # matchNames: []
  3477  
  3478      ## Endpoints of the selected pods to be monitored
  3479      ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#podmetricsendpoint
  3480      ##
  3481      # podMetricsEndpoints: []
  3482  
  3483    ## Enable the default Alertmanager endpoint for Prometheus.
  3484    ##
  3485    enableDefaultAlertingEndpoint: true
  3486  
  3487  ## Configuration for thanosRuler
  3488  ## ref: https://thanos.io/tip/components/rule.md/
  3489  ##
  3490  thanosRuler:
  3491  
  3492    ## Deploy thanosRuler
  3493    ##
  3494    enabled: false
  3495  
  3496    ## Annotations for ThanosRuler
  3497    ##
  3498    annotations: {}
  3499  
  3500    ## Service account for ThanosRuler to use.
  3501    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  3502    ##
  3503    serviceAccount:
  3504      create: true
  3505      name: ""
  3506      annotations: {}
  3507  
  3508    ## Configure pod disruption budgets for ThanosRuler
  3509    ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
  3510    ## This configuration is immutable once created and will require the PDB to be deleted to be changed
  3511    ## https://github.com/kubernetes/kubernetes/issues/45398
  3512    ##
  3513    podDisruptionBudget:
  3514      enabled: false
  3515      minAvailable: 1
  3516      maxUnavailable: ""
  3517  
  3518    ingress:
  3519      enabled: false
  3520  
  3521      # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
  3522      # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
  3523      # ingressClassName: nginx
  3524  
  3525      annotations: {}
  3526  
  3527      labels: {}
  3528  
  3529      ## Hosts must be provided if Ingress is enabled.
  3530      ##
  3531      hosts: []
  3532        # - thanosruler.domain.com
  3533  
  3534      ## Paths to use for ingress rules - one path should match the thanosruler.routePrefix
  3535      ##
  3536      paths: []
  3537      # - /
  3538  
  3539      ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  3540      ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  3541      # pathType: ImplementationSpecific
  3542  
  3543      ## TLS configuration for ThanosRuler Ingress
  3544      ## Secret must be manually created in the namespace
  3545      ##
  3546      tls: []
  3547      # - secretName: thanosruler-general-tls
  3548      #   hosts:
  3549      #   - thanosruler.example.com
  3550  
  3551    ## Configuration for ThanosRuler service
  3552    ##
  3553    service:
  3554      annotations: {}
  3555      labels: {}
  3556      clusterIP: ""
  3557  
  3558      ## Port for ThanosRuler Service to listen on
  3559      ##
  3560      port: 10902
  3561      ## To be used with a proxy extraContainer port
  3562      ##
  3563      targetPort: 10902
  3564      ## Port to expose on each node
  3565      ## Only used if service.type is 'NodePort'
  3566      ##
  3567      nodePort: 30905
  3568      ## List of IP addresses at which the Prometheus server service is available
  3569      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  3570      ##
  3571  
  3572      ## Additional ports to open for ThanosRuler service
  3573      additionalPorts: []
  3574  
  3575      externalIPs: []
  3576      loadBalancerIP: ""
  3577      loadBalancerSourceRanges: []
  3578  
  3579      ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  3580      ##
  3581      externalTrafficPolicy: Cluster
  3582  
  3583      ## Service type
  3584      ##
  3585      type: ClusterIP
  3586  
  3587    ## If true, create a serviceMonitor for thanosRuler
  3588    ##
  3589    serviceMonitor:
  3590      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  3591      ##
  3592      interval: ""
  3593      selfMonitor: true
  3594  
  3595      ## Additional labels
  3596      ##
  3597      additionalLabels: {}
  3598  
  3599      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  3600      ##
  3601      sampleLimit: 0
  3602  
  3603      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  3604      ##
  3605      targetLimit: 0
  3606  
  3607      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  3608      ##
  3609      labelLimit: 0
  3610  
  3611      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  3612      ##
  3613      labelNameLengthLimit: 0
  3614  
  3615      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  3616      ##
  3617      labelValueLengthLimit: 0
  3618  
  3619      ## proxyUrl: URL of a proxy that should be used for scraping.
  3620      ##
  3621      proxyUrl: ""
  3622  
  3623      ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  3624      scheme: ""
  3625  
  3626      ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  3627      ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
  3628      tlsConfig: {}
  3629  
  3630      bearerTokenFile:
  3631  
  3632      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
  3633      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  3634      ##
  3635      metricRelabelings: []
  3636      # - action: keep
  3637      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  3638      #   sourceLabels: [__name__]
  3639  
  3640      ## RelabelConfigs to apply to samples before scraping
  3641      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig
  3642      ##
  3643      relabelings: []
  3644      # - sourceLabels: [__meta_kubernetes_pod_node_name]
  3645      #   separator: ;
  3646      #   regex: ^(.*)$
  3647      #   targetLabel: nodename
  3648      #   replacement: $1
  3649      #   action: replace
  3650  
  3651    ## Settings affecting thanosRulerSpec
  3652    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosrulerspec
  3653    ##
  3654    thanosRulerSpec:
  3655      ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
  3656      ## Metadata Labels and Annotations gets propagated to the ThanosRuler pods.
  3657      ##
  3658      podMetadata: {}
  3659  
  3660      ## Image of ThanosRuler
  3661      ##
  3662      image:
  3663        registry: quay.io
  3664        repository: thanos/thanos
  3665        tag: v0.30.2
  3666        sha: ""
  3667  
  3668      ## Namespaces to be selected for PrometheusRules discovery.
  3669      ## If nil, select own namespace.
  3670      ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#namespaceselector for usage
  3671      ##
  3672      ruleNamespaceSelector: {}
  3673  
  3674      ## If true, a nil or {} value for thanosRuler.thanosRulerSpec.ruleSelector will cause the
  3675      ## prometheus resource to be created with selectors based on values in the helm deployment,
  3676      ## which will also match the PrometheusRule resources created
  3677      ##
  3678      ruleSelectorNilUsesHelmValues: true
  3679  
  3680      ## PrometheusRules to be selected for target discovery.
  3681      ## If {}, select all PrometheusRules
  3682      ##
  3683      ruleSelector: {}
  3684      ## Example which select all PrometheusRules resources
  3685      ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
  3686      # ruleSelector:
  3687      #   matchExpressions:
  3688      #     - key: prometheus
  3689      #       operator: In
  3690      #       values:
  3691      #         - example-rules
  3692      #         - example-rules-2
  3693      #
  3694      ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
  3695      # ruleSelector:
  3696      #   matchLabels:
  3697      #     role: example-rules
  3698  
  3699      ## Define Log Format
  3700      # Use logfmt (default) or json logging
  3701      logFormat: logfmt
  3702  
  3703      ## Log level for ThanosRuler to be configured with.
  3704      ##
  3705      logLevel: info
  3706  
  3707      ## Size is the expected size of the thanosRuler cluster. The controller will eventually make the size of the
  3708      ## running cluster equal to the expected size.
  3709      replicas: 1
  3710  
  3711      ## Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression
  3712      ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
  3713      ##
  3714      retention: 24h
  3715  
  3716      ## Interval between consecutive evaluations.
  3717      ##
  3718      evaluationInterval: ""
  3719  
  3720      ## Storage is the definition of how storage will be used by the ThanosRuler instances.
  3721      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
  3722      ##
  3723      storage: {}
  3724      # volumeClaimTemplate:
  3725      #   spec:
  3726      #     storageClassName: gluster
  3727      #     accessModes: ["ReadWriteOnce"]
  3728      #     resources:
  3729      #       requests:
  3730      #         storage: 50Gi
  3731      #   selector: {}
  3732  
  3733      ## AlertmanagerConfig defines configuration for connecting to alertmanager.
  3734      ## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg.
  3735      alertmanagersConfig: {}
  3736      #   - api_version: v2
  3737      #     http_config:
  3738      #       basic_auth:
  3739      #         username: some_user
  3740      #         password: some_pass
  3741      #     static_configs:
  3742      #       - alertmanager.thanos.io
  3743      #     scheme: http
  3744      #     timeout: 10s
  3745  
  3746      ## DEPRECATED. Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, alertmanagersConfig should be used instead.
  3747      ## Note: this field will be ignored if alertmanagersConfig is specified. Maps to the alertmanagers.url Thanos Ruler arg.
  3748      # alertmanagersUrl:
  3749  
    ## The external URL the Thanos Ruler instances will be available under. This is needed to generate correct URLs
    ## when Thanos Ruler is not served from the root of a DNS name.
  3751      ##
  3752      externalPrefix:
  3753  
    ## The route prefix ThanosRuler registers HTTP handlers for. This is useful when using ExternalURL with a proxy
    ## that rewrites the HTTP routes of a request while the actual ExternalURL is still true, but the server serves
    ## requests under a different route prefix. For example, for use with kubectl proxy.
  3756      ##
  3757      routePrefix: /
  3758  
  3759      ## ObjectStorageConfig configures object storage in Thanos. Alternative to
  3760      ## ObjectStorageConfigFile, and lower order priority.
  3761      objectStorageConfig: {}
  3762  
  3763      ## ObjectStorageConfigFile specifies the path of the object storage configuration file.
  3764      ## When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence.
  3765      objectStorageConfigFile: ""
  3766  
  3767      ## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
  3768      ## Maps to the --query flag of thanos ruler.
  3769      queryEndpoints: []
  3770  
  3771      ## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
  3772      ## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
  3773      queryConfig: {}
  3774  
  3775      ## Labels configure the external label pairs to ThanosRuler. A default replica
  3776      ## label `thanos_ruler_replica` will be always added as a label with the value
  3777      ## of the pod's name and it will be dropped in the alerts.
  3778      labels: {}
  3779  
  3780      ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
  3781      ##
  3782      paused: false
  3783  
  3784      ## Define which Nodes the Pods are scheduled on.
  3785      ## ref: https://kubernetes.io/docs/user-guide/node-selection/
  3786      ##
  3787      nodeSelector: {}
  3788  
  3789      ## Define resources requests and limits for single Pods.
  3790      ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
  3791      ##
  3792      resources: {}
  3793      # requests:
  3794      #   memory: 400Mi
  3795  
    ## Pod anti-affinity can prevent the scheduler from placing ThanosRuler replicas on the same node.
  3797      ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
  3798      ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
  3799      ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
  3800      ##
  3801      podAntiAffinity: ""
  3802  
  3803      ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
  3804      ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
  3805      ##
  3806      podAntiAffinityTopologyKey: kubernetes.io/hostname
  3807  
  3808      ## Assign custom affinity rules to the thanosRuler instance
  3809      ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  3810      ##
  3811      affinity: {}
  3812      # nodeAffinity:
  3813      #   requiredDuringSchedulingIgnoredDuringExecution:
  3814      #     nodeSelectorTerms:
  3815      #     - matchExpressions:
  3816      #       - key: kubernetes.io/e2e-az-name
  3817      #         operator: In
  3818      #         values:
  3819      #         - e2e-az1
  3820      #         - e2e-az2
  3821  
  3822      ## If specified, the pod's tolerations.
  3823      ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  3824      ##
  3825      tolerations: []
  3826      # - key: "key"
  3827      #   operator: "Equal"
  3828      #   value: "value"
  3829      #   effect: "NoSchedule"
  3830  
  3831      ## If specified, the pod's topology spread constraints.
  3832      ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  3833      ##
  3834      topologySpreadConstraints: []
  3835      # - maxSkew: 1
  3836      #   topologyKey: topology.kubernetes.io/zone
  3837      #   whenUnsatisfiable: DoNotSchedule
  3838      #   labelSelector:
  3839      #     matchLabels:
  3840      #       app: thanos-ruler
  3841  
    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to a non-root user with uid 1000 and gid 2000.
  3844      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  3845      ##
  3846      securityContext:
  3847        runAsGroup: 2000
  3848        runAsNonRoot: true
  3849        runAsUser: 1000
  3850        fsGroup: 2000
  3851  
  3852      ## ListenLocal makes the ThanosRuler server listen on loopback, so that it does not bind against the Pod IP.
  3853      ## Note this is only for the ThanosRuler UI, not the gossip communication.
  3854      ##
  3855      listenLocal: false
  3856  
    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a ThanosRuler pod.
  3858      ##
  3859      containers: []
  3860  
  3861      # Additional volumes on the output StatefulSet definition.
  3862      volumes: []
  3863  
  3864      # Additional VolumeMounts on the output StatefulSet definition.
  3865      volumeMounts: []
  3866  
    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
    ## (permissions, dir tree) on mounted volumes before starting the thanos ruler pod
  3869      initContainers: []
  3870  
  3871      ## Priority class assigned to the Pods
  3872      ##
  3873      priorityClassName: ""
  3874  
  3875      ## PortName to use for ThanosRuler.
  3876      ##
  3877      portName: "web"
  3878  
  3879    ## ExtraSecret can be used to store various data in an extra secret
  3880    ## (use it for example to store hashed basic auth credentials)
  3881    extraSecret:
  3882      ## if not set, name will be auto generated
  3883      # name: ""
  3884      annotations: {}
  3885      data: {}
  3886    #   auth: |
  3887    #     foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
  3888    #     someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
  3889  
  3890  ## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
  3891  ##
  3892  cleanPrometheusOperatorObjectNames: false