github.com/microsoft/fabrikate@v1.0.0-alpha.1.0.20210115014322-dc09194d0885/testdata/local-charts/prometheus/values.yaml (about)

     1  rbac:
     2    create: true
     3  
     4  podSecurityPolicy:
     5    enabled: false
     6  
     7  imagePullSecrets:
     8  # - name: "image-pull-secret"
     9  
    10  ## Define serviceAccount names for components. Defaults to component's fully qualified name.
    11  ##
    12  serviceAccounts:
    13    alertmanager:
    14      create: true
    15      name:
    16      annotations: {}
    17    nodeExporter:
    18      create: true
    19      name:
    20      annotations: {}
    21    pushgateway:
    22      create: true
    23      name:
    24      annotations: {}
    25    server:
    26      create: true
    27      name:
    28      annotations: {}
    29  
    30  alertmanager:
    31    ## If false, alertmanager will not be installed
    32    ##
    33    enabled: true
    34  
    35    ## Use a ClusterRole (and ClusterRoleBinding)
    36    ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY
    37    ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but want alertmanager to operate on their own namespaces, instead of clusterwide.
    38    useClusterRole: true
    39  
    40    ## Set to a rolename to use existing role - skipping role creation - but still doing serviceaccount and rolebinding to the rolename set here.
    41    useExistingRole: false
    42  
    43    ## alertmanager container name
    44    ##
    45    name: alertmanager
    46  
    47    ## alertmanager container image
    48    ##
    49    image:
    50      repository: prom/alertmanager
    51      tag: v0.21.0
    52      pullPolicy: IfNotPresent
    53  
    54    ## alertmanager priorityClassName
    55    ##
    56    priorityClassName: ""
    57  
    58    ## Additional alertmanager container arguments
    59    ##
    60    extraArgs: {}
    61  
    62    ## Additional InitContainers to initialize the pod
    63    ##
    64    extraInitContainers: []
    65  
    66    ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
    67    ## so that the various internal URLs are still able to access as they are in the default case.
    68    ## (Optional)
    69    prefixURL: ""
    70  
    71    ## External URL which can access alertmanager
    72    baseURL: "http://localhost:9093"
    73  
    74    ## Additional alertmanager container environment variable
    75    ## For instance to add a http_proxy
    76    ##
    77    extraEnv: {}
    78  
    79    ## Additional alertmanager Secret mounts
    80    # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
    81    extraSecretMounts: []
    82      # - name: secret-files
    83      #   mountPath: /etc/secrets
    84      #   subPath: ""
    85      #   secretName: alertmanager-secret-files
    86      #   readOnly: true
    87  
    88    ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}}
    89    ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml
    90    ## to NOT generate a ConfigMap resource
    91    ##
    92    configMapOverrideName: ""
    93  
    94    ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config
    95    ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml
    96    ## to NOT generate a ConfigMap resource
    97    ##
    98    configFromSecret: ""
    99  
   100    ## The configuration file name to be loaded to alertmanager
   101    ## Must match the key within configuration loaded from ConfigMap/Secret
   102    ##
   103    configFileName: alertmanager.yml
   104  
   105    ingress:
   106      ## If true, alertmanager Ingress will be created
   107      ##
   108      enabled: false
   109  
   110      ## alertmanager Ingress annotations
   111      ##
   112      annotations: {}
   113      #   kubernetes.io/ingress.class: nginx
   114      #   kubernetes.io/tls-acme: 'true'
   115  
   116      ## alertmanager Ingress additional labels
   117      ##
   118      extraLabels: {}
   119  
   120      ## alertmanager Ingress hostnames with optional path
   121      ## Must be provided if Ingress is enabled
   122      ##
   123      hosts: []
   124      #   - alertmanager.domain.com
   125      #   - domain.com/alertmanager
   126  
   127      ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
   128      extraPaths: []
   129      # - path: /*
   130      #   backend:
   131      #     serviceName: ssl-redirect
   132      #     servicePort: use-annotation
   133  
   134      ## alertmanager Ingress TLS configuration
   135      ## Secrets must be manually created in the namespace
   136      ##
   137      tls: []
   138      #   - secretName: prometheus-alerts-tls
   139      #     hosts:
   140      #       - alertmanager.domain.com
   141  
   142    ## Alertmanager Deployment Strategy type
   143    # strategy:
   144    #   type: Recreate
   145  
   146    ## Node tolerations for alertmanager scheduling to nodes with taints
   147    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   148    ##
   149    tolerations: []
   150      # - key: "key"
   151      #   operator: "Equal|Exists"
   152      #   value: "value"
   153      #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
   154  
   155    ## Node labels for alertmanager pod assignment
   156    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
   157    ##
   158    nodeSelector: {}
   159  
   160    ## Pod affinity
   161    ##
   162    affinity: {}
   163  
   164    ## PodDisruptionBudget settings
   165    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
   166    ##
   167    podDisruptionBudget:
   168      enabled: false
   169      maxUnavailable: 1
   170  
   171    ## Use an alternate scheduler, e.g. "stork".
   172    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
   173    ##
   174    # schedulerName:
   175  
   176    persistentVolume:
   177      ## If true, alertmanager will create/use a Persistent Volume Claim
   178      ## If false, use emptyDir
   179      ##
   180      enabled: true
   181  
   182      ## alertmanager data Persistent Volume access modes
   183      ## Must match those of existing PV or dynamic provisioner
   184      ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
   185      ##
   186      accessModes:
   187        - ReadWriteOnce
   188  
   189      ## alertmanager data Persistent Volume Claim annotations
   190      ##
   191      annotations: {}
   192  
   193      ## alertmanager data Persistent Volume existing claim name
   194      ## Requires alertmanager.persistentVolume.enabled: true
   195      ## If defined, PVC must be created manually before volume will be bound
   196      existingClaim: ""
   197  
   198      ## alertmanager data Persistent Volume mount root path
   199      ##
   200      mountPath: /data
   201  
   202      ## alertmanager data Persistent Volume size
   203      ##
   204      size: 2Gi
   205  
   206      ## alertmanager data Persistent Volume Storage Class
   207      ## If defined, storageClassName: <storageClass>
   208      ## If set to "-", storageClassName: "", which disables dynamic provisioning
   209      ## If undefined (the default) or set to null, no storageClassName spec is
   210      ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
   211      ##   GKE, AWS & OpenStack)
   212      ##
   213      # storageClass: "-"
   214  
   215      ## alertmanager data Persistent Volume Binding Mode
   216      ## If defined, volumeBindingMode: <volumeBindingMode>
   217      ## If undefined (the default) or set to null, no volumeBindingMode spec is
   218      ##   set, choosing the default mode.
   219      ##
   220      # volumeBindingMode: ""
   221  
   222      ## Subdirectory of alertmanager data Persistent Volume to mount
   223      ## Useful if the volume's root directory is not empty
   224      ##
   225      subPath: ""
   226  
   227    emptyDir:
   228      ## alertmanager emptyDir volume size limit
   229      ##
   230      sizeLimit: ""
   231  
   232    ## Annotations to be added to alertmanager pods
   233    ##
   234    podAnnotations: {}
   235      ## Tell prometheus to use a specific set of alertmanager pods
   236      ## instead of all alertmanager pods found in the same namespace
   237      ## Useful if you deploy multiple releases within the same namespace
   238      ##
   239      ## prometheus.io/probe: alertmanager-teamA
   240  
   241    ## Labels to be added to Prometheus AlertManager pods
   242    ##
   243    podLabels: {}
   244  
   245    ## Specify if a Pod Security Policy for node-exporter must be created
   246    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
   247    ##
   248    podSecurityPolicy:
   249      annotations: {}
   250        ## Specify pod annotations
   251        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
   252        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
   253        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
   254        ##
   255        # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
   256        # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
   257        # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
   258  
   259    ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
   260    ##
   261    replicaCount: 1
   262  
   263    ## Annotations to be added to deployment
   264    ##
   265    deploymentAnnotations: {}
   266  
   267    statefulSet:
   268      ## If true, use a statefulset instead of a deployment for pod management.
   269      ## This allows to scale replicas to more than 1 pod
   270      ##
   271      enabled: false
   272  
   273      annotations: {}
   274      labels: {}
   275      podManagementPolicy: OrderedReady
   276  
   277      ## Alertmanager headless service to use for the statefulset
   278      ##
   279      headless:
   280        annotations: {}
   281        labels: {}
   282  
   283        ## Enabling peer mesh service end points for enabling the HA alert manager
   284        ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
   285        enableMeshPeer: false
   286  
   287        servicePort: 80
   288  
   289    ## alertmanager resource requests and limits
   290    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
   291    ##
   292    resources: {}
   293      # limits:
   294      #   cpu: 10m
   295      #   memory: 32Mi
   296      # requests:
   297      #   cpu: 10m
   298      #   memory: 32Mi
   299  
   300    ## Security context to be added to alertmanager pods
   301    ##
   302    securityContext:
   303      runAsUser: 65534
   304      runAsNonRoot: true
   305      runAsGroup: 65534
   306      fsGroup: 65534
   307  
   308    service:
   309      annotations: {}
   310      labels: {}
   311      clusterIP: ""
   312  
   313      ## Enabling peer mesh service end points for enabling the HA alert manager
   314      ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
   315      # enableMeshPeer : true
   316  
   317      ## List of IP addresses at which the alertmanager service is available
   318      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
   319      ##
   320      externalIPs: []
   321  
   322      loadBalancerIP: ""
   323      loadBalancerSourceRanges: []
   324      servicePort: 80
   325      # nodePort: 30000
   326      sessionAffinity: None
   327      type: ClusterIP
   328  
   329  ## Monitors ConfigMap changes and POSTs to a URL
   330  ## Ref: https://github.com/jimmidyson/configmap-reload
   331  ##
   332  configmapReload:
   333    prometheus:
   334      ## If false, the configmap-reload container will not be deployed
   335      ##
   336      enabled: true
   337  
   338      ## configmap-reload container name
   339      ##
   340      name: configmap-reload
   341  
   342      ## configmap-reload container image
   343      ##
   344      image:
   345        repository: jimmidyson/configmap-reload
   346        tag: v0.4.0
   347        pullPolicy: IfNotPresent
   348  
   349      ## Additional configmap-reload container arguments
   350      ##
   351      extraArgs: {}
   352      ## Additional configmap-reload volume directories
   353      ##
   354      extraVolumeDirs: []
   355  
   356  
   357      ## Additional configmap-reload mounts
   358      ##
   359      extraConfigmapMounts: []
   360        # - name: prometheus-alerts
   361        #   mountPath: /etc/alerts.d
   362        #   subPath: ""
   363        #   configMap: prometheus-alerts
   364        #   readOnly: true
   365  
   366  
   367      ## configmap-reload resource requests and limits
   368      ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
   369      ##
   370      resources: {}
   371    alertmanager:
   372      ## If false, the configmap-reload container will not be deployed
   373      ##
   374      enabled: true
   375  
   376      ## configmap-reload container name
   377      ##
   378      name: configmap-reload
   379  
   380      ## configmap-reload container image
   381      ##
   382      image:
   383        repository: jimmidyson/configmap-reload
   384        tag: v0.4.0
   385        pullPolicy: IfNotPresent
   386  
   387      ## Additional configmap-reload container arguments
   388      ##
   389      extraArgs: {}
   390      ## Additional configmap-reload volume directories
   391      ##
   392      extraVolumeDirs: []
   393  
   394  
   395      ## Additional configmap-reload mounts
   396      ##
   397      extraConfigmapMounts: []
   398        # - name: prometheus-alerts
   399        #   mountPath: /etc/alerts.d
   400        #   subPath: ""
   401        #   configMap: prometheus-alerts
   402        #   readOnly: true
   403  
   404  
   405      ## configmap-reload resource requests and limits
   406      ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
   407      ##
   408      resources: {}
   409  
   410  kubeStateMetrics:
   411    ## If false, kube-state-metrics sub-chart will not be installed
   412    ##
   413    enabled: true
   414  
   415  ## kube-state-metrics sub-chart configurable values
   416  ## Please see https://github.com/helm/charts/tree/master/stable/kube-state-metrics
   417  ##
   418  # kube-state-metrics:
   419  
   420  nodeExporter:
   421    ## If false, node-exporter will not be installed
   422    ##
   423    enabled: true
   424  
   425    ## If true, node-exporter pods share the host network namespace
   426    ##
   427    hostNetwork: true
   428  
   429    ## If true, node-exporter pods share the host PID namespace
   430    ##
   431    hostPID: true
   432  
   433    ## node-exporter container name
   434    ##
   435    name: node-exporter
   436  
   437    ## node-exporter container image
   438    ##
   439    image:
   440      repository: prom/node-exporter
   441      tag: v1.0.1
   442      pullPolicy: IfNotPresent
   443  
   444    ## Specify if a Pod Security Policy for node-exporter must be created
   445    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
   446    ##
   447    podSecurityPolicy:
   448      annotations: {}
   449        ## Specify pod annotations
   450        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
   451        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
   452        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
   453        ##
   454        # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
   455        # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
   456        # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
   457  
   458    ## node-exporter priorityClassName
   459    ##
   460    priorityClassName: ""
   461  
   462    ## Custom Update Strategy
   463    ##
   464    updateStrategy:
   465      type: RollingUpdate
   466  
   467    ## Additional node-exporter container arguments
   468    ##
   469    extraArgs: {}
   470  
   471    ## Additional InitContainers to initialize the pod
   472    ##
   473    extraInitContainers: []
   474  
   475    ## Additional node-exporter hostPath mounts
   476    ##
   477    extraHostPathMounts: []
   478      # - name: textfile-dir
   479      #   mountPath: /srv/txt_collector
   480      #   hostPath: /var/lib/node-exporter
   481      #   readOnly: true
   482      #   mountPropagation: HostToContainer
   483  
   484    extraConfigmapMounts: []
   485      # - name: certs-configmap
   486      #   mountPath: /prometheus
   487      #   configMap: certs-configmap
   488      #   readOnly: true
   489  
   490    ## Node tolerations for node-exporter scheduling to nodes with taints
   491    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   492    ##
   493    tolerations: []
   494      # - key: "key"
   495      #   operator: "Equal|Exists"
   496      #   value: "value"
   497      #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
   498  
   499    ## Node labels for node-exporter pod assignment
   500    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
   501    ##
   502    nodeSelector: {}
   503  
   504    ## Annotations to be added to node-exporter pods
   505    ##
   506    podAnnotations: {}
   507  
   508    ## Labels to be added to node-exporter pods
   509    ##
   510    pod:
   511      labels: {}
   512  
   513    ## PodDisruptionBudget settings
   514    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
   515    ##
   516    podDisruptionBudget:
   517      enabled: false
   518      maxUnavailable: 1
   519  
   520    ## node-exporter resource limits & requests
   521    ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
   522    ##
   523    resources: {}
   524      # limits:
   525      #   cpu: 200m
   526      #   memory: 50Mi
   527      # requests:
   528      #   cpu: 100m
   529      #   memory: 30Mi
   530  
   531    ## Security context to be added to node-exporter pods
   532    ##
   533    securityContext: {}
   534      # runAsUser: 0
   535  
   536    service:
   537      annotations:
   538        prometheus.io/scrape: "true"
   539      labels: {}
   540  
   541      # Exposed as a headless service:
   542      # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
   543      clusterIP: None
   544  
   545      ## List of IP addresses at which the node-exporter service is available
   546      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
   547      ##
   548      externalIPs: []
   549  
   550      hostPort: 9100
   551      loadBalancerIP: ""
   552      loadBalancerSourceRanges: []
   553      servicePort: 9100
   554      type: ClusterIP
   555  
   556  server:
   557    ## Prometheus server container name
   558    ##
   559    enabled: true
   560  
   561    ## Use a ClusterRole (and ClusterRoleBinding)
   562    ## - If set to false - we define a RoleBinding in the defined namespaces ONLY
   563    ##
   564    ## NB: because we need a Role with nonResourceURLs ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled.
   565    ##     This makes prometheus work - for users who do not have ClusterAdmin privs, but want prometheus to operate on their own namespaces, instead of clusterwide.
   566    ##
   567    ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus.
   568    ##
   569    # useExistingClusterRoleName: nameofclusterrole
   570  
   571    ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges.
   572    # namespaces:
   573    #   - yournamespace
   574  
   575    name: server
   576    sidecarContainers:
   577  
   578    ## Prometheus server container image
   579    ##
   580    image:
   581      repository: prom/prometheus
   582      tag: v2.21.0
   583      pullPolicy: IfNotPresent
   584  
   585    ## prometheus server priorityClassName
   586    ##
   587    priorityClassName: ""
   588  
   589    ## EnableServiceLinks indicates whether information about services should be injected
   590    ## into pod's environment variables, matching the syntax of Docker links.
   591    ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0.
   592    ##
   593    enableServiceLinks: true
   594  
   595    ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug
   596    ## so that the various internal URLs are still able to access as they are in the default case.
   597    ## (Optional)
   598    prefixURL: ""
   599  
   600    ## External URL which can access prometheus
   601    ## Maybe same with Ingress host name
   602    baseURL: ""
   603  
   604    ## Additional server container environment variables
   605    ##
   606    ## You specify this manually like you would a raw deployment manifest.
   607    ## This means you can bind in environment variables from secrets.
   608    ##
   609    ## e.g. static environment variable:
   610    ##  - name: DEMO_GREETING
   611    ##    value: "Hello from the environment"
   612    ##
   613    ## e.g. secret environment variable:
   614    ## - name: USERNAME
   615    ##   valueFrom:
   616    ##     secretKeyRef:
   617    ##       name: mysecret
   618    ##       key: username
   619    env: []
   620  
   621    extraFlags:
   622      - web.enable-lifecycle
   623      ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as
   624      ## deleting time series. This is disabled by default.
   625      # - web.enable-admin-api
   626      ##
   627    ## storage.tsdb.no-lockfile flag controls DB locking
   628      # - storage.tsdb.no-lockfile
   629      ##
   630      ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL)
   631      # - storage.tsdb.wal-compression
   632  
   633    ## Path to a configuration file on prometheus server container FS
   634    configPath: /etc/config/prometheus.yml
   635  
   636    global:
   637      ## How frequently to scrape targets by default
   638      ##
   639      scrape_interval: 1m
   640      ## How long until a scrape request times out
   641      ##
   642      scrape_timeout: 10s
   643      ## How frequently to evaluate rules
   644      ##
   645      evaluation_interval: 1m
   646    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
   647    ##
   648    remoteWrite: []
   649    ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
   650    ##
   651    remoteRead: []
   652  
   653    ## Additional Prometheus server container arguments
   654    ##
   655    extraArgs: {}
   656  
   657    ## Additional InitContainers to initialize the pod
   658    ##
   659    extraInitContainers: []
   660  
   661    ## Additional Prometheus server Volume mounts
   662    ##
   663    extraVolumeMounts: []
   664  
   665    ## Additional Prometheus server Volumes
   666    ##
   667    extraVolumes: []
   668  
   669    ## Additional Prometheus server hostPath mounts
   670    ##
   671    extraHostPathMounts: []
   672      # - name: certs-dir
   673      #   mountPath: /etc/kubernetes/certs
   674      #   subPath: ""
   675      #   hostPath: /etc/kubernetes/certs
   676      #   readOnly: true
   677  
   678    extraConfigmapMounts: []
   679      # - name: certs-configmap
   680      #   mountPath: /prometheus
   681      #   subPath: ""
   682      #   configMap: certs-configmap
   683      #   readOnly: true
   684  
   685    ## Additional Prometheus server Secret mounts
   686    # Defines additional mounts with secrets. Secrets must be manually created in the namespace.
   687    extraSecretMounts: []
   688      # - name: secret-files
   689      #   mountPath: /etc/secrets
   690      #   subPath: ""
   691      #   secretName: prom-secret-files
   692      #   readOnly: true
   693  
   694    ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}}
   695    ## Defining configMapOverrideName will cause templates/server-configmap.yaml
   696    ## to NOT generate a ConfigMap resource
   697    ##
   698    configMapOverrideName: ""
   699  
   700    ingress:
   701      ## If true, Prometheus server Ingress will be created
   702      ##
   703      enabled: false
   704  
   705      ## Prometheus server Ingress annotations
   706      ##
   707      annotations: {}
   708      #   kubernetes.io/ingress.class: nginx
   709      #   kubernetes.io/tls-acme: 'true'
   710  
   711      ## Prometheus server Ingress additional labels
   712      ##
   713      extraLabels: {}
   714  
   715      ## Prometheus server Ingress hostnames with optional path
   716      ## Must be provided if Ingress is enabled
   717      ##
   718      hosts: []
   719      #   - prometheus.domain.com
   720      #   - domain.com/prometheus
   721  
   722      ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
   723      extraPaths: []
   724      # - path: /*
   725      #   backend:
   726      #     serviceName: ssl-redirect
   727      #     servicePort: use-annotation
   728  
   729      ## Prometheus server Ingress TLS configuration
   730      ## Secrets must be manually created in the namespace
   731      ##
   732      tls: []
   733      #   - secretName: prometheus-server-tls
   734      #     hosts:
   735      #       - prometheus.domain.com
   736  
   737    ## Server Deployment Strategy type
   738    # strategy:
   739    #   type: Recreate
   740  
   741    ## hostAliases allows adding entries to /etc/hosts inside the containers
   742    hostAliases: []
   743    #   - ip: "127.0.0.1"
   744    #     hostnames:
   745    #       - "example.com"
   746  
   747    ## Node tolerations for server scheduling to nodes with taints
   748    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   749    ##
   750    tolerations: []
   751      # - key: "key"
   752      #   operator: "Equal|Exists"
   753      #   value: "value"
   754      #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
   755  
   756    ## Node labels for Prometheus server pod assignment
   757    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
   758    ##
   759    nodeSelector: {}
   760  
   761    ## Pod affinity
   762    ##
   763    affinity: {}
   764  
   765    ## PodDisruptionBudget settings
   766    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
   767    ##
   768    podDisruptionBudget:
   769      enabled: false
   770      maxUnavailable: 1
   771  
   772    ## Use an alternate scheduler, e.g. "stork".
   773    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
   774    ##
   775    # schedulerName:
   776  
   777    persistentVolume:
   778      ## If true, Prometheus server will create/use a Persistent Volume Claim
   779      ## If false, use emptyDir
   780      ##
   781      enabled: true
   782  
   783      ## Prometheus server data Persistent Volume access modes
   784      ## Must match those of existing PV or dynamic provisioner
   785      ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
   786      ##
   787      accessModes:
   788        - ReadWriteOnce
   789  
   790      ## Prometheus server data Persistent Volume annotations
   791      ##
   792      annotations: {}
   793  
   794      ## Prometheus server data Persistent Volume existing claim name
   795      ## Requires server.persistentVolume.enabled: true
   796      ## If defined, PVC must be created manually before volume will be bound
   797      existingClaim: ""
   798  
   799      ## Prometheus server data Persistent Volume mount root path
   800      ##
   801      mountPath: /data
   802  
   803      ## Prometheus server data Persistent Volume size
   804      ##
   805      size: 8Gi
   806  
   807      ## Prometheus server data Persistent Volume Storage Class
   808      ## If defined, storageClassName: <storageClass>
   809      ## If set to "-", storageClassName: "", which disables dynamic provisioning
   810      ## If undefined (the default) or set to null, no storageClassName spec is
   811      ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
   812      ##   GKE, AWS & OpenStack)
   813      ##
   814      # storageClass: "-"
   815  
   816      ## Prometheus server data Persistent Volume Binding Mode
   817      ## If defined, volumeBindingMode: <volumeBindingMode>
   818      ## If undefined (the default) or set to null, no volumeBindingMode spec is
   819      ##   set, choosing the default mode.
   820      ##
   821      # volumeBindingMode: ""
   822  
   823      ## Subdirectory of Prometheus server data Persistent Volume to mount
   824      ## Useful if the volume's root directory is not empty
   825      ##
   826      subPath: ""
   827  
   828    emptyDir:
   829      ## Prometheus server emptyDir volume size limit
   830      ##
   831      sizeLimit: ""
   832  
   833    ## Annotations to be added to Prometheus server pods
   834    ##
   835    podAnnotations: {}
   836      # iam.amazonaws.com/role: prometheus
   837  
   838    ## Labels to be added to Prometheus server pods
   839    ##
   840    podLabels: {}
   841  
   842    ## Prometheus AlertManager configuration
   843    ##
   844    alertmanagers: []
   845  
   846    ## Specify if a Pod Security Policy for node-exporter must be created
   847    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
   848    ##
   849    podSecurityPolicy:
   850      annotations: {}
   851        ## Specify pod annotations
   852        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
   853        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
   854        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
   855        ##
   856        # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
   857        # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
   858        # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
   859  
   860    ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below)
   861    ##
   862    replicaCount: 1
   863  
   864    ## Annotations to be added to deployment
   865    ##
   866    deploymentAnnotations: {}
   867  
   868    statefulSet:
   869      ## If true, use a statefulset instead of a deployment for pod management.
   870      ## This allows to scale replicas to more than 1 pod
   871      ##
   872      enabled: false
   873  
   874      annotations: {}
   875      labels: {}
   876      podManagementPolicy: OrderedReady
   877  
    ## Prometheus server headless service to use for the statefulset
   879      ##
   880      headless:
   881        annotations: {}
   882        labels: {}
   883        servicePort: 80
   884        ## Enable gRPC port on service to allow auto discovery with thanos-querier
   885        gRPC:
   886          enabled: false
   887          servicePort: 10901
   888          # nodePort: 10901
   889  
   890    ## Prometheus server readiness and liveness probe initial delay and timeout
   891    ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
   892    ##
   893    readinessProbeInitialDelay: 30
   894    readinessProbePeriodSeconds: 5
   895    readinessProbeTimeout: 30
   896    readinessProbeFailureThreshold: 3
   897    readinessProbeSuccessThreshold: 1
   898    livenessProbeInitialDelay: 30
   899    livenessProbePeriodSeconds: 15
   900    livenessProbeTimeout: 30
   901    livenessProbeFailureThreshold: 3
   902    livenessProbeSuccessThreshold: 1
   903  
   904    ## Prometheus server resource requests and limits
   905    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
   906    ##
   907    resources: {}
   908      # limits:
   909      #   cpu: 500m
   910      #   memory: 512Mi
   911      # requests:
   912      #   cpu: 500m
   913      #   memory: 512Mi
   914  
   915    ## Vertical Pod Autoscaler config
   916    ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
   917    verticalAutoscaler:
    ## If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs)
   919      enabled: false
   920      # updateMode: "Auto"
   921      # containerPolicies:
   922      # - containerName: 'prometheus-server'
   923  
   924    ## Security context to be added to server pods
   925    ##
   926    securityContext:
   927      runAsUser: 65534
   928      runAsNonRoot: true
   929      runAsGroup: 65534
   930      fsGroup: 65534
   931  
   932    service:
   933      annotations: {}
   934      labels: {}
   935      clusterIP: ""
   936  
   937      ## List of IP addresses at which the Prometheus server service is available
   938      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
   939      ##
   940      externalIPs: []
   941  
   942      loadBalancerIP: ""
   943      loadBalancerSourceRanges: []
   944      servicePort: 80
   945      sessionAffinity: None
   946      type: ClusterIP
   947  
   948      ## Enable gRPC port on service to allow auto discovery with thanos-querier
   949      gRPC:
   950        enabled: false
   951        servicePort: 10901
   952        # nodePort: 10901
   953  
   954      ## If using a statefulSet (statefulSet.enabled=true), configure the
   955      ## service to connect to a specific replica to have a consistent view
   956      ## of the data.
   957      statefulsetReplica:
   958        enabled: false
   959        replica: 0
   960  
   961    ## Prometheus server pod termination grace period
   962    ##
   963    terminationGracePeriodSeconds: 300
   964  
   965    ## Prometheus data retention period (default if not specified is 15 days)
   966    ##
   967    retention: "15d"
   968  
   969  pushgateway:
   970    ## If false, pushgateway will not be installed
   971    ##
   972    enabled: true
   973  
   974    ## Use an alternate scheduler, e.g. "stork".
   975    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
   976    ##
   977    # schedulerName:
   978  
   979    ## pushgateway container name
   980    ##
   981    name: pushgateway
   982  
   983    ## pushgateway container image
   984    ##
   985    image:
   986      repository: prom/pushgateway
   987      tag: v1.2.0
   988      pullPolicy: IfNotPresent
   989  
   990    ## pushgateway priorityClassName
   991    ##
   992    priorityClassName: ""
   993  
   994    ## Additional pushgateway container arguments
   995    ##
   996    ## for example: persistence.file: /data/pushgateway.data
   997    extraArgs: {}
   998  
   999    ## Additional InitContainers to initialize the pod
  1000    ##
  1001    extraInitContainers: []
  1002  
  1003    ingress:
  1004      ## If true, pushgateway Ingress will be created
  1005      ##
  1006      enabled: false
  1007  
  1008      ## pushgateway Ingress annotations
  1009      ##
  1010      annotations: {}
  1011      #   kubernetes.io/ingress.class: nginx
  1012      #   kubernetes.io/tls-acme: 'true'
  1013  
  1014      ## pushgateway Ingress hostnames with optional path
  1015      ## Must be provided if Ingress is enabled
  1016      ##
  1017      hosts: []
  1018      #   - pushgateway.domain.com
  1019      #   - domain.com/pushgateway
  1020  
  1021      ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
  1022      extraPaths: []
  1023      # - path: /*
  1024      #   backend:
  1025      #     serviceName: ssl-redirect
  1026      #     servicePort: use-annotation
  1027  
  1028      ## pushgateway Ingress TLS configuration
  1029      ## Secrets must be manually created in the namespace
  1030      ##
  1031      tls: []
  1032      #   - secretName: prometheus-alerts-tls
  1033      #     hosts:
  1034      #       - pushgateway.domain.com
  1035  
  1036    ## Node tolerations for pushgateway scheduling to nodes with taints
  1037    ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
  1038    ##
  1039    tolerations: []
  1040      # - key: "key"
  1041      #   operator: "Equal|Exists"
  1042      #   value: "value"
  1043      #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
  1044  
  1045    ## Node labels for pushgateway pod assignment
  1046    ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
  1047    ##
  1048    nodeSelector: {}
  1049  
  1050    ## Annotations to be added to pushgateway pods
  1051    ##
  1052    podAnnotations: {}
  1053  
  1054    ## Labels to be added to pushgateway pods
  1055    ##
  1056    podLabels: {}
  1057  
  ## Specify if a Pod Security Policy for pushgateway must be created
  1059    ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
  1060    ##
  1061    podSecurityPolicy:
  1062      annotations: {}
  1063        ## Specify pod annotations
  1064        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
  1065        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
  1066        ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
  1067        ##
  1068        # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  1069        # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
  1070        # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
  1071  
  1072    replicaCount: 1
  1073  
  1074    ## Annotations to be added to deployment
  1075    ##
  1076    deploymentAnnotations: {}
  1077  
  1078    ## PodDisruptionBudget settings
  1079    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
  1080    ##
  1081    podDisruptionBudget:
  1082      enabled: false
  1083      maxUnavailable: 1
  1084  
  1085    ## pushgateway resource requests and limits
  1086    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
  1087    ##
  1088    resources: {}
  1089      # limits:
  1090      #   cpu: 10m
  1091      #   memory: 32Mi
  1092      # requests:
  1093      #   cpu: 10m
  1094      #   memory: 32Mi
  1095  
  1096    ## Security context to be added to push-gateway pods
  1097    ##
  1098    securityContext:
  1099      runAsUser: 65534
  1100      runAsNonRoot: true
  1101  
  1102    service:
  1103      annotations:
  1104        prometheus.io/probe: pushgateway
  1105      labels: {}
  1106      clusterIP: ""
  1107  
  1108      ## List of IP addresses at which the pushgateway service is available
  1109      ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  1110      ##
  1111      externalIPs: []
  1112  
  1113      loadBalancerIP: ""
  1114      loadBalancerSourceRanges: []
  1115      servicePort: 9091
  1116      type: ClusterIP
  1117  
  1118    ## pushgateway Deployment Strategy type
  1119    # strategy:
  1120    #   type: Recreate
  1121  
  1122    persistentVolume:
  1123      ## If true, pushgateway will create/use a Persistent Volume Claim
  1124      ##
  1125      enabled: false
  1126  
  1127      ## pushgateway data Persistent Volume access modes
  1128      ## Must match those of existing PV or dynamic provisioner
  1129      ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
  1130      ##
  1131      accessModes:
  1132        - ReadWriteOnce
  1133  
  1134      ## pushgateway data Persistent Volume Claim annotations
  1135      ##
  1136      annotations: {}
  1137  
  1138      ## pushgateway data Persistent Volume existing claim name
  1139      ## Requires pushgateway.persistentVolume.enabled: true
  1140      ## If defined, PVC must be created manually before volume will be bound
  1141      existingClaim: ""
  1142  
  1143      ## pushgateway data Persistent Volume mount root path
  1144      ##
  1145      mountPath: /data
  1146  
  1147      ## pushgateway data Persistent Volume size
  1148      ##
  1149      size: 2Gi
  1150  
  1151      ## pushgateway data Persistent Volume Storage Class
  1152      ## If defined, storageClassName: <storageClass>
  1153      ## If set to "-", storageClassName: "", which disables dynamic provisioning
  1154      ## If undefined (the default) or set to null, no storageClassName spec is
  1155      ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
  1156      ##   GKE, AWS & OpenStack)
  1157      ##
  1158      # storageClass: "-"
  1159  
  1160      ## pushgateway data Persistent Volume Binding Mode
  1161      ## If defined, volumeBindingMode: <volumeBindingMode>
  1162      ## If undefined (the default) or set to null, no volumeBindingMode spec is
  1163      ##   set, choosing the default mode.
  1164      ##
  1165      # volumeBindingMode: ""
  1166  
  1167      ## Subdirectory of pushgateway data Persistent Volume to mount
  1168      ## Useful if the volume's root directory is not empty
  1169      ##
  1170      subPath: ""
  1171  
  1172  
  1173  ## alertmanager ConfigMap entries
  1174  ##
  1175  alertmanagerFiles:
  1176    alertmanager.yml:
  1177      global: {}
  1178        # slack_api_url: ''
  1179  
  1180      receivers:
  1181        - name: default-receiver
  1182          # slack_configs:
  1183          #  - channel: '@you'
  1184          #    send_resolved: true
  1185  
  1186      route:
  1187        group_wait: 10s
  1188        group_interval: 5m
  1189        receiver: default-receiver
  1190        repeat_interval: 3h
  1191  
  1192  ## Prometheus server ConfigMap entries
  1193  ##
  1194  serverFiles:
  1195  
  1196    ## Alerts configuration
  1197    ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/
  1198    alerting_rules.yml: {}
  1199    # groups:
  1200    #   - name: Instances
  1201    #     rules:
  1202    #       - alert: InstanceDown
  1203    #         expr: up == 0
  1204    #         for: 5m
  1205    #         labels:
  1206    #           severity: page
  1207    #         annotations:
  1208    #           description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.'
  1209    #           summary: 'Instance {{ $labels.instance }} down'
  ## DEPRECATED DEFAULT VALUE: unless explicitly naming your files, please use alerting_rules.yml
  1211    alerts: {}
  1212  
  1213    ## Records configuration
  1214    ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
  1215    recording_rules.yml: {}
  ## DEPRECATED DEFAULT VALUE: unless explicitly naming your files, please use recording_rules.yml
  1217    rules: {}
  1218  
  1219    prometheus.yml:
  1220      rule_files:
  1221        - /etc/config/recording_rules.yml
  1222        - /etc/config/alerting_rules.yml
    ## Below two files are DEPRECATED and will be removed from this default values file
  1224        - /etc/config/rules
  1225        - /etc/config/alerts
  1226  
  1227      scrape_configs:
  1228        - job_name: prometheus
  1229          static_configs:
  1230            - targets:
  1231              - localhost:9090
  1232  
  1233        # A scrape configuration for running Prometheus on a Kubernetes cluster.
  1234        # This uses separate scrape configs for cluster components (i.e. API server, node)
  1235        # and services to allow each to use different authentication configs.
  1236        #
  1237        # Kubernetes labels will be added as Prometheus labels on metrics via the
  1238        # `labelmap` relabeling action.
  1239  
  1240        # Scrape config for API servers.
  1241        #
  1242        # Kubernetes exposes API servers as endpoints to the default/kubernetes
  1243        # service so this uses `endpoints` role and uses relabelling to only keep
  1244        # the endpoints associated with the default/kubernetes service using the
  1245        # default named port `https`. This works for single API server deployments as
  1246        # well as HA API server deployments.
  1247        - job_name: 'kubernetes-apiservers'
  1248  
  1249          kubernetes_sd_configs:
  1250            - role: endpoints
  1251  
  1252          # Default to scraping over https. If required, just disable this or change to
  1253          # `http`.
  1254          scheme: https
  1255  
  1256          # This TLS & bearer token file config is used to connect to the actual scrape
  1257          # endpoints for cluster components. This is separate to discovery auth
  1258          # configuration because discovery & scraping are two separate concerns in
  1259          # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  1260          # the cluster. Otherwise, more config options have to be provided within the
  1261          # <kubernetes_sd_config>.
  1262          tls_config:
  1263            ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  1264            # If your node certificates are self-signed or use a different CA to the
  1265            # master CA, then disable certificate verification below. Note that
  1266            # certificate verification is an integral part of a secure infrastructure
          # so this should only be disabled in a controlled environment. Certificate
          # verification is disabled by the insecure_skip_verify setting below.
  1269            #
  1270            insecure_skip_verify: true
  1271          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  1272  
  1273          # Keep only the default/kubernetes service endpoints for the https port. This
  1274          # will add targets for each API server which Kubernetes adds an endpoint to
  1275          # the default/kubernetes service.
  1276          relabel_configs:
  1277            - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
  1278              action: keep
  1279              regex: default;kubernetes;https
  1280  
  1281        - job_name: 'kubernetes-nodes'
  1282  
  1283          # Default to scraping over https. If required, just disable this or change to
  1284          # `http`.
  1285          scheme: https
  1286  
  1287          # This TLS & bearer token file config is used to connect to the actual scrape
  1288          # endpoints for cluster components. This is separate to discovery auth
  1289          # configuration because discovery & scraping are two separate concerns in
  1290          # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  1291          # the cluster. Otherwise, more config options have to be provided within the
  1292          # <kubernetes_sd_config>.
  1293          tls_config:
  1294            ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  1295            # If your node certificates are self-signed or use a different CA to the
  1296            # master CA, then disable certificate verification below. Note that
  1297            # certificate verification is an integral part of a secure infrastructure
          # so this should only be disabled in a controlled environment. Certificate
          # verification is disabled by the insecure_skip_verify setting below.
  1300            #
  1301            insecure_skip_verify: true
  1302          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  1303  
  1304          kubernetes_sd_configs:
  1305            - role: node
  1306  
  1307          relabel_configs:
  1308            - action: labelmap
  1309              regex: __meta_kubernetes_node_label_(.+)
  1310            - target_label: __address__
  1311              replacement: kubernetes.default.svc:443
  1312            - source_labels: [__meta_kubernetes_node_name]
  1313              regex: (.+)
  1314              target_label: __metrics_path__
  1315              replacement: /api/v1/nodes/$1/proxy/metrics
  1316  
  1317  
  1318        - job_name: 'kubernetes-nodes-cadvisor'
  1319  
  1320          # Default to scraping over https. If required, just disable this or change to
  1321          # `http`.
  1322          scheme: https
  1323  
  1324          # This TLS & bearer token file config is used to connect to the actual scrape
  1325          # endpoints for cluster components. This is separate to discovery auth
  1326          # configuration because discovery & scraping are two separate concerns in
  1327          # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  1328          # the cluster. Otherwise, more config options have to be provided within the
  1329          # <kubernetes_sd_config>.
  1330          tls_config:
  1331            ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  1332            # If your node certificates are self-signed or use a different CA to the
  1333            # master CA, then disable certificate verification below. Note that
  1334            # certificate verification is an integral part of a secure infrastructure
          # so this should only be disabled in a controlled environment. Certificate
          # verification is disabled by the insecure_skip_verify setting below.
  1337            #
  1338            insecure_skip_verify: true
  1339          bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
  1340  
  1341          kubernetes_sd_configs:
  1342            - role: node
  1343  
  1344          # This configuration will work only on kubelet 1.7.3+
  1345          # As the scrape endpoints for cAdvisor have changed
  1346          # if you are using older version you need to change the replacement to
  1347          # replacement: /api/v1/nodes/$1:4194/proxy/metrics
  1348          # more info here https://github.com/coreos/prometheus-operator/issues/633
  1349          relabel_configs:
  1350            - action: labelmap
  1351              regex: __meta_kubernetes_node_label_(.+)
  1352            - target_label: __address__
  1353              replacement: kubernetes.default.svc:443
  1354            - source_labels: [__meta_kubernetes_node_name]
  1355              regex: (.+)
  1356              target_label: __metrics_path__
  1357              replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
  1358  
  1359        # Scrape config for service endpoints.
  1360        #
  1361        # The relabeling allows the actual service scrape endpoint to be configured
  1362        # via the following annotations:
  1363        #
  1364        # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
  1365        # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
  1366        # to set this to `https` & most likely set the `tls_config` of the scrape config.
  1367        # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
  1368        # * `prometheus.io/port`: If the metrics are exposed on a different port to the
  1369        # service then set this appropriately.
  1370        - job_name: 'kubernetes-service-endpoints'
  1371  
  1372          kubernetes_sd_configs:
  1373            - role: endpoints
  1374  
  1375          relabel_configs:
  1376            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
  1377              action: keep
  1378              regex: true
  1379            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
  1380              action: replace
  1381              target_label: __scheme__
  1382              regex: (https?)
  1383            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
  1384              action: replace
  1385              target_label: __metrics_path__
  1386              regex: (.+)
  1387            - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
  1388              action: replace
  1389              target_label: __address__
  1390              regex: ([^:]+)(?::\d+)?;(\d+)
  1391              replacement: $1:$2
  1392            - action: labelmap
  1393              regex: __meta_kubernetes_service_label_(.+)
  1394            - source_labels: [__meta_kubernetes_namespace]
  1395              action: replace
  1396              target_label: kubernetes_namespace
  1397            - source_labels: [__meta_kubernetes_service_name]
  1398              action: replace
  1399              target_label: kubernetes_name
  1400            - source_labels: [__meta_kubernetes_pod_node_name]
  1401              action: replace
  1402              target_label: kubernetes_node
  1403  
  1404        # Scrape config for slow service endpoints; same as above, but with a larger
  1405        # timeout and a larger interval
  1406        #
  1407        # The relabeling allows the actual service scrape endpoint to be configured
  1408        # via the following annotations:
  1409        #
  1410        # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
  1411        # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
  1412        # to set this to `https` & most likely set the `tls_config` of the scrape config.
  1413        # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
  1414        # * `prometheus.io/port`: If the metrics are exposed on a different port to the
  1415        # service then set this appropriately.
  1416        - job_name: 'kubernetes-service-endpoints-slow'
  1417  
  1418          scrape_interval: 5m
  1419          scrape_timeout: 30s
  1420  
  1421          kubernetes_sd_configs:
  1422            - role: endpoints
  1423  
  1424          relabel_configs:
  1425            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
  1426              action: keep
  1427              regex: true
  1428            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
  1429              action: replace
  1430              target_label: __scheme__
  1431              regex: (https?)
  1432            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
  1433              action: replace
  1434              target_label: __metrics_path__
  1435              regex: (.+)
  1436            - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
  1437              action: replace
  1438              target_label: __address__
  1439              regex: ([^:]+)(?::\d+)?;(\d+)
  1440              replacement: $1:$2
  1441            - action: labelmap
  1442              regex: __meta_kubernetes_service_label_(.+)
  1443            - source_labels: [__meta_kubernetes_namespace]
  1444              action: replace
  1445              target_label: kubernetes_namespace
  1446            - source_labels: [__meta_kubernetes_service_name]
  1447              action: replace
  1448              target_label: kubernetes_name
  1449            - source_labels: [__meta_kubernetes_pod_node_name]
  1450              action: replace
  1451              target_label: kubernetes_node
  1452  
  1453        - job_name: 'prometheus-pushgateway'
  1454          honor_labels: true
  1455  
  1456          kubernetes_sd_configs:
  1457            - role: service
  1458  
  1459          relabel_configs:
  1460            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
  1461              action: keep
  1462              regex: pushgateway
  1463  
  1464        # Example scrape config for probing services via the Blackbox Exporter.
  1465        #
  1466        # The relabeling allows the actual service scrape endpoint to be configured
  1467        # via the following annotations:
  1468        #
  1469        # * `prometheus.io/probe`: Only probe services that have a value of `true`
  1470        - job_name: 'kubernetes-services'
  1471  
  1472          metrics_path: /probe
  1473          params:
  1474            module: [http_2xx]
  1475  
  1476          kubernetes_sd_configs:
  1477            - role: service
  1478  
  1479          relabel_configs:
  1480            - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
  1481              action: keep
  1482              regex: true
  1483            - source_labels: [__address__]
  1484              target_label: __param_target
  1485            - target_label: __address__
  1486              replacement: blackbox
  1487            - source_labels: [__param_target]
  1488              target_label: instance
  1489            - action: labelmap
  1490              regex: __meta_kubernetes_service_label_(.+)
  1491            - source_labels: [__meta_kubernetes_namespace]
  1492              target_label: kubernetes_namespace
  1493            - source_labels: [__meta_kubernetes_service_name]
  1494              target_label: kubernetes_name
  1495  
  1496        # Example scrape config for pods
  1497        #
  1498        # The relabeling allows the actual pod scrape endpoint to be configured via the
  1499        # following annotations:
  1500        #
  1501        # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
  1502        # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
  1503        # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
  1504        - job_name: 'kubernetes-pods'
  1505  
  1506          kubernetes_sd_configs:
  1507            - role: pod
  1508  
  1509          relabel_configs:
  1510            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
  1511              action: keep
  1512              regex: true
  1513            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
  1514              action: replace
  1515              target_label: __metrics_path__
  1516              regex: (.+)
  1517            - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
  1518              action: replace
  1519              regex: ([^:]+)(?::\d+)?;(\d+)
  1520              replacement: $1:$2
  1521              target_label: __address__
  1522            - action: labelmap
  1523              regex: __meta_kubernetes_pod_label_(.+)
  1524            - source_labels: [__meta_kubernetes_namespace]
  1525              action: replace
  1526              target_label: kubernetes_namespace
  1527            - source_labels: [__meta_kubernetes_pod_name]
  1528              action: replace
  1529              target_label: kubernetes_pod_name
  1530            - source_labels: [__meta_kubernetes_pod_phase]
  1531              regex: Pending|Succeeded|Failed
  1532              action: drop
  1533  
      # Example scrape config for pods which should be scraped slower. A useful example
      # would be stackdriver-exporter, which queries an API on every scrape of the pod
  1536        #
  1537        # The relabeling allows the actual pod scrape endpoint to be configured via the
  1538        # following annotations:
  1539        #
  1540        # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
  1541        # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
  1542        # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
  1543        - job_name: 'kubernetes-pods-slow'
  1544  
  1545          scrape_interval: 5m
  1546          scrape_timeout: 30s
  1547  
  1548          kubernetes_sd_configs:
  1549            - role: pod
  1550  
  1551          relabel_configs:
  1552            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
  1553              action: keep
  1554              regex: true
  1555            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
  1556              action: replace
  1557              target_label: __metrics_path__
  1558              regex: (.+)
  1559            - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
  1560              action: replace
  1561              regex: ([^:]+)(?::\d+)?;(\d+)
  1562              replacement: $1:$2
  1563              target_label: __address__
  1564            - action: labelmap
  1565              regex: __meta_kubernetes_pod_label_(.+)
  1566            - source_labels: [__meta_kubernetes_namespace]
  1567              action: replace
  1568              target_label: kubernetes_namespace
  1569            - source_labels: [__meta_kubernetes_pod_name]
  1570              action: replace
  1571              target_label: kubernetes_pod_name
  1572            - source_labels: [__meta_kubernetes_pod_phase]
  1573              regex: Pending|Succeeded|Failed
  1574              action: drop
  1575  
  1576  # adds additional scrape configs to prometheus.yml
  1577  # must be a string so you have to add a | after extraScrapeConfigs:
  1578  # example adds prometheus-blackbox-exporter scrape config
  1579  extraScrapeConfigs:
  1580    # - job_name: 'prometheus-blackbox-exporter'
  1581    #   metrics_path: /probe
  1582    #   params:
  1583    #     module: [http_2xx]
  1584    #   static_configs:
  1585    #     - targets:
  1586    #       - https://example.com
  1587    #   relabel_configs:
  1588    #     - source_labels: [__address__]
  1589    #       target_label: __param_target
  1590    #     - source_labels: [__param_target]
  1591    #       target_label: instance
  1592    #     - target_label: __address__
  1593    #       replacement: prometheus-blackbox-exporter:9115
  1594  
  1595  # Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager
  1596  # useful in H/A prometheus with different external labels but the same alerts
  1597  alertRelabelConfigs:
  1598    # alert_relabel_configs:
  1599    # - source_labels: [dc]
  1600    #   regex: (.+)\d+
  1601    #   target_label: dc
  1602  
  1603  networkPolicy:
  1604    ## Enable creation of NetworkPolicy resources.
  1605    ##
  1606    enabled: false
  1607  
  1608  # Force namespace of namespaced resources
  1609  forceNamespace: null