github.com/grafana/pyroscope@v1.18.0/operations/monitoring/helm/pyroscope-monitoring/values.yaml (about)

     1  # Default values for pyroscope-monitoring.
     2  # This is a YAML-formatted file.
     3  # Declare variables to be passed into your templates.
     4  
     5  replicaCount: 1
     6  
     7  image:
     8    repository: grafana/otel-lgtm
     9    tag: "0.11.10"
    10    pullPolicy: IfNotPresent
    11  
    12  imagePullSecrets: []
    13  nameOverride: ""
    14  fullnameOverride: ""
    15  
    16  podAnnotations: {}
    17  podLabels: {}
    18  
    19  podSecurityContext: {}
    20    # fsGroup: 2000
    21  
    22  securityContext: {}
    23    # capabilities:
    24    #   drop:
    25    #   - ALL
    26    # readOnlyRootFilesystem: true
    27    # runAsNonRoot: true
    28    # runAsUser: 1000
    29  
    30  
    31  # Set environment variables
    32  # -- @ignored
    33  env:
    34    - name: GF_PLUGINS_PREINSTALL
    35      value: grafana-exploretraces-app
    36    - name: GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH
    37      value: /otel-lgtm/grafana/conf/provisioning/dashboards/operational.json
    38  
    39  service:
    40    type: ClusterIP
    41    # -- @ignored
    42    ports:
    43      - name: grafana
    44        protocol: TCP
    45        port: 3000
    46        targetPort: 3000
    47      - name: otel-grpc
    48        protocol: TCP
    49        port: 4317
    50        targetPort: 4317
    51      - name: otel-http
    52        protocol: TCP
    53        port: 4318
    54        targetPort: 4318
    55      - name: prometheus
    56        protocol: TCP
    57        port: 9090
    58        targetPort: 9090
    59      - name: loki
    60        protocol: TCP
    61        port: 3100
    62        targetPort: 3100
     63    # Deploys a Service with the static name "pyroscope-monitoring" so that the subchart can send telemetry to it.
    64    deployStaticName: true
    65  
    66  resources: {}
    67    # We usually recommend not to specify default resources and to leave this as a conscious
    68    # choice for the user. This also increases chances charts run on environments with little
    69    # resources, such as Minikube. If you do want to specify resources, uncomment the following
    70    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
    71    # limits:
    72    #   cpu: 100m
    73    #   memory: 128Mi
    74    # requests:
    75    #   cpu: 100m
    76    #   memory: 128Mi
    77  
    78  # Additional volumes on the output Deployment definition.
    79  # -- @ignored
    80  volumes:
    81    - emptyDir: {}
    82      name: tempo-data
    83    - emptyDir: {}
    84      name: loki-data
    85    - emptyDir: {}
    86      name: grafana-data
    87    - emptyDir: {}
    88      name: loki-storage
    89    - emptyDir: {}
    90      name: p8s-storage
    91    - emptyDir: {}
    92      name: pyroscope-storage
    93  
    94  # Additional volumeMounts on the output Deployment definition.
    95  # -- @ignored
    96  volumeMounts:
    97    - mountPath: /data/tempo
    98      name: tempo-data
    99    - mountPath: /data/grafana
   100      name: grafana-data
   101    - mountPath: /data/loki
   102      name: loki-data
   103    - mountPath: /loki
   104      name: loki-storage
   105    - mountPath: /data/prometheus
   106      name: p8s-storage
   107    - mountPath: /data/pyroscope
   108      name: pyroscope-storage
   109    - mountPath: /otel-lgtm/grafana/conf/provisioning/dashboards
   110      name: dashboards
   111    - mountPath: /otel-lgtm/prometheus.yaml
   112      name: rules
   113      subPath: prometheus.yaml
   114    - mountPath: /prometheus-rules
   115      name: rules
   116  
   117  nodeSelector: {}
   118  
   119  tolerations: []
   120  
   121  affinity: {}
   122  
   123  # Customize dashboard generation
   124  dashboards:
    125    # Whether the cloud-backend-gateway (previously cortex-gw) is available
   126    cloudBackendGateway: false
   127  
   128    # cloud backend gateway selector
   129    cloudBackendGatewaySelector: container=~"cortex-gw(-internal)?"
   130  
   131    kubeStateMetricsSelector: job=~"(.*/)?kube-state-metrics"
   132    cadvisorSelector: job=~"(.*/)?cadvisor"
   133  
   134    # Default namespace
   135    namespace: default
   136  
   137    # Filter available namespaces by regex
   138    namespaceRegex: .*
   139  
   140    # Default cluster
   141    cluster: pyroscope-dev
   142  
   143    # ingest label selector
   144    ingestSelector: container=~"pyroscope|distributor|query-frontend"
   145  
   146    tenantQuery: |
   147      sum by (tenant, slug, org_name, environment) (
   148        rate(pyroscope_distributor_received_decompressed_bytes_sum{cluster=~"$cluster",namespace=~"$namespace"}[$__rate_interval])
   149      )
   150  
   151    # -- @ignored
   152    links:
   153      global:
   154        - asDropdown: true
   155          icon: external link
   156          includeVars: true
   157          keepTime: true
   158          tags:
   159            - pyroscope
   160          targetBlank: false
   161          title: Pyroscope Dashboards
   162          type: dashboards
   163      perDashboard:
   164        my-dashboard: []
   165  
   166  # -- @ignored
   167  monitoring:
   168    enabled: true
   169  
   170    global:
   171      # This enables scraping of native histograms
   172      scrapeProtocols: ["PrometheusProto", "OpenMetricsText1.0.0", "OpenMetricsText0.0.1", "PrometheusText0.0.4"]
   173      scrapeClassicHistograms: true
   174      # TODO: Enable after https://github.com/grafana/k8s-monitoring-helm/pull/2071 merges
   175      # scrapeNativeHistograms: true
   176    cluster:
   177      name: pyroscope-dev
   178  
   179    destinations:
   180      - name: otlp-gateway
   181        type: otlp
   182        url: "http://pyroscope-monitoring:4318"
   183        protocol: http
   184        traces: {enabled: true}
    185      # NOTE(simonswine): Unable to keep container/namespace/job/cluster as indexed labels
   186      - name: loki
   187        type: loki
   188        url: "http://pyroscope-monitoring:3100/loki/api/v1/push"
   189        logs: {enabled: true}
   190      # NOTE(simonswine): Was not able to get native histograms to work with otlp-gateway
   191      - name: prometheus
   192        type: prometheus
   193        url: "http://pyroscope-monitoring:9090/api/v1/write"
   194        metrics: {enabled: true}
   195        sendNativeHistograms: true
   196  
   197    clusterMetrics:
   198      enabled: true
   199      opencost:
   200        enabled: false
   201      kepler:
   202        enabled: false
   203  
   204    clusterEvents:
   205      enabled: true
   206  
   207    podLogs:
   208      enabled: true
   209  
   210    annotationAutodiscovery:
   211      enabled: true
   212  
   213    applicationObservability:
   214      enabled: true
   215      receivers:
   216        jaeger:
   217          thriftHttp: {enabled: true}
   218          thriftBinary: {enabled: true}
   219          thriftCompact: {enabled: true}
   220  
   221    alloy-metrics:
   222      enabled: true
   223      image:
    224        # NOTE(simonswine): Was not able to get native histograms to work with v1.11.0, as they are now disabled by default and there is no flag to re-enable them
   225        # https://github.com/grafana/k8s-monitoring-helm/pull/2049#pullrequestreview-3340565881
   226        tag: v1.10.2
   227  
   228    alloy-logs:
   229      enabled: true
   230  
   231    alloy-singleton:
   232      enabled: true
   233  
   234    alloy-receiver:
   235      enabled: true
   236      alloy:
   237        # Should no longer be necessary after https://github.com/grafana/k8s-monitoring-helm/pull/2071 merges
   238        extraPorts:
   239          - name: "thrift-compact"
   240            port: 6831
   241            targetPort: 6831
   242            protocol: UDP
   243          - name: jaeger-binary
   244            port: 6832
   245            targetPort: 6832
   246            protocol: UDP
   247          - name: jaeger-http
   248            port: 14268
   249            targetPort: 14268
   250            protocol: TCP