# Default values for fluentbit-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Set this to containerd or crio if you want to collect CRI format logs
containerRuntime: docker
# If you want to deploy a default Fluent Bit pipeline (including Fluent Bit Input, Filter, and output) to collect Kubernetes logs, you'll need to set the Kubernetes parameter to true
# see https://github.com/fluent/fluent-operator/tree/master/manifests/logging-stack
Kubernetes: true

operator:
  # The init container is to get the actual storage path of the docker log files so that it can be mounted to collect the logs.
  # see https://github.com/fluent/fluent-operator/blob/master/manifests/setup/fluent-operator-deployment.yaml#L26
  initcontainer:
    repository: "docker"
    tag: "20.10"
  container:
    repository: "kubesphere/fluent-operator"
    tag: "v2.2.0"
  # Pod security context for Fluent Operator. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  podSecurityContext: {}
  # Container security context for Fluent Operator container. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  securityContext: {}
  # Fluent Operator resources. Usually user needn't to adjust these.
  resources:
    limits:
      cpu: 100m
      memory: 60Mi
    requests:
      cpu: 100m
      memory: 20Mi
  # Specify custom annotations to be added to each Fluent Operator pod.
  annotations: {}
  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  imagePullSecrets: []
  # - name: "image-pull-secret"
  # Reference one more key-value pairs of labels that should be attached to fluent-operator
  labels: {}
  # myExampleLabel: someValue
  logPath:
    # The operator currently assumes a Docker container runtime path for the logs as the default, for other container runtimes you can set the location explicitly below.
    # crio: /var/log
    containerd: /var/log
  # By default, the operator provisions both Fluent Bit and FluentD controllers.
  # A specific controller can be disabled by setting the disableComponentControllers value.
  # The disableComponentControllers value can be either "fluent-bit" or "fluentd".
  # This helm chart renders the controllers CRDs in sub charts.
  # If needed a sub chart, hence corresponding set of CRDs can be disabled by
  # setting fluentbit.crdsEnable or fluentd.crdsEnable values to false.
  # By default all CRDs are deployed.
  disableComponentControllers: ""

fluentbit:
  # Installs a sub chart carrying the CRDs for the fluent-bit controller. The sub chart is enabled by default.
  crdsEnable: true
  enable: true
  image:
    repository: "kubesphere/fluent-bit"
    tag: "v2.0.11"
  # fluentbit resources. If you do want to specify resources, adjust them as necessary
  # You can adjust it based on the log volume.
  resources:
    limits:
      cpu: 500m
      memory: 200Mi
    requests:
      cpu: 10m
      memory: 25Mi
  # Specify custom annotations to be added to each FluentBit pod.
  annotations:
    {}
    ## Request to Fluent Bit to exclude or not the logs generated by the Pod.
    # fluentbit.io/exclude: "true"
    ## Prometheus can use this tag to automatically discover the Pod and collect monitoring data
    # prometheus.io/scrape: "true"
  # Specify additional custom labels for fluentbit-pods
  labels: {}

  ## Reference to one or more secrets to be used when pulling images
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ##
  imagePullSecrets: []
  # - name: "image-pull-secret"
  secrets: []
  # Pod security context for Fluent Bit pods. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  podSecurityContext: {}
  # Security context for Fluent Bit container. Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  securityContext: {}
  # List of volumes that can be mounted by containers belonging to the pod.
  additionalVolumes: []
  # Pod volumes to mount into the container's filesystem.
  additionalVolumesMounts: []
  # Environment variables that can be passed to fluentbit pods
  envVars: []
  # - name: FOO
  #   value: "bar"

  # Remove the above empty volumes and volumesMounts, and then set additionalVolumes and additionalVolumesMounts as below if you want to collect node exporter metrics
  # additionalVolumes:
  # - name: hostProc
  #   hostPath:
  #     path: /proc/
  # - name: hostSys
  #   hostPath:
  #     path: /sys/
  # Uncomment the code if you intend to create the volume for buffer storage in case the storage type "filesystem" is being used in the configuration of the fluentbit service.
  # - name: hostBuffer
  #   hostPath:
  #     path: /tmp/fluent-bit-buffer
  # additionalVolumesMounts:
  # - mountPath: /host/sys
  #   mountPropagation: HostToContainer
  #   name: hostSys
  #   readOnly: true
  # - mountPath: /host/proc
  #   mountPropagation: HostToContainer
  #   name: hostProc
  #   readOnly: true
  # Uncomment the code if you intend to mount the volume for buffer storage in case the storage type "filesystem" is being used in the configuration of the fluentbit service.
  # - mountPath: /host/fluent-bit-buffer
  #   mountPropagation: HostToContainer
  #   name: hostBuffer


  namespaceFluentBitCfgSelector: {}

  # Set a limit of memory that Tail plugin can use when appending data to the Engine.
  # You can find more details here: https://docs.fluentbit.io/manual/pipeline/inputs/tail#config
  # If the limit is reach, it will be paused; when the data is flushed it resumes.
  # if the inbound traffic is less than 2.4Mbps, setting memBufLimit to 5MB is enough
  # if the inbound traffic is less than 4.0Mbps, setting memBufLimit to 10MB is enough
  # if the inbound traffic is less than 13.64Mbps, setting memBufLimit to 50MB is enough
  input:
    tail:
      enable: true
      refreshIntervalSeconds: 10
      memBufLimit: 5MB
      path: "/var/log/containers/*.log"
      skipLongLines: true
      readFromHead: false
      # Use storageType as "filesystem" if you want to use filesystem as the buffering mechanism for tail input.
      storageType: memory
      pauseOnChunksOverlimit: "off"
    systemd:
      enable: true
      systemdFilter:
        enable: true
        filters: []
      path: "/var/log/journal"
      includeKubelet: true
      stripUnderscores: "off"
      # Use storageType as "filesystem" if you want to use filesystem as the buffering mechanism for systemd input.
      storageType: memory
      pauseOnChunksOverlimit: "off"
    nodeExporterMetrics: {}
    # uncomment below nodeExporterMetrics section if you want to collect node exporter metrics
    # nodeExporterMetrics:
    #   tag: node_metrics
    #   scrapeInterval: 15s
    #   path:
    #     procfs: /host/proc
    #     sysfs: /host/sys
    fluentBitMetrics: {}
    # uncomment below fluentBitMetrics section if you want to collect fluentBit metrics
    # fluentBitMetrics:
    #   scrapeInterval: "2"
    #   scrapeOnStart: true
    #   tag: "fb.metrics"

  # Configure the output plugin parameter in FluentBit.
  # You can set enable to true to output logs to the specified location.
  output:
    # You can find more supported output plugins here: https://github.com/fluent/fluent-operator/tree/master/docs/plugins/fluentbit/clusteroutput
    es:
      enable: false
      host: "<Elasticsearch url like elasticsearch-logging-data.kubesphere-logging-system.svc>"
      port: 9200
      logstashPrefix: ks-logstash-log
      # path: ""
      # bufferSize: "4KB"
      # index: "fluent-bit"
      # httpUser:
      # httpPassword:
      # logstashFormat: true
      # replaceDots: false
      # enableTLS: false
      # tls:
      #   verify: On
      #   debug: 1
      #   caFile: "<Absolute path to CA certificate file>"
      #   caPath: "<Absolute path to scan for certificate files>"
      #   crtFile: "<Absolute path to private Key file>"
      #   keyFile: "<Absolute path to private Key file>"
      #   keyPassword:
      #   vhost: "<Hostname to be used for TLS SNI extension>"
    kafka:
      enable: false
      brokers: "<kafka broker list like xxx.xxx.xxx.xxx:9092,yyy.yyy.yyy.yyy:9092>"
      topics: ks-log
    opentelemetry: {}
    # You can configure the opentelemetry-related configuration here
    opensearch: {}
    # You can configure the opensearch-related configuration here
    stdout:
      enable: false
    # Uncomment the following section to enable Prometheus metrics exporter.
    prometheusMetricsExporter: {}
    # prometheusMetricsExporter:
    #   match: "fb.metrics"
    #   metricsExporter:
    #     host: "0.0.0.0"
    #     port: 2020
    #   addLabels:
    #     app: "fluentbit"
  service:
    storage: {}
    # Remove the above storage section and uncomment below section if you want to configure file-system as storage for buffer
    # storage:
    #   path: "/host/fluent-bit-buffer/"
    #   backlogMemLimit: "50MB"
    #   checksum: "off"
    #   deleteIrrecoverableChunks: "on"
    #   maxChunksUp: 128
    #   metrics: "on"
    #   sync: normal

  # Configure the default filters in FluentBit.
  # The `filter` will filter and parse the collected log information and output the logs into a uniform format. You can choose whether to turn this on or not.
  filter:
    kubernetes:
      enable: true
      labels: false
      annotations: false
    containerd:
      # This is customized lua containerd log format converter, you can refer here:
      # https://github.com/fluent/fluent-operator/blob/master/charts/fluent-operator/templates/fluentbit-clusterfilter-containerd.yaml
      # https://github.com/fluent/fluent-operator/blob/master/charts/fluent-operator/templates/fluentbit-containerd-config.yaml
      enable: true
    systemd:
      enable: true

  kubeedge:
    enable: false
    prometheusRemoteWrite:
      # Change the host to the address of a cloud-side Prometheus-compatible server that can receive Prometheus remote write data
      host: "<cloud-prometheus-service-host>"
      # Change the port to the port of a cloud-side Prometheus-compatible server that can receive Prometheus remote write data
      port: "<cloud-prometheus-service-port>"

fluentd:
  # Installs a sub chart carrying the CRDs for the fluentd controller. The sub chart is enabled by default.
  crdsEnable: true
  enable: false
  name: fluentd
  port: 24224
  image:
    repository: "kubesphere/fluentd"
    tag: "v1.15.3"
  replicas: 1
  forward:
    port: 24224
  watchedNamespaces:
    - kube-system
    - default
  resources:
    limits:
      cpu: 500m
      memory: 500Mi
    requests:
      cpu: 100m
      memory: 128Mi
  # Configure the output plugin parameter in Fluentd.
  # Fluentd is disabled by default, if you enable it make sure to also set up an output to use.
  output:
    es:
      enable: false
      host: elasticsearch-logging-data.kubesphere-logging-system.svc
      port: 9200
      logstashPrefix: ks-logstash-log
      buffer:
        enable: false
        type: file
        path: /buffers/es
    kafka:
      enable: false
      brokers: "my-cluster-kafka-bootstrap.default.svc:9091,my-cluster-kafka-bootstrap.default.svc:9092,my-cluster-kafka-bootstrap.default.svc:9093"
      topicKey: kubernetes_ns
      buffer:
        enable: false
        type: file
        path: /buffers/kafka
    opensearch: {}

nameOverride: ""
fullnameOverride: ""
namespaceOverride: ""