github.com/m3db/m3@v1.5.0/scripts/development/m3_stack/prometheus.yml

global:
  external_labels:
    role: "remote"
  scrape_interval: 10s
  evaluation_interval: 10s

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# Scrape configurations for Prometheus itself and the M3 stack components
# (coordinator, dbnodes, and aggregators).
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus01'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['prometheus01:9090']

  - job_name: 'coordinator'
    static_configs:
      - targets:
          - m3coordinator01:7210
          - m3coordinator01:7211
          - m3coordinator01:7212
          - m3coordinator01:7213

  - job_name: 'dbnode'
    static_configs:
      - targets: ['m3db_seed:9004', 'm3db_data01:9004', 'm3db_data02:9004']

  - job_name: 'aggregator'
    static_configs:
      - targets: ['m3aggregator01:6002', 'm3aggregator02:6002']

remote_read:
  - url: http://m3coordinator01:7201/api/v1/prom/remote/read
    read_recent: true

remote_write:
  - url: http://m3coordinator01:7201/api/v1/prom/remote/write
    remote_timeout: 30s
    queue_config:
      capacity: 10000
      max_shards: 10
      min_shards: 3
      max_samples_per_send: 5000
      batch_send_deadline: 1m
      min_backoff: 50ms
      max_backoff: 1s
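
# After editing this file, a quick sanity check is Prometheus's own config
# checker (this assumes the promtool binary that ships with Prometheus is
# available on your PATH):
#
#   promtool check config prometheus.yml
#
# Note that the remote_read/remote_write URLs above point at the coordinator's
# Prometheus remote storage endpoints on port 7201, so Prometheus both writes
# scraped samples to M3 and reads historical data back through it.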