github.com/netdata/go.d.plugin@v0.58.1/modules/cassandra/collect.go (about)

     1  // SPDX-License-Identifier: GPL-3.0-or-later
     2  
     3  package cassandra
     4  
     5  import (
     6  	"errors"
     7  	"github.com/netdata/go.d.plugin/pkg/prometheus"
     8  	"strings"
     9  )
    10  
// Metric name suffixes produced by the JMX exporter:
// "_count" for counter samples, "_value" for gauge samples.
const (
	suffixCount = "_count"
	suffixValue = "_value"
)
    15  
    16  func (c *Cassandra) collect() (map[string]int64, error) {
    17  	pms, err := c.prom.ScrapeSeries()
    18  	if err != nil {
    19  		return nil, err
    20  	}
    21  
    22  	if c.validateMetrics {
    23  		if !isCassandraMetrics(pms) {
    24  			return nil, errors.New("collected metrics aren't Cassandra metrics")
    25  		}
    26  		c.validateMetrics = false
    27  	}
    28  
    29  	mx := make(map[string]int64)
    30  
    31  	c.resetMetrics()
    32  	c.collectMetrics(pms)
    33  	c.processMetric(mx)
    34  
    35  	return mx, nil
    36  }
    37  
    38  func (c *Cassandra) resetMetrics() {
    39  	cm := newCassandraMetrics()
    40  	for key, p := range c.mx.threadPools {
    41  		cm.threadPools[key] = &threadPoolMetrics{
    42  			name:      p.name,
    43  			hasCharts: p.hasCharts,
    44  		}
    45  	}
    46  	c.mx = cm
    47  }
    48  
    49  func (c *Cassandra) processMetric(mx map[string]int64) {
    50  	c.mx.clientReqTotalLatencyReads.write(mx, "client_request_total_latency_reads")
    51  	c.mx.clientReqTotalLatencyWrites.write(mx, "client_request_total_latency_writes")
    52  	c.mx.clientReqLatencyReads.write(mx, "client_request_latency_reads")
    53  	c.mx.clientReqLatencyWrites.write(mx, "client_request_latency_writes")
    54  	c.mx.clientReqTimeoutsReads.write(mx, "client_request_timeouts_reads")
    55  	c.mx.clientReqTimeoutsWrites.write(mx, "client_request_timeouts_writes")
    56  	c.mx.clientReqUnavailablesReads.write(mx, "client_request_unavailables_reads")
    57  	c.mx.clientReqUnavailablesWrites.write(mx, "client_request_unavailables_writes")
    58  	c.mx.clientReqFailuresReads.write(mx, "client_request_failures_reads")
    59  	c.mx.clientReqFailuresWrites.write(mx, "client_request_failures_writes")
    60  
    61  	c.mx.clientReqReadLatencyP50.write(mx, "client_request_read_latency_p50")
    62  	c.mx.clientReqReadLatencyP75.write(mx, "client_request_read_latency_p75")
    63  	c.mx.clientReqReadLatencyP95.write(mx, "client_request_read_latency_p95")
    64  	c.mx.clientReqReadLatencyP98.write(mx, "client_request_read_latency_p98")
    65  	c.mx.clientReqReadLatencyP99.write(mx, "client_request_read_latency_p99")
    66  	c.mx.clientReqReadLatencyP999.write(mx, "client_request_read_latency_p999")
    67  	c.mx.clientReqWriteLatencyP50.write(mx, "client_request_write_latency_p50")
    68  	c.mx.clientReqWriteLatencyP75.write(mx, "client_request_write_latency_p75")
    69  	c.mx.clientReqWriteLatencyP95.write(mx, "client_request_write_latency_p95")
    70  	c.mx.clientReqWriteLatencyP98.write(mx, "client_request_write_latency_p98")
    71  	c.mx.clientReqWriteLatencyP99.write(mx, "client_request_write_latency_p99")
    72  	c.mx.clientReqWriteLatencyP999.write(mx, "client_request_write_latency_p999")
    73  
    74  	c.mx.rowCacheHits.write(mx, "row_cache_hits")
    75  	c.mx.rowCacheMisses.write(mx, "row_cache_misses")
    76  	c.mx.rowCacheSize.write(mx, "row_cache_size")
    77  	if c.mx.rowCacheHits.isSet && c.mx.rowCacheMisses.isSet {
    78  		if s := c.mx.rowCacheHits.value + c.mx.rowCacheMisses.value; s > 0 {
    79  			mx["row_cache_hit_ratio"] = int64((c.mx.rowCacheHits.value * 100 / s) * 1000)
    80  		} else {
    81  			mx["row_cache_hit_ratio"] = 0
    82  		}
    83  	}
    84  	if c.mx.rowCacheCapacity.isSet && c.mx.rowCacheSize.isSet {
    85  		if s := c.mx.rowCacheCapacity.value; s > 0 {
    86  			mx["row_cache_utilization"] = int64((c.mx.rowCacheSize.value * 100 / s) * 1000)
    87  		} else {
    88  			mx["row_cache_utilization"] = 0
    89  		}
    90  	}
    91  
    92  	c.mx.keyCacheHits.write(mx, "key_cache_hits")
    93  	c.mx.keyCacheMisses.write(mx, "key_cache_misses")
    94  	c.mx.keyCacheSize.write(mx, "key_cache_size")
    95  	if c.mx.keyCacheHits.isSet && c.mx.keyCacheMisses.isSet {
    96  		if s := c.mx.keyCacheHits.value + c.mx.keyCacheMisses.value; s > 0 {
    97  			mx["key_cache_hit_ratio"] = int64((c.mx.keyCacheHits.value * 100 / s) * 1000)
    98  		} else {
    99  			mx["key_cache_hit_ratio"] = 0
   100  		}
   101  	}
   102  	if c.mx.keyCacheCapacity.isSet && c.mx.keyCacheSize.isSet {
   103  		if s := c.mx.keyCacheCapacity.value; s > 0 {
   104  			mx["key_cache_utilization"] = int64((c.mx.keyCacheSize.value * 100 / s) * 1000)
   105  		} else {
   106  			mx["key_cache_utilization"] = 0
   107  		}
   108  	}
   109  
   110  	c.mx.droppedMessages.write1k(mx, "dropped_messages")
   111  
   112  	c.mx.storageLoad.write(mx, "storage_load")
   113  	c.mx.storageExceptions.write(mx, "storage_exceptions")
   114  
   115  	c.mx.compactionBytesCompacted.write(mx, "compaction_bytes_compacted")
   116  	c.mx.compactionPendingTasks.write(mx, "compaction_pending_tasks")
   117  	c.mx.compactionCompletedTasks.write(mx, "compaction_completed_tasks")
   118  
   119  	c.mx.jvmMemoryHeapUsed.write(mx, "jvm_memory_heap_used")
   120  	c.mx.jvmMemoryNonHeapUsed.write(mx, "jvm_memory_nonheap_used")
   121  	c.mx.jvmGCParNewCount.write(mx, "jvm_gc_parnew_count")
   122  	c.mx.jvmGCParNewTime.write1k(mx, "jvm_gc_parnew_time")
   123  	c.mx.jvmGCCMSCount.write(mx, "jvm_gc_cms_count")
   124  	c.mx.jvmGCCMSTime.write1k(mx, "jvm_gc_cms_time")
   125  
   126  	for _, p := range c.mx.threadPools {
   127  		if !p.hasCharts {
   128  			p.hasCharts = true
   129  			c.addThreadPoolCharts(p)
   130  		}
   131  
   132  		px := "thread_pool_" + p.name + "_"
   133  		p.activeTasks.write(mx, px+"active_tasks")
   134  		p.pendingTasks.write(mx, px+"pending_tasks")
   135  		p.blockedTasks.write(mx, px+"blocked_tasks")
   136  		p.totalBlockedTasks.write(mx, px+"total_blocked_tasks")
   137  	}
   138  }
   139  
   140  func (c *Cassandra) collectMetrics(pms prometheus.Series) {
   141  	c.collectClientRequestMetrics(pms)
   142  	c.collectDroppedMessagesMetrics(pms)
   143  	c.collectThreadPoolsMetrics(pms)
   144  	c.collectStorageMetrics(pms)
   145  	c.collectCacheMetrics(pms)
   146  	c.collectJVMMetrics(pms)
   147  	c.collectCompactionMetrics(pms)
   148  }
   149  
   150  func (c *Cassandra) collectClientRequestMetrics(pms prometheus.Series) {
   151  	const metric = "org_apache_cassandra_metrics_clientrequest"
   152  
   153  	var rw struct{ read, write *metricValue }
   154  	for _, pm := range pms.FindByName(metric + suffixCount) {
   155  		name := pm.Labels.Get("name")
   156  		scope := pm.Labels.Get("scope")
   157  
   158  		switch name {
   159  		case "TotalLatency":
   160  			rw.read, rw.write = &c.mx.clientReqTotalLatencyReads, &c.mx.clientReqTotalLatencyWrites
   161  		case "Latency":
   162  			rw.read, rw.write = &c.mx.clientReqLatencyReads, &c.mx.clientReqLatencyWrites
   163  		case "Timeouts":
   164  			rw.read, rw.write = &c.mx.clientReqTimeoutsReads, &c.mx.clientReqTimeoutsWrites
   165  		case "Unavailables":
   166  			rw.read, rw.write = &c.mx.clientReqUnavailablesReads, &c.mx.clientReqUnavailablesWrites
   167  		case "Failures":
   168  			rw.read, rw.write = &c.mx.clientReqFailuresReads, &c.mx.clientReqFailuresWrites
   169  		default:
   170  			continue
   171  		}
   172  
   173  		switch scope {
   174  		case "Read":
   175  			rw.read.add(pm.Value)
   176  		case "Write":
   177  			rw.write.add(pm.Value)
   178  		}
   179  	}
   180  
   181  	rw = struct{ read, write *metricValue }{}
   182  
   183  	for _, pm := range pms.FindByNames(
   184  		metric+"_50thpercentile",
   185  		metric+"_75thpercentile",
   186  		metric+"_95thpercentile",
   187  		metric+"_98thpercentile",
   188  		metric+"_99thpercentile",
   189  		metric+"_999thpercentile",
   190  	) {
   191  		name := pm.Labels.Get("name")
   192  		scope := pm.Labels.Get("scope")
   193  
   194  		if name != "Latency" {
   195  			continue
   196  		}
   197  
   198  		switch {
   199  		case strings.HasSuffix(pm.Name(), "_50thpercentile"):
   200  			rw.read, rw.write = &c.mx.clientReqReadLatencyP50, &c.mx.clientReqWriteLatencyP50
   201  		case strings.HasSuffix(pm.Name(), "_75thpercentile"):
   202  			rw.read, rw.write = &c.mx.clientReqReadLatencyP75, &c.mx.clientReqWriteLatencyP75
   203  		case strings.HasSuffix(pm.Name(), "_95thpercentile"):
   204  			rw.read, rw.write = &c.mx.clientReqReadLatencyP95, &c.mx.clientReqWriteLatencyP95
   205  		case strings.HasSuffix(pm.Name(), "_98thpercentile"):
   206  			rw.read, rw.write = &c.mx.clientReqReadLatencyP98, &c.mx.clientReqWriteLatencyP98
   207  		case strings.HasSuffix(pm.Name(), "_99thpercentile"):
   208  			rw.read, rw.write = &c.mx.clientReqReadLatencyP99, &c.mx.clientReqWriteLatencyP99
   209  		case strings.HasSuffix(pm.Name(), "_999thpercentile"):
   210  			rw.read, rw.write = &c.mx.clientReqReadLatencyP999, &c.mx.clientReqWriteLatencyP999
   211  		default:
   212  			continue
   213  		}
   214  
   215  		switch scope {
   216  		case "Read":
   217  			rw.read.add(pm.Value)
   218  		case "Write":
   219  			rw.write.add(pm.Value)
   220  		}
   221  	}
   222  }
   223  
   224  func (c *Cassandra) collectCacheMetrics(pms prometheus.Series) {
   225  	const metric = "org_apache_cassandra_metrics_cache"
   226  
   227  	var hm struct{ hits, misses *metricValue }
   228  	for _, pm := range pms.FindByName(metric + suffixCount) {
   229  		name := pm.Labels.Get("name")
   230  		scope := pm.Labels.Get("scope")
   231  
   232  		switch scope {
   233  		case "KeyCache":
   234  			hm.hits, hm.misses = &c.mx.keyCacheHits, &c.mx.keyCacheMisses
   235  		case "RowCache":
   236  			hm.hits, hm.misses = &c.mx.rowCacheHits, &c.mx.rowCacheMisses
   237  		default:
   238  			continue
   239  		}
   240  
   241  		switch name {
   242  		case "Hits":
   243  			hm.hits.add(pm.Value)
   244  		case "Misses":
   245  			hm.misses.add(pm.Value)
   246  		}
   247  	}
   248  
   249  	var cs struct{ cap, size *metricValue }
   250  	for _, pm := range pms.FindByName(metric + suffixValue) {
   251  		name := pm.Labels.Get("name")
   252  		scope := pm.Labels.Get("scope")
   253  
   254  		switch scope {
   255  		case "KeyCache":
   256  			cs.cap, cs.size = &c.mx.keyCacheCapacity, &c.mx.keyCacheSize
   257  		case "RowCache":
   258  			cs.cap, cs.size = &c.mx.rowCacheCapacity, &c.mx.rowCacheSize
   259  		default:
   260  			continue
   261  		}
   262  
   263  		switch name {
   264  		case "Capacity":
   265  			cs.cap.add(pm.Value)
   266  		case "Size":
   267  			cs.size.add(pm.Value)
   268  		}
   269  	}
   270  }
   271  
   272  func (c *Cassandra) collectThreadPoolsMetrics(pms prometheus.Series) {
   273  	const metric = "org_apache_cassandra_metrics_threadpools"
   274  
   275  	for _, pm := range pms.FindByName(metric + suffixValue) {
   276  		name := pm.Labels.Get("name")
   277  		scope := pm.Labels.Get("scope")
   278  		pool := c.getThreadPoolMetrics(scope)
   279  
   280  		switch name {
   281  		case "ActiveTasks":
   282  			pool.activeTasks.add(pm.Value)
   283  		case "PendingTasks":
   284  			pool.pendingTasks.add(pm.Value)
   285  		}
   286  	}
   287  	for _, pm := range pms.FindByName(metric + suffixCount) {
   288  		name := pm.Labels.Get("name")
   289  		scope := pm.Labels.Get("scope")
   290  		pool := c.getThreadPoolMetrics(scope)
   291  
   292  		switch name {
   293  		case "CompletedTasks":
   294  			pool.totalBlockedTasks.add(pm.Value)
   295  		case "TotalBlockedTasks":
   296  			pool.totalBlockedTasks.add(pm.Value)
   297  		case "CurrentlyBlockedTasks":
   298  			pool.blockedTasks.add(pm.Value)
   299  		}
   300  	}
   301  }
   302  
   303  func (c *Cassandra) collectStorageMetrics(pms prometheus.Series) {
   304  	const metric = "org_apache_cassandra_metrics_storage"
   305  
   306  	for _, pm := range pms.FindByName(metric + suffixCount) {
   307  		name := pm.Labels.Get("name")
   308  
   309  		switch name {
   310  		case "Load":
   311  			c.mx.storageLoad.add(pm.Value)
   312  		case "Exceptions":
   313  			c.mx.storageExceptions.add(pm.Value)
   314  		}
   315  	}
   316  }
   317  
   318  func (c *Cassandra) collectDroppedMessagesMetrics(pms prometheus.Series) {
   319  	const metric = "org_apache_cassandra_metrics_droppedmessage"
   320  
   321  	for _, pm := range pms.FindByName(metric + suffixCount) {
   322  		c.mx.droppedMessages.add(pm.Value)
   323  	}
   324  }
   325  
   326  func (c *Cassandra) collectJVMMetrics(pms prometheus.Series) {
   327  	const metricMemUsed = "jvm_memory_bytes_used"
   328  	const metricGC = "jvm_gc_collection_seconds"
   329  
   330  	for _, pm := range pms.FindByName(metricMemUsed) {
   331  		area := pm.Labels.Get("area")
   332  
   333  		switch area {
   334  		case "heap":
   335  			c.mx.jvmMemoryHeapUsed.add(pm.Value)
   336  		case "nonheap":
   337  			c.mx.jvmMemoryNonHeapUsed.add(pm.Value)
   338  		}
   339  	}
   340  
   341  	for _, pm := range pms.FindByName(metricGC + suffixCount) {
   342  		gc := pm.Labels.Get("gc")
   343  
   344  		switch gc {
   345  		case "ParNew":
   346  			c.mx.jvmGCParNewCount.add(pm.Value)
   347  		case "ConcurrentMarkSweep":
   348  			c.mx.jvmGCCMSCount.add(pm.Value)
   349  		}
   350  	}
   351  
   352  	for _, pm := range pms.FindByName(metricGC + "_sum") {
   353  		gc := pm.Labels.Get("gc")
   354  
   355  		switch gc {
   356  		case "ParNew":
   357  			c.mx.jvmGCParNewTime.add(pm.Value)
   358  		case "ConcurrentMarkSweep":
   359  			c.mx.jvmGCCMSTime.add(pm.Value)
   360  		}
   361  	}
   362  }
   363  
   364  func (c *Cassandra) collectCompactionMetrics(pms prometheus.Series) {
   365  	const metric = "org_apache_cassandra_metrics_compaction"
   366  
   367  	for _, pm := range pms.FindByName(metric + suffixValue) {
   368  		name := pm.Labels.Get("name")
   369  
   370  		switch name {
   371  		case "CompletedTasks":
   372  			c.mx.compactionCompletedTasks.add(pm.Value)
   373  		case "PendingTasks":
   374  			c.mx.compactionPendingTasks.add(pm.Value)
   375  		}
   376  	}
   377  	for _, pm := range pms.FindByName(metric + suffixCount) {
   378  		name := pm.Labels.Get("name")
   379  
   380  		switch name {
   381  		case "BytesCompacted":
   382  			c.mx.compactionBytesCompacted.add(pm.Value)
   383  		}
   384  	}
   385  }
   386  
   387  func (c *Cassandra) getThreadPoolMetrics(name string) *threadPoolMetrics {
   388  	pool, ok := c.mx.threadPools[name]
   389  	if !ok {
   390  		pool = &threadPoolMetrics{name: name}
   391  		c.mx.threadPools[name] = pool
   392  	}
   393  	return pool
   394  }
   395  
   396  func isCassandraMetrics(pms prometheus.Series) bool {
   397  	for _, pm := range pms {
   398  		if strings.HasPrefix(pm.Name(), "org_apache_cassandra_metrics") {
   399  			return true
   400  		}
   401  	}
   402  	return false
   403  }