github.com/MetalBlockchain/metalgo@v1.11.9/database/leveldb/metrics.go

// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package leveldb

import (
	"errors"
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/syndtr/goleveldb/leveldb"
)

var levelLabels = []string{"level"}

type metrics struct {
	// total number of writes that have been delayed due to compaction
	writesDelayedCount prometheus.Counter
	// total amount of time (in ns) that writes have been delayed due to
	// compaction
	writesDelayedDuration prometheus.Gauge
	// set to 1 if there is currently at least one write that is being delayed
	// due to compaction
	writeIsDelayed prometheus.Gauge

	// number of currently alive snapshots
	aliveSnapshots prometheus.Gauge
	// number of currently alive iterators
	aliveIterators prometheus.Gauge

	// total amount of data written
	ioWrite prometheus.Counter
	// total amount of data read
	ioRead prometheus.Counter

	// total number of bytes of cached data blocks
	blockCacheSize prometheus.Gauge
	// current number of open tables
	openTables prometheus.Gauge

	// number of tables per level
	levelTableCount *prometheus.GaugeVec
	// size of each level
	levelSize *prometheus.GaugeVec
	// amount of time spent compacting each level
	levelDuration *prometheus.GaugeVec
	// amount of bytes read while compacting each level
	levelReads *prometheus.CounterVec
	// amount of bytes written while compacting each level
	levelWrites *prometheus.CounterVec

	// total number of memory compactions performed
	memCompactions prometheus.Counter
	// total number of level 0 compactions performed
	level0Compactions prometheus.Counter
	// total number of non-level 0 compactions performed
	nonLevel0Compactions prometheus.Counter
	// total number of seek compactions performed
	seekCompactions prometheus.Counter

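	// stats snapshots from the previous and the in-progress updateMetrics
	// call; the two structs are swapped after every update so they can be
	// reused without allocating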
	priorStats, currentStats *leveldb.DBStats
}

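// newMetrics constructs every collector above and registers each one with
// reg, joining any registration failures into the returned error.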
func newMetrics(reg prometheus.Registerer) (metrics, error) {
	m := metrics{
		writesDelayedCount: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "writes_delayed",
			Help: "number of cumulative writes that have been delayed due to compaction",
		}),
		writesDelayedDuration: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "writes_delayed_duration",
			Help: "amount of time (in ns) that writes have been delayed due to compaction",
		}),
		writeIsDelayed: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "write_delayed",
			Help: "1 if there is currently a write that is being delayed due to compaction",
		}),

		aliveSnapshots: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "alive_snapshots",
			Help: "number of currently alive snapshots",
		}),
		aliveIterators: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "alive_iterators",
			Help: "number of currently alive iterators",
		}),

		ioWrite: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "io_write",
			Help: "cumulative amount of io write during compaction",
		}),
		ioRead: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "io_read",
			Help: "cumulative amount of io read during compaction",
		}),

		blockCacheSize: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "block_cache_size",
			Help: "total size of cached blocks",
		}),
		openTables: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "open_tables",
			Help: "number of currently opened tables",
		}),

		levelTableCount: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Name: "table_count",
				Help: "number of tables allocated by level",
			},
			levelLabels,
		),
		levelSize: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Name: "size",
				Help: "amount of bytes allocated by level",
			},
			levelLabels,
		),
		levelDuration: prometheus.NewGaugeVec(
			prometheus.GaugeOpts{
				Name: "duration",
				Help: "amount of time (in ns) spent in compaction by level",
			},
			levelLabels,
		),
		levelReads: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "reads",
				Help: "amount of bytes read during compaction by level",
			},
			levelLabels,
		),
		levelWrites: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "writes",
				Help: "amount of bytes written during compaction by level",
			},
			levelLabels,
		),

		memCompactions: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "mem_comps",
			Help: "total number of memory compactions performed",
		}),
		level0Compactions: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "level_0_comps",
			Help: "total number of level 0 compactions performed",
		}),
		nonLevel0Compactions: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "non_level_0_comps",
			Help: "total number of non-level 0 compactions performed",
		}),
		seekCompactions: prometheus.NewCounter(prometheus.CounterOpts{
			Name: "seek_comps",
			Help: "total number of seek compactions performed",
		}),

		priorStats:   &leveldb.DBStats{},
		currentStats: &leveldb.DBStats{},
	}

	err := errors.Join(
		reg.Register(m.writesDelayedCount),
		reg.Register(m.writesDelayedDuration),
		reg.Register(m.writeIsDelayed),

		reg.Register(m.aliveSnapshots),
		reg.Register(m.aliveIterators),

		reg.Register(m.ioWrite),
		reg.Register(m.ioRead),

		reg.Register(m.blockCacheSize),
		reg.Register(m.openTables),

		reg.Register(m.levelTableCount),
		reg.Register(m.levelSize),
		reg.Register(m.levelDuration),
		reg.Register(m.levelReads),
		reg.Register(m.levelWrites),

		reg.Register(m.memCompactions),
		reg.Register(m.level0Compactions),
		reg.Register(m.nonLevel0Compactions),
		reg.Register(m.seekCompactions),
	)
	return m, err
}

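// updateMetrics reads the current leveldb stats and folds the change since
// the previous call into the registered collectors: cumulative metrics are
// advanced by the delta between the current and prior stats, while
// point-in-time gauges are set directly.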
func (db *Database) updateMetrics() error {
	metrics := &db.metrics

	priorStats := metrics.priorStats
	currentStats := metrics.currentStats

	// Retrieve the database stats
	if err := db.DB.Stats(currentStats); err != nil {
		return err
	}

	metrics.writesDelayedCount.Add(float64(currentStats.WriteDelayCount - priorStats.WriteDelayCount))
	metrics.writesDelayedDuration.Add(float64(currentStats.WriteDelayDuration - priorStats.WriteDelayDuration))
	if currentStats.WritePaused {
		metrics.writeIsDelayed.Set(1)
	} else {
		metrics.writeIsDelayed.Set(0)
	}

	metrics.aliveSnapshots.Set(float64(currentStats.AliveSnapshots))
	metrics.aliveIterators.Set(float64(currentStats.AliveIterators))

	metrics.ioWrite.Add(float64(currentStats.IOWrite - priorStats.IOWrite))
	metrics.ioRead.Add(float64(currentStats.IORead - priorStats.IORead))

	metrics.blockCacheSize.Set(float64(currentStats.BlockCacheSize))
	metrics.openTables.Set(float64(currentStats.OpenedTablesCount))

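	// Update the per-level metrics. Table count and size are point-in-time
	// values, while duration, reads, and writes are cumulative: levels that
	// were present in priorStats are advanced by the delta, and a level seen
	// for the first time contributes its full value.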
	for level, tableCounts := range currentStats.LevelTablesCounts {
		levelStr := strconv.Itoa(level)
		metrics.levelTableCount.WithLabelValues(levelStr).Set(float64(tableCounts))
		metrics.levelSize.WithLabelValues(levelStr).Set(float64(currentStats.LevelSizes[level]))

		if level < len(priorStats.LevelTablesCounts) {
			metrics.levelDuration.WithLabelValues(levelStr).Add(float64(currentStats.LevelDurations[level] - priorStats.LevelDurations[level]))
			metrics.levelReads.WithLabelValues(levelStr).Add(float64(currentStats.LevelRead[level] - priorStats.LevelRead[level]))
			metrics.levelWrites.WithLabelValues(levelStr).Add(float64(currentStats.LevelWrite[level] - priorStats.LevelWrite[level]))
		} else {
			metrics.levelDuration.WithLabelValues(levelStr).Add(float64(currentStats.LevelDurations[level]))
			metrics.levelReads.WithLabelValues(levelStr).Add(float64(currentStats.LevelRead[level]))
			metrics.levelWrites.WithLabelValues(levelStr).Add(float64(currentStats.LevelWrite[level]))
		}
	}

	metrics.memCompactions.Add(float64(currentStats.MemComp - priorStats.MemComp))
	metrics.level0Compactions.Add(float64(currentStats.Level0Comp - priorStats.Level0Comp))
	metrics.nonLevel0Compactions.Add(float64(currentStats.NonLevel0Comp - priorStats.NonLevel0Comp))
	metrics.seekCompactions.Add(float64(currentStats.SeekComp - priorStats.SeekComp))

	// Record the stats just read as priorStats so the counters are advanced
	// by the correct delta the next time this method is called.
	metrics.priorStats = currentStats

	// Reuse the previous stats struct for the next read. This avoids
	// allocating a new leveldb.DBStats on every update.
	metrics.currentStats = priorStats
	return nil
}
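
// A minimal polling sketch, assuming the caller refreshes the metrics on a
// fixed interval: updateMetrics is unexported, so a loop like the one below
// would live elsewhere in this package. The pollMetrics name, the interval
// parameter, the stop channel, and the discarded error are all illustrative
// assumptions rather than code from this file.
//
//	func (db *Database) pollMetrics(interval time.Duration, stop <-chan struct{}) {
//		ticker := time.NewTicker(interval)
//		defer ticker.Stop()
//		for {
//			select {
//			case <-ticker.C:
//				// a real caller should log or surface this error
//				_ = db.updateMetrics()
//			case <-stop:
//				return
//			}
//		}
//	}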