github.com/nitinawathare/ethereumassignment3@v0.0.0-20211021213010-f07344c2b868/go-ethereum/swarm/shed/db.go (about)

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package shed provides a simple abstraction components to compose
    18  // more complex operations on storage data organized in fields and indexes.
    19  //
    20  // Only type which holds logical information about swarm storage chunks data
    21  // and metadata is Item. This part is not generalized mostly for
    22  // performance reasons.
    23  package shed
    24  
    25  import (
    26  	"fmt"
    27  	"strconv"
    28  	"strings"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/metrics"
    32  	"github.com/ethereum/go-ethereum/swarm/log"
    33  	"github.com/syndtr/goleveldb/leveldb"
    34  	"github.com/syndtr/goleveldb/leveldb/iterator"
    35  	"github.com/syndtr/goleveldb/leveldb/opt"
    36  )
    37  
const (
	openFileLimit = 128 // The limit for LevelDB OpenFilesCacheCapacity.
	// writePauseWarningThrottler is the minimum interval between repeated
	// "database compacting" warnings, so a long compaction does not flood the log.
	writePauseWarningThrottler = 1 * time.Minute
)
    42  
// DB provides abstractions over LevelDB in order to
// implement complex structures using fields and ordered indexes.
// It provides a schema functionality to store fields and indexes
// information about naming and types.
type DB struct {
	ldb *leveldb.DB // underlying LevelDB handle; all operations delegate to it

	compTimeMeter    metrics.Meter // Meter for measuring the total time spent in database compaction
	compReadMeter    metrics.Meter // Meter for measuring the data read during compaction
	compWriteMeter   metrics.Meter // Meter for measuring the data written during compaction
	writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction
	writeDelayMeter  metrics.Meter // Meter for measuring the write delay duration due to database compaction
	diskReadMeter    metrics.Meter // Meter for measuring the effective amount of data read
	diskWriteMeter   metrics.Meter // Meter for measuring the effective amount of data written

	quit chan struct{} // Quit channel to stop the metrics collection before closing the database
}
    60  
    61  // NewDB constructs a new DB and validates the schema
    62  // if it exists in database on the given path.
    63  // metricsPrefix is used for metrics collection for the given DB.
    64  func NewDB(path string, metricsPrefix string) (db *DB, err error) {
    65  	ldb, err := leveldb.OpenFile(path, &opt.Options{
    66  		OpenFilesCacheCapacity: openFileLimit,
    67  	})
    68  	if err != nil {
    69  		return nil, err
    70  	}
    71  	db = &DB{
    72  		ldb: ldb,
    73  	}
    74  
    75  	if _, err = db.getSchema(); err != nil {
    76  		if err == leveldb.ErrNotFound {
    77  			// save schema with initialized default fields
    78  			if err = db.putSchema(schema{
    79  				Fields:  make(map[string]fieldSpec),
    80  				Indexes: make(map[byte]indexSpec),
    81  			}); err != nil {
    82  				return nil, err
    83  			}
    84  		} else {
    85  			return nil, err
    86  		}
    87  	}
    88  
    89  	// Configure meters for DB
    90  	db.configure(metricsPrefix)
    91  
    92  	// Create a quit channel for the periodic metrics collector and run it
    93  	db.quit = make(chan struct{})
    94  
    95  	go db.meter(10 * time.Second)
    96  
    97  	return db, nil
    98  }
    99  
   100  // Put wraps LevelDB Put method to increment metrics counter.
   101  func (db *DB) Put(key []byte, value []byte) (err error) {
   102  	err = db.ldb.Put(key, value, nil)
   103  	if err != nil {
   104  		metrics.GetOrRegisterCounter("DB.putFail", nil).Inc(1)
   105  		return err
   106  	}
   107  	metrics.GetOrRegisterCounter("DB.put", nil).Inc(1)
   108  	return nil
   109  }
   110  
   111  // Get wraps LevelDB Get method to increment metrics counter.
   112  func (db *DB) Get(key []byte) (value []byte, err error) {
   113  	value, err = db.ldb.Get(key, nil)
   114  	if err != nil {
   115  		if err == leveldb.ErrNotFound {
   116  			metrics.GetOrRegisterCounter("DB.getNotFound", nil).Inc(1)
   117  		} else {
   118  			metrics.GetOrRegisterCounter("DB.getFail", nil).Inc(1)
   119  		}
   120  		return nil, err
   121  	}
   122  	metrics.GetOrRegisterCounter("DB.get", nil).Inc(1)
   123  	return value, nil
   124  }
   125  
   126  // Has wraps LevelDB Has method to increment metrics counter.
   127  func (db *DB) Has(key []byte) (yes bool, err error) {
   128  	yes, err = db.ldb.Has(key, nil)
   129  	if err != nil {
   130  		metrics.GetOrRegisterCounter("DB.hasFail", nil).Inc(1)
   131  		return false, err
   132  	}
   133  	metrics.GetOrRegisterCounter("DB.has", nil).Inc(1)
   134  	return yes, nil
   135  }
   136  
   137  // Delete wraps LevelDB Delete method to increment metrics counter.
   138  func (db *DB) Delete(key []byte) (err error) {
   139  	err = db.ldb.Delete(key, nil)
   140  	if err != nil {
   141  		metrics.GetOrRegisterCounter("DB.deleteFail", nil).Inc(1)
   142  		return err
   143  	}
   144  	metrics.GetOrRegisterCounter("DB.delete", nil).Inc(1)
   145  	return nil
   146  }
   147  
   148  // NewIterator wraps LevelDB NewIterator method to increment metrics counter.
   149  func (db *DB) NewIterator() iterator.Iterator {
   150  	metrics.GetOrRegisterCounter("DB.newiterator", nil).Inc(1)
   151  
   152  	return db.ldb.NewIterator(nil, nil)
   153  }
   154  
   155  // WriteBatch wraps LevelDB Write method to increment metrics counter.
   156  func (db *DB) WriteBatch(batch *leveldb.Batch) (err error) {
   157  	err = db.ldb.Write(batch, nil)
   158  	if err != nil {
   159  		metrics.GetOrRegisterCounter("DB.writebatchFail", nil).Inc(1)
   160  		return err
   161  	}
   162  	metrics.GetOrRegisterCounter("DB.writebatch", nil).Inc(1)
   163  	return nil
   164  }
   165  
   166  // Close closes LevelDB database.
   167  func (db *DB) Close() (err error) {
   168  	close(db.quit)
   169  	return db.ldb.Close()
   170  }
   171  
   172  // Configure configures the database metrics collectors
   173  func (db *DB) configure(prefix string) {
   174  	// Initialize all the metrics collector at the requested prefix
   175  	db.compTimeMeter = metrics.NewRegisteredMeter(prefix+"compact/time", nil)
   176  	db.compReadMeter = metrics.NewRegisteredMeter(prefix+"compact/input", nil)
   177  	db.compWriteMeter = metrics.NewRegisteredMeter(prefix+"compact/output", nil)
   178  	db.diskReadMeter = metrics.NewRegisteredMeter(prefix+"disk/read", nil)
   179  	db.diskWriteMeter = metrics.NewRegisteredMeter(prefix+"disk/write", nil)
   180  	db.writeDelayMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/duration", nil)
   181  	db.writeDelayNMeter = metrics.NewRegisteredMeter(prefix+"compact/writedelay/counter", nil)
   182  }
   183  
   184  func (db *DB) meter(refresh time.Duration) {
   185  	// Create the counters to store current and previous compaction values
   186  	compactions := make([][]float64, 2)
   187  	for i := 0; i < 2; i++ {
   188  		compactions[i] = make([]float64, 3)
   189  	}
   190  	// Create storage for iostats.
   191  	var iostats [2]float64
   192  
   193  	// Create storage and warning log tracer for write delay.
   194  	var (
   195  		delaystats      [2]int64
   196  		lastWritePaused time.Time
   197  	)
   198  
   199  	// Iterate ad infinitum and collect the stats
   200  	for i := 1; true; i++ {
   201  		// Retrieve the database stats
   202  		stats, err := db.ldb.GetProperty("leveldb.stats")
   203  		if err != nil {
   204  			log.Error("Failed to read database stats", "err", err)
   205  			continue
   206  		}
   207  		// Find the compaction table, skip the header
   208  		lines := strings.Split(stats, "\n")
   209  		for len(lines) > 0 && strings.TrimSpace(lines[0]) != "Compactions" {
   210  			lines = lines[1:]
   211  		}
   212  		if len(lines) <= 3 {
   213  			log.Error("Compaction table not found")
   214  			continue
   215  		}
   216  		lines = lines[3:]
   217  
   218  		// Iterate over all the table rows, and accumulate the entries
   219  		for j := 0; j < len(compactions[i%2]); j++ {
   220  			compactions[i%2][j] = 0
   221  		}
   222  		for _, line := range lines {
   223  			parts := strings.Split(line, "|")
   224  			if len(parts) != 6 {
   225  				break
   226  			}
   227  			for idx, counter := range parts[3:] {
   228  				value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64)
   229  				if err != nil {
   230  					log.Error("Compaction entry parsing failed", "err", err)
   231  					continue
   232  				}
   233  				compactions[i%2][idx] += value
   234  			}
   235  		}
   236  		// Update all the requested meters
   237  		if db.compTimeMeter != nil {
   238  			db.compTimeMeter.Mark(int64((compactions[i%2][0] - compactions[(i-1)%2][0]) * 1000 * 1000 * 1000))
   239  		}
   240  		if db.compReadMeter != nil {
   241  			db.compReadMeter.Mark(int64((compactions[i%2][1] - compactions[(i-1)%2][1]) * 1024 * 1024))
   242  		}
   243  		if db.compWriteMeter != nil {
   244  			db.compWriteMeter.Mark(int64((compactions[i%2][2] - compactions[(i-1)%2][2]) * 1024 * 1024))
   245  		}
   246  
   247  		// Retrieve the write delay statistic
   248  		writedelay, err := db.ldb.GetProperty("leveldb.writedelay")
   249  		if err != nil {
   250  			log.Error("Failed to read database write delay statistic", "err", err)
   251  			continue
   252  		}
   253  		var (
   254  			delayN        int64
   255  			delayDuration string
   256  			duration      time.Duration
   257  			paused        bool
   258  		)
   259  		if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil {
   260  			log.Error("Write delay statistic not found")
   261  			continue
   262  		}
   263  		duration, err = time.ParseDuration(delayDuration)
   264  		if err != nil {
   265  			log.Error("Failed to parse delay duration", "err", err)
   266  			continue
   267  		}
   268  		if db.writeDelayNMeter != nil {
   269  			db.writeDelayNMeter.Mark(delayN - delaystats[0])
   270  		}
   271  		if db.writeDelayMeter != nil {
   272  			db.writeDelayMeter.Mark(duration.Nanoseconds() - delaystats[1])
   273  		}
   274  		// If a warning that db is performing compaction has been displayed, any subsequent
   275  		// warnings will be withheld for one minute not to overwhelm the user.
   276  		if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 &&
   277  			time.Now().After(lastWritePaused.Add(writePauseWarningThrottler)) {
   278  			log.Warn("Database compacting, degraded performance")
   279  			lastWritePaused = time.Now()
   280  		}
   281  		delaystats[0], delaystats[1] = delayN, duration.Nanoseconds()
   282  
   283  		// Retrieve the database iostats.
   284  		ioStats, err := db.ldb.GetProperty("leveldb.iostats")
   285  		if err != nil {
   286  			log.Error("Failed to read database iostats", "err", err)
   287  			continue
   288  		}
   289  		var nRead, nWrite float64
   290  		parts := strings.Split(ioStats, " ")
   291  		if len(parts) < 2 {
   292  			log.Error("Bad syntax of ioStats", "ioStats", ioStats)
   293  			continue
   294  		}
   295  		if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil {
   296  			log.Error("Bad syntax of read entry", "entry", parts[0])
   297  			continue
   298  		}
   299  		if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil {
   300  			log.Error("Bad syntax of write entry", "entry", parts[1])
   301  			continue
   302  		}
   303  		if db.diskReadMeter != nil {
   304  			db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024))
   305  		}
   306  		if db.diskWriteMeter != nil {
   307  			db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024))
   308  		}
   309  		iostats[0], iostats[1] = nRead, nWrite
   310  
   311  		// Sleep a bit, then repeat the stats collection
   312  		select {
   313  		case <-db.quit:
   314  			// Quit requesting, stop hammering the database
   315  			return
   316  		case <-time.After(refresh):
   317  			// Timeout, gather a new set of stats
   318  		}
   319  	}
   320  }