github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/rawdb/freezer.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"errors"
	"fmt"
	"math"
	"os"
	"path/filepath"
	"sync"
	"sync/atomic"
	"time"

	"github.com/prometheus/tsdb/fileutil"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/metrics"
	"github.com/scroll-tech/go-ethereum/params"
)

var (
	// errReadOnly is returned if the freezer is opened in read only mode. All the
	// mutations are disallowed.
	errReadOnly = errors.New("read only")

	// errUnknownTable is returned if the user attempts to read from a table that is
	// not tracked by the freezer.
	errUnknownTable = errors.New("unknown table")

	// errOutOrderInsertion is returned if the user attempts to inject out-of-order
	// binary blobs into the freezer.
	errOutOrderInsertion = errors.New("the append operation is out-order")

	// errSymlinkDatadir is returned if the ancient directory specified by the user
	// is a symbolic link.
	errSymlinkDatadir = errors.New("symbolic link datadir is not supported")
)

const (
	// freezerRecheckInterval is the frequency to check the key-value database for
	// chain progression that might permit new blocks to be frozen into immutable
	// storage.
	freezerRecheckInterval = time.Minute

	// freezerBatchLimit is the maximum number of blocks to freeze in one batch
	// before doing an fsync and deleting it from the key-value store.
	freezerBatchLimit = 30000

	// freezerTableSize defines the maximum size of freezer data files.
	freezerTableSize = 2 * 1000 * 1000 * 1000
)

// freezer is a memory-mapped, append-only database to store immutable chain data
// into flat files:
//
// - The append-only nature ensures that disk writes are minimized.
// - The memory mapping ensures we can max out system memory for caching without
//   reserving it for go-ethereum. This would also reduce the memory requirements
//   of Geth, and thus also GC overhead.
type freezer struct {
	// WARNING: The `frozen` field is accessed atomically. On 32 bit platforms, only
	// 64-bit aligned fields can be atomic. The struct is guaranteed to be so aligned,
	// so take advantage of that (https://golang.org/pkg/sync/atomic/#pkg-note-BUG).
	frozen    uint64 // Number of blocks already frozen
	threshold uint64 // Number of recent blocks not to freeze (params.FullImmutabilityThreshold apart from tests)

	// This lock synchronizes writers and the truncate operation, as well as
	// the "atomic" (batched) read operations.
	writeLock  sync.RWMutex
	writeBatch *freezerBatch

	readonly     bool
	tables       map[string]*freezerTable // Data tables for storing everything
	instanceLock fileutil.Releaser        // File-system lock to prevent double opens

	trigger chan chan struct{} // Manual blocking freeze trigger, test determinism

	quit      chan struct{}
	wg        sync.WaitGroup
	closeOnce sync.Once
}

// newFreezer creates a chain freezer that moves ancient chain data into
// append-only flat file containers.
//
// The 'tables' argument defines the data tables. If the value of a map
// entry is true, snappy compression is disabled for the table.
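//
// As a rough usage sketch (not the canonical table set), a freezer with a
// snappy-compressed header table and an uncompressed hash table could be
// opened like this, where datadir and namespace are supplied by the caller:
//
//	tables := map[string]bool{
//		freezerHeaderTable: false, // snappy compression enabled
//		freezerHashTable:   true,  // snappy compression disabled
//	}
//	f, err := newFreezer(datadir, namespace, false, freezerTableSize, tables)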
func newFreezer(datadir string, namespace string, readonly bool, maxTableSize uint32, tables map[string]bool) (*freezer, error) {
	// Create the initial freezer object
	var (
		readMeter  = metrics.NewRegisteredMeter(namespace+"ancient/read", nil)
		writeMeter = metrics.NewRegisteredMeter(namespace+"ancient/write", nil)
		sizeGauge  = metrics.NewRegisteredGauge(namespace+"ancient/size", nil)
	)
	// Ensure the datadir is not a symbolic link if it exists.
	if info, err := os.Lstat(datadir); !os.IsNotExist(err) {
		if info.Mode()&os.ModeSymlink != 0 {
			log.Warn("Symbolic link ancient database is not supported", "path", datadir)
			return nil, errSymlinkDatadir
		}
	}
	// Leveldb uses LOCK as the filelock filename. To prevent the
	// name collision, we use FLOCK as the lock name.
	lock, _, err := fileutil.Flock(filepath.Join(datadir, "FLOCK"))
	if err != nil {
		return nil, err
	}
	// Open all the supported data tables
	freezer := &freezer{
		readonly:     readonly,
		threshold:    params.FullImmutabilityThreshold,
		tables:       make(map[string]*freezerTable),
		instanceLock: lock,
		trigger:      make(chan chan struct{}),
		quit:         make(chan struct{}),
	}

	// Create the tables.
	for name, disableSnappy := range tables {
		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy)
		if err != nil {
			for _, table := range freezer.tables {
				table.Close()
			}
			lock.Release()
			return nil, err
		}
		freezer.tables[name] = table
	}

	// Truncate all tables to common length.
	if err := freezer.repair(); err != nil {
		for _, table := range freezer.tables {
			table.Close()
		}
		lock.Release()
		return nil, err
	}

	// Create the write batch.
	freezer.writeBatch = newFreezerBatch(freezer)

	log.Info("Opened ancient database", "database", datadir, "readonly", readonly)
	return freezer, nil
}

// Close terminates the chain freezer, unmapping all the data files.
func (f *freezer) Close() error {
	f.writeLock.Lock()
	defer f.writeLock.Unlock()

	var errs []error
	f.closeOnce.Do(func() {
		close(f.quit)
		// Wait for any background freezing to stop
		f.wg.Wait()
		for _, table := range f.tables {
			if err := table.Close(); err != nil {
				errs = append(errs, err)
			}
		}
		if err := f.instanceLock.Release(); err != nil {
			errs = append(errs, err)
		}
	})
	if errs != nil {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// HasAncient reports whether the specified ancient data exists in the freezer.
func (f *freezer) HasAncient(kind string, number uint64) (bool, error) {
	if table := f.tables[kind]; table != nil {
		return table.has(number), nil
	}
	return false, nil
}

// Ancient retrieves an ancient binary blob from the append-only immutable files.
func (f *freezer) Ancient(kind string, number uint64) ([]byte, error) {
	if table := f.tables[kind]; table != nil {
		return table.Retrieve(number)
	}
	return nil, errUnknownTable
}

// AncientRange retrieves multiple items in sequence, starting from the index 'start'.
// It will return
//   - at most 'count' items,
//   - at least 1 item (even if exceeding the maxBytes limit), but will otherwise
//     return as many items as fit into maxBytes.
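//
// For example, fetching up to 100 consecutive headers starting at item 1,
// capped at roughly 1 MiB of encoded payload, might look like:
//
//	blobs, err := f.AncientRange(freezerHeaderTable, 1, 100, 1024*1024)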
func (f *freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) {
	if table := f.tables[kind]; table != nil {
		return table.RetrieveItems(start, count, maxBytes)
	}
	return nil, errUnknownTable
}

// Ancients returns the number of items currently frozen in the freezer.
func (f *freezer) Ancients() (uint64, error) {
	return atomic.LoadUint64(&f.frozen), nil
}

// AncientSize returns the ancient size of the specified category.
func (f *freezer) AncientSize(kind string) (uint64, error) {
	// This needs the write lock (taken in read mode) to avoid data races on table fields.
	// Speed doesn't matter here, AncientSize is for debugging.
	f.writeLock.RLock()
	defer f.writeLock.RUnlock()

	if table := f.tables[kind]; table != nil {
		return table.size()
	}
	return 0, errUnknownTable
}

// ReadAncients runs the given read operation while ensuring that no writes take place
// on the underlying freezer.
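//
// For example, reading a header and body for the same item under one
// consistent view (with 'number' supplied by the caller) might look like:
//
//	var header, body []byte
//	err := f.ReadAncients(func(reader ethdb.AncientReader) error {
//		var err error
//		if header, err = reader.Ancient(freezerHeaderTable, number); err != nil {
//			return err
//		}
//		body, err = reader.Ancient(freezerBodiesTable, number)
//		return err
//	})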
func (f *freezer) ReadAncients(fn func(ethdb.AncientReader) error) (err error) {
	f.writeLock.RLock()
	defer f.writeLock.RUnlock()
	return fn(f)
}

// ModifyAncients runs the given write operation. If the operation fails, all
// tables are rolled back to their prior length.
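//
// For example, appending a single raw hash entry for the next item might
// look like (with 'number' and 'hash' supplied by the caller):
//
//	_, err := f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
//		return op.AppendRaw(freezerHashTable, number, hash[:])
//	})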
func (f *freezer) ModifyAncients(fn func(ethdb.AncientWriteOp) error) (writeSize int64, err error) {
	if f.readonly {
		return 0, errReadOnly
	}
	f.writeLock.Lock()
	defer f.writeLock.Unlock()

	// Roll back all tables to the starting position in case of error.
	prevItem := f.frozen
	defer func() {
		if err != nil {
			// The write operation has failed. Go back to the previous item position.
			for name, table := range f.tables {
				err := table.truncate(prevItem)
				if err != nil {
					log.Error("Freezer table roll-back failed", "table", name, "index", prevItem, "err", err)
				}
			}
		}
	}()

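	// Reset the shared write batch and run the caller's write operation against it.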
	f.writeBatch.reset()
	if err := fn(f.writeBatch); err != nil {
		return 0, err
	}
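	// Commit the batch, flushing the buffered data to the tables, and advance
	// the frozen item count on success.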
	item, writeSize, err := f.writeBatch.commit()
	if err != nil {
		return 0, err
	}
	atomic.StoreUint64(&f.frozen, item)
	return writeSize, nil
}

// TruncateAncients discards any recent data above the provided threshold number.
func (f *freezer) TruncateAncients(items uint64) error {
	if f.readonly {
		return errReadOnly
	}
	f.writeLock.Lock()
	defer f.writeLock.Unlock()

	if atomic.LoadUint64(&f.frozen) <= items {
		return nil
	}
	for _, table := range f.tables {
		if err := table.truncate(items); err != nil {
			return err
		}
	}
	atomic.StoreUint64(&f.frozen, items)
	return nil
}

// Sync flushes all data tables to disk.
func (f *freezer) Sync() error {
	var errs []error
	for _, table := range f.tables {
		if err := table.Sync(); err != nil {
			errs = append(errs, err)
		}
	}
	if errs != nil {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// repair truncates all data tables to the same length.
func (f *freezer) repair() error {
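	// Determine the minimum number of items present across all tables.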
	min := uint64(math.MaxUint64)
	for _, table := range f.tables {
		items := atomic.LoadUint64(&table.items)
		if min > items {
			min = items
		}
	}
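	// Truncate every table down to the shortest one so all tables share a common length.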
	for _, table := range f.tables {
		if err := table.truncate(min); err != nil {
			return err
		}
	}
	atomic.StoreUint64(&f.frozen, min)
	return nil
}

// freeze is a background thread that periodically checks the blockchain for any
// import progress and moves ancient data from the fast database into the freezer.
//
// This functionality is deliberately broken off from block importing to avoid
// incurring additional data shuffling delays on block propagation.
func (f *freezer) freeze(db ethdb.KeyValueStore) {
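	// Wrap the key-value store so that the chain accessors below read from it
	// directly, without consulting the freezer itself.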
	nfdb := &nofreezedb{KeyValueStore: db}

	var (
		backoff   bool
		triggered chan struct{} // Used in tests
	)
	for {
		select {
		case <-f.quit:
			log.Info("Freezer shutting down")
			return
		default:
		}
		if backoff {
			// If we were doing a manual trigger, notify it
			if triggered != nil {
				triggered <- struct{}{}
				triggered = nil
			}
			select {
			case <-time.NewTimer(freezerRecheckInterval).C:
				backoff = false
			case triggered = <-f.trigger:
				backoff = false
			case <-f.quit:
				return
			}
		}
		// Retrieve the current chain head and the freezing threshold.
		hash := ReadHeadBlockHash(nfdb)
		if hash == (common.Hash{}) {
			log.Debug("Current full block hash unavailable") // new chain, empty database
			backoff = true
			continue
		}
		number := ReadHeaderNumber(nfdb, hash)
		threshold := atomic.LoadUint64(&f.threshold)

		switch {
		case number == nil:
			log.Error("Current full block number unavailable", "hash", hash)
			backoff = true
			continue

		case *number < threshold:
			log.Debug("Current full block not old enough", "number", *number, "hash", hash, "delay", threshold)
			backoff = true
			continue

		case *number-threshold <= f.frozen:
			log.Debug("Ancient blocks frozen already", "number", *number, "hash", hash, "frozen", f.frozen)
			backoff = true
			continue
		}
		head := ReadHeader(nfdb, hash, *number)
		if head == nil {
			log.Error("Current full block unavailable", "number", *number, "hash", hash)
			backoff = true
			continue
		}

		// Seems we have data ready to be frozen, process in usable batches
		var (
			start    = time.Now()
			first, _ = f.Ancients()
			limit    = *number - threshold
		)
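		// Limit the batch size to avoid freezing an unbounded number of blocks in one go.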
		if limit-first > freezerBatchLimit {
			limit = first + freezerBatchLimit
		}
		ancients, err := f.freezeRange(nfdb, first, limit)
		if err != nil {
			log.Error("Error in block freeze operation", "err", err)
			backoff = true
			continue
		}

		// The batch of blocks has been frozen; flush the tables before wiping the blocks from leveldb
		if err := f.Sync(); err != nil {
			log.Crit("Failed to flush frozen tables", "err", err)
		}

		// Wipe out all data from the active database
		batch := db.NewBatch()
		for i := 0; i < len(ancients); i++ {
			// Always keep the genesis block in active database
			if first+uint64(i) != 0 {
				DeleteBlockWithoutNumber(batch, ancients[i], first+uint64(i))
				DeleteCanonicalHash(batch, first+uint64(i))
			}
		}
		if err := batch.Write(); err != nil {
			log.Crit("Failed to delete frozen canonical blocks", "err", err)
		}
		batch.Reset()

		// Wipe out side chains also and track dangling side chains
		var dangling []common.Hash
		for number := first; number < f.frozen; number++ {
			// Always keep the genesis block in active database
			if number != 0 {
				dangling = ReadAllHashes(db, number)
				for _, hash := range dangling {
					log.Trace("Deleting side chain", "number", number, "hash", hash)
					DeleteBlock(batch, hash, number)
				}
			}
		}
		if err := batch.Write(); err != nil {
			log.Crit("Failed to delete frozen side blocks", "err", err)
		}
		batch.Reset()

		// Step into the future and delete any dangling side chains
		if f.frozen > 0 {
			tip := f.frozen
			for len(dangling) > 0 {
				drop := make(map[common.Hash]struct{})
				for _, hash := range dangling {
					log.Debug("Dangling parent from freezer", "number", tip-1, "hash", hash)
					drop[hash] = struct{}{}
				}
				children := ReadAllHashes(db, tip)
				for i := 0; i < len(children); i++ {
					// Dig up the child and ensure it's dangling
					child := ReadHeader(nfdb, children[i], tip)
					if child == nil {
						log.Error("Missing dangling header", "number", tip, "hash", children[i])
						continue
					}
					if _, ok := drop[child.ParentHash]; !ok {
						children = append(children[:i], children[i+1:]...)
						i--
						continue
					}
					// Delete all block data associated with the child
					log.Debug("Deleting dangling block", "number", tip, "hash", children[i], "parent", child.ParentHash)
					DeleteBlock(batch, children[i], tip)
				}
				dangling = children
				tip++
			}
			if err := batch.Write(); err != nil {
				log.Crit("Failed to delete dangling side blocks", "err", err)
			}
		}

		// Log something friendly for the user
		context := []interface{}{
			"blocks", f.frozen - first, "elapsed", common.PrettyDuration(time.Since(start)), "number", f.frozen - 1,
		}
		if n := len(ancients); n > 0 {
			context = append(context, []interface{}{"hash", ancients[n-1]}...)
		}
		log.Info("Deep froze chain segment", context...)

		// Avoid database thrashing with tiny writes
		if f.frozen-first < freezerBatchLimit {
			backoff = true
		}
	}
}

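// freezeRange reads the canonical blocks in the range [number, limit] from the
// key-value store and appends them to the freezer tables, returning the hashes
// of the frozen blocks.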
func (f *freezer) freezeRange(nfdb *nofreezedb, number, limit uint64) (hashes []common.Hash, err error) {
	hashes = make([]common.Hash, 0, limit-number)

	_, err = f.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for ; number <= limit; number++ {
			// Retrieve all the components of the canonical block.
			hash := ReadCanonicalHash(nfdb, number)
			if hash == (common.Hash{}) {
				return fmt.Errorf("canonical hash missing, can't freeze block %d", number)
			}
			header := ReadHeaderRLP(nfdb, hash, number)
			if len(header) == 0 {
				return fmt.Errorf("block header missing, can't freeze block %d", number)
			}
			body := ReadBodyRLP(nfdb, hash, number)
			if len(body) == 0 {
				return fmt.Errorf("block body missing, can't freeze block %d", number)
			}
			receipts := ReadReceiptsRLP(nfdb, hash, number)
			if len(receipts) == 0 {
				return fmt.Errorf("block receipts missing, can't freeze block %d", number)
			}
			td := ReadTdRLP(nfdb, hash, number)
			if len(td) == 0 {
				return fmt.Errorf("total difficulty missing, can't freeze block %d", number)
			}

			// Write to the batch.
			if err := op.AppendRaw(freezerHashTable, number, hash[:]); err != nil {
				return fmt.Errorf("can't write hash to freezer: %v", err)
			}
			if err := op.AppendRaw(freezerHeaderTable, number, header); err != nil {
				return fmt.Errorf("can't write header to freezer: %v", err)
			}
			if err := op.AppendRaw(freezerBodiesTable, number, body); err != nil {
				return fmt.Errorf("can't write body to freezer: %v", err)
			}
			if err := op.AppendRaw(freezerReceiptTable, number, receipts); err != nil {
				return fmt.Errorf("can't write receipts to freezer: %v", err)
			}
			if err := op.AppendRaw(freezerDifficultyTable, number, td); err != nil {
				return fmt.Errorf("can't write td to freezer: %v", err)
			}

			hashes = append(hashes, hash)
		}
		return nil
	})

	return hashes, err
}