github.com/btcsuite/btcd@v0.24.0/blockchain/utxocache.go (about)

     1  // Copyright (c) 2023 The btcsuite developers
     2  // Use of this source code is governed by an ISC
     3  // license that can be found in the LICENSE file.
     4  
     5  package blockchain
     6  
     7  import (
     8  	"container/list"
     9  	"fmt"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/btcsuite/btcd/btcutil"
    14  	"github.com/btcsuite/btcd/chaincfg/chainhash"
    15  	"github.com/btcsuite/btcd/database"
    16  	"github.com/btcsuite/btcd/txscript"
    17  	"github.com/btcsuite/btcd/wire"
    18  )
    19  
    20  // mapSlice is a slice of maps for utxo entries.  The slice of maps is needed to
    21  // guarantee that the maps will only take up N bytes.  As of Go 1.20, the
    22  // runtime allocates 2^N plus a few extra buckets, meaning that for large N we'd
    23  // allocate a lot of extra memory if the number of entries goes over the previously
    24  // allocated buckets.  A slice of maps gives us better control over how much
    25  // total memory is allocated by all the maps combined.
    26  type mapSlice struct {
    27  	// mtx protects against concurrent access for the map slice.
    28  	mtx sync.Mutex
    29  
    30  	// maps are the underlying maps in the slice of maps.
    31  	maps []map[wire.OutPoint]*UtxoEntry
    32  
    33  	// maxEntries is the maximum number of elements that each map is allocated for.
    34  	maxEntries []int
    35  
    36  	// maxTotalMemoryUsage is the maximum memory usage in bytes that the state
    37  	// should contain in normal circumstances.
    38  	maxTotalMemoryUsage uint64
    39  }
    40  
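// Illustrative sketch (not part of the original file): how the mapSlice
// methods defined below fit together.  The 1000-entry limit and the 1 MiB
// budget are made-up values for the example.
func exampleMapSliceUsage(op wire.OutPoint, entry *UtxoEntry) {
	ms := mapSlice{
		maps:                []map[wire.OutPoint]*UtxoEntry{make(map[wire.OutPoint]*UtxoEntry)},
		maxEntries:          []int{1000},
		maxTotalMemoryUsage: 1 << 20,
	}

	// put overwrites an existing entry, inserts into the first map that has
	// room, or spills into a newly allocated map once every existing map is
	// full.
	ms.put(op, entry, 0)

	// get searches every map and reports whether the outpoint was found.
	if cached, ok := ms.get(op); ok {
		_ = cached
	}

	// delete removes the outpoint from whichever map holds it.
	ms.delete(op)
}
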
    41  // length returns the length of all the maps in the map slice added together.
    42  //
    43  // This function is safe for concurrent access.
    44  func (ms *mapSlice) length() int {
    45  	ms.mtx.Lock()
    46  	defer ms.mtx.Unlock()
    47  
    48  	var l int
    49  	for _, m := range ms.maps {
    50  		l += len(m)
    51  	}
    52  
    53  	return l
    54  }
    55  
    56  // size returns the size in bytes of all the maps in the map slice added together.
    57  //
    58  // This function is safe for concurrent access.
    59  func (ms *mapSlice) size() int {
    60  	ms.mtx.Lock()
    61  	defer ms.mtx.Unlock()
    62  
    63  	var size int
    64  	for _, num := range ms.maxEntries {
    65  		size += calculateRoughMapSize(num, bucketSize)
    66  	}
    67  
    68  	return size
    69  }
    70  
    71  // get looks for the outpoint in all the maps in the map slice and returns
    72  // the entry.  nil and false are returned if the outpoint is not found.
    73  //
    74  // This function is safe for concurrent access.
    75  func (ms *mapSlice) get(op wire.OutPoint) (*UtxoEntry, bool) {
    76  	ms.mtx.Lock()
    77  	defer ms.mtx.Unlock()
    78  
    79  	var entry *UtxoEntry
    80  	var found bool
    81  
    82  	for _, m := range ms.maps {
    83  		entry, found = m[op]
    84  		if found {
    85  			return entry, found
    86  		}
    87  	}
    88  
    89  	return nil, false
    90  }
    91  
    92  // put puts the outpoint and the entry into one of the maps in the map slice.  If the
    93  // existing maps are all full, it will allocate a new map based on how much memory we
    94  // have left over.  Leftover memory is calculated as:
    95  // maxTotalMemoryUsage - (totalEntryMemory + mapSlice.size())
    96  //
    97  // This function is safe for concurrent access.
    98  func (ms *mapSlice) put(op wire.OutPoint, entry *UtxoEntry, totalEntryMemory uint64) {
    99  	ms.mtx.Lock()
   100  	defer ms.mtx.Unlock()
   101  
   102  	for i, maxNum := range ms.maxEntries {
   103  		m := ms.maps[i]
   104  		_, found := m[op]
   105  		if found {
   106  			// If the key is found, overwrite it.
   107  			m[op] = entry
   108  			return // Return as we were successful in adding the entry.
   109  		}
   110  		if len(m) >= maxNum {
   111  			// Don't try to insert if the map is already at max since
   112  			// that'll force the map to allocate double the memory it's
   113  			// currently taking up.
   114  			continue
   115  		}
   116  
   117  		m[op] = entry
   118  		return // Return as we were successful in adding the entry.
   119  	}
   120  
   121  	// We only reach this code if we've failed to insert into the map above as
   122  	// all the current maps were full.  We thus make a new map and insert into
   123  	// it.
   124  	m := ms.makeNewMap(totalEntryMemory)
   125  	m[op] = entry
   126  }
   127  
   128  // delete attempts to delete the given outpoint in all of the maps. No-op if the
   129  // outpoint doesn't exist.
   130  //
   131  // This function is safe for concurrent access.
   132  func (ms *mapSlice) delete(op wire.OutPoint) {
   133  	ms.mtx.Lock()
   134  	defer ms.mtx.Unlock()
   135  
   136  	for i := 0; i < len(ms.maps); i++ {
   137  		delete(ms.maps[i], op)
   138  	}
   139  }
   140  
   141  // makeNewMap makes and appends the new map into the map slice.
   142  //
   143  // This function is NOT safe for concurrent access and must be called with the
   144  // lock held.
   145  func (ms *mapSlice) makeNewMap(totalEntryMemory uint64) map[wire.OutPoint]*UtxoEntry {
   146  	// Get the size of the leftover memory.
   147  	memSize := ms.maxTotalMemoryUsage - totalEntryMemory
   148  	for _, maxNum := range ms.maxEntries {
   149  		memSize -= uint64(calculateRoughMapSize(maxNum, bucketSize))
   150  	}
   151  
   152  	// Get a new map that's sized to fit inside the leftover memory.
   153  	// Subtracting 1 from the returned value makes the map allocate half as
   154  	// many total bytes.  This is done to make sure there's still room left
   155  	// for the utxo entries themselves to take up.
   156  	numMaxElements := calculateMinEntries(int(memSize), bucketSize+avgEntrySize)
   157  	numMaxElements -= 1
   158  	ms.maxEntries = append(ms.maxEntries, numMaxElements)
   159  	ms.maps = append(ms.maps, make(map[wire.OutPoint]*UtxoEntry, numMaxElements))
   160  
   161  	return ms.maps[len(ms.maps)-1]
   162  }
   163  
   164  // deleteMaps deletes all maps except for the first one which should be the biggest.
   165  //
   166  // This function is safe for concurrent access.
   167  func (ms *mapSlice) deleteMaps() {
   168  	ms.mtx.Lock()
   169  	defer ms.mtx.Unlock()
   170  
   171  	size := ms.maxEntries[0]
   172  	ms.maxEntries = []int{size}
   173  	ms.maps = ms.maps[:1]
   174  }
   175  
   176  const (
   177  	// utxoFlushPeriodicInterval is the interval at which a flush is performed
   178  	// when the flush mode FlushPeriodic is used.  This is used when the initial
   179  	// block download is complete and it's useful to flush periodically in case
   180  	// of unforeseen shutdowns.
   181  	utxoFlushPeriodicInterval = time.Minute * 5
   182  )
   183  
   184  // FlushMode is used to indicate the different urgency types for a flush.
   185  type FlushMode uint8
   186  
   187  const (
   188  	// FlushRequired is the flush mode that means a flush must be performed
   189  	// regardless of the cache state.  For example right before shutting down.
   190  	FlushRequired FlushMode = iota
   191  
   192  	// FlushPeriodic is the flush mode that means a flush can be performed
   193  	// when it would be almost needed.  This is used to periodically signal when
   194  	// no I/O heavy operations are expected soon, so there is time to flush.
   195  	FlushPeriodic
   196  
   197  	// FlushIfNeeded is the flush mode that means a flush must be performed only
   198  	// if the cache is exceeding a safety threshold very close to its maximum
   199  	// size.  This is used mostly internally in between operations that can
   200  	// increase the cache size.
   201  	FlushIfNeeded
   202  )
   203  
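// Illustrative sketch (not part of the original file): one way a caller could
// drive these flush modes.  FlushPeriodic suits a timer-driven loop while
// FlushRequired is reserved for shutdown; the ticker and the quit channel are
// assumptions made for this example, and FlushUtxoCache is defined further
// down in this file.
func exampleFlushLoop(b *BlockChain, quit <-chan struct{}) error {
	ticker := time.NewTicker(utxoFlushPeriodicInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Only writes when the periodic interval has elapsed or
			// the cache has reached its memory limit.
			if err := b.FlushUtxoCache(FlushPeriodic); err != nil {
				return err
			}
		case <-quit:
			// Persist the cache regardless of its size before exiting.
			return b.FlushUtxoCache(FlushRequired)
		}
	}
}
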
   204  // utxoCache is a cached utxo view in the chainstate of a BlockChain.
   205  type utxoCache struct {
   206  	db database.DB
   207  
   208  	// maxTotalMemoryUsage is the maximum memory usage in bytes that the state
   209  	// should contain in normal circumstances.
   210  	maxTotalMemoryUsage uint64
   211  
   212  	// cachedEntries keeps the internal cache of the utxo state.  The tfModified
   213  	// flag indicates that the state of the entry (potentially) deviates from the
   214  	// state in the database.  Explicit nil values in the map are used to
   215  	// indicate that the database does not contain the entry.
   216  	cachedEntries    mapSlice
   217  	totalEntryMemory uint64 // Total memory usage in bytes.
   218  
   219  	// Below fields are used to indicate when the last flush happened.
   220  	lastFlushHash chainhash.Hash
   221  	lastFlushTime time.Time
   222  }
   223  
   224  // newUtxoCache initiates a new utxo cache instance with its memory usage limited
   225  // to the given maximum.
   226  func newUtxoCache(db database.DB, maxTotalMemoryUsage uint64) *utxoCache {
   227  	// While the entry isn't included in the map size, add the average size to the
   228  	// bucket size so we get some leftover space for entries to take up.
   229  	numMaxElements := calculateMinEntries(int(maxTotalMemoryUsage), bucketSize+avgEntrySize)
   230  	numMaxElements -= 1
   231  
   232  	log.Infof("Pre-allocating for %d MiB", maxTotalMemoryUsage/(1024*1024)+1)
   233  
   234  	m := make(map[wire.OutPoint]*UtxoEntry, numMaxElements)
   235  
   236  	return &utxoCache{
   237  		db:                  db,
   238  		maxTotalMemoryUsage: maxTotalMemoryUsage,
   239  		cachedEntries: mapSlice{
   240  			maps:                []map[wire.OutPoint]*UtxoEntry{m},
   241  			maxEntries:          []int{numMaxElements},
   242  			maxTotalMemoryUsage: maxTotalMemoryUsage,
   243  		},
   244  	}
   245  }
   246  
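// Illustrative sketch (not part of the original file): constructing the cache
// and inspecting its memory footprint.  The 100 MiB budget is an arbitrary
// value for the example; db is any open database.DB.
func exampleNewCache(db database.DB) uint64 {
	cache := newUtxoCache(db, 100*1024*1024)

	// The total counts the pre-allocated maps as well as the entries they
	// hold, so it is non-zero even while the cache is empty.
	return cache.totalMemoryUsage()
}
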
   247  // totalMemoryUsage returns the total memory usage in bytes of the UTXO cache.
   248  func (s *utxoCache) totalMemoryUsage() uint64 {
   249  	// Total memory is the map size + the size that the utxo entries are
   250  	// taking up.
   251  	size := uint64(s.cachedEntries.size())
   252  	size += s.totalEntryMemory
   253  
   254  	return size
   255  }
   256  
   257  // fetchEntries returns the UTXO entries for the given outpoints.  The function always
   258  // returns as many entries as there are outpoints and the returned entries are in the
   259  // same order as the outpoints.  It returns nil if there is no entry for the outpoint
   260  // in the UTXO set.
   261  //
   262  // The returned entries are NOT safe for concurrent access.
   263  func (s *utxoCache) fetchEntries(outpoints []wire.OutPoint) ([]*UtxoEntry, error) {
   264  	entries := make([]*UtxoEntry, len(outpoints))
   265  	var (
   266  		missingOps    []wire.OutPoint
   267  		missingOpsIdx []int
   268  	)
   269  	for i := range outpoints {
   270  		if entry, ok := s.cachedEntries.get(outpoints[i]); ok {
   271  			entries[i] = entry
   272  			continue
   273  		}
   274  
   275  		// At this point, we have missing outpoints.  Allocate the slices
   276  		// now so that we never allocate anything if the cache never misses.
   277  		if len(missingOps) == 0 {
   278  			missingOps = make([]wire.OutPoint, 0, len(outpoints))
   279  			missingOpsIdx = make([]int, 0, len(outpoints))
   280  		}
   281  
   282  		missingOpsIdx = append(missingOpsIdx, i)
   283  		missingOps = append(missingOps, outpoints[i])
   284  	}
   285  
   286  	// Return early and don't attempt to access the database if we don't have any
   287  	// missing outpoints.
   288  	if len(missingOps) == 0 {
   289  		return entries, nil
   290  	}
   291  
   292  	// Fetch the missing outpoints in the cache from the database.
   293  	dbEntries := make([]*UtxoEntry, len(missingOps))
   294  	err := s.db.View(func(dbTx database.Tx) error {
   295  		utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
   296  
   297  		for i := range missingOps {
   298  			entry, err := dbFetchUtxoEntry(dbTx, utxoBucket, missingOps[i])
   299  			if err != nil {
   300  				return err
   301  			}
   302  
   303  			dbEntries[i] = entry
   304  		}
   305  
   306  		return nil
   307  	})
   308  	if err != nil {
   309  		return nil, err
   310  	}
   311  
   312  	// Add each of the entries to the UTXO cache and update their memory
   313  	// usage.
   314  	//
   315  	// NOTE: When the fetched entry is nil, it is still added to the cache
   316  	// as a miss; this prevents future lookups from performing the same database
   317  	// fetch.
   318  	for i := range dbEntries {
   319  		s.cachedEntries.put(missingOps[i], dbEntries[i], s.totalEntryMemory)
   320  		s.totalEntryMemory += dbEntries[i].memoryUsage()
   321  	}
   322  
   323  	// Fill in the entries with the ones fetched from the database.
   324  	for i := range missingOpsIdx {
   325  		entries[missingOpsIdx[i]] = dbEntries[i]
   326  	}
   327  
   328  	return entries, nil
   329  }
   330  
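// Illustrative sketch (not part of the original file): gathering a
// transaction's previous outpoints and resolving them through fetchEntries.
// A nil entry in the result means the outpoint does not exist in the UTXO
// set; treating that as an error is this example's choice.
func exampleFetchInputs(s *utxoCache, tx *btcutil.Tx) error {
	outpoints := make([]wire.OutPoint, 0, len(tx.MsgTx().TxIn))
	for _, txIn := range tx.MsgTx().TxIn {
		outpoints = append(outpoints, txIn.PreviousOutPoint)
	}

	entries, err := s.fetchEntries(outpoints)
	if err != nil {
		return err
	}

	for i, entry := range entries {
		if entry == nil || entry.IsSpent() {
			return fmt.Errorf("missing or spent input %v", outpoints[i])
		}
	}

	return nil
}
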
   331  // addTxOut adds the specified output to the cache if it is not provably
   332  // unspendable.  When the cache already has an entry for the output, it will be
   333  // overwritten with the given output.  All fields will be updated for existing
   334  // entries since it's possible they have changed during a reorg.
   335  func (s *utxoCache) addTxOut(outpoint wire.OutPoint, txOut *wire.TxOut, isCoinBase bool,
   336  	blockHeight int32) error {
   337  
   338  	// Don't add provably unspendable outputs.
   339  	if txscript.IsUnspendable(txOut.PkScript) {
   340  		return nil
   341  	}
   342  
   343  	entry := new(UtxoEntry)
   344  	entry.amount = txOut.Value
   345  
   346  	// Deep copy the script when the script in the entry differs from the one in
   347  	// the txout.  This is required since the txout script is a subslice of the
   348  	// overall contiguous buffer that the msg tx houses for all scripts within
   349  	// the tx.  It is deep copied here since this entry may be added to the utxo
   350  	// cache, and we don't want the utxo cache holding the entry to prevent all
   351  	// of the other tx scripts from getting garbage collected.
   352  	entry.pkScript = make([]byte, len(txOut.PkScript))
   353  	copy(entry.pkScript, txOut.PkScript)
   354  
   355  	entry.blockHeight = blockHeight
   356  	entry.packedFlags = tfFresh | tfModified
   357  	if isCoinBase {
   358  		entry.packedFlags |= tfCoinBase
   359  	}
   360  
   361  	s.cachedEntries.put(outpoint, entry, s.totalEntryMemory)
   362  	s.totalEntryMemory += entry.memoryUsage()
   363  
   364  	return nil
   365  }
   366  
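// Illustrative sketch (not part of the original file): the deep copy performed
// by addTxOut above.  Retaining txOut.PkScript directly would keep alive the
// whole serialized transaction buffer it is a subslice of, so the cache only
// ever stores an owned copy.
func exampleCopyScript(txOut *wire.TxOut) []byte {
	pkScript := make([]byte, len(txOut.PkScript))
	copy(pkScript, txOut.PkScript)
	return pkScript
}
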
   367  // addTxOuts adds all outputs in the passed transaction which are not provably
   368  // unspendable to the view.  When the view already has entries for any of the
   369  // outputs, they are simply marked unspent.  All fields will be updated for
   370  // existing entries since it's possible they have changed during a reorg.
   371  func (s *utxoCache) addTxOuts(tx *btcutil.Tx, blockHeight int32) error {
   372  	// Loop all of the transaction outputs and add those which are not
   373  	// provably unspendable.
   374  	isCoinBase := IsCoinBase(tx)
   375  	prevOut := wire.OutPoint{Hash: *tx.Hash()}
   376  	for txOutIdx, txOut := range tx.MsgTx().TxOut {
   377  		// Update existing entries.  All fields are updated because it's
   378  		// possible (although extremely unlikely) that the existing
   379  		// entry is being replaced by a different transaction with the
   380  		// same hash.  This is allowed so long as the previous
   381  		// transaction is fully spent.
   382  		prevOut.Index = uint32(txOutIdx)
   383  		err := s.addTxOut(prevOut, txOut, isCoinBase, blockHeight)
   384  		if err != nil {
   385  			return err
   386  		}
   387  	}
   388  
   389  	return nil
   390  }
   391  
   392  // addTxIn will add the given input to the cache if the previous outpoint the txin
   393  // is pointing to exists in the utxo set.  The utxo that is being spent by the input
   394  // will be marked as spent and if the utxo is fresh (meaning that the database on disk
   395  // never saw it), it will be removed from the cache.
   396  func (s *utxoCache) addTxIn(txIn *wire.TxIn, stxos *[]SpentTxOut) error {
   397  	// Ensure the referenced utxo exists in the view.  This should
   398  	// never happen unless a bug is introduced in the code.
   399  	entries, err := s.fetchEntries([]wire.OutPoint{txIn.PreviousOutPoint})
   400  	if err != nil {
   401  		return err
   402  	}
   403  	if len(entries) != 1 || entries[0] == nil {
   404  		return AssertError(fmt.Sprintf("missing input %v",
   405  			txIn.PreviousOutPoint))
   406  	}
   407  
   408  	// Only create the stxo details if requested.
   409  	entry := entries[0]
   410  	if stxos != nil {
   411  		// Populate the stxo details using the utxo entry.
   412  		stxo := SpentTxOut{
   413  			Amount:     entry.Amount(),
   414  			PkScript:   entry.PkScript(),
   415  			Height:     entry.BlockHeight(),
   416  			IsCoinBase: entry.IsCoinBase(),
   417  		}
   418  
   419  		*stxos = append(*stxos, stxo)
   420  	}
   421  
   422  	// Mark the entry as spent.
   423  	entry.Spend()
   424  
   425  	// If an entry is fresh it indicates that this entry was spent before it could be
   426  	// flushed to the database. Because of this, we can just delete it from the map of
   427  	// cached entries.
   428  	if entry.isFresh() {
   429  		// If the entry is fresh, we will always have it in the cache.
   430  		s.cachedEntries.delete(txIn.PreviousOutPoint)
   431  		s.totalEntryMemory -= entry.memoryUsage()
   432  	} else {
   433  		// Can leave the entry to be garbage collected as the only purpose
   434  		// of this entry now is so that the entry on disk can be deleted.
   435  		entry = nil
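		// Note: memoryUsage returns 0 for a nil entry, so nothing is
		// subtracted here; the spent entry stays in the cache (and is
		// still counted) until the next flush removes it.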
   436  		s.totalEntryMemory -= entry.memoryUsage()
   437  	}
   438  
   439  	return nil
   440  }
   441  
   442  // addTxIns will add the given inputs of the tx if it's not a coinbase tx and if
   443  // the previous output that the input is pointing to exists in the utxo set.  The
   444  // utxo that is being spent by the input will be marked as spent and if the utxo
   445  // is fresh (meaning that the database on disk never saw it), it will be removed
   446  // from the cache.
   447  func (s *utxoCache) addTxIns(tx *btcutil.Tx, stxos *[]SpentTxOut) error {
   448  	// Coinbase transactions don't have any inputs to spend.
   449  	if IsCoinBase(tx) {
   450  		return nil
   451  	}
   452  
   453  	for _, txIn := range tx.MsgTx().TxIn {
   454  		err := s.addTxIn(txIn, stxos)
   455  		if err != nil {
   456  			return err
   457  		}
   458  	}
   459  
   460  	return nil
   461  }
   462  
   463  // connectTransaction updates the cache by adding all new utxos created by the
   464  // passed transaction and marking and/or removing all utxos that the transaction
   465  // spends as spent.  In addition, when the 'stxos' argument is not nil, it will
   466  // be updated to append an entry for each spent txout.  An error will be returned
   467  // if the cache and the database do not contain the required utxos.
   468  func (s *utxoCache) connectTransaction(
   469  	tx *btcutil.Tx, blockHeight int32, stxos *[]SpentTxOut) error {
   470  
   471  	err := s.addTxIns(tx, stxos)
   472  	if err != nil {
   473  		return err
   474  	}
   475  
   476  	// Add the transaction's outputs as available utxos.
   477  	return s.addTxOuts(tx, blockHeight)
   478  }
   479  
   480  // connectTransactions updates the cache by adding all new utxos created by all
   481  // of the transactions in the passed block and marking and/or removing all
   482  // utxos the transactions spend as spent.  In addition, when the 'stxos'
   483  // argument is not nil, it will be updated to append an entry for each spent
   484  // txout.
   485  func (s *utxoCache) connectTransactions(block *btcutil.Block, stxos *[]SpentTxOut) error {
   486  	for _, tx := range block.Transactions() {
   487  		err := s.connectTransaction(tx, block.Height(), stxos)
   488  		if err != nil {
   489  			return err
   490  		}
   491  	}
   492  
   493  	return nil
   494  }
   495  
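// Illustrative sketch (not part of the original file): connecting a block's
// transactions while collecting the spent outputs.  Callers that do not need
// the spent-output journal can pass nil instead of &stxos.
func exampleConnectBlock(s *utxoCache, block *btcutil.Block) ([]SpentTxOut, error) {
	// Collects one entry per txout spent by the block's transactions.
	var stxos []SpentTxOut
	if err := s.connectTransactions(block, &stxos); err != nil {
		return nil, err
	}

	return stxos, nil
}
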
   496  // writeCache writes all the entries that are cached in memory to the database atomically.
   497  func (s *utxoCache) writeCache(dbTx database.Tx, bestState *BestState) error {
   498  	// Update commits and flushes the cache to the database.
   499  	// NOTE: The database has its own cache which gets atomically written
   500  	// to leveldb.
   501  	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
   502  	for i := range s.cachedEntries.maps {
   503  		for outpoint, entry := range s.cachedEntries.maps[i] {
   504  			switch {
   505  			// If the entry is nil or spent, remove the entry from the database
   506  			// and the cache.
   507  			case entry == nil || entry.IsSpent():
   508  				err := dbDeleteUtxoEntry(utxoBucket, outpoint)
   509  				if err != nil {
   510  					return err
   511  				}
   512  
   513  			// No need to update the cache if the entry was not modified.
   514  			case !entry.isModified():
   515  			default:
   516  				// Entry is fresh and needs to be put into the database.
   517  				err := dbPutUtxoEntry(utxoBucket, outpoint, entry)
   518  				if err != nil {
   519  					return err
   520  				}
   521  			}
   522  
   523  			delete(s.cachedEntries.maps[i], outpoint)
   524  		}
   525  	}
   526  	s.cachedEntries.deleteMaps()
   527  	s.totalEntryMemory = 0
   528  
   529  	// When done, store the best state hash in the database to indicate the state
   530  	// is consistent until that hash.
   531  	err := dbPutUtxoStateConsistency(dbTx, &bestState.Hash)
   532  	if err != nil {
   533  		return err
   534  	}
   535  
   536  	// The best state is the new last flush hash.
   537  	s.lastFlushHash = bestState.Hash
   538  	s.lastFlushTime = time.Now()
   539  
   540  	return nil
   541  }
   542  
   543  // flush flushes the UTXO state to the database if a flush is needed with the given flush mode.
   544  //
   545  // This function MUST be called with the chain state lock held (for writes).
   546  func (s *utxoCache) flush(dbTx database.Tx, mode FlushMode, bestState *BestState) error {
   547  	var threshold uint64
   548  	switch mode {
   549  	case FlushRequired:
   550  		threshold = 0
   551  
   552  	case FlushIfNeeded:
   553  		// If we performed a flush in the current best state, we have nothing to do.
   554  		if bestState.Hash == s.lastFlushHash {
   555  			return nil
   556  		}
   557  
   558  		threshold = s.maxTotalMemoryUsage
   559  
   560  	case FlushPeriodic:
   561  		// If the time since the last flush is over the periodic interval,
   562  		// force a flush.  Otherwise just flush when the cache is full.
   563  		if time.Since(s.lastFlushTime) > utxoFlushPeriodicInterval {
   564  			threshold = 0
   565  		} else {
   566  			threshold = s.maxTotalMemoryUsage
   567  		}
   568  	}
   569  
   570  	if s.totalMemoryUsage() >= threshold {
   571  		// Add one to round up the integer division.
   572  		totalMiB := s.totalMemoryUsage()/(1024*1024) + 1
   573  		log.Infof("Flushing UTXO cache of %d MiB with %d entries to disk. For large sizes, "+
   574  			"this can take up to several minutes...", totalMiB, s.cachedEntries.length())
   575  
   576  		return s.writeCache(dbTx, bestState)
   577  	}
   578  
   579  	return nil
   580  }
   581  
   582  // FlushUtxoCache flushes the UTXO state to the database if a flush is needed with the
   583  // given flush mode.
   584  //
   585  // This function is safe for concurrent access.
   586  func (b *BlockChain) FlushUtxoCache(mode FlushMode) error {
   587  	b.chainLock.Lock()
   588  	defer b.chainLock.Unlock()
   589  
   590  	return b.db.Update(func(dbTx database.Tx) error {
   591  		return b.utxoCache.flush(dbTx, mode, b.BestSnapshot())
   592  	})
   593  }
   594  
   595  // InitConsistentState checks the consistency status of the utxo state and
   596  // replays blocks if it lags behind the best state of the blockchain.
   597  //
   598  // It needs to be ensured that the chainView passed to this method does not
   599  // get changed during the execution of this method.
   600  func (b *BlockChain) InitConsistentState(tip *blockNode, interrupt <-chan struct{}) error {
   601  	s := b.utxoCache
   602  
   603  	// Load the consistency status from the database.
   604  	var statusBytes []byte
   605  	s.db.View(func(dbTx database.Tx) error {
   606  		statusBytes = dbFetchUtxoStateConsistency(dbTx)
   607  		return nil
   608  	})
   609  
   610  	// If no status was found, the database is old and didn't have a cached utxo
   611  	// state yet. In that case, we set the status to the best state and write
   612  	// this to the database.
   613  	if statusBytes == nil {
   614  		err := s.db.Update(func(dbTx database.Tx) error {
   615  			return dbPutUtxoStateConsistency(dbTx, &tip.hash)
   616  		})
   617  
   618  		// Set the last flush hash as it's otherwise the default value of all 0s.
   619  		s.lastFlushHash = tip.hash
   620  
   621  		return err
   622  	}
   623  
   624  	statusHash, err := chainhash.NewHash(statusBytes)
   625  	if err != nil {
   626  		return err
   627  	}
   628  
   629  	// If state is consistent, we are done.
   630  	if statusHash.IsEqual(&tip.hash) {
   631  		log.Debugf("UTXO state consistent at (%d:%v)", tip.height, tip.hash)
   632  
   633  		// The last flush hash is set to the default value of all 0s. Set
   634  		// it to the tip since we checked it's consistent.
   635  		s.lastFlushHash = tip.hash
   636  
   637  		return nil
   638  	}
   639  
   640  	lastFlushNode := b.index.LookupNode(statusHash)
   641  	log.Infof("Reconstructing UTXO state after an unclean shutdown. The UTXO state is "+
   642  		"consistent at block %s (%d) but the chainstate is at block %s (%d).  This may "+
   643  		"take a long time...", statusHash.String(), lastFlushNode.height,
   644  		tip.hash.String(), tip.height)
   645  
   646  	// Even though this should always be true, make sure the fetched hash is in
   647  	// the best chain.
   648  	fork := b.bestChain.FindFork(lastFlushNode)
   649  	if fork == nil {
   650  		return AssertError(fmt.Sprintf("last utxo consistency status contains "+
   651  			"hash that is not in best chain: %v", statusHash))
   652  	}
   653  
   654  	// We never disconnect blocks as they cannot be inconsistent during a reorganization.
   655  	// This is because the cache is flushed before the reorganization begins and the utxo
   656  	// set at each block disconnect is written atomically to the database.
   657  	node := lastFlushNode
   658  
   659  	// We replay the blocks from the last consistent state up to the best
   660  	// state. Iterate forward from the consistent node to the tip of the best
   661  	// chain.
   662  	attachNodes := list.New()
   663  	for n := tip; n.height >= 0; n = n.parent {
   664  		if n == fork {
   665  			break
   666  		}
   667  		attachNodes.PushFront(n)
   668  	}
   669  
   670  	for e := attachNodes.Front(); e != nil; e = e.Next() {
   671  		node = e.Value.(*blockNode)
   672  
   673  		var block *btcutil.Block
   674  		err := s.db.View(func(dbTx database.Tx) error {
   675  			block, err = dbFetchBlockByNode(dbTx, node)
   676  			if err != nil {
   677  				return err
   678  			}
   679  
   680  			return err
   681  		})
   682  		if err != nil {
   683  			return err
   684  		}
   685  
   686  		err = b.utxoCache.connectTransactions(block, nil)
   687  		if err != nil {
   688  			return err
   689  		}
   690  
   691  		// Flush the utxo cache if needed.  This will in turn update the
   692  		// consistent state to this block.
   693  		err = s.db.Update(func(dbTx database.Tx) error {
   694  			return s.flush(dbTx, FlushIfNeeded, &BestState{Hash: node.hash, Height: node.height})
   695  		})
   696  		if err != nil {
   697  			return err
   698  		}
   699  
   700  		if interruptRequested(interrupt) {
   701  			log.Warn("UTXO state reconstruction interrupted")
   702  
   703  			return errInterruptRequested
   704  		}
   705  	}
   706  	log.Debug("UTXO state reconstruction done")
   707  
   708  	// Set the last flush hash and time now that the reconstruction is done.
   709  	s.lastFlushHash = tip.hash
   710  	s.lastFlushTime = time.Now()
   711  
   712  	return nil
   713  }
   714  
   715  // flushNeededAfterPrune returns true if the utxo cache needs to be flushed after a prune
   716  // of the block storage.  In the case of an unexpected shutdown, the utxo cache needs
   717  // to be reconstructed from where the utxo cache was last flushed.  In order for the
   718  // utxo cache to be reconstructed, we always need to have the blocks since the last
   719  // utxo cache flush.
   720  //
   721  // Example: if the last flush hash was at height 100 and one of the deleted blocks was at
   722  // height 100 or higher, this function will return true.
   723  func (b *BlockChain) flushNeededAfterPrune(deletedBlockHashes []chainhash.Hash) (bool, error) {
   724  	lastFlushHeight, err := b.BlockHeightByHash(&b.utxoCache.lastFlushHash)
   725  	if err != nil {
   726  		return false, err
   727  	}
   728  
   729  	// Loop through all the block hashes and find out what the highest block height
   730  	// among the deleted hashes is.
   731  	highestDeletedHeight := int32(-1)
   732  	for _, deletedBlockHash := range deletedBlockHashes {
   733  		height, err := b.BlockHeightByHash(&deletedBlockHash)
   734  		if err != nil {
   735  			return false, err
   736  		}
   737  
   738  		if height > highestDeletedHeight {
   739  			highestDeletedHeight = height
   740  		}
   741  	}
   742  
   743  	return highestDeletedHeight >= lastFlushHeight, nil
   744  }
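
// Illustrative sketch (not part of the original file): the caller pattern this
// check is meant for.  If any pruned block is at or above the height of the
// last cache flush, the cache is flushed so the remaining blocks are always
// enough to reconstruct the UTXO state after an unclean shutdown.  The dbTx
// and bestState values are assumed to come from the surrounding prune
// operation.
func examplePruneFlush(b *BlockChain, dbTx database.Tx, bestState *BestState,
	deletedHashes []chainhash.Hash) error {

	needsFlush, err := b.flushNeededAfterPrune(deletedHashes)
	if err != nil {
		return err
	}
	if !needsFlush {
		return nil
	}

	return b.utxoCache.flush(dbTx, FlushRequired, bestState)
}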