github.com/lbryio/lbcd@v0.22.119/blockchain/upgrade.go (about)

     1  // Copyright (c) 2017 The btcsuite developers
     2  // Use of this source code is governed by an ISC
     3  // license that can be found in the LICENSE file.
     4  
     5  package blockchain
     6  
     7  import (
     8  	"bytes"
     9  	"container/list"
    10  	"errors"
    11  	"fmt"
    12  	"time"
    13  
    14  	"github.com/lbryio/lbcd/chaincfg/chainhash"
    15  	"github.com/lbryio/lbcd/database"
    16  	"github.com/lbryio/lbcd/wire"
    17  )
    18  
const (
	// blockHdrOffset defines the offsets into a v1 block index row for the
	// block header.  The 12 bytes preceding the header hold the serialized
	// block location.
	//
	// The serialized block index row format is:
	//   <blocklocation><blockheader>
	blockHdrOffset = 12
)

// errInterruptRequested indicates that an operation was cancelled due
// to a user-requested interrupt.
var errInterruptRequested = errors.New("interrupt requested")
    31  
    32  // interruptRequested returns true when the provided channel has been closed.
    33  // This simplifies early shutdown slightly since the caller can just use an if
    34  // statement instead of a select.
    35  func interruptRequested(interrupted <-chan struct{}) bool {
    36  	select {
    37  	case <-interrupted:
    38  		return true
    39  	default:
    40  	}
    41  
    42  	return false
    43  }
    44  
// blockChainContext represents a particular block's placement in the block
// chain. This is used by the block index migration to track block metadata that
// will be written to disk.
type blockChainContext struct {
	parent    *chainhash.Hash   // hash of the parent block (header PrevBlock)
	children  []*chainhash.Hash // hashes of all known blocks extending this one
	height    int32             // chain height; -1 until determined
	mainChain bool              // true once the block is found on the main chain
}
    54  
// migrateBlockIndex migrates all block entries from the v1 block index bucket
// to the v2 bucket. The v1 bucket stores all block entries keyed by block hash,
// whereas the v2 bucket stores the exact same values, but keyed instead by
// block height + hash.
//
// The v1 rows are not removed; instead each row is truncated to just its
// block location portion once its header has been copied to the v2 bucket.
func migrateBlockIndex(db database.DB) error {
	// Hardcoded bucket names so updates to the global values do not affect
	// old upgrades.
	v1BucketName := []byte("ffldb-blockidx")
	v2BucketName := []byte("blockheaderidx")

	err := db.Update(func(dbTx database.Tx) error {
		v1BlockIdxBucket := dbTx.Metadata().Bucket(v1BucketName)
		if v1BlockIdxBucket == nil {
			return fmt.Errorf("Bucket %s does not exist", v1BucketName)
		}

		log.Info("Re-indexing block information in the database. This might take a while...")

		v2BlockIdxBucket, err :=
			dbTx.Metadata().CreateBucketIfNotExists(v2BucketName)
		if err != nil {
			return err
		}

		// Get tip of the main chain.
		serializedData := dbTx.Metadata().Get(chainStateKeyName)
		state, err := deserializeBestChainState(serializedData)
		if err != nil {
			return err
		}
		tip := &state.hash

		// Scan the old block index bucket and construct a mapping of each block
		// to parent block and all child blocks.
		blocksMap, err := readBlockTree(v1BlockIdxBucket)
		if err != nil {
			return err
		}

		// Use the block graph to calculate the height of each block.
		err = determineBlockHeights(blocksMap)
		if err != nil {
			return err
		}

		// Find blocks on the main chain with the block graph and current tip.
		determineMainChainBlocks(blocksMap, tip)

		// Now that we have heights for all blocks, scan the old block index
		// bucket and insert all rows into the new one.
		return v1BlockIdxBucket.ForEach(func(hashBytes, blockRow []byte) error {
			// The header starts blockHdrOffset bytes into each v1 row.
			// The full-slice expression caps capacity so appends to the
			// sub-slice cannot clobber the remainder of the row.
			endOffset := blockHdrOffset + blockHdrSize
			headerBytes := blockRow[blockHdrOffset:endOffset:endOffset]

			var hash chainhash.Hash
			copy(hash[:], hashBytes[0:chainhash.HashSize])
			chainContext := blocksMap[hash]

			// A height of -1 means determineBlockHeights found no path
			// from this block back to the genesis block.
			if chainContext.height == -1 {
				return fmt.Errorf("Unable to calculate chain height for "+
					"stored block %s", hash)
			}

			// Mark blocks as valid if they are part of the main chain.
			status := statusDataStored
			if chainContext.mainChain {
				status |= statusValid
			}

			// Write header to v2 bucket, appending the status byte after
			// the serialized header.
			value := make([]byte, blockHdrSize+1)
			copy(value[0:blockHdrSize], headerBytes)
			value[blockHdrSize] = byte(status)

			key := blockIndexKey(&hash, uint32(chainContext.height))
			err := v2BlockIdxBucket.Put(key, value)
			if err != nil {
				return err
			}

			// Delete header from v1 bucket by rewriting the row with only
			// its block location prefix.
			truncatedRow := blockRow[0:blockHdrOffset:blockHdrOffset]
			return v1BlockIdxBucket.Put(hashBytes, truncatedRow)
		})
	})
	if err != nil {
		return err
	}

	log.Infof("Block database migration complete")
	return nil
}
   147  
   148  // readBlockTree reads the old block index bucket and constructs a mapping of
   149  // each block to its parent block and all child blocks. This mapping represents
   150  // the full tree of blocks. This function does not populate the height or
   151  // mainChain fields of the returned blockChainContext values.
   152  func readBlockTree(v1BlockIdxBucket database.Bucket) (map[chainhash.Hash]*blockChainContext, error) {
   153  	blocksMap := make(map[chainhash.Hash]*blockChainContext)
   154  	err := v1BlockIdxBucket.ForEach(func(_, blockRow []byte) error {
   155  		var header wire.BlockHeader
   156  		endOffset := blockHdrOffset + blockHdrSize
   157  		headerBytes := blockRow[blockHdrOffset:endOffset:endOffset]
   158  		err := header.Deserialize(bytes.NewReader(headerBytes))
   159  		if err != nil {
   160  			return err
   161  		}
   162  
   163  		blockHash := header.BlockHash()
   164  		prevHash := header.PrevBlock
   165  
   166  		if blocksMap[blockHash] == nil {
   167  			blocksMap[blockHash] = &blockChainContext{height: -1}
   168  		}
   169  		if blocksMap[prevHash] == nil {
   170  			blocksMap[prevHash] = &blockChainContext{height: -1}
   171  		}
   172  
   173  		blocksMap[blockHash].parent = &prevHash
   174  		blocksMap[prevHash].children =
   175  			append(blocksMap[prevHash].children, &blockHash)
   176  		return nil
   177  	})
   178  	return blocksMap, err
   179  }
   180  
   181  // determineBlockHeights takes a map of block hashes to a slice of child hashes
   182  // and uses it to compute the height for each block. The function assigns a
   183  // height of 0 to the genesis hash and explores the tree of blocks
   184  // breadth-first, assigning a height to every block with a path back to the
   185  // genesis block. This function modifies the height field on the blocksMap
   186  // entries.
   187  func determineBlockHeights(blocksMap map[chainhash.Hash]*blockChainContext) error {
   188  	queue := list.New()
   189  
   190  	// The genesis block is included in blocksMap as a child of the zero hash
   191  	// because that is the value of the PrevBlock field in the genesis header.
   192  	preGenesisContext, exists := blocksMap[zeroHash]
   193  	if !exists || len(preGenesisContext.children) == 0 {
   194  		return fmt.Errorf("Unable to find genesis block")
   195  	}
   196  
   197  	for _, genesisHash := range preGenesisContext.children {
   198  		blocksMap[*genesisHash].height = 0
   199  		queue.PushBack(genesisHash)
   200  	}
   201  
   202  	for e := queue.Front(); e != nil; e = queue.Front() {
   203  		queue.Remove(e)
   204  		hash := e.Value.(*chainhash.Hash)
   205  		height := blocksMap[*hash].height
   206  
   207  		// For each block with this one as a parent, assign it a height and
   208  		// push to queue for future processing.
   209  		for _, childHash := range blocksMap[*hash].children {
   210  			blocksMap[*childHash].height = height + 1
   211  			queue.PushBack(childHash)
   212  		}
   213  	}
   214  
   215  	return nil
   216  }
   217  
   218  // determineMainChainBlocks traverses the block graph down from the tip to
   219  // determine which block hashes that are part of the main chain. This function
   220  // modifies the mainChain field on the blocksMap entries.
   221  func determineMainChainBlocks(blocksMap map[chainhash.Hash]*blockChainContext, tip *chainhash.Hash) {
   222  	for nextHash := tip; *nextHash != zeroHash; nextHash = blocksMap[*nextHash].parent {
   223  		blocksMap[*nextHash].mainChain = true
   224  	}
   225  }
   226  
   227  // deserializeUtxoEntryV0 decodes a utxo entry from the passed serialized byte
   228  // slice according to the legacy version 0 format into a map of utxos keyed by
   229  // the output index within the transaction.  The map is necessary because the
   230  // previous format encoded all unspent outputs for a transaction using a single
   231  // entry, whereas the new format encodes each unspent output individually.
   232  //
   233  // The legacy format is as follows:
   234  //
   235  //	<version><height><header code><unspentness bitmap>[<compressed txouts>,...]
   236  //
   237  //	Field                Type     Size
   238  //	version              VLQ      variable
   239  //	block height         VLQ      variable
   240  //	header code          VLQ      variable
   241  //	unspentness bitmap   []byte   variable
   242  //	compressed txouts
   243  //	  compressed amount  VLQ      variable
   244  //	  compressed script  []byte   variable
   245  //
   246  // The serialized header code format is:
   247  //
   248  //	bit 0 - containing transaction is a coinbase
   249  //	bit 1 - output zero is unspent
   250  //	bit 2 - output one is unspent
   251  //	bits 3-x - number of bytes in unspentness bitmap.  When both bits 1 and 2
   252  //	  are unset, it encodes N-1 since there must be at least one unspent
   253  //	  output.
   254  //
   255  // The rationale for the header code scheme is as follows:
   256  //   - Transactions which only pay to a single output and a change output are
   257  //     extremely common, thus an extra byte for the unspentness bitmap can be
   258  //     avoided for them by encoding those two outputs in the low order bits.
   259  //   - Given it is encoded as a VLQ which can encode values up to 127 with a
   260  //     single byte, that leaves 4 bits to represent the number of bytes in the
   261  //     unspentness bitmap while still only consuming a single byte for the
   262  //     header code.  In other words, an unspentness bitmap with up to 120
   263  //     transaction outputs can be encoded with a single-byte header code.
   264  //     This covers the vast majority of transactions.
   265  //   - Encoding N-1 bytes when both bits 1 and 2 are unset allows an additional
   266  //     8 outpoints to be encoded before causing the header code to require an
   267  //     additional byte.
   268  //
   269  // Example 1:
   270  // From tx in main blockchain:
   271  // Blk 1, 0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098
   272  //
   273  //	  010103320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
   274  //	  <><><><------------------------------------------------------------------>
   275  //	   | | \--------\                               |
   276  //	   | height     |                      compressed txout 0
   277  //	version    header code
   278  //
   279  //	- version: 1
   280  //	- height: 1
   281  //	- header code: 0x03 (coinbase, output zero unspent, 0 bytes of unspentness)
   282  //	- unspentness: Nothing since it is zero bytes
   283  //	- compressed txout 0:
   284  //	  - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
   285  //	  - 0x04: special script type pay-to-pubkey
   286  //	  - 0x96...52: x-coordinate of the pubkey
   287  //
   288  // Example 2:
   289  // From tx in main blockchain:
   290  // Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f
   291  //
   292  //	  0185f90b0a011200e2ccd6ec7c6e2e581349c77e067385fa8236bf8a800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
   293  //	  <><----><><><------------------------------------------><-------------------------------------------->
   294  //	   |    |  | \-------------------\            |                            |
   295  //	version |  \--------\       unspentness       |                    compressed txout 2
   296  //	      height     header code          compressed txout 0
   297  //
   298  //	- version: 1
   299  //	- height: 113931
   300  //	- header code: 0x0a (output zero unspent, 1 byte in unspentness bitmap)
   301  //	- unspentness: [0x01] (bit 0 is set, so output 0+2 = 2 is unspent)
   302  //	  NOTE: It's +2 since the first two outputs are encoded in the header code
   303  //	- compressed txout 0:
   304  //	  - 0x12: VLQ-encoded compressed amount for 20000000 (0.2 BTC)
   305  //	  - 0x00: special script type pay-to-pubkey-hash
   306  //	  - 0xe2...8a: pubkey hash
   307  //	- compressed txout 2:
   308  //	  - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
   309  //	  - 0x00: special script type pay-to-pubkey-hash
   310  //	  - 0xb8...58: pubkey hash
   311  //
   312  // Example 3:
   313  // From tx in main blockchain:
   314  // Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620
   315  //
   316  //	  0193d06c100000108ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
   317  //	  <><----><><----><-------------------------------------------------->
   318  //	   |    |  |   \-----------------\            |
   319  //	version |  \--------\       unspentness       |
   320  //	      height     header code          compressed txout 22
   321  //
   322  //	- version: 1
   323  //	- height: 338156
   324  //	- header code: 0x10 (2+1 = 3 bytes in unspentness bitmap)
   325  //	  NOTE: It's +1 since neither bit 1 nor 2 are set, so N-1 is encoded.
   326  //	- unspentness: [0x00 0x00 0x10] (bit 20 is set, so output 20+2 = 22 is unspent)
   327  //	  NOTE: It's +2 since the first two outputs are encoded in the header code
   328  //	- compressed txout 22:
   329  //	  - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
   330  //	  - 0x01: special script type pay-to-script-hash
   331  //	  - 0x1d...e6: script hash
func deserializeUtxoEntryV0(serialized []byte) (map[uint32]*UtxoEntry, error) {
	// Deserialize the version.
	//
	// NOTE: Ignore version since it is no longer used in the new format.
	_, bytesRead := deserializeVLQ(serialized)
	offset := bytesRead
	if offset >= len(serialized) {
		return nil, errDeserialize("unexpected end of data after version")
	}

	// Deserialize the block height.
	blockHeight, bytesRead := deserializeVLQ(serialized[offset:])
	offset += bytesRead
	if offset >= len(serialized) {
		return nil, errDeserialize("unexpected end of data after height")
	}

	// Deserialize the header code.
	code, bytesRead := deserializeVLQ(serialized[offset:])
	offset += bytesRead
	if offset >= len(serialized) {
		return nil, errDeserialize("unexpected end of data after header")
	}

	// Decode the header code.
	//
	// Bit 0 indicates whether the containing transaction is a coinbase.
	// Bit 1 indicates output 0 is unspent.
	// Bit 2 indicates output 1 is unspent.
	// Bits 3-x encodes the number of non-zero unspentness bitmap bytes that
	// follow.  When both output 0 and 1 are spent, it encodes N-1.
	isCoinBase := code&0x01 != 0
	output0Unspent := code&0x02 != 0
	output1Unspent := code&0x04 != 0
	numBitmapBytes := code >> 3
	if !output0Unspent && !output1Unspent {
		numBitmapBytes++
	}

	// Ensure there are enough bytes left to deserialize the unspentness
	// bitmap.
	if uint64(len(serialized[offset:])) < numBitmapBytes {
		return nil, errDeserialize("unexpected end of data for " +
			"unspentness bitmap")
	}

	// Add sparse output for unspent outputs 0 and 1 as needed based on the
	// details provided by the header code.
	var outputIndexes []uint32
	if output0Unspent {
		outputIndexes = append(outputIndexes, 0)
	}
	if output1Unspent {
		outputIndexes = append(outputIndexes, 1)
	}

	// Decode the unspentness bitmap adding a sparse output for each unspent
	// output.  Bits are consumed least-significant first within each byte.
	for i := uint32(0); i < uint32(numBitmapBytes); i++ {
		unspentBits := serialized[offset]
		for j := uint32(0); j < 8; j++ {
			if unspentBits&0x01 != 0 {
				// The first 2 outputs are encoded via the
				// header code, so adjust the output number
				// accordingly.
				outputNum := 2 + i*8 + j
				outputIndexes = append(outputIndexes, outputNum)
			}
			unspentBits >>= 1
		}
		offset++
	}

	// Map to hold all of the converted outputs.
	entries := make(map[uint32]*UtxoEntry)

	// All entries will need to potentially be marked as a coinbase.
	var packedFlags txoFlags
	if isCoinBase {
		packedFlags |= tfCoinBase
	}

	// Decode and add all of the utxos.  The compressed txouts appear in the
	// same order as the output indexes collected above.
	for i, outputIndex := range outputIndexes {
		// Decode the next utxo.
		amount, pkScript, bytesRead, err := decodeCompressedTxOut(
			serialized[offset:])
		if err != nil {
			return nil, errDeserialize(fmt.Sprintf("unable to "+
				"decode utxo at index %d: %v", i, err))
		}
		offset += bytesRead

		// Create a new utxo entry with the details deserialized above.
		entries[outputIndex] = &UtxoEntry{
			amount:      int64(amount),
			pkScript:    pkScript,
			blockHeight: int32(blockHeight),
			packedFlags: packedFlags,
		}
	}

	return entries, nil
}
   436  
// upgradeUtxoSetToV2 migrates the utxo set entries from version 1 to 2 in
// batches.  It is guaranteed to be updated if this returns without failure.
func upgradeUtxoSetToV2(db database.DB, interrupt <-chan struct{}) error {
	// Hardcoded bucket names so updates to the global values do not affect
	// old upgrades.
	var (
		v1BucketName = []byte("utxoset")
		v2BucketName = []byte("utxosetv2")
	)

	log.Infof("Upgrading utxo set to v2.  This will take a while...")
	start := time.Now()

	// Create the new utxo set bucket as needed.
	err := db.Update(func(dbTx database.Tx) error {
		_, err := dbTx.Metadata().CreateBucketIfNotExists(v2BucketName)
		return err
	})
	if err != nil {
		return err
	}

	// doBatch contains the primary logic for upgrading the utxo set from
	// version 1 to 2 in batches.  This is done because the utxo set can be
	// huge and thus attempting to migrate in a single database transaction
	// would result in massive memory usage and could potentially crash on
	// many systems due to ulimits.
	//
	// It returns the number of utxos processed.
	const maxUtxos = 200000
	doBatch := func(dbTx database.Tx) (uint32, error) {
		v1Bucket := dbTx.Metadata().Bucket(v1BucketName)
		v2Bucket := dbTx.Metadata().Bucket(v2BucketName)
		v1Cursor := v1Bucket.Cursor()

		// Migrate utxos so long as the max number of utxos for this
		// batch has not been exceeded.
		//
		// NOTE: The cursor always restarts at First because each
		// iteration deletes the entry it just migrated.
		var numUtxos uint32
		for ok := v1Cursor.First(); ok && numUtxos < maxUtxos; ok =
			v1Cursor.Next() {

			// Old key was the transaction hash.
			oldKey := v1Cursor.Key()
			var txHash chainhash.Hash
			copy(txHash[:], oldKey)

			// Deserialize the old entry which included all utxos
			// for the given transaction.
			utxos, err := deserializeUtxoEntryV0(v1Cursor.Value())
			if err != nil {
				return 0, err
			}

			// Add an entry for each utxo into the new bucket using
			// the new format.
			for txOutIdx, utxo := range utxos {
				reserialized, err := serializeUtxoEntry(utxo)
				if err != nil {
					return 0, err
				}

				key := outpointKey(wire.OutPoint{
					Hash:  txHash,
					Index: txOutIdx,
				})
				err = v2Bucket.Put(*key, reserialized)
				// NOTE: The key is intentionally not recycled
				// here since the database interface contract
				// prohibits modifications.  It will be garbage
				// collected normally when the database is done
				// with it.
				if err != nil {
					return 0, err
				}
			}

			// Remove old entry.
			err = v1Bucket.Delete(oldKey)
			if err != nil {
				return 0, err
			}

			numUtxos += uint32(len(utxos))

			if interruptRequested(interrupt) {
				// No error here so the database transaction
				// is not cancelled and therefore outstanding
				// work is written to disk.
				break
			}
		}

		return numUtxos, nil
	}

	// Migrate all entries in batches for the reasons mentioned above.
	var totalUtxos uint64
	for {
		var numUtxos uint32
		err := db.Update(func(dbTx database.Tx) error {
			var err error
			numUtxos, err = doBatch(dbTx)
			return err
		})
		if err != nil {
			return err
		}

		// The interrupted batch above was still committed; the next run
		// of this upgrade will resume from the remaining v1 entries.
		if interruptRequested(interrupt) {
			return errInterruptRequested
		}

		// An empty batch means every v1 entry has been migrated.
		if numUtxos == 0 {
			break
		}

		totalUtxos += uint64(numUtxos)
		log.Infof("Migrated %d utxos (%d total)", numUtxos, totalUtxos)
	}

	// Remove the old bucket and update the utxo set version once it has
	// been fully migrated.
	err = db.Update(func(dbTx database.Tx) error {
		err := dbTx.Metadata().DeleteBucket(v1BucketName)
		if err != nil {
			return err
		}

		return dbPutVersion(dbTx, utxoSetVersionKeyName, 2)
	})
	if err != nil {
		return err
	}

	seconds := int64(time.Since(start) / time.Second)
	log.Infof("Done upgrading utxo set.  Total utxos: %d in %d seconds",
		totalUtxos, seconds)
	return nil
}
   576  
   577  // maybeUpgradeDbBuckets checks the database version of the buckets used by this
   578  // package and performs any needed upgrades to bring them to the latest version.
   579  //
   580  // All buckets used by this package are guaranteed to be the latest version if
   581  // this function returns without error.
   582  func (b *BlockChain) maybeUpgradeDbBuckets(interrupt <-chan struct{}) error {
   583  	// Load or create bucket versions as needed.
   584  	var utxoSetVersion uint32
   585  	err := b.db.Update(func(dbTx database.Tx) error {
   586  		// Load the utxo set version from the database or create it and
   587  		// initialize it to version 1 if it doesn't exist.
   588  		var err error
   589  		utxoSetVersion, err = dbFetchOrCreateVersion(dbTx,
   590  			utxoSetVersionKeyName, 1)
   591  		return err
   592  	})
   593  	if err != nil {
   594  		return err
   595  	}
   596  
   597  	// Update the utxo set to v2 if needed.
   598  	if utxoSetVersion < 2 {
   599  		if err := upgradeUtxoSetToV2(b.db, interrupt); err != nil {
   600  			return err
   601  		}
   602  	}
   603  
   604  	return nil
   605  }