github.com/btcsuite/btcd@v0.24.0/blockchain/chainio.go

     1  // Copyright (c) 2015-2017 The btcsuite developers
     2  // Use of this source code is governed by an ISC
     3  // license that can be found in the LICENSE file.
     4  
     5  package blockchain
     6  
     7  import (
     8  	"bytes"
     9  	"encoding/binary"
    10  	"fmt"
    11  	"math/big"
    12  	"sync"
    13  	"time"
    14  
    15  	"github.com/btcsuite/btcd/btcutil"
    16  	"github.com/btcsuite/btcd/chaincfg/chainhash"
    17  	"github.com/btcsuite/btcd/database"
    18  	"github.com/btcsuite/btcd/wire"
    19  )
    20  
    21  const (
    22  	// blockHdrSize is the size of a block header.  This is simply the
    23  	// constant from wire and is only provided here for convenience since
    24  	// wire.MaxBlockHeaderPayload is quite long.
    25  	blockHdrSize = wire.MaxBlockHeaderPayload
    26  
    27  	// latestUtxoSetBucketVersion is the current version of the utxo set
    28  	// bucket that is used to track all unspent outputs.
    29  	latestUtxoSetBucketVersion = 2
    30  
    31  	// latestSpendJournalBucketVersion is the current version of the spend
    32  	// journal bucket that is used to track all spent transactions for use
    33  	// in reorgs.
    34  	latestSpendJournalBucketVersion = 1
    35  )
    36  
    37  var (
     38  	// blockIndexBucketName is the name of the db bucket used to house the
    39  	// block headers and contextual information.
    40  	blockIndexBucketName = []byte("blockheaderidx")
    41  
     42  	// hashIndexBucketName is the name of the db bucket used to house the
    43  	// block hash -> block height index.
    44  	hashIndexBucketName = []byte("hashidx")
    45  
     46  	// heightIndexBucketName is the name of the db bucket used to house
    47  	// the block height -> block hash index.
    48  	heightIndexBucketName = []byte("heightidx")
    49  
    50  	// chainStateKeyName is the name of the db key used to store the best
    51  	// chain state.
    52  	chainStateKeyName = []byte("chainstate")
    53  
    54  	// utxoStateConsistencyKeyName is the name of the db key used to store the
    55  	// consistency status of the utxo state.
    56  	utxoStateConsistencyKeyName = []byte("utxostateconsistency")
    57  
    58  	// spendJournalVersionKeyName is the name of the db key used to store
    59  	// the version of the spend journal currently in the database.
    60  	spendJournalVersionKeyName = []byte("spendjournalversion")
    61  
    62  	// spendJournalBucketName is the name of the db bucket used to house
     63  	// transaction outputs that are spent in each block.
    64  	spendJournalBucketName = []byte("spendjournal")
    65  
    66  	// utxoSetVersionKeyName is the name of the db key used to store the
    67  	// version of the utxo set currently in the database.
    68  	utxoSetVersionKeyName = []byte("utxosetversion")
    69  
    70  	// utxoSetBucketName is the name of the db bucket used to house the
    71  	// unspent transaction output set.
    72  	utxoSetBucketName = []byte("utxosetv2")
    73  
    74  	// byteOrder is the preferred byte order used for serializing numeric
    75  	// fields for storage in the database.
    76  	byteOrder = binary.LittleEndian
    77  )
    78  
    79  // errNotInMainChain signifies that a block hash or height that is not in the
    80  // main chain was requested.
    81  type errNotInMainChain string
    82  
    83  // Error implements the error interface.
    84  func (e errNotInMainChain) Error() string {
    85  	return string(e)
    86  }
    87  
    88  // isNotInMainChainErr returns whether or not the passed error is an
    89  // errNotInMainChain error.
    90  func isNotInMainChainErr(err error) bool {
    91  	_, ok := err.(errNotInMainChain)
    92  	return ok
    93  }
    94  
    95  // errDeserialize signifies that a problem was encountered when deserializing
    96  // data.
    97  type errDeserialize string
    98  
    99  // Error implements the error interface.
   100  func (e errDeserialize) Error() string {
   101  	return string(e)
   102  }
   103  
   104  // isDeserializeErr returns whether or not the passed error is an errDeserialize
   105  // error.
   106  func isDeserializeErr(err error) bool {
   107  	_, ok := err.(errDeserialize)
   108  	return ok
   109  }
   110  
   111  // isDbBucketNotFoundErr returns whether or not the passed error is a
   112  // database.Error with an error code of database.ErrBucketNotFound.
   113  func isDbBucketNotFoundErr(err error) bool {
   114  	dbErr, ok := err.(database.Error)
   115  	return ok && dbErr.ErrorCode == database.ErrBucketNotFound
   116  }
   117  
   118  // dbFetchVersion fetches an individual version with the given key from the
   119  // metadata bucket.  It is primarily used to track versions on entities such as
   120  // buckets.  It returns zero if the provided key does not exist.
   121  func dbFetchVersion(dbTx database.Tx, key []byte) uint32 {
   122  	serialized := dbTx.Metadata().Get(key)
   123  	if serialized == nil {
   124  		return 0
   125  	}
   126  
   127  	return byteOrder.Uint32(serialized)
   128  }
   129  
   130  // dbPutVersion uses an existing database transaction to update the provided
   131  // key in the metadata bucket to the given version.  It is primarily used to
   132  // track versions on entities such as buckets.
   133  func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
   134  	var serialized [4]byte
   135  	byteOrder.PutUint32(serialized[:], version)
   136  	return dbTx.Metadata().Put(key, serialized[:])
   137  }
   138  
   139  // dbFetchOrCreateVersion uses an existing database transaction to attempt to
   140  // fetch the provided key from the metadata bucket as a version and in the case
   141  // it doesn't exist, it adds the entry with the provided default version and
   142  // returns that.  This is useful during upgrades to automatically handle loading
   143  // and adding version keys as necessary.
   144  func dbFetchOrCreateVersion(dbTx database.Tx, key []byte, defaultVersion uint32) (uint32, error) {
   145  	version := dbFetchVersion(dbTx, key)
   146  	if version == 0 {
   147  		version = defaultVersion
   148  		err := dbPutVersion(dbTx, key, version)
   149  		if err != nil {
   150  			return 0, err
   151  		}
   152  	}
   153  
   154  	return version, nil
   155  }
   156  
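// exampleEnsureUtxoSetVersion is an illustrative sketch added for exposition;
// it is not part of the upstream file and the function name is invented.  It
// shows how the version helpers above are typically used: read the stored
// bucket version inside a managed transaction, creating the key with the
// latest default when it does not exist yet.
func exampleEnsureUtxoSetVersion(db database.DB) (uint32, error) {
	var version uint32
	err := db.Update(func(dbTx database.Tx) error {
		var err error
		version, err = dbFetchOrCreateVersion(dbTx, utxoSetVersionKeyName,
			latestUtxoSetBucketVersion)
		return err
	})
	return version, err
}
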
   157  // -----------------------------------------------------------------------------
   158  // The transaction spend journal consists of an entry for each block connected
   159  // to the main chain which contains the transaction outputs the block spends
   160  // serialized such that the order is the reverse of the order they were spent.
   161  //
   162  // This is required because reorganizing the chain necessarily entails
   163  // disconnecting blocks to get back to the point of the fork which implies
   164  // unspending all of the transaction outputs that each block previously spent.
   165  // Since the utxo set, by definition, only contains unspent transaction outputs,
   166  // the spent transaction outputs must be resurrected from somewhere.  There is
    167  // more than one way this could be done, however this is the most
    168  // straightforward method that does not require having a transaction index
    169  // and unpruned blockchain.
   170  //
   171  // NOTE: This format is NOT self describing.  The additional details such as
   172  // the number of entries (transaction inputs) are expected to come from the
   173  // block itself and the utxo set (for legacy entries).  The rationale in doing
   174  // this is to save space.  This is also the reason the spent outputs are
   175  // serialized in the reverse order they are spent because later transactions are
   176  // allowed to spend outputs from earlier ones in the same block.
   177  //
    178  // The reserved field below used to keep track of the version of the containing
    179  // transaction when the height in the header code was non-zero.  The height is
    180  // always non-zero now, but the extra reserved field is kept for backwards
    181  // compatibility.
   182  //
   183  // The serialized format is:
   184  //
   185  //   [<header code><reserved><compressed txout>],...
   186  //
   187  //   Field                Type     Size
   188  //   header code          VLQ      variable
   189  //   reserved             byte     1
   190  //   compressed txout
   191  //     compressed amount  VLQ      variable
   192  //     compressed script  []byte   variable
   193  //
   194  // The serialized header code format is:
   195  //   bit 0 - containing transaction is a coinbase
   196  //   bits 1-x - height of the block that contains the spent txout
   197  //
   198  // Example 1:
   199  // From block 170 in main blockchain.
   200  //
   201  //    1300320511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c
   202  //    <><><------------------------------------------------------------------>
   203  //     | |                                  |
   204  //     | reserved                  compressed txout
   205  //    header code
   206  //
   207  //  - header code: 0x13 (coinbase, height 9)
   208  //  - reserved: 0x00
   209  //  - compressed txout 0:
   210  //    - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
   211  //    - 0x05: special script type pay-to-pubkey
   212  //    - 0x11...5c: x-coordinate of the pubkey
   213  //
   214  // Example 2:
   215  // Adapted from block 100025 in main blockchain.
   216  //
   217  //    8b99700091f20f006edbc6c4d31bae9f1ccc38538a114bf42de65e868b99700086c64700b2fb57eadf61e106a100a7445a8c3f67898841ec
   218  //    <----><><----------------------------------------------><----><><---------------------------------------------->
   219  //     |    |                         |                        |    |                         |
   220  //     |    reserved         compressed txout                  |    reserved         compressed txout
   221  //    header code                                          header code
   222  //
   223  //  - Last spent output:
   224  //    - header code: 0x8b9970 (not coinbase, height 100024)
   225  //    - reserved: 0x00
   226  //    - compressed txout:
   227  //      - 0x91f20f: VLQ-encoded compressed amount for 34405000000 (344.05 BTC)
   228  //      - 0x00: special script type pay-to-pubkey-hash
   229  //      - 0x6e...86: pubkey hash
   230  //  - Second to last spent output:
   231  //    - header code: 0x8b9970 (not coinbase, height 100024)
   232  //    - reserved: 0x00
   233  //    - compressed txout:
   234  //      - 0x86c647: VLQ-encoded compressed amount for 13761000000 (137.61 BTC)
   235  //      - 0x00: special script type pay-to-pubkey-hash
   236  //      - 0xb2...ec: pubkey hash
   237  // -----------------------------------------------------------------------------
   238  
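// exampleDecodeSpendJournalHeaderCode is an illustrative sketch added for
// exposition; it is not part of the upstream file and the function name is
// invented.  It decodes the header code from Example 1 above with the
// package's VLQ helper, recovering the coinbase flag and the height (9).
func exampleDecodeSpendJournalHeaderCode() (isCoinBase bool, height int32) {
	// 0x13 is the header code byte from Example 1 above.
	code, _ := deserializeVLQ([]byte{0x13})

	// Bit 0 holds the coinbase flag and the remaining bits hold the height.
	isCoinBase = code&0x01 != 0 // true
	height = int32(code >> 1)   // 9
	return isCoinBase, height
}
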
   239  // SpentTxOut contains a spent transaction output and potentially additional
   240  // contextual information such as whether or not it was contained in a coinbase
   241  // transaction, the version of the transaction it was contained in, and which
   242  // block height the containing transaction was included in.  As described in
   243  // the comments above, the additional contextual information will only be valid
   244  // when this spent txout is spending the last unspent output of the containing
   245  // transaction.
   246  type SpentTxOut struct {
   247  	// Amount is the amount of the output.
   248  	Amount int64
   249  
    250  	// PkScript is the public key script for the output.
   251  	PkScript []byte
   252  
   253  	// Height is the height of the block containing the creating tx.
   254  	Height int32
   255  
   256  	// Denotes if the creating tx is a coinbase.
   257  	IsCoinBase bool
   258  }
   259  
   260  // FetchSpendJournal attempts to retrieve the spend journal, or the set of
   261  // outputs spent for the target block. This provides a view of all the outputs
   262  // that will be consumed once the target block is connected to the end of the
   263  // main chain.
   264  //
   265  // This function is safe for concurrent access.
   266  func (b *BlockChain) FetchSpendJournal(targetBlock *btcutil.Block) ([]SpentTxOut, error) {
   267  	b.chainLock.RLock()
   268  	defer b.chainLock.RUnlock()
   269  
   270  	var spendEntries []SpentTxOut
   271  	err := b.db.View(func(dbTx database.Tx) error {
   272  		var err error
   273  
   274  		spendEntries, err = dbFetchSpendJournalEntry(dbTx, targetBlock)
   275  		return err
   276  	})
   277  	if err != nil {
   278  		return nil, err
   279  	}
   280  
   281  	return spendEntries, nil
   282  }
   283  
   284  // spentTxOutHeaderCode returns the calculated header code to be used when
   285  // serializing the provided stxo entry.
   286  func spentTxOutHeaderCode(stxo *SpentTxOut) uint64 {
   287  	// As described in the serialization format comments, the header code
   288  	// encodes the height shifted over one bit and the coinbase flag in the
   289  	// lowest bit.
   290  	headerCode := uint64(stxo.Height) << 1
   291  	if stxo.IsCoinBase {
   292  		headerCode |= 0x01
   293  	}
   294  
   295  	return headerCode
   296  }
   297  
   298  // spentTxOutSerializeSize returns the number of bytes it would take to
   299  // serialize the passed stxo according to the format described above.
   300  func spentTxOutSerializeSize(stxo *SpentTxOut) int {
   301  	size := serializeSizeVLQ(spentTxOutHeaderCode(stxo))
   302  	if stxo.Height > 0 {
   303  		// The legacy v1 spend journal format conditionally tracked the
   304  		// containing transaction version when the height was non-zero,
   305  		// so this is required for backwards compat.
   306  		size += serializeSizeVLQ(0)
   307  	}
   308  	return size + compressedTxOutSize(uint64(stxo.Amount), stxo.PkScript)
   309  }
   310  
   311  // putSpentTxOut serializes the passed stxo according to the format described
   312  // above directly into the passed target byte slice.  The target byte slice must
   313  // be at least large enough to handle the number of bytes returned by the
    314  // spentTxOutSerializeSize function or it will panic.
   315  func putSpentTxOut(target []byte, stxo *SpentTxOut) int {
   316  	headerCode := spentTxOutHeaderCode(stxo)
   317  	offset := putVLQ(target, headerCode)
   318  	if stxo.Height > 0 {
   319  		// The legacy v1 spend journal format conditionally tracked the
   320  		// containing transaction version when the height was non-zero,
   321  		// so this is required for backwards compat.
   322  		offset += putVLQ(target[offset:], 0)
   323  	}
   324  	return offset + putCompressedTxOut(target[offset:], uint64(stxo.Amount),
   325  		stxo.PkScript)
   326  }
   327  
   328  // decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed
   329  // by other data, into the passed stxo struct.  It returns the number of bytes
   330  // read.
   331  func decodeSpentTxOut(serialized []byte, stxo *SpentTxOut) (int, error) {
   332  	// Ensure there are bytes to decode.
   333  	if len(serialized) == 0 {
   334  		return 0, errDeserialize("no serialized bytes")
   335  	}
   336  
   337  	// Deserialize the header code.
   338  	code, offset := deserializeVLQ(serialized)
   339  	if offset >= len(serialized) {
   340  		return offset, errDeserialize("unexpected end of data after " +
   341  			"header code")
   342  	}
   343  
   344  	// Decode the header code.
   345  	//
   346  	// Bit 0 indicates containing transaction is a coinbase.
   347  	// Bits 1-x encode height of containing transaction.
   348  	stxo.IsCoinBase = code&0x01 != 0
   349  	stxo.Height = int32(code >> 1)
   350  	if stxo.Height > 0 {
   351  		// The legacy v1 spend journal format conditionally tracked the
   352  		// containing transaction version when the height was non-zero,
   353  		// so this is required for backwards compat.
   354  		_, bytesRead := deserializeVLQ(serialized[offset:])
   355  		offset += bytesRead
   356  		if offset >= len(serialized) {
   357  			return offset, errDeserialize("unexpected end of data " +
   358  				"after reserved")
   359  		}
   360  	}
   361  
   362  	// Decode the compressed txout.
   363  	amount, pkScript, bytesRead, err := decodeCompressedTxOut(
   364  		serialized[offset:])
   365  	offset += bytesRead
   366  	if err != nil {
   367  		return offset, errDeserialize(fmt.Sprintf("unable to decode "+
   368  			"txout: %v", err))
   369  	}
   370  	stxo.Amount = int64(amount)
   371  	stxo.PkScript = pkScript
   372  	return offset, nil
   373  }
   374  
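// exampleSpentTxOutRoundTrip is an illustrative sketch added for exposition;
// it is not part of the upstream file and the function name is invented.  It
// serializes a single stxo with putSpentTxOut into a buffer sized by
// spentTxOutSerializeSize and decodes it back with decodeSpentTxOut,
// demonstrating that the two functions are inverses for an individual entry.
func exampleSpentTxOutRoundTrip(stxo *SpentTxOut) (*SpentTxOut, error) {
	// Allocate exactly the number of bytes the serialization needs and
	// write the entry into it.
	serialized := make([]byte, spentTxOutSerializeSize(stxo))
	putSpentTxOut(serialized, stxo)

	// Decode the bytes back into a fresh entry.
	var decoded SpentTxOut
	if _, err := decodeSpentTxOut(serialized, &decoded); err != nil {
		return nil, err
	}
	return &decoded, nil
}
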
   375  // deserializeSpendJournalEntry decodes the passed serialized byte slice into a
   376  // slice of spent txouts according to the format described in detail above.
   377  //
   378  // Since the serialization format is not self describing, as noted in the
   379  // format comments, this function also requires the transactions that spend the
   380  // txouts.
   381  func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]SpentTxOut, error) {
   382  	// Calculate the total number of stxos.
   383  	var numStxos int
   384  	for _, tx := range txns {
   385  		numStxos += len(tx.TxIn)
   386  	}
   387  
   388  	// When a block has no spent txouts there is nothing to serialize.
   389  	if len(serialized) == 0 {
   390  		// Ensure the block actually has no stxos.  This should never
   391  		// happen unless there is database corruption or an empty entry
   392  		// erroneously made its way into the database.
   393  		if numStxos != 0 {
   394  			return nil, AssertError(fmt.Sprintf("mismatched spend "+
   395  				"journal serialization - no serialization for "+
   396  				"expected %d stxos", numStxos))
   397  		}
   398  
   399  		return nil, nil
   400  	}
   401  
   402  	// Loop backwards through all transactions so everything is read in
   403  	// reverse order to match the serialization order.
   404  	stxoIdx := numStxos - 1
   405  	offset := 0
   406  	stxos := make([]SpentTxOut, numStxos)
   407  	for txIdx := len(txns) - 1; txIdx > -1; txIdx-- {
   408  		tx := txns[txIdx]
   409  
   410  		// Loop backwards through all of the transaction inputs and read
   411  		// the associated stxo.
   412  		for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- {
   413  			txIn := tx.TxIn[txInIdx]
   414  			stxo := &stxos[stxoIdx]
   415  			stxoIdx--
   416  
   417  			n, err := decodeSpentTxOut(serialized[offset:], stxo)
   418  			offset += n
   419  			if err != nil {
   420  				return nil, errDeserialize(fmt.Sprintf("unable "+
   421  					"to decode stxo for %v: %v",
   422  					txIn.PreviousOutPoint, err))
   423  			}
   424  		}
   425  	}
   426  
   427  	return stxos, nil
   428  }
   429  
   430  // serializeSpendJournalEntry serializes all of the passed spent txouts into a
   431  // single byte slice according to the format described in detail above.
   432  func serializeSpendJournalEntry(stxos []SpentTxOut) []byte {
   433  	if len(stxos) == 0 {
   434  		return nil
   435  	}
   436  
   437  	// Calculate the size needed to serialize the entire journal entry.
   438  	var size int
   439  	for i := range stxos {
   440  		size += spentTxOutSerializeSize(&stxos[i])
   441  	}
   442  	serialized := make([]byte, size)
   443  
   444  	// Serialize each individual stxo directly into the slice in reverse
   445  	// order one after the other.
   446  	var offset int
   447  	for i := len(stxos) - 1; i > -1; i-- {
   448  		offset += putSpentTxOut(serialized[offset:], &stxos[i])
   449  	}
   450  
   451  	return serialized
   452  }
   453  
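// exampleSpendJournalRoundTrip is an illustrative sketch added for exposition;
// it is not part of the upstream file and the function name is invented.  It
// serializes a journal entry and decodes it again.  Because the format is not
// self describing, the decoder is handed a stand-in spending transaction with
// one input per stxo so it knows how many entries to expect.
func exampleSpendJournalRoundTrip(stxos []SpentTxOut) ([]SpentTxOut, error) {
	serialized := serializeSpendJournalEntry(stxos)

	// A single non-coinbase transaction with one input per stxo stands in
	// for the block's spending transactions.
	spendingTx := &wire.MsgTx{TxIn: make([]*wire.TxIn, len(stxos))}
	for i := range spendingTx.TxIn {
		spendingTx.TxIn[i] = &wire.TxIn{}
	}

	return deserializeSpendJournalEntry(serialized, []*wire.MsgTx{spendingTx})
}
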
   454  // dbFetchSpendJournalEntry fetches the spend journal entry for the passed block
   455  // and deserializes it into a slice of spent txout entries.
   456  //
   457  // NOTE: Legacy entries will not have the coinbase flag or height set unless it
   458  // was the final output spend in the containing transaction.  It is up to the
   459  // caller to handle this properly by looking the information up in the utxo set.
   460  func dbFetchSpendJournalEntry(dbTx database.Tx, block *btcutil.Block) ([]SpentTxOut, error) {
   461  	// Exclude the coinbase transaction since it can't spend anything.
   462  	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
   463  	serialized := spendBucket.Get(block.Hash()[:])
   464  	blockTxns := block.MsgBlock().Transactions[1:]
   465  	stxos, err := deserializeSpendJournalEntry(serialized, blockTxns)
   466  	if err != nil {
   467  		// Ensure any deserialization errors are returned as database
   468  		// corruption errors.
   469  		if isDeserializeErr(err) {
   470  			return nil, database.Error{
   471  				ErrorCode: database.ErrCorruption,
   472  				Description: fmt.Sprintf("corrupt spend "+
   473  					"information for %v: %v", block.Hash(),
   474  					err),
   475  			}
   476  		}
   477  
   478  		return nil, err
   479  	}
   480  
   481  	return stxos, nil
   482  }
   483  
   484  // dbPutSpendJournalEntry uses an existing database transaction to update the
   485  // spend journal entry for the given block hash using the provided slice of
   486  // spent txouts.   The spent txouts slice must contain an entry for every txout
   487  // the transactions in the block spend in the order they are spent.
   488  func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []SpentTxOut) error {
   489  	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
   490  	serialized := serializeSpendJournalEntry(stxos)
   491  	return spendBucket.Put(blockHash[:], serialized)
   492  }
   493  
   494  // dbRemoveSpendJournalEntry uses an existing database transaction to remove the
   495  // spend journal entry for the passed block hash.
   496  func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error {
   497  	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
   498  	return spendBucket.Delete(blockHash[:])
   499  }
   500  
   501  // dbPruneSpendJournalEntry uses an existing database transaction to remove all
   502  // the spend journal entries for the pruned blocks.
   503  func dbPruneSpendJournalEntry(dbTx database.Tx, blockHashes []chainhash.Hash) error {
   504  	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
   505  
   506  	for _, blockHash := range blockHashes {
   507  		err := spendBucket.Delete(blockHash[:])
   508  		if err != nil {
   509  			return err
   510  		}
   511  	}
   512  
   513  	return nil
   514  }
   515  
   516  // -----------------------------------------------------------------------------
   517  // The unspent transaction output (utxo) set consists of an entry for each
   518  // unspent output using a format that is optimized to reduce space using domain
   519  // specific compression algorithms.  This format is a slightly modified version
   520  // of the format used in Bitcoin Core.
   521  //
   522  // Each entry is keyed by an outpoint as specified below.  It is important to
   523  // note that the key encoding uses a VLQ, which employs an MSB encoding so
   524  // iteration of utxos when doing byte-wise comparisons will produce them in
   525  // order.
   526  //
   527  // The serialized key format is:
   528  //   <hash><output index>
   529  //
   530  //   Field                Type             Size
   531  //   hash                 chainhash.Hash   chainhash.HashSize
   532  //   output index         VLQ              variable
   533  //
   534  // The serialized value format is:
   535  //
   536  //   <header code><compressed txout>
   537  //
   538  //   Field                Type     Size
   539  //   header code          VLQ      variable
   540  //   compressed txout
   541  //     compressed amount  VLQ      variable
   542  //     compressed script  []byte   variable
   543  //
   544  // The serialized header code format is:
   545  //   bit 0 - containing transaction is a coinbase
   546  //   bits 1-x - height of the block that contains the unspent txout
   547  //
   548  // Example 1:
   549  // From tx in main blockchain:
   550  // Blk 1, 0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098:0
   551  //
   552  //    03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
   553  //    <><------------------------------------------------------------------>
   554  //     |                                          |
   555  //   header code                         compressed txout
   556  //
   557  //  - header code: 0x03 (coinbase, height 1)
   558  //  - compressed txout:
   559  //    - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
   560  //    - 0x04: special script type pay-to-pubkey
   561  //    - 0x96...52: x-coordinate of the pubkey
   562  //
   563  // Example 2:
   564  // From tx in main blockchain:
   565  // Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
   566  //
   567  //    8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
   568  //    <----><------------------------------------------>
   569  //      |                             |
   570  //   header code             compressed txout
   571  //
   572  //  - header code: 0x8cf316 (not coinbase, height 113931)
   573  //  - compressed txout:
   574  //    - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
   575  //    - 0x00: special script type pay-to-pubkey-hash
   576  //    - 0xb8...58: pubkey hash
   577  //
   578  // Example 3:
   579  // From tx in main blockchain:
   580  // Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
   581  //
   582  //    a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
   583  //    <----><-------------------------------------------------->
   584  //      |                             |
   585  //   header code             compressed txout
   586  //
   587  //  - header code: 0xa8a258 (not coinbase, height 338156)
   588  //  - compressed txout:
   589  //    - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
   590  //    - 0x01: special script type pay-to-script-hash
   591  //    - 0x1d...e6: script hash
   592  // -----------------------------------------------------------------------------
   593  
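// exampleUtxoKeyOrdering is an illustrative sketch added for exposition; it
// is not part of the upstream file and the function name is invented.  It
// demonstrates the ordering property noted above: because the output index is
// appended as an MSB-encoded VLQ, byte-wise comparison of two keys for the
// same transaction orders them by output index.
func exampleUtxoKeyOrdering(hash chainhash.Hash) bool {
	keyLow := outpointKey(wire.OutPoint{Hash: hash, Index: 1})
	keyHigh := outpointKey(wire.OutPoint{Hash: hash, Index: 300})
	inOrder := bytes.Compare(*keyLow, *keyHigh) < 0 // true
	recycleOutpointKey(keyLow)
	recycleOutpointKey(keyHigh)
	return inOrder
}
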
   594  // maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
   595  // to serialize as a VLQ.
   596  var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)
   597  
   598  // outpointKeyPool defines a concurrent safe free list of byte slices used to
   599  // provide temporary buffers for outpoint database keys.
   600  var outpointKeyPool = sync.Pool{
   601  	New: func() interface{} {
   602  		b := make([]byte, chainhash.HashSize+maxUint32VLQSerializeSize)
   603  		return &b // Pointer to slice to avoid boxing alloc.
   604  	},
   605  }
   606  
   607  // outpointKey returns a key suitable for use as a database key in the utxo set
   608  // while making use of a free list.  A new buffer is allocated if there are not
   609  // already any available on the free list.  The returned byte slice should be
   610  // returned to the free list by using the recycleOutpointKey function when the
   611  // caller is done with it _unless_ the slice will need to live for longer than
   612  // the caller can calculate such as when used to write to the database.
   613  func outpointKey(outpoint wire.OutPoint) *[]byte {
   614  	// A VLQ employs an MSB encoding, so they are useful not only to reduce
   615  	// the amount of storage space, but also so iteration of utxos when
   616  	// doing byte-wise comparisons will produce them in order.
   617  	key := outpointKeyPool.Get().(*[]byte)
   618  	idx := uint64(outpoint.Index)
   619  	*key = (*key)[:chainhash.HashSize+serializeSizeVLQ(idx)]
   620  	copy(*key, outpoint.Hash[:])
   621  	putVLQ((*key)[chainhash.HashSize:], idx)
   622  	return key
   623  }
   624  
   625  // recycleOutpointKey puts the provided byte slice, which should have been
   626  // obtained via the outpointKey function, back on the free list.
   627  func recycleOutpointKey(key *[]byte) {
   628  	outpointKeyPool.Put(key)
   629  }
   630  
   631  // utxoEntryHeaderCode returns the calculated header code to be used when
   632  // serializing the provided utxo entry.
   633  func utxoEntryHeaderCode(entry *UtxoEntry) (uint64, error) {
   634  	if entry.IsSpent() {
   635  		return 0, AssertError("attempt to serialize spent utxo header")
   636  	}
   637  
   638  	// As described in the serialization format comments, the header code
   639  	// encodes the height shifted over one bit and the coinbase flag in the
   640  	// lowest bit.
   641  	headerCode := uint64(entry.BlockHeight()) << 1
   642  	if entry.IsCoinBase() {
   643  		headerCode |= 0x01
   644  	}
   645  
   646  	return headerCode, nil
   647  }
   648  
   649  // serializeUtxoEntry returns the entry serialized to a format that is suitable
   650  // for long-term storage.  The format is described in detail above.
   651  func serializeUtxoEntry(entry *UtxoEntry) ([]byte, error) {
   652  	// Spent outputs have no serialization.
   653  	if entry.IsSpent() {
   654  		return nil, nil
   655  	}
   656  
   657  	// Encode the header code.
   658  	headerCode, err := utxoEntryHeaderCode(entry)
   659  	if err != nil {
   660  		return nil, err
   661  	}
   662  
   663  	// Calculate the size needed to serialize the entry.
   664  	size := serializeSizeVLQ(headerCode) +
   665  		compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())
   666  
   667  	// Serialize the header code followed by the compressed unspent
   668  	// transaction output.
   669  	serialized := make([]byte, size)
   670  	offset := putVLQ(serialized, headerCode)
   671  	offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
   672  		entry.PkScript())
   673  
   674  	return serialized, nil
   675  }
   676  
   677  // deserializeUtxoEntry decodes a utxo entry from the passed serialized byte
   678  // slice into a new UtxoEntry using a format that is suitable for long-term
   679  // storage.  The format is described in detail above.
   680  func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) {
   681  	// Deserialize the header code.
   682  	code, offset := deserializeVLQ(serialized)
   683  	if offset >= len(serialized) {
   684  		return nil, errDeserialize("unexpected end of data after header")
   685  	}
   686  
   687  	// Decode the header code.
   688  	//
   689  	// Bit 0 indicates whether the containing transaction is a coinbase.
   690  	// Bits 1-x encode height of containing transaction.
   691  	isCoinBase := code&0x01 != 0
   692  	blockHeight := int32(code >> 1)
   693  
   694  	// Decode the compressed unspent transaction output.
   695  	amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
   696  	if err != nil {
   697  		return nil, errDeserialize(fmt.Sprintf("unable to decode "+
   698  			"utxo: %v", err))
   699  	}
   700  
   701  	entry := &UtxoEntry{
   702  		amount:      int64(amount),
   703  		pkScript:    pkScript,
   704  		blockHeight: blockHeight,
   705  		packedFlags: 0,
   706  	}
   707  	if isCoinBase {
   708  		entry.packedFlags |= tfCoinBase
   709  	}
   710  
   711  	return entry, nil
   712  }
   713  
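// exampleUtxoEntryRoundTrip is an illustrative sketch added for exposition;
// it is not part of the upstream file and the function name is invented.  It
// builds an unspent coinbase entry, serializes it with serializeUtxoEntry, and
// decodes it back with deserializeUtxoEntry, exercising the value layout
// described in the format comments above.
func exampleUtxoEntryRoundTrip(amount int64, pkScript []byte, height int32) (*UtxoEntry, error) {
	entry := &UtxoEntry{
		amount:      amount,
		pkScript:    pkScript,
		blockHeight: height,
		packedFlags: tfCoinBase,
	}

	serialized, err := serializeUtxoEntry(entry)
	if err != nil {
		return nil, err
	}
	return deserializeUtxoEntry(serialized)
}
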
   714  // dbFetchUtxoEntryByHash attempts to find and fetch a utxo for the given hash.
   715  // It uses a cursor and seek to try and do this as efficiently as possible.
   716  //
    717  // When there are no entries for the provided hash, nil will be returned for
   718  // both the entry and the error.
   719  func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) {
   720  	// Attempt to find an entry by seeking for the hash along with a zero
   721  	// index.  Due to the fact the keys are serialized as <hash><index>,
   722  	// where the index uses an MSB encoding, if there are any entries for
   723  	// the hash at all, one will be found.
   724  	cursor := dbTx.Metadata().Bucket(utxoSetBucketName).Cursor()
   725  	key := outpointKey(wire.OutPoint{Hash: *hash, Index: 0})
   726  	ok := cursor.Seek(*key)
   727  	recycleOutpointKey(key)
   728  	if !ok {
   729  		return nil, nil
   730  	}
   731  
   732  	// An entry was found, but it could just be an entry with the next
   733  	// highest hash after the requested one, so make sure the hashes
   734  	// actually match.
   735  	cursorKey := cursor.Key()
   736  	if len(cursorKey) < chainhash.HashSize {
   737  		return nil, nil
   738  	}
   739  	if !bytes.Equal(hash[:], cursorKey[:chainhash.HashSize]) {
   740  		return nil, nil
   741  	}
   742  
   743  	return deserializeUtxoEntry(cursor.Value())
   744  }
   745  
   746  // dbFetchUtxoEntry uses an existing database transaction to fetch the specified
   747  // transaction output from the utxo set.
   748  //
   749  // When there is no entry for the provided output, nil will be returned for both
   750  // the entry and the error.
   751  func dbFetchUtxoEntry(dbTx database.Tx, utxoBucket database.Bucket,
   752  	outpoint wire.OutPoint) (*UtxoEntry, error) {
   753  
   754  	// Fetch the unspent transaction output information for the passed
   755  	// transaction output.  Return now when there is no entry.
   756  	key := outpointKey(outpoint)
   757  	serializedUtxo := utxoBucket.Get(*key)
   758  	recycleOutpointKey(key)
   759  	if serializedUtxo == nil {
   760  		return nil, nil
   761  	}
   762  
   763  	// A non-nil zero-length entry means there is an entry in the database
   764  	// for a spent transaction output which should never be the case.
   765  	if len(serializedUtxo) == 0 {
   766  		return nil, AssertError(fmt.Sprintf("database contains entry "+
   767  			"for spent tx output %v", outpoint))
   768  	}
   769  
   770  	// Deserialize the utxo entry and return it.
   771  	entry, err := deserializeUtxoEntry(serializedUtxo)
   772  	if err != nil {
   773  		// Ensure any deserialization errors are returned as database
   774  		// corruption errors.
   775  		if isDeserializeErr(err) {
   776  			return nil, database.Error{
   777  				ErrorCode: database.ErrCorruption,
   778  				Description: fmt.Sprintf("corrupt utxo entry "+
   779  					"for %v: %v", outpoint, err),
   780  			}
   781  		}
   782  
   783  		return nil, err
   784  	}
   785  
   786  	return entry, nil
   787  }
   788  
   789  // dbPutUtxoView uses an existing database transaction to update the utxo set
   790  // in the database based on the provided utxo view contents and state.  In
   791  // particular, only the entries that have been marked as modified are written
   792  // to the database.
   793  func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error {
   794  	// Return early if the view is nil.
   795  	if view == nil {
   796  		return nil
   797  	}
   798  
   799  	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
   800  	for outpoint, entry := range view.entries {
   801  		// No need to update the database if the entry was not modified.
   802  		if entry == nil || !entry.isModified() {
   803  			continue
   804  		}
   805  
   806  		// Remove the utxo entry if it is spent.
   807  		if entry.IsSpent() {
   808  			err := dbDeleteUtxoEntry(utxoBucket, outpoint)
   809  			if err != nil {
   810  				return err
   811  			}
   812  		} else {
   813  			err := dbPutUtxoEntry(utxoBucket, outpoint, entry)
   814  			if err != nil {
   815  				return err
   816  			}
   817  		}
   818  	}
   819  
   820  	return nil
   821  }
   822  
   823  // dbDeleteUtxoEntry uses an existing database transaction to delete the utxo
   824  // entry from the database.
   825  func dbDeleteUtxoEntry(utxoBucket database.Bucket, outpoint wire.OutPoint) error {
   826  	key := outpointKey(outpoint)
   827  	err := utxoBucket.Delete(*key)
   828  	recycleOutpointKey(key)
   829  	return err
   830  }
   831  
   832  // dbPutUtxoEntry uses an existing database transaction to update the utxo entry
   833  // in the database.
   834  func dbPutUtxoEntry(utxoBucket database.Bucket, outpoint wire.OutPoint,
   835  	entry *UtxoEntry) error {
   836  
   837  	if entry == nil || entry.IsSpent() {
   838  		return AssertError("trying to store nil or spent entry")
   839  	}
   840  
   841  	// Serialize and store the utxo entry.
   842  	serialized, err := serializeUtxoEntry(entry)
   843  	if err != nil {
   844  		return err
   845  	}
   846  	key := outpointKey(outpoint)
   847  	err = utxoBucket.Put(*key, serialized)
   848  	if err != nil {
   849  		return err
   850  	}
   851  
   852  	// NOTE: The key is intentionally not recycled here since the
   853  	// database interface contract prohibits modifications.  It will
   854  	// be garbage collected normally when the database is done with
   855  	// it.
   856  	return nil
   857  }
   858  
   859  // -----------------------------------------------------------------------------
   860  // The block index consists of two buckets with an entry for every block in the
   861  // main chain.  One bucket is for the hash to height mapping and the other is
   862  // for the height to hash mapping.
   863  //
   864  // The serialized format for values in the hash to height bucket is:
   865  //   <height>
   866  //
   867  //   Field      Type     Size
   868  //   height     uint32   4 bytes
   869  //
   870  // The serialized format for values in the height to hash bucket is:
   871  //   <hash>
   872  //
   873  //   Field      Type             Size
   874  //   hash       chainhash.Hash   chainhash.HashSize
   875  // -----------------------------------------------------------------------------
   876  
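// exampleHeightEncoding is an illustrative sketch added for exposition; it is
// not part of the upstream file and the function name is invented.  It shows
// the four-byte little-endian height encoding used by both index buckets
// above: the same bytes serve as the value in the hash index and the key in
// the height index.
func exampleHeightEncoding(height int32) [4]byte {
	var serializedHeight [4]byte
	byteOrder.PutUint32(serializedHeight[:], uint32(height))

	// For height 170 this yields [4]byte{0xaa, 0x00, 0x00, 0x00}.
	return serializedHeight
}
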
   877  // dbPutBlockIndex uses an existing database transaction to update or add the
   878  // block index entries for the hash to height and height to hash mappings for
   879  // the provided values.
   880  func dbPutBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error {
   881  	// Serialize the height for use in the index entries.
   882  	var serializedHeight [4]byte
   883  	byteOrder.PutUint32(serializedHeight[:], uint32(height))
   884  
   885  	// Add the block hash to height mapping to the index.
   886  	meta := dbTx.Metadata()
   887  	hashIndex := meta.Bucket(hashIndexBucketName)
   888  	if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
   889  		return err
   890  	}
   891  
   892  	// Add the block height to hash mapping to the index.
   893  	heightIndex := meta.Bucket(heightIndexBucketName)
   894  	return heightIndex.Put(serializedHeight[:], hash[:])
   895  }
   896  
    897  // dbRemoveBlockIndex uses an existing database transaction to remove block index
   898  // entries from the hash to height and height to hash mappings for the provided
   899  // values.
   900  func dbRemoveBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error {
   901  	// Remove the block hash to height mapping.
   902  	meta := dbTx.Metadata()
   903  	hashIndex := meta.Bucket(hashIndexBucketName)
   904  	if err := hashIndex.Delete(hash[:]); err != nil {
   905  		return err
   906  	}
   907  
   908  	// Remove the block height to hash mapping.
   909  	var serializedHeight [4]byte
   910  	byteOrder.PutUint32(serializedHeight[:], uint32(height))
   911  	heightIndex := meta.Bucket(heightIndexBucketName)
   912  	return heightIndex.Delete(serializedHeight[:])
   913  }
   914  
   915  // dbFetchHeightByHash uses an existing database transaction to retrieve the
   916  // height for the provided hash from the index.
   917  func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) {
   918  	meta := dbTx.Metadata()
   919  	hashIndex := meta.Bucket(hashIndexBucketName)
   920  	serializedHeight := hashIndex.Get(hash[:])
   921  	if serializedHeight == nil {
   922  		str := fmt.Sprintf("block %s is not in the main chain", hash)
   923  		return 0, errNotInMainChain(str)
   924  	}
   925  
   926  	return int32(byteOrder.Uint32(serializedHeight)), nil
   927  }
   928  
   929  // dbFetchHashByHeight uses an existing database transaction to retrieve the
   930  // hash for the provided height from the index.
   931  func dbFetchHashByHeight(dbTx database.Tx, height int32) (*chainhash.Hash, error) {
   932  	var serializedHeight [4]byte
   933  	byteOrder.PutUint32(serializedHeight[:], uint32(height))
   934  
   935  	meta := dbTx.Metadata()
   936  	heightIndex := meta.Bucket(heightIndexBucketName)
   937  	hashBytes := heightIndex.Get(serializedHeight[:])
   938  	if hashBytes == nil {
   939  		str := fmt.Sprintf("no block at height %d exists", height)
   940  		return nil, errNotInMainChain(str)
   941  	}
   942  
   943  	var hash chainhash.Hash
   944  	copy(hash[:], hashBytes)
   945  	return &hash, nil
   946  }
   947  
   948  // -----------------------------------------------------------------------------
   949  // The best chain state consists of the best block hash and height, the total
   950  // number of transactions up to and including those in the best block, and the
   951  // accumulated work sum up to and including the best block.
   952  //
   953  // The serialized format is:
   954  //
   955  //   <block hash><block height><total txns><work sum length><work sum>
   956  //
   957  //   Field             Type             Size
   958  //   block hash        chainhash.Hash   chainhash.HashSize
   959  //   block height      uint32           4 bytes
   960  //   total txns        uint64           8 bytes
   961  //   work sum length   uint32           4 bytes
   962  //   work sum          big.Int          work sum length
   963  // -----------------------------------------------------------------------------
   964  
    965  // bestChainState represents the data to be stored in the database for the current
   966  // best chain state.
   967  type bestChainState struct {
   968  	hash      chainhash.Hash
   969  	height    uint32
   970  	totalTxns uint64
   971  	workSum   *big.Int
   972  }
   973  
   974  // serializeBestChainState returns the serialization of the passed block best
   975  // chain state.  This is data to be stored in the chain state bucket.
   976  func serializeBestChainState(state bestChainState) []byte {
   977  	// Calculate the full size needed to serialize the chain state.
   978  	workSumBytes := state.workSum.Bytes()
   979  	workSumBytesLen := uint32(len(workSumBytes))
   980  	serializedLen := chainhash.HashSize + 4 + 8 + 4 + workSumBytesLen
   981  
   982  	// Serialize the chain state.
   983  	serializedData := make([]byte, serializedLen)
   984  	copy(serializedData[0:chainhash.HashSize], state.hash[:])
   985  	offset := uint32(chainhash.HashSize)
   986  	byteOrder.PutUint32(serializedData[offset:], state.height)
   987  	offset += 4
   988  	byteOrder.PutUint64(serializedData[offset:], state.totalTxns)
   989  	offset += 8
   990  	byteOrder.PutUint32(serializedData[offset:], workSumBytesLen)
   991  	offset += 4
   992  	copy(serializedData[offset:], workSumBytes)
   993  	return serializedData
   994  }
   995  
   996  // deserializeBestChainState deserializes the passed serialized best chain
   997  // state.  This is data stored in the chain state bucket and is updated after
    998  // every block is connected or disconnected from the main chain.
  1000  func deserializeBestChainState(serializedData []byte) (bestChainState, error) {
  1001  	// Ensure the serialized data has enough bytes to properly deserialize
  1002  	// the hash, height, total transactions, and work sum length.
  1003  	if len(serializedData) < chainhash.HashSize+16 {
  1004  		return bestChainState{}, database.Error{
  1005  			ErrorCode:   database.ErrCorruption,
  1006  			Description: "corrupt best chain state",
  1007  		}
  1008  	}
  1009  
  1010  	state := bestChainState{}
  1011  	copy(state.hash[:], serializedData[0:chainhash.HashSize])
  1012  	offset := uint32(chainhash.HashSize)
  1013  	state.height = byteOrder.Uint32(serializedData[offset : offset+4])
  1014  	offset += 4
  1015  	state.totalTxns = byteOrder.Uint64(serializedData[offset : offset+8])
  1016  	offset += 8
  1017  	workSumBytesLen := byteOrder.Uint32(serializedData[offset : offset+4])
  1018  	offset += 4
  1019  
  1020  	// Ensure the serialized data has enough bytes to deserialize the work
  1021  	// sum.
  1022  	if uint32(len(serializedData[offset:])) < workSumBytesLen {
  1023  		return bestChainState{}, database.Error{
  1024  			ErrorCode:   database.ErrCorruption,
  1025  			Description: "corrupt best chain state",
  1026  		}
  1027  	}
  1028  	workSumBytes := serializedData[offset : offset+workSumBytesLen]
  1029  	state.workSum = new(big.Int).SetBytes(workSumBytes)
  1030  
  1031  	return state, nil
  1032  }
  1033  
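// exampleBestChainStateRoundTrip is an illustrative sketch added for
// exposition; it is not part of the upstream file and the function name is
// invented.  It serializes a best chain state snapshot and decodes it back,
// exercising the layout documented in the format comments above.
func exampleBestChainStateRoundTrip(hash chainhash.Hash, height uint32,
	totalTxns uint64, workSum *big.Int) (bestChainState, error) {

	serialized := serializeBestChainState(bestChainState{
		hash:      hash,
		height:    height,
		totalTxns: totalTxns,
		workSum:   workSum,
	})
	return deserializeBestChainState(serialized)
}
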
  1034  // dbPutBestState uses an existing database transaction to update the best chain
  1035  // state with the given parameters.
  1036  func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error {
  1037  	// Serialize the current best chain state.
  1038  	serializedData := serializeBestChainState(bestChainState{
  1039  		hash:      snapshot.Hash,
  1040  		height:    uint32(snapshot.Height),
  1041  		totalTxns: snapshot.TotalTxns,
  1042  		workSum:   workSum,
  1043  	})
  1044  
  1045  	// Store the current best chain state into the database.
  1046  	return dbTx.Metadata().Put(chainStateKeyName, serializedData)
  1047  }
  1048  
  1049  // dbPutUtxoStateConsistency uses an existing database transaction to
  1050  // update the utxo state consistency status with the given parameters.
  1051  func dbPutUtxoStateConsistency(dbTx database.Tx, hash *chainhash.Hash) error {
  1052  	// Store the utxo state consistency status into the database.
  1053  	return dbTx.Metadata().Put(utxoStateConsistencyKeyName, hash[:])
  1054  }
  1055  
  1056  // dbFetchUtxoStateConsistency uses an existing database transaction to retrieve
  1057  // the utxo state consistency status from the database.  The code is 0 when
   1058  // the utxo state consistency status from the database.  Nil is returned when
   1059  // nothing was found.
  1060  	// Fetch the serialized data from the database.
  1061  	return dbTx.Metadata().Get(utxoStateConsistencyKeyName)
  1062  }
  1063  
  1064  // createChainState initializes both the database and the chain state to the
  1065  // genesis block.  This includes creating the necessary buckets and inserting
  1066  // the genesis block, so it must only be called on an uninitialized database.
  1067  func (b *BlockChain) createChainState() error {
  1068  	// Create a new node from the genesis block and set it as the best node.
  1069  	genesisBlock := btcutil.NewBlock(b.chainParams.GenesisBlock)
  1070  	genesisBlock.SetHeight(0)
  1071  	header := &genesisBlock.MsgBlock().Header
  1072  	node := newBlockNode(header, nil)
  1073  	node.status = statusDataStored | statusValid
  1074  	b.bestChain.SetTip(node)
  1075  
  1076  	// Add the new node to the index which is used for faster lookups.
  1077  	b.index.addNode(node)
  1078  
  1079  	// Initialize the state related to the best block.  Since it is the
  1080  	// genesis block, use its timestamp for the median time.
  1081  	numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
  1082  	blockSize := uint64(genesisBlock.MsgBlock().SerializeSize())
  1083  	blockWeight := uint64(GetBlockWeight(genesisBlock))
  1084  	b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns,
  1085  		numTxns, time.Unix(node.timestamp, 0))
  1086  
   1087  	// Create the initial database chain state including creating the
  1088  	// necessary index buckets and inserting the genesis block.
  1089  	err := b.db.Update(func(dbTx database.Tx) error {
  1090  		meta := dbTx.Metadata()
  1091  
  1092  		// Create the bucket that houses the block index data.
  1093  		_, err := meta.CreateBucket(blockIndexBucketName)
  1094  		if err != nil {
  1095  			return err
  1096  		}
  1097  
  1098  		// Create the bucket that houses the chain block hash to height
  1099  		// index.
  1100  		_, err = meta.CreateBucket(hashIndexBucketName)
  1101  		if err != nil {
  1102  			return err
  1103  		}
  1104  
  1105  		// Create the bucket that houses the chain block height to hash
  1106  		// index.
  1107  		_, err = meta.CreateBucket(heightIndexBucketName)
  1108  		if err != nil {
  1109  			return err
  1110  		}
  1111  
  1112  		// Create the bucket that houses the spend journal data and
  1113  		// store its version.
  1114  		_, err = meta.CreateBucket(spendJournalBucketName)
  1115  		if err != nil {
  1116  			return err
  1117  		}
   1118  		err = dbPutVersion(dbTx, spendJournalVersionKeyName,
   1119  			latestSpendJournalBucketVersion)
  1120  		if err != nil {
  1121  			return err
  1122  		}
  1123  
  1124  		// Create the bucket that houses the utxo set and store its
  1125  		// version.  Note that the genesis block coinbase transaction is
  1126  		// intentionally not inserted here since it is not spendable by
  1127  		// consensus rules.
  1128  		_, err = meta.CreateBucket(utxoSetBucketName)
  1129  		if err != nil {
  1130  			return err
  1131  		}
   1132  		err = dbPutVersion(dbTx, utxoSetVersionKeyName,
   1133  			latestUtxoSetBucketVersion)
  1134  		if err != nil {
  1135  			return err
  1136  		}
  1137  
  1138  		// Save the genesis block to the block index database.
  1139  		err = dbStoreBlockNode(dbTx, node)
  1140  		if err != nil {
  1141  			return err
  1142  		}
  1143  
  1144  		// Add the genesis block hash to height and height to hash
  1145  		// mappings to the index.
  1146  		err = dbPutBlockIndex(dbTx, &node.hash, node.height)
  1147  		if err != nil {
  1148  			return err
  1149  		}
  1150  
  1151  		// Store the current best chain state into the database.
  1152  		err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum)
  1153  		if err != nil {
  1154  			return err
  1155  		}
  1156  
  1157  		// Store the genesis block into the database.
  1158  		return dbStoreBlock(dbTx, genesisBlock)
  1159  	})
  1160  	return err
  1161  }
  1162  
  1163  // initChainState attempts to load and initialize the chain state from the
  1164  // database.  When the db does not yet contain any chain state, both it and the
  1165  // chain state are initialized to the genesis block.
  1166  func (b *BlockChain) initChainState() error {
  1167  	// Determine the state of the chain database. We may need to initialize
  1168  	// everything from scratch or upgrade certain buckets.
  1169  	var initialized, hasBlockIndex bool
  1170  	err := b.db.View(func(dbTx database.Tx) error {
  1171  		initialized = dbTx.Metadata().Get(chainStateKeyName) != nil
  1172  		hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil
  1173  		return nil
  1174  	})
  1175  	if err != nil {
  1176  		return err
  1177  	}
  1178  
  1179  	if !initialized {
  1180  		// At this point the database has not already been initialized, so
  1181  		// initialize both it and the chain state to the genesis block.
  1182  		return b.createChainState()
  1183  	}
  1184  
  1185  	if !hasBlockIndex {
  1186  		err := migrateBlockIndex(b.db)
  1187  		if err != nil {
   1188  			return err
  1189  		}
  1190  	}
  1191  
  1192  	// Attempt to load the chain state from the database.
  1193  	err = b.db.View(func(dbTx database.Tx) error {
  1194  		// Fetch the stored chain state from the database metadata.
  1195  		// When it doesn't exist, it means the database hasn't been
  1196  		// initialized for use with chain yet, so break out now to allow
  1197  		// that to happen under a writable database transaction.
  1198  		serializedData := dbTx.Metadata().Get(chainStateKeyName)
  1199  		log.Tracef("Serialized chain state: %x", serializedData)
  1200  		state, err := deserializeBestChainState(serializedData)
  1201  		if err != nil {
  1202  			return err
  1203  		}
  1204  
  1205  		// Load all of the headers from the data for the known best
  1206  		// chain and construct the block index accordingly.  Since the
   1207  		// number of nodes is already known, perform a single alloc
  1208  		// for them versus a whole bunch of little ones to reduce
  1209  		// pressure on the GC.
  1210  		log.Infof("Loading block index...")
  1211  
  1212  		blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
  1213  
  1214  		var i int32
  1215  		var lastNode *blockNode
  1216  		cursor := blockIndexBucket.Cursor()
  1217  		for ok := cursor.First(); ok; ok = cursor.Next() {
  1218  			header, status, err := deserializeBlockRow(cursor.Value())
  1219  			if err != nil {
  1220  				return err
  1221  			}
  1222  
  1223  			// Determine the parent block node. Since we iterate block headers
  1224  			// in order of height, if the blocks are mostly linear there is a
  1225  			// very good chance the previous header processed is the parent.
  1226  			var parent *blockNode
  1227  			if lastNode == nil {
  1228  				blockHash := header.BlockHash()
  1229  				if !blockHash.IsEqual(b.chainParams.GenesisHash) {
  1230  					return AssertError(fmt.Sprintf("initChainState: Expected "+
  1231  						"first entry in block index to be genesis block, "+
  1232  						"found %s", blockHash))
  1233  				}
  1234  			} else if header.PrevBlock == lastNode.hash {
  1235  				// Since we iterate block headers in order of height, if the
  1236  				// blocks are mostly linear there is a very good chance the
  1237  				// previous header processed is the parent.
  1238  				parent = lastNode
  1239  			} else {
  1240  				parent = b.index.LookupNode(&header.PrevBlock)
  1241  				if parent == nil {
  1242  					return AssertError(fmt.Sprintf("initChainState: Could "+
  1243  						"not find parent for block %s", header.BlockHash()))
  1244  				}
  1245  			}
  1246  
  1247  			// Initialize the block node for the block, connect it,
  1248  			// and add it to the block index.
  1249  			node := new(blockNode)
  1250  			initBlockNode(node, header, parent)
  1251  			node.status = status
  1252  			b.index.addNode(node)
  1253  
  1254  			lastNode = node
  1255  			i++
  1256  		}
  1257  
  1258  		// Set the best chain view to the stored best state.
  1259  		tip := b.index.LookupNode(&state.hash)
  1260  		if tip == nil {
  1261  			return AssertError(fmt.Sprintf("initChainState: cannot find "+
  1262  				"chain tip %s in block index", state.hash))
  1263  		}
  1264  		b.bestChain.SetTip(tip)
  1265  
  1266  		// Load the raw block bytes for the best block.
  1267  		blockBytes, err := dbTx.FetchBlock(&state.hash)
  1268  		if err != nil {
  1269  			return err
  1270  		}
  1271  		var block wire.MsgBlock
  1272  		err = block.Deserialize(bytes.NewReader(blockBytes))
  1273  		if err != nil {
  1274  			return err
  1275  		}
  1276  
  1277  		// As a final consistency check, we'll run through all the
  1278  		// nodes which are ancestors of the current chain tip, and mark
  1279  		// them as valid if they aren't already marked as such.  This
   1280  		// is a safe assumption as all the blocks before the current tip
  1281  		// are valid by definition.
  1282  		for iterNode := tip; iterNode != nil; iterNode = iterNode.parent {
  1283  			// If this isn't already marked as valid in the index, then
  1284  			// we'll mark it as valid now to ensure consistency once
  1285  			// we're up and running.
  1286  			if !iterNode.status.KnownValid() {
  1287  				log.Infof("Block %v (height=%v) ancestor of "+
  1288  					"chain tip not marked as valid, "+
  1289  					"upgrading to valid for consistency",
  1290  					iterNode.hash, iterNode.height)
  1291  
  1292  				b.index.SetStatusFlags(iterNode, statusValid)
  1293  			}
  1294  		}
  1295  
  1296  		// Initialize the state related to the best block.
  1297  		blockSize := uint64(len(blockBytes))
  1298  		blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block)))
  1299  		numTxns := uint64(len(block.Transactions))
  1300  		b.stateSnapshot = newBestState(tip, blockSize, blockWeight,
  1301  			numTxns, state.totalTxns, CalcPastMedianTime(tip))
  1302  
  1303  		return nil
  1304  	})
  1305  	if err != nil {
  1306  		return err
  1307  	}
  1308  
  1309  	// As we might have updated the index after it was loaded, we'll
  1310  	// attempt to flush the index to the DB. This will only result in a
  1311  	// write if the elements are dirty, so it'll usually be a noop.
  1312  	return b.index.flushToDB()
  1313  }
  1314  
  1315  // deserializeBlockRow parses a value in the block index bucket into a block
  1316  // header and block status bitfield.
  1317  func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
  1318  	buffer := bytes.NewReader(blockRow)
  1319  
  1320  	var header wire.BlockHeader
  1321  	err := header.Deserialize(buffer)
  1322  	if err != nil {
  1323  		return nil, statusNone, err
  1324  	}
  1325  
  1326  	statusByte, err := buffer.ReadByte()
  1327  	if err != nil {
  1328  		return nil, statusNone, err
  1329  	}
  1330  
  1331  	return &header, blockStatus(statusByte), nil
  1332  }
  1333  
  1334  // dbFetchHeaderByHash uses an existing database transaction to retrieve the
  1335  // block header for the provided hash.
  1336  func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) {
  1337  	headerBytes, err := dbTx.FetchBlockHeader(hash)
  1338  	if err != nil {
  1339  		return nil, err
  1340  	}
  1341  
  1342  	var header wire.BlockHeader
  1343  	err = header.Deserialize(bytes.NewReader(headerBytes))
  1344  	if err != nil {
  1345  		return nil, err
  1346  	}
  1347  
  1348  	return &header, nil
  1349  }
  1350  
  1351  // dbFetchHeaderByHeight uses an existing database transaction to retrieve the
  1352  // block header for the provided height.
  1353  func dbFetchHeaderByHeight(dbTx database.Tx, height int32) (*wire.BlockHeader, error) {
  1354  	hash, err := dbFetchHashByHeight(dbTx, height)
  1355  	if err != nil {
  1356  		return nil, err
  1357  	}
  1358  
  1359  	return dbFetchHeaderByHash(dbTx, hash)
  1360  }
  1361  
  1362  // dbFetchBlockByNode uses an existing database transaction to retrieve the
  1363  // raw block for the provided node, deserialize it, and return a btcutil.Block
  1364  // with the height set.
  1365  func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*btcutil.Block, error) {
  1366  	// Load the raw block bytes from the database.
  1367  	blockBytes, err := dbTx.FetchBlock(&node.hash)
  1368  	if err != nil {
  1369  		return nil, err
  1370  	}
  1371  
  1372  	// Create the encapsulated block and set the height appropriately.
  1373  	block, err := btcutil.NewBlockFromBytes(blockBytes)
  1374  	if err != nil {
  1375  		return nil, err
  1376  	}
  1377  	block.SetHeight(node.height)
  1378  
  1379  	return block, nil
  1380  }
  1381  
  1382  // dbStoreBlockNode stores the block header and validation status to the block
  1383  // index bucket. This overwrites the current entry if there exists one.
  1384  func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
  1385  	// Serialize block data to be stored.
  1386  	w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
  1387  	header := node.Header()
  1388  	err := header.Serialize(w)
  1389  	if err != nil {
  1390  		return err
  1391  	}
  1392  	err = w.WriteByte(byte(node.status))
  1393  	if err != nil {
  1394  		return err
  1395  	}
  1396  	value := w.Bytes()
  1397  
  1398  	// Write block header data to block index bucket.
  1399  	blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
  1400  	key := blockIndexKey(&node.hash, uint32(node.height))
  1401  	return blockIndexBucket.Put(key, value)
  1402  }
  1403  
  1404  // dbStoreBlock stores the provided block in the database if it is not already
  1405  // there. The full block data is written to ffldb.
  1406  func dbStoreBlock(dbTx database.Tx, block *btcutil.Block) error {
  1407  	hasBlock, err := dbTx.HasBlock(block.Hash())
  1408  	if err != nil {
  1409  		return err
  1410  	}
  1411  	if hasBlock {
  1412  		return nil
  1413  	}
  1414  	return dbTx.StoreBlock(block)
  1415  }
  1416  
  1417  // blockIndexKey generates the binary key for an entry in the block index
  1418  // bucket. The key is composed of the block height encoded as a big-endian
  1419  // 32-bit unsigned int followed by the 32 byte block hash.
  1420  func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte {
  1421  	indexKey := make([]byte, chainhash.HashSize+4)
  1422  	binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
  1423  	copy(indexKey[4:chainhash.HashSize+4], blockHash[:])
  1424  	return indexKey
  1425  }
  1426  
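// exampleBlockIndexKeyOrdering is an illustrative sketch added for
// exposition; it is not part of the upstream file and the function name is
// invented.  Because the height prefix is big-endian, byte-wise comparison of
// block index keys orders entries by height, which is what lets initChainState
// iterate the bucket from the genesis block upwards.
func exampleBlockIndexKeyOrdering(hash *chainhash.Hash) bool {
	keyLow := blockIndexKey(hash, 9)
	keyHigh := blockIndexKey(hash, 10)
	return bytes.Compare(keyLow, keyHigh) < 0 // true
}
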
  1427  // BlockByHeight returns the block at the given height in the main chain.
  1428  //
  1429  // This function is safe for concurrent access.
  1430  func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) {
  1431  	// Lookup the block height in the best chain.
  1432  	node := b.bestChain.NodeByHeight(blockHeight)
  1433  	if node == nil {
  1434  		str := fmt.Sprintf("no block at height %d exists", blockHeight)
  1435  		return nil, errNotInMainChain(str)
  1436  	}
  1437  
  1438  	// Load the block from the database and return it.
  1439  	var block *btcutil.Block
  1440  	err := b.db.View(func(dbTx database.Tx) error {
  1441  		var err error
  1442  		block, err = dbFetchBlockByNode(dbTx, node)
  1443  		return err
  1444  	})
  1445  	return block, err
  1446  }
  1447  
  1448  // BlockByHash returns the block from the main chain with the given hash with
  1449  // the appropriate chain height set.
  1450  //
  1451  // This function is safe for concurrent access.
  1452  func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*btcutil.Block, error) {
  1453  	// Lookup the block hash in block index and ensure it is in the best
  1454  	// chain.
  1455  	node := b.index.LookupNode(hash)
  1456  	if node == nil || !b.bestChain.Contains(node) {
  1457  		str := fmt.Sprintf("block %s is not in the main chain", hash)
  1458  		return nil, errNotInMainChain(str)
  1459  	}
  1460  
  1461  	// Load the block from the database and return it.
  1462  	var block *btcutil.Block
  1463  	err := b.db.View(func(dbTx database.Tx) error {
  1464  		var err error
  1465  		block, err = dbFetchBlockByNode(dbTx, node)
  1466  		return err
  1467  	})
  1468  	return block, err
  1469  }