github.com/decred/dcrd/blockchain@v1.2.1/chainio.go

     1  // Copyright (c) 2015-2016 The btcsuite developers
     2  // Copyright (c) 2016-2019 The Decred developers
     3  // Use of this source code is governed by an ISC
     4  // license that can be found in the LICENSE file.
     5  
     6  package blockchain
     7  
     8  import (
     9  	"bytes"
    10  	"encoding/binary"
    11  	"fmt"
    12  	"math/big"
    13  	"sort"
    14  	"time"
    15  
    16  	"github.com/decred/dcrd/blockchain/internal/dbnamespace"
    17  	"github.com/decred/dcrd/blockchain/stake"
    18  	"github.com/decred/dcrd/chaincfg/chainhash"
    19  	"github.com/decred/dcrd/database"
    20  	"github.com/decred/dcrd/dcrutil"
    21  	"github.com/decred/dcrd/wire"
    22  )
    23  
    24  const (
    25  	// currentDatabaseVersion indicates what the current database
    26  	// version is.
    27  	currentDatabaseVersion = 5
    28  
    29  	// currentBlockIndexVersion indicates the current block index
    30  	// database version.
    31  	currentBlockIndexVersion = 2
    32  
    33  	// blockHdrSize is the size of a block header.  This is simply the
    34  	// constant from wire and is only provided here for convenience since
    35  	// wire.MaxBlockHeaderPayload is quite long.
    36  	blockHdrSize = wire.MaxBlockHeaderPayload
    37  )
    38  
    39  // errNotInMainChain signifies that a block hash or height that is not in the
    40  // main chain was requested.
    41  type errNotInMainChain string
    42  
    43  // Error implements the error interface.
    44  func (e errNotInMainChain) Error() string {
    45  	return string(e)
    46  }
    47  
    48  // errDeserialize signifies that a problem was encountered when deserializing
    49  // data.
    50  type errDeserialize string
    51  
    52  // Error implements the error interface.
    53  func (e errDeserialize) Error() string {
    54  	return string(e)
    55  }
    56  
    57  // isDeserializeErr returns whether or not the passed error is an errDeserialize
    58  // error.
    59  func isDeserializeErr(err error) bool {
    60  	_, ok := err.(errDeserialize)
    61  	return ok
    62  }
    63  
    64  // -----------------------------------------------------------------------------
    65  // The staking system requires some extra information to be stored for tickets
    66  // to maintain consensus rules. The full set of minimal outputs is thus required
    67  // in order for the chain to work correctly. A 'minimal output' is simply the
    68  // script version, pubkey script, and amount.
    69  
    70  // serializeSizeForMinimalOutputs calculates the number of bytes needed to
    71  // serialize a transaction to its minimal outputs.
    72  func serializeSizeForMinimalOutputs(tx *dcrutil.Tx) int {
    73  	sz := serializeSizeVLQ(uint64(len(tx.MsgTx().TxOut)))
    74  	for _, out := range tx.MsgTx().TxOut {
    75  		sz += serializeSizeVLQ(compressTxOutAmount(uint64(out.Value)))
    76  		sz += serializeSizeVLQ(uint64(out.Version))
    77  		sz += serializeSizeVLQ(uint64(len(out.PkScript)))
    78  		sz += len(out.PkScript)
    79  	}
    80  
    81  	return sz
    82  }
    83  
    84  // putTxToMinimalOutputs serializes a transaction to its minimal outputs.
    85  // It returns the amount of data written. The function will panic if it writes
    86  // beyond the bounds of the passed memory.
    87  func putTxToMinimalOutputs(target []byte, tx *dcrutil.Tx) int {
    88  	offset := putVLQ(target, uint64(len(tx.MsgTx().TxOut)))
    89  	for _, out := range tx.MsgTx().TxOut {
    90  		offset += putVLQ(target[offset:], compressTxOutAmount(uint64(out.Value)))
    91  		offset += putVLQ(target[offset:], uint64(out.Version))
    92  		offset += putVLQ(target[offset:], uint64(len(out.PkScript)))
    93  		copy(target[offset:], out.PkScript)
    94  		offset += len(out.PkScript)
    95  	}
    96  
    97  	return offset
    98  }
    99  
   100  // deserializeToMinimalOutputs deserializes a series of minimal outputs to their
   101  // decompressed, deserialized state and stores them in a slice. It also returns
   102  // the amount of data read. The function will panic if it reads beyond the bounds
   103  // of the passed memory.
   104  func deserializeToMinimalOutputs(serialized []byte) ([]*stake.MinimalOutput, int) {
   105  	numOutputs, offset := deserializeVLQ(serialized)
   106  	minOuts := make([]*stake.MinimalOutput, int(numOutputs))
   107  	for i := 0; i < int(numOutputs); i++ {
   108  		amountComp, bytesRead := deserializeVLQ(serialized[offset:])
   109  		amount := decompressTxOutAmount(amountComp)
   110  		offset += bytesRead
   111  
   112  		version, bytesRead := deserializeVLQ(serialized[offset:])
   113  		offset += bytesRead
   114  
   115  		scriptSize, bytesRead := deserializeVLQ(serialized[offset:])
   116  		offset += bytesRead
   117  
   118  		pkScript := make([]byte, int(scriptSize))
   119  		copy(pkScript, serialized[offset:offset+int(scriptSize)])
   120  		offset += int(scriptSize)
   121  
   122  		minOuts[i] = &stake.MinimalOutput{
   123  			Value:    int64(amount),
   124  			Version:  uint16(version),
   125  			PkScript: pkScript,
   126  		}
   127  	}
   128  
   129  	return minOuts, offset
   130  }
   131  
   132  // readDeserializeSizeOfMinimalOutputs reads the size of the stored set of
   133  // minimal outputs without allocating memory for the structs themselves. The
   134  // function will panic if it reads beyond the bounds of the passed memory.
   135  func readDeserializeSizeOfMinimalOutputs(serialized []byte) int {
   136  	numOutputs, offset := deserializeVLQ(serialized)
   137  	for i := 0; i < int(numOutputs); i++ {
   138  		// Amount
   139  		_, bytesRead := deserializeVLQ(serialized[offset:])
   140  		offset += bytesRead
   141  
   142  		// Script version
   143  		_, bytesRead = deserializeVLQ(serialized[offset:])
   144  		offset += bytesRead
   145  
   146  		// Script
   147  		var scriptSize uint64
   148  		scriptSize, bytesRead = deserializeVLQ(serialized[offset:])
   149  		offset += bytesRead
   150  		offset += int(scriptSize)
   151  	}
   152  
   153  	return offset
   154  }
   155  
   156  // ConvertUtxosToMinimalOutputs converts the contents of a utxo entry to a
   157  // series of minimal outputs. It does this so that they can be passed to stake
   158  // subpackage functions, where they will be evaluated for correctness.
   159  func ConvertUtxosToMinimalOutputs(entry *UtxoEntry) []*stake.MinimalOutput {
   160  	minOuts, _ := deserializeToMinimalOutputs(entry.stakeExtra)
   161  
   162  	return minOuts
   163  }
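
// exampleMinimalOutputRoundTrip is an illustrative sketch added for this edit
// (not part of the original file).  It shows how the helpers above are meant
// to be used together: size a buffer, serialize the transaction's minimal
// outputs into it, and deserialize them back.  The transaction contents are
// contrived placeholders.
func exampleMinimalOutputRoundTrip() ([]*stake.MinimalOutput, error) {
	// Build a contrived transaction with a single output.  The script is a
	// bare OP_RETURN placeholder, not a real ticket output script.
	msgTx := &wire.MsgTx{
		TxOut: []*wire.TxOut{{
			Value:    100000000, // 1 DCR in atoms
			Version:  0,
			PkScript: []byte{0x6a},
		}},
	}
	tx := dcrutil.NewTx(msgTx)

	// Allocate exactly the number of bytes the serialization requires and
	// write the minimal outputs into it.
	buf := make([]byte, serializeSizeForMinimalOutputs(tx))
	if n := putTxToMinimalOutputs(buf, tx); n != len(buf) {
		return nil, fmt.Errorf("short write: got %d bytes, want %d", n, len(buf))
	}

	// Read the minimal outputs back out of the buffer.
	minOuts, _ := deserializeToMinimalOutputs(buf)
	return minOuts, nil
}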
   164  
   165  // -----------------------------------------------------------------------------
   166  // The block index consists of an entry for every known block.  Each entry
   167  // contains the block header and the hashes of tickets voted and revoked.
   168  //
   169  // The serialized key format is:
   170  //
   171  //   <block height><block hash>
   172  //
   173  //   Field           Type              Size
   174  //   block height    uint32            4 bytes
   175  //   block hash      chainhash.Hash    chainhash.HashSize
   176  //
   177  // The serialized value format is:
   178  //
   179  //   <block header><status><num votes><votes info><num revoked><revoked tickets>
   180  //
   181  //   Field              Type                Size
   182  //   block header       wire.BlockHeader    180 bytes
   183  //   status             blockStatus         1 byte
   184  //   num votes          VLQ                 variable
   185  //   vote info
   186  //     ticket hash      chainhash.Hash      chainhash.HashSize
   187  //     vote version     VLQ                 variable
   188  //     vote bits        VLQ                 variable
   189  //   num revoked        VLQ                 variable
   190  //   revoked tickets
   191  //     ticket hash      chainhash.Hash      chainhash.HashSize
   192  // -----------------------------------------------------------------------------
   193  
   194  // blockIndexEntry represents a block index database entry.
   195  type blockIndexEntry struct {
   196  	header         wire.BlockHeader
   197  	status         blockStatus
   198  	voteInfo       []stake.VoteVersionTuple
   199  	ticketsVoted   []chainhash.Hash
   200  	ticketsRevoked []chainhash.Hash
   201  }
   202  
   203  // blockIndexKey generates the binary key for an entry in the block index
   204  // bucket.  The key is composed of the block height encoded as a big-endian
   205  // 32-bit unsigned int followed by the 32 byte block hash.  Big endian is used
   206  // here so the entries can easily be iterated by height.
   207  func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte {
   208  	indexKey := make([]byte, chainhash.HashSize+4)
   209  	binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
   210  	copy(indexKey[4:chainhash.HashSize+4], blockHash[:])
   211  	return indexKey
   212  }
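
// exampleBlockIndexKey is an illustrative sketch added for this edit (not part
// of the original file).  Because the height occupies the first four bytes in
// big-endian order, keys compare by height when iterated in byte order: the
// key for height 0x01020304 begins with the bytes 0x01 0x02 0x03 0x04 followed
// by the block hash.
func exampleBlockIndexKey() []byte {
	var hash chainhash.Hash // zero hash purely for illustration
	return blockIndexKey(&hash, 0x01020304)
}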
   213  
   214  // blockIndexEntrySerializeSize returns the number of bytes it would take to
   215  // serialize the passed block index entry according to the format described
   216  // above.
   217  func blockIndexEntrySerializeSize(entry *blockIndexEntry) int {
   218  	voteInfoSize := 0
   219  	for i := range entry.voteInfo {
   220  		voteInfoSize += chainhash.HashSize +
   221  			serializeSizeVLQ(uint64(entry.voteInfo[i].Version)) +
   222  			serializeSizeVLQ(uint64(entry.voteInfo[i].Bits))
   223  	}
   224  
   225  	return blockHdrSize + 1 + serializeSizeVLQ(uint64(len(entry.voteInfo))) +
   226  		voteInfoSize + serializeSizeVLQ(uint64(len(entry.ticketsRevoked))) +
   227  		chainhash.HashSize*len(entry.ticketsRevoked)
   228  }
   229  
   230  // putBlockIndexEntry serializes the passed block index entry according to the
   231  // format described above directly into the passed target byte slice.  The
   232  // target byte slice must be at least large enough to handle the number of bytes
   233  // returned by the blockIndexEntrySerializeSize function or it will panic.
   234  func putBlockIndexEntry(target []byte, entry *blockIndexEntry) (int, error) {
   235  	if len(entry.voteInfo) != len(entry.ticketsVoted) {
   236  		return 0, AssertError("putBlockIndexEntry called with " +
   237  			"mismatched number of tickets voted and vote info")
   238  	}
   239  
   240  	// Serialize the entire block header.
   241  	w := bytes.NewBuffer(target[0:0])
   242  	if err := entry.header.Serialize(w); err != nil {
   243  		return 0, err
   244  	}
   245  
   246  	// Serialize the status.
   247  	offset := blockHdrSize
   248  	target[offset] = byte(entry.status)
   249  	offset++
   250  
   251  	// Serialize the number of votes and associated vote information.
   252  	offset += putVLQ(target[offset:], uint64(len(entry.voteInfo)))
   253  	for i := range entry.voteInfo {
   254  		offset += copy(target[offset:], entry.ticketsVoted[i][:])
   255  		offset += putVLQ(target[offset:], uint64(entry.voteInfo[i].Version))
   256  		offset += putVLQ(target[offset:], uint64(entry.voteInfo[i].Bits))
   257  	}
   258  
   259  	// Serialize the number of revocations and associated revocation
   260  	// information.
   261  	offset += putVLQ(target[offset:], uint64(len(entry.ticketsRevoked)))
   262  	for i := range entry.ticketsRevoked {
   263  		offset += copy(target[offset:], entry.ticketsRevoked[i][:])
   264  	}
   265  
   266  	return offset, nil
   267  }
   268  
   269  // serializeBlockIndexEntry serializes the passed block index entry into a
   270  // single byte slice according to the format described in detail above.
   271  func serializeBlockIndexEntry(entry *blockIndexEntry) ([]byte, error) {
   272  	serialized := make([]byte, blockIndexEntrySerializeSize(entry))
   273  	_, err := putBlockIndexEntry(serialized, entry)
   274  	return serialized, err
   275  }
   276  
   277  // decodeBlockIndexEntry decodes the passed serialized block index entry into
   278  // the passed struct according to the format described above.  It returns the
   279  // number of bytes read.
   280  func decodeBlockIndexEntry(serialized []byte, entry *blockIndexEntry) (int, error) {
   281  	// Ensure there are enough bytes to decode header.
   282  	if len(serialized) < blockHdrSize {
   283  		return 0, errDeserialize("unexpected end of data while " +
   284  			"reading block header")
   285  	}
   286  	hB := serialized[0:blockHdrSize]
   287  
   288  	// Deserialize the header.
   289  	var header wire.BlockHeader
   290  	if err := header.Deserialize(bytes.NewReader(hB)); err != nil {
   291  		return 0, err
   292  	}
   293  	offset := blockHdrSize
   294  
   295  	// Deserialize the status.
   296  	if offset+1 > len(serialized) {
   297  		return offset, errDeserialize("unexpected end of data while " +
   298  			"reading status")
   299  	}
   300  	status := blockStatus(serialized[offset])
   301  	offset++
   302  
   303  	// Deserialize the number of votes and associated vote information.
   304  	var ticketsVoted []chainhash.Hash
   305  	var votes []stake.VoteVersionTuple
   306  	numVotes, bytesRead := deserializeVLQ(serialized[offset:])
   307  	if bytesRead == 0 {
   308  		return offset, errDeserialize("unexpected end of data while " +
   309  			"reading num votes")
   310  	}
   311  	offset += bytesRead
   312  	if numVotes > 0 {
   313  		ticketsVoted = make([]chainhash.Hash, numVotes)
   314  		votes = make([]stake.VoteVersionTuple, numVotes)
   315  		for i := uint64(0); i < numVotes; i++ {
   316  			// Deserialize the ticket hash associated with the vote.
   317  			if offset+chainhash.HashSize > len(serialized) {
   318  				return offset, errDeserialize(fmt.Sprintf("unexpected "+
   319  					"end of data while reading vote #%d hash",
   320  					i))
   321  			}
   322  			copy(ticketsVoted[i][:], serialized[offset:])
   323  			offset += chainhash.HashSize
   324  
   325  			// Deserialize the vote version.
   326  			version, bytesRead := deserializeVLQ(serialized[offset:])
   327  			if bytesRead == 0 {
   328  				return offset, errDeserialize(fmt.Sprintf("unexpected "+
   329  					"end of data while reading vote #%d version",
   330  					i))
   331  			}
   332  			offset += bytesRead
   333  
   334  			// Deserialize the vote bits.
   335  			voteBits, bytesRead := deserializeVLQ(serialized[offset:])
   336  			if bytesRead == 0 {
   337  				return offset, errDeserialize(fmt.Sprintf("unexpected "+
   338  					"end of data while reading vote #%d bits",
   339  					i))
   340  			}
   341  			offset += bytesRead
   342  
   343  			votes[i].Version = uint32(version)
   344  			votes[i].Bits = uint16(voteBits)
   345  		}
   346  	}
   347  
   348  	// Deserialize the number of tickets revoked.
   349  	var ticketsRevoked []chainhash.Hash
   350  	numTicketsRevoked, bytesRead := deserializeVLQ(serialized[offset:])
   351  	if bytesRead == 0 {
   352  		return offset, errDeserialize("unexpected end of data while " +
   353  			"reading num tickets revoked")
   354  	}
   355  	offset += bytesRead
   356  	if numTicketsRevoked > 0 {
   357  		ticketsRevoked = make([]chainhash.Hash, numTicketsRevoked)
   358  		for i := uint64(0); i < numTicketsRevoked; i++ {
   359  			// Deserialize the ticket hash associated with the
   360  			// revocation.
   361  			if offset+chainhash.HashSize > len(serialized) {
   362  				return offset, errDeserialize(fmt.Sprintf("unexpected "+
   363  					"end of data while reading revocation "+
   364  					"#%d", i))
   365  			}
   366  			copy(ticketsRevoked[i][:], serialized[offset:])
   367  			offset += chainhash.HashSize
   368  		}
   369  	}
   370  
   371  	entry.header = header
   372  	entry.status = status
   373  	entry.voteInfo = votes
   374  	entry.ticketsVoted = ticketsVoted
   375  	entry.ticketsRevoked = ticketsRevoked
   376  	return offset, nil
   377  }
   378  
   379  // deserializeBlockIndexEntry decodes the passed serialized byte slice into a
   380  // block index entry according to the format described above.
   381  func deserializeBlockIndexEntry(serialized []byte) (*blockIndexEntry, error) {
   382  	var entry blockIndexEntry
   383  	if _, err := decodeBlockIndexEntry(serialized, &entry); err != nil {
   384  		return nil, err
   385  	}
   386  	return &entry, nil
   387  }
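
// exampleBlockIndexEntryRoundTrip is an illustrative sketch added for this
// edit (not part of the original file).  It serializes a block index entry
// with a mostly zero-valued header and no votes or revocations, then decodes
// it again with the helpers above.
func exampleBlockIndexEntryRoundTrip() (*blockIndexEntry, error) {
	entry := &blockIndexEntry{
		header: wire.BlockHeader{Height: 1000}, // contrived header for illustration
	}

	serialized, err := serializeBlockIndexEntry(entry)
	if err != nil {
		return nil, err
	}

	// The decoded entry should mirror the original: same header, no vote
	// info, and no revoked tickets.
	return deserializeBlockIndexEntry(serialized)
}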
   388  
   389  // dbPutBlockNode stores the information needed to reconstruct the provided
   390  // block node in the block index according to the format described above.
   391  func dbPutBlockNode(dbTx database.Tx, node *blockNode) error {
   392  	serialized, err := serializeBlockIndexEntry(&blockIndexEntry{
   393  		header:         node.Header(),
   394  		status:         node.status,
   395  		voteInfo:       node.votes,
   396  		ticketsVoted:   node.ticketsVoted,
   397  		ticketsRevoked: node.ticketsRevoked,
   398  	})
   399  	if err != nil {
   400  		return err
   401  	}
   402  
   403  	bucket := dbTx.Metadata().Bucket(dbnamespace.BlockIndexBucketName)
   404  	key := blockIndexKey(&node.hash, uint32(node.height))
   405  	return bucket.Put(key, serialized)
   406  }
   407  
   408  // dbMaybeStoreBlock stores the provided block in the database if it's not
   409  // already there.
   410  func dbMaybeStoreBlock(dbTx database.Tx, block *dcrutil.Block) error {
   411  	// Store the block in ffldb if not already done.
   412  	hasBlock, err := dbTx.HasBlock(block.Hash())
   413  	if err != nil {
   414  		return err
   415  	}
   416  	if hasBlock {
   417  		return nil
   418  	}
   419  
   420  	return dbTx.StoreBlock(block)
   421  }
   422  
   423  // -----------------------------------------------------------------------------
   424  // The transaction spend journal consists of an entry for each block connected
   425  // to the main chain which contains the transaction outputs the block spends,
   426  // serialized such that the order is the reverse of the order they were spent.
   427  //
   428  // This is required because reorganizing the chain necessarily entails
   429  // disconnecting blocks to get back to the point of the fork which implies
   430  // unspending all of the transaction outputs that each block previously spent.
   431  // Since the utxo set, by definition, only contains unspent transaction outputs,
   432  // the spent transaction outputs must be resurrected from somewhere.  There is
   433  // more than one way this could be done; however, this is the most
   434  // straightforward method that does not require having a transaction index and
   435  // an unpruned blockchain.
   436  //
   437  // NOTE: This format is NOT self describing.  The additional details such as
   438  // the number of entries (transaction inputs) are expected to come from the
   439  // block itself and the utxo set.  The rationale in doing this is to save a
   440  // significant amount of space.  This is also the reason the spent outputs are
   441  // serialized in the reverse order they are spent because later transactions
   442  // are allowed to spend outputs from earlier ones in the same block.
   443  //
   444  // The serialized format is:
   445  //
   446  //   [<flags><script version><compressed pk script>],...
   447  //   OPTIONAL: [<txVersion>[<stakeExtra>]]
   448  //
   449  //   Field                Type           Size
   450  //   flags                VLQ            byte
   451  //   scriptVersion        uint16         2 bytes
   452  //   pkScript             VLQ+[]byte     variable
   453  //
   454  //   OPTIONAL
   455  //     txVersion          VLQ            variable
   456  //     stakeExtra         []byte         variable
   457  //
   458  // The serialized flags code format is:
   459  //   bit  0   - containing transaction is a coinbase
   460  //   bit  1   - containing transaction has an expiry
   461  //   bits 2-3 - transaction type
   462  //   bit  4   - is fully spent
   463  //
   464  // The stake extra field contains minimally encoded outputs for all
   465  // consensus-related outputs in the stake transaction. It is only
   466  // encoded for tickets.
   467  //
   468  //   NOTE: The transaction version and flags are only encoded when the spent
   469  //   txout was the final unspent output of the containing transaction.
   470  //   Otherwise, the header code will be 0 and the version is not serialized at
   471  //   all. This is done because that information is only needed when the utxo
   472  //   set no longer has it.
   473  //
   474  // Example:
   475  //   TODO
   476  // -----------------------------------------------------------------------------
   477  
   478  // spentTxOut contains a spent transaction output and potentially additional
   479  // contextual information such as whether or not it was contained in a coinbase
   480  // transaction, the txVersion of the transaction it was contained in, and which
   481  // block height the containing transaction was included in.  As described in
   482  // the comments above, the additional contextual information will only be valid
   483  // when this spent txout is spending the last unspent output of the containing
   484  // transaction.
   485  //
   486  // The struct is aligned for memory efficiency.
   487  type spentTxOut struct {
   488  	pkScript   []byte // The public key script for the output.
   489  	stakeExtra []byte // Extra information for the staking system.
   490  
   491  	amount        int64        // The amount of the output.
   492  	txType        stake.TxType // The stake type of the transaction.
   493  	height        uint32       // Height of the block containing the tx.
   494  	index         uint32       // Index in the block of the transaction.
   495  	scriptVersion uint16       // The version of the scripting language.
   496  	txVersion     uint16       // The version of the creating tx.
   497  
   498  	txFullySpent bool // Whether or not the transaction is fully spent.
   499  	isCoinBase   bool // Whether the creating tx is a coinbase.
   500  	hasExpiry    bool // Whether the creating tx has an expiry.
   501  	compressed   bool // Whether or not the script is compressed.
   502  }
   503  
   504  // spentTxOutSerializeSize returns the number of bytes it would take to
   505  // serialize the passed stxo according to the format described above.
   506  // The amount is never encoded into spent transaction outputs in Decred
   507  // because it is already encoded into the transactions, so it is skipped when
   508  // determining the serialization size.
   509  func spentTxOutSerializeSize(stxo *spentTxOut) int {
   510  	flags := encodeFlags(stxo.isCoinBase, stxo.hasExpiry, stxo.txType,
   511  		stxo.txFullySpent)
   512  	size := serializeSizeVLQ(uint64(flags))
   513  
   514  	// false below indicates that the txOut does not specify an amount.
   515  	size += compressedTxOutSize(uint64(stxo.amount), stxo.scriptVersion,
   516  		stxo.pkScript, currentCompressionVersion, stxo.compressed, false)
   517  
   518  	// The transaction was fully spent, so we need to store some extra
   519  	// data for UTX resurrection.
   520  	if stxo.txFullySpent {
   521  		size += serializeSizeVLQ(uint64(stxo.txVersion))
   522  		if stxo.txType == stake.TxTypeSStx {
   523  			size += len(stxo.stakeExtra)
   524  		}
   525  	}
   526  
   527  	return size
   528  }
   529  
   530  // putSpentTxOut serializes the passed stxo according to the format described
   531  // above directly into the passed target byte slice.  The target byte slice must
   532  // be at least large enough to handle the number of bytes returned by the
   533  // spentTxOutSerializeSize function or it will panic.
   534  func putSpentTxOut(target []byte, stxo *spentTxOut) int {
   535  	flags := encodeFlags(stxo.isCoinBase, stxo.hasExpiry, stxo.txType,
   536  		stxo.txFullySpent)
   537  	offset := putVLQ(target, uint64(flags))
   538  
   539  	// false below indicates that the txOut does not specify an amount.
   540  	offset += putCompressedTxOut(target[offset:], 0, stxo.scriptVersion,
   541  		stxo.pkScript, currentCompressionVersion, stxo.compressed, false)
   542  
   543  	// The transaction was fully spent, so we need to store some extra
   544  	// data for UTX resurrection.
   545  	if stxo.txFullySpent {
   546  		offset += putVLQ(target[offset:], uint64(stxo.txVersion))
   547  		if stxo.txType == stake.TxTypeSStx {
   548  			copy(target[offset:], stxo.stakeExtra)
   549  			offset += len(stxo.stakeExtra)
   550  		}
   551  	}
   552  	return offset
   553  }
   554  
   555  // decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed
   556  // by other data, into the passed stxo struct.  It returns the number of bytes
   557  // read.
   558  //
   559  // Since the serialized stxo entry does not contain the height, version, or
   560  // coinbase flag of the containing transaction when it still has utxos, the
   561  // caller is responsible for passing in the containing transaction version in
   562  // that case.  The provided version is ignored when it is serialized as a part of
   563  // the stxo.
   564  //
   565  // An error will be returned if the version is not serialized as a part of the
   566  // stxo and is also not provided to the function.
   567  func decodeSpentTxOut(serialized []byte, stxo *spentTxOut, amount int64, height uint32, index uint32) (int, error) {
   568  	// Ensure there are bytes to decode.
   569  	if len(serialized) == 0 {
   570  		return 0, errDeserialize("no serialized bytes")
   571  	}
   572  
   573  	// Deserialize the header code.
   574  	flags, offset := deserializeVLQ(serialized)
   575  	if offset >= len(serialized) {
   576  		return offset, errDeserialize("unexpected end of data after " +
   577  			"spent tx out flags")
   578  	}
   579  
   580  	// Decode the flags. If the flags are non-zero, it means that the
   581  	// transaction was fully spent at this spend.
   582  	if decodeFlagsFullySpent(byte(flags)) {
   583  		isCoinBase, hasExpiry, txType, _ := decodeFlags(byte(flags))
   584  
   585  		stxo.isCoinBase = isCoinBase
   586  		stxo.hasExpiry = hasExpiry
   587  		stxo.txType = txType
   588  		stxo.txFullySpent = true
   589  	}
   590  
   591  	// Decode the compressed txout. We pass false for the amount flag,
   592  	// since in Decred only the pkScript is needed; fraud proofs already
   593  	// store the decompressed amount.
   594  	_, scriptVersion, compScript, bytesRead, err :=
   595  		decodeCompressedTxOut(serialized[offset:], currentCompressionVersion,
   596  			false)
   597  	offset += bytesRead
   598  	if err != nil {
   599  		return offset, errDeserialize(fmt.Sprintf("unable to decode "+
   600  			"txout: %v", err))
   601  	}
   602  	stxo.scriptVersion = scriptVersion
   603  	stxo.amount = amount
   604  	stxo.pkScript = compScript
   605  	stxo.compressed = true
   606  	stxo.height = height
   607  	stxo.index = index
   608  
   609  	// Deserialize the containing transaction if the flags indicate that
   610  	// the transaction has been fully spent.
   611  	if decodeFlagsFullySpent(byte(flags)) {
   612  		txVersion, bytesRead := deserializeVLQ(serialized[offset:])
   613  		offset += bytesRead
   614  		if offset == 0 || offset > len(serialized) {
   615  			return offset, errDeserialize("unexpected end of data " +
   616  				"after version")
   617  		}
   618  
   619  		stxo.txVersion = uint16(txVersion)
   620  
   621  		if stxo.txType == stake.TxTypeSStx {
   622  			sz := readDeserializeSizeOfMinimalOutputs(serialized[offset:])
   623  			if sz == 0 || sz > len(serialized[offset:]) {
   624  				return offset, errDeserialize("corrupt data for ticket " +
   625  					"fully spent stxo stakeextra")
   626  			}
   627  
   628  			stakeExtra := make([]byte, sz)
   629  			copy(stakeExtra, serialized[offset:offset+sz])
   630  			stxo.stakeExtra = stakeExtra
   631  			offset += sz
   632  		}
   633  	}
   634  
   635  	return offset, nil
   636  }
   637  
   638  // deserializeSpendJournalEntry decodes the passed serialized byte slice into a
   639  // slice of spent txouts according to the format described in detail above.
   640  //
   641  // Since the serialization format is not self describing, as noted in the
   642  // format comments, this function also requires the transactions that spend the
   643  // txouts so that the number of expected stxos and the fraud proof data for
   644  // each spent output can be determined.
   645  func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]spentTxOut, error) {
   646  	// Calculate the total number of stxos.
   647  	var numStxos int
   648  	for _, tx := range txns {
   649  		if stake.IsSSGen(tx) {
   650  			numStxos++
   651  			continue
   652  		}
   653  		numStxos += len(tx.TxIn)
   654  	}
   655  
   656  	// When a block has no spent txouts there is nothing to serialize.
   657  	if len(serialized) == 0 {
   658  		// Ensure the block actually has no stxos.  This should never
   659  		// happen unless there is database corruption or an empty entry
   660  		// erroneously made its way into the database.
   661  		if numStxos != 0 {
   662  			return nil, AssertError(fmt.Sprintf("mismatched spend "+
   663  				"journal serialization - no serialization for "+
   664  				"expected %d stxos", numStxos))
   665  		}
   666  
   667  		return nil, nil
   668  	}
   669  
   670  	// Loop backwards through all transactions so everything is read in
   671  	// reverse order to match the serialization order.
   672  	stxoIdx := numStxos - 1
   673  	offset := 0
   674  	stxos := make([]spentTxOut, numStxos)
   675  	for txIdx := len(txns) - 1; txIdx > -1; txIdx-- {
   676  		tx := txns[txIdx]
   677  		isVote := stake.IsSSGen(tx)
   678  
   679  		// Loop backwards through all of the transaction inputs and read
   680  		// the associated stxo.
   681  		for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- {
   682  			// Skip the stakebase since it does not spend a utxo.
   683  			if txInIdx == 0 && isVote {
   684  				continue
   685  			}
   686  
   687  			txIn := tx.TxIn[txInIdx]
   688  			stxo := &stxos[stxoIdx]
   689  			stxoIdx--
   690  
   691  			// Get the transaction version for the stxo based on
   692  			// whether or not it should be serialized as a part of
   693  			// the stxo.  Recall that it is only serialized when the
   694  			// stxo spends the final utxo of a transaction.  Since
   695  			// they are deserialized in reverse order, this means
   696  			// the first time an entry for a given containing tx is
   697  			// encountered that is not already in the utxo view it
   698  			// must have been the final spend and thus the extra
   699  			// data will be serialized with the stxo.  Otherwise,
   700  			// the version must be pulled from the utxo entry.
   701  			//
   702  			// Since the view is not actually modified as the stxos
   703  			// are read here and it's possible later entries
   704  			// reference earlier ones, an inflight map is maintained
   705  			// to detect this case and pull the tx version from the
   706  			// entry that contains the version information as just
   707  			// described.
   708  			n, err := decodeSpentTxOut(serialized[offset:], stxo, txIn.ValueIn,
   709  				txIn.BlockHeight, txIn.BlockIndex)
   710  			offset += n
   711  			if err != nil {
   712  				return nil, errDeserialize(fmt.Sprintf("unable "+
   713  					"to decode stxo for %v: %v",
   714  					txIn.PreviousOutPoint, err))
   715  			}
   716  		}
   717  	}
   718  
   719  	return stxos, nil
   720  }
   721  
   722  // serializeSpendJournalEntry serializes all of the passed spent txouts into a
   723  // single byte slice according to the format described in detail above.
   724  func serializeSpendJournalEntry(stxos []spentTxOut) ([]byte, error) {
   725  	if len(stxos) == 0 {
   726  		return nil, nil
   727  	}
   728  
   729  	// Calculate the size needed to serialize the entire journal entry.
   730  	var size int
   731  	sizes := make([]int, 0, len(stxos))
   732  	for i := range stxos {
   733  		sz := spentTxOutSerializeSize(&stxos[i])
   734  		sizes = append(sizes, sz)
   735  		size += sz
   736  	}
   737  	serialized := make([]byte, size)
   738  
   739  	// Serialize each individual stxo directly into the slice in reverse
   740  	// order one after the other.
   741  	var offset int
   742  	for i := len(stxos) - 1; i > -1; i-- {
   743  		oldOffset := offset
   744  		offset += putSpentTxOut(serialized[offset:], &stxos[i])
   745  
   746  		if offset-oldOffset != sizes[i] {
   747  			return nil, AssertError(fmt.Sprintf("bad write; expect sz %v, "+
   748  				"got sz %v (wrote %x)", sizes[i], offset-oldOffset,
   749  				serialized[oldOffset:offset]))
   750  		}
   751  	}
   752  
   753  	return serialized, nil
   754  }
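
// exampleSpendJournalRoundTrip is an illustrative sketch added for this edit
// (not part of the original file).  It pairs a single contrived regular
// transaction with the one stxo its input produces, demonstrating that the
// journal can only be decoded together with the transactions that spend the
// outputs (the input's fraud proof fields supply the amount, height, and
// index).
func exampleSpendJournalRoundTrip() ([]spentTxOut, error) {
	// A contrived non-stake transaction with a single input.  Only the
	// fields used by the journal code are populated.
	spendingTx := &wire.MsgTx{
		TxIn: []*wire.TxIn{{
			ValueIn:     5000000000,
			BlockHeight: 100,
			BlockIndex:  1,
		}},
		TxOut: []*wire.TxOut{{Value: 4999000000, PkScript: []byte{0x6a}}},
	}

	// The stxo describes the output consumed by the input above.  Marking
	// it fully spent causes the creating tx version to be serialized too.
	stxos := []spentTxOut{{
		amount:       5000000000,
		pkScript:     []byte{0x6a}, // placeholder script
		txType:       stake.TxTypeRegular,
		txVersion:    1,
		txFullySpent: true,
	}}

	serialized, err := serializeSpendJournalEntry(stxos)
	if err != nil {
		return nil, err
	}
	return deserializeSpendJournalEntry(serialized, []*wire.MsgTx{spendingTx})
}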
   755  
   756  // dbFetchSpendJournalEntry fetches the spend journal entry for the passed
   757  // block and deserializes it into a slice of spent txout entries.  The
   758  // transactions in the passed block are required since the journal format is
   759  // not self describing and that information is needed to reconstruct the spent
   760  // txouts.
   761  func dbFetchSpendJournalEntry(dbTx database.Tx, block *dcrutil.Block) ([]spentTxOut, error) {
   762  	// Exclude the coinbase transaction since it can't spend anything.
   763  	spendBucket := dbTx.Metadata().Bucket(dbnamespace.SpendJournalBucketName)
   764  	serialized := spendBucket.Get(block.Hash()[:])
   765  	msgBlock := block.MsgBlock()
   766  
   767  	blockTxns := make([]*wire.MsgTx, 0, len(msgBlock.STransactions)+
   768  		len(msgBlock.Transactions[1:]))
   769  	blockTxns = append(blockTxns, msgBlock.STransactions...)
   770  	blockTxns = append(blockTxns, msgBlock.Transactions[1:]...)
   771  	if len(blockTxns) > 0 && len(serialized) == 0 {
   772  		panicf("missing spend journal data for %s", block.Hash())
   773  	}
   774  
   775  	stxos, err := deserializeSpendJournalEntry(serialized, blockTxns)
   776  	if err != nil {
   777  		// Ensure any deserialization errors are returned as database
   778  		// corruption errors.
   779  		if isDeserializeErr(err) {
   780  			return nil, database.Error{
   781  				ErrorCode: database.ErrCorruption,
   782  				Description: fmt.Sprintf("corrupt spend "+
   783  					"information for %v: %v", block.Hash(),
   784  					err),
   785  			}
   786  		}
   787  
   788  		return nil, err
   789  	}
   790  
   791  	return stxos, nil
   792  }
   793  
   794  // dbPutSpendJournalEntry uses an existing database transaction to update the
   795  // spend journal entry for the given block hash using the provided slice of
   796  // spent txouts.  The spent txouts slice must contain an entry for every txout
   797  // the transactions in the block spend in the order they are spent.
   798  func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []spentTxOut) error {
   799  	spendBucket := dbTx.Metadata().Bucket(dbnamespace.SpendJournalBucketName)
   800  	serialized, err := serializeSpendJournalEntry(stxos)
   801  	if err != nil {
   802  		return err
   803  	}
   804  	return spendBucket.Put(blockHash[:], serialized)
   805  }
   806  
   807  // dbRemoveSpendJournalEntry uses an existing database transaction to remove the
   808  // spend journal entry for the passed block hash.
   809  func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error {
   810  	spendBucket := dbTx.Metadata().Bucket(dbnamespace.SpendJournalBucketName)
   811  	return spendBucket.Delete(blockHash[:])
   812  }
   813  
   814  // -----------------------------------------------------------------------------
   815  // The unspent transaction output (utxo) set consists of an entry for each
   816  // transaction which contains a utxo serialized using a format that is highly
   817  // optimized to reduce space using domain specific compression algorithms.  This
   818  // format is a slightly modified version of the format used in Bitcoin Core.
   819  //
   820  // The serialized format is:
   821  //
   822  //   <version><height><block index><flags><header code><unspentness bitmap>[<compressed txouts>,...]<stake extra>
   823  //
   824  //   Field                 Type     Size
   825  //   transaction version   VLQ      variable
   826  //   block height          VLQ      variable
   827  //   block index           VLQ      variable
   828  //   flags                 VLQ      variable (currently 1 byte)
   829  //   header code           VLQ      variable
   830  //   unspentness bitmap    []byte   variable
   831  //   compressed txouts
   832  //     compressed amount   VLQ      variable
   833  //     compressed version  VLQ      variable
   834  //     compressed script   []byte   variable
   835  //   stakeExtra            []byte   variable
   836  //
   837  // The serialized flags code format is:
   838  //   bit  0   - containing transaction is a coinbase
   839  //   bit  1   - containing transaction has an expiry
   840  //   bits 2-3 - transaction type
   841  //   bits 4-7 - unused
   842  //
   843  // The serialized header code format is:
   844  //   bit 0 - output zero is unspent
   845  //   bit 1 - output one is unspent
   846  //   bits 2-x - number of bytes in unspentness bitmap.  When both bits 0 and 1
   847  //     are unset, it encodes N-1 since there must be at least one unspent
   848  //     output.
   849  //
   850  // The rationale for the header code scheme is as follows:
   851  //   - Transactions which only pay to a single output and a change output are
   852  //     extremely common, thus an extra byte for the unspentness bitmap can be
   853  //     avoided for them by encoding those two outputs in the low order bits.
   854  //   - Given it is encoded as a VLQ which can encode values up to 127 with a
   855  //     single byte, that leaves 5 bits to represent the number of bytes in the
   856  //     unspentness bitmap while still only consuming a single byte for the
   857  //     header code.  In other words, an unspentness bitmap covering up to 248
   858  //     additional transaction outputs can be encoded with a single-byte header
   859  //     code.  This covers the vast majority of transactions.
   860  //   - Encoding N-1 bytes when both bits 0 and 1 are unset allows an additional
   861  //     8 outpoints to be encoded before causing the header code to require an
   862  //     additional byte.
   863  //
   864  // The stake extra field contains minimally encoded outputs for all
   865  // consensus-related outputs in the stake transaction. It is only
   866  // encoded for tickets.
   867  //
   868  // Example 1: TODO
   869  // -----------------------------------------------------------------------------
   870  
   871  // utxoEntryHeaderCode returns the calculated header code to be used when
   872  // serializing the provided utxo entry and the number of bytes needed to encode
   873  // the unspentness bitmap.
   874  func utxoEntryHeaderCode(entry *UtxoEntry, highestOutputIndex uint32) (uint64, int, error) {
   875  	// The first two outputs are encoded separately, so offset the index
   876  	// accordingly to calculate the correct number of bytes needed to encode
   877  	// up to the highest unspent output index.
   878  	numBitmapBytes := int((highestOutputIndex + 6) / 8)
   879  
   880  	// As previously described, one less than the number of bytes is encoded
   881  	// when both output 0 and 1 are spent because there must be at least one
   882  	// unspent output.  Adjust the number of bytes to encode accordingly and
   883  	// encode the value by shifting it over 2 bits.
   884  	output0Unspent := !entry.IsOutputSpent(0)
   885  	output1Unspent := !entry.IsOutputSpent(1)
   886  	var numBitmapBytesAdjustment int
   887  	if !output0Unspent && !output1Unspent {
   888  		if numBitmapBytes == 0 {
   889  			return 0, 0, AssertError("attempt to serialize utxo " +
   890  				"header for fully spent transaction")
   891  		}
   892  		numBitmapBytesAdjustment = 1
   893  	}
   894  	headerCode := uint64(numBitmapBytes-numBitmapBytesAdjustment) << 2
   895  
   896  	// Set the output 0 and output 1 bits in the header code
   897  	// accordingly.
   898  	if output0Unspent {
   899  		headerCode |= 0x01 // bit 0
   900  	}
   901  	if output1Unspent {
   902  		headerCode |= 0x02 // bit 1
   903  	}
   904  
   905  	return headerCode, numBitmapBytes, nil
   906  }
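
// exampleUtxoEntryHeaderCode is an illustrative sketch added for this edit
// (not part of the original file).  For an entry whose only unspent outputs
// are 1 and 9, the bitmap needs a single byte ((9+6)/8) and bit 1 is set for
// unspent output one, so the rules above should yield a header code of
// 1<<2 | 0x02 = 0x06.
func exampleUtxoEntryHeaderCode() (uint64, int, error) {
	entry := newUtxoEntry(1, 100, 0, false, false, stake.TxTypeRegular)
	entry.sparseOutputs[1] = &utxoOutput{amount: 1, pkScript: []byte{0x6a}}
	entry.sparseOutputs[9] = &utxoOutput{amount: 2, pkScript: []byte{0x6a}}

	// Output 9 is the highest unspent output index for this entry.
	return utxoEntryHeaderCode(entry, 9)
}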
   907  
   908  // serializeUtxoEntry returns the entry serialized to a format that is suitable
   909  // for long-term storage.  The format is described in detail above.
   910  func serializeUtxoEntry(entry *UtxoEntry) ([]byte, error) {
   911  	// Fully spent entries have no serialization.
   912  	if entry.IsFullySpent() {
   913  		return nil, nil
   914  	}
   915  
   916  	// Determine the output order by sorting the sparse output index keys.
   917  	outputOrder := make([]int, 0, len(entry.sparseOutputs))
   918  	for outputIndex := range entry.sparseOutputs {
   919  		outputOrder = append(outputOrder, int(outputIndex))
   920  	}
   921  	sort.Ints(outputOrder)
   922  
   923  	// Encode the header code and determine the number of bytes the
   924  	// unspentness bitmap needs.
   925  	highIndex := uint32(outputOrder[len(outputOrder)-1])
   926  	headerCode, numBitmapBytes, err := utxoEntryHeaderCode(entry, highIndex)
   927  	if err != nil {
   928  		return nil, err
   929  	}
   930  
   931  	// Calculate the size needed to serialize the entry.
   932  	flags := encodeFlags(entry.isCoinBase, entry.hasExpiry, entry.txType, false)
   933  	size := serializeSizeVLQ(uint64(entry.txVersion)) +
   934  		serializeSizeVLQ(uint64(entry.height)) +
   935  		serializeSizeVLQ(uint64(entry.index)) +
   936  		serializeSizeVLQ(uint64(flags)) +
   937  		serializeSizeVLQ(headerCode) + numBitmapBytes
   938  	for _, outputIndex := range outputOrder {
   939  		out := entry.sparseOutputs[uint32(outputIndex)]
   940  		if out.spent {
   941  			continue
   942  		}
   943  		size += compressedTxOutSize(uint64(out.amount), out.scriptVersion,
   944  			out.pkScript, currentCompressionVersion, out.compressed, true)
   945  	}
   946  	if entry.txType == stake.TxTypeSStx {
   947  		size += len(entry.stakeExtra)
   948  	}
   949  
   950  	// Serialize the version, block height, block index, and flags of the
   951  	// containing transaction, and the "header code" which encodes which of the
   952  	// first two outputs are unspent and the size of the unspentness bitmap.
   953  	serialized := make([]byte, size)
   954  	offset := putVLQ(serialized, uint64(entry.txVersion))
   955  	offset += putVLQ(serialized[offset:], uint64(entry.height))
   956  	offset += putVLQ(serialized[offset:], uint64(entry.index))
   957  	offset += putVLQ(serialized[offset:], uint64(flags))
   958  	offset += putVLQ(serialized[offset:], headerCode)
   959  
   960  	// Serialize the unspentness bitmap.
   961  	for i := uint32(0); i < uint32(numBitmapBytes); i++ {
   962  		unspentBits := byte(0)
   963  		for j := uint32(0); j < 8; j++ {
   964  			// The first 2 outputs are encoded via the header code,
   965  			// so adjust the output index accordingly.
   966  			if !entry.IsOutputSpent(2 + i*8 + j) {
   967  				unspentBits |= 1 << uint8(j)
   968  			}
   969  		}
   970  		serialized[offset] = unspentBits
   971  		offset++
   972  	}
   973  
   974  	// Serialize the compressed unspent transaction outputs.  Outputs that
   975  	// are already compressed are serialized without modifications.
   976  	for _, outputIndex := range outputOrder {
   977  		out := entry.sparseOutputs[uint32(outputIndex)]
   978  		if out.spent {
   979  			continue
   980  		}
   981  
   982  		offset += putCompressedTxOut(serialized[offset:],
   983  			uint64(out.amount), out.scriptVersion, out.pkScript,
   984  			currentCompressionVersion, out.compressed, true)
   985  	}
   986  
   987  	if entry.txType == stake.TxTypeSStx {
   988  		copy(serialized[offset:], entry.stakeExtra)
   989  	}
   990  
   991  	return serialized, nil
   992  }
   993  
   994  // deserializeUtxoEntry decodes a utxo entry from the passed serialized byte
   995  // slice into a new UtxoEntry using a format that is suitable for long-term
   996  // storage.  The format is described in detail above.
   997  func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) {
   998  	// Deserialize the version.
   999  	version, bytesRead := deserializeVLQ(serialized)
  1000  	offset := bytesRead
  1001  	if offset >= len(serialized) {
  1002  		return nil, errDeserialize("unexpected end of data after version")
  1003  	}
  1004  
  1005  	// Deserialize the block height.
  1006  	blockHeight, bytesRead := deserializeVLQ(serialized[offset:])
  1007  	offset += bytesRead
  1008  	if offset >= len(serialized) {
  1009  		return nil, errDeserialize("unexpected end of data after height")
  1010  	}
  1011  
  1012  	// Deserialize the block index.
  1013  	blockIndex, bytesRead := deserializeVLQ(serialized[offset:])
  1014  	offset += bytesRead
  1015  	if offset >= len(serialized) {
  1016  		return nil, errDeserialize("unexpected end of data after index")
  1017  	}
  1018  
  1019  	// Deserialize the flags.
  1020  	flags, bytesRead := deserializeVLQ(serialized[offset:])
  1021  	offset += bytesRead
  1022  	if offset >= len(serialized) {
  1023  		return nil, errDeserialize("unexpected end of data after flags")
  1024  	}
  1025  	isCoinBase, hasExpiry, txType, _ := decodeFlags(byte(flags))
  1026  
  1027  	// Deserialize the header code.
  1028  	code, bytesRead := deserializeVLQ(serialized[offset:])
  1029  	offset += bytesRead
  1030  	if offset >= len(serialized) {
  1031  		return nil, errDeserialize("unexpected end of data after header")
  1032  	}
  1033  
  1034  	// Decode the header code.
  1035  	//
  1036  	// Bit 0 indicates output 0 is unspent.
  1037  	// Bit 1 indicates output 1 is unspent.
  1038  	// Bits 2-x encodes the number of non-zero unspentness bitmap bytes that
  1039  	// follow.  When both output 0 and 1 are spent, it encodes N-1.
  1040  	output0Unspent := code&0x01 != 0
  1041  	output1Unspent := code&0x02 != 0
  1042  	numBitmapBytes := code >> 2
  1043  	if !output0Unspent && !output1Unspent {
  1044  		numBitmapBytes++
  1045  	}
  1046  
  1047  	// Ensure there are enough bytes left to deserialize the unspentness
  1048  	// bitmap.
  1049  	if uint64(len(serialized[offset:])) < numBitmapBytes {
  1050  		return nil, errDeserialize("unexpected end of data for " +
  1051  			"unspentness bitmap")
  1052  	}
  1053  
  1054  	// Create a new utxo entry with the details deserialized above to house
  1055  	// all of the utxos.
  1056  	entry := newUtxoEntry(uint16(version), uint32(blockHeight),
  1057  		uint32(blockIndex), isCoinBase, hasExpiry, txType)
  1058  
  1059  	// Add sparse output for unspent outputs 0 and 1 as needed based on the
  1060  	// details provided by the header code.
  1061  	var outputIndexes []uint32
  1062  	if output0Unspent {
  1063  		outputIndexes = append(outputIndexes, 0)
  1064  	}
  1065  	if output1Unspent {
  1066  		outputIndexes = append(outputIndexes, 1)
  1067  	}
  1068  
  1069  	// Decode the unspentness bitmap adding a sparse output for each unspent
  1070  	// output.
  1071  	for i := uint32(0); i < uint32(numBitmapBytes); i++ {
  1072  		unspentBits := serialized[offset]
  1073  		for j := uint32(0); j < 8; j++ {
  1074  			if unspentBits&0x01 != 0 {
  1075  				// The first 2 outputs are encoded via the
  1076  				// header code, so adjust the output number
  1077  				// accordingly.
  1078  				outputNum := 2 + i*8 + j
  1079  				outputIndexes = append(outputIndexes, outputNum)
  1080  			}
  1081  			unspentBits >>= 1
  1082  		}
  1083  		offset++
  1084  	}
  1085  
  1086  	// Decode and add all of the utxos.
  1087  	for i, outputIndex := range outputIndexes {
  1088  		// Decode the next utxo.  The script and amount fields of the
  1089  		// utxo output are left compressed so decompression can be
  1090  		// avoided on those that are not accessed.  This is done since
  1091  		// it is quite common for a redeeming transaction to only
  1092  		// reference a single utxo from a referenced transaction.
  1093  		//
  1094  		// 'true' below instructs the method to deserialize a stored
  1095  		// amount.
  1096  		amount, scriptVersion, compScript, bytesRead, err :=
  1097  			decodeCompressedTxOut(serialized[offset:], currentCompressionVersion,
  1098  				true)
  1099  		if err != nil {
  1100  			return nil, errDeserialize(fmt.Sprintf("unable to "+
  1101  				"decode utxo at index %d: %v", i, err))
  1102  		}
  1103  		offset += bytesRead
  1104  
  1105  		entry.sparseOutputs[outputIndex] = &utxoOutput{
  1106  			spent:         false,
  1107  			compressed:    true,
  1108  			scriptVersion: scriptVersion,
  1109  			pkScript:      compScript,
  1110  			amount:        amount,
  1111  		}
  1112  	}
  1113  
  1114  	// Copy the stake extra data if this was a ticket.
  1115  	if entry.txType == stake.TxTypeSStx {
  1116  		stakeExtra := make([]byte, len(serialized[offset:]))
  1117  		copy(stakeExtra, serialized[offset:])
  1118  		entry.stakeExtra = stakeExtra
  1119  	}
  1120  
  1121  	return entry, nil
  1122  }
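
// exampleUtxoEntryRoundTrip is an illustrative sketch added for this edit (not
// part of the original file).  It builds a utxo entry with one unspent output,
// serializes it with the format described above, and decodes it again.
func exampleUtxoEntryRoundTrip() (*UtxoEntry, error) {
	entry := newUtxoEntry(1, 100, 0, false, false, stake.TxTypeRegular)
	entry.sparseOutputs[0] = &utxoOutput{
		amount:   1000000,
		pkScript: []byte{0x6a}, // placeholder script
	}

	serialized, err := serializeUtxoEntry(entry)
	if err != nil {
		return nil, err
	}

	// The decoded entry keeps the output script and amount compressed until
	// they are accessed.
	return deserializeUtxoEntry(serialized)
}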
  1123  
  1124  // dbFetchUtxoEntry uses an existing database transaction to fetch all unspent
  1125  // outputs for the provided transaction hash from the utxo set.
  1126  //
  1127  // When there is no entry for the provided hash, nil will be returned for
  1128  // both the entry and the error.
  1129  func dbFetchUtxoEntry(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) {
  1130  	// Fetch the unspent transaction output information for the passed
  1131  	// transaction hash.  Return now when there is no entry.
  1132  	utxoBucket := dbTx.Metadata().Bucket(dbnamespace.UtxoSetBucketName)
  1133  	serializedUtxo := utxoBucket.Get(hash[:])
  1134  	if serializedUtxo == nil {
  1135  		return nil, nil
  1136  	}
  1137  
  1138  	// A non-nil zero-length entry means there is an entry in the database
  1139  	// for a fully spent transaction which should never be the case.
  1140  	if len(serializedUtxo) == 0 {
  1141  		return nil, AssertError(fmt.Sprintf("database contains entry "+
  1142  			"for fully spent tx %v", hash))
  1143  	}
  1144  
  1145  	// Deserialize the utxo entry and return it.
  1146  	entry, err := deserializeUtxoEntry(serializedUtxo)
  1147  	if err != nil {
  1148  		// Ensure any deserialization errors are returned as database
  1149  		// corruption errors.
  1150  		if isDeserializeErr(err) {
  1151  			return nil, database.Error{
  1152  				ErrorCode: database.ErrCorruption,
  1153  				Description: fmt.Sprintf("corrupt utxo entry "+
  1154  					"for %v: %v", hash, err),
  1155  			}
  1156  		}
  1157  
  1158  		return nil, err
  1159  	}
  1160  
  1161  	return entry, nil
  1162  }
  1163  
  1164  // dbPutUtxoView uses an existing database transaction to update the utxo set
  1165  // in the database based on the provided utxo view contents and state.  In
  1166  // particular, only the entries that have been marked as modified are written
  1167  // to the database.
  1168  func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error {
  1169  	utxoBucket := dbTx.Metadata().Bucket(dbnamespace.UtxoSetBucketName)
  1170  	for txHashIter, entry := range view.entries {
  1171  		// No need to update the database if the entry was not modified.
  1172  		if entry == nil || !entry.modified {
  1173  			continue
  1174  		}
  1175  
  1176  		// Serialize the utxo entry without any entries that have been
  1177  		// spent.
  1178  		serialized, err := serializeUtxoEntry(entry)
  1179  		if err != nil {
  1180  			return err
  1181  		}
  1182  
  1183  		// Make a copy of the hash because the iterator changes on each
  1184  		// loop iteration and thus slicing it directly would cause the
  1185  		// data to change out from under the put/delete funcs below.
  1186  		txHash := txHashIter
  1187  
  1188  		// Remove the utxo entry if it is now fully spent.
  1189  		if serialized == nil {
  1190  			if err := utxoBucket.Delete(txHash[:]); err != nil {
  1191  				return err
  1192  			}
  1193  
  1194  			continue
  1195  		}
  1196  
  1197  		// At this point the utxo entry is not fully spent, so store its
  1198  		// serialization in the database.
  1199  		err = utxoBucket.Put(txHash[:], serialized)
  1200  		if err != nil {
  1201  			return err
  1202  		}
  1203  	}
  1204  
  1205  	return nil
  1206  }
  1207  
  1208  // -----------------------------------------------------------------------------
  1209  // The database information describes the version and creation date
  1210  // of the blockchain database.
  1211  //
  1212  // It consists of a separate key for each individual piece of information:
  1213  //
  1214  //   Key        Value    Size      Description
  1215  //   version    uint32   4 bytes   The version of the database
  1216  //   compver    uint32   4 bytes   The script compression version of the database
  1217  //   bidxver    uint32   4 bytes   The block index version of the database
  1218  //   created    uint64   8 bytes   The date of the creation of the database
  1219  // -----------------------------------------------------------------------------
  1220  
  1221  // databaseInfo holds the versioning and creation information for the database.
  1222  type databaseInfo struct {
  1223  	version uint32
  1224  	compVer uint32
  1225  	bidxVer uint32
  1226  	created time.Time
  1227  }
  1228  
  1229  // dbPutDatabaseInfo uses an existing database transaction to store the database
  1230  // information.
  1231  func dbPutDatabaseInfo(dbTx database.Tx, dbi *databaseInfo) error {
  1232  	// uint32Bytes is a helper function to convert a uint32 to a byte slice
  1233  	// using the byte order specified by the database namespace.
  1234  	uint32Bytes := func(ui32 uint32) []byte {
  1235  		var b [4]byte
  1236  		dbnamespace.ByteOrder.PutUint32(b[:], ui32)
  1237  		return b[:]
  1238  	}
  1239  
  1240  	// uint64Bytes is a helper function to convert a uint64 to a byte slice
  1241  	// using the byte order specified by the database namespace.
  1242  	uint64Bytes := func(ui64 uint64) []byte {
  1243  		var b [8]byte
  1244  		dbnamespace.ByteOrder.PutUint64(b[:], ui64)
  1245  		return b[:]
  1246  	}
  1247  
  1248  	// Store the database version.
  1249  	meta := dbTx.Metadata()
  1250  	bucket := meta.Bucket(dbnamespace.BCDBInfoBucketName)
  1251  	err := bucket.Put(dbnamespace.BCDBInfoVersionKeyName,
  1252  		uint32Bytes(dbi.version))
  1253  	if err != nil {
  1254  		return err
  1255  	}
  1256  
  1257  	// Store the compression version.
  1258  	err = bucket.Put(dbnamespace.BCDBInfoCompressionVersionKeyName,
  1259  		uint32Bytes(dbi.compVer))
  1260  	if err != nil {
  1261  		return err
  1262  	}
  1263  
  1264  	// Store the block index version.
  1265  	err = bucket.Put(dbnamespace.BCDBInfoBlockIndexVersionKeyName,
  1266  		uint32Bytes(dbi.bidxVer))
  1267  	if err != nil {
  1268  		return err
  1269  	}
  1270  
  1271  	// Store the database creation date.
  1272  	return bucket.Put(dbnamespace.BCDBInfoCreatedKeyName,
  1273  		uint64Bytes(uint64(dbi.created.Unix())))
  1274  }
  1275  
  1276  // dbFetchDatabaseInfo uses an existing database transaction to fetch the
  1277  // database versioning and creation information.
  1278  func dbFetchDatabaseInfo(dbTx database.Tx) (*databaseInfo, error) {
  1279  	meta := dbTx.Metadata()
  1280  	bucket := meta.Bucket(dbnamespace.BCDBInfoBucketName)
  1281  
  1282  	// Uninitialized state.
  1283  	if bucket == nil {
  1284  		return nil, nil
  1285  	}
  1286  
  1287  	// Load the database version.
  1288  	var version uint32
  1289  	versionBytes := bucket.Get(dbnamespace.BCDBInfoVersionKeyName)
  1290  	if versionBytes != nil {
  1291  		version = dbnamespace.ByteOrder.Uint32(versionBytes)
  1292  	}
  1293  
  1294  	// Load the database compression version.
  1295  	var compVer uint32
  1296  	compVerBytes := bucket.Get(dbnamespace.BCDBInfoCompressionVersionKeyName)
  1297  	if compVerBytes != nil {
  1298  		compVer = dbnamespace.ByteOrder.Uint32(compVerBytes)
  1299  	}
  1300  
  1301  	// Load the database block index version.
  1302  	var bidxVer uint32
  1303  	bidxVerBytes := bucket.Get(dbnamespace.BCDBInfoBlockIndexVersionKeyName)
  1304  	if bidxVerBytes != nil {
  1305  		bidxVer = dbnamespace.ByteOrder.Uint32(bidxVerBytes)
  1306  	}
  1307  
  1308  	// Load the database creation date.
  1309  	var created time.Time
  1310  	createdBytes := bucket.Get(dbnamespace.BCDBInfoCreatedKeyName)
  1311  	if createdBytes != nil {
  1312  		ts := dbnamespace.ByteOrder.Uint64(createdBytes)
  1313  		created = time.Unix(int64(ts), 0)
  1314  	}
  1315  
  1316  	return &databaseInfo{
  1317  		version: version,
  1318  		compVer: compVer,
  1319  		bidxVer: bidxVer,
  1320  		created: created,
  1321  	}, nil
  1322  }
  1323  
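        // The sketch below is a minimal illustration of how the versioning
        // information above might be consumed: load it under a read-only
        // transaction and reject databases created by newer software.  The helper
        // name exampleCheckDBVersion is hypothetical and not part of the package.
        func exampleCheckDBVersion(db database.DB) error {
        	return db.View(func(dbTx database.Tx) error {
        		dbInfo, err := dbFetchDatabaseInfo(dbTx)
        		if err != nil {
        			return err
        		}
        		if dbInfo == nil {
        			// The metadata bucket does not exist, so the database has
        			// not been initialized for use with chain yet.
        			return nil
        		}
        		if dbInfo.version > currentDatabaseVersion {
        			return fmt.Errorf("database version %d is newer than the "+
        				"latest supported version %d", dbInfo.version,
        				currentDatabaseVersion)
        		}
        		return nil
        	})
        }
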
  1324  // -----------------------------------------------------------------------------
  1325  // The best chain state consists of the best block hash and height, the total
  1326  // number of transactions up to and including those in the best block, the
  1327  // total subsidy (coin supply) generated up to and including the best
  1328  // block, and the accumulated work sum up to and including the best
  1329  // block.
  1330  //
  1331  // The serialized format is:
  1332  //
  1333  //   <block hash><block height><total txns><total subsidy><work sum length><work sum>
  1334  //
  1335  //   Field             Type             Size
  1336  //   block hash        chainhash.Hash   chainhash.HashSize
  1337  //   block height      uint32           4 bytes
  1338  //   total txns        uint64           8 bytes
  1339  //   total subsidy     int64            8 bytes
  1340  //   work sum length   uint32           4 bytes
  1341  //   work sum          big.Int          work sum length
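        //
        // The fixed-length prefix is therefore chainhash.HashSize (32 bytes) + 4 +
        // 8 + 8 + 4 = 56 bytes, followed by the variable-length work sum bytes.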
  1342  // -----------------------------------------------------------------------------
  1343  
  1344  // bestChainState represents the data to be stored in the database for the
  1345  // current best chain state.
  1346  type bestChainState struct {
  1347  	hash         chainhash.Hash
  1348  	height       uint32
  1349  	totalTxns    uint64
  1350  	totalSubsidy int64
  1351  	workSum      *big.Int
  1352  }
  1353  
  1354  // serializeBestChainState returns the serialization of the passed best chain
  1355  // state.  This is data to be stored in the chain state bucket.
  1356  func serializeBestChainState(state bestChainState) []byte {
  1357  	// Calculate the full size needed to serialize the chain state.
  1358  	workSumBytes := state.workSum.Bytes()
  1359  	workSumBytesLen := uint32(len(workSumBytes))
  1360  	serializedLen := chainhash.HashSize + 4 + 8 + 8 + 4 + workSumBytesLen
  1361  
  1362  	// Serialize the chain state.
  1363  	serializedData := make([]byte, serializedLen)
  1364  	copy(serializedData[0:chainhash.HashSize], state.hash[:])
  1365  	offset := uint32(chainhash.HashSize)
  1366  	dbnamespace.ByteOrder.PutUint32(serializedData[offset:], state.height)
  1367  	offset += 4
  1368  	dbnamespace.ByteOrder.PutUint64(serializedData[offset:], state.totalTxns)
  1369  	offset += 8
  1370  	dbnamespace.ByteOrder.PutUint64(serializedData[offset:],
  1371  		uint64(state.totalSubsidy))
  1372  	offset += 8
  1373  	dbnamespace.ByteOrder.PutUint32(serializedData[offset:], workSumBytesLen)
  1374  	offset += 4
  1375  	copy(serializedData[offset:], workSumBytes)
  1376  	return serializedData
  1377  }
  1378  
  1379  // deserializeBestChainState deserializes the passed serialized best chain
  1380  // state.  This is data stored in the chain state bucket and is updated after
  1381  // every block is connected or disconnected from the main
  1382  // chain.
  1383  func deserializeBestChainState(serializedData []byte) (bestChainState, error) {
  1384  	// Ensure the serialized data has enough bytes to properly deserialize
  1385  // the hash, height, total transactions, total subsidy, and work sum
  1386  // length.
  1387  	expectedMinLen := chainhash.HashSize + 4 + 8 + 8 + 4
  1388  	if len(serializedData) < expectedMinLen {
  1389  		return bestChainState{}, database.Error{
  1390  			ErrorCode: database.ErrCorruption,
  1391  			Description: fmt.Sprintf("corrupt best chain state size; min %v "+
  1392  				"got %v", expectedMinLen, len(serializedData)),
  1393  		}
  1394  	}
  1395  
  1396  	state := bestChainState{}
  1397  	copy(state.hash[:], serializedData[0:chainhash.HashSize])
  1398  	offset := uint32(chainhash.HashSize)
  1399  	state.height = dbnamespace.ByteOrder.Uint32(serializedData[offset : offset+4])
  1400  	offset += 4
  1401  	state.totalTxns = dbnamespace.ByteOrder.Uint64(
  1402  		serializedData[offset : offset+8])
  1403  	offset += 8
  1404  	state.totalSubsidy = int64(dbnamespace.ByteOrder.Uint64(
  1405  		serializedData[offset : offset+8]))
  1406  	offset += 8
  1407  	workSumBytesLen := dbnamespace.ByteOrder.Uint32(
  1408  		serializedData[offset : offset+4])
  1409  	offset += 4
  1410  
  1411  	// Ensure the serialized data has enough bytes to deserialize the work
  1412  	// sum.
  1413  	if uint32(len(serializedData[offset:])) < workSumBytesLen {
  1414  		return bestChainState{}, database.Error{
  1415  			ErrorCode: database.ErrCorruption,
  1416  			Description: fmt.Sprintf("corrupt work sum size; want %v "+
  1417  				"got %v", workSumBytesLen, uint32(len(serializedData[offset:]))),
  1418  		}
  1419  	}
  1420  	workSumBytes := serializedData[offset : offset+workSumBytesLen]
  1421  	state.workSum = new(big.Int).SetBytes(workSumBytes)
  1422  
  1423  	return state, nil
  1424  }
  1425  
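        // The helper below is an illustrative sketch of a round trip through the
        // two functions above; it is not part of the package and the sample
        // values are arbitrary.
        func exampleBestChainStateRoundTrip() error {
        	original := bestChainState{
        		height:       1000,
        		totalTxns:    2500,
        		totalSubsidy: 1152921504606,
        		workSum:      big.NewInt(123456789),
        	}
        	serialized := serializeBestChainState(original)
        	decoded, err := deserializeBestChainState(serialized)
        	if err != nil {
        		return err
        	}
        	if decoded.height != original.height ||
        		decoded.totalTxns != original.totalTxns ||
        		decoded.totalSubsidy != original.totalSubsidy ||
        		decoded.workSum.Cmp(original.workSum) != 0 {
        		return fmt.Errorf("round trip mismatch: got %+v, want %+v",
        			decoded, original)
        	}
        	return nil
        }
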
  1426  // dbPutBestState uses an existing database transaction to update the best chain
  1427  // state with the given parameters.
  1428  func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error {
  1429  	// Serialize the current best chain state.
  1430  	serializedData := serializeBestChainState(bestChainState{
  1431  		hash:         snapshot.Hash,
  1432  		height:       uint32(snapshot.Height),
  1433  		totalTxns:    snapshot.TotalTxns,
  1434  		totalSubsidy: snapshot.TotalSubsidy,
  1435  		workSum:      workSum,
  1436  	})
  1437  
  1438  	// Store the current best chain state into the database.
  1439  	return dbTx.Metadata().Put(dbnamespace.ChainStateKeyName, serializedData)
  1440  }
  1441  
  1442  // createChainState initializes both the database and the chain state to the
  1443  // genesis block.  This includes creating the necessary buckets and inserting
  1444  // the genesis block, so it must only be called on an uninitialized database.
  1445  func (b *BlockChain) createChainState() error {
  1446  	// Create a new node from the genesis block and set it as the best node.
  1447  	genesisBlock := dcrutil.NewBlock(b.chainParams.GenesisBlock)
  1448  	header := &genesisBlock.MsgBlock().Header
  1449  	node := newBlockNode(header, nil)
  1450  	node.status = statusDataStored | statusValid
  1451  
  1452  	// Initialize the state related to the best block.  Since it is the
  1453  	// genesis block, use its timestamp for the median time.
  1454  	numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
  1455  	blockSize := uint64(genesisBlock.MsgBlock().SerializeSize())
  1456  	stateSnapshot := newBestState(node, blockSize, numTxns, numTxns,
  1457  		time.Unix(node.timestamp, 0), 0, 0, b.chainParams.MinimumStakeDiff,
  1458  		nil, nil, earlyFinalState)
  1459  
  1460  	// Create the initial database chain state, including creating the
  1461  	// necessary index buckets and inserting the genesis block.
  1462  	err := b.db.Update(func(dbTx database.Tx) error {
  1463  		meta := dbTx.Metadata()
  1464  
  1465  		// Create the bucket that houses information about the database's
  1466  		// creation and version.
  1467  		_, err := meta.CreateBucket(dbnamespace.BCDBInfoBucketName)
  1468  		if err != nil {
  1469  			return err
  1470  		}
  1471  
  1472  		b.dbInfo = &databaseInfo{
  1473  			version: currentDatabaseVersion,
  1474  			compVer: currentCompressionVersion,
  1475  			bidxVer: currentBlockIndexVersion,
  1476  			created: time.Now(),
  1477  		}
  1478  		err = dbPutDatabaseInfo(dbTx, b.dbInfo)
  1479  		if err != nil {
  1480  			return err
  1481  		}
  1482  
  1483  		// Create the bucket that houses the block index data.
  1484  		_, err = meta.CreateBucket(dbnamespace.BlockIndexBucketName)
  1485  		if err != nil {
  1486  			return err
  1487  		}
  1488  
  1489  		// Create the bucket that houses the spend journal data.
  1490  		_, err = meta.CreateBucket(dbnamespace.SpendJournalBucketName)
  1491  		if err != nil {
  1492  			return err
  1493  		}
  1494  
  1495  		// Create the bucket that houses the utxo set.  Note that the
  1496  		// genesis block coinbase transaction is intentionally not
  1497  		// inserted here since it is not spendable by consensus rules.
  1498  		_, err = meta.CreateBucket(dbnamespace.UtxoSetBucketName)
  1499  		if err != nil {
  1500  			return err
  1501  		}
  1502  
  1503  		// Add the genesis block to the block index.
  1504  		err = dbPutBlockNode(dbTx, node)
  1505  		if err != nil {
  1506  			return err
  1507  		}
  1508  
  1509  		// Store the current best chain state into the database.
  1510  		err = dbPutBestState(dbTx, stateSnapshot, node.workSum)
  1511  		if err != nil {
  1512  			return err
  1513  		}
  1514  
  1515  		// Initialize the stake buckets in the database, along with
  1516  		// the best state for the stake database.
  1517  		_, err = stake.InitDatabaseState(dbTx, b.chainParams)
  1518  		if err != nil {
  1519  			return err
  1520  		}
  1521  
  1522  		// Store the genesis block into the database.
  1523  		return dbTx.StoreBlock(genesisBlock)
  1524  	})
  1525  	return err
  1526  }
  1527  
  1528  // initChainState attempts to load and initialize the chain state from the
  1529  // database.  When the db does not yet contain any chain state, both it and the
  1530  // chain state are initialized to the genesis block.
  1531  func (b *BlockChain) initChainState() error {
  1532  	// Update database versioning scheme if needed.
  1533  	err := b.db.Update(func(dbTx database.Tx) error {
  1534  		// No versioning upgrade is needed if the dbinfo bucket does not
  1535  		// exist or the legacy key does not exist.
  1536  		bucket := dbTx.Metadata().Bucket(dbnamespace.BCDBInfoBucketName)
  1537  		if bucket == nil {
  1538  			return nil
  1539  		}
  1540  		legacyBytes := bucket.Get(dbnamespace.BCDBInfoBucketName)
  1541  		if legacyBytes == nil {
  1542  			return nil
  1543  		}
  1544  
  1545  		// No versioning upgrade is needed if the new version key exists.
  1546  		if bucket.Get(dbnamespace.BCDBInfoVersionKeyName) != nil {
  1547  			return nil
  1548  		}
  1549  
  1550  		// Load and deserialize the legacy version information.
  1551  		log.Infof("Migrating versioning scheme...")
  1552  		dbi, err := deserializeDatabaseInfoV2(legacyBytes)
  1553  		if err != nil {
  1554  			return err
  1555  		}
  1556  
  1557  		// Store the database version info using the new format.
  1558  		if err := dbPutDatabaseInfo(dbTx, dbi); err != nil {
  1559  			return err
  1560  		}
  1561  
  1562  		// Remove the legacy version information.
  1563  		return bucket.Delete(dbnamespace.BCDBInfoBucketName)
  1564  	})
  1565  	if err != nil {
  1566  		return err
  1567  	}
  1568  
  1569  	// Determine the state of the database.
  1570  	var isStateInitialized bool
  1571  	err = b.db.View(func(dbTx database.Tx) error {
  1572  		// Fetch the database versioning information.
  1573  		dbInfo, err := dbFetchDatabaseInfo(dbTx)
  1574  		if err != nil {
  1575  			return err
  1576  		}
  1577  
  1578  		// The database bucket for the versioning information is missing.
  1579  		if dbInfo == nil {
  1580  			return nil
  1581  		}
  1582  
  1583  		// Don't allow downgrades of the blockchain database.
  1584  		if dbInfo.version > currentDatabaseVersion {
  1585  			return fmt.Errorf("the current blockchain database is "+
  1586  				"no longer compatible with this version of "+
  1587  				"the software (%d > %d)", dbInfo.version,
  1588  				currentDatabaseVersion)
  1589  		}
  1590  
  1591  		// Don't allow downgrades of the database compression version.
  1592  		if dbInfo.compVer > currentCompressionVersion {
  1593  			return fmt.Errorf("the current database compression "+
  1594  				"version is no longer compatible with this "+
  1595  				"version of the software (%d > %d)",
  1596  				dbInfo.compVer, currentCompressionVersion)
  1597  		}
  1598  
  1599  		// Don't allow downgrades of the block index.
  1600  		if dbInfo.bidxVer > currentBlockIndexVersion {
  1601  			return fmt.Errorf("the current database block index "+
  1602  				"version is no longer compatible with this "+
  1603  				"version of the software (%d > %d)",
  1604  				dbInfo.bidxVer, currentBlockIndexVersion)
  1605  		}
  1606  
  1607  		b.dbInfo = dbInfo
  1608  		isStateInitialized = true
  1609  		return nil
  1610  	})
  1611  	if err != nil {
  1612  		return err
  1613  	}
  1614  
  1615  	// Initialize the database if it has not already been done.
  1616  	if !isStateInitialized {
  1617  		if err := b.createChainState(); err != nil {
  1618  			return err
  1619  		}
  1620  	}
  1621  
  1622  	// Upgrade the database as needed.
  1623  	err = upgradeDB(b.db, b.chainParams, b.dbInfo, b.interrupt)
  1624  	if err != nil {
  1625  		return err
  1626  	}
  1627  
  1628  	// Attempt to load the chain state from the database.
  1629  	err = b.db.View(func(dbTx database.Tx) error {
  1630  		// Fetch the stored chain state from the database metadata.
  1631  		// When it doesn't exist, it means the database hasn't been
  1632  		// initialized for use with chain yet, so break out now to allow
  1633  		// that to happen under a writable database transaction.
  1634  		meta := dbTx.Metadata()
  1635  		serializedData := meta.Get(dbnamespace.ChainStateKeyName)
  1636  		if serializedData == nil {
  1637  			return nil
  1638  		}
  1639  		log.Tracef("Serialized chain state: %x", serializedData)
  1640  		state, err := deserializeBestChainState(serializedData)
  1641  		if err != nil {
  1642  			return err
  1643  		}
  1644  
  1645  		log.Infof("Loading block index...")
  1646  		bidxStart := time.Now()
  1647  
  1648  		// Determine how many blocks will be loaded into the index in order to
  1649  		// allocate the right amount as a single allocation versus many small
  1650  		// ones to reduce pressure on the GC.
  1651  		blockIndexBucket := meta.Bucket(dbnamespace.BlockIndexBucketName)
  1652  		var blockCount int32
  1653  		cursor := blockIndexBucket.Cursor()
  1654  		for ok := cursor.First(); ok; ok = cursor.Next() {
  1655  			blockCount++
  1656  		}
  1657  		blockNodes := make([]blockNode, blockCount)
  1658  
  1659  		// Load all of the block index entries and construct the block index
  1660  		// accordingly.
  1661  		//
  1662  		// NOTE: No locks are used on the block index here since this is
  1663  		// initialization code.
  1664  		var i int32
  1665  		var lastNode *blockNode
  1666  		cursor = blockIndexBucket.Cursor()
  1667  		for ok := cursor.First(); ok; ok = cursor.Next() {
  1668  			entry, err := deserializeBlockIndexEntry(cursor.Value())
  1669  			if err != nil {
  1670  				return err
  1671  			}
  1672  			header := &entry.header
  1673  
  1674  			// Determine the parent block node.  Since the block headers are
  1675  			// iterated in order of height, there is a very good chance the
  1676  			// previous header processed is the parent.
  1677  			var parent *blockNode
  1678  			if lastNode == nil {
  1679  				blockHash := header.BlockHash()
  1680  				if blockHash != *b.chainParams.GenesisHash {
  1681  					return AssertError(fmt.Sprintf("initChainState: expected "+
  1682  						"first entry in block index to be genesis block, "+
  1683  						"found %s", blockHash))
  1684  				}
  1685  			} else if header.PrevBlock == lastNode.hash {
  1686  				parent = lastNode
  1687  			} else {
  1688  				parent = b.index.lookupNode(&header.PrevBlock)
  1689  				if parent == nil {
  1690  					return AssertError(fmt.Sprintf("initChainState: could "+
  1691  						"not find parent for block %s", header.BlockHash()))
  1692  				}
  1693  			}
  1694  
  1695  			// Initialize the block node, connect it, and add it to the block
  1696  			// index.
  1697  			node := &blockNodes[i]
  1698  			initBlockNode(node, header, parent)
  1699  			node.status = entry.status
  1700  			node.ticketsVoted = entry.ticketsVoted
  1701  			node.ticketsRevoked = entry.ticketsRevoked
  1702  			node.votes = entry.voteInfo
  1703  			b.index.addNode(node)
  1704  
  1705  			lastNode = node
  1706  			i++
  1707  		}
  1708  
  1709  		// Set the best chain to the stored best state.
  1710  		tip := b.index.lookupNode(&state.hash)
  1711  		if tip == nil {
  1712  			return AssertError(fmt.Sprintf("initChainState: cannot find "+
  1713  				"chain tip %s in block index", state.hash))
  1714  		}
  1715  		b.bestChain.SetTip(tip)
  1716  
  1717  		log.Debugf("Block index loaded in %v", time.Since(bidxStart))
  1718  
  1719  		// Exception for version 1 blockchains: skip loading the stake
  1720  		// node, as the upgrade path handles ensuring this is correctly
  1721  		// set.
  1722  		if b.dbInfo.version >= 2 {
  1723  			tip.stakeNode, err = stake.LoadBestNode(dbTx, uint32(tip.height),
  1724  				tip.hash, tip.Header(), b.chainParams)
  1725  			if err != nil {
  1726  				return err
  1727  			}
  1728  			tip.newTickets = tip.stakeNode.NewTickets()
  1729  		}
  1730  
  1731  		// Load the best and parent blocks and cache them.
  1732  		utilBlock, err := dbFetchBlockByNode(dbTx, tip)
  1733  		if err != nil {
  1734  			return err
  1735  		}
  1736  		b.mainchainBlockCache[tip.hash] = utilBlock
  1737  		if tip.parent != nil {
  1738  			parentBlock, err := dbFetchBlockByNode(dbTx, tip.parent)
  1739  			if err != nil {
  1740  				return err
  1741  			}
  1742  			b.mainchainBlockCache[tip.parent.hash] = parentBlock
  1743  		}
  1744  
  1745  		// Initialize the state related to the best block.
  1746  		block := utilBlock.MsgBlock()
  1747  		blockSize := uint64(block.SerializeSize())
  1748  		numTxns := uint64(len(block.Transactions))
  1749  
  1750  		// Calculate the next stake difficulty.
  1751  		nextStakeDiff, err := b.calcNextRequiredStakeDifficulty(tip)
  1752  		if err != nil {
  1753  			return err
  1754  		}
  1755  
  1756  		b.stateSnapshot = newBestState(tip, blockSize, numTxns,
  1757  			state.totalTxns, tip.CalcPastMedianTime(),
  1758  			state.totalSubsidy, uint32(tip.stakeNode.PoolSize()),
  1759  			nextStakeDiff, tip.stakeNode.Winners(),
  1760  			tip.stakeNode.MissedTickets(), tip.stakeNode.FinalState())
  1761  
  1762  		return nil
  1763  	})
  1764  	return err
  1765  }
  1766  
  1767  // dbFetchBlockByNode uses an existing database transaction to retrieve the raw
  1768  // block for the provided node, deserialize it, and return a dcrutil.Block.
  1769  func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*dcrutil.Block, error) {
  1770  	// Load the raw block bytes from the database.
  1771  	blockBytes, err := dbTx.FetchBlock(&node.hash)
  1772  	if err != nil {
  1773  		return nil, err
  1774  	}
  1775  
  1776  	// Create the encapsulated block from the serialized bytes.
  1777  	block, err := dcrutil.NewBlockFromBytes(blockBytes)
  1778  	if err != nil {
  1779  		return nil, err
  1780  	}
  1781  
  1782  	return block, nil
  1783  }
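
        // The sketch below shows one way dbFetchBlockByNode might be used to load
        // the current best block under a read-only transaction.  The helper name
        // exampleFetchTipBlock is hypothetical and not part of the package, and it
        // assumes the best chain view exposes its tip via Tip().
        func exampleFetchTipBlock(b *BlockChain) (*dcrutil.Block, error) {
        	var block *dcrutil.Block
        	err := b.db.View(func(dbTx database.Tx) error {
        		var errLocal error
        		block, errLocal = dbFetchBlockByNode(dbTx, b.bestChain.Tip())
        		return errLocal
        	})
        	return block, err
        }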