github.com/deso-protocol/core@v1.2.9/lib/db_utils.go

     1  package lib
     2  
     3  import (
     4  	"bytes"
     5  	"crypto/rand"
     6  	"encoding/binary"
     7  	"encoding/gob"
     8  	"encoding/hex"
     9  	"encoding/json"
    10  	"fmt"
    11  	"io"
    12  	"log"
    13  	"math"
    14  	"math/big"
    15  	"path/filepath"
    16  	"reflect"
    17  	"sort"
    18  	"strings"
    19  	"time"
    20  
    21  	"github.com/btcsuite/btcd/btcec"
    22  	"github.com/davecgh/go-spew/spew"
    23  	"github.com/dgraph-io/badger/v3"
    24  	"github.com/golang/glog"
    25  	"github.com/pkg/errors"
    26  )
    27  
    28  // This file contains all of the functions that interact with the database.
    29  
    30  const (
    31  	// badgerDbFolder is the subfolder in the config dir where we
    32  	// store the badgerdb database by default.
    33  	badgerDbFolder = "badgerdb"
    34  )
    35  
    36  var (
    37  	// The key prefixes for the key-value database. To store a particular
    38  	// type of data, we create a key prefix and store all those types of
    39  	// data with a key prefixed by that key prefix.
    40  	// Bitcoin does a similar thing that you can see at this link:
    41  	// https://bitcoin.stackexchange.com/questions/28168/what-are-the-keys-used-in-the-blockchain-leveldb-ie-what-are-the-keyvalue-pair
    42  
    43  	// The prefix for the block index:
    44  	// Key format: <hash BlockHash>
    45  	// Value format: serialized MsgDeSoBlock
    46  	_PrefixBlockHashToBlock = []byte{0}
    47  
    48  	// The prefix for the node index that we use to reconstruct the block tree.
    49  	// Storing the height in big-endian byte order allows us to read in all the
    50  	// blocks in height-sorted order from the db and construct the block tree by connecting
    51  	// nodes to their parents as we go.
    52  	//
    53  	// Key format: <height uint32 (big-endian), hash BlockHash>
    54  	// Value format: serialized BlockNode
    55  	_PrefixHeightHashToNodeInfo        = []byte{1}
    56  	_PrefixBitcoinHeightHashToNodeInfo = []byte{2}
    57  
    58  	// We store the hash of the node that is the current tip of the main chain.
    59  	// This key is used to look it up.
    60  	// Value format: BlockHash
    61  	_KeyBestDeSoBlockHash = []byte{3}
    62  
    63  	_KeyBestBitcoinHeaderHash = []byte{4}
    64  
    65  	// Utxo table.
    66  	// <txid BlockHash, output_index uint64> -> UtxoEntry
    67  	_PrefixUtxoKeyToUtxoEntry = []byte{5}
    68  	// <prefix, pubKey [33]byte, utxoKey< txid BlockHash, index uint32 >> -> <>
    69  	_PrefixPubKeyUtxoKey = []byte{7}
    70  	// The number of utxo entries in the database.
    71  	_KeyUtxoNumEntries = []byte{8}
    72  	// Utxo operations table.
    73  	// This table contains, for each blockhash on the main chain, the UtxoOperations
    74  	// that were applied by this block. To roll back the block, one must loop through
    75  	// the UtxoOperations for a particular block backwards and invert them.
    76  	//
    77  	// < hash *BlockHash > -> < serialized []UtxoOperation using gob encoding >
    78  	_PrefixBlockHashToUtxoOperations = []byte{9}
    79  
    80  	// The below are mappings related to the validation of BitcoinExchange transactions.
    81  	//
    82  	// The number of nanos that has been purchased thus far.
    83  	_KeyNanosPurchased = []byte{10}
    84  	// How much Bitcoin is worth in USD cents.
    85  	_KeyUSDCentsPerBitcoinExchangeRate = []byte{27}
    86  	// <key> -> <GlobalParamsEntry gob serialized>
    87  	_KeyGlobalParams = []byte{40}
    88  
    89  	// The prefix for the Bitcoin TxID map. If a key is set for a TxID that means this
    90  	// particular TxID has been processed as part of a BitcoinExchange transaction. If
    91  	// no key is set for a TxID that means it has not been processed (and thus it can be
    92  	// used to create new nanos).
    93  	// <BitcoinTxID BlockHash> -> <nothing>
    94  	_PrefixBitcoinBurnTxIDs = []byte{11}
    95  
    96  	// Messages are indexed by the public key of their senders and receivers. If
    97  	// a message sends from pkFrom to pkTo then there will be two separate entries,
    98  	// one for pkFrom and one for pkTo. The exact format is as follows:
    99  	// <public key (33 bytes) || uint64 big-endian> -> < SenderPublicKey || RecipientPublicKey || EncryptedText >
   100  	_PrefixPublicKeyTimestampToPrivateMessage = []byte{12}
   101  
   102  	// Tracks the tip of the transaction index. This is used to determine
   103  	// which blocks need to be processed in order to update the index.
   104  	_KeyTransactionIndexTip = []byte{14}
   105  	// <prefix, transactionID BlockHash> -> <TransactionMetadata struct>
   106  	_PrefixTransactionIDToMetadata = []byte{15}
   107  	// <prefix, publicKey []byte, index uint32> -> <txid BlockHash>
   108  	_PrefixPublicKeyIndexToTransactionIDs = []byte{16}
   109  	// <prefix, publicKey []byte> -> <index uint32>
   110  	_PrefixPublicKeyToNextIndex = []byte{42}
   111  
   112  	// Main post index.
   113  	// <prefix, PostHash BlockHash> -> PostEntry
   114  	_PrefixPostHashToPostEntry = []byte{17}
   115  
   116  	// Post sorts
   117  	// <prefix, publicKey [33]byte, PostHash> -> <>
   118  	_PrefixPosterPublicKeyPostHash = []byte{18}
   119  
   120  	// <prefix, tstampNanos uint64, PostHash> -> <>
   121  	_PrefixTstampNanosPostHash = []byte{19}
   122  	// <prefix, creatorbps uint64, PostHash> -> <>
   123  	_PrefixCreatorBpsPostHash = []byte{20}
   124  	// <prefix, multiplebps uint64, PostHash> -> <>
   125  	_PrefixMultipleBpsPostHash = []byte{21}
   126  
   127  	// Comments are just posts that have their ParentStakeID set, and
   128  	// so we have a separate index that allows us to return all the
   129  	// comments for a given StakeID
   130  	// <prefix, parent stakeID [33]byte, tstampnanos uint64, post hash> -> <>
   131  	_PrefixCommentParentStakeIDToPostHash = []byte{22}
   132  
   133  	// Main profile index
   134  	// <prefix, PKID [33]byte> -> ProfileEntry
   135  	_PrefixPKIDToProfileEntry = []byte{23}
   136  
   137  	// Profile sorts
   138  	// For username, we set the PKID as a value since the username is not fixed width.
   139  	// We always lowercase usernames when using them as map keys in order to make
   140  	// all uniqueness checks case-insensitive
   141  	// <prefix, username> -> <PKID>
   142  	_PrefixProfileUsernameToPKID = []byte{25}
   143  	// This allows us to sort the profiles by the value of their coin (since
   144  	// the amount of DeSo locked in a profile is proportional to coin price).
   145  	_PrefixCreatorDeSoLockedNanosCreatorPKID = []byte{32}
   146  
   147  	// The StakeID is a post hash for posts and a public key for users.
   148  	// <StakeIDType | AmountNanos uint64 | StakeID [var]byte> -> <>
   149  	_PrefixStakeIDTypeAmountStakeIDIndex = []byte{26}
   150  
   151  	// Prefixes for follows:
   152  	// <prefix, follower PKID [33]byte, followed PKID [33]byte> -> <>
   153  	// <prefix, followed PKID [33]byte, follower PKID [33]byte> -> <>
   154  	_PrefixFollowerPKIDToFollowedPKID = []byte{28}
   155  	_PrefixFollowedPKIDToFollowerPKID = []byte{29}
   156  
   157  	// Prefixes for likes:
   158  	// <prefix, user pub key [33]byte, liked post hash [32]byte> -> <>
   159  	// <prefix, post hash [32]byte, user pub key [33]byte> -> <>
   160  	_PrefixLikerPubKeyToLikedPostHash = []byte{30}
   161  	_PrefixLikedPostHashToLikerPubKey = []byte{31}
   162  
   163  	// Prefixes for creator coin fields:
   164  	// <prefix, HODLer PKID [33]byte, creator PKID [33]byte> -> <BalanceEntry>
   165  	// <prefix, creator PKID [33]byte, HODLer PKID [33]byte> -> <BalanceEntry>
   166  	_PrefixHODLerPKIDCreatorPKIDToBalanceEntry = []byte{33}
   167  	_PrefixCreatorPKIDHODLerPKIDToBalanceEntry = []byte{34}
   168  
   169  	_PrefixPosterPublicKeyTimestampPostHash = []byte{35}
   170  
   171  	// If no mapping exists for a particular public key, then the PKID is simply
   172  	// the public key itself.
   173  	// <[33]byte> -> <PKID [33]byte>
   174  	_PrefixPublicKeyToPKID = []byte{36}
   175  	// <PKID [33]byte> -> <PublicKey [33]byte>
   176  	_PrefixPKIDToPublicKey = []byte{37}
   177  
   178  	// Prefix for storing mempool transactions in badger. These stored transactions are
   179  	// used to restore the state of a node after it is shutdown.
   180  	// <prefix, tx hash BlockHash> -> <*MsgDeSoTxn>
   181  	_PrefixMempoolTxnHashToMsgDeSoTxn = []byte{38}
   182  
   183  	// Prefixes for Reposts:
   184  	// <prefix, user pub key [33]byte, reposted post hash [32]byte> -> RepostEntry
   185  	_PrefixReposterPubKeyRepostedPostHashToRepostPostHash = []byte{39}
   186  
   187  	// Prefixes for diamonds:
   188  	//  <prefix, DiamondReceiverPKID [33]byte, DiamondSenderPKID [33]byte, posthash> -> <gob-encoded DiamondEntry>
   189  	//  <prefix, DiamondSenderPKID [33]byte, DiamondReceiverPKID [33]byte, posthash> -> <gob-encoded DiamondEntry>
   190  	_PrefixDiamondReceiverPKIDDiamondSenderPKIDPostHash = []byte{41}
   191  	_PrefixDiamondSenderPKIDDiamondReceiverPKIDPostHash = []byte{43}
   192  
   193  	// Public keys that have been restricted from signing blocks.
   194  	// <prefix, ForbiddenPublicKey [33]byte> -> <>
   195  	_PrefixForbiddenBlockSignaturePubKeys = []byte{44}
   196  
   197  	// These indexes are used in order to fetch the pub keys of users that reposted or diamonded a post.
   198  	// 		Reposts: <prefix, RepostedPostHash, ReposterPubKey> -> <>
   199  	// 		Quote Reposts: <prefix, RepostedPostHash, ReposterPubKey, RepostPostHash> -> <>
   200  	// 		Diamonds: <prefix, DiamondedPostHash, DiamonderPubKey [33]byte> -> <DiamondLevel (uint64)>
   201  	_PrefixRepostedPostHashReposterPubKey               = []byte{45}
   202  	_PrefixRepostedPostHashReposterPubKeyRepostPostHash = []byte{46}
   203  	_PrefixDiamondedPostHashDiamonderPKIDDiamondLevel   = []byte{47}
   204  
   205  	// Prefixes for NFT ownership:
   206  	// 	<prefix, NFTPostHash [32]byte, SerialNumber uint64> -> NFTEntry
   207  	_PrefixPostHashSerialNumberToNFTEntry = []byte{48}
   208  	//  <prefix, PKID [33]byte, IsForSale bool, BidAmountNanos uint64, NFTPostHash[32]byte, SerialNumber uint64> -> NFTEntry
   209  	_PrefixPKIDIsForSaleBidAmountNanosPostHashSerialNumberToNFTEntry = []byte{49}
   210  
   211  	// Prefixes for NFT bids:
   212  	//  <prefix, NFTPostHash [32]byte, SerialNumber uint64, BidNanos uint64, PKID [33]byte> -> <>
   213  	_PrefixPostHashSerialNumberBidNanosBidderPKID = []byte{50}
   214  	//  <BidderPKID [33]byte, NFTPostHash [32]byte, SerialNumber uint64> -> <BidNanos uint64>
   215  	_PrefixBidderPKIDPostHashSerialNumberToBidNanos = []byte{51}
   216  
   217  	// Prefix for NFT accepted bid entries:
   218  	//   - Note: this index uses a slice to track the history of winning bids for an NFT. It is
   219  	//     not core to consensus and should not be relied upon as it could get inefficient.
   220  	//   - Schema: <prefix>, NFTPostHash [32]byte, SerialNumber uint64 -> []NFTBidEntry
   221  	_PrefixPostHashSerialNumberToAcceptedBidEntries = []byte{54}
   222  
   223  	// <prefix, PublicKey [33]byte> -> uint64
   224  	_PrefixPublicKeyToDeSoBalanceNanos = []byte{52}
   225  	// Block reward prefix:
   226  	//   - This index is needed because block rewards take N blocks to mature, which means we need
   227  	//     a way to deduct them from balance calculations until that point. Without this index, it
   228  	//     would be impossible to figure out which of a user's UTXOs have yet to mature.
   229  	//   - Schema: <hash BlockHash> -> <pubKey [33]byte, uint64 blockRewardNanos>
   230  	_PrefixPublicKeyBlockHashToBlockReward = []byte{53}
   231  
   232  	// Prefix for Authorize Derived Key transactions:
   233  	// 		<prefix, OwnerPublicKey [33]byte> -> <>
   234  	_PrefixAuthorizeDerivedKey = []byte{54}
   235  
   236  	// TODO: This process is a bit error-prone. We should come up with a test or
   237  	// something to at least catch cases where people have two prefixes with the
   238  	// same ID.
   239  	// NEXT_TAG: 55
   240  )
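
// The following is an illustrative, hypothetical sketch (not part of the original
// file): it shows how a composite key under _PrefixHeightHashToNodeInfo would be
// assembled. Encoding the height big-endian makes badger's lexicographic key
// ordering match height ordering, which is what lets us scan the block index in
// height-sorted order.
func _exampleHeightHashKey(height uint32, hash *BlockHash) []byte {
	// <prefix, height uint32 (big-endian), hash BlockHash>
	key := append([]byte{}, _PrefixHeightHashToNodeInfo...)
	heightBytes := make([]byte, 4)
	binary.BigEndian.PutUint32(heightBytes, height)
	key = append(key, heightBytes...)
	key = append(key, hash[:]...)
	return key
}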
   241  
   242  func DBGetPKIDEntryForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte) *PKIDEntry {
   243  	if len(publicKey) == 0 {
   244  		return nil
   245  	}
   246  
   247  	prefix := append([]byte{}, _PrefixPublicKeyToPKID...)
   248  	pkidItem, err := txn.Get(append(prefix, publicKey...))
   249  
   250  	if err != nil {
   251  		// If we don't have a mapping from public key to PKID in the db,
   252  		// then we use the public key itself as the PKID. Doing this makes
   253  		// it so that the PKID is generally the *first* public key that the
   254  		// user ever associated with a particular piece of data.
   255  		return &PKIDEntry{
   256  			PKID:      PublicKeyToPKID(publicKey),
   257  			PublicKey: publicKey,
   258  		}
   259  	}
   260  
   261  	// If we get here then it means we actually had a PKID in the DB.
   262  	// So return that pkid.
   263  	pkidEntryObj := &PKIDEntry{}
   264  	err = pkidItem.Value(func(valBytes []byte) error {
   265  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(pkidEntryObj)
   266  	})
   267  	if err != nil {
   268  		glog.Errorf("DBGetPKIDEntryForPublicKeyWithTxn: Problem reading "+
   269  			"PKIDEntry for public key %s",
   270  			PkToStringMainnet(publicKey))
   271  		return nil
   272  	}
   273  	return pkidEntryObj
   274  }
   275  
   276  func DBGetPKIDEntryForPublicKey(db *badger.DB, publicKey []byte) *PKIDEntry {
   277  	var pkid *PKIDEntry
   278  	db.View(func(txn *badger.Txn) error {
   279  		pkid = DBGetPKIDEntryForPublicKeyWithTxn(txn, publicKey)
   280  		return nil
   281  	})
   282  	return pkid
   283  }
   284  
   285  func DBGetPublicKeyForPKIDWithTxn(txn *badger.Txn, pkidd *PKID) []byte {
   286  	prefix := append([]byte{}, _PrefixPKIDToPublicKey...)
   287  	pkidItem, err := txn.Get(append(prefix, pkidd[:]...))
   288  
   289  	if err != nil {
   290  		// If we don't have a mapping in the db then return the pkid itself
   291  		// as the public key.
   292  		return pkidd[:]
   293  	}
   294  
   295  	// If we get here then it means we actually had a public key mapping in the DB.
   296  	// So return that public key.
   297  	pkRet, err := pkidItem.ValueCopy(nil)
   298  	if err != nil {
   299  		// If we had a problem reading the mapping then log an error and return nil.
   300  		glog.Errorf("DBGetPublicKeyForPKIDWithTxn: Problem reading "+
   301  			"public key for pkid %s",
   302  			PkToStringMainnet(pkidd[:]))
   303  		return nil
   304  	}
   305  
   306  	return pkRet
   307  }
   308  
   309  func DBGetPublicKeyForPKID(db *badger.DB, pkidd *PKID) []byte {
   310  	var publicKey []byte
   311  	db.View(func(txn *badger.Txn) error {
   312  		publicKey = DBGetPublicKeyForPKIDWithTxn(txn, pkidd)
   313  		return nil
   314  	})
   315  	return publicKey
   316  }
   317  
   318  func DBPutPKIDMappingsWithTxn(
   319  	txn *badger.Txn, publicKey []byte, pkidEntry *PKIDEntry, params *DeSoParams) error {
   320  
   321  	// Set the main pub key -> pkid mapping.
   322  	{
   323  		pkidDataBuf := bytes.NewBuffer([]byte{})
   324  		gob.NewEncoder(pkidDataBuf).Encode(pkidEntry)
   325  
   326  		prefix := append([]byte{}, _PrefixPublicKeyToPKID...)
   327  		pubKeyToPkidKey := append(prefix, publicKey...)
   328  		if err := txn.Set(pubKeyToPkidKey, pkidDataBuf.Bytes()); err != nil {
   329  
   330  			return errors.Wrapf(err, "DBPutPKIDMappingsWithTxn: Problem "+
   331  				"adding mapping for pkid: %v public key: %v",
   332  				PkToString(pkidEntry.PKID[:], params), PkToString(publicKey, params))
   333  		}
   334  	}
   335  
   336  	// Set the reverse mapping: pkid -> pub key
   337  	{
   338  		prefix := append([]byte{}, _PrefixPKIDToPublicKey...)
   339  		pkidToPubKey := append(prefix, pkidEntry.PKID[:]...)
   340  		if err := txn.Set(pkidToPubKey, publicKey); err != nil {
   341  
   342  			return errors.Wrapf(err, "DBPutPKIDMappingsWithTxn: Problem "+
   343  				"adding mapping for pkid: %v public key: %v",
   344  				PkToString(pkidEntry.PKID[:], params), PkToString(publicKey, params))
   345  		}
   346  	}
   347  
   348  	return nil
   349  }
   350  
   351  func DBDeletePKIDMappingsWithTxn(
   352  	txn *badger.Txn, publicKey []byte, params *DeSoParams) error {
   353  
   354  	// Look up the pkid for the public key.
   355  	pkidEntry := DBGetPKIDEntryForPublicKeyWithTxn(txn, publicKey)
   356  
   357  	{
   358  		prefix := append([]byte{}, _PrefixPublicKeyToPKID...)
   359  		pubKeyToPkidKey := append(prefix, publicKey...)
   360  		if err := txn.Delete(pubKeyToPkidKey); err != nil {
   361  
   362  			return errors.Wrapf(err, "DBDeletePKIDMappingsWithTxn: Problem "+
   363  				"deleting mapping for public key: %v",
   364  				PkToString(publicKey, params))
   365  		}
   366  	}
   367  
   368  	{
   369  		prefix := append([]byte{}, _PrefixPKIDToPublicKey...)
   370  		pubKeyToPkidKey := append(prefix, pkidEntry.PKID[:]...)
   371  		if err := txn.Delete(pubKeyToPkidKey); err != nil {
   372  
   373  			return errors.Wrapf(err, "DBDeletePKIDMappingsWithTxn: Problem "+
   374  				"deleting mapping for pkid: %v",
   375  				PkToString(pkidEntry.PKID[:], params))
   376  		}
   377  	}
   378  
   379  	return nil
   380  }
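
// Illustrative, hypothetical usage sketch: write a PKID mapping with the helper
// above and read it back. The helper function name is made up; the calls it wraps
// are the real ones defined in this file.
func _examplePKIDRoundTrip(db *badger.DB, publicKey []byte, params *DeSoParams) error {
	pkidEntry := &PKIDEntry{
		PKID:      PublicKeyToPKID(publicKey),
		PublicKey: publicKey,
	}
	// Write both the forward (pub key -> PKID) and reverse (PKID -> pub key) mappings.
	err := db.Update(func(txn *badger.Txn) error {
		return DBPutPKIDMappingsWithTxn(txn, publicKey, pkidEntry, params)
	})
	if err != nil {
		return err
	}
	// Read the forward mapping back. If it were missing, the public key itself
	// would be returned as the PKID.
	fetchedEntry := DBGetPKIDEntryForPublicKey(db, publicKey)
	if !bytes.Equal(fetchedEntry.PKID[:], pkidEntry.PKID[:]) {
		return fmt.Errorf("_examplePKIDRoundTrip: fetched PKID does not match")
	}
	return nil
}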
   381  
   382  func EnumerateKeysForPrefix(db *badger.DB, dbPrefix []byte) (_keysFound [][]byte, _valsFound [][]byte) {
   383  	return _enumerateKeysForPrefix(db, dbPrefix)
   384  }
   385  
   386  // A helper function to enumerate all of the values for a particular prefix.
   387  func _enumerateKeysForPrefix(db *badger.DB, dbPrefix []byte) (_keysFound [][]byte, _valsFound [][]byte) {
   388  	keysFound := [][]byte{}
   389  	valsFound := [][]byte{}
   390  
   391  	dbErr := db.View(func(txn *badger.Txn) error {
   392  		var err error
   393  		keysFound, valsFound, err = _enumerateKeysForPrefixWithTxn(txn, dbPrefix)
   394  		if err != nil {
   395  			return err
   396  		}
   397  		return nil
   398  	})
   399  	if dbErr != nil {
   400  		glog.Errorf("_enumerateKeysForPrefix: Problem fetching keys and values from db: %v", dbErr)
   401  		return nil, nil
   402  	}
   403  
   404  	return keysFound, valsFound
   405  }
   406  
   407  func _enumerateKeysForPrefixWithTxn(dbTxn *badger.Txn, dbPrefix []byte) (_keysFound [][]byte, _valsFound [][]byte, _err error) {
   408  	keysFound := [][]byte{}
   409  	valsFound := [][]byte{}
   410  
   411  	opts := badger.DefaultIteratorOptions
   412  	nodeIterator := dbTxn.NewIterator(opts)
   413  	defer nodeIterator.Close()
   414  	prefix := dbPrefix
   415  	for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() {
   416  		key := nodeIterator.Item().Key()
   417  		keyCopy := make([]byte, len(key))
   418  		copy(keyCopy[:], key[:])
   419  
   420  		valCopy, err := nodeIterator.Item().ValueCopy(nil)
   421  		if err != nil {
   422  			return nil, nil, err
   423  		}
   424  		keysFound = append(keysFound, keyCopy)
   425  		valsFound = append(valsFound, valCopy)
   426  	}
   427  	return keysFound, valsFound, nil
   428  }
   429  
   430  // A helper function to enumerate a limited number of the values for a particular prefix.
   431  func _enumerateLimitedKeysReversedForPrefix(db *badger.DB, dbPrefix []byte, limit uint64) (_keysFound [][]byte, _valsFound [][]byte) {
   432  	keysFound := [][]byte{}
   433  	valsFound := [][]byte{}
   434  
   435  	dbErr := db.View(func(txn *badger.Txn) error {
   436  		var err error
   437  		keysFound, valsFound, err = _enumerateLimitedKeysReversedForPrefixWithTxn(txn, dbPrefix, limit)
   438  		return err
   439  	})
   440  	if dbErr != nil {
   441  		glog.Errorf("_enumerateLimitedKeysReversedForPrefix: Problem fetching keys and values from db: %v", dbErr)
   442  		return nil, nil
   443  	}
   444  
   445  	return keysFound, valsFound
   446  }
   447  
   448  func _enumerateLimitedKeysReversedForPrefixWithTxn(dbTxn *badger.Txn, dbPrefix []byte, limit uint64) (_keysFound [][]byte, _valsFound [][]byte, _err error) {
   449  	keysFound := [][]byte{}
   450  	valsFound := [][]byte{}
   451  
   452  	opts := badger.DefaultIteratorOptions
   453  
   454  	// Go in reverse order
   455  	opts.Reverse = true
   456  
   457  	nodeIterator := dbTxn.NewIterator(opts)
   458  	defer nodeIterator.Close()
   459  	prefix := dbPrefix
   460  
   461  	counter := uint64(0)
   462  	for nodeIterator.Seek(append(prefix, 0xff)); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() {
   463  		if counter == limit {
   464  			break
   465  		}
   466  		counter++
   467  
   468  		key := nodeIterator.Item().Key()
   469  		keyCopy := make([]byte, len(key))
   470  		copy(keyCopy[:], key[:])
   471  
   472  		valCopy, err := nodeIterator.Item().ValueCopy(nil)
   473  		if err != nil {
   474  			return nil, nil, err
   475  		}
   476  		keysFound = append(keysFound, keyCopy)
   477  		valsFound = append(valsFound, valCopy)
   478  	}
   479  	return keysFound, valsFound, nil
   480  }
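
// Illustrative, hypothetical sketch: enumerate every <public key -> PKID> mapping
// using the prefix helper above, gob-decoding the values the same way the getters
// in this file do. The function name is made up.
func _exampleEnumeratePKIDMappings(db *badger.DB) ([]*PKIDEntry, error) {
	_, valsFound := _enumerateKeysForPrefix(db, _PrefixPublicKeyToPKID)

	pkidEntries := []*PKIDEntry{}
	for _, valBytes := range valsFound {
		pkidEntryObj := &PKIDEntry{}
		if err := gob.NewDecoder(bytes.NewReader(valBytes)).Decode(pkidEntryObj); err != nil {
			return nil, errors.Wrapf(err, "_exampleEnumeratePKIDMappings: Problem decoding value: ")
		}
		pkidEntries = append(pkidEntries, pkidEntryObj)
	}
	return pkidEntries, nil
}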
   481  
   482  // -------------------------------------------------------------------------------------
   483  // DeSo balance mapping functions
   484  // -------------------------------------------------------------------------------------
   485  
   486  func _dbKeyForPublicKeyToDeSoBalanceNanos(publicKey []byte) []byte {
   487  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   488  	prefixCopy := append([]byte{}, _PrefixPublicKeyToDeSoBalanceNanos...)
   489  	key := append(prefixCopy, publicKey...)
   490  	return key
   491  }
   492  
   493  func DbGetPrefixForPublicKeyToDesoBalanceNanos() []byte {
   494  	return append([]byte{}, _PrefixPublicKeyToDeSoBalanceNanos...)
   495  }
   496  
   497  func DbGetDeSoBalanceNanosForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte,
   498  ) (_balance uint64, _err error) {
   499  
   500  	key := _dbKeyForPublicKeyToDeSoBalanceNanos(publicKey)
   501  	desoBalanceItem, err := txn.Get(key)
   502  	if err != nil {
   503  		return uint64(0), nil
   504  	}
   505  	desoBalanceBytes, err := desoBalanceItem.ValueCopy(nil)
   506  	if err != nil {
   507  		return uint64(0), errors.Wrapf(
   508  			err, "DbGetDeSoBalanceNanosForPublicKeyWithTxn: Problem getting balance for: %s ",
   509  			PkToStringBoth(publicKey))
   510  	}
   511  
   512  	desoBalance := DecodeUint64(desoBalanceBytes)
   513  
   514  	return desoBalance, nil
   515  }
   516  
   517  func DbGetDeSoBalanceNanosForPublicKey(db *badger.DB, publicKey []byte,
   518  ) (_balance uint64, _err error) {
   519  	ret := uint64(0)
   520  	dbErr := db.View(func(txn *badger.Txn) error {
   521  		var err error
   522  		ret, err = DbGetDeSoBalanceNanosForPublicKeyWithTxn(txn, publicKey)
   523  		if err != nil {
   524  			return fmt.Errorf("DbGetDeSoBalanceNanosForPublicKey: %v", err)
   525  		}
   526  		return nil
   527  	})
   528  	if dbErr != nil {
   529  		return ret, dbErr
   530  	}
   531  	return ret, nil
   532  }
   533  
   534  func DbPutDeSoBalanceForPublicKeyWithTxn(
   535  	txn *badger.Txn, publicKey []byte, balanceNanos uint64) error {
   536  
   537  	if len(publicKey) != btcec.PubKeyBytesLenCompressed {
   538  		return fmt.Errorf("DbPutDeSoBalanceForPublicKeyWithTxn: Public key "+
   539  			"length %d != %d", len(publicKey), btcec.PubKeyBytesLenCompressed)
   540  	}
   541  
   542  	balanceBytes := EncodeUint64(balanceNanos)
   543  
   544  	if err := txn.Set(_dbKeyForPublicKeyToDeSoBalanceNanos(publicKey), balanceBytes); err != nil {
   545  
   546  		return errors.Wrapf(
   547  			err, "DbPutDeSoBalanceForPublicKey: Problem adding balance mapping of %d for: %s ",
   548  			balanceNanos, PkToStringBoth(publicKey))
   549  	}
   550  
   551  	return nil
   552  }
   553  
   554  func DbPutDeSoBalanceForPublicKey(handle *badger.DB, publicKey []byte, balanceNanos uint64) error {
   555  
   556  	return handle.Update(func(txn *badger.Txn) error {
   557  		return DbPutDeSoBalanceForPublicKeyWithTxn(txn, publicKey, balanceNanos)
   558  	})
   559  }
   560  
   561  func DbDeletePublicKeyToDeSoBalanceWithTxn(txn *badger.Txn, publicKey []byte) error {
   562  
   563  	if err := txn.Delete(_dbKeyForPublicKeyToDeSoBalanceNanos(publicKey)); err != nil {
   564  		return errors.Wrapf(err, "DbDeletePublicKeyToDeSoBalanceWithTxn: Problem deleting "+
   565  			"balance for public key %s", PkToStringMainnet(publicKey))
   566  	}
   567  
   568  	return nil
   569  }
   570  
   571  func DbDeletePublicKeyToDeSoBalance(handle *badger.DB, publicKey []byte) error {
   572  	return handle.Update(func(txn *badger.Txn) error {
   573  		return DbDeletePublicKeyToDeSoBalanceWithTxn(txn, publicKey)
   574  	})
   575  }
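
// Illustrative, hypothetical usage sketch: balances are stored as big-endian
// uint64 bytes (EncodeUint64/DecodeUint64), and a missing key reads back as a
// zero balance rather than an error. The function name and the 1000-nano value
// are made up.
func _exampleDeSoBalanceRoundTrip(db *badger.DB, publicKey []byte) (uint64, error) {
	if err := DbPutDeSoBalanceForPublicKey(db, publicKey, 1000); err != nil {
		return 0, err
	}
	return DbGetDeSoBalanceNanosForPublicKey(db, publicKey)
}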
   576  
   577  // -------------------------------------------------------------------------------------
   578  // PrivateMessage mapping functions
   579  // <public key (33 bytes) || uint64 big-endian> ->
   580  // 		< SenderPublicKey || RecipientPublicKey || EncryptedText >
   581  // -------------------------------------------------------------------------------------
   582  
   583  func _dbKeyForMessageEntry(publicKey []byte, tstampNanos uint64) []byte {
   584  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   585  	prefixCopy := append([]byte{}, _PrefixPublicKeyTimestampToPrivateMessage...)
   586  	key := append(prefixCopy, publicKey...)
   587  	key = append(key, EncodeUint64(tstampNanos)...)
   588  	return key
   589  }
   590  
   591  func _dbSeekPrefixForMessagePublicKey(publicKey []byte) []byte {
   592  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   593  	prefixCopy := append([]byte{}, _PrefixPublicKeyTimestampToPrivateMessage...)
   594  	return append(prefixCopy, publicKey...)
   595  }
   596  
   597  // Note that this adds a mapping for the sender *and* the recipient.
   598  func DbPutMessageEntryWithTxn(
   599  	txn *badger.Txn, messageEntry *MessageEntry) error {
   600  
   601  	if len(messageEntry.SenderPublicKey) != btcec.PubKeyBytesLenCompressed {
   602  		return fmt.Errorf("DbPutMessageEntryWithTxn: Sender public key "+
   603  			"length %d != %d", len(messageEntry.SenderPublicKey), btcec.PubKeyBytesLenCompressed)
   604  	}
   605  	if len(messageEntry.RecipientPublicKey) != btcec.PubKeyBytesLenCompressed {
   606  		return fmt.Errorf("DbPutMessageEntryWithTxn: Recipient public key "+
   607  			"length %d != %d", len(messageEntry.RecipientPublicKey), btcec.PubKeyBytesLenCompressed)
   608  	}
   609  	messageData := &MessageEntry{
   610  		SenderPublicKey:    messageEntry.SenderPublicKey,
   611  		RecipientPublicKey: messageEntry.RecipientPublicKey,
   612  		EncryptedText:      messageEntry.EncryptedText,
   613  		TstampNanos:        messageEntry.TstampNanos,
   614  		Version:            messageEntry.Version,
   615  	}
   616  
   617  	messageDataBuf := bytes.NewBuffer([]byte{})
   618  	gob.NewEncoder(messageDataBuf).Encode(messageData)
   619  
   620  	if err := txn.Set(_dbKeyForMessageEntry(
   621  		messageEntry.SenderPublicKey, messageEntry.TstampNanos), messageDataBuf.Bytes()); err != nil {
   622  
   623  		return errors.Wrapf(err, "DbPutMessageEntryWithTxn: Problem adding mapping for sender: ")
   624  	}
   625  	if err := txn.Set(_dbKeyForMessageEntry(
   626  		messageEntry.RecipientPublicKey, messageEntry.TstampNanos), messageDataBuf.Bytes()); err != nil {
   627  
   628  		return errors.Wrapf(err, "DbPutMessageEntryWithTxn: Problem adding mapping for recipient: ")
   629  	}
   630  
   631  	return nil
   632  }
   633  
   634  func DbPutMessageEntry(handle *badger.DB, messageEntry *MessageEntry) error {
   635  
   636  	return handle.Update(func(txn *badger.Txn) error {
   637  		return DbPutMessageEntryWithTxn(txn, messageEntry)
   638  	})
   639  }
   640  
   641  func DbGetMessageEntryWithTxn(
   642  	txn *badger.Txn, publicKey []byte, tstampNanos uint64) *MessageEntry {
   643  
   644  	key := _dbKeyForMessageEntry(publicKey, tstampNanos)
   645  	privateMessageObj := &MessageEntry{}
   646  	privateMessageItem, err := txn.Get(key)
   647  	if err != nil {
   648  		return nil
   649  	}
   650  	err = privateMessageItem.Value(func(valBytes []byte) error {
   651  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(privateMessageObj)
   652  	})
   653  	if err != nil {
   654  		glog.Errorf("DbGetMessageEntryWithTxn: Problem reading "+
   655  			"MessageEntry for public key %s with tstampnanos %d",
   656  			PkToStringMainnet(publicKey), tstampNanos)
   657  		return nil
   658  	}
   659  	return privateMessageObj
   660  }
   661  
   662  func DbGetMessageEntry(db *badger.DB, publicKey []byte, tstampNanos uint64) *MessageEntry {
   663  	var ret *MessageEntry
   664  	db.View(func(txn *badger.Txn) error {
   665  		ret = DbGetMessageEntryWithTxn(txn, publicKey, tstampNanos)
   666  		return nil
   667  	})
   668  	return ret
   669  }
   670  
   671  // Note this deletes the message for the sender *and* receiver since a mapping
   672  // should exist for each.
   673  func DbDeleteMessageEntryMappingsWithTxn(
   674  	txn *badger.Txn, publicKey []byte, tstampNanos uint64) error {
   675  
   676  	// First pull up the mapping that exists for the public key passed in.
   677  	// If one doesn't exist then there's nothing to do.
   678  	existingMessage := DbGetMessageEntryWithTxn(txn, publicKey, tstampNanos)
   679  	if existingMessage == nil {
   680  		return nil
   681  	}
   682  
   683  	// When a message exists, delete the mapping for the sender and receiver.
   684  	if err := txn.Delete(_dbKeyForMessageEntry(existingMessage.SenderPublicKey, tstampNanos)); err != nil {
   685  		return errors.Wrapf(err, "DbDeleteMessageEntryMappingsWithTxn: Deleting "+
   686  			"sender mapping for public key %s and tstamp %d failed",
   687  			PkToStringMainnet(existingMessage.SenderPublicKey), tstampNanos)
   688  	}
   689  	if err := txn.Delete(_dbKeyForMessageEntry(existingMessage.RecipientPublicKey, tstampNanos)); err != nil {
   690  		return errors.Wrapf(err, "DbDeleteMessageEntryMappingsWithTxn: Deleting "+
   691  			"recipient mapping for public key %s and tstamp %d failed",
   692  			PkToStringMainnet(existingMessage.RecipientPublicKey), tstampNanos)
   693  	}
   694  
   695  	return nil
   696  }
   697  
   698  func DbDeleteMessageEntryMappings(handle *badger.DB, publicKey []byte, tstampNanos uint64) error {
   699  	return handle.Update(func(txn *badger.Txn) error {
   700  		return DbDeleteMessageEntryMappingsWithTxn(txn, publicKey, tstampNanos)
   701  	})
   702  }
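
// Illustrative, hypothetical sketch: because DbPutMessageEntry writes one mapping
// keyed by the sender and one keyed by the recipient, the same MessageEntry can be
// fetched through either public key. The function name is made up.
func _exampleMessageLookupBothSides(db *badger.DB, messageEntry *MessageEntry) (
	_senderCopy *MessageEntry, _recipientCopy *MessageEntry, _err error) {

	if err := DbPutMessageEntry(db, messageEntry); err != nil {
		return nil, nil, err
	}
	senderCopy := DbGetMessageEntry(db, messageEntry.SenderPublicKey, messageEntry.TstampNanos)
	recipientCopy := DbGetMessageEntry(db, messageEntry.RecipientPublicKey, messageEntry.TstampNanos)
	return senderCopy, recipientCopy, nil
}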
   703  
   704  func DbGetMessageEntriesForPublicKey(handle *badger.DB, publicKey []byte) (
   705  	_privateMessages []*MessageEntry, _err error) {
   706  
   707  	// The seek prefix contains only the public key, so iterating over it returns
   708  	// all of the messages for that key. Because the timestamp is appended to the
   709  	// key in big-endian order, the results come back sorted by time.
   710  	prefix := _dbSeekPrefixForMessagePublicKey(publicKey)
   711  
   712  	// Enumerate every key with this prefix. Unlike the limited variant below,
   713  	// this fetches all of the messages, in ascending timestamp order.
   714  	_, valuesFound := _enumerateKeysForPrefix(handle, prefix)
   715  
   716  	privateMessages := []*MessageEntry{}
   717  	for _, valBytes := range valuesFound {
   718  		privateMessageObj := &MessageEntry{}
   719  		if err := gob.NewDecoder(bytes.NewReader(valBytes)).Decode(privateMessageObj); err != nil {
   720  			return nil, errors.Wrapf(
   721  				err, "DbGetMessageEntriesForPublicKey: Problem decoding value: ")
   722  		}
   723  
   724  		privateMessages = append(privateMessages, privateMessageObj)
   725  	}
   726  
   727  	return privateMessages, nil
   728  }
   729  
   730  func DbGetLimitedMessageEntriesForPublicKey(handle *badger.DB, publicKey []byte) (
   731  	_privateMessages []*MessageEntry, _err error) {
   732  
   733  	// The seek prefix contains only the public key, so iterating over it returns
   734  	// the messages for that key. Because the timestamp is appended to the key in
   735  	// big-endian order, the results come back sorted by time.
   736  	prefix := _dbSeekPrefixForMessagePublicKey(publicKey)
   737  
   738  	// Goes backwards to get messages in time sorted order.
   739  	// Limit the number of keys to speed up load times.
   740  	_, valuesFound := _enumerateLimitedKeysReversedForPrefix(handle, prefix, uint64(MessagesToFetchPerInboxCall))
   741  
   742  	privateMessages := []*MessageEntry{}
   743  	for _, valBytes := range valuesFound {
   744  		privateMessageObj := &MessageEntry{}
   745  		if err := gob.NewDecoder(bytes.NewReader(valBytes)).Decode(privateMessageObj); err != nil {
   746  			return nil, errors.Wrapf(
   747  				err, "DbGetLimitedMessageEntriesForPublicKey: Problem decoding value: ")
   748  		}
   749  
   750  		privateMessages = append(privateMessages, privateMessageObj)
   751  	}
   752  
   753  	return privateMessages, nil
   754  }
   755  
   756  // -------------------------------------------------------------------------------------
   757  // Forbidden block signature public key functions
   758  // <prefix, public key> -> <>
   759  // -------------------------------------------------------------------------------------
   760  
   761  func _dbKeyForForbiddenBlockSignaturePubKeys(publicKey []byte) []byte {
   762  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   763  	prefixCopy := append([]byte{}, _PrefixForbiddenBlockSignaturePubKeys...)
   764  	key := append(prefixCopy, publicKey...)
   765  	return key
   766  }
   767  
   768  func DbPutForbiddenBlockSignaturePubKeyWithTxn(txn *badger.Txn, publicKey []byte) error {
   769  
   770  	if len(publicKey) != btcec.PubKeyBytesLenCompressed {
   771  		return fmt.Errorf("DbPutForbiddenBlockSignaturePubKeyWithTxn: Forbidden public key "+
   772  			"length %d != %d", len(publicKey), btcec.PubKeyBytesLenCompressed)
   773  	}
   774  
   775  	if err := txn.Set(_dbKeyForForbiddenBlockSignaturePubKeys(publicKey), []byte{}); err != nil {
   776  		return errors.Wrapf(err, "DbPutForbiddenBlockSignaturePubKeyWithTxn: Problem adding mapping for public key: ")
   777  	}
   778  
   779  	return nil
   780  }
   781  
   782  func DbPutForbiddenBlockSignaturePubKey(handle *badger.DB, publicKey []byte) error {
   783  
   784  	return handle.Update(func(txn *badger.Txn) error {
   785  		return DbPutForbiddenBlockSignaturePubKeyWithTxn(txn, publicKey)
   786  	})
   787  }
   788  
   789  func DbGetForbiddenBlockSignaturePubKeyWithTxn(txn *badger.Txn, publicKey []byte) []byte {
   790  
   791  	key := _dbKeyForForbiddenBlockSignaturePubKeys(publicKey)
   792  	_, err := txn.Get(key)
   793  	if err != nil {
   794  		return nil
   795  	}
   796  
   797  	// Typically we return a DB entry here but we don't store anything for this mapping.
   798  	// We use this function instead of one returning true / false for feature consistency.
   799  	return []byte{}
   800  }
   801  
   802  func DbGetForbiddenBlockSignaturePubKey(db *badger.DB, publicKey []byte) []byte {
   803  	var ret []byte
   804  	db.View(func(txn *badger.Txn) error {
   805  		ret = DbGetForbiddenBlockSignaturePubKeyWithTxn(txn, publicKey)
   806  		return nil
   807  	})
   808  	return ret
   809  }
   810  
   811  func DbDeleteForbiddenBlockSignaturePubKeyWithTxn(txn *badger.Txn, publicKey []byte) error {
   812  
   813  	existingEntry := DbGetForbiddenBlockSignaturePubKeyWithTxn(txn, publicKey)
   814  	if existingEntry == nil {
   815  		return nil
   816  	}
   817  
   818  	if err := txn.Delete(_dbKeyForForbiddenBlockSignaturePubKeys(publicKey)); err != nil {
   819  		return errors.Wrapf(err, "DbDeleteForbiddenBlockSignaturePubKeyWithTxn: Deleting "+
   820  			"mapping for public key %s failed", PkToStringMainnet(publicKey))
   821  	}
   822  
   823  	return nil
   824  }
   825  
   826  func DbDeleteForbiddenBlockSignaturePubKey(handle *badger.DB, publicKey []byte) error {
   827  	return handle.Update(func(txn *badger.Txn) error {
   828  		return DbDeleteForbiddenBlockSignaturePubKeyWithTxn(txn, publicKey)
   829  	})
   830  }
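
// Illustrative, hypothetical sketch: the forbidden-signer index stores no value,
// so "present" is signalled by a non-nil empty slice and "absent" by nil, which is
// why the getter above returns []byte rather than a bool. The function name is
// made up.
func _exampleIsForbiddenBlockSigner(db *badger.DB, publicKey []byte) bool {
	return DbGetForbiddenBlockSignaturePubKey(db, publicKey) != nil
}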
   831  
   832  // -------------------------------------------------------------------------------------
   833  // Likes mapping functions
   834  // 		<prefix, user pub key [33]byte, liked post BlockHash> -> <>
   835  // 		<prefix, liked post BlockHash, user pub key [33]byte> -> <>
   836  // -------------------------------------------------------------------------------------
   837  
   838  func _dbKeyForLikerPubKeyToLikedPostHashMapping(
   839  	userPubKey []byte, likedPostHash BlockHash) []byte {
   840  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   841  	prefixCopy := append([]byte{}, _PrefixLikerPubKeyToLikedPostHash...)
   842  	key := append(prefixCopy, userPubKey...)
   843  	key = append(key, likedPostHash[:]...)
   844  	return key
   845  }
   846  
   847  func _dbKeyForLikedPostHashToLikerPubKeyMapping(
   848  	likedPostHash BlockHash, userPubKey []byte) []byte {
   849  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   850  	prefixCopy := append([]byte{}, _PrefixLikedPostHashToLikerPubKey...)
   851  	key := append(prefixCopy, likedPostHash[:]...)
   852  	key = append(key, userPubKey...)
   853  	return key
   854  }
   855  
   856  func _dbSeekPrefixForPostHashesYouLike(yourPubKey []byte) []byte {
   857  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   858  	prefixCopy := append([]byte{}, _PrefixLikerPubKeyToLikedPostHash...)
   859  	return append(prefixCopy, yourPubKey...)
   860  }
   861  
   862  func _dbSeekPrefixForLikerPubKeysLikingAPostHash(likedPostHash BlockHash) []byte {
   863  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   864  	prefixCopy := append([]byte{}, _PrefixLikedPostHashToLikerPubKey...)
   865  	return append(prefixCopy, likedPostHash[:]...)
   866  }
   867  
   868  // Note that this adds a mapping for the user *and* the liked post.
   869  func DbPutLikeMappingsWithTxn(
   870  	txn *badger.Txn, userPubKey []byte, likedPostHash BlockHash) error {
   871  
   872  	if len(userPubKey) != btcec.PubKeyBytesLenCompressed {
   873  		return fmt.Errorf("DbPutLikeMappingsWithTxn: User public key "+
   874  			"length %d != %d", len(userPubKey), btcec.PubKeyBytesLenCompressed)
   875  	}
   876  
   877  	if err := txn.Set(_dbKeyForLikerPubKeyToLikedPostHashMapping(userPubKey, likedPostHash), []byte{}); err != nil {
   878  		return errors.Wrapf(err, "DbPutLikeMappingsWithTxn: Problem adding user to liked post mapping: ")
   879  	}
   880  
   881  	if err := txn.Set(_dbKeyForLikedPostHashToLikerPubKeyMapping(likedPostHash, userPubKey), []byte{}); err != nil {
   882  		return errors.Wrapf(err, "DbPutLikeMappingsWithTxn: Problem adding liked post to user mapping: ")
   883  	}
   884  
   885  	return nil
   886  }
   887  
   888  func DbPutLikeMappings(
   889  	handle *badger.DB, userPubKey []byte, likedPostHash BlockHash) error {
   890  
   891  	return handle.Update(func(txn *badger.Txn) error {
   892  		return DbPutLikeMappingsWithTxn(txn, userPubKey, likedPostHash)
   893  	})
   894  }
   895  
   896  func DbGetLikerPubKeyToLikedPostHashMappingWithTxn(
   897  	txn *badger.Txn, userPubKey []byte, likedPostHash BlockHash) []byte {
   898  
   899  	key := _dbKeyForLikerPubKeyToLikedPostHashMapping(userPubKey, likedPostHash)
   900  	_, err := txn.Get(key)
   901  	if err != nil {
   902  		return nil
   903  	}
   904  
   905  	// Typically we return a DB entry here but we don't store anything for like mappings.
   906  	// We use this function instead of one returning true / false for feature consistency.
   907  	return []byte{}
   908  }
   909  
   910  func DbGetLikerPubKeyToLikedPostHashMapping(
   911  	db *badger.DB, userPubKey []byte, likedPostHash BlockHash) []byte {
   912  	var ret []byte
   913  	db.View(func(txn *badger.Txn) error {
   914  		ret = DbGetLikerPubKeyToLikedPostHashMappingWithTxn(txn, userPubKey, likedPostHash)
   915  		return nil
   916  	})
   917  	return ret
   918  }
   919  
   920  // Note this deletes the like for the user *and* the liked post since a mapping
   921  // should exist for each.
   922  func DbDeleteLikeMappingsWithTxn(
   923  	txn *badger.Txn, userPubKey []byte, likedPostHash BlockHash) error {
   924  
   925  	// First check that a mapping exists. If one doesn't exist then there's nothing to do.
   926  	existingMapping := DbGetLikerPubKeyToLikedPostHashMappingWithTxn(
   927  		txn, userPubKey, likedPostHash)
   928  	if existingMapping == nil {
   929  		return nil
   930  	}
   931  
   932  	// When a like exists, delete both the user -> post and post -> user mappings.
   933  	if err := txn.Delete(
   934  		_dbKeyForLikerPubKeyToLikedPostHashMapping(userPubKey, likedPostHash)); err != nil {
   935  		return errors.Wrapf(err, "DbDeleteLikeMappingsWithTxn: Deleting "+
   936  			"userPubKey %s and likedPostHash %s failed",
   937  			PkToStringBoth(userPubKey), PkToStringBoth(likedPostHash[:]))
   938  	}
   939  	if err := txn.Delete(
   940  		_dbKeyForLikedPostHashToLikerPubKeyMapping(likedPostHash, userPubKey)); err != nil {
   941  		return errors.Wrapf(err, "DbDeleteLikeMappingsWithTxn: Deleting "+
   942  			"likedPostHash %s and userPubKey %s failed",
   943  			PkToStringBoth(likedPostHash[:]), PkToStringBoth(userPubKey))
   944  	}
   945  
   946  	return nil
   947  }
   948  
   949  func DbDeleteLikeMappings(
   950  	handle *badger.DB, userPubKey []byte, likedPostHash BlockHash) error {
   951  	return handle.Update(func(txn *badger.Txn) error {
   952  		return DbDeleteLikeMappingsWithTxn(txn, userPubKey, likedPostHash)
   953  	})
   954  }
   955  
   956  func DbGetPostHashesYouLike(handle *badger.DB, yourPublicKey []byte) (
   957  	_postHashes []*BlockHash, _err error) {
   958  
   959  	prefix := _dbSeekPrefixForPostHashesYouLike(yourPublicKey)
   960  	keysFound, _ := _enumerateKeysForPrefix(handle, prefix)
   961  
   962  	postHashesYouLike := []*BlockHash{}
   963  	for _, keyBytes := range keysFound {
   964  		// We must slice off the first byte and userPubKey to get the likedPostHash.
   965  		postHash := &BlockHash{}
   966  		copy(postHash[:], keyBytes[1+btcec.PubKeyBytesLenCompressed:])
   967  		postHashesYouLike = append(postHashesYouLike, postHash)
   968  	}
   969  
   970  	return postHashesYouLike, nil
   971  }
   972  
   973  func DbGetLikerPubKeysLikingAPostHash(handle *badger.DB, likedPostHash BlockHash) (
   974  	_pubKeys [][]byte, _err error) {
   975  
   976  	prefix := _dbSeekPrefixForLikerPubKeysLikingAPostHash(likedPostHash)
   977  	keysFound, _ := _enumerateKeysForPrefix(handle, prefix)
   978  
   979  	userPubKeys := [][]byte{}
   980  	for _, keyBytes := range keysFound {
   981  		// We must slice off the first byte and likedPostHash to get the userPubKey.
   982  		userPubKey := keyBytes[1+HashSizeBytes:]
   983  		userPubKeys = append(userPubKeys, userPubKey)
   984  	}
   985  
   986  	return userPubKeys, nil
   987  }
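
// Illustrative, hypothetical sketch: the two like indexes answer both "which posts
// does this user like" (forward) and "who likes this post" (reverse). The function
// name is made up; the calls it wraps are defined above.
func _exampleLikeIndexes(db *badger.DB, userPubKey []byte, likedPostHash BlockHash) error {
	if err := DbPutLikeMappings(db, userPubKey, likedPostHash); err != nil {
		return err
	}
	// Forward index: post hashes liked by the user.
	postHashes, err := DbGetPostHashesYouLike(db, userPubKey)
	if err != nil {
		return err
	}
	// Reverse index: pub keys of users who liked the post.
	likerPubKeys, err := DbGetLikerPubKeysLikingAPostHash(db, likedPostHash)
	if err != nil {
		return err
	}
	glog.Infof("_exampleLikeIndexes: %d liked posts, %d likers", len(postHashes), len(likerPubKeys))
	return nil
}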
   988  
   989  // -------------------------------------------------------------------------------------
   990  // Reposts mapping functions
   991  // 		<prefix, user pub key [33]byte, reposted post BlockHash> -> <>
   992  // 		<prefix, reposted post BlockHash, user pub key [33]byte> -> <>
   993  // -------------------------------------------------------------------------------------
   994  // _PrefixReposterPubKeyRepostedPostHashToRepostPostHash
   995  func _dbKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey []byte, repostedPostHash BlockHash) []byte {
   996  	// Make a copy to avoid multiple calls to this function re-using the same slice.
   997  	prefixCopy := append([]byte{}, _PrefixReposterPubKeyRepostedPostHashToRepostPostHash...)
   998  	key := append(prefixCopy, userPubKey...)
   999  	key = append(key, repostedPostHash[:]...)
  1000  	return key
  1001  }
  1002  
  1003  // _PrefixRepostedPostHashReposterPubKey
  1004  func _dbKeyForRepostedPostHashReposterPubKey(repostedPostHash *BlockHash, reposterPubKey []byte) []byte {
  1005  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1006  	prefixCopy := append([]byte{}, _PrefixRepostedPostHashReposterPubKey...)
  1007  	key := append(prefixCopy, repostedPostHash[:]...)
  1008  	key = append(key, reposterPubKey...)
  1009  	return key
  1010  }
  1011  
  1012  // **For quoted reposts**
  1013  // _PrefixRepostedPostHashReposterPubKeyRepostPostHash
  1014  func _dbKeyForRepostedPostHashReposterPubKeyRepostPostHash(
  1015  	repostedPostHash *BlockHash, reposterPubKey []byte, repostPostHash *BlockHash) []byte {
  1016  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1017  	prefixCopy := append([]byte{}, _PrefixRepostedPostHashReposterPubKeyRepostPostHash...)
  1018  	key := append(prefixCopy, repostedPostHash[:]...)
  1019  	key = append(key, reposterPubKey...)
  1020  	key = append(key, repostPostHash[:]...)
  1021  	return key
  1022  }
  1023  
  1024  func _dbSeekPrefixForPostHashesYouRepost(yourPubKey []byte) []byte {
  1025  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1026  	prefixCopy := append([]byte{}, _PrefixReposterPubKeyRepostedPostHashToRepostPostHash...)
  1027  	return append(prefixCopy, yourPubKey...)
  1028  }
  1029  
  1030  // Note that this adds the reposter pub key -> reposted post hash mapping, which stores the RepostEntry.
  1031  func DbPutRepostMappingsWithTxn(
  1032  	txn *badger.Txn, userPubKey []byte, repostedPostHash BlockHash, repostEntry RepostEntry) error {
  1033  
  1034  	if len(userPubKey) != btcec.PubKeyBytesLenCompressed {
  1035  		return fmt.Errorf("DbPutRepostMappingsWithTxn: User public key "+
  1036  			"length %d != %d", len(userPubKey), btcec.PubKeyBytesLenCompressed)
  1037  	}
  1038  
  1039  	repostDataBuf := bytes.NewBuffer([]byte{})
  1040  	gob.NewEncoder(repostDataBuf).Encode(repostEntry)
  1041  
  1042  	if err := txn.Set(_dbKeyForReposterPubKeyRepostedPostHashToRepostPostHash(
  1043  		userPubKey, repostedPostHash), repostDataBuf.Bytes()); err != nil {
  1044  
  1045  		return errors.Wrapf(
  1046  			err, "DbPutRepostMappingsWithTxn: Problem adding user to reposted post mapping: ")
  1047  	}
  1048  
  1049  	return nil
  1050  }
  1051  
  1052  func DbPutRepostMappings(
  1053  	handle *badger.DB, userPubKey []byte, repostedPostHash BlockHash, repostEntry RepostEntry) error {
  1054  
  1055  	return handle.Update(func(txn *badger.Txn) error {
  1056  		return DbPutRepostMappingsWithTxn(txn, userPubKey, repostedPostHash, repostEntry)
  1057  	})
  1058  }
  1059  
  1060  func DbGetReposterPubKeyRepostedPostHashToRepostEntryWithTxn(
  1061  	txn *badger.Txn, userPubKey []byte, repostedPostHash BlockHash) *RepostEntry {
  1062  
  1063  	key := _dbKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey, repostedPostHash)
  1064  	repostEntryObj := &RepostEntry{}
  1065  	repostEntryItem, err := txn.Get(key)
  1066  	if err != nil {
  1067  		return nil
  1068  	}
  1069  	err = repostEntryItem.Value(func(valBytes []byte) error {
  1070  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(repostEntryObj)
  1071  	})
  1072  	if err != nil {
  1073  		glog.Errorf("DbGetReposterPubKeyRepostedPostHashToRepostEntryWithTxn: Problem reading "+
  1074  			"RepostEntry for postHash %v", repostedPostHash)
  1075  		return nil
  1076  	}
  1077  	return repostEntryObj
  1078  }
  1079  
  1080  func DbReposterPubKeyRepostedPostHashToRepostEntry(
  1081  	db *badger.DB, userPubKey []byte, repostedPostHash BlockHash) *RepostEntry {
  1082  	var ret *RepostEntry
  1083  	db.View(func(txn *badger.Txn) error {
  1084  		ret = DbGetReposterPubKeyRepostedPostHashToRepostEntryWithTxn(txn, userPubKey, repostedPostHash)
  1085  		return nil
  1086  	})
  1087  	return ret
  1088  }
  1089  
  1090  // Note this deletes the reposter pub key -> reposted post hash mapping, if one
  1091  // exists.
  1092  func DbDeleteRepostMappingsWithTxn(
  1093  	txn *badger.Txn, userPubKey []byte, repostedPostHash BlockHash) error {
  1094  
  1095  	// First check that a mapping exists. If one doesn't exist then there's nothing to do.
  1096  	existingMapping := DbGetReposterPubKeyRepostedPostHashToRepostEntryWithTxn(
  1097  		txn, userPubKey, repostedPostHash)
  1098  	if existingMapping == nil {
  1099  		return nil
  1100  	}
  1101  
  1102  	// When a repost exists, delete the repost entry mapping.
  1103  	if err := txn.Delete(_dbKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey, repostedPostHash)); err != nil {
  1104  		return errors.Wrapf(err, "DbDeleteRepostMappingsWithTxn: Deleting "+
  1105  			"user public key %s and reposted post hash %s failed",
  1106  			PkToStringMainnet(userPubKey[:]), PkToStringMainnet(repostedPostHash[:]))
  1107  	}
  1108  	return nil
  1109  }
  1110  
  1111  func DbDeleteRepostMappings(
  1112  	handle *badger.DB, userPubKey []byte, repostedPostHash BlockHash) error {
  1113  	return handle.Update(func(txn *badger.Txn) error {
  1114  		return DbDeleteRepostMappingsWithTxn(txn, userPubKey, repostedPostHash)
  1115  	})
  1116  }
  1117  
  1118  func DbGetPostHashesYouRepost(handle *badger.DB, yourPublicKey []byte) (
  1119  	_postHashes []*BlockHash, _err error) {
  1120  
  1121  	prefix := _dbSeekPrefixForPostHashesYouRepost(yourPublicKey)
  1122  	keysFound, _ := _enumerateKeysForPrefix(handle, prefix)
  1123  
  1124  	postHashesYouRepost := []*BlockHash{}
  1125  	for _, keyBytes := range keysFound {
  1126  		// We must slice off the first byte and userPubKey to get the repostedPostHash.
  1127  		postHash := &BlockHash{}
  1128  		copy(postHash[:], keyBytes[1+btcec.PubKeyBytesLenCompressed:])
  1129  		postHashesYouRepost = append(postHashesYouRepost, postHash)
  1130  	}
  1131  
  1132  	return postHashesYouRepost, nil
  1133  }
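
// Illustrative, hypothetical sketch: store a repost and read its RepostEntry back
// through the reposter-keyed index above. The function name is made up.
func _exampleRepostRoundTrip(db *badger.DB, userPubKey []byte,
	repostedPostHash BlockHash, repostEntry RepostEntry) (*RepostEntry, error) {

	if err := DbPutRepostMappings(db, userPubKey, repostedPostHash, repostEntry); err != nil {
		return nil, err
	}
	return DbReposterPubKeyRepostedPostHashToRepostEntry(db, userPubKey, repostedPostHash), nil
}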
  1134  
  1135  // -------------------------------------------------------------------------------------
  1136  // Follows mapping functions
  1137  // 		<prefix, follower pub key [33]byte, followed pub key [33]byte> -> <>
  1138  // 		<prefix, followed pub key [33]byte, follower pub key [33]byte> -> <>
  1139  // -------------------------------------------------------------------------------------
  1140  
  1141  func _dbKeyForFollowerToFollowedMapping(
  1142  	followerPKID *PKID, followedPKID *PKID) []byte {
  1143  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1144  	prefixCopy := append([]byte{}, _PrefixFollowerPKIDToFollowedPKID...)
  1145  	key := append(prefixCopy, followerPKID[:]...)
  1146  	key = append(key, followedPKID[:]...)
  1147  	return key
  1148  }
  1149  
  1150  func _dbKeyForFollowedToFollowerMapping(
  1151  	followedPKID *PKID, followerPKID *PKID) []byte {
  1152  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1153  	prefixCopy := append([]byte{}, _PrefixFollowedPKIDToFollowerPKID...)
  1154  	key := append(prefixCopy, followedPKID[:]...)
  1155  	key = append(key, followerPKID[:]...)
  1156  	return key
  1157  }
  1158  
  1159  func _dbSeekPrefixForPKIDsYouFollow(yourPKID *PKID) []byte {
  1160  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1161  	prefixCopy := append([]byte{}, _PrefixFollowerPKIDToFollowedPKID...)
  1162  	return append(prefixCopy, yourPKID[:]...)
  1163  }
  1164  
  1165  func _dbSeekPrefixForPKIDsFollowingYou(yourPKID *PKID) []byte {
  1166  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1167  	prefixCopy := append([]byte{}, _PrefixFollowedPKIDToFollowerPKID...)
  1168  	return append(prefixCopy, yourPKID[:]...)
  1169  }
  1170  
  1171  // Note that this adds a mapping for the follower *and* the pub key being followed.
  1172  func DbPutFollowMappingsWithTxn(
  1173  	txn *badger.Txn, followerPKID *PKID, followedPKID *PKID) error {
  1174  
  1175  	if len(followerPKID) != btcec.PubKeyBytesLenCompressed {
  1176  		return fmt.Errorf("DbPutFollowMappingsWithTxn: Follower PKID "+
  1177  			"length %d != %d", len(followerPKID[:]), btcec.PubKeyBytesLenCompressed)
  1178  	}
  1179  	if len(followedPKID) != btcec.PubKeyBytesLenCompressed {
  1180  		return fmt.Errorf("DbPutFollowMappingsWithTxn: Followed PKID "+
  1181  			"length %d != %d", len(followedPKID), btcec.PubKeyBytesLenCompressed)
  1182  	}
  1183  
  1184  	if err := txn.Set(_dbKeyForFollowerToFollowedMapping(
  1185  		followerPKID, followedPKID), []byte{}); err != nil {
  1186  
  1187  		return errors.Wrapf(
  1188  			err, "DbPutFollowMappingsWithTxn: Problem adding follower to followed mapping: ")
  1189  	}
  1190  	if err := txn.Set(_dbKeyForFollowedToFollowerMapping(
  1191  		followedPKID, followerPKID), []byte{}); err != nil {
  1192  
  1193  		return errors.Wrapf(
  1194  			err, "DbPutFollowMappingsWithTxn: Problem adding followed to follower mapping: ")
  1195  	}
  1196  
  1197  	return nil
  1198  }
  1199  
  1200  func DbPutFollowMappings(
  1201  	handle *badger.DB, followerPKID *PKID, followedPKID *PKID) error {
  1202  
  1203  	return handle.Update(func(txn *badger.Txn) error {
  1204  		return DbPutFollowMappingsWithTxn(txn, followerPKID, followedPKID)
  1205  	})
  1206  }
  1207  
  1208  func DbGetFollowerToFollowedMappingWithTxn(
  1209  	txn *badger.Txn, followerPKID *PKID, followedPKID *PKID) []byte {
  1210  
  1211  	key := _dbKeyForFollowerToFollowedMapping(followerPKID, followedPKID)
  1212  	_, err := txn.Get(key)
  1213  	if err != nil {
  1214  		return nil
  1215  	}
  1216  
  1217  	// Typically we return a DB entry here but we don't store anything for follow mappings.
  1218  	// We use this function instead of one returning true / false for feature consistency.
  1219  	return []byte{}
  1220  }
  1221  
  1222  func DbGetFollowerToFollowedMapping(db *badger.DB, followerPKID *PKID, followedPKID *PKID) []byte {
  1223  	var ret []byte
  1224  	db.View(func(txn *badger.Txn) error {
  1225  		ret = DbGetFollowerToFollowedMappingWithTxn(txn, followerPKID, followedPKID)
  1226  		return nil
  1227  	})
  1228  	return ret
  1229  }
  1230  
  1231  // Note this deletes the follow for the follower *and* followed since a mapping
  1232  // should exist for each.
  1233  func DbDeleteFollowMappingsWithTxn(
  1234  	txn *badger.Txn, followerPKID *PKID, followedPKID *PKID) error {
  1235  
  1236  	// First check that a mapping exists for the PKIDs passed in.
  1237  	// If one doesn't exist then there's nothing to do.
  1238  	existingMapping := DbGetFollowerToFollowedMappingWithTxn(
  1239  		txn, followerPKID, followedPKID)
  1240  	if existingMapping == nil {
  1241  		return nil
  1242  	}
  1243  
  1244  	// When a follow exists, delete both the follower -> followed and followed -> follower mappings.
  1245  	if err := txn.Delete(_dbKeyForFollowerToFollowedMapping(followerPKID, followedPKID)); err != nil {
  1246  		return errors.Wrapf(err, "DbDeleteFollowMappingsWithTxn: Deleting "+
  1247  			"followerPKID %s and followedPKID %s failed",
  1248  			PkToStringMainnet(followerPKID[:]), PkToStringMainnet(followedPKID[:]))
  1249  	}
  1250  	if err := txn.Delete(_dbKeyForFollowedToFollowerMapping(followedPKID, followerPKID)); err != nil {
  1251  		return errors.Wrapf(err, "DbDeleteFollowMappingsWithTxn: Deleting "+
  1252  			"followedPKID %s and followerPKID %s failed",
  1253  			PkToStringMainnet(followedPKID[:]), PkToStringMainnet(followerPKID[:]))
  1254  	}
  1255  
  1256  	return nil
  1257  }
  1258  
  1259  func DbDeleteFollowMappings(
  1260  	handle *badger.DB, followerPKID *PKID, followedPKID *PKID) error {
  1261  	return handle.Update(func(txn *badger.Txn) error {
  1262  		return DbDeleteFollowMappingsWithTxn(txn, followerPKID, followedPKID)
  1263  	})
  1264  }
  1265  
  1266  func DbGetPKIDsYouFollow(handle *badger.DB, yourPKID *PKID) (
  1267  	_pkids []*PKID, _err error) {
  1268  
  1269  	prefix := _dbSeekPrefixForPKIDsYouFollow(yourPKID)
  1270  	keysFound, _ := _enumerateKeysForPrefix(handle, prefix)
  1271  
  1272  	pkidsYouFollow := []*PKID{}
  1273  	for _, keyBytes := range keysFound {
  1274  		// We must slice off the first byte and followerPKID to get the followedPKID.
  1275  		followedPKIDBytes := keyBytes[1+btcec.PubKeyBytesLenCompressed:]
  1276  		followedPKID := &PKID{}
  1277  		copy(followedPKID[:], followedPKIDBytes)
  1278  		pkidsYouFollow = append(pkidsYouFollow, followedPKID)
  1279  	}
  1280  
  1281  	return pkidsYouFollow, nil
  1282  }
  1283  
  1284  func DbGetPKIDsFollowingYou(handle *badger.DB, yourPKID *PKID) (
  1285  	_pkids []*PKID, _err error) {
  1286  
  1287  	prefix := _dbSeekPrefixForPKIDsFollowingYou(yourPKID)
  1288  	keysFound, _ := _enumerateKeysForPrefix(handle, prefix)
  1289  
  1290  	pkidsFollowingYou := []*PKID{}
  1291  	for _, keyBytes := range keysFound {
  1292  		// We must slice off the first byte and followedPKID to get the followerPKID.
  1293  		followerPKIDBytes := keyBytes[1+btcec.PubKeyBytesLenCompressed:]
  1294  		followerPKID := &PKID{}
  1295  		copy(followerPKID[:], followerPKIDBytes)
  1296  		pkidsFollowingYou = append(pkidsFollowingYou, followerPKID)
  1297  	}
  1298  
  1299  	return pkidsFollowingYou, nil
  1300  }
  1301  
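// Illustrative sketch (hypothetical helper, not used elsewhere): a follower count
// is just the length of the PKID list returned above.
func _exampleFollowerCount(handle *badger.DB, yourPKID *PKID) (int, error) {
	pkidsFollowingYou, err := DbGetPKIDsFollowingYou(handle, yourPKID)
	if err != nil {
		return 0, err
	}
	return len(pkidsFollowingYou), nil
}
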
  1302  func DbGetPubKeysYouFollow(handle *badger.DB, yourPubKey []byte) (
  1303  	_pubKeys [][]byte, _err error) {
  1304  
  1305  	// Get the PKID for the pub key
  1306  	yourPKID := DBGetPKIDEntryForPublicKey(handle, yourPubKey)
  1307  	followPKIDs, err := DbGetPKIDsYouFollow(handle, yourPKID.PKID)
  1308  	if err != nil {
  1309  		return nil, errors.Wrap(err, "DbGetPubKeysYouFollow: ")
  1310  	}
  1311  
  1312  	// Convert the pkids to public keys
  1313  	followPubKeys := [][]byte{}
  1314  	for _, fpkid := range followPKIDs {
  1315  		followPk := DBGetPublicKeyForPKID(handle, fpkid)
  1316  		followPubKeys = append(followPubKeys, followPk)
  1317  	}
  1318  
  1319  	return followPubKeys, nil
  1320  }
  1321  
  1322  func DbGetPubKeysFollowingYou(handle *badger.DB, yourPubKey []byte) (
  1323  	_pubKeys [][]byte, _err error) {
  1324  
  1325  	// Get the PKID for the pub key
  1326  	yourPKID := DBGetPKIDEntryForPublicKey(handle, yourPubKey)
  1327  	followPKIDs, err := DbGetPKIDsFollowingYou(handle, yourPKID.PKID)
  1328  	if err != nil {
  1329  		return nil, errors.Wrap(err, "DbGetPubKeysFollowingYou: ")
  1330  	}
  1331  
  1332  	// Convert the pkids to public keys
  1333  	followPubKeys := [][]byte{}
  1334  	for _, fpkid := range followPKIDs {
  1335  		followPk := DBGetPublicKeyForPKID(handle, fpkid)
  1336  		followPubKeys = append(followPubKeys, followPk)
  1337  	}
  1338  
  1339  	return followPubKeys, nil
  1340  }
  1341  
  1342  // -------------------------------------------------------------------------------------
  1343  // Diamonds mapping functions
  1344  //  <prefix, DiamondReceiverPKID [33]byte, DiamondSenderPKID [33]byte, posthash> -> <gob-serialized DiamondEntry>
  1345  // -------------------------------------------------------------------------------------
  1346  
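// Illustrative sketch (hypothetical value, not referenced elsewhere): the keys
// built by the helpers below are laid out as one prefix byte, two 33-byte PKIDs,
// and a 32-byte post hash, which is the length the parsing code further down
// checks against.
var _exampleDiamondKeyLen = 1 + 2*btcec.PubKeyBytesLenCompressed + HashSizeBytes
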
  1347  func _dbKeyForDiamondReceiverToDiamondSenderMapping(diamondEntry *DiamondEntry) []byte {
  1348  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1349  	prefixCopy := append([]byte{}, _PrefixDiamondReceiverPKIDDiamondSenderPKIDPostHash...)
  1350  	key := append(prefixCopy, diamondEntry.ReceiverPKID[:]...)
  1351  	key = append(key, diamondEntry.SenderPKID[:]...)
  1352  	key = append(key, diamondEntry.DiamondPostHash[:]...)
  1353  	return key
  1354  }
  1355  
  1356  func _dbKeyForDiamondReceiverToDiamondSenderMappingWithoutEntry(
  1357  	diamondReceiverPKID *PKID, diamondSenderPKID *PKID, diamondPostHash *BlockHash) []byte {
  1358  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1359  	prefixCopy := append([]byte{}, _PrefixDiamondReceiverPKIDDiamondSenderPKIDPostHash...)
  1360  	key := append(prefixCopy, diamondReceiverPKID[:]...)
  1361  	key = append(key, diamondSenderPKID[:]...)
  1362  	key = append(key, diamondPostHash[:]...)
  1363  	return key
  1364  }
  1365  
  1366  func _dbKeyForDiamondedPostHashDiamonderPKIDDiamondLevel(diamondEntry *DiamondEntry) []byte {
  1367  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1368  	prefixCopy := append([]byte{}, _PrefixDiamondedPostHashDiamonderPKIDDiamondLevel...)
  1369  	key := append(prefixCopy, diamondEntry.DiamondPostHash[:]...)
  1370  	key = append(key, diamondEntry.SenderPKID[:]...)
  1371  	// Diamond level is an int64 in extraData, but it is forced to be non-negative in consensus.
  1372  	key = append(key, EncodeUint64(uint64(diamondEntry.DiamondLevel))...)
  1373  	return key
  1374  }
  1375  
  1376  func _dbSeekPrefixForPKIDsThatDiamondedYou(yourPKID *PKID) []byte {
  1377  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1378  	prefixCopy := append([]byte{}, _PrefixDiamondReceiverPKIDDiamondSenderPKIDPostHash...)
  1379  	return append(prefixCopy, yourPKID[:]...)
  1380  }
  1381  
  1382  func _dbKeyForDiamondSenderToDiamondReceiverMapping(diamondEntry *DiamondEntry) []byte {
  1383  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1384  	prefixCopy := append([]byte{}, _PrefixDiamondSenderPKIDDiamondReceiverPKIDPostHash...)
  1385  	key := append(prefixCopy, diamondEntry.SenderPKID[:]...)
  1386  	key = append(key, diamondEntry.ReceiverPKID[:]...)
  1387  	key = append(key, diamondEntry.DiamondPostHash[:]...)
  1388  	return key
  1389  }
  1390  
  1391  func _dbKeyForDiamondSenderToDiamondReceiverMappingWithoutEntry(
  1392  	diamondReceiverPKID *PKID, diamondSenderPKID *PKID, diamondPostHash *BlockHash) []byte {
  1393  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1394  	prefixCopy := append([]byte{}, _PrefixDiamondSenderPKIDDiamondReceiverPKIDPostHash...)
  1395  	key := append(prefixCopy, diamondSenderPKID[:]...)
  1396  	key = append(key, diamondReceiverPKID[:]...)
  1397  	key = append(key, diamondPostHash[:]...)
  1398  	return key
  1399  }
  1400  
  1401  func _dbSeekPrefixForPKIDsThatYouDiamonded(yourPKID *PKID) []byte {
  1402  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1403  	prefixCopy := append([]byte{}, _PrefixDiamondSenderPKIDDiamondReceiverPKIDPostHash...)
  1404  	return append(prefixCopy, yourPKID[:]...)
  1405  }
  1406  
  1407  func _dbSeekPrefixForReceiverPKIDAndSenderPKID(receiverPKID *PKID, senderPKID *PKID) []byte {
  1408  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  1409  	prefixCopy := append([]byte{}, _PrefixDiamondReceiverPKIDDiamondSenderPKIDPostHash...)
  1410  	key := append(prefixCopy, receiverPKID[:]...)
  1411  	return append(key, senderPKID[:]...)
  1412  }
  1413  
  1414  func _DbBufForDiamondEntry(diamondEntry *DiamondEntry) []byte {
  1415  	diamondEntryBuf := bytes.NewBuffer([]byte{})
  1416  	gob.NewEncoder(diamondEntryBuf).Encode(diamondEntry)
  1417  	return diamondEntryBuf.Bytes()
  1418  }
  1419  
  1420  func _DbDiamondEntryForDbBuf(buf []byte) *DiamondEntry {
  1421  	if len(buf) == 0 {
  1422  		return nil
  1423  	}
  1424  	ret := &DiamondEntry{}
  1425  	if err := gob.NewDecoder(bytes.NewReader(buf)).Decode(&ret); err != nil {
  1426  		glog.Errorf("Error decoding DiamondEntry from DB: %v", err)
  1427  		return nil
  1428  	}
  1429  	return ret
  1430  }
  1431  
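// Illustrative sketch (hypothetical helper, not used elsewhere): the two gob
// helpers above are intended to be inverses, so encoding an entry and decoding
// it back should reproduce the original, which can be checked with a deep
// comparison.
func _exampleDiamondEntryRoundTrips(diamondEntry *DiamondEntry) bool {
	decoded := _DbDiamondEntryForDbBuf(_DbBufForDiamondEntry(diamondEntry))
	return decoded != nil && reflect.DeepEqual(decoded, diamondEntry)
}
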
  1432  func DbPutDiamondMappingsWithTxn(
  1433  	txn *badger.Txn,
  1434  	diamondEntry *DiamondEntry) error {
  1435  
  1436  	if len(diamondEntry.ReceiverPKID) != btcec.PubKeyBytesLenCompressed {
  1437  		return fmt.Errorf("DbPutDiamondMappingsWithTxn: Receiver PKID "+
  1438  			"length %d != %d", len(diamondEntry.ReceiverPKID[:]), btcec.PubKeyBytesLenCompressed)
  1439  	}
  1440  	if len(diamondEntry.SenderPKID) != btcec.PubKeyBytesLenCompressed {
  1441  		return fmt.Errorf("DbPutDiamondMappingsWithTxn: Sender PKID "+
  1442  			"length %d != %d", len(diamondEntry.SenderPKID), btcec.PubKeyBytesLenCompressed)
  1443  	}
  1444  
  1445  	diamondEntryBytes := _DbBufForDiamondEntry(diamondEntry)
  1446  	if err := txn.Set(_dbKeyForDiamondReceiverToDiamondSenderMapping(diamondEntry), diamondEntryBytes); err != nil {
  1447  		return errors.Wrapf(
  1448  			err, "DbPutDiamondMappingsWithTxn: Problem adding receiver to giver mapping: ")
  1449  	}
  1450  
  1451  	if err := txn.Set(_dbKeyForDiamondSenderToDiamondReceiverMapping(diamondEntry), diamondEntryBytes); err != nil {
  1452  		return errors.Wrapf(err, "DbPutDiamondMappingsWithTxn: Problem adding sender to receiver mapping: ")
  1453  	}
  1454  
  1455  	if err := txn.Set(_dbKeyForDiamondedPostHashDiamonderPKIDDiamondLevel(diamondEntry),
  1456  		[]byte{}); err != nil {
  1457  		return errors.Wrapf(
  1458  			err, "DbPutDiamondMappingsWithTxn: Problem adding DiamondedPostHash Diamonder Diamond Level mapping: ")
  1459  	}
  1460  
  1461  	return nil
  1462  }
  1463  
  1464  func DbPutDiamondMappings(
  1465  	handle *badger.DB,
  1466  	diamondEntry *DiamondEntry) error {
  1467  
  1468  	return handle.Update(func(txn *badger.Txn) error {
  1469  		return DbPutDiamondMappingsWithTxn(
  1470  			txn, diamondEntry)
  1471  	})
  1472  }
  1473  
  1474  func DbGetDiamondMappingsWithTxn(
  1475  	txn *badger.Txn, diamondReceiverPKID *PKID, diamondSenderPKID *PKID, diamondPostHash *BlockHash) *DiamondEntry {
  1476  
  1477  	key := _dbKeyForDiamondReceiverToDiamondSenderMappingWithoutEntry(
  1478  		diamondReceiverPKID, diamondSenderPKID, diamondPostHash)
  1479  	item, err := txn.Get(key)
  1480  	if err != nil {
  1481  		return nil
  1482  	}
  1483  
  1484  	diamondEntryBuf, err := item.ValueCopy(nil)
  1485  	if err != nil {
  1486  		return nil
  1487  	}
  1488  
  1489  	// We decode and return the DiamondEntry stored for this mapping. The caller is
  1490  	// responsible for sanity-checking the entry in order to maintain consistency with
  1491  	// other DB functions that do not return an error.
  1492  	return _DbDiamondEntryForDbBuf(diamondEntryBuf)
  1493  }
  1494  
  1495  func DbGetDiamondMappings(
  1496  	db *badger.DB, diamondReceiverPKID *PKID, diamondSenderPKID *PKID, diamondPostHash *BlockHash) *DiamondEntry {
  1497  	var ret *DiamondEntry
  1498  	db.View(func(txn *badger.Txn) error {
  1499  		ret = DbGetDiamondMappingsWithTxn(
  1500  			txn, diamondReceiverPKID, diamondSenderPKID, diamondPostHash)
  1501  		return nil
  1502  	})
  1503  	return ret
  1504  }
  1505  
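// Illustrative sketch (hypothetical helper, not used elsewhere): a common read
// pattern is to look up the level of a diamond between two PKIDs on a post,
// treating a missing mapping as level zero.
func _exampleDiamondLevel(
	db *badger.DB, diamondReceiverPKID *PKID, diamondSenderPKID *PKID, diamondPostHash *BlockHash) int64 {

	diamondEntry := DbGetDiamondMappings(db, diamondReceiverPKID, diamondSenderPKID, diamondPostHash)
	if diamondEntry == nil {
		return 0
	}
	return diamondEntry.DiamondLevel
}
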
  1506  func DbDeleteDiamondMappingsWithTxn(txn *badger.Txn, diamondEntry *DiamondEntry) error {
  1507  
  1508  	// First check that a mapping exists for the PKIDs passed in.
  1509  	// If one doesn't exist then there's nothing to do.
  1510  	existingMapping := DbGetDiamondMappingsWithTxn(
  1511  		txn, diamondEntry.ReceiverPKID, diamondEntry.SenderPKID, diamondEntry.DiamondPostHash)
  1512  	if existingMapping == nil {
  1513  		return nil
  1514  	}
  1515  
  1516  	// When a DiamondEntry exists, delete the diamond mappings.
  1517  	if err := txn.Delete(_dbKeyForDiamondReceiverToDiamondSenderMapping(diamondEntry)); err != nil {
  1518  		return errors.Wrapf(err, "DbDeleteDiamondMappingsWithTxn: Deleting "+
  1519  			"diamondReceiverPKID %s and diamondSenderPKID %s and diamondPostHash %s failed",
  1520  			PkToStringMainnet(diamondEntry.ReceiverPKID[:]),
  1521  			PkToStringMainnet(diamondEntry.SenderPKID[:]),
  1522  			diamondEntry.DiamondPostHash.String(),
  1523  		)
  1524  	}
  1525  	// When a DiamondEntry exists, delete the diamond mappings.
  1526  	if err := txn.Delete(_dbKeyForDiamondedPostHashDiamonderPKIDDiamondLevel(diamondEntry)); err != nil {
  1527  		return errors.Wrapf(err, "DbDeleteDiamondMappingsWithTxn: Deleting "+
  1528  			"diamondedPostHash %s and diamonderPKID %s and diamondLevel %d failed",
  1529  			diamondEntry.DiamondPostHash.String(),
  1530  			PkToStringMainnet(diamondEntry.SenderPKID[:]),
  1531  			diamondEntry.DiamondLevel,
  1532  		)
  1533  	}
  1534  
  1535  	if err := txn.Delete(_dbKeyForDiamondSenderToDiamondReceiverMapping(diamondEntry)); err != nil {
  1536  		return errors.Wrapf(err, "DbDeleteDiamondMappingsWithTxn: Deleting "+
  1537  			"diamondSenderPKID %s and diamondReceiverPKID %s and diamondPostHash %s failed",
  1538  			PkToStringMainnet(diamondEntry.SenderPKID[:]),
  1539  			PkToStringMainnet(diamondEntry.ReceiverPKID[:]),
  1540  			diamondEntry.DiamondPostHash.String(),
  1541  		)
  1542  	}
  1543  
  1544  	return nil
  1545  }
  1546  
  1547  func DbDeleteDiamondMappings(handle *badger.DB, diamondEntry *DiamondEntry) error {
  1548  	return handle.Update(func(txn *badger.Txn) error {
  1549  		return DbDeleteDiamondMappingsWithTxn(txn, diamondEntry)
  1550  	})
  1551  }
  1552  
  1553  // This function returns a map from each PKID that gave you diamonds to the list of
  1554  // DiamondEntrys (each containing a post hash) for the diamonds that PKID gave.
  1555  func DbGetPKIDsThatDiamondedYouMap(handle *badger.DB, yourPKID *PKID, fetchYouDiamonded bool) (
  1556  	_pkidToDiamondsMap map[PKID][]*DiamondEntry, _err error) {
  1557  
  1558  	prefix := _dbSeekPrefixForPKIDsThatDiamondedYou(yourPKID)
  1559  	diamondSenderStartIdx := 1 + btcec.PubKeyBytesLenCompressed
  1560  	diamondSenderEndIdx := 1 + 2*btcec.PubKeyBytesLenCompressed
  1561  	diamondReceiverStartIdx := 1
  1562  	diamondReceiverEndIdx := 1 + btcec.PubKeyBytesLenCompressed
  1563  	if fetchYouDiamonded {
  1564  		prefix = _dbSeekPrefixForPKIDsThatYouDiamonded(yourPKID)
  1565  		diamondSenderStartIdx = 1
  1566  		diamondSenderEndIdx = 1 + btcec.PubKeyBytesLenCompressed
  1567  		diamondReceiverStartIdx = 1 + btcec.PubKeyBytesLenCompressed
  1568  		diamondReceiverEndIdx = 1 + 2*btcec.PubKeyBytesLenCompressed
  1569  	}
  1570  	keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix)
  1571  
  1572  	pkidsToDiamondEntryMap := make(map[PKID][]*DiamondEntry)
  1573  	for ii, keyBytes := range keysFound {
  1574  		// The DiamondEntry found must not be nil.
  1575  		diamondEntry := _DbDiamondEntryForDbBuf(valsFound[ii])
  1576  		if diamondEntry == nil {
  1577  			return nil, fmt.Errorf(
  1578  				"DbGetPKIDsThatDiamondedYouMap: Found nil DiamondEntry for public key %v "+
  1579  					"and key bytes %#v when seeking; this should never happen",
  1580  				PkToStringMainnet(yourPKID[:]), keyBytes)
  1581  		}
  1582  		expectedDiamondKeyLen := 1 + 2*btcec.PubKeyBytesLenCompressed + HashSizeBytes
  1583  		if len(keyBytes) != expectedDiamondKeyLen {
  1584  			return nil, fmt.Errorf(
  1585  				"DbGetPKIDsThatDiamondedYouMap: Invalid key length %v should be %v",
  1586  				len(keyBytes), expectedDiamondKeyLen)
  1587  		}
  1588  
  1589  		// Note: The code below is mainly just sanity-checking. Checking the key isn't actually
  1590  		// needed in this function, since all the information is duplicated in the entry.
  1591  
  1592  		// Chop out the diamond sender PKID.
  1593  		diamondSenderPKIDBytes := keyBytes[diamondSenderStartIdx:diamondSenderEndIdx]
  1594  		diamondSenderPKID := &PKID{}
  1595  		copy(diamondSenderPKID[:], diamondSenderPKIDBytes)
  1596  		// It must match what's in the DiamondEntry
  1597  		if !reflect.DeepEqual(diamondSenderPKID, diamondEntry.SenderPKID) {
  1598  			return nil, fmt.Errorf(
  1599  				"DbGetPKIDsThatDiamondedYouMap: Sender PKID in DB %v did not "+
  1600  					"match Sender PKID in DiamondEntry %v; this should never happen",
  1601  				PkToStringBoth(diamondSenderPKID[:]), PkToStringBoth(diamondEntry.SenderPKID[:]))
  1602  		}
  1603  
  1604  		// Chop out the diamond receiver PKID
  1605  		diamondReceiverPKIDBytes := keyBytes[diamondReceiverStartIdx:diamondReceiverEndIdx]
  1606  		diamondReceiverPKID := &PKID{}
  1607  		copy(diamondReceiverPKID[:], diamondReceiverPKIDBytes)
  1608  		// It must match what's in the DiamondEntry
  1609  		if !reflect.DeepEqual(diamondReceiverPKID, diamondEntry.ReceiverPKID) {
  1610  			return nil, fmt.Errorf(
  1611  				"DbGetPKIDsThatDiamondedYouMap: Receiver PKID in DB %v did not "+
  1612  					"match Receiver PKID in DiamondEntry %v; this should never happen",
  1613  				PkToStringBoth(diamondReceiverPKID[:]), PkToStringBoth(diamondEntry.ReceiverPKID[:]))
  1614  		}
  1615  
  1616  		// Chop out the diamond post hash.
  1617  		diamondPostHashBytes := keyBytes[1+2*btcec.PubKeyBytesLenCompressed:]
  1618  		diamondPostHash := &BlockHash{}
  1619  		copy(diamondPostHash[:], diamondPostHashBytes)
  1620  		// It must match what's in the entry
  1621  		if *diamondPostHash != *diamondEntry.DiamondPostHash {
  1622  			return nil, fmt.Errorf(
  1623  				"DbGetPKIDsThatDiamondedYouMap: Post hash found in DB key %v "+
  1624  					"did not match post hash in DiamondEntry %v; this should never happen",
  1625  				diamondPostHash, diamondEntry.DiamondPostHash)
  1626  		}
  1627  
  1628  		// If a map entry doesn't exist for this sender, create one.
  1629  		newListOfEntrys := pkidsToDiamondEntryMap[*diamondSenderPKID]
  1630  		newListOfEntrys = append(newListOfEntrys, diamondEntry)
  1631  		pkidsToDiamondEntryMap[*diamondSenderPKID] = newListOfEntrys
  1632  	}
  1633  
  1634  	return pkidsToDiamondEntryMap, nil
  1635  }
  1636  
  1637  // This function returns a list of the DiamondEntrys (each containing a post hash) given by senderPKID to receiverPKID.
  1638  func DbGetDiamondEntriesForSenderToReceiver(handle *badger.DB, receiverPKID *PKID, senderPKID *PKID) (
  1639  	_diamondEntries []*DiamondEntry, _err error) {
  1640  
  1641  	prefix := _dbSeekPrefixForReceiverPKIDAndSenderPKID(receiverPKID, senderPKID)
  1642  	keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix)
  1643  	var diamondEntries []*DiamondEntry
  1644  	for ii, keyBytes := range keysFound {
  1645  		// The DiamondEntry found must not be nil.
  1646  		diamondEntry := _DbDiamondEntryForDbBuf(valsFound[ii])
  1647  		if diamondEntry == nil {
  1648  			return nil, fmt.Errorf(
  1649  				"DbGetDiamondEntriesForSenderToReceiver: Found nil DiamondEntry for receiver key %v "+
  1650  					"and sender key %v when seeking; this should never happen",
  1651  				PkToStringMainnet(receiverPKID[:]), PkToStringMainnet(senderPKID[:]))
  1652  		}
  1653  		expectedDiamondKeyLen := 1 + 2*btcec.PubKeyBytesLenCompressed + HashSizeBytes
  1654  		if len(keyBytes) != expectedDiamondKeyLen {
  1655  			return nil, fmt.Errorf(
  1656  				"DbGetDiamondEntriesForSenderToReceiver: Invalid key length %v should be %v",
  1657  				len(keyBytes), expectedDiamondKeyLen)
  1658  		}
  1659  
  1660  		// Note: The code below is mainly just sanity-checking. Checking the key isn't actually
  1661  		// needed in this function, since all the information is duplicated in the entry.
  1662  
  1663  		// Chop out the diamond sender PKID.
  1664  		diamondSenderPKIDBytes := keyBytes[1+btcec.PubKeyBytesLenCompressed : 1+2*btcec.PubKeyBytesLenCompressed]
  1665  		diamondSenderPKID := &PKID{}
  1666  		copy(diamondSenderPKID[:], diamondSenderPKIDBytes)
  1667  		// It must match what's in the DiamondEntry
  1668  		if !reflect.DeepEqual(diamondSenderPKID, diamondEntry.SenderPKID) {
  1669  			return nil, fmt.Errorf(
  1670  				"DbGetDiamondEntriesForSenderToReceiver: Sender PKID in DB %v did not "+
  1671  					"match Sender PKID in DiamondEntry %v; this should never happen",
  1672  				PkToStringBoth(diamondSenderPKID[:]), PkToStringBoth(diamondEntry.SenderPKID[:]))
  1673  		}
  1674  
  1675  		// Chop out the diamond post hash.
  1676  		diamondPostHashBytes := keyBytes[1+2*btcec.PubKeyBytesLenCompressed:]
  1677  		diamondPostHash := &BlockHash{}
  1678  		copy(diamondPostHash[:], diamondPostHashBytes)
  1679  		// It must match what's in the entry
  1680  		if *diamondPostHash != *diamondEntry.DiamondPostHash {
  1681  			return nil, fmt.Errorf(
  1682  				"DbGetDiamondEntriesForSenderToReceiver: Post hash found in DB key %v "+
  1683  					"did not match post hash in DiamondEntry %v; this should never happen",
  1684  				diamondPostHash, diamondEntry.DiamondPostHash)
  1685  		}
  1686  		// Append the diamond entry to the slice
  1687  		diamondEntries = append(diamondEntries, diamondEntry)
  1688  	}
  1689  	return diamondEntries, nil
  1690  }
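
// Illustrative sketch (hypothetical helper, not used elsewhere): grouping the
// entries returned above by level gives a per-level count of the diamonds a
// sender has given a receiver.
func _exampleDiamondCountsByLevel(
	handle *badger.DB, receiverPKID *PKID, senderPKID *PKID) (map[int64]int, error) {

	diamondEntries, err := DbGetDiamondEntriesForSenderToReceiver(handle, receiverPKID, senderPKID)
	if err != nil {
		return nil, err
	}
	countsByLevel := make(map[int64]int)
	for _, diamondEntry := range diamondEntries {
		countsByLevel[diamondEntry.DiamondLevel]++
	}
	return countsByLevel, nil
}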
  1691  
  1692  // -------------------------------------------------------------------------------------
  1693  // BitcoinBurnTxID mapping functions
  1694  // <BitcoinBurnTxID BlockHash> -> <>
  1695  // -------------------------------------------------------------------------------------
  1696  
  1697  func _keyForBitcoinBurnTxID(bitcoinBurnTxID *BlockHash) []byte {
  1698  	// Make a copy to avoid multiple calls to this function re-using the same
  1699  	// underlying array.
  1700  	prefixCopy := append([]byte{}, _PrefixBitcoinBurnTxIDs...)
  1701  	return append(prefixCopy, bitcoinBurnTxID[:]...)
  1702  }
  1703  
  1704  func DbPutBitcoinBurnTxIDWithTxn(txn *badger.Txn, bitcoinBurnTxID *BlockHash) error {
  1705  	return txn.Set(_keyForBitcoinBurnTxID(bitcoinBurnTxID), []byte{})
  1706  }
  1707  
  1708  func DbExistsBitcoinBurnTxIDWithTxn(txn *badger.Txn, bitcoinBurnTxID *BlockHash) bool {
  1709  	// We don't care about the value because we're just checking to see if the key exists.
  1710  	if _, err := txn.Get(_keyForBitcoinBurnTxID(bitcoinBurnTxID)); err != nil {
  1711  		return false
  1712  	}
  1713  	return true
  1714  }
  1715  
  1716  func DbExistsBitcoinBurnTxID(db *badger.DB, bitcoinBurnTxID *BlockHash) bool {
  1717  	var exists bool
  1718  	db.View(func(txn *badger.Txn) error {
  1719  		exists = DbExistsBitcoinBurnTxIDWithTxn(txn, bitcoinBurnTxID)
  1720  		return nil
  1721  	})
  1722  	return exists
  1723  }
  1724  
  1725  func DbDeleteBitcoinBurnTxIDWithTxn(txn *badger.Txn, bitcoinBurnTxID *BlockHash) error {
  1726  	return txn.Delete(_keyForBitcoinBurnTxID(bitcoinBurnTxID))
  1727  }
  1728  
  1729  func DbGetAllBitcoinBurnTxIDs(handle *badger.DB) (_bitcoinBurnTxIDs []*BlockHash) {
  1730  	keysFound, _ := _enumerateKeysForPrefix(handle, _PrefixBitcoinBurnTxIDs)
  1731  	bitcoinBurnTxIDs := []*BlockHash{}
  1732  	for _, key := range keysFound {
  1733  		bbtxid := &BlockHash{}
  1734  		copy(bbtxid[:], key[1:])
  1735  		bitcoinBurnTxIDs = append(bitcoinBurnTxIDs, bbtxid)
  1736  	}
  1737  
  1738  	return bitcoinBurnTxIDs
  1739  }
  1740  
  1741  func _getBlockHashForPrefixWithTxn(txn *badger.Txn, prefix []byte) *BlockHash {
  1742  	var ret BlockHash
  1743  	bhItem, err := txn.Get(prefix)
  1744  	if err != nil {
  1745  		return nil
  1746  	}
  1747  	_, err = bhItem.ValueCopy(ret[:])
  1748  	if err != nil {
  1749  		return nil
  1750  	}
  1751  
  1752  	return &ret
  1753  }
  1754  
  1755  func _getBlockHashForPrefix(handle *badger.DB, prefix []byte) *BlockHash {
  1756  	var ret *BlockHash
  1757  	err := handle.View(func(txn *badger.Txn) error {
  1758  		ret = _getBlockHashForPrefixWithTxn(txn, prefix)
  1759  		return nil
  1760  	})
  1761  	if err != nil {
  1762  		return nil
  1763  	}
  1764  	return ret
  1765  }
  1766  
  1767  // GetBadgerDbPath returns the path where we store the badgerdb data.
  1768  func GetBadgerDbPath(dataDir string) string {
  1769  	return filepath.Join(dataDir, badgerDbFolder)
  1770  }
  1771  
  1772  func _EncodeUint32(num uint32) []byte {
  1773  	numBytes := make([]byte, 4)
  1774  	binary.BigEndian.PutUint32(numBytes, num)
  1775  	return numBytes
  1776  }
  1777  
  1778  func DecodeUint32(num []byte) uint32 {
  1779  	return binary.BigEndian.Uint32(num)
  1780  }
  1781  
  1782  func EncodeUint64(num uint64) []byte {
  1783  	numBytes := make([]byte, 8)
  1784  	binary.BigEndian.PutUint64(numBytes, num)
  1785  	return numBytes
  1786  }
  1787  
  1788  func DecodeUint64(scoreBytes []byte) uint64 {
  1789  	return binary.BigEndian.Uint64(scoreBytes)
  1790  }
  1791  
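// Illustrative sketch (hypothetical helper, not used elsewhere): big-endian
// encodings compare byte-wise the same way the underlying integers compare
// numerically, which is why heights and indexes are stored big-endian; prefix
// scans then return entries in numeric order.
func _exampleBigEndianPreservesOrdering() bool {
	return bytes.Compare(EncodeUint64(2), EncodeUint64(10)) < 0
}
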
  1792  func DbPutNanosPurchasedWithTxn(txn *badger.Txn, nanosPurchased uint64) error {
  1793  	return txn.Set(_KeyNanosPurchased, EncodeUint64(nanosPurchased))
  1794  }
  1795  
  1796  func DbPutNanosPurchased(handle *badger.DB, nanosPurchased uint64) error {
  1797  	return handle.Update(func(txn *badger.Txn) error {
  1798  		return DbPutNanosPurchasedWithTxn(txn, nanosPurchased)
  1799  	})
  1800  }
  1801  
  1802  func DbGetNanosPurchasedWithTxn(txn *badger.Txn) uint64 {
  1803  	nanosPurchasedItem, err := txn.Get(_KeyNanosPurchased)
  1804  	if err != nil {
  1805  		return 0
  1806  	}
  1807  	nanosPurchasedBuf, err := nanosPurchasedItem.ValueCopy(nil)
  1808  	if err != nil {
  1809  		return 0
  1810  	}
  1811  
  1812  	return DecodeUint64(nanosPurchasedBuf)
  1813  }
  1814  
  1815  func DbGetNanosPurchased(handle *badger.DB) uint64 {
  1816  	var nanosPurchased uint64
  1817  	handle.View(func(txn *badger.Txn) error {
  1818  		nanosPurchased = DbGetNanosPurchasedWithTxn(txn)
  1819  		return nil
  1820  	})
  1821  
  1822  	return nanosPurchased
  1823  }
  1824  
  1825  func DbPutGlobalParamsEntry(handle *badger.DB, globalParamsEntry GlobalParamsEntry) error {
  1826  	return handle.Update(func(txn *badger.Txn) error {
  1827  		return DbPutGlobalParamsEntryWithTxn(txn, globalParamsEntry)
  1828  	})
  1829  }
  1830  
  1831  func DbPutGlobalParamsEntryWithTxn(txn *badger.Txn, globalParamsEntry GlobalParamsEntry) error {
  1832  	globalParamsDataBuf := bytes.NewBuffer([]byte{})
  1833  	err := gob.NewEncoder(globalParamsDataBuf).Encode(globalParamsEntry)
  1834  	if err != nil {
  1835  		return errors.Wrapf(err, "DbPutGlobalParamsEntryWithTxn: Problem encoding global params entry: ")
  1836  	}
  1837  
  1838  	err = txn.Set(_KeyGlobalParams, globalParamsDataBuf.Bytes())
  1839  	if err != nil {
  1840  		return errors.Wrapf(err, "DbPutGlobalParamsEntryWithTxn: Problem adding global params entry to db: ")
  1841  	}
  1842  	return nil
  1843  }
  1844  
  1845  func DbGetGlobalParamsEntryWithTxn(txn *badger.Txn) *GlobalParamsEntry {
  1846  	globalParamsEntryItem, err := txn.Get(_KeyGlobalParams)
  1847  	if err != nil {
  1848  		return &InitialGlobalParamsEntry
  1849  	}
  1850  	globalParamsEntryObj := &GlobalParamsEntry{}
  1851  	err = globalParamsEntryItem.Value(func(valBytes []byte) error {
  1852  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(globalParamsEntryObj)
  1853  	})
  1854  	if err != nil {
  1855  		glog.Errorf("DbGetGlobalParamsEntryWithTxn: Problem reading "+
  1856  			"GlobalParamsEntry: %v", err)
  1857  		return &InitialGlobalParamsEntry
  1858  	}
  1859  
  1860  	return globalParamsEntryObj
  1861  }
  1862  
  1863  func DbGetGlobalParamsEntry(handle *badger.DB) *GlobalParamsEntry {
  1864  	var globalParamsEntry *GlobalParamsEntry
  1865  	handle.View(func(txn *badger.Txn) error {
  1866  		globalParamsEntry = DbGetGlobalParamsEntryWithTxn(txn)
  1867  		return nil
  1868  	})
  1869  	return globalParamsEntry
  1870  }
  1871  
  1872  func DbPutUSDCentsPerBitcoinExchangeRateWithTxn(txn *badger.Txn, usdCentsPerBitcoinExchangeRate uint64) error {
  1873  	return txn.Set(_KeyUSDCentsPerBitcoinExchangeRate, EncodeUint64(usdCentsPerBitcoinExchangeRate))
  1874  }
  1875  
  1876  func DbGetUSDCentsPerBitcoinExchangeRateWithTxn(txn *badger.Txn) uint64 {
  1877  	usdCentsPerBitcoinExchangeRateItem, err := txn.Get(_KeyUSDCentsPerBitcoinExchangeRate)
  1878  	if err != nil {
  1879  		return InitialUSDCentsPerBitcoinExchangeRate
  1880  	}
  1881  	usdCentsPerBitcoinExchangeRateBuf, err := usdCentsPerBitcoinExchangeRateItem.ValueCopy(nil)
  1882  	if err != nil {
  1883  		glog.Error("DbGetUSDCentsPerBitcoinExchangeRateWithTxn: Error parsing DB " +
  1884  			"value; this shouldn't really happen ever")
  1885  		return InitialUSDCentsPerBitcoinExchangeRate
  1886  	}
  1887  
  1888  	return DecodeUint64(usdCentsPerBitcoinExchangeRateBuf)
  1889  }
  1890  
  1891  func DbGetUSDCentsPerBitcoinExchangeRate(handle *badger.DB) uint64 {
  1892  	var usdCentsPerBitcoinExchangeRate uint64
  1893  	handle.View(func(txn *badger.Txn) error {
  1894  		usdCentsPerBitcoinExchangeRate = DbGetUSDCentsPerBitcoinExchangeRateWithTxn(txn)
  1895  		return nil
  1896  	})
  1897  
  1898  	return usdCentsPerBitcoinExchangeRate
  1899  }
  1900  
  1901  func GetUtxoNumEntriesWithTxn(txn *badger.Txn) uint64 {
  1902  	indexItem, err := txn.Get(_KeyUtxoNumEntries)
  1903  	if err != nil {
  1904  		return 0
  1905  	}
  1906  	// Get the current index.
  1907  	indexBytes, err := indexItem.ValueCopy(nil)
  1908  	if err != nil {
  1909  		return 0
  1910  	}
  1911  	numEntries := DecodeUint64(indexBytes)
  1912  
  1913  	return numEntries
  1914  }
  1915  
  1916  func GetUtxoNumEntries(handle *badger.DB) uint64 {
  1917  	var numEntries uint64
  1918  	handle.View(func(txn *badger.Txn) error {
  1919  		numEntries = GetUtxoNumEntriesWithTxn(txn)
  1920  
  1921  		return nil
  1922  	})
  1923  
  1924  	return numEntries
  1925  }
  1926  
  1927  func _SerializeUtxoKey(utxoKey *UtxoKey) []byte {
  1928  	indexBytes := make([]byte, 4)
  1929  	binary.BigEndian.PutUint32(indexBytes, utxoKey.Index)
  1930  	return append(utxoKey.TxID[:], indexBytes...)
  1931  
  1932  }
  1933  
  1934  func _DbKeyForUtxoKey(utxoKey *UtxoKey) []byte {
  1935  	return append(append([]byte{}, _PrefixUtxoKeyToUtxoEntry...), _SerializeUtxoKey(utxoKey)...)
  1936  }
  1937  
  1938  // Implements the reverse of _DbKeyForUtxoKey. This doesn't error-check
  1939  // and caller should make sure they're passing a properly-sized key to
  1940  // this function.
  1941  func _UtxoKeyFromDbKey(utxoDbKey []byte) *UtxoKey {
  1942  	// Read in the TxID, which is at the beginning.
  1943  	txIDBytes := utxoDbKey[:HashSizeBytes]
  1944  	txID := BlockHash{}
  1945  	copy(txID[:], txIDBytes)
  1946  	// Read in the index, which is encoded as a big-endian uint32 at the end.
  1947  	indexBytes := utxoDbKey[HashSizeBytes:]
  1948  	indexValue := binary.BigEndian.Uint32(indexBytes)
  1949  	return &UtxoKey{
  1950  		Index: indexValue,
  1951  		TxID:  txID,
  1952  	}
  1953  }
  1954  
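// Illustrative sketch (hypothetical helper, not used elsewhere): _SerializeUtxoKey
// and _UtxoKeyFromDbKey are intended to be inverses for a properly-sized key, so a
// round trip should reproduce the original UtxoKey.
func _exampleUtxoKeyRoundTrips(utxoKey *UtxoKey) bool {
	return *_UtxoKeyFromDbKey(_SerializeUtxoKey(utxoKey)) == *utxoKey
}
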
  1955  func _DbBufForUtxoEntry(utxoEntry *UtxoEntry) []byte {
  1956  	utxoEntryBuf := bytes.NewBuffer([]byte{})
  1957  	gob.NewEncoder(utxoEntryBuf).Encode(utxoEntry)
  1958  	return utxoEntryBuf.Bytes()
  1959  }
  1960  
  1961  func PutUtxoNumEntriesWithTxn(txn *badger.Txn, newNumEntries uint64) error {
  1962  	return txn.Set(_KeyUtxoNumEntries, EncodeUint64(newNumEntries))
  1963  }
  1964  
  1965  func PutUtxoEntryForUtxoKeyWithTxn(txn *badger.Txn, utxoKey *UtxoKey, utxoEntry *UtxoEntry) error {
  1966  	return txn.Set(_DbKeyForUtxoKey(utxoKey), _DbBufForUtxoEntry(utxoEntry))
  1967  }
  1968  
  1969  func DbGetUtxoEntryForUtxoKeyWithTxn(txn *badger.Txn, utxoKey *UtxoKey) *UtxoEntry {
  1970  	var ret UtxoEntry
  1971  	utxoDbKey := _DbKeyForUtxoKey(utxoKey)
  1972  	item, err := txn.Get(utxoDbKey)
  1973  	if err != nil {
  1974  		return nil
  1975  	}
  1976  
  1977  	err = item.Value(func(valBytes []byte) error {
  1978  		// TODO: Storing with gob is very slow due to reflection. Would be
  1979  		// better if we serialized/deserialized manually.
  1980  		if err := gob.NewDecoder(bytes.NewReader(valBytes)).Decode(&ret); err != nil {
  1981  			return err
  1982  		}
  1983  
  1984  		return nil
  1985  	})
  1986  
  1987  	if err != nil {
  1988  		return nil
  1989  	}
  1990  
  1991  	return &ret
  1992  }
  1993  
  1994  func DbGetUtxoEntryForUtxoKey(handle *badger.DB, utxoKey *UtxoKey) *UtxoEntry {
  1995  	var ret *UtxoEntry
  1996  	handle.View(func(txn *badger.Txn) error {
  1997  		ret = DbGetUtxoEntryForUtxoKeyWithTxn(txn, utxoKey)
  1998  		return nil
  1999  	})
  2000  
  2001  	return ret
  2002  }
  2003  
  2004  func DeleteUtxoEntryForKeyWithTxn(txn *badger.Txn, utxoKey *UtxoKey) error {
  2005  	return txn.Delete(_DbKeyForUtxoKey(utxoKey))
  2006  }
  2007  
  2008  func DeletePubKeyUtxoKeyMappingWithTxn(txn *badger.Txn, publicKey []byte, utxoKey *UtxoKey) error {
  2009  	if len(publicKey) != btcec.PubKeyBytesLenCompressed {
  2010  		return fmt.Errorf("DeletePubKeyUtxoKeyMappingWithTxn: Public key has improper length %d != %d", len(publicKey), btcec.PubKeyBytesLenCompressed)
  2011  	}
  2012  
  2013  	keyToDelete := append(append([]byte{}, _PrefixPubKeyUtxoKey...), publicKey...)
  2014  	keyToDelete = append(keyToDelete, _SerializeUtxoKey(utxoKey)...)
  2015  
  2016  	return txn.Delete(keyToDelete)
  2017  }
  2018  
  2019  func DbBufForUtxoKey(utxoKey *UtxoKey) []byte {
  2020  	utxoKeyBuf := bytes.NewBuffer([]byte{})
  2021  	gob.NewEncoder(utxoKeyBuf).Encode(utxoKey)
  2022  	return utxoKeyBuf.Bytes()
  2023  }
  2024  
  2025  func PutPubKeyUtxoKeyWithTxn(txn *badger.Txn, publicKey []byte, utxoKey *UtxoKey) error {
  2026  	if len(publicKey) != btcec.PubKeyBytesLenCompressed {
  2027  		return fmt.Errorf("PutPubKeyUtxoKeyWithTxn: Public key has improper length %d != %d", len(publicKey), btcec.PubKeyBytesLenCompressed)
  2028  	}
  2029  
  2030  	keyToAdd := append(append([]byte{}, _PrefixPubKeyUtxoKey...), publicKey...)
  2031  	keyToAdd = append(keyToAdd, _SerializeUtxoKey(utxoKey)...)
  2032  
  2033  	return txn.Set(keyToAdd, []byte{})
  2034  }
  2035  
  2036  // DbGetUtxosForPubKey finds the UtxoEntries corresponding to the public
  2037  // key passed in. It also attaches the UtxoKeys to the UtxoEntries it
  2038  // returns for easy access.
  2039  func DbGetUtxosForPubKey(publicKey []byte, handle *badger.DB) ([]*UtxoEntry, error) {
  2040  	// Verify the length of the public key.
  2041  	if len(publicKey) != btcec.PubKeyBytesLenCompressed {
  2042  		return nil, fmt.Errorf("DbGetUtxosForPubKey: Public key has improper "+
  2043  			"length %d != %d", len(publicKey), btcec.PubKeyBytesLenCompressed)
  2044  	}
  2045  	// Look up the utxo keys for this public key.
  2046  	utxoEntriesFound := []*UtxoEntry{}
  2047  	err := handle.View(func(txn *badger.Txn) error {
  2048  		// Start by looping through to find all the UtxoKeys.
  2049  		utxoKeysFound := []*UtxoKey{}
  2050  		opts := badger.DefaultIteratorOptions
  2051  		nodeIterator := txn.NewIterator(opts)
  2052  		defer nodeIterator.Close()
  2053  		prefix := append(append([]byte{}, _PrefixPubKeyUtxoKey...), publicKey...)
  2054  		for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() {
  2055  			// Strip the prefix off the key. What's left should be the UtxoKey.
  2056  			pkUtxoKey := nodeIterator.Item().Key()
  2057  			utxoKeyBytes := pkUtxoKey[len(prefix):]
  2058  			// The size of the utxo key bytes should be equal to the size of a
  2059  			// standard hash (the txid) plus the size of a uint32.
  2060  			if len(utxoKeyBytes) != HashSizeBytes+4 {
  2061  				return fmt.Errorf("Problem reading <pk, utxoKey> mapping; key size %d "+
  2062  					"is not equal to (prefix_byte=%d + len(publicKey)=%d + len(utxoKey)=%d)=%d. "+
  2063  					"Key found: %#v", len(pkUtxoKey), len(_PrefixPubKeyUtxoKey), len(publicKey), HashSizeBytes+4, len(prefix)+HashSizeBytes+4, pkUtxoKey)
  2064  			}
  2065  			// Try and convert the utxo key bytes into a utxo key.
  2066  			utxoKey := _UtxoKeyFromDbKey(utxoKeyBytes)
  2067  			if utxoKey == nil {
  2068  				return fmt.Errorf("Problem reading <pk, utxoKey> mapping; parsing UtxoKey bytes %#v returned nil", utxoKeyBytes)
  2069  			}
  2070  
  2071  			// Now that we have the utxoKey, enqueue it.
  2072  			utxoKeysFound = append(utxoKeysFound, utxoKey)
  2073  		}
  2074  
  2075  		// Once all the UtxoKeys are found, fetch all the UtxoEntries.
  2076  		for ii := range utxoKeysFound {
  2077  			foundUtxoKey := utxoKeysFound[ii]
  2078  			utxoEntry := DbGetUtxoEntryForUtxoKeyWithTxn(txn, foundUtxoKey)
  2079  			if utxoEntry == nil {
  2080  				return fmt.Errorf("UtxoEntry for UtxoKey %v was not found", foundUtxoKey)
  2081  			}
  2082  
  2083  			// Set a back-reference to the utxo key.
  2084  			utxoEntry.UtxoKey = foundUtxoKey
  2085  
  2086  			utxoEntriesFound = append(utxoEntriesFound, utxoEntry)
  2087  		}
  2088  
  2089  		return nil
  2090  	})
  2091  	if err != nil {
  2092  		return nil, errors.Wrapf(err, "DbGetUtxosForPubKey: ")
  2093  	}
  2094  
  2095  	// If there are no errors, return everything we found.
  2096  	return utxoEntriesFound, nil
  2097  }
  2098  
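// Illustrative sketch (hypothetical helper, not used elsewhere): summing the
// amounts of the utxos returned above gives a rough balance for a public key.
// Block-reward maturity rules are ignored here and handled elsewhere.
func _exampleUtxoBalanceForPubKey(publicKey []byte, handle *badger.DB) (uint64, error) {
	utxoEntries, err := DbGetUtxosForPubKey(publicKey, handle)
	if err != nil {
		return 0, err
	}
	totalNanos := uint64(0)
	for _, utxoEntry := range utxoEntries {
		totalNanos += utxoEntry.AmountNanos
	}
	return totalNanos, nil
}
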
  2099  func DeleteUnmodifiedMappingsForUtxoWithTxn(txn *badger.Txn, utxoKey *UtxoKey) error {
  2100  	// Get the entry for the utxoKey from the db.
  2101  	utxoEntry := DbGetUtxoEntryForUtxoKeyWithTxn(txn, utxoKey)
  2102  	if utxoEntry == nil {
  2103  		// If an entry doesn't exist for this key then there is nothing in the
  2104  		// db to delete.
  2105  		return nil
  2106  	}
  2107  
  2108  	// If the entry exists, delete the <UtxoKey -> UtxoEntry> mapping from the db.
  2109  	// It is assumed that the entry corresponding to a key has not been modified
  2110  	// and so is OK to delete
  2111  	if err := DeleteUtxoEntryForKeyWithTxn(txn, utxoKey); err != nil {
  2112  		return err
  2113  	}
  2114  
  2115  	// Delete the <pubkey, utxoKey> -> <> mapping.
  2116  	if err := DeletePubKeyUtxoKeyMappingWithTxn(txn, utxoEntry.PublicKey, utxoKey); err != nil {
  2117  		return err
  2118  	}
  2119  
  2120  	return nil
  2121  }
  2122  
  2123  func PutMappingsForUtxoWithTxn(txn *badger.Txn, utxoKey *UtxoKey, utxoEntry *UtxoEntry) error {
  2124  	// Put the <utxoKey -> utxoEntry> mapping.
  2125  	if err := PutUtxoEntryForUtxoKeyWithTxn(txn, utxoKey, utxoEntry); err != nil {
  2126  		return err
  2127  	}
  2128  
  2129  	// Put the <pubkey, utxoKey> -> <> mapping.
  2130  	if err := PutPubKeyUtxoKeyWithTxn(txn, utxoEntry.PublicKey, utxoKey); err != nil {
  2131  		return err
  2132  	}
  2133  
  2134  	return nil
  2135  }
  2136  
  2137  func _DecodeUtxoOperations(data []byte) ([][]*UtxoOperation, error) {
  2138  	ret := [][]*UtxoOperation{}
  2139  	if err := gob.NewDecoder(bytes.NewReader(data)).Decode(&ret); err != nil {
  2140  		return nil, err
  2141  	}
  2142  	return ret, nil
  2143  }
  2144  
  2145  func _EncodeUtxoOperations(utxoOp [][]*UtxoOperation) []byte {
  2146  	opBuf := bytes.NewBuffer([]byte{})
  2147  	gob.NewEncoder(opBuf).Encode(utxoOp)
  2148  	return opBuf.Bytes()
  2149  }
  2150  
  2151  func _DbKeyForUtxoOps(blockHash *BlockHash) []byte {
  2152  	return append(append([]byte{}, _PrefixBlockHashToUtxoOperations...), blockHash[:]...)
  2153  }
  2154  
  2155  func GetUtxoOperationsForBlockWithTxn(txn *badger.Txn, blockHash *BlockHash) ([][]*UtxoOperation, error) {
  2156  	var retOps [][]*UtxoOperation
  2157  	utxoOpsItem, err := txn.Get(_DbKeyForUtxoOps(blockHash))
  2158  	if err != nil {
  2159  		return nil, err
  2160  	}
  2161  	err = utxoOpsItem.Value(func(valBytes []byte) error {
  2162  		retOps, err = _DecodeUtxoOperations(valBytes)
  2163  		if err != nil {
  2164  			return err
  2165  		}
  2166  
  2167  		return nil
  2168  	})
  2169  
  2170  	if err != nil {
  2171  		return nil, err
  2172  	}
  2173  
  2174  	return retOps, err
  2175  }
  2176  
  2177  func GetUtxoOperationsForBlock(handle *badger.DB, blockHash *BlockHash) ([][]*UtxoOperation, error) {
  2178  	var ops [][]*UtxoOperation
  2179  	err := handle.View(func(txn *badger.Txn) error {
  2180  		var err error
  2181  		ops, err = GetUtxoOperationsForBlockWithTxn(txn, blockHash)
  2182  		return err
  2183  	})
  2184  
  2185  	return ops, err
  2186  }
  2187  
  2188  func PutUtxoOperationsForBlockWithTxn(txn *badger.Txn, blockHash *BlockHash, utxoOpsForBlock [][]*UtxoOperation) error {
  2189  	return txn.Set(_DbKeyForUtxoOps(blockHash), _EncodeUtxoOperations(utxoOpsForBlock))
  2190  }
  2191  
  2192  func DeleteUtxoOperationsForBlockWithTxn(txn *badger.Txn, blockHash *BlockHash) error {
  2193  	return txn.Delete(_DbKeyForUtxoOps(blockHash))
  2194  }
  2195  
  2196  func SerializeBlockNode(blockNode *BlockNode) ([]byte, error) {
  2197  	data := []byte{}
  2198  
  2199  	// Hash
  2200  	if blockNode.Hash == nil {
  2201  		return nil, fmt.Errorf("SerializeBlockNode: Hash cannot be nil")
  2202  	}
  2203  	data = append(data, blockNode.Hash[:]...)
  2204  
  2205  	// Height
  2206  	data = append(data, UintToBuf(uint64(blockNode.Height))...)
  2207  
  2208  	// DifficultyTarget
  2209  	if blockNode.DifficultyTarget == nil {
  2210  		return nil, fmt.Errorf("SerializeBlockNode: DifficultyTarget cannot be nil")
  2211  	}
  2212  	data = append(data, blockNode.DifficultyTarget[:]...)
  2213  
  2214  	// CumWork
  2215  	data = append(data, BigintToHash(blockNode.CumWork)[:]...)
  2216  
  2217  	// Header
  2218  	serializedHeader, err := blockNode.Header.ToBytes(false)
  2219  	if err != nil {
  2220  		return nil, errors.Wrapf(err, "SerializeBlockNode: Problem serializing header")
  2221  	}
  2222  	data = append(data, IntToBuf(int64(len(serializedHeader)))...)
  2223  	data = append(data, serializedHeader...)
  2224  
  2225  	// Status
  2226  	// It's assumed this field is one byte long.
  2227  	data = append(data, UintToBuf(uint64(blockNode.Status))...)
  2228  
  2229  	return data, nil
  2230  }
  2231  
  2232  func DeserializeBlockNode(data []byte) (*BlockNode, error) {
  2233  	blockNode := NewBlockNode(
  2234  		nil,          // Parent
  2235  		&BlockHash{}, // Hash
  2236  		0,            // Height
  2237  		&BlockHash{}, // DifficultyTarget
  2238  		nil,          // CumWork
  2239  		nil,          // Header
  2240  		StatusNone,   // Status
  2241  
  2242  	)
  2243  
  2244  	rr := bytes.NewReader(data)
  2245  
  2246  	// Hash
  2247  	_, err := io.ReadFull(rr, blockNode.Hash[:])
  2248  	if err != nil {
  2249  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding Hash")
  2250  	}
  2251  
  2252  	// Height
  2253  	height, err := ReadUvarint(rr)
  2254  	if err != nil {
  2255  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding Height")
  2256  	}
  2257  	blockNode.Height = uint32(height)
  2258  
  2259  	// DifficultyTarget
  2260  	_, err = io.ReadFull(rr, blockNode.DifficultyTarget[:])
  2261  	if err != nil {
  2262  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding DifficultyTarget")
  2263  	}
  2264  
  2265  	// CumWork
  2266  	tmp := BlockHash{}
  2267  	_, err = io.ReadFull(rr, tmp[:])
  2268  	if err != nil {
  2269  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding CumWork")
  2270  	}
  2271  	blockNode.CumWork = HashToBigint(&tmp)
  2272  
  2273  	// Header
  2274  	payloadLen, err := ReadVarint(rr)
  2275  	if err != nil {
  2276  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding Header length")
  2277  	}
  2278  	headerBytes := make([]byte, payloadLen)
  2279  	_, err = io.ReadFull(rr, headerBytes[:])
  2280  	if err != nil {
  2281  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem reading Header bytes")
  2282  	}
  2283  	blockNode.Header = NewMessage(MsgTypeHeader).(*MsgDeSoHeader)
  2284  	err = blockNode.Header.FromBytes(headerBytes)
  2285  	if err != nil {
  2286  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem parsing Header bytes")
  2287  	}
  2288  
  2289  	// Status
  2290  	status, err := ReadUvarint(rr)
  2291  	if err != nil {
  2292  		return nil, errors.Wrapf(err, "DeserializeBlockNode: Problem decoding Status")
  2293  	}
  2294  	blockNode.Status = BlockStatus(uint32(status))
  2295  
  2296  	return blockNode, nil
  2297  }
  2298  
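// Illustrative sketch (hypothetical helper, not used elsewhere): SerializeBlockNode
// and DeserializeBlockNode are intended to be inverses, so serializing, parsing,
// and re-serializing a node should produce identical bytes.
func _exampleBlockNodeRoundTrips(node *BlockNode) (bool, error) {
	serialized, err := SerializeBlockNode(node)
	if err != nil {
		return false, err
	}
	parsed, err := DeserializeBlockNode(serialized)
	if err != nil {
		return false, err
	}
	reserialized, err := SerializeBlockNode(parsed)
	if err != nil {
		return false, err
	}
	return bytes.Equal(serialized, reserialized), nil
}
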
  2299  type ChainType uint8
  2300  
  2301  const (
  2302  	ChainTypeDeSoBlock = iota
  2303  	ChainTypeBitcoinHeader
  2304  )
  2305  
  2306  func _prefixForChainType(chainType ChainType) []byte {
  2307  	var prefix []byte
  2308  	switch chainType {
  2309  	case ChainTypeDeSoBlock:
  2310  		prefix = _KeyBestDeSoBlockHash
  2311  	case ChainTypeBitcoinHeader:
  2312  		prefix = _KeyBestBitcoinHeaderHash
  2313  	default:
  2314  		glog.Errorf("_prefixForChainType: Unknown ChainType %d; this should never happen", chainType)
  2315  		return nil
  2316  	}
  2317  
  2318  	return prefix
  2319  }
  2320  
  2321  func DbGetBestHash(handle *badger.DB, chainType ChainType) *BlockHash {
  2322  	prefix := _prefixForChainType(chainType)
  2323  	if len(prefix) == 0 {
  2324  		glog.Errorf("DbGetBestHash: Problem getting prefix for ChainType: %d", chainType)
  2325  		return nil
  2326  	}
  2327  	return _getBlockHashForPrefix(handle, prefix)
  2328  }
  2329  
  2330  func PutBestHashWithTxn(txn *badger.Txn, bh *BlockHash, chainType ChainType) error {
  2331  	prefix := _prefixForChainType(chainType)
  2332  	if len(prefix) == 0 {
  2333  		glog.Errorf("PutBestHashWithTxn: Problem getting prefix for ChainType: %d", chainType)
  2334  		return nil
  2335  	}
  2336  	return txn.Set(prefix, bh[:])
  2337  }
  2338  
  2339  func PutBestHash(bh *BlockHash, handle *badger.DB, chainType ChainType) error {
  2340  	return handle.Update(func(txn *badger.Txn) error {
  2341  		return PutBestHashWithTxn(txn, bh, chainType)
  2342  	})
  2343  }
  2344  
  2345  func BlockHashToBlockKey(blockHash *BlockHash) []byte {
  2346  	return append(append([]byte{}, _PrefixBlockHashToBlock...), blockHash[:]...)
  2347  }
  2348  
  2349  func PublicKeyBlockHashToBlockRewardKey(publicKey []byte, blockHash *BlockHash) []byte {
  2350  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  2351  	prefixCopy := append([]byte{}, _PrefixPublicKeyBlockHashToBlockReward...)
  2352  	key := append(prefixCopy, publicKey...)
  2353  	key = append(key, blockHash[:]...)
  2354  	return key
  2355  }
  2356  
  2357  func GetBlockWithTxn(txn *badger.Txn, blockHash *BlockHash) *MsgDeSoBlock {
  2358  	hashKey := BlockHashToBlockKey(blockHash)
  2359  	var blockRet *MsgDeSoBlock
  2360  
  2361  	item, err := txn.Get(hashKey)
  2362  	if err != nil {
  2363  		return nil
  2364  	}
  2365  
  2366  	err = item.Value(func(valBytes []byte) error {
  2367  		ret := NewMessage(MsgTypeBlock).(*MsgDeSoBlock)
  2368  		if err := ret.FromBytes(valBytes); err != nil {
  2369  			return err
  2370  		}
  2371  		blockRet = ret
  2372  
  2373  		return nil
  2374  	})
  2375  	if err != nil {
  2376  		return nil
  2377  	}
  2378  
  2379  	return blockRet
  2380  }
  2381  
  2382  func GetBlock(blockHash *BlockHash, handle *badger.DB) (*MsgDeSoBlock, error) {
  2383  	hashKey := BlockHashToBlockKey(blockHash)
  2384  	var blockRet *MsgDeSoBlock
  2385  	err := handle.View(func(txn *badger.Txn) error {
  2386  		item, err := txn.Get(hashKey)
  2387  		if err != nil {
  2388  			return err
  2389  		}
  2390  
  2391  		err = item.Value(func(valBytes []byte) error {
  2392  			ret := NewMessage(MsgTypeBlock).(*MsgDeSoBlock)
  2393  			if err := ret.FromBytes(valBytes); err != nil {
  2394  				return err
  2395  			}
  2396  			blockRet = ret
  2397  
  2398  			return nil
  2399  		})
  2400  
  2401  		if err != nil {
  2402  			return err
  2403  		}
  2404  
  2405  		return nil
  2406  	})
  2407  	if err != nil {
  2408  		return nil, err
  2409  	}
  2410  
  2411  	return blockRet, nil
  2412  }
  2413  
  2414  func PutBlockWithTxn(txn *badger.Txn, desoBlock *MsgDeSoBlock) error {
  2415  	if desoBlock.Header == nil {
  2416  		return fmt.Errorf("PutBlockWithTxn: Header was nil in block %v", desoBlock)
  2417  	}
  2418  	blockHash, err := desoBlock.Header.Hash()
  2419  	if err != nil {
  2420  		return errors.Wrapf(err, "PutBlockWithTxn: Problem hashing header: ")
  2421  	}
  2422  	blockKey := BlockHashToBlockKey(blockHash)
  2423  	data, err := desoBlock.ToBytes(false)
  2424  	if err != nil {
  2425  		return err
  2426  	}
  2427  	// First check to see if the block is already in the db.
  2428  	if _, err := txn.Get(blockKey); err == nil {
  2429  		// err == nil means the block already exists in the db so
  2430  		// no need to store it.
  2431  		return nil
  2432  	}
  2433  	// If the block is not in the db then set it.
  2434  	if err := txn.Set(blockKey, data); err != nil {
  2435  		return err
  2436  	}
  2437  
  2438  	// Index the block reward. Used for deducting immature block rewards from user balances.
  2439  	if len(desoBlock.Txns) == 0 {
  2440  		return fmt.Errorf("PutBlockWithTxn: Got block without any txns %v", desoBlock)
  2441  	}
  2442  	blockRewardTxn := desoBlock.Txns[0]
  2443  	if blockRewardTxn.TxnMeta.GetTxnType() != TxnTypeBlockReward {
  2444  		return fmt.Errorf("PutBlockWithTxn: Got block without block reward as first txn %v", desoBlock)
  2445  	}
  2446  	// It's possible the block reward is split across multiple public keys.
  2447  	pubKeyToBlockRewardMap := make(map[PkMapKey]uint64)
  2448  	for _, bro := range desoBlock.Txns[0].TxOutputs {
  2449  		pkMapKey := MakePkMapKey(bro.PublicKey)
  2450  		if _, hasKey := pubKeyToBlockRewardMap[pkMapKey]; !hasKey {
  2451  			pubKeyToBlockRewardMap[pkMapKey] = bro.AmountNanos
  2452  		} else {
  2453  			pubKeyToBlockRewardMap[pkMapKey] += bro.AmountNanos
  2454  		}
  2455  	}
  2456  	for pkMapKey, blockReward := range pubKeyToBlockRewardMap {
  2457  		blockRewardKey := PublicKeyBlockHashToBlockRewardKey(pkMapKey[:], blockHash)
  2458  		if err := txn.Set(blockRewardKey, EncodeUint64(blockReward)); err != nil {
  2459  			return err
  2460  		}
  2461  	}
  2462  
  2463  	return nil
  2464  }
  2465  
  2466  func PutBlock(desoBlock *MsgDeSoBlock, handle *badger.DB) error {
  2467  	err := handle.Update(func(txn *badger.Txn) error {
  2468  		return PutBlockWithTxn(txn, desoBlock)
  2469  	})
  2470  	if err != nil {
  2471  		return err
  2472  	}
  2473  
  2474  	return nil
  2475  }
  2476  
  2477  func DbGetBlockRewardForPublicKeyBlockHashWithTxn(txn *badger.Txn, publicKey []byte, blockHash *BlockHash,
  2478  ) (_balance uint64, _err error) {
  2479  	key := PublicKeyBlockHashToBlockRewardKey(publicKey, blockHash)
  2480  	desoBalanceItem, err := txn.Get(key)
  2481  	if err != nil {
  2482  		return uint64(0), nil
  2483  	}
  2484  	desoBalanceBytes, err := desoBalanceItem.ValueCopy(nil)
  2485  	if err != nil {
  2486  		return uint64(0), errors.Wrap(err, "DbGetBlockRewardForPublicKeyBlockHashWithTxn: "+
  2487  			"Problem getting block reward value, this should never happen: ")
  2488  	}
  2489  	desoBalance := DecodeUint64(desoBalanceBytes)
  2490  
  2491  	return desoBalance, nil
  2492  }
  2493  
  2494  func DbGetBlockRewardForPublicKeyBlockHash(db *badger.DB, publicKey []byte, blockHash *BlockHash,
  2495  ) (_balance uint64, _err error) {
  2496  	ret := uint64(0)
  2497  	dbErr := db.View(func(txn *badger.Txn) error {
  2498  		var err error
  2499  		ret, err = DbGetBlockRewardForPublicKeyBlockHashWithTxn(txn, publicKey, blockHash)
  2500  		if err != nil {
  2501  			return errors.Wrap(err, "DbGetBlockRewardForPublicKeyBlockHash: ")
  2502  		}
  2503  		return nil
  2504  	})
  2505  	if dbErr != nil {
  2506  		return uint64(0), dbErr
  2507  	}
  2508  	return ret, nil
  2509  }
  2510  
  2511  func _heightHashToNodeIndexPrefix(bitcoinNodes bool) []byte {
  2512  	prefix := append([]byte{}, _PrefixHeightHashToNodeInfo...)
  2513  	if bitcoinNodes {
  2514  		prefix = append([]byte{}, _PrefixBitcoinHeightHashToNodeInfo...)
  2515  	}
  2516  
  2517  	return prefix
  2518  }
  2519  
  2520  func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool) []byte {
  2521  	prefix := _heightHashToNodeIndexPrefix(bitcoinNodes)
  2522  
  2523  	heightBytes := make([]byte, 4)
  2524  	binary.BigEndian.PutUint32(heightBytes[:], height)
  2525  	key := append(prefix, heightBytes[:]...)
  2526  	key = append(key, hash[:]...)
  2527  
  2528  	return key
  2529  }
  2530  
  2531  func GetHeightHashToNodeInfoWithTxn(
  2532  	txn *badger.Txn, height uint32, hash *BlockHash, bitcoinNodes bool) *BlockNode {
  2533  
  2534  	key := _heightHashToNodeIndexKey(height, hash, bitcoinNodes)
  2535  	nodeValue, err := txn.Get(key)
  2536  	if err != nil {
  2537  		return nil
  2538  	}
  2539  	var blockNode *BlockNode
  2540  	nodeValue.Value(func(nodeBytes []byte) error {
  2541  		blockNode, err = DeserializeBlockNode(nodeBytes)
  2542  		if err != nil {
  2543  			return err
  2544  		}
  2545  		return nil
  2546  	})
  2547  	if err != nil {
  2548  		return nil
  2549  	}
  2550  	return blockNode
  2551  }
  2552  
  2553  func GetHeightHashToNodeInfo(
  2554  	handle *badger.DB, height uint32, hash *BlockHash, bitcoinNodes bool) *BlockNode {
  2555  
  2556  	var blockNode *BlockNode
  2557  	handle.View(func(txn *badger.Txn) error {
  2558  		blockNode = GetHeightHashToNodeInfoWithTxn(txn, height, hash, bitcoinNodes)
  2559  		return nil
  2560  	})
  2561  	return blockNode
  2562  }
  2563  
  2564  func PutHeightHashToNodeInfoWithTxn(txn *badger.Txn, node *BlockNode, bitcoinNodes bool) error {
  2565  
  2566  	key := _heightHashToNodeIndexKey(node.Height, node.Hash, bitcoinNodes)
  2567  	serializedNode, err := SerializeBlockNode(node)
  2568  	if err != nil {
  2569  		return errors.Wrapf(err, "PutHeightHashToNodeInfoWithTxn: Problem serializing node")
  2570  	}
  2571  
  2572  	if err := txn.Set(key, serializedNode); err != nil {
  2573  		return err
  2574  	}
  2575  	return nil
  2576  }
  2577  
  2578  func PutHeightHashToNodeInfo(node *BlockNode, handle *badger.DB, bitcoinNodes bool) error {
  2579  	err := handle.Update(func(txn *badger.Txn) error {
  2580  		return PutHeightHashToNodeInfoWithTxn(txn, node, bitcoinNodes)
  2581  	})
  2582  
  2583  	if err != nil {
  2584  		return err
  2585  	}
  2586  
  2587  	return nil
  2588  }
  2589  
  2590  func DbDeleteHeightHashToNodeInfoWithTxn(
  2591  	node *BlockNode, txn *badger.Txn, bitcoinNodes bool) error {
  2592  
  2593  	return txn.Delete(_heightHashToNodeIndexKey(node.Height, node.Hash, bitcoinNodes))
  2594  }
  2595  
  2596  func DbBulkDeleteHeightHashToNodeInfo(
  2597  	nodes []*BlockNode, handle *badger.DB, bitcoinNodes bool) error {
  2598  
  2599  	err := handle.Update(func(txn *badger.Txn) error {
  2600  		for _, nn := range nodes {
  2601  			if err := DbDeleteHeightHashToNodeInfoWithTxn(nn, txn, bitcoinNodes); err != nil {
  2602  				return err
  2603  			}
  2604  		}
  2605  		return nil
  2606  	})
  2607  
  2608  	if err != nil {
  2609  		return err
  2610  	}
  2611  
  2612  	return nil
  2613  }
  2614  
  2615  // InitDbWithDeSoGenesisBlock initializes the database to contain only the
  2616  // genesis block.
  2617  func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB, eventManager *EventManager) error {
  2618  	// Construct a node for the genesis block. Its height is zero and it has
  2619  	// no parents. Its difficulty should be set to the initial
  2620  	// difficulty specified in the parameters and it should be assumed to be
  2621  	// valid and stored by the end of this function.
  2622  	genesisBlock := params.GenesisBlock
  2623  	diffTarget := MustDecodeHexBlockHash(params.MinDifficultyTargetHex)
  2624  	blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex)
  2625  	genesisNode := NewBlockNode(
  2626  		nil, // Parent
  2627  		blockHash,
  2628  		0, // Height
  2629  		diffTarget,
  2630  		BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]), // CumWork
  2631  		genesisBlock.Header, // Header
  2632  		StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated, // Status
  2633  	)
  2634  
  2635  	// Set the fields in the db to reflect the current state of our chain.
  2636  	//
  2637  	// Set the best hash to the genesis block in the db since its the only node
  2638  	// we're currently aware of. Set it for both the header chain and the block
  2639  	// chain.
  2640  	if err := PutBestHash(blockHash, handle, ChainTypeDeSoBlock); err != nil {
  2641  		return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block hash into db for block chain")
  2642  	}
  2643  	// Add the genesis block to the (hash -> block) index.
  2644  	if err := PutBlock(genesisBlock, handle); err != nil {
  2645  		return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting genesis block into db")
  2646  	}
  2647  	// Add the genesis block to the (height, hash -> node info) index in the db.
  2648  	if err := PutHeightHashToNodeInfo(genesisNode, handle, false /*bitcoinNodes*/); err != nil {
  2649  		return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting (height, hash -> node) in db")
  2650  	}
  2651  	if err := DbPutNanosPurchased(handle, params.DeSoNanosPurchasedAtGenesis); err != nil {
  2652  		return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting NanosPurchased into db")
  2653  	}
  2654  	if err := DbPutGlobalParamsEntry(handle, InitialGlobalParamsEntry); err != nil {
  2655  		return errors.Wrapf(err, "InitDbWithGenesisBlock: Problem putting GlobalParamsEntry into db for block chain")
  2656  	}
  2657  
  2658  	// We apply seed transactions here. This step is useful for setting
  2659  	// up the blockchain with a particular set of transactions, e.g. when
  2660  	// hard forking the chain.
  2661  	//
  2662  	// TODO: Right now there's an issue where if we hit an error during this
  2663  	// step of the initialization, the next time we run the program it will
  2664  	// think things are initialized because we set the best block hash at the
  2665  	// top. We should fix this at some point so that an error in this step
  2666  	// wipes out the best hash.
  2667  	utxoView, err := NewUtxoView(handle, params, nil)
  2668  	if err != nil {
  2669  		return fmt.Errorf(
  2670  			"InitDbWithDeSoGenesisBlock: Error initializing UtxoView: %v", err)
  2671  	}
  2672  
  2673  	// Add the seed balances to the view.
  2674  	for index, txOutput := range params.SeedBalances {
  2675  		outputKey := UtxoKey{
  2676  			TxID:  BlockHash{},
  2677  			Index: uint32(index),
  2678  		}
  2679  		utxoEntry := UtxoEntry{
  2680  			AmountNanos: txOutput.AmountNanos,
  2681  			PublicKey:   txOutput.PublicKey,
  2682  			BlockHeight: 0,
  2683  			// Just make this a normal transaction so that we don't have to wait for
  2684  			// the block reward maturity.
  2685  			UtxoType: UtxoTypeOutput,
  2686  			UtxoKey:  &outputKey,
  2687  		}
  2688  
  2689  		_, err := utxoView._addUtxo(&utxoEntry)
  2690  		if err != nil {
  2691  			return fmt.Errorf("InitDbWithDeSoGenesisBlock: Error adding "+
  2692  				"seed balance at index %v ; output: %v: %v", index, txOutput, err)
  2693  		}
  2694  	}
  2695  
  2696  	// Add the seed txns to the view
  2697  	utxoOpsForBlock := [][]*UtxoOperation{}
  2698  	for txnIndex, txnHex := range params.SeedTxns {
  2699  		txnBytes, err := hex.DecodeString(txnHex)
  2700  		if err != nil {
  2701  			return fmt.Errorf(
  2702  				"InitDbWithDeSoGenesisBlock: Error decoding seed "+
  2703  					"txn HEX: %v, txn index: %v, txn hex: %v",
  2704  				err, txnIndex, txnHex)
  2705  		}
  2706  		txn := &MsgDeSoTxn{}
  2707  		if err := txn.FromBytes(txnBytes); err != nil {
  2708  			return fmt.Errorf(
  2709  				"InitDbWithDeSoGenesisBlock: Error decoding seed "+
  2710  					"txn BYTES: %v, txn index: %v, txn hex: %v",
  2711  				err, txnIndex, txnHex)
  2712  		}
  2713  		// Important: passing ignoreUtxos=true makes it so that the inputs/outputs
  2714  		// aren't processed, which is what we want for seed transactions.
  2715  		// Set txnSizeBytes to 0 here as the minimum network fee is 0 at genesis block, so there is no need to serialize
  2716  		// these transactions to check if they meet the minimum network fee requirement.
  2717  		var utxoOpsForTxn []*UtxoOperation
  2718  		utxoOpsForTxn, _, _, _, err = utxoView.ConnectTransaction(
  2719  			txn, txn.Hash(), 0, 0 /*blockHeight*/, false /*verifySignatures*/, true /*ignoreUtxos*/)
  2720  		if err != nil {
  2721  			return fmt.Errorf(
  2722  				"InitDbWithDeSoGenesisBlock: Error connecting transaction: %v, "+
  2723  					"txn index: %v, txn hex: %v",
  2724  				err, txnIndex, txnHex)
  2725  		}
  2726  		utxoOpsForBlock = append(utxoOpsForBlock, utxoOpsForTxn)
  2727  	}
  2728  
  2729  	// If we have an event manager, initialize the genesis block with the current
  2730  	// state of the view.
  2731  	if eventManager != nil {
  2732  		eventManager.blockConnected(&BlockEvent{
  2733  			Block:    genesisBlock,
  2734  			UtxoView: utxoView,
  2735  			UtxoOps:  utxoOpsForBlock,
  2736  		})
  2737  	}
  2738  
  2739  	// Flush all the data in the view.
  2740  	err = utxoView.FlushToDb()
  2741  	if err != nil {
  2742  		return fmt.Errorf(
  2743  			"InitDbWithDeSoGenesisBlock: Error flushing seed txns to DB: %v", err)
  2744  	}
  2745  
  2746  	return nil
  2747  }
  2748  
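        // GetBlockIndex loads every BlockNode stored in the node index into a map keyed by
        // block hash, linking each node to its parent as it goes. It returns an error if any
        // node's parent is missing from the index, skipping the genesis block and nodes with
        // an empty PrevBlockHash.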
  2749  func GetBlockIndex(handle *badger.DB, bitcoinNodes bool) (map[BlockHash]*BlockNode, error) {
  2750  	blockIndex := make(map[BlockHash]*BlockNode)
  2751  
  2752  	prefix := _heightHashToNodeIndexPrefix(bitcoinNodes)
  2753  
  2754  	err := handle.View(func(txn *badger.Txn) error {
  2755  		opts := badger.DefaultIteratorOptions
  2756  		nodeIterator := txn.NewIterator(opts)
  2757  		defer nodeIterator.Close()
  2758  		for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() {
  2759  			var blockNode *BlockNode
  2760  
  2761  			// Don't bother checking the key. We assume that the key lines up
  2762  			// with what we've stored in the value in terms of (height, block hash).
  2763  			item := nodeIterator.Item()
  2764  			err := item.Value(func(blockNodeBytes []byte) error {
  2765  				// Deserialize the block node.
  2766  				var err error
  2767  				// TODO: There is room for optimization here by pre-allocating a
  2768  				// contiguous list of block nodes and then populating that list
  2769  				// rather than having each blockNode be a stand-alone allocation.
  2770  				blockNode, err = DeserializeBlockNode(blockNodeBytes)
  2771  				if err != nil {
  2772  					return err
  2773  				}
  2774  				return nil
  2775  			})
  2776  			if err != nil {
  2777  				return err
  2778  			}
  2779  
  2780  			// If we got here it means we read a blockNode successfully. Store it
  2781  			// into our node index.
  2782  			blockIndex[*blockNode.Hash] = blockNode
  2783  
  2784  			// Find the parent of this block, which should already have been read
  2785  			// in and connect it. Skip the genesis block, which has height 0. Also
  2786  			// skip the block if its PrevBlockHash is empty, which will be true for
  2787  			// the BitcoinStartBlockNode.
  2788  			//
  2789  			// TODO: There is room for optimization here by keeping a reference to
  2790  			// the last node we've iterated over and checking if that node is the
  2791  			// parent. Doing this would avoid an expensive hashmap check to get
  2792  			// the parent by its block hash.
  2793  			if blockNode.Height == 0 || (*blockNode.Header.PrevBlockHash == BlockHash{}) {
  2794  				continue
  2795  			}
  2796  			if parent, ok := blockIndex[*blockNode.Header.PrevBlockHash]; ok {
  2797  				// We found the parent node so connect it.
  2798  				blockNode.Parent = parent
  2799  			} else {
  2800  				// In this case we didn't find the parent so error. There shouldn't
  2801  				// be any unconnected nodes in our block index.
  2802  				return fmt.Errorf("GetBlockIndex: Could not find parent for blockNode: %+v", blockNode)
  2803  			}
  2804  		}
  2805  		return nil
  2806  	})
  2807  	if err != nil {
  2808  		return nil, errors.Wrapf(err, "GetBlockIndex: Problem reading block index from db")
  2809  	}
  2810  
  2811  	return blockIndex, nil
  2812  }
  2813  
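        // GetBestChain walks backwards from tipNode using the Parent pointers and returns the
        // chain ordered from oldest block to tip. Every node on the chain must have been
        // validated (either as a DeSo block or as a Bitcoin header); otherwise an error is
        // returned.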
  2814  func GetBestChain(tipNode *BlockNode, blockIndex map[BlockHash]*BlockNode) ([]*BlockNode, error) {
  2815  	reversedBestChain := []*BlockNode{}
  2816  	for tipNode != nil {
  2817  		if (tipNode.Status&StatusBlockValidated) == 0 &&
  2818  			(tipNode.Status&StatusBitcoinHeaderValidated) == 0 {
  2819  
  2820  			return nil, fmt.Errorf("GetBestChain: Invalid node found in main chain: %+v", tipNode)
  2821  		}
  2822  
  2823  		reversedBestChain = append(reversedBestChain, tipNode)
  2824  		tipNode = tipNode.Parent
  2825  	}
  2826  
  2827  	bestChain := make([]*BlockNode, len(reversedBestChain))
  2828  	for ii := 0; ii < len(reversedBestChain); ii++ {
  2829  		bestChain[ii] = reversedBestChain[len(reversedBestChain)-1-ii]
  2830  	}
  2831  
  2832  	return bestChain, nil
  2833  }
  2834  
  2835  // RandomBytes returns a []byte with random values.
  2836  func RandomBytes(numBytes int32) []byte {
  2837  	randomBytes := make([]byte, numBytes)
  2838  	_, err := rand.Read(randomBytes)
  2839  	if err != nil {
  2840  		glog.Errorf("Problem reading random bytes: %v", err)
  2841  	}
  2842  	return randomBytes
  2843  }
  2844  
  2845  // RandomBytesHex returns a hex string representing numBytes of
  2846  // entropy.
  2847  func RandomBytesHex(numBytes int32) string {
  2848  	return hex.EncodeToString(RandomBytes(numBytes))
  2849  }
  2850  
  2851  // RandInt64 returns a random int64 in the range [0, max).
  2852  func RandInt64(max int64) int64 {
  2853  	val, err := rand.Int(rand.Reader, big.NewInt(max))
  2854  	if err != nil {
  2855  		glog.Errorf("Problem generating random int64: %v", err)
  2856  	}
  2857  	return val.Int64()
  2858  }
  2859  
  2860  // RandInt32 returns a random int32 in the range [0, max).
  2861  func RandInt32(max int32) int32 {
  2862  	val, err := rand.Int(rand.Reader, big.NewInt(int64(max)))
  2863  	if err != nil {
  2864  		glog.Errorf("Problem generating random int32: %v", err)
  2865  	}
  2866  	if val.Int64() > math.MaxInt32 {
  2867  		glog.Errorf("Generated a random number out of range: %d (max: %d)", val.Int64(), math.MaxInt32)
  2868  	}
  2869  	// This cast is OK since we initialized the number to be
  2870  	// < MaxInt32 above.
  2871  	return int32(val.Int64())
  2872  }
  2873  
  2874  // PPrintJSON pretty-prints a JSON object.
  2875  func PPrintJSON(xx interface{}) {
  2876  	yy, _ := json.MarshalIndent(xx, "", "  ")
  2877  	log.Println(string(yy))
  2878  }
  2879  
  2880  func BlocksPerDuration(duration time.Duration, timeBetweenBlocks time.Duration) uint32 {
  2881  	return uint32(int64(duration) / int64(timeBetweenBlocks))
  2882  }
  2883  
  2884  func PkToString(pk []byte, params *DeSoParams) string {
  2885  	return Base58CheckEncode(pk, false, params)
  2886  }
  2887  
  2888  func PrivToString(priv []byte, params *DeSoParams) string {
  2889  	return Base58CheckEncode(priv, true, params)
  2890  }
  2891  
  2892  func PkToStringMainnet(pk []byte) string {
  2893  	return Base58CheckEncode(pk, false, &DeSoMainnetParams)
  2894  }
  2895  
  2896  func PkToStringBoth(pk []byte) string {
  2897  	return PkToStringMainnet(pk) + ":" + PkToStringTestnet(pk)
  2898  }
  2899  
  2900  func PkToStringTestnet(pk []byte) string {
  2901  	return Base58CheckEncode(pk, false, &DeSoTestnetParams)
  2902  }
  2903  
  2904  func DbGetTxindexTip(handle *badger.DB) *BlockHash {
  2905  	return _getBlockHashForPrefix(handle, _KeyTransactionIndexTip)
  2906  }
  2907  
  2908  func DbPutTxindexTipWithTxn(dbTxn *badger.Txn, tipHash *BlockHash) error {
  2909  	return dbTxn.Set(_KeyTransactionIndexTip, tipHash[:])
  2910  }
  2911  
  2912  func DbPutTxindexTip(handle *badger.DB, tipHash *BlockHash) error {
  2913  	return handle.Update(func(txn *badger.Txn) error {
  2914  		return DbPutTxindexTipWithTxn(txn, tipHash)
  2915  	})
  2916  }
  2917  
  2918  func _DbTxindexPublicKeyNextIndexPrefix(publicKey []byte) []byte {
  2919  	return append(append([]byte{}, _PrefixPublicKeyToNextIndex...), publicKey...)
  2920  }
  2921  
  2922  func DbTxindexPublicKeyPrefix(publicKey []byte) []byte {
  2923  	return append(append([]byte{}, _PrefixPublicKeyIndexToTransactionIDs...), publicKey...)
  2924  }
  2925  
  2926  func DbTxindexPublicKeyIndexToTxnKey(publicKey []byte, index uint32) []byte {
  2927  	prefix := DbTxindexPublicKeyPrefix(publicKey)
  2928  	return append(prefix, _EncodeUint32(index)...)
  2929  }
  2930  
  2931  func DbGetTxindexTxnsForPublicKeyWithTxn(dbTxn *badger.Txn, publicKey []byte) []*BlockHash {
  2932  	txIDs := []*BlockHash{}
  2933  	_, valsFound, err := _enumerateKeysForPrefixWithTxn(dbTxn, DbTxindexPublicKeyPrefix(publicKey))
  2934  	if err != nil {
  2935  		return txIDs
  2936  	}
  2937  	for _, txIDBytes := range valsFound {
  2938  		blockHash := &BlockHash{}
  2939  		copy(blockHash[:], txIDBytes[:])
  2940  		txIDs = append(txIDs, blockHash)
  2941  	}
  2942  
  2943  	return txIDs
  2944  }
  2945  
  2946  func DbGetTxindexTxnsForPublicKey(handle *badger.DB, publicKey []byte) []*BlockHash {
  2947  	txIDs := []*BlockHash{}
  2948  	handle.View(func(dbTxn *badger.Txn) error {
  2949  		txIDs = DbGetTxindexTxnsForPublicKeyWithTxn(dbTxn, publicKey)
  2950  		return nil
  2951  	})
  2952  	return txIDs
  2953  }
  2954  
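        // _DbGetTxindexNextIndexForPublicKeBySeekWithTxn computes the next index for a public
        // key by seeking in reverse: it positions the iterator just past the largest possible
        // index under the key's prefix, reads the highest entry present, and returns that
        // index plus one (or zero if the public key has no entries). It serves as a fallback
        // when no explicit next-index value has been stored for the public key.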
  2955  func _DbGetTxindexNextIndexForPublicKeBySeekWithTxn(dbTxn *badger.Txn, publicKey []byte) uint64 {
  2956  	dbPrefixx := DbTxindexPublicKeyPrefix(publicKey)
  2957  
  2958  	opts := badger.DefaultIteratorOptions
  2959  
  2960  	opts.PrefetchValues = false
  2961  
  2962  	// Go in reverse order.
  2963  	opts.Reverse = true
  2964  
  2965  	it := dbTxn.NewIterator(opts)
  2966  	defer it.Close()
  2967  	// Since we iterate backwards, the prefix must be bigger than all possible
  2968  	// counts that could actually exist. We use four bytes since the index is
  2969  	// encoded as a 32-bit big-endian byte slice, which will be four bytes long.
  2970  	maxBigEndianUint32Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF}
  2971  	prefix := append([]byte{}, dbPrefixx...)
  2972  	prefix = append(prefix, maxBigEndianUint32Bytes...)
  2973  	for it.Seek(prefix); it.ValidForPrefix(dbPrefixx); it.Next() {
  2974  		countKey := it.Item().Key()
  2975  
  2976  		// Strip the prefix off the key and check its length. If it contains
  2977  		// a big-endian uint32 then it should be at least four bytes.
  2978  		countKey = countKey[len(dbPrefixx):]
  2979  		if len(countKey) < len(maxBigEndianUint32Bytes) {
  2980  			glog.Errorf("DbGetTxindexNextIndexForPublicKey: Invalid public key "+
  2981  				"index key length %d should be at least %d",
  2982  				len(countKey), len(maxBigEndianUint32Bytes))
  2983  			return 0
  2984  		}
  2985  
  2986  		countVal := DecodeUint32(countKey[:len(maxBigEndianUint32Bytes)])
  2987  		return uint64(countVal + 1)
  2988  	}
  2989  	// If we get here it means we didn't find anything in the db so return zero.
  2990  	return 0
  2991  }
  2992  
  2993  func DbGetTxindexNextIndexForPublicKey(handle *badger.DB, publicKey []byte) *uint64 {
  2994  	var nextIndex *uint64
  2995  	handle.View(func(txn *badger.Txn) error {
  2996  		nextIndex = _DbGetTxindexNextIndexForPublicKeyWithTxn(txn, publicKey)
  2997  		return nil
  2998  	})
  2999  	return nextIndex
  3000  }
  3001  
  3002  func _DbGetTxindexNextIndexForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte) *uint64 {
  3003  	key := _DbTxindexPublicKeyNextIndexPrefix(publicKey)
  3004  	valItem, err := txn.Get(key)
  3005  	if err != nil {
  3006  		// If no next index has been stored for this public key, fall back to computing it
        		// by seeking over any existing entries (which returns 0 if there are none).
  3007  		if errors.Is(err, badger.ErrKeyNotFound) {
  3008  			nextIndexVal := _DbGetTxindexNextIndexForPublicKeBySeekWithTxn(txn, publicKey)
  3009  			return &nextIndexVal
  3010  		} else {
  3011  			return nil
  3012  		}
  3013  	}
  3014  	valBytes, err := valItem.ValueCopy(nil)
  3015  	if err != nil {
  3016  		return nil
  3017  	}
  3018  	nextIndexVal, bytesRead := Uvarint(valBytes)
  3019  	if bytesRead <= 0 {
  3020  		return nil
  3021  	}
  3022  	return &nextIndexVal
  3023  }
  3025  
  3026  func DbPutTxindexNextIndexForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte, nextIndex uint64) error {
  3027  	key := _DbTxindexPublicKeyNextIndexPrefix(publicKey)
  3028  	valBuf := UintToBuf(nextIndex)
  3029  
  3030  	return txn.Set(key, valBuf)
  3031  }
  3032  
  3033  func DbDeleteTxindexNextIndexForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte) error {
  3034  	key := _DbTxindexPublicKeyNextIndexPrefix(publicKey)
  3035  	return txn.Delete(key)
  3036  }
  3037  
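        // DbPutTxindexPublicKeyToTxnMappingSingleWithTxn appends a single txID to the list of
        // transactions indexed under publicKey. It reads the next available index for the key,
        // bumps the stored next-index value, and writes the (publicKey, index) -> txID mapping.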
  3038  func DbPutTxindexPublicKeyToTxnMappingSingleWithTxn(
  3039  	dbTxn *badger.Txn, publicKey []byte, txID *BlockHash) error {
  3040  
  3041  	nextIndex := _DbGetTxindexNextIndexForPublicKeyWithTxn(dbTxn, publicKey)
  3042  	if nextIndex == nil {
  3043  		return fmt.Errorf("DbPutTxindexPublicKeyToTxnMappingSingleWithTxn: Error getting next index for public key")
  3044  	}
  3045  	key := DbTxindexPublicKeyIndexToTxnKey(publicKey, uint32(*nextIndex))
  3046  	err := DbPutTxindexNextIndexForPublicKeyWithTxn(dbTxn, publicKey, uint64(*nextIndex+1))
  3047  	if err != nil {
  3048  		return err
  3049  	}
  3050  	return dbTxn.Set(key, txID[:])
  3051  }
  3052  
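        // DbDeleteTxindexPublicKeyToTxnMappingSingleWithTxn removes a single txID from the list
        // of transactions indexed under publicKey. Because entries are stored under sequential
        // indexes, it deletes every mapping for the key (along with the stored next-index value)
        // and then re-adds all of them except the one being removed.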
  3053  func DbDeleteTxindexPublicKeyToTxnMappingSingleWithTxn(
  3054  	dbTxn *badger.Txn, publicKey []byte, txID *BlockHash) error {
  3055  
  3056  	// Get all the mappings corresponding to the public key passed in.
  3057  	// TODO: This is inefficient but reorgs are rare so whatever.
  3058  	txIDsInDB := DbGetTxindexTxnsForPublicKeyWithTxn(dbTxn, publicKey)
  3059  	numMappingsInDB := len(txIDsInDB)
  3060  
  3061  	// Loop over the list of txIDs and delete the one
  3062  	// corresponding to the passed-in transaction. Note we can assume that
  3063  	// only one occurrence exists in the list.
  3064  	// TODO: Looping backwards would be more efficient.
  3065  	for ii, singleTxID := range txIDsInDB {
  3066  		if *singleTxID == *txID {
  3067  			// If we get here it means the transaction we need to delete is at
  3068  			// this index.
  3069  			txIDsInDB = append(txIDsInDB[:ii], txIDsInDB[ii+1:]...)
  3070  			break
  3071  		}
  3072  	}
  3073  
  3074  	// Delete all the mappings from the db.
  3075  	for pkIndex := 0; pkIndex < numMappingsInDB; pkIndex++ {
  3076  		key := DbTxindexPublicKeyIndexToTxnKey(publicKey, uint32(pkIndex))
  3077  		if err := dbTxn.Delete(key); err != nil {
  3078  			return err
  3079  		}
  3080  	}
  3081  
  3082  	// Delete the next index for this public key
  3083  	err := DbDeleteTxindexNextIndexForPublicKeyWithTxn(dbTxn, publicKey)
  3084  	if err != nil {
  3085  		return err
  3086  	}
  3087  
  3088  	// Re-add all the mappings to the db except the one we just deleted.
  3089  	for _, singleTxID := range txIDsInDB {
  3090  		if err := DbPutTxindexPublicKeyToTxnMappingSingleWithTxn(dbTxn, publicKey, singleTxID); err != nil {
  3091  			return err
  3092  		}
  3093  	}
  3094  
  3095  	// At this point the db should contain all transactions except the one
  3096  	// that was deleted.
  3097  	return nil
  3098  }
  3099  
  3100  func DbTxindexTxIDKey(txID *BlockHash) []byte {
  3101  	return append(append([]byte{}, _PrefixTransactionIDToMetadata...), txID[:]...)
  3102  }
  3103  
  3104  type AffectedPublicKey struct {
  3105  	PublicKeyBase58Check string
  3106  	// Metadata about how this public key was affected by the transaction.
  3107  	Metadata string
  3108  }
  3109  
  3110  type BasicTransferTxindexMetadata struct {
  3111  	TotalInputNanos  uint64
  3112  	TotalOutputNanos uint64
  3113  	FeeNanos         uint64
  3114  	UtxoOpsDump      string
  3115  	UtxoOps          []*UtxoOperation
  3116  	DiamondLevel     int64
  3117  	PostHashHex      string
  3118  }
  3119  type BitcoinExchangeTxindexMetadata struct {
  3120  	BitcoinSpendAddress string
  3121  	// DeSoOutputPubKeyBase58Check = TransactorPublicKeyBase58Check
  3122  	SatoshisBurned uint64
  3123  	// NanosCreated = 0 OR TotalOutputNanos+FeeNanos
  3124  	NanosCreated uint64
  3125  	// TotalNanosPurchasedBefore = TotalNanosPurchasedAfter - NanosCreated
  3126  	TotalNanosPurchasedBefore uint64
  3127  	TotalNanosPurchasedAfter  uint64
  3128  	BitcoinTxnHash            string
  3129  }
  3130  type CreatorCoinTxindexMetadata struct {
  3131  	OperationType string
  3132  	// TransactorPublicKeyBase58Check = TransactorPublicKeyBase58Check
  3133  	// CreatorPublicKeyBase58Check in AffectedPublicKeys
  3134  
  3135  	// Differs depending on OperationType.
  3136  	DeSoToSellNanos        uint64
  3137  	CreatorCoinToSellNanos uint64
  3138  	DeSoToAddNanos         uint64
  3139  
  3140  	// Rosetta needs to know how much DESO was added or removed so it can
  3141  	// model the change to the total deso locked in the creator coin
  3142  	DESOLockedNanosDiff int64
  3143  }
  3144  
  3145  type CreatorCoinTransferTxindexMetadata struct {
  3146  	CreatorUsername            string
  3147  	CreatorCoinToTransferNanos uint64
  3148  	DiamondLevel               int64
  3149  	PostHashHex                string
  3150  }
  3151  
  3152  type UpdateProfileTxindexMetadata struct {
  3153  	ProfilePublicKeyBase58Check string
  3154  
  3155  	NewUsername    string
  3156  	NewDescription string
  3157  	NewProfilePic  string
  3158  
  3159  	NewCreatorBasisPoints uint64
  3160  
  3161  	NewStakeMultipleBasisPoints uint64
  3162  
  3163  	IsHidden bool
  3164  }
  3165  type SubmitPostTxindexMetadata struct {
  3166  	PostHashBeingModifiedHex string
  3167  	// PosterPublicKeyBase58Check = TransactorPublicKeyBase58Check
  3168  
  3169  	// If this is a reply to an existing post, ParentPostHashHex is set to the hash
        	// of the post being replied to.
  3170  	ParentPostHashHex string
  3171  	// ParentPosterPublicKeyBase58Check in AffectedPublicKeys
  3172  
  3173  	// The profiles that are mentioned are in the AffectedPublicKeys
  3174  	// MentionedPublicKeyBase58Check in AffectedPublicKeys
  3175  }
  3176  type LikeTxindexMetadata struct {
  3177  	// LikerPublicKeyBase58Check = TransactorPublicKeyBase58Check
  3178  	IsUnlike bool
  3179  
  3180  	PostHashHex string
  3181  	// PosterPublicKeyBase58Check in AffectedPublicKeys
  3182  }
  3183  type FollowTxindexMetadata struct {
  3184  	// FollowerPublicKeyBase58Check = TransactorPublicKeyBase58Check
  3185  	// FollowedPublicKeyBase58Check in AffectedPublicKeys
  3186  
  3187  	IsUnfollow bool
  3188  }
  3189  type PrivateMessageTxindexMetadata struct {
  3190  	// SenderPublicKeyBase58Check = TransactorPublicKeyBase58Check
  3191  	// RecipientPublicKeyBase58Check in AffectedPublicKeys
  3192  
  3193  	TimestampNanos uint64
  3194  }
  3195  type SwapIdentityTxindexMetadata struct {
  3196  	// ParamUpdater = TransactorPublicKeyBase58Check
  3197  
  3198  	FromPublicKeyBase58Check string
  3199  	ToPublicKeyBase58Check   string
  3200  
  3201  	// Rosetta needs this information to track creator coin balances
  3202  	FromDeSoLockedNanos uint64
  3203  	ToDeSoLockedNanos   uint64
  3204  }
  3205  
  3206  type NFTBidTxindexMetadata struct {
  3207  	NFTPostHashHex string
  3208  	SerialNumber   uint64
  3209  	BidAmountNanos uint64
  3210  }
  3211  
  3212  type AcceptNFTBidTxindexMetadata struct {
  3213  	NFTPostHashHex              string
  3214  	SerialNumber                uint64
  3215  	BidAmountNanos              uint64
  3216  	CreatorCoinRoyaltyNanos     uint64
  3217  	CreatorPublicKeyBase58Check string
  3218  }
  3219  
  3220  type NFTTransferTxindexMetadata struct {
  3221  	NFTPostHashHex string
  3222  	SerialNumber   uint64
  3223  }
  3224  
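        // TransactionMetadata is the value stored in txindex for each transaction ID. It holds
        // everything needed to surface a transaction (and notifications for its affected public
        // keys) without re-reading the full transaction from the block database, plus one
        // optional metadata struct per transaction type.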
  3225  type TransactionMetadata struct {
  3226  	BlockHashHex    string
  3227  	TxnIndexInBlock uint64
  3228  	TxnType         string
  3229  	// All transactions have a public key who executed the transaction and some
  3230  	// public keys that are affected by the transaction. Notifications are created
  3231  	// for the affected public keys. _getPublicKeysForTxn uses this to set entries in the
  3232  	// database.
  3233  	TransactorPublicKeyBase58Check string
  3234  	AffectedPublicKeys             []*AffectedPublicKey
  3235  
  3236  	// We store these outputs so we don't have to load the full transaction from disk
  3237  	// when looking up output amounts
  3238  	TxnOutputs []*DeSoOutput
  3239  
  3240  	BasicTransferTxindexMetadata       *BasicTransferTxindexMetadata       `json:",omitempty"`
  3241  	BitcoinExchangeTxindexMetadata     *BitcoinExchangeTxindexMetadata     `json:",omitempty"`
  3242  	CreatorCoinTxindexMetadata         *CreatorCoinTxindexMetadata         `json:",omitempty"`
  3243  	CreatorCoinTransferTxindexMetadata *CreatorCoinTransferTxindexMetadata `json:",omitempty"`
  3244  	UpdateProfileTxindexMetadata       *UpdateProfileTxindexMetadata       `json:",omitempty"`
  3245  	SubmitPostTxindexMetadata          *SubmitPostTxindexMetadata          `json:",omitempty"`
  3246  	LikeTxindexMetadata                *LikeTxindexMetadata                `json:",omitempty"`
  3247  	FollowTxindexMetadata              *FollowTxindexMetadata              `json:",omitempty"`
  3248  	PrivateMessageTxindexMetadata      *PrivateMessageTxindexMetadata      `json:",omitempty"`
  3249  	SwapIdentityTxindexMetadata        *SwapIdentityTxindexMetadata        `json:",omitempty"`
  3250  	NFTBidTxindexMetadata              *NFTBidTxindexMetadata              `json:",omitempty"`
  3251  	AcceptNFTBidTxindexMetadata        *AcceptNFTBidTxindexMetadata        `json:",omitempty"`
  3252  	NFTTransferTxindexMetadata         *NFTTransferTxindexMetadata         `json:",omitempty"`
  3253  }
  3254  
  3255  func DBCheckTxnExistenceWithTxn(txn *badger.Txn, txID *BlockHash) bool {
  3256  	key := DbTxindexTxIDKey(txID)
  3257  	_, err := txn.Get(key)
  3258  	if err != nil {
  3259  		return false
  3260  	}
  3261  	return true
  3262  }
  3263  
  3264  func DbCheckTxnExistence(handle *badger.DB, txID *BlockHash) bool {
  3265  	var exists bool
  3266  	handle.View(func(txn *badger.Txn) error {
  3267  		exists = DBCheckTxnExistenceWithTxn(txn, txID)
  3268  		return nil
  3269  	})
  3270  	return exists
  3271  }
  3272  
  3273  func DbGetTxindexTransactionRefByTxIDWithTxn(txn *badger.Txn, txID *BlockHash) *TransactionMetadata {
  3274  	key := DbTxindexTxIDKey(txID)
  3275  	valObj := TransactionMetadata{}
  3276  
  3277  	valItem, err := txn.Get(key)
  3278  	if err != nil {
  3279  		return nil
  3280  	}
  3281  	valBytes, err := valItem.ValueCopy(nil)
  3282  	if err != nil {
  3283  		return nil
  3284  	}
  3285  	if err := gob.NewDecoder(bytes.NewReader(valBytes)).Decode(&valObj); err != nil {
  3286  		return nil
  3287  	}
  3288  	return &valObj
  3289  }
  3290  
  3291  func DbGetTxindexTransactionRefByTxID(handle *badger.DB, txID *BlockHash) *TransactionMetadata {
  3292  	var valObj *TransactionMetadata
  3293  	handle.View(func(txn *badger.Txn) error {
  3294  		valObj = DbGetTxindexTransactionRefByTxIDWithTxn(txn, txID)
  3295  		return nil
  3296  	})
  3297  	return valObj
  3298  }
  3299  func DbPutTxindexTransactionWithTxn(
  3300  	txn *badger.Txn, txID *BlockHash, txnMeta *TransactionMetadata) error {
  3301  
  3302  	key := DbTxindexTxIDKey(txID)
  3303  	valBuf := bytes.NewBuffer([]byte{})
  3304  	if err := gob.NewEncoder(valBuf).Encode(txnMeta); err != nil {
        		return errors.Wrapf(err, "DbPutTxindexTransactionWithTxn: Problem encoding txnMeta")
        	}
  3305  
  3306  	return txn.Set(key, valBuf.Bytes())
  3307  }
  3308  
  3309  func DbPutTxindexTransaction(
  3310  	handle *badger.DB, txID *BlockHash, txnMeta *TransactionMetadata) error {
  3311  
  3312  	return handle.Update(func(txn *badger.Txn) error {
  3313  		return DbPutTxindexTransactionWithTxn(txn, txID, txnMeta)
  3314  	})
  3315  }
  3316  
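        // _getPublicKeysForTxn returns the set of public keys that txindex should associate with
        // the given transaction: the transactor's public key plus every AffectedPublicKey in the
        // metadata. Keys that fail to decode are logged and skipped.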
  3317  func _getPublicKeysForTxn(
  3318  	txn *MsgDeSoTxn, txnMeta *TransactionMetadata, params *DeSoParams) map[PkMapKey]bool {
  3319  
  3320  	// Collect the public keys in the transaction.
  3321  	publicKeys := make(map[PkMapKey]bool)
  3322  
  3323  	// TODO: For AddStake transactions, we don't have a way of getting the implicit
  3324  	// outputs. This means that if you get paid from someone else staking to a post
  3325  	// after you, the output won't be explicitly included in the transaction, and so
  3326  	// it won't be added to our index. We should fix this at some point. I think the
  3327  	// "right way" to fix this problem is to index UTXOs rather than transactions (or
  3328  	// in addition to them).
  3329  	// TODO(updated): We can fix this by populating AffectedPublicKeys
  3330  
  3331  	// Add the TransactorPublicKey
  3332  	{
  3333  		res, _, err := Base58CheckDecode(txnMeta.TransactorPublicKeyBase58Check)
  3334  		if err != nil {
  3335  			glog.Errorf("_getPublicKeysForTxn: Error decoding "+
  3336  				"TransactorPublicKeyBase58Check: %v %v",
  3337  				txnMeta.TransactorPublicKeyBase58Check, err)
  3338  		} else {
  3339  			publicKeys[MakePkMapKey(res)] = true
  3340  		}
  3341  	}
  3342  
  3343  	// Add each AffectedPublicKey
  3344  	for _, affectedPk := range txnMeta.AffectedPublicKeys {
  3345  		res, _, err := Base58CheckDecode(affectedPk.PublicKeyBase58Check)
  3346  		if err != nil {
  3347  			glog.Errorf("_getPublicKeysForTxn: Error decoding AffectedPublicKey: %v %v %v",
  3348  				affectedPk.PublicKeyBase58Check, affectedPk.Metadata, err)
  3349  		} else {
  3350  			publicKeys[MakePkMapKey(res)] = true
  3351  		}
  3352  	}
  3353  
  3354  	return publicKeys
  3355  }
  3356  
  3357  func DbPutTxindexTransactionMappingsWithTxn(
  3358  	dbTx *badger.Txn, txn *MsgDeSoTxn, params *DeSoParams, txnMeta *TransactionMetadata) error {
  3359  
  3360  	txID := txn.Hash()
  3361  
  3362  	if err := DbPutTxindexTransactionWithTxn(dbTx, txID, txnMeta); err != nil {
  3363  		return fmt.Errorf("Problem adding txn to txindex transaction index: %v", err)
  3364  	}
  3365  
  3366  	// Get the public keys involved with this transaction.
  3367  	publicKeys := _getPublicKeysForTxn(txn, txnMeta, params)
  3368  
  3369  	// For each public key found, add the txID to its list.
  3370  	for pkFound := range publicKeys {
  3371  		// Simply add a new entry for each of the public keys found.
  3372  		if err := DbPutTxindexPublicKeyToTxnMappingSingleWithTxn(dbTx, pkFound[:], txID); err != nil {
  3373  			return err
  3374  		}
  3375  	}
  3376  
  3377  	// If we get here, it means everything went smoothly.
  3378  	return nil
  3379  }
  3380  
  3381  func DbPutTxindexTransactionMappings(
  3382  	handle *badger.DB, desoTxn *MsgDeSoTxn, params *DeSoParams, txnMeta *TransactionMetadata) error {
  3383  
  3384  	return handle.Update(func(dbTx *badger.Txn) error {
  3385  		return DbPutTxindexTransactionMappingsWithTxn(
  3386  			dbTx, desoTxn, params, txnMeta)
  3387  	})
  3388  }
  3389  
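        // DbDeleteTxindexTransactionMappingsWithTxn undoes DbPutTxindexTransactionMappingsWithTxn:
        // it removes the txID from the per-public-key lists of every key the transaction touched
        // and then deletes the transaction's metadata entry. It is typically used when a block is
        // disconnected, e.g. during a reorg.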
  3390  func DbDeleteTxindexTransactionMappingsWithTxn(
  3391  	dbTxn *badger.Txn, txn *MsgDeSoTxn, params *DeSoParams) error {
  3392  
  3393  	txID := txn.Hash()
  3394  
  3395  	// If the txnMeta isn't in the db then that's an error.
  3396  	txnMeta := DbGetTxindexTransactionRefByTxIDWithTxn(dbTxn, txID)
  3397  	if txnMeta == nil {
  3398  		return fmt.Errorf("DbDeleteTxindexTransactionMappingsWithTxn: Missing txnMeta for txID %v", txID)
  3399  	}
  3400  
  3401  	// Get the public keys involved with this transaction.
  3402  	publicKeys := _getPublicKeysForTxn(txn, txnMeta, params)
  3403  
  3404  	// For each public key found, delete the txID mapping from the db.
  3405  	for pkFound := range publicKeys {
  3406  		if err := DbDeleteTxindexPublicKeyToTxnMappingSingleWithTxn(dbTxn, pkFound[:], txID); err != nil {
  3407  			return err
  3408  		}
  3409  	}
  3410  
  3411  	// Delete the metadata
  3412  	transactionIndexKey := DbTxindexTxIDKey(txID)
  3413  	if err := dbTxn.Delete(transactionIndexKey); err != nil {
  3414  		return fmt.Errorf("Problem deleting transaction index key: %v", err)
  3415  	}
  3416  
  3417  	// If we get here, it means everything went smoothly.
  3418  	return nil
  3419  }
  3420  
  3421  func DbDeleteTxindexTransactionMappings(
  3422  	handle *badger.DB, txn *MsgDeSoTxn, params *DeSoParams) error {
  3423  
  3424  	return handle.Update(func(dbTx *badger.Txn) error {
  3425  		return DbDeleteTxindexTransactionMappingsWithTxn(dbTx, txn, params)
  3426  	})
  3427  }
  3428  
  3429  // DbGetTxindexFullTransactionByTxID
  3430  // TODO: This makes lookups inefficient when blocks are large. Shouldn't be a
  3431  // problem for a while, but keep an eye on it.
  3432  func DbGetTxindexFullTransactionByTxID(
  3433  	txindexDBHandle *badger.DB, blockchainDBHandle *badger.DB, txID *BlockHash) (
  3434  	_txn *MsgDeSoTxn, _txnMeta *TransactionMetadata) {
  3435  
  3436  	var txnFound *MsgDeSoTxn
  3437  	var txnMeta *TransactionMetadata
  3438  	err := txindexDBHandle.View(func(dbTxn *badger.Txn) error {
  3439  		txnMeta = DbGetTxindexTransactionRefByTxIDWithTxn(dbTxn, txID)
  3440  		if txnMeta == nil {
  3441  			return fmt.Errorf("DbGetTxindexFullTransactionByTxID: Transaction not found")
  3442  		}
  3443  		blockHashBytes, err := hex.DecodeString(txnMeta.BlockHashHex)
  3444  		if err != nil {
  3445  			return fmt.Errorf("DbGetTxindexFullTransactionByTxID: Error parsing block "+
  3446  				"hash hex: %v %v", txnMeta.BlockHashHex, err)
  3447  		}
  3448  		blockHash := &BlockHash{}
  3449  		copy(blockHash[:], blockHashBytes)
  3450  		blockFound, err := GetBlock(blockHash, blockchainDBHandle)
  3451  		if blockFound == nil || err != nil {
  3452  			return fmt.Errorf("DbGetTxindexFullTransactionByTxID: Block corresponding to txn not found")
  3453  		}
  3454  
  3455  		txnFound = blockFound.Txns[txnMeta.TxnIndexInBlock]
  3456  		return nil
  3457  	})
  3458  	if err != nil {
  3459  		return nil, nil
  3460  	}
  3461  
  3462  	return txnFound, txnMeta
  3463  }
  3464  
  3465  // =======================================================================================
  3466  // DeSo app code start
  3467  // =======================================================================================
  3468  
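        // The _dbKeyFor* helpers below construct the composite badger keys for the post indexes.
        // Each key is a one-byte prefix followed by the fields named in the function, with
        // integers encoded as big-endian byte slices so that iterating over a prefix yields
        // entries in sorted order.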
  3469  func _dbKeyForPostEntryHash(postHash *BlockHash) []byte {
  3470  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  3471  	prefixCopy := append([]byte{}, _PrefixPostHashToPostEntry...)
  3472  	key := append(prefixCopy, postHash[:]...)
  3473  	return key
  3474  }
  3475  func _dbKeyForPublicKeyPostHash(publicKey []byte, postHash *BlockHash) []byte {
  3476  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  3477  	key := append([]byte{}, _PrefixPosterPublicKeyPostHash...)
  3478  	key = append(key, publicKey...)
  3479  	key = append(key, postHash[:]...)
  3480  	return key
  3481  }
  3482  func _dbKeyForPosterPublicKeyTimestampPostHash(publicKey []byte, timestampNanos uint64, postHash *BlockHash) []byte {
  3483  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  3484  	key := append([]byte{}, _PrefixPosterPublicKeyTimestampPostHash...)
  3485  	key = append(key, publicKey...)
  3486  	key = append(key, EncodeUint64(timestampNanos)...)
  3487  	key = append(key, postHash[:]...)
  3488  	return key
  3489  }
  3490  func _dbKeyForTstampPostHash(tstampNanos uint64, postHash *BlockHash) []byte {
  3491  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  3492  	key := append([]byte{}, _PrefixTstampNanosPostHash...)
  3493  	key = append(key, EncodeUint64(tstampNanos)...)
  3494  	key = append(key, postHash[:]...)
  3495  	return key
  3496  }
  3497  func _dbKeyForCreatorBpsPostHash(creatorBps uint64, postHash *BlockHash) []byte {
  3498  	key := append([]byte{}, _PrefixCreatorBpsPostHash...)
  3499  	key = append(key, EncodeUint64(creatorBps)...)
  3500  	key = append(key, postHash[:]...)
  3501  	return key
  3502  }
  3503  func _dbKeyForStakeMultipleBpsPostHash(stakeMultipleBps uint64, postHash *BlockHash) []byte {
  3504  	key := append([]byte{}, _PrefixMultipleBpsPostHash...)
  3505  	key = append(key, EncodeUint64(stakeMultipleBps)...)
  3506  	key = append(key, postHash[:]...)
  3507  	return key
  3508  }
  3509  func _dbKeyForCommentParentStakeIDToPostHash(
  3510  	stakeID []byte, tstampNanos uint64, postHash *BlockHash) []byte {
  3511  	key := append([]byte{}, _PrefixCommentParentStakeIDToPostHash...)
  3512  	key = append(key, stakeID[:]...)
  3513  	key = append(key, EncodeUint64(tstampNanos)...)
  3514  	key = append(key, postHash[:]...)
  3515  	return key
  3516  }
  3517  
  3518  func DBGetPostEntryByPostHashWithTxn(
  3519  	txn *badger.Txn, postHash *BlockHash) *PostEntry {
  3520  
  3521  	key := _dbKeyForPostEntryHash(postHash)
  3522  	postEntryObj := &PostEntry{}
  3523  	postEntryItem, err := txn.Get(key)
  3524  	if err != nil {
  3525  		return nil
  3526  	}
  3527  	err = postEntryItem.Value(func(valBytes []byte) error {
  3528  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(postEntryObj)
  3529  	})
  3530  	if err != nil {
  3531  		glog.Errorf("DBGetPostEntryByPostHashWithTxn: Problem reading "+
  3532  			"PostEntry for postHash %v", postHash)
  3533  		return nil
  3534  	}
  3535  	return postEntryObj
  3536  }
  3537  
  3538  func DBGetPostEntryByPostHash(db *badger.DB, postHash *BlockHash) *PostEntry {
  3539  	var ret *PostEntry
  3540  	db.View(func(txn *badger.Txn) error {
  3541  		ret = DBGetPostEntryByPostHashWithTxn(txn, postHash)
  3542  		return nil
  3543  	})
  3544  	return ret
  3545  }
  3546  
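        // DBDeletePostEntryMappingsWithTxn deletes the PostEntry stored for postHash along with
        // its secondary index mappings: the comment parent index for comments, the poster/tstamp,
        // tstamp, creatorBps, and stakeMultiple indexes for regular posts, and any repost
        // mappings. If no entry exists for the hash, this is a no-op.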
  3547  func DBDeletePostEntryMappingsWithTxn(
  3548  	txn *badger.Txn, postHash *BlockHash, params *DeSoParams) error {
  3549  
  3550  	// First pull up the mapping that exists for the post hash passed in.
  3551  	// If one doesn't exist then there's nothing to do.
  3552  	postEntry := DBGetPostEntryByPostHashWithTxn(txn, postHash)
  3553  	if postEntry == nil {
  3554  		return nil
  3555  	}
  3556  
  3557  	// When a post exists, delete the mapping for the post.
  3558  	if err := txn.Delete(_dbKeyForPostEntryHash(postHash)); err != nil {
  3559  		return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Deleting "+
  3560  			"post mapping for post hash %v", postHash)
  3561  	}
  3562  
  3563  	// If the post is a comment we store it in a separate index. Comments are
  3564  	// technically posts but they really should be treated as their own entity.
  3565  	// The only reason they're not actually implemented that way is so that we
  3566  	// get code re-use.
  3567  	isComment := len(postEntry.ParentStakeID) == HashSizeBytes
  3568  	if isComment {
  3569  		// Extend the parent stake ID, which is a block hash, to 33 bytes, which
  3570  		// is the length of a public key and the standard length we use for this
  3571  		// key.
  3572  		extendedStakeID := append([]byte{}, postEntry.ParentStakeID...)
  3573  		extendedStakeID = append(extendedStakeID, 0x00)
  3574  		parentStakeIDKey := _dbKeyForCommentParentStakeIDToPostHash(
  3575  			extendedStakeID, postEntry.TimestampNanos, postEntry.PostHash)
  3576  		if err := txn.Delete(parentStakeIDKey); err != nil {
  3577  
  3578  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Problem "+
  3579  				"deleting mapping for comment: %v: %v", postEntry, err)
  3580  		}
  3581  	} else {
  3582  		if err := txn.Delete(_dbKeyForPosterPublicKeyTimestampPostHash(
  3583  			postEntry.PosterPublicKey, postEntry.TimestampNanos, postEntry.PostHash)); err != nil {
  3584  
  3585  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Deleting "+
  3586  				"public key mapping for post hash %v: %v", postHash, err)
  3587  		}
  3588  		if err := txn.Delete(_dbKeyForTstampPostHash(
  3589  			postEntry.TimestampNanos, postEntry.PostHash)); err != nil {
  3590  
  3591  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Deleting "+
  3592  				"tstamp mapping for post hash %v: %v", postHash, err)
  3593  		}
  3594  		if err := txn.Delete(_dbKeyForCreatorBpsPostHash(
  3595  			postEntry.CreatorBasisPoints, postEntry.PostHash)); err != nil {
  3596  
  3597  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Deleting "+
  3598  				"creatorBps mapping for post hash %v: %v", postHash, err)
  3599  		}
  3600  		if err := txn.Delete(_dbKeyForStakeMultipleBpsPostHash(
  3601  			postEntry.StakeMultipleBasisPoints, postEntry.PostHash)); err != nil {
  3602  
  3603  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Deleting "+
  3604  				"stakeMultiple mapping for post hash %v: %v", postHash, err)
  3605  		}
  3606  	}
  3607  
  3608  	// Delete the repost entries for the post.
  3609  	if IsVanillaRepost(postEntry) {
  3610  		if err := txn.Delete(
  3611  			_dbKeyForReposterPubKeyRepostedPostHashToRepostPostHash(postEntry.PosterPublicKey, *postEntry.RepostedPostHash)); err != nil {
  3612  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Problem deleting mapping for repostPostHash to ReposterPubKey: %v", err)
  3613  		}
  3614  		if err := txn.Delete(
  3615  			_dbKeyForRepostedPostHashReposterPubKey(postEntry.RepostedPostHash, postEntry.PosterPublicKey)); err != nil {
  3616  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Problem deleting "+
  3617  				"mapping for _dbKeyForRepostedPostHashReposterPubKey: %v", err)
  3618  		}
  3619  	} else if IsQuotedRepost(postEntry) {
  3620  		// Delete the quoted repost mapping.
  3621  		if err := txn.Delete(
  3622  			_dbKeyForRepostedPostHashReposterPubKeyRepostPostHash(
  3623  				postEntry.RepostedPostHash, postEntry.PosterPublicKey, postEntry.PostHash)); err != nil {
  3624  			return errors.Wrapf(err, "DbDeletePostEntryMappingsWithTxn: Problem deleting "+
  3625  				"mapping for _dbKeyForRepostedPostHashReposterPubKeyRepostPostHash: %v", err)
  3626  
  3627  		}
  3628  	}
  3629  
  3630  	return nil
  3631  }
  3632  
  3633  func DBDeletePostEntryMappings(
  3634  	handle *badger.DB, postHash *BlockHash, params *DeSoParams) error {
  3635  
  3636  	return handle.Update(func(txn *badger.Txn) error {
  3637  		return DBDeletePostEntryMappingsWithTxn(txn, postHash, params)
  3638  	})
  3639  }
  3640  
  3641  func DBPutPostEntryMappingsWithTxn(
  3642  	txn *badger.Txn, postEntry *PostEntry, params *DeSoParams) error {
  3643  
  3644  	postDataBuf := bytes.NewBuffer([]byte{})
  3645  	gob.NewEncoder(postDataBuf).Encode(postEntry)
  3646  
  3647  	if err := txn.Set(_dbKeyForPostEntryHash(
  3648  		postEntry.PostHash), postDataBuf.Bytes()); err != nil {
  3649  
  3650  		return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem "+
  3651  			"adding mapping for post: %v", postEntry.PostHash)
  3652  	}
  3653  
  3654  	// If the post is a comment we store it in a separate index. Comments are
  3655  	// technically posts but they really should be treated as their own entity.
  3656  	// The only reason they're not actually implemented that way is so that we
  3657  	// get code re-use.
  3658  	isComment := len(postEntry.ParentStakeID) != 0
  3659  	if isComment {
  3660  		// Extend the parent stake ID, which is a block hash, to 33 bytes, which
  3661  		// is the length of a public key and the standard length we use for this
  3662  		// key.
  3663  		extendedStakeID := append([]byte{}, postEntry.ParentStakeID...)
  3664  		if len(extendedStakeID) == HashSizeBytes {
  3665  			extendedStakeID = append(extendedStakeID, 0x00)
  3666  		}
  3667  		if len(extendedStakeID) != btcec.PubKeyBytesLenCompressed {
  3668  			return fmt.Errorf("DbPutPostEntryMappingsWithTxn: extended "+
  3669  				"ParentStakeID %#v must have length %v",
  3670  				extendedStakeID, btcec.PubKeyBytesLenCompressed)
  3671  		}
  3672  		parentStakeIDKey := _dbKeyForCommentParentStakeIDToPostHash(
  3673  			extendedStakeID, postEntry.TimestampNanos, postEntry.PostHash)
  3674  		if err := txn.Set(parentStakeIDKey, []byte{}); err != nil {
  3675  
  3676  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem "+
  3677  				"adding mapping for comment: %v: %v", postEntry, err)
  3678  		}
  3679  
  3680  	} else {
  3681  		if err := txn.Set(_dbKeyForPosterPublicKeyTimestampPostHash(
  3682  			postEntry.PosterPublicKey, postEntry.TimestampNanos, postEntry.PostHash), []byte{}); err != nil {
  3683  
  3684  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem "+
  3685  				"adding mapping for public key: %v: %v", postEntry, err)
  3686  		}
  3687  		if err := txn.Set(_dbKeyForTstampPostHash(
  3688  			postEntry.TimestampNanos, postEntry.PostHash), []byte{}); err != nil {
  3689  
  3690  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem "+
  3691  				"adding mapping for tstamp: %v", postEntry)
  3692  		}
  3693  		if err := txn.Set(_dbKeyForCreatorBpsPostHash(
  3694  			postEntry.CreatorBasisPoints, postEntry.PostHash), []byte{}); err != nil {
  3695  
  3696  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem "+
  3697  				"adding mapping for creatorBps: %v", postEntry)
  3698  		}
  3699  		if err := txn.Set(_dbKeyForStakeMultipleBpsPostHash(
  3700  			postEntry.StakeMultipleBasisPoints, postEntry.PostHash), []byte{}); err != nil {
  3701  
  3702  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem "+
  3703  				"adding mapping for stakeMultipleBps: %v", postEntry)
  3704  		}
  3705  	}
  3706  	// We treat reposting the same for both comments and posts.
  3707  	// We only store repost entry mappings for vanilla reposts
  3708  	if IsVanillaRepost(postEntry) {
  3709  		repostEntry := RepostEntry{
  3710  			RepostPostHash:   postEntry.PostHash,
  3711  			RepostedPostHash: postEntry.RepostedPostHash,
  3712  			ReposterPubKey:   postEntry.PosterPublicKey,
  3713  		}
  3714  		repostDataBuf := bytes.NewBuffer([]byte{})
  3715  		gob.NewEncoder(repostDataBuf).Encode(repostEntry)
  3716  		if err := txn.Set(
  3717  			_dbKeyForReposterPubKeyRepostedPostHashToRepostPostHash(postEntry.PosterPublicKey, *postEntry.RepostedPostHash),
  3718  			repostDataBuf.Bytes()); err != nil {
  3719  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem adding mapping for repostPostHash to ReposterPubKey: %v", err)
  3720  		}
  3721  		if err := txn.Set(
  3722  			_dbKeyForRepostedPostHashReposterPubKey(postEntry.RepostedPostHash, postEntry.PosterPublicKey),
  3723  			[]byte{}); err != nil {
  3724  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem adding "+
  3725  				"mapping for _dbKeyForRepostedPostHashReposterPubKey: %v", err)
  3726  		}
  3727  	} else if IsQuotedRepost(postEntry) {
  3728  		// Put quoted repost stuff.
  3729  		if err := txn.Set(
  3730  			_dbKeyForRepostedPostHashReposterPubKeyRepostPostHash(
  3731  				postEntry.RepostedPostHash, postEntry.PosterPublicKey, postEntry.PostHash),
  3732  			[]byte{}); err != nil {
  3733  			return errors.Wrapf(err, "DbPutPostEntryMappingsWithTxn: Problem adding "+
  3734  				"mapping for _dbKeyForRepostedPostHashReposterPubKeyRepostPostHash: %v", err)
  3735  		}
  3736  	}
  3737  	return nil
  3738  }
  3739  
  3740  func DBPutPostEntryMappings(handle *badger.DB, postEntry *PostEntry, params *DeSoParams) error {
  3741  
  3742  	return handle.Update(func(txn *badger.Txn) error {
  3743  		return DBPutPostEntryMappingsWithTxn(txn, postEntry, params)
  3744  	})
  3745  }
  3746  
  3747  // Specifying minTimestampNanos returns all posts with a timestamp at or after minTimestampNanos.
  3748  // Pass minTimestampNanos = 0 && maxTimestampNanos = 0 if you want all posts.
  3749  // Setting maxTimestampNanos = 0 removes the upper bound, i.e. posts are returned up to
        // the most recent timestamp.
  3750  func DBGetAllPostsAndCommentsForPublicKeyOrderedByTimestamp(
  3751  	handle *badger.DB, publicKey []byte, fetchEntries bool, minTimestampNanos uint64, maxTimestampNanos uint64) (
  3752  	_tstamps []uint64, _postAndCommentHashes []*BlockHash, _postAndCommentEntries []*PostEntry, _err error) {
  3753  
  3754  	tstampsFetched := []uint64{}
  3755  	postAndCommentHashesFetched := []*BlockHash{}
  3756  	postAndCommentEntriesFetched := []*PostEntry{}
  3757  	dbPrefixx := append([]byte{}, _PrefixPosterPublicKeyTimestampPostHash...)
  3758  	dbPrefixx = append(dbPrefixx, publicKey...)
  3759  
  3760  	err := handle.View(func(txn *badger.Txn) error {
  3761  		opts := badger.DefaultIteratorOptions
  3762  
  3763  		opts.PrefetchValues = false
  3764  
  3765  		// Go in reverse order so that we see the newest timestamps first.
  3766  		opts.Reverse = true
  3767  
  3768  		it := txn.NewIterator(opts)
  3769  		defer it.Close()
  3770  		// Since we iterate backwards, the prefix must be bigger than all possible
  3771  		// timestamps that could actually exist. We use eight bytes since the timestamp is
  3772  		// encoded as a 64-bit big-endian byte slice, which will be eight bytes long.
  3773  		maxBigEndianUint64Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
  3774  		prefix := append(dbPrefixx, maxBigEndianUint64Bytes...)
  3775  
  3776  		// If we have a maxTimeStamp, we use that instead of the maxBigEndianUint64.
  3777  		if maxTimestampNanos != 0 {
  3778  			prefix = append(dbPrefixx, EncodeUint64(maxTimestampNanos)...)
  3779  		}
  3780  
  3781  		for it.Seek(prefix); it.ValidForPrefix(dbPrefixx); it.Next() {
  3782  			rawKey := it.Item().Key()
  3783  
  3784  			// Key should be
  3785  			// [prefix][posterPublicKey][Timestamp][PostHash]
  3786  
  3787  			// Pull out the relevant fields
  3788  			timestampSizeBytes := 8
  3789  			keyWithoutPrefix := rawKey[1:]
  3790  			// The poster public key is a 33-byte compressed public key, i.e. HashSizeBytes + 1.
  3791  			publicKeySizeBytes := HashSizeBytes + 1
  3792  			tstampNanos := DecodeUint64(keyWithoutPrefix[publicKeySizeBytes:(publicKeySizeBytes + timestampSizeBytes)])
  3793  
  3794  			postHash := &BlockHash{}
  3795  			copy(postHash[:], keyWithoutPrefix[(publicKeySizeBytes+timestampSizeBytes):])
  3796  
  3797  			if tstampNanos < minTimestampNanos {
  3798  				break
  3799  			}
  3800  
  3801  			tstampsFetched = append(tstampsFetched, tstampNanos)
  3802  			postAndCommentHashesFetched = append(postAndCommentHashesFetched, postHash)
  3803  		}
  3804  		return nil
  3805  	})
  3806  	if err != nil {
  3807  		return nil, nil, nil, err
  3808  	}
  3809  
  3810  	if !fetchEntries {
  3811  		return tstampsFetched, postAndCommentHashesFetched, nil, nil
  3812  	}
  3813  
  3814  	for _, postHash := range postAndCommentHashesFetched {
  3815  		postEntry := DBGetPostEntryByPostHash(handle, postHash)
  3816  		if postEntry == nil {
  3817  			return nil, nil, nil, fmt.Errorf("DBGetPostEntryByPostHash: "+
  3818  				"PostHash %v does not have corresponding entry", postHash)
  3819  		}
  3820  		postAndCommentEntriesFetched = append(postAndCommentEntriesFetched, postEntry)
  3821  	}
  3822  
  3823  	return tstampsFetched, postAndCommentHashesFetched, postAndCommentEntriesFetched, nil
  3824  }
  3825  
  3826  // DBGetAllPostsByTstamp returns all the posts in the db with the newest
  3827  // posts first.
  3828  //
  3829  // TODO(performance): This currently fetches all posts. We should implement
  3830  // some kind of pagination instead though.
  3831  func DBGetAllPostsByTstamp(handle *badger.DB, fetchEntries bool) (
  3832  	_tstamps []uint64, _postHashes []*BlockHash, _postEntries []*PostEntry, _err error) {
  3833  
  3834  	tstampsFetched := []uint64{}
  3835  	postHashesFetched := []*BlockHash{}
  3836  	postEntriesFetched := []*PostEntry{}
  3837  	dbPrefixx := append([]byte{}, _PrefixTstampNanosPostHash...)
  3838  
  3839  	err := handle.View(func(txn *badger.Txn) error {
  3840  		opts := badger.DefaultIteratorOptions
  3841  
  3842  		opts.PrefetchValues = false
  3843  
  3844  		// Go in reverse order so that we see the newest timestamps first.
  3845  		opts.Reverse = true
  3846  
  3847  		it := txn.NewIterator(opts)
  3848  		defer it.Close()
  3849  		// Since we iterate backwards, the prefix must be bigger than all possible
  3850  		// timestamps that could actually exist. We use eight bytes since the timestamp is
  3851  		// encoded as a 64-bit big-endian byte slice, which will be eight bytes long.
  3852  		maxBigEndianUint64Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
  3853  		prefix := append(dbPrefixx, maxBigEndianUint64Bytes...)
  3854  		for it.Seek(prefix); it.ValidForPrefix(dbPrefixx); it.Next() {
  3855  			rawKey := it.Item().Key()
  3856  
  3857  			// Strip the prefix off the key and check its length. It should contain an
  3858  			// 8-byte big-endian tstamp followed by a 32-byte post hash.
  3859  			tstampPostHashKey := rawKey[1:]
  3860  			uint64BytesLen := len(maxBigEndianUint64Bytes)
  3861  			if len(tstampPostHashKey) != uint64BytesLen+HashSizeBytes {
  3862  				return fmt.Errorf("DBGetAllPostsByTstamp: Invalid key "+
  3863  					"length %d should be %d", len(tstampPostHashKey),
  3864  					uint64BytesLen+HashSizeBytes)
  3865  			}
  3866  
  3867  			tstampNanos := DecodeUint64(tstampPostHashKey[:uint64BytesLen])
  3868  
  3869  			// Appended to the tstamp should be the post hash so extract it here.
  3870  			postHash := &BlockHash{}
  3871  			copy(postHash[:], tstampPostHashKey[uint64BytesLen:])
  3872  
  3873  			tstampsFetched = append(tstampsFetched, tstampNanos)
  3874  			postHashesFetched = append(postHashesFetched, postHash)
  3875  		}
  3876  		return nil
  3877  	})
  3878  	if err != nil {
  3879  		return nil, nil, nil, err
  3880  	}
  3881  
  3882  	if !fetchEntries {
  3883  		return tstampsFetched, postHashesFetched, nil, nil
  3884  	}
  3885  
  3886  	for _, postHash := range postHashesFetched {
  3887  		postEntry := DBGetPostEntryByPostHash(handle, postHash)
  3888  		if postEntry == nil {
  3889  			return nil, nil, nil, fmt.Errorf("DBGetPostEntryByPostHash: "+
  3890  				"PostHash %v does not have corresponding entry", postHash)
  3891  		}
  3892  		postEntriesFetched = append(postEntriesFetched, postEntry)
  3893  	}
  3894  
  3895  	return tstampsFetched, postHashesFetched, postEntriesFetched, nil
  3896  }
  3897  
  3898  // DBGetCommentPostHashesForParentStakeID returns all the comments, which are indexed by their
  3899  // stake ID rather than by their timestamp.
  3900  //
  3901  // TODO(performance): This currently fetches all comments. We should implement
  3902  // something where we only get the comments for particular posts instead.
  3903  func DBGetCommentPostHashesForParentStakeID(
  3904  	handle *badger.DB, stakeIDXXX []byte, fetchEntries bool) (
  3905  	_tstamps []uint64, _commentPostHashes []*BlockHash, _commentPostEntries []*PostEntry, _err error) {
  3906  
  3907  	tstampsFetched := []uint64{}
  3908  	commentPostHashes := []*BlockHash{}
  3909  	commentEntriesFetched := []*PostEntry{}
  3910  	dbPrefixx := append([]byte{}, _PrefixCommentParentStakeIDToPostHash...)
  3911  	dbPrefixx = append(dbPrefixx, stakeIDXXX...)
  3912  
  3913  	err := handle.View(func(txn *badger.Txn) error {
  3914  		opts := badger.DefaultIteratorOptions
  3915  
  3916  		opts.PrefetchValues = false
  3917  
  3918  		it := txn.NewIterator(opts)
  3919  		defer it.Close()
  3920  		// Unlike the timestamp-ordered indexes above, we iterate forward here, so we can
  3921  		// seek directly to the stake ID prefix. maxBigEndianUint64Bytes is only used below
  3922  		// for its length when validating the key layout.
  3923  		maxBigEndianUint64Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
  3925  		prefix := dbPrefixx
  3926  		for it.Seek(prefix); it.ValidForPrefix(dbPrefixx); it.Next() {
  3927  			rawKey := it.Item().Key()
  3928  
  3929  			// Strip the prefix off the key and check its length. It should contain
  3930  			// a 33-byte stake id, an 8 byte tstamp, and a 32 byte comment hash.
  3931  			stakeIDTstampPostHashKey := rawKey[1:]
  3932  			uint64BytesLen := len(maxBigEndianUint64Bytes)
  3933  			if len(stakeIDTstampPostHashKey) != btcec.PubKeyBytesLenCompressed+uint64BytesLen+HashSizeBytes {
  3934  				return fmt.Errorf("DBGetCommentPostHashesForParentStakeID: Invalid key "+
  3935  					"length %d should be %d", len(stakeIDTstampPostHashKey),
  3936  					btcec.PubKeyBytesLenCompressed+uint64BytesLen+HashSizeBytes)
  3937  			}
  3938  
  3939  			//stakeID := stakeIDTstampPostHashKey[:btcec.PubKeyBytesLenCompressed]
  3940  			tstampNanos := DecodeUint64(stakeIDTstampPostHashKey[btcec.PubKeyBytesLenCompressed : btcec.PubKeyBytesLenCompressed+uint64BytesLen])
  3941  
  3942  			commentPostHashBytes := stakeIDTstampPostHashKey[btcec.PubKeyBytesLenCompressed+uint64BytesLen:]
  3943  			commentPostHash := &BlockHash{}
  3944  			copy(commentPostHash[:], commentPostHashBytes)
  3945  
  3946  			//stakeIDsFetched = append(stakeIDsFetched, stakeID)
  3947  			tstampsFetched = append(tstampsFetched, tstampNanos)
  3948  			commentPostHashes = append(commentPostHashes, commentPostHash)
  3949  		}
  3950  		return nil
  3951  	})
  3952  	if err != nil {
  3953  		return nil, nil, nil, err
  3954  	}
  3955  
  3956  	if !fetchEntries {
  3957  		return tstampsFetched, commentPostHashes, nil, nil
  3958  	}
  3959  
  3960  	for _, postHash := range commentPostHashes {
  3961  		postEntry := DBGetPostEntryByPostHash(handle, postHash)
  3962  		if postEntry == nil {
  3963  			return nil, nil, nil, fmt.Errorf("DBGetCommentPostHashesForParentStakeID: "+
  3964  				"PostHash %v does not have corresponding entry", postHash)
  3965  		}
  3966  		commentEntriesFetched = append(commentEntriesFetched, postEntry)
  3967  	}
  3968  
  3969  	return tstampsFetched, commentPostHashes, commentEntriesFetched, nil
  3970  }
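
        // _exampleFetchCommentsForParent is an illustrative usage sketch (not called
        // anywhere in this file) of pulling every comment made on a parent entity,
        // assuming `handle` is an open badger.DB and `parentStakeID` is already in the
        // fixed-width form this index expects.
        func _exampleFetchCommentsForParent(handle *badger.DB, parentStakeID []byte) ([]*PostEntry, error) {
        	// Fetch timestamps, post hashes, and the full entries in one call.
        	_, _, commentEntries, err := DBGetCommentPostHashesForParentStakeID(
        		handle, parentStakeID, true /*fetchEntries*/)
        	if err != nil {
        		return nil, errors.Wrap(err, "_exampleFetchCommentsForParent")
        	}
        	return commentEntries, nil
        }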
  3971  
  3972  // =======================================================================================
  3973  // NFTEntry db functions
  3974  // =======================================================================================
  3975  func _dbKeyForNFTPostHashSerialNumber(nftPostHash *BlockHash, serialNumber uint64) []byte {
  3976  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  3977  	prefixCopy := append([]byte{}, _PrefixPostHashSerialNumberToNFTEntry...)
  3978  	key := append(prefixCopy, nftPostHash[:]...)
  3979  	key = append(key, EncodeUint64(serialNumber)...)
  3980  	return key
  3981  }
  3982  
  3983  func _dbKeyForPKIDIsForSaleBidAmountNanosNFTPostHashSerialNumber(pkid *PKID, isForSale bool, bidAmountNanos uint64, nftPostHash *BlockHash, serialNumber uint64) []byte {
  3984  	prefixCopy := append([]byte{}, _PrefixPKIDIsForSaleBidAmountNanosPostHashSerialNumberToNFTEntry...)
  3985  	key := append(prefixCopy, pkid[:]...)
  3986  	key = append(key, BoolToByte(isForSale))
  3987  	key = append(key, EncodeUint64(bidAmountNanos)...)
  3988  	key = append(key, nftPostHash[:]...)
  3989  	key = append(key, EncodeUint64(serialNumber)...)
  3990  	return key
  3991  }
  3992  
  3993  func DBGetNFTEntryByPostHashSerialNumberWithTxn(
  3994  	txn *badger.Txn, postHash *BlockHash, serialNumber uint64) *NFTEntry {
  3995  
  3996  	key := _dbKeyForNFTPostHashSerialNumber(postHash, serialNumber)
  3997  	nftEntryObj := &NFTEntry{}
  3998  	nftEntryItem, err := txn.Get(key)
  3999  	if err != nil {
  4000  		return nil
  4001  	}
  4002  	err = nftEntryItem.Value(func(valBytes []byte) error {
  4003  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(nftEntryObj)
  4004  	})
  4005  	if err != nil {
  4006  		glog.Errorf("DBGetNFTEntryByPostHashSerialNumberWithTxn: Problem reading "+
  4007  			"NFTEntry for postHash %v", postHash)
  4008  		return nil
  4009  	}
  4010  	return nftEntryObj
  4011  }
  4012  
  4013  func DBGetNFTEntryByPostHashSerialNumber(db *badger.DB, postHash *BlockHash, serialNumber uint64) *NFTEntry {
  4014  	var ret *NFTEntry
  4015  	db.View(func(txn *badger.Txn) error {
  4016  		ret = DBGetNFTEntryByPostHashSerialNumberWithTxn(txn, postHash, serialNumber)
  4017  		return nil
  4018  	})
  4019  	return ret
  4020  }
  4021  
  4022  func DBDeleteNFTMappingsWithTxn(txn *badger.Txn, nftPostHash *BlockHash, serialNumber uint64) error {
  4023  
  4024  	// First pull up the mapping that exists for the post / serial # passed in.
  4025  	// If one doesn't exist then there's nothing to do.
  4026  	nftEntry := DBGetNFTEntryByPostHashSerialNumberWithTxn(txn, nftPostHash, serialNumber)
  4027  	if nftEntry == nil {
  4028  		return nil
  4029  	}
  4030  
  4031  	// When an nftEntry exists, delete the mapping.
  4032  	if err := txn.Delete(_dbKeyForPKIDIsForSaleBidAmountNanosNFTPostHashSerialNumber(nftEntry.OwnerPKID, nftEntry.IsForSale, nftEntry.LastAcceptedBidAmountNanos, nftPostHash, serialNumber)); err != nil {
  4033  		return errors.Wrapf(err, "DbDeleteNFTMappingsWithTxn: Deleting "+
  4034  			"nft mapping for pkid %v post hash %v serial number %d", nftEntry.OwnerPKID, nftPostHash, serialNumber)
  4035  	}
  4036  
  4037  	// Also delete the <post hash, serial number> -> NFTEntry mapping.
  4038  	if err := txn.Delete(_dbKeyForNFTPostHashSerialNumber(nftPostHash, serialNumber)); err != nil {
  4039  		return errors.Wrapf(err, "DbDeleteNFTMappingsWithTxn: Deleting "+
  4040  			"nft mapping for post hash %v serial number %d", nftPostHash, serialNumber)
  4041  	}
  4042  
  4043  	return nil
  4044  }
  4045  
  4046  func DBDeleteNFTMappings(
  4047  	handle *badger.DB, postHash *BlockHash, serialNumber uint64) error {
  4048  
  4049  	return handle.Update(func(txn *badger.Txn) error {
  4050  		return DBDeleteNFTMappingsWithTxn(txn, postHash, serialNumber)
  4051  	})
  4052  }
  4053  
  4054  func DBPutNFTEntryMappingsWithTxn(txn *badger.Txn, nftEntry *NFTEntry) error {
  4055  
  4056  	nftDataBuf := bytes.NewBuffer([]byte{})
  4057  	gob.NewEncoder(nftDataBuf).Encode(nftEntry)
  4058  
  4059  	nftEntryBytes := nftDataBuf.Bytes()
  4060  	if err := txn.Set(_dbKeyForNFTPostHashSerialNumber(
  4061  		nftEntry.NFTPostHash, nftEntry.SerialNumber), nftEntryBytes); err != nil {
  4062  
  4063  		return errors.Wrapf(err, "DbPutNFTEntryMappingsWithTxn: Problem "+
  4064  			"adding mapping for post: %v, serial number: %d", nftEntry.NFTPostHash, nftEntry.SerialNumber)
  4065  	}
  4066  
  4067  	if err := txn.Set(_dbKeyForPKIDIsForSaleBidAmountNanosNFTPostHashSerialNumber(
  4068  		nftEntry.OwnerPKID, nftEntry.IsForSale, nftEntry.LastAcceptedBidAmountNanos, nftEntry.NFTPostHash, nftEntry.SerialNumber), nftEntryBytes); err != nil {
  4069  		return errors.Wrapf(err, "DbPutNFTEntryMappingsWithTxn: Problem "+
  4070  			"adding mapping for pkid: %v, post: %v, serial number: %d", nftEntry.OwnerPKID, nftEntry.NFTPostHash, nftEntry.SerialNumber)
  4071  	}
  4072  
  4073  	return nil
  4074  }
  4075  
  4076  func DBPutNFTEntryMappings(handle *badger.DB, nftEntry *NFTEntry) error {
  4077  
  4078  	return handle.Update(func(txn *badger.Txn) error {
  4079  		return DBPutNFTEntryMappingsWithTxn(txn, nftEntry)
  4080  	})
  4081  }
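
        // _exampleNFTEntryRoundTrip is an illustrative sketch of writing an NFT entry and
        // reading it back via the point lookup and the per-post enumeration. Assumes
        // `handle` is an open badger.DB; only fields used elsewhere in this file are set.
        func _exampleNFTEntryRoundTrip(handle *badger.DB, postHash *BlockHash, ownerPKID *PKID) error {
        	nftEntry := &NFTEntry{
        		NFTPostHash:  postHash,
        		SerialNumber: 1,
        		OwnerPKID:    ownerPKID,
        		IsForSale:    true,
        	}
        	if err := DBPutNFTEntryMappings(handle, nftEntry); err != nil {
        		return errors.Wrap(err, "_exampleNFTEntryRoundTrip")
        	}

        	// Point lookup by <post hash, serial number>.
        	fetched := DBGetNFTEntryByPostHashSerialNumber(handle, postHash, 1)

        	// Enumerate every serial number minted for this post.
        	allEntries := DBGetNFTEntriesForPostHash(handle, postHash)
        	glog.Infof("Fetched %v; post has %d NFT entries in total", fetched, len(allEntries))
        	return nil
        }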
  4082  
  4083  // DBGetNFTEntriesForPostHash gets NFT Entries *from the DB*. Does not include mempool txns.
  4084  func DBGetNFTEntriesForPostHash(handle *badger.DB, nftPostHash *BlockHash) (_nftEntries []*NFTEntry) {
  4085  	nftEntries := []*NFTEntry{}
  4086  	prefix := append([]byte{}, _PrefixPostHashSerialNumberToNFTEntry...)
  4087  	keyPrefix := append(prefix, nftPostHash[:]...)
  4088  	_, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix)
  4089  	for _, byteString := range entryByteStringsFound {
  4090  		currentEntry := &NFTEntry{}
  4091  		gob.NewDecoder(bytes.NewReader(byteString)).Decode(currentEntry)
  4092  		nftEntries = append(nftEntries, currentEntry)
  4093  	}
  4094  	return nftEntries
  4095  }
  4096  
  4097  // =======================================================================================
  4098  // NFTOwnership db functions
  4099  // NOTE: This index is not essential to running the protocol and should be computed
  4100  // outside of the protocol layer once updates to the creation of TxIndex are complete.
  4101  // =======================================================================================
  4102  
  4103  func DBGetNFTEntryByNFTOwnershipDetailsWithTxn(
  4104  	txn *badger.Txn, ownerPKID *PKID, isForSale bool, bidAmountNanos uint64, postHash *BlockHash, serialNumber uint64) *NFTEntry {
  4105  
  4106  	key := _dbKeyForPKIDIsForSaleBidAmountNanosNFTPostHashSerialNumber(ownerPKID, isForSale, bidAmountNanos, postHash, serialNumber)
  4107  	nftEntryObj := &NFTEntry{}
  4108  	nftEntryItem, err := txn.Get(key)
  4109  	if err != nil {
  4110  		return nil
  4111  	}
  4112  	err = nftEntryItem.Value(func(valBytes []byte) error {
  4113  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(nftEntryObj)
  4114  	})
  4115  	if err != nil {
  4116  		glog.Errorf("DBGetNFTEntryByNFTOwnershipDetailsWithTxn: Problem reading "+
  4117  			"NFTEntry for postHash %v serial number %d", postHash, serialNumber)
  4118  		return nil
  4119  	}
  4120  	return nftEntryObj
  4121  }
  4122  
  4123  func DBGetNFTEntryByNFTOwnershipDetails(db *badger.DB, ownerPKID *PKID, isForSale bool, bidAmountNanos uint64, postHash *BlockHash, serialNumber uint64) *NFTEntry {
  4124  	var ret *NFTEntry
  4125  	db.View(func(txn *badger.Txn) error {
  4126  		ret = DBGetNFTEntryByNFTOwnershipDetailsWithTxn(txn, ownerPKID, isForSale, bidAmountNanos, postHash, serialNumber)
  4127  		return nil
  4128  	})
  4129  	return ret
  4130  }
  4131  
  4132  // DBGetNFTEntriesForPKID gets NFT Entries *from the DB*. Does not include mempool txns.
  4133  func DBGetNFTEntriesForPKID(handle *badger.DB, ownerPKID *PKID) (_nftEntries []*NFTEntry) {
  4134  	nftEntries := []*NFTEntry{}
  4135  	prefix := append([]byte{}, _PrefixPKIDIsForSaleBidAmountNanosPostHashSerialNumberToNFTEntry...)
  4136  	keyPrefix := append(prefix, ownerPKID[:]...)
  4137  	_, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix)
  4138  	for _, byteString := range entryByteStringsFound {
  4139  		currentEntry := &NFTEntry{}
  4140  		gob.NewDecoder(bytes.NewReader(byteString)).Decode(currentEntry)
  4141  		nftEntries = append(nftEntries, currentEntry)
  4142  	}
  4143  	return nftEntries
  4144  }
  4145  
  4146  // =======================================================================================
  4147  // AcceptedNFTBidEntries db functions
  4148  // NOTE: This index is not essential to running the protocol and should be computed
  4149  // outside of the protocol layer once updates to the creation of TxIndex are complete.
  4150  // =======================================================================================
  4151  func _dbKeyForPostHashSerialNumberToAcceptedBidEntries(nftPostHash *BlockHash, serialNumber uint64) []byte {
  4152  	prefixCopy := append([]byte{}, _PrefixPostHashSerialNumberToAcceptedBidEntries...)
  4153  	key := append(prefixCopy, nftPostHash[:]...)
  4154  	key = append(key, EncodeUint64(serialNumber)...)
  4155  	return key
  4156  }
  4157  
  4158  func DBPutAcceptedNFTBidEntriesMappingWithTxn(txn *badger.Txn, nftKey NFTKey, nftBidEntries *[]*NFTBidEntry) error {
  4159  	nftDataBuf := bytes.NewBuffer([]byte{})
  4160  	gob.NewEncoder(nftDataBuf).Encode(nftBidEntries)
  4161  
  4162  	acceptedNFTBidEntryBytes := nftDataBuf.Bytes()
  4163  	if err := txn.Set(_dbKeyForPostHashSerialNumberToAcceptedBidEntries(
  4164  		&nftKey.NFTPostHash, nftKey.SerialNumber), acceptedNFTBidEntryBytes); err != nil {
  4165  
  4166  		return errors.Wrapf(err, "DBPutAcceptedNFTBidEntriesMappingWithTxn: Problem "+
  4167  			"adding accepted bid mapping for post: %v, serial number: %d", nftKey.NFTPostHash, nftKey.SerialNumber)
  4168  	}
  4169  	return nil
  4170  }
  4171  
  4172  func DBPutAcceptedNFTBidEntriesMapping(handle *badger.DB, nftKey NFTKey, nftBidEntries *[]*NFTBidEntry) error {
  4173  	return handle.Update(func(txn *badger.Txn) error {
  4174  		return DBPutAcceptedNFTBidEntriesMappingWithTxn(txn, nftKey, nftBidEntries)
  4175  	})
  4176  }
  4177  
  4178  func DBGetAcceptedNFTBidEntriesByPostHashSerialNumberWithTxn(
  4179  	txn *badger.Txn, postHash *BlockHash, serialNumber uint64) *[]*NFTBidEntry {
  4180  
  4181  	key := _dbKeyForPostHashSerialNumberToAcceptedBidEntries(postHash, serialNumber)
  4182  	nftBidEntriesObj := &[]*NFTBidEntry{}
  4183  	nftBidEntriesItem, err := txn.Get(key)
  4184  	if err != nil {
  4185  		return nil
  4186  	}
  4187  	err = nftBidEntriesItem.Value(func(valBytes []byte) error {
  4188  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(nftBidEntriesObj)
  4189  	})
  4190  	if err != nil {
  4191  		glog.Errorf("DBGetAcceptedNFTBidEntriesByPostHashSerialNumberWithTxn: Problem reading "+
  4192  			"NFTBidEntries for postHash %v serialNumber %d", postHash, serialNumber)
  4193  		return nil
  4194  	}
  4195  	return nftBidEntriesObj
  4196  }
  4197  
  4198  func DBGetAcceptedNFTBidEntriesByPostHashSerialNumber(db *badger.DB, postHash *BlockHash, serialNumber uint64) *[]*NFTBidEntry {
  4199  	var ret *[]*NFTBidEntry
  4200  	db.View(func(txn *badger.Txn) error {
  4201  		ret = DBGetAcceptedNFTBidEntriesByPostHashSerialNumberWithTxn(txn, postHash, serialNumber)
  4202  		return nil
  4203  	})
  4204  	return ret
  4205  }
  4206  
  4207  func DBDeleteAcceptedNFTBidEntriesMappingsWithTxn(txn *badger.Txn, nftPostHash *BlockHash, serialNumber uint64) error {
  4208  
  4209  	// First check to see if there is an existing mapping. If one doesn't exist, there's nothing to do.
  4210  	nftBidEntries := DBGetAcceptedNFTBidEntriesByPostHashSerialNumberWithTxn(txn, nftPostHash, serialNumber)
  4211  	if nftBidEntries == nil {
  4212  		return nil
  4213  	}
  4214  
  4215  	// When accepted bid entries exist, delete the mapping.
  4216  	if err := txn.Delete(_dbKeyForPostHashSerialNumberToAcceptedBidEntries(nftPostHash, serialNumber)); err != nil {
  4217  		return errors.Wrapf(err, "DBDeleteAcceptedNFTBidEntriesMappingsWithTxn: Deleting "+
  4218  			"accepted nft bid mapping for post hash %v serial number %d", nftPostHash, serialNumber)
  4219  	}
  4220  
  4221  	return nil
  4222  }
  4223  
  4224  func DBDeleteAcceptedNFTBidMappings(
  4225  	handle *badger.DB, postHash *BlockHash, serialNumber uint64) error {
  4226  
  4227  	return handle.Update(func(txn *badger.Txn) error {
  4228  		return DBDeleteAcceptedNFTBidEntriesMappingsWithTxn(txn, postHash, serialNumber)
  4229  	})
  4230  }
  4231  
  4232  // =======================================================================================
  4233  // NFTBidEntry db functions
  4234  // =======================================================================================
  4235  
  4236  func _dbKeyForNFTPostHashSerialNumberBidNanosBidderPKID(bidEntry *NFTBidEntry) []byte {
  4237  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  4238  	prefixCopy := append([]byte{}, _PrefixPostHashSerialNumberBidNanosBidderPKID...)
  4239  	key := append(prefixCopy, bidEntry.NFTPostHash[:]...)
  4240  	key = append(key, EncodeUint64(bidEntry.SerialNumber)...)
  4241  	key = append(key, EncodeUint64(bidEntry.BidAmountNanos)...)
  4242  	key = append(key, bidEntry.BidderPKID[:]...)
  4243  	return key
  4244  }
  4245  
  4246  func _dbKeyForNFTBidderPKIDPostHashSerialNumber(
  4247  	bidderPKID *PKID, nftPostHash *BlockHash, serialNumber uint64) []byte {
  4248  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  4249  	prefixCopy := append([]byte{}, _PrefixBidderPKIDPostHashSerialNumberToBidNanos...)
  4250  	key := append(prefixCopy, bidderPKID[:]...)
  4251  	key = append(key, nftPostHash[:]...)
  4252  	key = append(key, EncodeUint64(serialNumber)...)
  4253  	return key
  4254  }
  4255  
  4256  func _dbSeekKeyForNFTBids(nftHash *BlockHash, serialNumber uint64) []byte {
  4257  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  4258  	prefixCopy := append([]byte{}, _PrefixPostHashSerialNumberBidNanosBidderPKID...)
  4259  	key := append(prefixCopy, nftHash[:]...)
  4260  	key = append(key, EncodeUint64(serialNumber)...)
  4261  	return key
  4262  }
  4263  
  4264  func DBGetNFTBidEntryForNFTBidKeyWithTxn(txn *badger.Txn, nftBidKey *NFTBidKey) *NFTBidEntry {
  4265  
  4266  	key := _dbKeyForNFTBidderPKIDPostHashSerialNumber(
  4267  		&nftBidKey.BidderPKID, &nftBidKey.NFTPostHash, nftBidKey.SerialNumber)
  4268  
  4269  	nftBidItem, err := txn.Get(key)
  4270  	if err != nil {
  4271  		return nil
  4272  	}
  4273  
  4274  	// If we get here then it means we actually had a bid amount for this key in the DB.
  4275  	nftBidBytes, err := nftBidItem.ValueCopy(nil)
  4276  	if err != nil {
  4277  		// If we had a problem reading the mapping then log an error and return nil.
  4278  		glog.Errorf("DBGetNFTBidEntryForNFTBidKeyWithTxn: Problem reading "+
  4279  			"bid bytes for bidKey: %v", nftBidKey)
  4280  		return nil
  4281  	}
  4282  
  4283  	nftBidAmountNanos := DecodeUint64(nftBidBytes)
  4284  
  4285  	nftBidEntry := &NFTBidEntry{
  4286  		BidderPKID:     &nftBidKey.BidderPKID,
  4287  		NFTPostHash:    &nftBidKey.NFTPostHash,
  4288  		SerialNumber:   nftBidKey.SerialNumber,
  4289  		BidAmountNanos: nftBidAmountNanos,
  4290  	}
  4291  
  4292  	return nftBidEntry
  4293  }
  4294  
  4295  func DBGetNFTBidEntryForNFTBidKey(db *badger.DB, nftBidKey *NFTBidKey) *NFTBidEntry {
  4296  	var ret *NFTBidEntry
  4297  	db.View(func(txn *badger.Txn) error {
  4298  		ret = DBGetNFTBidEntryForNFTBidKeyWithTxn(txn, nftBidKey)
  4299  		return nil
  4300  	})
  4301  	return ret
  4302  }
  4303  
  4304  func DBDeleteNFTBidMappingsWithTxn(txn *badger.Txn, nftBidKey *NFTBidKey) error {
  4305  
  4306  	// First check to see if there is an existing mapping. If one doesn't exist, there's nothing to do.
  4307  	nftBidEntry := DBGetNFTBidEntryForNFTBidKeyWithTxn(txn, nftBidKey)
  4308  	if nftBidEntry == nil {
  4309  		return nil
  4310  	}
  4311  
  4312  	// When a bid entry exists, delete the mapping in the <post hash, serial number, bid amount, bidder PKID> index.
  4313  	if err := txn.Delete(_dbKeyForNFTPostHashSerialNumberBidNanosBidderPKID(nftBidEntry)); err != nil {
  4314  		return errors.Wrapf(err, "DbDeleteNFTBidMappingsWithTxn: Deleting "+
  4315  			"nft bid mapping for nftBidKey %v", nftBidKey)
  4316  	}
  4317  
  4318  	// Also delete the mapping in the <bidder PKID, post hash, serial number> index.
  4319  	if err := txn.Delete(_dbKeyForNFTBidderPKIDPostHashSerialNumber(
  4320  		nftBidEntry.BidderPKID, nftBidEntry.NFTPostHash, nftBidEntry.SerialNumber)); err != nil {
  4321  		return errors.Wrapf(err, "DbDeleteNFTBidMappingsWithTxn: Deleting "+
  4322  			"nft bid mapping for nftBidKey %v", nftBidKey)
  4323  	}
  4324  
  4325  	return nil
  4326  }
  4327  
  4328  func DBDeleteNFTBidMappings(handle *badger.DB, nftBidKey *NFTBidKey) error {
  4329  
  4330  	return handle.Update(func(txn *badger.Txn) error {
  4331  		return DBDeleteNFTBidMappingsWithTxn(txn, nftBidKey)
  4332  	})
  4333  }
  4334  
  4335  func DBPutNFTBidEntryMappingsWithTxn(txn *badger.Txn, nftBidEntry *NFTBidEntry) error {
  4336  	// We store two indexes for NFT bids: (1) keyed by <post hash, serial number, bid amount, bidder PKID>
  4337  	// and (2) keyed by <bidder PKID, post hash, serial number> with the bid amount as the value. Both come in handy.
  4338  
  4339  	// Put the first index --> []byte{} (no data needs to be stored since all the info is in the key)
  4340  	if err := txn.Set(_dbKeyForNFTPostHashSerialNumberBidNanosBidderPKID(nftBidEntry), []byte{}); err != nil {
  4341  
  4342  		return errors.Wrapf(err, "DbPutNFTBidEntryMappingsWithTxn: Problem "+
  4343  			"adding mapping to BidderPKID for bid entry: %v", nftBidEntry)
  4344  	}
  4345  
  4346  	// Put the second index --> BidAmountNanos
  4347  	if err := txn.Set(_dbKeyForNFTBidderPKIDPostHashSerialNumber(
  4348  		nftBidEntry.BidderPKID, nftBidEntry.NFTPostHash, nftBidEntry.SerialNumber,
  4349  	), EncodeUint64(nftBidEntry.BidAmountNanos)); err != nil {
  4350  
  4351  		return errors.Wrapf(err, "DbPutNFTBidEntryMappingsWithTxn: Problem "+
  4352  			"adding mapping to BidAmountNanos for bid entry: %v", nftBidEntry)
  4353  	}
  4354  
  4355  	return nil
  4356  }
  4357  
  4358  func DBPutNFTBidEntryMappings(handle *badger.DB, nftEntry *NFTBidEntry) error {
  4359  
  4360  	return handle.Update(func(txn *badger.Txn) error {
  4361  		return DBPutNFTBidEntryMappingsWithTxn(txn, nftEntry)
  4362  	})
  4363  }
  4364  
  4365  func DBGetNFTBidEntriesForPKID(handle *badger.DB, bidderPKID *PKID) (_nftBidEntries []*NFTBidEntry) {
  4366  	nftBidEntries := []*NFTBidEntry{}
  4367  	{
  4368  		prefix := append([]byte{}, _PrefixBidderPKIDPostHashSerialNumberToBidNanos...)
  4369  		keyPrefix := append(prefix, bidderPKID[:]...)
  4370  		keysFound, valuesFound := _enumerateKeysForPrefix(handle, keyPrefix)
  4371  		bidderPKIDLength := len(bidderPKID[:])
  4372  		for ii, keyFound := range keysFound {
  4373  
  4374  			postHashStartIdx := 1 + bidderPKIDLength           // The length of prefix + length of PKID
  4375  			postHashEndIdx := postHashStartIdx + HashSizeBytes // Add the length of the post hash.
  4376  
  4377  			// Cut the post hash out of the key.
  4378  			postHashBytes := keyFound[postHashStartIdx:postHashEndIdx]
  4379  
  4380  			nftHash := &BlockHash{}
  4381  			copy(nftHash[:], postHashBytes)
  4382  
  4383  			serialNumber := DecodeUint64(keyFound[postHashEndIdx:])
  4384  
  4385  			bidAmountNanos := DecodeUint64(valuesFound[ii])
  4386  
  4387  			currentEntry := &NFTBidEntry{
  4388  				NFTPostHash:    nftHash,
  4389  				SerialNumber:   serialNumber,
  4390  				BidderPKID:     bidderPKID,
  4391  				BidAmountNanos: bidAmountNanos,
  4392  			}
  4393  			nftBidEntries = append(nftBidEntries, currentEntry)
  4394  		}
  4395  	}
  4396  	return nftBidEntries
  4397  }
  4398  
  4399  // Get NFT bid Entries *from the DB*. Does not include mempool txns.
  4400  func DBGetNFTBidEntries(handle *badger.DB, nftPostHash *BlockHash, serialNumber uint64,
  4401  ) (_nftBidEntries []*NFTBidEntry) {
  4402  	nftBidEntries := []*NFTBidEntry{}
  4403  	{
  4404  		prefix := append([]byte{}, _PrefixPostHashSerialNumberBidNanosBidderPKID...)
  4405  		keyPrefix := append(prefix, nftPostHash[:]...)
  4406  		keyPrefix = append(keyPrefix, EncodeUint64(serialNumber)...)
  4407  		keysFound, _ := _enumerateKeysForPrefix(handle, keyPrefix)
  4408  		for _, keyFound := range keysFound {
  4409  			bidAmountStartIdx := 1 + HashSizeBytes + 8 // The length of prefix + the post hash + the serial #.
  4410  			bidAmountEndIdx := bidAmountStartIdx + 8   // Add the length of the bid amount (uint64).
  4411  
  4412  			// Cut the bid amount out of the key and decode.
  4413  			bidAmountBytes := keyFound[bidAmountStartIdx:bidAmountEndIdx]
  4414  			bidAmountNanos := DecodeUint64(bidAmountBytes)
  4415  
  4416  			// Cut the pkid bytes out of the keys
  4417  			bidderPKIDBytes := keyFound[bidAmountEndIdx:]
  4418  
  4419  			// Construct the bidder PKID.
  4420  			bidderPKID := PublicKeyToPKID(bidderPKIDBytes)
  4421  
  4422  			currentEntry := &NFTBidEntry{
  4423  				NFTPostHash:    nftPostHash,
  4424  				SerialNumber:   serialNumber,
  4425  				BidderPKID:     bidderPKID,
  4426  				BidAmountNanos: bidAmountNanos,
  4427  			}
  4428  			nftBidEntries = append(nftBidEntries, currentEntry)
  4429  		}
  4430  	}
  4431  	return nftBidEntries
  4432  }
  4433  
  4434  func DBGetNFTBidEntriesPaginated(
  4435  	handle *badger.DB,
  4436  	nftHash *BlockHash,
  4437  	serialNumber uint64,
  4438  	startEntry *NFTBidEntry,
  4439  	limit int,
  4440  	reverse bool,
  4441  ) (_bidEntries []*NFTBidEntry) {
  4442  	seekKey := _dbSeekKeyForNFTBids(nftHash, serialNumber)
  4443  	startKey := seekKey
  4444  	if startEntry != nil {
  4445  		startKey = _dbKeyForNFTPostHashSerialNumberBidNanosBidderPKID(startEntry)
  4446  	}
  4447  	// The key length consists of: (1 prefix byte) + (BlockHash) + (2 x uint64) + (PKID)
  4448  	maxKeyLen := 1 + HashSizeBytes + 16 + btcec.PubKeyBytesLenCompressed
  4449  	keysBytes, _, _ := DBGetPaginatedKeysAndValuesForPrefix(
  4450  		handle,
  4451  		startKey,
  4452  		seekKey,
  4453  		maxKeyLen,
  4454  		limit,
  4455  		reverse,
  4456  		false)
  4457  	// TODO: We should probably handle the err case for this function.
  4458  
  4459  	// Chop up the keyBytes into bid entries.
  4460  	var bidEntries []*NFTBidEntry
  4461  	for _, keyBytes := range keysBytes {
  4462  		serialNumStartIdx := 1 + HashSizeBytes
  4463  		bidAmountStartIdx := serialNumStartIdx + 8
  4464  		bidderPKIDStartIdx := bidAmountStartIdx + 8
  4465  
  4466  		nftHashBytes := keyBytes[1:serialNumStartIdx]
  4467  		serialNumberBytes := keyBytes[serialNumStartIdx:bidAmountStartIdx]
  4468  		bidAmountBytes := keyBytes[bidAmountStartIdx:bidderPKIDStartIdx]
  4469  		bidderPKIDBytes := keyBytes[bidderPKIDStartIdx:]
  4470  
  4471  		nftHash := &BlockHash{}
  4472  		copy(nftHash[:], nftHashBytes)
  4473  		serialNumber := DecodeUint64(serialNumberBytes)
  4474  		bidAmount := DecodeUint64(bidAmountBytes)
  4475  		bidderPKID := &PKID{}
  4476  		copy(bidderPKID[:], bidderPKIDBytes)
  4477  
  4478  		bidEntry := &NFTBidEntry{
  4479  			NFTPostHash:    nftHash,
  4480  			SerialNumber:   serialNumber,
  4481  			BidAmountNanos: bidAmount,
  4482  			BidderPKID:     bidderPKID,
  4483  		}
  4484  
  4485  		bidEntries = append(bidEntries, bidEntry)
  4486  	}
  4487  
  4488  	return bidEntries
  4489  }
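
        // _exampleFetchTopNFTBids is an illustrative sketch of consuming the paginated bid
        // index above. Because the bid amount is encoded big-endian inside the key, a
        // reverse scan returns the highest bids first. Assumes `handle` is an open badger.DB.
        func _exampleFetchTopNFTBids(handle *badger.DB, nftHash *BlockHash, serialNumber uint64) []*NFTBidEntry {
        	// Fetch the ten highest bids for this serial number, highest first.
        	return DBGetNFTBidEntriesPaginated(
        		handle, nftHash, serialNumber, nil /*startEntry*/, 10 /*limit*/, true /*reverse*/)
        }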
  4490  
  4491  // ======================================================================================
  4492  // Authorize derived key functions
  4493  //  	<prefix, owner pub key [33]byte, derived pub key [33]byte> -> <DerivedKeyEntry>
  4494  // ======================================================================================
  4495  
  4496  func _dbKeyForOwnerToDerivedKeyMapping(
  4497  	ownerPublicKey PublicKey, derivedPublicKey PublicKey) []byte {
  4498  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  4499  	prefixCopy := append([]byte{}, _PrefixAuthorizeDerivedKey...)
  4500  	key := append(prefixCopy, ownerPublicKey[:]...)
  4501  	key = append(key, derivedPublicKey[:]...)
  4502  	return key
  4503  }
  4504  
  4505  func _dbSeekPrefixForDerivedKeyMappings(
  4506  	ownerPublicKey PublicKey) []byte {
  4507  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  4508  	prefixCopy := append([]byte{}, _PrefixAuthorizeDerivedKey...)
  4509  	key := append(prefixCopy, ownerPublicKey[:]...)
  4510  	return key
  4511  }
  4512  
  4513  func DBPutDerivedKeyMappingWithTxn(
  4514  	txn *badger.Txn, ownerPublicKey PublicKey, derivedPublicKey PublicKey, derivedKeyEntry *DerivedKeyEntry) error {
  4515  
  4516  	if len(ownerPublicKey) != btcec.PubKeyBytesLenCompressed {
  4517  		return fmt.Errorf("DBPutDerivedKeyMappingsWithTxn: Owner Public Key "+
  4518  			"length %d != %d", len(ownerPublicKey), btcec.PubKeyBytesLenCompressed)
  4519  	}
  4520  	if len(derivedPublicKey) != btcec.PubKeyBytesLenCompressed {
  4521  		return fmt.Errorf("DBPutDerivedKeyMappingsWithTxn: Derived Public Key "+
  4522  			"length %d != %d", len(derivedPublicKey), btcec.PubKeyBytesLenCompressed)
  4523  	}
  4524  
  4525  	key := _dbKeyForOwnerToDerivedKeyMapping(ownerPublicKey, derivedPublicKey)
  4526  
  4527  	derivedKeyEntryBuffer := bytes.NewBuffer([]byte{})
  4528  	gob.NewEncoder(derivedKeyEntryBuffer).Encode(derivedKeyEntry)
  4529  	return txn.Set(key, derivedKeyEntryBuffer.Bytes())
  4530  }
  4531  
  4532  func DBPutDerivedKeyMapping(
  4533  	handle *badger.DB, ownerPublicKey PublicKey, derivedPublicKey PublicKey, derivedKeyEntry *DerivedKeyEntry) error {
  4534  
  4535  	return handle.Update(func(txn *badger.Txn) error {
  4536  		return DBPutDerivedKeyMappingWithTxn(txn, ownerPublicKey, derivedPublicKey, derivedKeyEntry)
  4537  	})
  4538  }
  4539  
  4540  func DBGetOwnerToDerivedKeyMappingWithTxn(
  4541  	txn *badger.Txn, ownerPublicKey PublicKey, derivedPublicKey PublicKey) *DerivedKeyEntry {
  4542  
  4543  	key := _dbKeyForOwnerToDerivedKeyMapping(ownerPublicKey, derivedPublicKey)
  4544  	derivedKeyEntryItem, err := txn.Get(key)
  4545  	if err != nil {
  4546  		return nil
  4547  	}
  4548  	derivedKeyEntryBytes, err := derivedKeyEntryItem.ValueCopy(nil)
  4549  	if err != nil {
  4550  		return nil
  4551  	}
  4552  	derivedKeyEntry := &DerivedKeyEntry{}
  4553  	if err := gob.NewDecoder(bytes.NewReader(derivedKeyEntryBytes)).Decode(derivedKeyEntry); err != nil {
  4554  		return nil
  4555  	}
  4556  
  4557  	return derivedKeyEntry
  4558  }
  4559  
  4560  func DBGetOwnerToDerivedKeyMapping(
  4561  	db *badger.DB, ownerPublicKey PublicKey, derivedPublicKey PublicKey) *DerivedKeyEntry {
  4562  
  4563  	var derivedKeyEntry *DerivedKeyEntry
  4564  	db.View(func(txn *badger.Txn) error {
  4565  		derivedKeyEntry = DBGetOwnerToDerivedKeyMappingWithTxn(txn, ownerPublicKey, derivedPublicKey)
  4566  		return nil
  4567  	})
  4568  	return derivedKeyEntry
  4569  }
  4570  
  4571  func DBDeleteDerivedKeyMappingWithTxn(
  4572  	txn *badger.Txn, ownerPublicKey PublicKey, derivedPublicKey PublicKey) error {
  4573  
  4574  	// First check that a mapping exists for the passed in public keys.
  4575  	// If one doesn't exist then there's nothing to do.
  4576  	derivedKeyEntry := DBGetOwnerToDerivedKeyMappingWithTxn(
  4577  		txn, ownerPublicKey, derivedPublicKey)
  4578  	if derivedKeyEntry == nil {
  4579  		return nil
  4580  	}
  4581  
  4582  	// When a mapping exists, delete it.
  4583  	if err := txn.Delete(_dbKeyForOwnerToDerivedKeyMapping(ownerPublicKey, derivedPublicKey)); err != nil {
  4584  		return errors.Wrapf(err, "DBDeleteDerivedKeyMappingWithTxn: Deleting "+
  4585  			"ownerPublicKey %s and derivedPublicKey %s failed",
  4586  			PkToStringMainnet(ownerPublicKey[:]), PkToStringMainnet(derivedPublicKey[:]))
  4587  	}
  4588  
  4589  	return nil
  4590  }
  4591  
  4592  func DBDeleteDerivedKeyMapping(
  4593  	handle *badger.DB, ownerPublicKey PublicKey, derivedPublicKey PublicKey) error {
  4594  	return handle.Update(func(txn *badger.Txn) error {
  4595  		return DBDeleteDerivedKeyMappingWithTxn(txn, ownerPublicKey, derivedPublicKey)
  4596  	})
  4597  }
  4598  
  4599  func DBGetAllOwnerToDerivedKeyMappings(handle *badger.DB, ownerPublicKey PublicKey) (
  4600  	_entries []*DerivedKeyEntry, _err error) {
  4601  
  4602  	prefix := _dbSeekPrefixForDerivedKeyMappings(ownerPublicKey)
  4603  	_, valsFound := _enumerateKeysForPrefix(handle, prefix)
  4604  
  4605  	var derivedEntries []*DerivedKeyEntry
  4606  	for _, keyBytes := range valsFound {
  4607  		derivedKeyEntry := &DerivedKeyEntry{}
  4608  		err := gob.NewDecoder(bytes.NewReader(keyBytes)).Decode(derivedKeyEntry)
  4609  		if err != nil {
  4610  			return nil, err
  4611  		}
  4612  		derivedEntries = append(derivedEntries, derivedKeyEntry)
  4613  	}
  4614  
  4615  	return derivedEntries, nil
  4616  }
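
        // _exampleListDerivedKeys is an illustrative sketch of enumerating every derived key
        // an owner public key has authorized. Assumes `handle` is an open badger.DB; the
        // validity checks a real caller would perform on each entry are omitted.
        func _exampleListDerivedKeys(handle *badger.DB, ownerPublicKey PublicKey) {
        	derivedKeyEntries, err := DBGetAllOwnerToDerivedKeyMappings(handle, ownerPublicKey)
        	if err != nil {
        		glog.Errorf("_exampleListDerivedKeys: %v", err)
        		return
        	}
        	for _, derivedKeyEntry := range derivedKeyEntries {
        		glog.Infof("Found derived key entry: %v", derivedKeyEntry)
        	}
        }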
  4617  
  4618  // ======================================================================================
  4619  // Profile code
  4620  // ======================================================================================
  4621  func _dbKeyForPKIDToProfileEntry(pkid *PKID) []byte {
  4622  	prefixCopy := append([]byte{}, _PrefixPKIDToProfileEntry...)
  4623  	key := append(prefixCopy, pkid[:]...)
  4624  	return key
  4625  }
  4626  func _dbKeyForProfileUsernameToPKID(nonLowercaseUsername []byte) []byte {
  4627  	// Make a copy to avoid multiple calls to this function re-using the same slice.
  4628  	key := append([]byte{}, _PrefixProfileUsernameToPKID...)
  4629  	// Always lowercase the username when we use it as a key in our db. This allows
  4630  	// us to check uniqueness in a case-insensitive way.
  4631  	lowercaseUsername := []byte(strings.ToLower(string(nonLowercaseUsername)))
  4632  	key = append(key, lowercaseUsername...)
  4633  	return key
  4634  }
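
        // _exampleUsernameKeyIsCaseInsensitive is an illustrative sketch of the lowercasing
        // behavior above: two usernames differing only in case map to the same db key, which
        // is what makes username uniqueness case-insensitive.
        func _exampleUsernameKeyIsCaseInsensitive() bool {
        	keyA := _dbKeyForProfileUsernameToPKID([]byte("Alice"))
        	keyB := _dbKeyForProfileUsernameToPKID([]byte("alice"))
        	return bytes.Equal(keyA, keyB) // Always true.
        }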
  4635  
  4636  // This is the key we use to sort profiles by their amount of DeSo locked
  4637  func _dbKeyForCreatorDeSoLockedNanosCreatorPKID(desoLockedNanos uint64, pkid *PKID) []byte {
  4638  	key := append([]byte{}, _PrefixCreatorDeSoLockedNanosCreatorPKID...)
  4639  	key = append(key, EncodeUint64(desoLockedNanos)...)
  4640  	key = append(key, pkid[:]...)
  4641  	return key
  4642  }
  4643  
  4644  func DbPrefixForCreatorDeSoLockedNanosCreatorPKID() []byte {
  4645  	return append([]byte{}, _PrefixCreatorDeSoLockedNanosCreatorPKID...)
  4646  }
  4647  
  4648  func DBGetPKIDForUsernameWithTxn(
  4649  	txn *badger.Txn, username []byte) *PKID {
  4650  
  4651  	key := _dbKeyForProfileUsernameToPKID(username)
  4652  	profileEntryItem, err := txn.Get(key)
  4653  	if err != nil {
  4654  		return nil
  4655  	}
  4656  	pkidBytes, err := profileEntryItem.ValueCopy(nil)
  4657  	if err != nil {
  4658  		glog.Errorf("DBGetPKIDForUsernameWithTxn: Problem reading "+
  4659  			"public key for username %v: %v", string(username), err)
  4660  		return nil
  4661  	}
  4662  
  4663  	return PublicKeyToPKID(pkidBytes)
  4664  }
  4665  
  4666  func DBGetPKIDForUsername(db *badger.DB, username []byte) *PKID {
  4667  	var ret *PKID
  4668  	db.View(func(txn *badger.Txn) error {
  4669  		ret = DBGetPKIDForUsernameWithTxn(txn, username)
  4670  		return nil
  4671  	})
  4672  	return ret
  4673  }
  4674  
  4675  func DBGetProfileEntryForUsernameWithTxn(
  4676  	txn *badger.Txn, username []byte) *ProfileEntry {
  4677  
  4678  	pkid := DBGetPKIDForUsernameWithTxn(txn, username)
  4679  	if pkid == nil {
  4680  		return nil
  4681  	}
  4682  
  4683  	return DBGetProfileEntryForPKIDWithTxn(txn, pkid)
  4684  }
  4685  
  4686  func DBGetProfileEntryForUsername(db *badger.DB, username []byte) *ProfileEntry {
  4687  	var ret *ProfileEntry
  4688  	db.View(func(txn *badger.Txn) error {
  4689  		ret = DBGetProfileEntryForUsernameWithTxn(txn, username)
  4690  		return nil
  4691  	})
  4692  	return ret
  4693  }
  4694  
  4695  func DBGetProfileEntryForPKIDWithTxn(
  4696  	txn *badger.Txn, pkid *PKID) *ProfileEntry {
  4697  
  4698  	key := _dbKeyForPKIDToProfileEntry(pkid)
  4699  	profileEntryObj := &ProfileEntry{}
  4700  	profileEntryItem, err := txn.Get(key)
  4701  	if err != nil {
  4702  		return nil
  4703  	}
  4704  	err = profileEntryItem.Value(func(valBytes []byte) error {
  4705  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(profileEntryObj)
  4706  	})
  4707  	if err != nil {
  4708  		glog.Errorf("DBGetProfileEntryForPKIDWithTxn: Problem reading "+
  4709  			"ProfileEntry for PKID %v", pkid)
  4710  		return nil
  4711  	}
  4712  	return profileEntryObj
  4713  }
  4714  
  4715  func DBGetProfileEntryForPKID(db *badger.DB, pkid *PKID) *ProfileEntry {
  4716  	var ret *ProfileEntry
  4717  	db.View(func(txn *badger.Txn) error {
  4718  		ret = DBGetProfileEntryForPKIDWithTxn(txn, pkid)
  4719  		return nil
  4720  	})
  4721  	return ret
  4722  }
  4723  
  4724  func DBDeleteProfileEntryMappingsWithTxn(
  4725  	txn *badger.Txn, pkid *PKID, params *DeSoParams) error {
  4726  
  4727  	// First pull up the mapping that exists for the profile pub key passed in.
  4728  	// If one doesn't exist then there's nothing to do.
  4729  	profileEntry := DBGetProfileEntryForPKIDWithTxn(txn, pkid)
  4730  	if profileEntry == nil {
  4731  		return nil
  4732  	}
  4733  
  4734  	// When a profile exists, delete the pkid mapping for the profile.
  4735  	if err := txn.Delete(_dbKeyForPKIDToProfileEntry(pkid)); err != nil {
  4736  		return errors.Wrapf(err, "DbDeleteProfileEntryMappingsWithTxn: Deleting "+
  4737  			"profile mapping for profile PKID: %v",
  4738  			PkToString(pkid[:], params))
  4739  	}
  4740  
  4741  	if err := txn.Delete(
  4742  		_dbKeyForProfileUsernameToPKID(profileEntry.Username)); err != nil {
  4743  
  4744  		return errors.Wrapf(err, "DbDeleteProfileEntryMappingsWithTxn: Deleting "+
  4745  			"username mapping for profile username %v", string(profileEntry.Username))
  4746  	}
  4747  
  4748  	// The coin deso mapping
  4749  	if err := txn.Delete(
  4750  		_dbKeyForCreatorDeSoLockedNanosCreatorPKID(
  4751  			profileEntry.DeSoLockedNanos, pkid)); err != nil {
  4752  
  4753  		return errors.Wrapf(err, "DbDeleteProfileEntryMappingsWithTxn: Deleting "+
  4754  			"coin mapping for profile username %v", string(profileEntry.Username))
  4755  	}
  4756  
  4757  	return nil
  4758  }
  4759  
  4760  func DBDeleteProfileEntryMappings(
  4761  	handle *badger.DB, pkid *PKID, params *DeSoParams) error {
  4762  
  4763  	return handle.Update(func(txn *badger.Txn) error {
  4764  		return DBDeleteProfileEntryMappingsWithTxn(txn, pkid, params)
  4765  	})
  4766  }
  4767  
  4768  func DBPutProfileEntryMappingsWithTxn(
  4769  	txn *badger.Txn, profileEntry *ProfileEntry, pkid *PKID, params *DeSoParams) error {
  4770  
  4771  	profileDataBuf := bytes.NewBuffer([]byte{})
  4772  	gob.NewEncoder(profileDataBuf).Encode(profileEntry)
  4773  
  4774  	// Set the main PKID -> profile entry mapping.
  4775  	if err := txn.Set(_dbKeyForPKIDToProfileEntry(pkid), profileDataBuf.Bytes()); err != nil {
  4776  
  4777  		return errors.Wrapf(err, "DbPutProfileEntryMappingsWithTxn: Problem "+
  4778  			"adding mapping for profile: %v", PkToString(pkid[:], params))
  4779  	}
  4780  
  4781  	// Username
  4782  	if err := txn.Set(
  4783  		_dbKeyForProfileUsernameToPKID(profileEntry.Username),
  4784  		pkid[:]); err != nil {
  4785  
  4786  		return errors.Wrapf(err, "DbPutProfileEntryMappingsWithTxn: Problem "+
  4787  			"adding mapping for profile with username: %v", string(profileEntry.Username))
  4788  	}
  4789  
  4790  	// The coin deso mapping
  4791  	if err := txn.Set(
  4792  		_dbKeyForCreatorDeSoLockedNanosCreatorPKID(
  4793  			profileEntry.DeSoLockedNanos, pkid), []byte{}); err != nil {
  4794  
  4795  		return errors.Wrapf(err, "DbPutProfileEntryMappingsWithTxn: Problem "+
  4796  			"adding mapping for profile coin: ")
  4797  	}
  4798  
  4799  	return nil
  4800  }
  4801  
  4802  func DBPutProfileEntryMappings(
  4803  	handle *badger.DB, profileEntry *ProfileEntry, pkid *PKID, params *DeSoParams) error {
  4804  
  4805  	return handle.Update(func(txn *badger.Txn) error {
  4806  		return DBPutProfileEntryMappingsWithTxn(txn, profileEntry, pkid, params)
  4807  	})
  4808  }
  4809  
  4810  // DBGetAllProfilesByCoinValue returns all the profiles in the db with the
  4811  // highest coin values first.
  4812  //
  4813  // TODO(performance): This currently fetches all profiles. We should implement
  4814  // some kind of pagination instead.
  4815  func DBGetAllProfilesByCoinValue(handle *badger.DB, fetchEntries bool) (
  4816  	_lockedDeSoNanos []uint64, _profilePublicKeys []*PKID,
  4817  	_profileEntries []*ProfileEntry, _err error) {
  4818  
  4819  	lockedDeSoNanosFetched := []uint64{}
  4820  	profilePublicKeysFetched := []*PKID{}
  4821  	profileEntriesFetched := []*ProfileEntry{}
  4822  	dbPrefixx := append([]byte{}, _PrefixCreatorDeSoLockedNanosCreatorPKID...)
  4823  
  4824  	err := handle.View(func(txn *badger.Txn) error {
  4825  		opts := badger.DefaultIteratorOptions
  4826  
  4827  		opts.PrefetchValues = false
  4828  
  4829  		// Go in reverse order since a larger count is better.
  4830  		opts.Reverse = true
  4831  
  4832  		it := txn.NewIterator(opts)
  4833  		defer it.Close()
  4834  		// Since we iterate backwards, the prefix must be bigger than all possible
  4835  		// counts that could actually exist. We use eight bytes since the count is
  4836  		// encoded as a 64-bit big-endian byte slice, which will be eight bytes long.
  4837  		maxBigEndianUint64Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
  4838  		prefix := append(dbPrefixx, maxBigEndianUint64Bytes...)
  4839  		for it.Seek(prefix); it.ValidForPrefix(dbPrefixx); it.Next() {
  4840  			rawKey := it.Item().Key()
  4841  
  4842  			// Strip the prefix off the key and check its length. It should contain a
  4843  			// big-endian uint64 (the locked nanos) followed by a 33-byte PKID.
  4844  			lockedDeSoPubKeyConcatKey := rawKey[1:]
  4845  			uint64BytesLen := len(maxBigEndianUint64Bytes)
  4846  			expectedLength := uint64BytesLen + btcec.PubKeyBytesLenCompressed
  4847  			if len(lockedDeSoPubKeyConcatKey) != expectedLength {
  4848  				return fmt.Errorf("DBGetAllProfilesByCoinValue: Invalid key "+
  4849  					"length %d; expected exactly %d", len(lockedDeSoPubKeyConcatKey),
  4850  					expectedLength)
  4851  			}
  4852  
  4853  			lockedDeSoNanos := DecodeUint64(lockedDeSoPubKeyConcatKey[:uint64BytesLen])
  4854  
  4855  			// Appended to the locked-nanos value is the profile PKID, so extract it here.
  4856  			profilePKID := make([]byte, btcec.PubKeyBytesLenCompressed)
  4857  			copy(profilePKID[:], lockedDeSoPubKeyConcatKey[uint64BytesLen:])
  4858  
  4859  			lockedDeSoNanosFetched = append(lockedDeSoNanosFetched, lockedDeSoNanos)
  4860  			profilePublicKeysFetched = append(profilePublicKeysFetched, PublicKeyToPKID(profilePKID))
  4861  		}
  4862  		return nil
  4863  	})
  4864  	if err != nil {
  4865  		return nil, nil, nil, err
  4866  	}
  4867  
  4868  	if !fetchEntries {
  4869  		return lockedDeSoNanosFetched, profilePublicKeysFetched, nil, nil
  4870  	}
  4871  
  4872  	for _, profilePKID := range profilePublicKeysFetched {
  4873  		profileEntry := DBGetProfileEntryForPKID(handle, profilePKID)
  4874  		if profileEntry == nil {
  4875  			return nil, nil, nil, fmt.Errorf("DBGetAllProfilesByCoinValue: "+
  4876  				"ProfilePubKey %v does not have corresponding entry",
  4877  				PkToStringBoth(profilePKID[:]))
  4878  		}
  4879  		profileEntriesFetched = append(profileEntriesFetched, profileEntry)
  4880  	}
  4881  
  4882  	return lockedDeSoNanosFetched, profilePublicKeysFetched, profileEntriesFetched, nil
  4883  }
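
        // _exampleTopProfilesByDeSoLocked is an illustrative sketch of consuming
        // DBGetAllProfilesByCoinValue: the index is walked from the largest locked-DeSo value
        // downward, so the first results are the largest creators. Assumes `handle` is an
        // open badger.DB.
        func _exampleTopProfilesByDeSoLocked(handle *badger.DB) error {
        	lockedNanos, pkids, profileEntries, err := DBGetAllProfilesByCoinValue(handle, true /*fetchEntries*/)
        	if err != nil {
        		return errors.Wrap(err, "_exampleTopProfilesByDeSoLocked")
        	}
        	for ii := range profileEntries {
        		glog.Infof("Profile %v (PKID %v) has %d nanos of DeSo locked",
        			string(profileEntries[ii].Username), PkToStringBoth(pkids[ii][:]), lockedNanos[ii])
        	}
        	return nil
        }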
  4884  
  4885  // =====================================================================================
  4886  // Creator coin balance entry code
  4887  // =====================================================================================
  4888  func _dbKeyForHODLerPKIDCreatorPKIDToBalanceEntry(hodlerPKID *PKID, creatorPKID *PKID) []byte {
  4889  	key := append([]byte{}, _PrefixHODLerPKIDCreatorPKIDToBalanceEntry...)
  4890  	key = append(key, hodlerPKID[:]...)
  4891  	key = append(key, creatorPKID[:]...)
  4892  	return key
  4893  }
  4894  func _dbKeyForCreatorPKIDHODLerPKIDToBalanceEntry(creatorPKID *PKID, hodlerPKID *PKID) []byte {
  4895  	key := append([]byte{}, _PrefixCreatorPKIDHODLerPKIDToBalanceEntry...)
  4896  	key = append(key, creatorPKID[:]...)
  4897  	key = append(key, hodlerPKID[:]...)
  4898  	return key
  4899  }
  4900  
  4901  func DBGetCreatorCoinBalanceEntryForHODLerAndCreatorPKIDsWithTxn(
  4902  	txn *badger.Txn, hodlerPKID *PKID, creatorPKID *PKID) *BalanceEntry {
  4903  
  4904  	key := _dbKeyForHODLerPKIDCreatorPKIDToBalanceEntry(hodlerPKID, creatorPKID)
  4905  	balanceEntryObj := &BalanceEntry{}
  4906  	balanceEntryItem, err := txn.Get(key)
  4907  	if err != nil {
  4908  		return nil
  4909  	}
  4910  	err = balanceEntryItem.Value(func(valBytes []byte) error {
  4911  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(balanceEntryObj)
  4912  	})
  4913  	if err != nil {
  4914  		glog.Errorf("DBGetCreatorCoinBalanceEntryForHODLerAndCreatorPKIDsWithTxn: Problem reading "+
  4915  			"BalanceEntry for PKIDs %v %v",
  4916  			PkToStringBoth(hodlerPKID[:]), PkToStringBoth(creatorPKID[:]))
  4917  		return nil
  4918  	}
  4919  	return balanceEntryObj
  4920  }
  4921  
  4922  func DBGetCreatorCoinBalanceEntryForHODLerAndCreatorPKIDs(
  4923  	handle *badger.DB, hodlerPKID *PKID, creatorPKID *PKID) *BalanceEntry {
  4924  
  4925  	var ret *BalanceEntry
  4926  	handle.View(func(txn *badger.Txn) error {
  4927  		ret = DBGetCreatorCoinBalanceEntryForHODLerAndCreatorPKIDsWithTxn(
  4928  			txn, hodlerPKID, creatorPKID)
  4929  		return nil
  4930  	})
  4931  	return ret
  4932  }
  4933  
  4934  func DBGetCreatorCoinBalanceEntryForCreatorPKIDAndHODLerPubKeyWithTxn(
  4935  	txn *badger.Txn, creatorPKID *PKID, hodlerPKID *PKID) *BalanceEntry {
  4936  
  4937  	key := _dbKeyForCreatorPKIDHODLerPKIDToBalanceEntry(creatorPKID, hodlerPKID)
  4938  	balanceEntryObj := &BalanceEntry{}
  4939  	balanceEntryItem, err := txn.Get(key)
  4940  	if err != nil {
  4941  		return nil
  4942  	}
  4943  	err = balanceEntryItem.Value(func(valBytes []byte) error {
  4944  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(balanceEntryObj)
  4945  	})
  4946  	if err != nil {
  4947  		glog.Errorf("DBGetCreatorCoinBalanceEntryForCreatorPKIDAndHODLerPubKeyWithTxn: Problem reading "+
  4948  			"BalanceEntry for PKIDs %v %v",
  4949  			PkToStringBoth(hodlerPKID[:]), PkToStringBoth(creatorPKID[:]))
  4950  		return nil
  4951  	}
  4952  	return balanceEntryObj
  4953  }
  4954  
  4955  func DBDeleteCreatorCoinBalanceEntryMappingsWithTxn(
  4956  	txn *badger.Txn, hodlerPKID *PKID, creatorPKID *PKID,
  4957  	params *DeSoParams) error {
  4958  
  4959  	// First pull up the mappings that exists for the keys passed in.
  4960  	// If one doesn't exist then there's nothing to do.
  4961  	balanceEntry := DBGetCreatorCoinBalanceEntryForHODLerAndCreatorPKIDsWithTxn(
  4962  		txn, hodlerPKID, creatorPKID)
  4963  	if balanceEntry == nil {
  4964  		return nil
  4965  	}
  4966  
  4967  	// When an entry exists, delete the mappings for it.
  4968  	if err := txn.Delete(_dbKeyForHODLerPKIDCreatorPKIDToBalanceEntry(hodlerPKID, creatorPKID)); err != nil {
  4969  		return errors.Wrapf(err, "DbDeleteCreatorCoinBalanceEntryMappingsWithTxn: Deleting "+
  4970  			"mappings with keys: %v %v",
  4971  			PkToStringBoth(hodlerPKID[:]), PkToStringBoth(creatorPKID[:]))
  4972  	}
  4973  	if err := txn.Delete(_dbKeyForCreatorPKIDHODLerPKIDToBalanceEntry(creatorPKID, hodlerPKID)); err != nil {
  4974  		return errors.Wrapf(err, "DbDeleteCreatorCoinBalanceEntryMappingsWithTxn: Deleting "+
  4975  			"mappings with keys: %v %v",
  4976  			PkToStringBoth(hodlerPKID[:]), PkToStringBoth(creatorPKID[:]))
  4977  	}
  4978  
  4979  	// Note: We don't update the CreatorDeSoLockedNanosCreatorPKID index
  4980  	// because we expect that the caller is keeping the individual holdings in
  4981  	// sync with the "total" coins stored in the profile.
  4982  
  4983  	return nil
  4984  }
  4985  
  4986  func DBDeleteCreatorCoinBalanceEntryMappings(
  4987  	handle *badger.DB, hodlerPKID *PKID, creatorPKID *PKID,
  4988  	params *DeSoParams) error {
  4989  
  4990  	return handle.Update(func(txn *badger.Txn) error {
  4991  		return DBDeleteCreatorCoinBalanceEntryMappingsWithTxn(
  4992  			txn, hodlerPKID, creatorPKID, params)
  4993  	})
  4994  }
  4995  
  4996  func DBPutCreatorCoinBalanceEntryMappingsWithTxn(
  4997  	txn *badger.Txn, balanceEntry *BalanceEntry,
  4998  	params *DeSoParams) error {
  4999  
  5000  	balanceEntryDataBuf := bytes.NewBuffer([]byte{})
  5001  	gob.NewEncoder(balanceEntryDataBuf).Encode(balanceEntry)
  5002  
  5003  	// Set the forward direction for the HODLer
  5004  	if err := txn.Set(_dbKeyForHODLerPKIDCreatorPKIDToBalanceEntry(
  5005  		balanceEntry.HODLerPKID, balanceEntry.CreatorPKID),
  5006  		balanceEntryDataBuf.Bytes()); err != nil {
  5007  
  5008  		return errors.Wrapf(err, "DbPutCreatorCoinBalanceEntryMappingsWithTxn: Problem "+
  5009  			"adding forward mappings for pub keys: %v %v",
  5010  			PkToStringBoth(balanceEntry.HODLerPKID[:]),
  5011  			PkToStringBoth(balanceEntry.CreatorPKID[:]))
  5012  	}
  5013  
  5014  	// Set the reverse direction for the creator
  5015  	if err := txn.Set(_dbKeyForCreatorPKIDHODLerPKIDToBalanceEntry(
  5016  		balanceEntry.CreatorPKID, balanceEntry.HODLerPKID),
  5017  		balanceEntryDataBuf.Bytes()); err != nil {
  5018  
  5019  		return errors.Wrapf(err, "DbPutCreatorCoinBalanceEntryMappingsWithTxn: Problem "+
  5020  			"adding reverse mappings for pub keys: %v %v",
  5021  			PkToStringBoth(balanceEntry.HODLerPKID[:]),
  5022  			PkToStringBoth(balanceEntry.CreatorPKID[:]))
  5023  	}
  5024  
  5025  	return nil
  5026  }
  5027  
  5028  func DBPutCreatorCoinBalanceEntryMappings(
  5029  	handle *badger.DB, balanceEntry *BalanceEntry, params *DeSoParams) error {
  5030  
  5031  	return handle.Update(func(txn *badger.Txn) error {
  5032  		return DBPutCreatorCoinBalanceEntryMappingsWithTxn(
  5033  			txn, balanceEntry, params)
  5034  	})
  5035  }
  5036  
  5037  // GetSingleBalanceEntryFromPublicKeys fetches the single balance entry that a holder has for a creator's coin.
  5038  // Returns nil if the balance entry never existed.
  5039  func GetSingleBalanceEntryFromPublicKeys(holder []byte, creator []byte, utxoView *UtxoView) (*BalanceEntry, error) {
  5040  	holderPKIDEntry := utxoView.GetPKIDForPublicKey(holder)
  5041  	if holderPKIDEntry == nil || holderPKIDEntry.isDeleted {
  5042  		return nil, fmt.Errorf("DbGetSingleBalanceEntryFromPublicKeys: holderPKID was nil or deleted; this should never happen")
  5043  	}
  5044  	holderPKID := holderPKIDEntry.PKID
  5045  	creatorPKIDEntry := utxoView.GetPKIDForPublicKey(creator)
  5046  	if creatorPKIDEntry == nil || creatorPKIDEntry.isDeleted {
  5047  		return nil, fmt.Errorf("DbGetSingleBalanceEntryFromPublicKeys: creatorPKID was nil or deleted; this should never happen")
  5048  	}
  5049  	creatorPKID := creatorPKIDEntry.PKID
  5050  
  5051  	// Check if there's a balance entry in the view
  5052  	balanceEntryMapKey := BalanceEntryMapKey{HODLerPKID: *holderPKID, CreatorPKID: *creatorPKID}
  5053  	balanceEntryFromView, _ := utxoView.HODLerPKIDCreatorPKIDToBalanceEntry[balanceEntryMapKey]
  5054  	if balanceEntryFromView != nil {
  5055  		return balanceEntryFromView, nil
  5056  	}
  5057  
  5058  	// Check if there's a balance entry in the database
  5059  	balanceEntryFromDb := DbGetBalanceEntry(utxoView.Handle, holderPKID, creatorPKID)
  5060  	return balanceEntryFromDb, nil
  5061  }
  5062  
  5063  // DbGetBalanceEntry returns a balance entry from the database
  5064  func DbGetBalanceEntry(db *badger.DB, holder *PKID, creator *PKID) *BalanceEntry {
  5065  	var ret *BalanceEntry
  5066  	db.View(func(txn *badger.Txn) error {
  5067  		ret = DbGetHolderPKIDCreatorPKIDToBalanceEntryWithTxn(txn, holder, creator)
  5068  		return nil
  5069  	})
  5070  	return ret
  5071  }
  5072  
  5073  func DbGetHolderPKIDCreatorPKIDToBalanceEntryWithTxn(txn *badger.Txn, holder *PKID, creator *PKID) *BalanceEntry {
  5074  	key := _dbKeyForCreatorPKIDHODLerPKIDToBalanceEntry(creator, holder)
  5075  	balanceEntryObj := &BalanceEntry{}
  5076  	balanceEntryItem, err := txn.Get(key)
  5077  	if err != nil {
  5078  		return nil
  5079  	}
  5080  	err = balanceEntryItem.Value(func(valBytes []byte) error {
  5081  		return gob.NewDecoder(bytes.NewReader(valBytes)).Decode(balanceEntryObj)
  5082  	})
  5083  	if err != nil {
  5084  		glog.Errorf("DbGetHolderPKIDCreatorPKIDToBalanceEntryWithTxn: Problem decoding "+
  5085  			"balance entry for holder %v and creator %v", PkToStringMainnet(PKIDToPublicKey(holder)), PkToStringMainnet(PKIDToPublicKey(creator)))
  5086  		return nil
  5087  	}
  5088  	return balanceEntryObj
  5089  }
  5090  
  5091  // DbGetBalanceEntriesYouHold fetches the BalanceEntries for the coins that the passed-in pkid holds.
  5092  func DbGetBalanceEntriesYouHold(db *badger.DB, pkid *PKID, filterOutZeroBalances bool) ([]*BalanceEntry, error) {
  5093  	// Get the balance entries for the coins that *you hold*
  5094  	balanceEntriesYouHodl := []*BalanceEntry{}
  5095  	{
  5096  		prefix := append([]byte{}, _PrefixHODLerPKIDCreatorPKIDToBalanceEntry...)
  5097  		keyPrefix := append(prefix, pkid[:]...)
  5098  		_, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix)
  5099  		for _, byteString := range entryByteStringsFound {
  5100  			currentEntry := &BalanceEntry{}
  5101  			gob.NewDecoder(bytes.NewReader(byteString)).Decode(currentEntry)
  5102  			if filterOutZeroBalances && currentEntry.BalanceNanos == 0 {
  5103  				continue
  5104  			}
  5105  			balanceEntriesYouHodl = append(balanceEntriesYouHodl, currentEntry)
  5106  		}
  5107  	}
  5108  
  5109  	return balanceEntriesYouHodl, nil
  5110  }
  5111  
  5112  // DbGetBalanceEntriesHodlingYou fetches the BalanceEntries that hold the pkid passed in.
  5113  func DbGetBalanceEntriesHodlingYou(db *badger.DB, pkid *PKID, filterOutZeroBalances bool) ([]*BalanceEntry, error) {
  5114  	// Get the balance entries for the coins that *hold you*
  5115  	balanceEntriesThatHodlYou := []*BalanceEntry{}
  5116  	{
  5117  		prefix := append([]byte{}, _PrefixCreatorPKIDHODLerPKIDToBalanceEntry...)
  5118  		keyPrefix := append(prefix, pkid[:]...)
  5119  		_, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix)
  5120  		for _, byteString := range entryByteStringsFound {
  5121  			currentEntry := &BalanceEntry{}
  5122  			gob.NewDecoder(bytes.NewReader(byteString)).Decode(currentEntry)
  5123  			if filterOutZeroBalances && currentEntry.BalanceNanos == 0 {
  5124  				continue
  5125  			}
  5126  			balanceEntriesThatHodlYou = append(balanceEntriesThatHodlYou, currentEntry)
  5127  		}
  5128  	}
  5129  
  5130  	return balanceEntriesThatHodlYou, nil
  5131  }
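
        // _exampleCreatorCoinHoldings is an illustrative sketch of the two balance-entry
        // directions above: the coins a user holds versus the holders of that user's own
        // coin. Assumes `db` is an open badger.DB and `pkid` is a valid PKID.
        func _exampleCreatorCoinHoldings(db *badger.DB, pkid *PKID) error {
        	// Coins this PKID holds, skipping zero balances.
        	balancesYouHold, err := DbGetBalanceEntriesYouHold(db, pkid, true /*filterOutZeroBalances*/)
        	if err != nil {
        		return errors.Wrap(err, "_exampleCreatorCoinHoldings")
        	}
        	// Holders of this PKID's own creator coin.
        	balancesHoldingYou, err := DbGetBalanceEntriesHodlingYou(db, pkid, true /*filterOutZeroBalances*/)
        	if err != nil {
        		return errors.Wrap(err, "_exampleCreatorCoinHoldings")
        	}
        	glog.Infof("PKID holds %d coins and has %d holders", len(balancesYouHold), len(balancesHoldingYou))
        	return nil
        }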
  5132  
  5133  // =====================================================================================
  5134  // End coin balance entry code
  5135  // =====================================================================================
  5136  
  5137  // startPrefix specifies a point in the DB at which the iteration should start.
  5138  // It doesn't have to map to an exact key because badger will just binary search
  5139  // and start right before/after that location.
  5140  //
  5141  // validForPrefix helps determine when the iteration should stop. The iteration
  5142  // stops at the last entry that has this prefix. Setting it to
  5143  // an empty byte string would cause the iteration to seek to the beginning of the db,
  5144  // whereas setting it to one of the _Prefix bytes would cause the iteration to stop
  5145  // at the last entry with that prefix.
  5146  //
  5147  // maxKeyLen is required so we can pad the key with FF when the caller wants to
  5148  // seek backwards (a quirk of badgerdb). When non-zero it is also used to validate
  5149  // the length of every key returned; pass zero to skip that check.
  5150  //
  5151  // numToFetch specifies the number of entries to fetch. If set to zero then it
  5152  // fetches all entries that match the validForPrefix passed in.
  5153  func DBGetPaginatedKeysAndValuesForPrefixWithTxn(
  5154  	dbTxn *badger.Txn, startPrefix []byte, validForPrefix []byte,
  5155  	maxKeyLen int, numToFetch int, reverse bool, fetchValues bool) (
  5156  
  5157  	_keysFound [][]byte, _valsFound [][]byte, _err error) {
  5158  
  5159  	keysFound := [][]byte{}
  5160  	valsFound := [][]byte{}
  5161  
  5162  	opts := badger.DefaultIteratorOptions
  5163  
  5164  	opts.PrefetchValues = fetchValues
  5165  
  5166  	// Optionally go in reverse order.
  5167  	opts.Reverse = reverse
  5168  
  5169  	it := dbTxn.NewIterator(opts)
  5170  	defer it.Close()
  5171  	prefix := startPrefix
  5172  	if reverse {
  5173  		// When we iterate backwards, the prefix must be bigger than all possible
  5174  		// keys that could actually exist with this prefix. We achieve this by
  5175  		// padding the end of the dbPrefixx passed in up to the key length.
  5176  		prefix = make([]byte, maxKeyLen)
  5177  		for ii := 0; ii < maxKeyLen; ii++ {
  5178  			if ii < len(startPrefix) {
  5179  				prefix[ii] = startPrefix[ii]
  5180  			} else {
  5181  				prefix[ii] = 0xFF
  5182  			}
  5183  		}
  5184  	}
  5185  	for it.Seek(prefix); it.ValidForPrefix(validForPrefix); it.Next() {
  5186  		keyCopy := it.Item().KeyCopy(nil)
  5187  		if maxKeyLen != 0 && len(keyCopy) != maxKeyLen {
  5188  			return nil, nil, fmt.Errorf(
  5189  				"DBGetPaginatedKeysAndValuesForPrefixWithTxn: Invalid key length %v != %v",
  5190  				len(keyCopy), maxKeyLen)
  5191  		}
  5192  
  5193  		var valCopy []byte
  5194  		if fetchValues {
  5195  			var err error
  5196  			valCopy, err = it.Item().ValueCopy(nil)
  5197  			if err != nil {
  5198  				return nil, nil, fmt.Errorf("DBGetPaginatedKeysAndValuesForPrefixWithTxn: "+
  5199  					"Error fetching value: %v", err)
  5200  			}
  5201  		}
  5202  
  5203  		keysFound = append(keysFound, keyCopy)
  5204  		valsFound = append(valsFound, valCopy)
  5205  
  5206  		if numToFetch != 0 && len(keysFound) == numToFetch {
  5207  			break
  5208  		}
  5209  	}
  5210  
  5211  	// Return whatever we found.
  5212  	return keysFound, valsFound, nil
  5213  }
  5214  
  5215  func DBGetPaginatedKeysAndValuesForPrefix(
  5216  	db *badger.DB, startPrefix []byte, validForPrefix []byte,
  5217  	keyLen int, numToFetch int, reverse bool, fetchValues bool) (
  5218  	_keysFound [][]byte, _valsFound [][]byte, _err error) {
  5219  
  5220  	keysFound := [][]byte{}
  5221  	valsFound := [][]byte{}
  5222  
  5223  	dbErr := db.View(func(txn *badger.Txn) error {
  5224  		var err error
  5225  		keysFound, valsFound, err = DBGetPaginatedKeysAndValuesForPrefixWithTxn(
  5226  			txn, startPrefix, validForPrefix, keyLen,
  5227  			numToFetch, reverse, fetchValues)
  5228  		if err != nil {
  5229  			return fmt.Errorf("DBGetPaginatedKeysAndValuesForPrefix: %v", err)
  5230  		}
  5231  		return nil
  5232  	})
  5233  	if dbErr != nil {
  5234  		return nil, nil, dbErr
  5235  	}
  5236  
  5237  	return keysFound, valsFound, nil
  5238  }
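
        // _examplePaginatedPrefixScan is an illustrative sketch of the pagination helper
        // above: it pulls the first ten NFT-entry keys for a post in reverse, i.e. highest
        // serial number first. The maxKeyLen math mirrors the layout produced by
        // _dbKeyForNFTPostHashSerialNumber; `handle` is assumed to be an open badger.DB.
        func _examplePaginatedPrefixScan(handle *badger.DB, nftPostHash *BlockHash) ([][]byte, error) {
        	validForPrefix := append([]byte{}, _PrefixPostHashSerialNumberToNFTEntry...)
        	validForPrefix = append(validForPrefix, nftPostHash[:]...)
        	// <prefix byte> + <post hash> + <serial number uint64>
        	maxKeyLen := 1 + HashSizeBytes + 8
        	keysFound, _, err := DBGetPaginatedKeysAndValuesForPrefix(
        		handle, validForPrefix /*startPrefix*/, validForPrefix, /*validForPrefix*/
        		maxKeyLen, 10 /*numToFetch*/, true /*reverse*/, false /*fetchValues*/)
        	return keysFound, err
        }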
  5239  
  5240  func DBGetPaginatedPostsOrderedByTime(
  5241  	db *badger.DB, startPostTimestampNanos uint64, startPostHash *BlockHash,
  5242  	numToFetch int, fetchPostEntries bool, reverse bool) (
  5243  	_postHashes []*BlockHash, _tstampNanos []uint64, _postEntries []*PostEntry,
  5244  	_err error) {
  5245  
  5246  	startPostPrefix := append([]byte{}, _PrefixTstampNanosPostHash...)
  5247  
  5248  	if startPostTimestampNanos > 0 {
  5249  		startTstampBytes := EncodeUint64(startPostTimestampNanos)
  5250  		startPostPrefix = append(startPostPrefix, startTstampBytes...)
  5251  	}
  5252  
  5253  	if startPostHash != nil {
  5254  		startPostPrefix = append(startPostPrefix, startPostHash[:]...)
  5255  	}
  5256  
  5257  	// We fetch in reverse to get the latest posts.
  5258  	maxUint64Tstamp := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
  5259  	postIndexKeys, _, err := DBGetPaginatedKeysAndValuesForPrefix(
  5260  		db, startPostPrefix, _PrefixTstampNanosPostHash, /*validForPrefix*/
  5261  		len(_PrefixTstampNanosPostHash)+len(maxUint64Tstamp)+HashSizeBytes, /*keyLen*/
  5262  		numToFetch, reverse /*reverse*/, false /*fetchValues*/)
  5263  	if err != nil {
  5264  		return nil, nil, nil, fmt.Errorf("DBGetPaginatedPostsOrderedByTime: %v", err)
  5265  	}
  5266  
  5267  	// Cut the post hashes and timestamps out of the returned keys.
  5268  	postHashes := []*BlockHash{}
  5269  	tstamps := []uint64{}
  5270  	startTstampIndex := len(_PrefixTstampNanosPostHash)
  5271  	hashStartIndex := len(_PrefixTstampNanosPostHash) + len(maxUint64Tstamp)
  5272  	hashEndIndex := hashStartIndex + HashSizeBytes
  5273  	for _, postKeyBytes := range postIndexKeys {
  5274  		currentPostHash := &BlockHash{}
  5275  		copy(currentPostHash[:], postKeyBytes[hashStartIndex:hashEndIndex])
  5276  		postHashes = append(postHashes, currentPostHash)
  5277  
  5278  		tstamps = append(tstamps, DecodeUint64(
  5279  			postKeyBytes[startTstampIndex:hashStartIndex]))
  5280  	}
  5281  
  5282  	// Fetch the PostEntries if desired.
  5283  	var postEntries []*PostEntry
  5284  	if fetchPostEntries {
  5285  		for _, postHash := range postHashes {
  5286  			postEntry := DBGetPostEntryByPostHash(db, postHash)
  5287  			if postEntry == nil {
  5288  				return nil, nil, nil, fmt.Errorf("DBGetPaginatedPostsOrderedByTime: "+
  5289  					"PostHash %v does not have corresponding entry", postHash)
  5290  			}
  5291  			postEntries = append(postEntries, postEntry)
  5292  		}
  5293  	}
  5294  
  5295  	return postHashes, tstamps, postEntries, nil
  5296  }
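
// Editorial example (not part of the original file): a sketch of fetching the most
// recent posts and then the next page. The page size of 25 is illustrative only.
func examplePageRecentPosts(db *badger.DB) ([]*BlockHash, error) {
	// A zero timestamp and nil post hash start the iteration at the newest post.
	postHashes, tstamps, _, err := DBGetPaginatedPostsOrderedByTime(
		db, 0 /*startPostTimestampNanos*/, nil /*startPostHash*/, 25, /*numToFetch*/
		false /*fetchPostEntries*/, true /*reverse*/)
	if err != nil {
		return nil, err
	}
	if len(postHashes) == 25 {
		// The next page starts from the oldest post seen so far.
		nextHashes, _, _, err := DBGetPaginatedPostsOrderedByTime(
			db, tstamps[len(tstamps)-1], postHashes[len(postHashes)-1], 25, false, true)
		if err != nil {
			return nil, err
		}
		if len(nextHashes) > 1 {
			// Drop the first entry, which repeats the start post because the seek is inclusive.
			postHashes = append(postHashes, nextHashes[1:]...)
		}
	}
	return postHashes, nil
}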
  5297  
  5298  func DBGetProfilesByUsernamePrefixAndDeSoLocked(
  5299  	db *badger.DB, usernamePrefix string, utxoView *UtxoView) (
  5300  	_profileEntries []*ProfileEntry, _err error) {
  5301  
  5302  	startPrefix := append([]byte{}, _PrefixProfileUsernameToPKID...)
  5303  	lowercaseUsernamePrefixString := strings.ToLower(usernamePrefix)
  5304  	lowercaseUsernamePrefix := []byte(lowercaseUsernamePrefixString)
  5305  	startPrefix = append(startPrefix, lowercaseUsernamePrefix...)
  5306  
  5307  	_, pkidsFound, err := DBGetPaginatedKeysAndValuesForPrefix(
  5308  		db /*db*/, startPrefix, /*startPrefix*/
  5309  		startPrefix /*validForPrefix*/, 0, /*keyLen (zero disables the key-length check; unused since reverse == false)*/
  5310  		0 /*numToFetch (zero fetches all)*/, false, /*reverse*/
  5311  		true /*fetchValues*/)
  5312  	if err != nil {
  5313  		return nil, fmt.Errorf("DBGetProfilesByUsernamePrefixAndDeSoLocked: %v", err)
  5314  	}
  5315  
  5316  	// Have to do this to convert the PKIDs back into public keys
  5317  	// TODO: We should clean things up around public keys vs PKIDs
  5318  	pubKeysMap := make(map[PkMapKey][]byte)
  5319  	for _, pkidBytes := range pkidsFound {
  5320  		if len(pkidBytes) != btcec.PubKeyBytesLenCompressed {
  5321  			continue
  5322  		}
  5323  		pkid := &PKID{}
  5324  		copy(pkid[:], pkidBytes)
  5325  		pubKey := DBGetPublicKeyForPKID(db, pkid)
  5326  		if len(pubKey) != 0 {
  5327  			pubKeysMap[MakePkMapKey(pubKey)] = pubKey
  5328  		}
  5329  	}
  5330  
  5331  	for username, profileEntry := range utxoView.ProfileUsernameToProfileEntry {
  5332  		if strings.HasPrefix(string(username[:]), lowercaseUsernamePrefixString) {
  5333  			pkMapKey := MakePkMapKey(profileEntry.PublicKey)
  5334  			pubKeysMap[pkMapKey] = profileEntry.PublicKey
  5335  		}
  5336  	}
  5337  
  5338  	// Sigh.. convert the public keys *back* into PKIDs...
  5339  	profilesFound := []*ProfileEntry{}
  5340  	for _, pk := range pubKeysMap {
  5341  		pkid := utxoView.GetPKIDForPublicKey(pk).PKID
  5342  		profile := utxoView.GetProfileEntryForPKID(pkid)
  5343  		// Double-check that a username matches the prefix.
  5344  		// If a user had the handle "elon" and then changed to "jeff" and that transaction hadn't mined yet,
  5345  		// we would return the profile for "jeff" when we search for "elon" which is incorrect.
  5346  		if profile != nil && strings.HasPrefix(strings.ToLower(string(profile.Username[:])), lowercaseUsernamePrefixString) {
  5347  			profilesFound = append(profilesFound, profile)
  5348  		}
  5349  	}
  5350  
  5351  	// Sort the matching profiles before returning them. Username searches are always
  5352  	// sorted by the amount of DeSo locked in the profile's creator coin.
  5353  	sort.Slice(profilesFound, func(ii, jj int) bool {
  5354  		return profilesFound[ii].CoinEntry.DeSoLockedNanos > profilesFound[jj].CoinEntry.DeSoLockedNanos
  5355  	})
  5356  
  5357  	return profilesFound, nil
  5358  }
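
// Editorial example (not part of the original file): a sketch of a case-insensitive
// username prefix search. The utxoView is assumed to have been built elsewhere (e.g.
// augmented with mempool transactions) so that unconfirmed profile changes are included.
func exampleSearchProfilesByUsernamePrefix(
	db *badger.DB, utxoView *UtxoView) ([]*ProfileEntry, error) {

	// Returns profiles whose username starts with "elo", ordered by DeSo locked.
	return DBGetProfilesByUsernamePrefixAndDeSoLocked(db, "Elo", utxoView)
}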
  5359  
  5360  // DBGetPaginatedProfilesByDeSoLocked returns up to 'numToFetch' profiles from the db.
  5361  func DBGetPaginatedProfilesByDeSoLocked(
  5362  	db *badger.DB, startDeSoLockedNanos uint64,
  5363  	startProfilePubKey []byte, numToFetch int, fetchProfileEntries bool) (
  5364  	_profilePublicKeys [][]byte, _profileEntries []*ProfileEntry, _err error) {
  5365  
  5366  	// Convert the start public key to a PKID.
  5367  	pkidEntry := DBGetPKIDEntryForPublicKey(db, startProfilePubKey)
  5368  
  5369  	startProfilePrefix := append([]byte{}, _PrefixCreatorDeSoLockedNanosCreatorPKID...)
  5370  	var startDeSoLockedBytes []byte
  5371  	if pkidEntry != nil {
  5372  		startDeSoLockedBytes = EncodeUint64(startDeSoLockedNanos)
  5373  		startProfilePrefix = append(startProfilePrefix, startDeSoLockedBytes...)
  5374  		startProfilePrefix = append(startProfilePrefix, pkidEntry.PKID[:]...)
  5375  	} else {
  5376  		// If no pub key is provided, we max out DeSo locked and start at the top of the list.
  5377  		maxBigEndianUint64Bytes := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
  5378  		startDeSoLockedBytes = maxBigEndianUint64Bytes
  5379  		startProfilePrefix = append(startProfilePrefix, startDeSoLockedBytes...)
  5380  	}
  5381  
  5382  	keyLen := len(_PrefixCreatorDeSoLockedNanosCreatorPKID) + len(startDeSoLockedBytes) + btcec.PubKeyBytesLenCompressed
  5383  	// We fetch in reverse to get the profiles with the most DeSo locked.
  5384  	profileIndexKeys, _, err := DBGetPaginatedKeysAndValuesForPrefix(
  5385  		db, startProfilePrefix, _PrefixCreatorDeSoLockedNanosCreatorPKID, /*validForPrefix*/
  5386  		keyLen /*keyLen*/, numToFetch,
  5387  		true /*reverse*/, false /*fetchValues*/)
  5388  	if err != nil {
  5389  		return nil, nil, fmt.Errorf("DBGetPaginatedProfilesByDeSoLocked: %v", err)
  5390  	}
  5391  
  5392  	// Cut the pkids out of the returned keys.
  5393  	profilePKIDs := [][]byte{}
  5394  	startPKIDIndex := len(_PrefixCreatorDeSoLockedNanosCreatorPKID) + len(startDeSoLockedBytes)
  5395  	endPKIDIndex := startPKIDIndex + btcec.PubKeyBytesLenCompressed
  5396  	for _, profileKeyBytes := range profileIndexKeys {
  5397  		currentPKID := make([]byte, btcec.PubKeyBytesLenCompressed)
  5398  		copy(currentPKID, profileKeyBytes[startPKIDIndex:endPKIDIndex])
  5399  		profilePKIDs = append(profilePKIDs, currentPKID)
  5400  	}
  5401  
  5402  	profilePubKeys := [][]byte{}
  5403  	for _, pkidBytes := range profilePKIDs {
  5404  		pkid := &PKID{}
  5405  		copy(pkid[:], pkidBytes)
  5406  		profilePubKeys = append(profilePubKeys, DBGetPublicKeyForPKID(db, pkid))
  5407  	}
  5408  
  5409  	if !fetchProfileEntries {
  5410  		return profilePubKeys, nil, nil
  5411  	}
  5412  
  5413  	// Fetch the ProfileEntries if desired.
  5414  	var profileEntries []*ProfileEntry
  5415  	for _, profilePKID := range profilePKIDs {
  5416  		pkid := &PKID{}
  5417  		copy(pkid[:], profilePKID)
  5418  		profileEntry := DBGetProfileEntryForPKID(db, pkid)
  5419  		if profileEntry == nil {
  5420  			return nil, nil, fmt.Errorf("DBGetPaginatedProfilesByDeSoLocked: "+
  5421  				"ProfilePKID %v does not have corresponding entry",
  5422  				PkToStringBoth(profilePKID))
  5423  		}
  5424  		profileEntries = append(profileEntries, profileEntry)
  5425  	}
  5426  
  5427  	return profilePubKeys, profileEntries, nil
  5428  }
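
// Editorial example (not part of the original file): a sketch of fetching the profiles
// with the most DeSo locked. The page size of 20 is illustrative; a nil start public
// key begins at the top of the list.
func exampleProfileLeaderboardPage(db *badger.DB) ([][]byte, []*ProfileEntry, error) {
	// startDeSoLockedNanos is ignored when no start public key is supplied, so any
	// value works for the first page.
	return DBGetPaginatedProfilesByDeSoLocked(
		db, math.MaxUint64 /*startDeSoLockedNanos*/, nil, /*startProfilePubKey*/
		20 /*numToFetch*/, true /*fetchProfileEntries*/)
}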
  5429  
  5430  // -------------------------------------------------------------------------------------
  5431  // Mempool Txn mapping functions
  5432  // <prefix, time added uint64 big-endian, txn hash BlockHash> -> <serialized MsgDeSoTxn>
  5433  // -------------------------------------------------------------------------------------
  5434  
  5435  func _dbKeyForMempoolTxn(mempoolTx *MempoolTx) []byte {
  5436  	// Copy the prefix so the appends below don't mutate the shared prefix slice across calls.
  5437  	prefixCopy := append([]byte{}, _PrefixMempoolTxnHashToMsgDeSoTxn...)
  5438  	timeAddedBytes := EncodeUint64(uint64(mempoolTx.Added.UnixNano()))
  5439  	key := append(prefixCopy, timeAddedBytes...)
  5440  	key = append(key, mempoolTx.Hash[:]...)
  5441  
  5442  	return key
  5443  }
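
// Editorial example (not part of the original file): a sketch illustrating the layout of
// the key built above. Because the time-added is encoded big-endian (EncodeUint64) right
// after the prefix, lexicographic key order in badger matches chronological order, which
// is what DbGetAllMempoolTxnsSortedByTimeAdded relies on further down.
func exampleInspectMempoolTxnKey(mempoolTx *MempoolTx) {
	key := _dbKeyForMempoolTxn(mempoolTx)
	prefixLen := len(_PrefixMempoolTxnHashToMsgDeSoTxn)
	timeAdded := DecodeUint64(key[prefixLen : prefixLen+8])
	txnHash := key[prefixLen+8:]
	glog.Infof("exampleInspectMempoolTxnKey: timeAdded=%d hash=%x", timeAdded, txnHash)
}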
  5444  
  5445  func DbPutMempoolTxnWithTxn(txn *badger.Txn, mempoolTx *MempoolTx) error {
  5446  
  5447  	mempoolTxnBytes, err := mempoolTx.Tx.ToBytes(false /*preSignatureBool*/)
  5448  	if err != nil {
  5449  		return errors.Wrapf(err, "DbPutMempoolTxnWithTxn: Problem encoding mempoolTxn to bytes.")
  5450  	}
  5451  
  5452  	if err := txn.Set(_dbKeyForMempoolTxn(mempoolTx), mempoolTxnBytes); err != nil {
  5453  		return errors.Wrapf(err, "DbPutMempoolTxnWithTxn: Problem putting mapping for txn hash: %s", mempoolTx.Hash.String())
  5454  	}
  5455  
  5456  	return nil
  5457  }
  5458  
  5459  func DbPutMempoolTxn(handle *badger.DB, mempoolTx *MempoolTx) error {
  5460  
  5461  	return handle.Update(func(txn *badger.Txn) error {
  5462  		return DbPutMempoolTxnWithTxn(txn, mempoolTx)
  5463  	})
  5464  }
  5465  
  5466  func DbGetMempoolTxnWithTxn(txn *badger.Txn, mempoolTx *MempoolTx) *MsgDeSoTxn {
  5467  
  5468  	mempoolTxnObj := &MsgDeSoTxn{}
  5469  	mempoolTxnItem, err := txn.Get(_dbKeyForMempoolTxn(mempoolTx))
  5470  	if err != nil {
  5471  		return nil
  5472  	}
  5473  	err = mempoolTxnItem.Value(func(valBytes []byte) error {
  5474  		return mempoolTxnObj.FromBytes(valBytes) // mirrors the ToBytes encoding in DbPutMempoolTxnWithTxn
  5475  	})
  5476  	if err != nil {
  5477  		glog.Errorf("DbGetMempoolTxnWithTxn: Problem reading "+
  5478  			"Tx for tx hash %s: %v", mempoolTx.Hash.String(), err)
  5479  		return nil
  5480  	}
  5481  	return mempoolTxnObj
  5482  }
  5483  
  5484  func DbGetMempoolTxn(db *badger.DB, mempoolTx *MempoolTx) *MsgDeSoTxn {
  5485  	var ret *MsgDeSoTxn
  5486  	db.View(func(txn *badger.Txn) error {
  5487  		ret = DbGetMempoolTxnWithTxn(txn, mempoolTx)
  5488  		return nil
  5489  	})
  5490  	return ret
  5491  }
  5492  
  5493  func DbGetAllMempoolTxnsSortedByTimeAdded(handle *badger.DB) (_mempoolTxns []*MsgDeSoTxn, _error error) {
  5494  	_, valuesFound := _enumerateKeysForPrefix(handle, _PrefixMempoolTxnHashToMsgDeSoTxn)
  5495  
  5496  	mempoolTxns := []*MsgDeSoTxn{}
  5497  	for _, mempoolTxnBytes := range valuesFound {
  5498  		mempoolTxn := &MsgDeSoTxn{}
  5499  		err := mempoolTxn.FromBytes(mempoolTxnBytes)
  5500  		if err != nil {
  5501  			return nil, errors.Wrapf(err, "DbGetAllMempoolTxnsSortedByTimeAdded: failed to decode mempoolTxnBytes.")
  5502  		}
  5503  		mempoolTxns = append(mempoolTxns, mempoolTxn)
  5504  	}
  5505  
  5506  	// We don't need to sort the transactions because the DB keys include the time added and
  5507  	// are therefore retrieved from badger in order.
  5508  
  5509  	return mempoolTxns, nil
  5510  }
  5511  
  5512  func DbDeleteAllMempoolTxnsWithTxn(txn *badger.Txn) error {
  5513  	txnKeysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, _PrefixMempoolTxnHashToMsgDeSoTxn)
  5514  	if err != nil {
  5515  		return errors.Wrapf(err, "DbDeleteAllMempoolTxnsWithTxn: ")
  5516  	}
  5517  
  5518  	for _, txnKey := range txnKeysFound {
  5519  		err := DbDeleteMempoolTxnKeyWithTxn(txn, txnKey)
  5520  		if err != nil {
  5521  			return errors.Wrapf(err, "DbDeleteAllMempoolTxnsWithTxn: Deleting mempool txnKey failed.")
  5522  		}
  5523  	}
  5524  
  5525  	return nil
  5526  }
  5527  
  5528  func FlushMempoolToDbWithTxn(txn *badger.Txn, allTxns []*MempoolTx) error {
  5529  	for _, mempoolTx := range allTxns {
  5530  		err := DbPutMempoolTxnWithTxn(txn, mempoolTx)
  5531  		if err != nil {
  5532  			return errors.Wrapf(err, "FlushMempoolToDbWithTxn: Putting "+
  5533  				"mempool tx hash %s failed.", mempoolTx.Hash.String())
  5534  		}
  5535  	}
  5536  
  5537  	return nil
  5538  }
  5539  
  5540  func FlushMempoolToDb(handle *badger.DB, allTxns []*MempoolTx) error {
  5541  	err := handle.Update(func(txn *badger.Txn) error {
  5542  		return FlushMempoolToDbWithTxn(txn, allTxns)
  5543  	})
  5544  	if err != nil {
  5545  		return err
  5546  	}
  5547  
  5548  	return nil
  5549  }
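
// Editorial example (not part of the original file): a sketch of persisting the in-memory
// mempool and reading it back in time-added order. The allTxns slice is assumed to come
// from the node's mempool.
func exampleMempoolRoundTrip(handle *badger.DB, allTxns []*MempoolTx) error {
	if err := FlushMempoolToDb(handle, allTxns); err != nil {
		return errors.Wrapf(err, "exampleMempoolRoundTrip: flush failed")
	}
	mempoolTxns, err := DbGetAllMempoolTxnsSortedByTimeAdded(handle)
	if err != nil {
		return errors.Wrapf(err, "exampleMempoolRoundTrip: read-back failed")
	}
	glog.Infof("exampleMempoolRoundTrip: read back %d mempool txns", len(mempoolTxns))
	return nil
}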
  5550  
  5551  func DbDeleteAllMempoolTxns(handle *badger.DB) error {
  5552  	// Return the error from the transaction rather than swallowing it.
  5553  	return handle.Update(func(txn *badger.Txn) error {
  5554  		return DbDeleteAllMempoolTxnsWithTxn(txn)
  5555  	})
  5556  }
  5558  
  5559  func DbDeleteMempoolTxnWithTxn(txn *badger.Txn, mempoolTx *MempoolTx) error {
  5560  
  5561  	// When a mapping exists, delete it.
  5562  	if err := txn.Delete(_dbKeyForMempoolTxn(mempoolTx)); err != nil {
  5563  		return errors.Wrapf(err, "DbDeleteMempoolTxnWithTxn: Deleting "+
  5564  			"mempool tx key failed.")
  5565  	}
  5566  
  5567  	return nil
  5568  }
  5569  
  5570  func DbDeleteMempoolTxn(handle *badger.DB, mempoolTx *MempoolTx) error {
  5571  	return handle.Update(func(txn *badger.Txn) error {
  5572  		return DbDeleteMempoolTxnWithTxn(txn, mempoolTx)
  5573  	})
  5574  }
  5575  
  5576  func DbDeleteMempoolTxnKey(handle *badger.DB, txnKey []byte) error {
  5577  	return handle.Update(func(txn *badger.Txn) error {
  5578  		return DbDeleteMempoolTxnKeyWithTxn(txn, txnKey)
  5579  	})
  5580  }
  5581  
  5582  func DbDeleteMempoolTxnKeyWithTxn(txn *badger.Txn, txnKey []byte) error {
  5583  
  5584  	// When a mapping exists, delete it.
  5585  	if err := txn.Delete(txnKey); err != nil {
  5586  		return errors.Wrapf(err, "DbDeleteMempoolTxnKeyWithTxn: Deleting "+
  5587  			"mempool tx key failed.")
  5588  	}
  5589  
  5590  	return nil
  5591  }
  5592  
  5593  func LogDBSummarySnapshot(db *badger.DB) {
  5594  	keyCountMap := make(map[byte]int)
  5595  	for prefixByte := byte(0); prefixByte < byte(40); prefixByte++ {
  5596  		keysForPrefix, _ := EnumerateKeysForPrefix(db, []byte{prefixByte})
  5597  		keyCountMap[prefixByte] = len(keysForPrefix)
  5598  	}
  5599  	glog.Info(spew.Sprintf("LogDBSummarySnapshot: Current DB summary snapshot: %v", keyCountMap))
  5600  }
  5601  
  5602  func StartDBSummarySnapshots(db *badger.DB) {
  5603  	// Periodically count the number of keys for each prefix in the DB and log.
  5604  	go func() {
  5605  		for {
  5606  			// Figure out how many keys there are for each prefix and log.
  5607  			glog.Info("StartDBSummarySnapshots: Counting DB keys...")
  5608  			LogDBSummarySnapshot(db)
  5609  			time.Sleep(30 * time.Second)
  5610  		}
  5611  	}()
  5612  }
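
// Editorial example (not part of the original file): a sketch of wiring the periodic DB
// summary logging into node startup. StartDBSummarySnapshots returns immediately and
// logs a per-prefix key count every 30 seconds from a background goroutine.
func exampleEnableDBSummaryLogging(db *badger.DB) {
	StartDBSummarySnapshots(db)
}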