github.com/cryptohub-digital/blockbook@v0.3.5-0.20240403155730-99ab40b9104c/db/rocksdb.go

     1  package db
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"encoding/hex"
     7  	"fmt"
     8  	"math/big"
     9  	"os"
    10  	"path/filepath"
    11  	"sort"
    12  	"strconv"
    13  	"sync"
    14  	"time"
    15  	"unsafe"
    16  
    17  	vlq "github.com/bsm/go-vlq"
    18  	"github.com/cryptohub-digital/blockbook/bchain"
    19  	"github.com/cryptohub-digital/blockbook/common"
    20  	"github.com/golang/glog"
    21  	"github.com/juju/errors"
    22  	"github.com/linxGnu/grocksdb"
    23  )
    24  
    25  const dbVersion = 6
    26  
    27  const packedHeightBytes = 4
    28  const maxAddrDescLen = 1024
    29  
     30  // an iterator creates a snapshot, which takes a lot of resources
     31  // when doing a huge scan, it is better to close the iterator and reopen it from time to time to free the resources
    32  const refreshIterator = 5000000
    33  
    34  // RepairRocksDB calls RocksDb db repair function
    35  func RepairRocksDB(name string) error {
    36  	glog.Infof("rocksdb: repair")
    37  	opts := grocksdb.NewDefaultOptions()
    38  	return grocksdb.RepairDb(name, opts)
    39  }
    40  
    41  type connectBlockStats struct {
    42  	txAddressesHit  int
    43  	txAddressesMiss int
    44  	balancesHit     int
    45  	balancesMiss    int
    46  }
    47  
    48  // AddressBalanceDetail specifies what data are returned by GetAddressBalance
    49  type AddressBalanceDetail int
    50  
    51  const (
    52  	// AddressBalanceDetailNoUTXO returns address balance without utxos
    53  	AddressBalanceDetailNoUTXO = 0
    54  	// AddressBalanceDetailUTXO returns address balance with utxos
    55  	AddressBalanceDetailUTXO = 1
    56  	// addressBalanceDetailUTXOIndexed returns address balance with utxos and index for updates, used only internally
    57  	addressBalanceDetailUTXOIndexed = 2
    58  )
    59  
    60  // RocksDB handle
    61  type RocksDB struct {
    62  	path          string
    63  	db            *grocksdb.DB
    64  	wo            *grocksdb.WriteOptions
    65  	ro            *grocksdb.ReadOptions
    66  	cfh           []*grocksdb.ColumnFamilyHandle
    67  	chainParser   bchain.BlockChainParser
    68  	is            *common.InternalState
    69  	metrics       *common.Metrics
    70  	cache         *grocksdb.Cache
    71  	maxOpenFiles  int
    72  	cbs           connectBlockStats
    73  	extendedIndex bool
    74  }
    75  
    76  const (
    77  	cfDefault = iota
    78  	cfHeight
    79  	cfAddresses
    80  	cfBlockTxs
    81  	cfTransactions
    82  	cfFiatRates
    83  	// BitcoinType
    84  	cfAddressBalance
    85  	cfTxAddresses
    86  
    87  	__break__
    88  
    89  	// EthereumType
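         	// the EthereumType columns below reuse the indexes of the BitcoinType columns
         	// (cfAddressContracts == cfAddressBalance), because only one of the two
         	// type-specific sets is appended to cfNames at runtime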
    90  	cfAddressContracts = iota - __break__ + cfAddressBalance - 1
    91  	cfInternalData
    92  	cfContracts
    93  	cfFunctionSignatures
    94  	cfBlockInternalDataErrors
    95  
    96  	// TODO move to common section
    97  	cfAddressAliases
    98  )
    99  
   100  // common columns
   101  var cfNames []string
   102  var cfBaseNames = []string{"default", "height", "addresses", "blockTxs", "transactions", "fiatRates"}
   103  
   104  // type specific columns
   105  var cfNamesBitcoinType = []string{"addressBalance", "txAddresses"}
   106  var cfNamesEthereumType = []string{"addressContracts", "internalData", "contracts", "functionSignatures", "blockInternalDataErrors", "addressAliases"}
   107  
   108  func openDB(path string, c *grocksdb.Cache, openFiles int) (*grocksdb.DB, []*grocksdb.ColumnFamilyHandle, error) {
   109  	// opts with bloom filter
   110  	opts := createAndSetDBOptions(10, c, openFiles)
   111  	// opts for addresses without bloom filter
   112  	// from documentation: if most of your queries are executed using iterators, you shouldn't set bloom filter
   113  	optsAddresses := createAndSetDBOptions(0, c, openFiles)
    114  	// options for the base columns: default, height, addresses, blockTxs, transactions, fiatRates
   115  	cfOptions := []*grocksdb.Options{opts, opts, optsAddresses, opts, opts, opts}
   116  	// append type specific options
   117  	count := len(cfNames) - len(cfOptions)
   118  	for i := 0; i < count; i++ {
   119  		cfOptions = append(cfOptions, opts)
   120  	}
   121  	db, cfh, err := grocksdb.OpenDbColumnFamilies(opts, path, cfNames, cfOptions)
   122  	if err != nil {
   123  		return nil, nil, err
   124  	}
   125  	return db, cfh, nil
   126  }
   127  
    128  // NewRocksDB opens an internal handle to the RocksDB environment. Close
    129  // needs to be called to release it.
   130  func NewRocksDB(path string, cacheSize, maxOpenFiles int, parser bchain.BlockChainParser, metrics *common.Metrics, extendedIndex bool) (d *RocksDB, err error) {
   131  	glog.Infof("rocksdb: opening %s, required data version %v, cache size %v, max open files %v", path, dbVersion, cacheSize, maxOpenFiles)
   132  
   133  	cfNames = append([]string{}, cfBaseNames...)
   134  	chainType := parser.GetChainType()
   135  	if chainType == bchain.ChainBitcoinType {
   136  		cfNames = append(cfNames, cfNamesBitcoinType...)
   137  	} else if chainType == bchain.ChainEthereumType || chainType == bchain.ChainCoreCoinType {
   138  		cfNames = append(cfNames, cfNamesEthereumType...)
   139  		extendedIndex = false
   140  	} else {
   141  		return nil, errors.New("Unknown chain type")
   142  	}
   143  
   144  	c := grocksdb.NewLRUCache(uint64(cacheSize))
   145  	db, cfh, err := openDB(path, c, maxOpenFiles)
   146  	if err != nil {
   147  		return nil, err
   148  	}
   149  	wo := grocksdb.NewDefaultWriteOptions()
   150  	ro := grocksdb.NewDefaultReadOptions()
   151  	return &RocksDB{path, db, wo, ro, cfh, parser, nil, metrics, c, maxOpenFiles, connectBlockStats{}, extendedIndex}, nil
   152  }
   153  
   154  func (d *RocksDB) closeDB() error {
   155  	for _, h := range d.cfh {
   156  		h.Destroy()
   157  	}
   158  	d.db.Close()
   159  	d.db = nil
   160  	return nil
   161  }
   162  
   163  // Close releases the RocksDB environment opened in NewRocksDB.
   164  func (d *RocksDB) Close() error {
   165  	if d.db != nil {
   166  		// store the internal state of the app
   167  		if d.is != nil && d.is.DbState == common.DbStateOpen {
   168  			d.is.DbState = common.DbStateClosed
   169  			if err := d.StoreInternalState(d.is); err != nil {
   170  				glog.Info("internalState: ", err)
   171  			}
   172  		}
   173  		glog.Infof("rocksdb: close")
   174  		d.closeDB()
   175  		d.wo.Destroy()
   176  		d.ro.Destroy()
   177  	}
   178  	return nil
   179  }
   180  
   181  // Reopen reopens the database
    182  // It closes and reopens the db; nobody can access the database during the operation!
   183  func (d *RocksDB) Reopen() error {
   184  	err := d.closeDB()
   185  	if err != nil {
   186  		return err
   187  	}
   188  	d.db = nil
   189  	db, cfh, err := openDB(d.path, d.cache, d.maxOpenFiles)
   190  	if err != nil {
   191  		return err
   192  	}
   193  	d.db, d.cfh = db, cfh
   194  	return nil
   195  }
   196  
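         // atoUint64 converts a string to uint64, returning 0 if the string is not a valid number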
   197  func atoUint64(s string) uint64 {
   198  	i, err := strconv.Atoi(s)
   199  	if err != nil {
   200  		return 0
   201  	}
   202  	return uint64(i)
   203  }
   204  
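         // WriteBatch writes the write batch to the database using the default write options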
   205  func (d *RocksDB) WriteBatch(wb *grocksdb.WriteBatch) error {
   206  	return d.db.Write(d.wo, wb)
   207  }
   208  
   209  // HasExtendedIndex returns true if the DB indexes input txids and spending data
   210  func (d *RocksDB) HasExtendedIndex() bool {
   211  	return d.extendedIndex
   212  }
   213  
   214  // GetMemoryStats returns memory usage statistics as reported by RocksDB
   215  func (d *RocksDB) GetMemoryStats() string {
   216  	var total, indexAndFilter, memtable uint64
   217  	type columnStats struct {
   218  		name           string
   219  		indexAndFilter string
   220  		memtable       string
   221  	}
   222  	cs := make([]columnStats, len(cfNames))
   223  	for i := 0; i < len(cfNames); i++ {
   224  		cs[i].name = cfNames[i]
   225  		cs[i].indexAndFilter = d.db.GetPropertyCF("rocksdb.estimate-table-readers-mem", d.cfh[i])
   226  		cs[i].memtable = d.db.GetPropertyCF("rocksdb.cur-size-all-mem-tables", d.cfh[i])
   227  		indexAndFilter += atoUint64(cs[i].indexAndFilter)
   228  		memtable += atoUint64(cs[i].memtable)
   229  	}
   230  	m := struct {
   231  		cacheUsage       uint64
   232  		pinnedCacheUsage uint64
   233  		columns          []columnStats
   234  	}{
   235  		cacheUsage:       d.cache.GetUsage(),
   236  		pinnedCacheUsage: d.cache.GetPinnedUsage(),
   237  		columns:          cs,
   238  	}
   239  	total = m.cacheUsage + indexAndFilter + memtable
   240  	return fmt.Sprintf("Total %d, indexAndFilter %d, memtable %d, %+v", total, indexAndFilter, memtable, m)
   241  }
   242  
    243  // StopIteration is returned by the callback function to signal the end of the iteration
   244  type StopIteration struct{}
   245  
   246  func (e *StopIteration) Error() string {
   247  	return ""
   248  }
   249  
   250  // GetTransactionsCallback is called by GetTransactions/GetAddrDescTransactions for each found tx
    251  // indexes is an array of indexes (negative for inputs, positive for outputs) at which the given address appears in the tx
   252  type GetTransactionsCallback func(txid string, height uint32, indexes []int32) error
   253  
    254  // GetTransactions finds all input/output transactions for the given address
    255  // Transactions are passed to the callback function.
   256  func (d *RocksDB) GetTransactions(address string, lower uint32, higher uint32, fn GetTransactionsCallback) (err error) {
   257  	if glog.V(1) {
   258  		glog.Infof("rocksdb: address get %s %d-%d ", address, lower, higher)
   259  	}
   260  	addrDesc, err := d.chainParser.GetAddrDescFromAddress(address)
   261  	if err != nil {
   262  		return err
   263  	}
   264  	return d.GetAddrDescTransactions(addrDesc, lower, higher, fn)
   265  }
   266  
    267  // GetAddrDescTransactions finds all input/output transactions for the given address descriptor
    268  // Transactions are passed to the callback function in order from the newest block to the oldest
   269  func (d *RocksDB) GetAddrDescTransactions(addrDesc bchain.AddressDescriptor, lower uint32, higher uint32, fn GetTransactionsCallback) (err error) {
   270  	txidUnpackedLen := d.chainParser.PackedTxidLen()
   271  	addrDescLen := len(addrDesc)
   272  	startKey := packAddressKey(addrDesc, higher)
   273  	stopKey := packAddressKey(addrDesc, lower)
   274  	indexes := make([]int32, 0, 16)
   275  	it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddresses])
   276  	defer it.Close()
   277  	for it.Seek(startKey); it.Valid(); it.Next() {
   278  		key := it.Key().Data()
   279  		if bytes.Compare(key, stopKey) > 0 {
   280  			break
   281  		}
   282  		if len(key) != addrDescLen+packedHeightBytes {
   283  			if glog.V(2) {
   284  				glog.Warningf("rocksdb: addrDesc %s - mixed with %s", addrDesc, hex.EncodeToString(key))
   285  			}
   286  			continue
   287  		}
   288  		val := it.Value().Data()
   289  		if glog.V(2) {
   290  			glog.Infof("rocksdb: addresses %s: %s", hex.EncodeToString(key), hex.EncodeToString(val))
   291  		}
   292  		_, height, err := unpackAddressKey(key)
   293  		if err != nil {
   294  			return err
   295  		}
   296  		for len(val) > txidUnpackedLen {
   297  			tx, err := d.chainParser.UnpackTxid(val[:txidUnpackedLen])
   298  			if err != nil {
   299  				return err
   300  			}
   301  			indexes = indexes[:0]
   302  			val = val[txidUnpackedLen:]
   303  			for {
   304  				index, l := unpackVarint32(val)
   305  				indexes = append(indexes, index>>1)
   306  				val = val[l:]
   307  				if index&1 == 1 {
   308  					break
   309  				} else if len(val) == 0 {
   310  					glog.Warningf("rocksdb: addresses contain incorrect data %s: %s", hex.EncodeToString(key), hex.EncodeToString(val))
   311  					break
   312  				}
   313  			}
   314  			if err := fn(tx, height, indexes); err != nil {
   315  				if _, ok := err.(*StopIteration); ok {
   316  					return nil
   317  				}
   318  				return err
   319  			}
   320  		}
   321  		if len(val) != 0 {
   322  			glog.Warningf("rocksdb: addresses contain incorrect data %s: %s", hex.EncodeToString(key), hex.EncodeToString(val))
   323  		}
   324  	}
   325  	return nil
   326  }
   327  
   328  const (
   329  	opInsert = 0
   330  	opDelete = 1
   331  )
   332  
   333  // ConnectBlock indexes addresses in the block and stores them in db
   334  func (d *RocksDB) ConnectBlock(block *bchain.Block) error {
   335  	wb := grocksdb.NewWriteBatch()
   336  	defer wb.Destroy()
   337  
   338  	if glog.V(2) {
   339  		glog.Infof("rocksdb: insert %d %s", block.Height, block.Hash)
   340  	}
   341  
   342  	chainType := d.chainParser.GetChainType()
   343  
   344  	if err := d.writeHeightFromBlock(wb, block, opInsert); err != nil {
   345  		return err
   346  	}
   347  	addresses := make(addressesMap)
   348  	if chainType == bchain.ChainBitcoinType {
   349  		txAddressesMap := make(map[string]*TxAddresses)
   350  		balances := make(map[string]*AddrBalance)
   351  		if err := d.processAddressesBitcoinType(block, addresses, txAddressesMap, balances); err != nil {
   352  			return err
   353  		}
   354  		if err := d.storeTxAddresses(wb, txAddressesMap); err != nil {
   355  			return err
   356  		}
   357  		if err := d.storeBalances(wb, balances); err != nil {
   358  			return err
   359  		}
   360  		if err := d.storeAndCleanupBlockTxs(wb, block); err != nil {
   361  			return err
   362  		}
   363  	} else if chainType == bchain.ChainEthereumType {
   364  		addressContracts := make(map[string]*AddrContracts)
   365  		blockTxs, err := d.processAddressesEthereumType(block, addresses, addressContracts)
   366  		if err != nil {
   367  			return err
   368  		}
   369  		if err := d.storeAddressContracts(wb, addressContracts); err != nil {
   370  			return err
   371  		}
   372  		if err := d.storeInternalDataEthereumType(wb, blockTxs); err != nil {
   373  			return err
   374  		}
   375  		if err = d.storeBlockSpecificDataEthereumType(wb, block); err != nil {
   376  			return err
   377  		}
   378  		if err := d.storeAndCleanupBlockTxsEthereumType(wb, block, blockTxs); err != nil {
   379  			return err
   380  		}
   381  	} else if chainType == bchain.ChainCoreCoinType {
   382  		addressContracts := make(map[string]*AddrContracts)
   383  		blockTxs, err := d.processAddressesCoreCoinType(block, addresses, addressContracts)
   384  		if err != nil {
   385  			return err
   386  		}
   387  		if err := d.storeAddressContracts(wb, addressContracts); err != nil {
   388  			return err
   389  		}
   390  		if err = d.storeBlockSpecificDataCoreCoinType(wb, block); err != nil {
   391  			return err
   392  		}
   393  		if err := d.storeAndCleanupBlockTxsCoreCoinType(wb, block, blockTxs); err != nil {
   394  			return err
   395  		}
   396  	} else {
   397  		return errors.New("Unknown chain type")
   398  	}
   399  	if err := d.storeAddresses(wb, block.Height, addresses); err != nil {
   400  		return err
   401  	}
   402  	if err := d.WriteBatch(wb); err != nil {
   403  		return err
   404  	}
   405  	avg := d.is.AppendBlockTime(uint32(block.Time))
   406  	if d.metrics != nil {
   407  		d.metrics.AvgBlockPeriod.Set(float64(avg))
   408  	}
   409  	return nil
   410  }
   411  
   412  // Addresses index
   413  
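         // txIndexes holds a packed txid and the indexes (negative for inputs, positive for outputs) at which an address appears in the tx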
   414  type txIndexes struct {
   415  	btxID   []byte
   416  	indexes []int32
   417  }
   418  
   419  // addressesMap is a map of addresses in a block
   420  // each address contains a slice of transactions with indexes where the address appears
    421  // a slice is used instead of a map so that the order is defined; linear search is also fast for a small number of items
   422  type addressesMap map[string][]txIndexes
   423  
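         // outpoint references a transaction output - a packed txid and the output (vout) index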
   424  type outpoint struct {
   425  	btxID []byte
   426  	index int32
   427  }
   428  
   429  // TxInput holds input data of the transaction in TxAddresses
   430  type TxInput struct {
   431  	AddrDesc bchain.AddressDescriptor
   432  	ValueSat big.Int
   433  	// extended index properties
   434  	Txid string
   435  	Vout uint32
   436  }
   437  
   438  // Addresses converts AddressDescriptor of the input to array of strings
   439  func (ti *TxInput) Addresses(p bchain.BlockChainParser) ([]string, bool, error) {
   440  	return p.GetAddressesFromAddrDesc(ti.AddrDesc)
   441  }
   442  
   443  // TxOutput holds output data of the transaction in TxAddresses
   444  type TxOutput struct {
   445  	AddrDesc bchain.AddressDescriptor
   446  	Spent    bool
   447  	ValueSat big.Int
   448  	// extended index properties
   449  	SpentTxid   string
   450  	SpentIndex  uint32
   451  	SpentHeight uint32
   452  }
   453  
   454  // Addresses converts AddressDescriptor of the output to array of strings
   455  func (to *TxOutput) Addresses(p bchain.BlockChainParser) ([]string, bool, error) {
   456  	return p.GetAddressesFromAddrDesc(to.AddrDesc)
   457  }
   458  
   459  // TxAddresses stores transaction inputs and outputs with amounts
   460  type TxAddresses struct {
   461  	Height  uint32
   462  	Inputs  []TxInput
   463  	Outputs []TxOutput
   464  	// extended index properties
   465  	VSize uint32
   466  }
   467  
   468  // Utxo holds information about unspent transaction output
   469  type Utxo struct {
   470  	BtxID    []byte
   471  	Vout     int32
   472  	Height   uint32
   473  	ValueSat big.Int
   474  }
   475  
   476  // AddrBalance stores number of transactions and balances of an address
   477  type AddrBalance struct {
   478  	Txs        uint32
   479  	SentSat    big.Int
   480  	BalanceSat big.Int
   481  	Utxos      []Utxo
   482  	utxosMap   map[string]int
   483  }
   484  
   485  // ReceivedSat computes received amount from total balance and sent amount
   486  func (ab *AddrBalance) ReceivedSat() *big.Int {
   487  	var r big.Int
   488  	r.Add(&ab.BalanceSat, &ab.SentSat)
   489  	return &r
   490  }
   491  
    492  // addUtxo appends the utxo to the list of utxos and updates the utxosMap index when it is needed
   493  func (ab *AddrBalance) addUtxo(u *Utxo) {
   494  	ab.Utxos = append(ab.Utxos, *u)
   495  	ab.manageUtxoMap(u)
   496  }
   497  
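         // manageUtxoMap builds the utxosMap index once the number of utxos reaches 16 and keeps it updated afterwards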
   498  func (ab *AddrBalance) manageUtxoMap(u *Utxo) {
   499  	l := len(ab.Utxos)
   500  	if l >= 16 {
   501  		if len(ab.utxosMap) == 0 {
   502  			ab.utxosMap = make(map[string]int, 32)
   503  			for i := 0; i < l; i++ {
   504  				s := string(ab.Utxos[i].BtxID)
   505  				if _, e := ab.utxosMap[s]; !e {
   506  					ab.utxosMap[s] = i
   507  				}
   508  			}
   509  		} else {
   510  			s := string(u.BtxID)
   511  			if _, e := ab.utxosMap[s]; !e {
   512  				ab.utxosMap[s] = l - 1
   513  			}
   514  		}
   515  	}
   516  }
   517  
    518  // on disconnect, the restored utxos must be inserted at the right position so that the utxosMap index keeps working
   519  func (ab *AddrBalance) addUtxoInDisconnect(u *Utxo) {
   520  	insert := -1
   521  	if len(ab.utxosMap) > 0 {
   522  		if i, e := ab.utxosMap[string(u.BtxID)]; e {
   523  			insert = i
   524  		}
   525  	} else {
   526  		for i := range ab.Utxos {
   527  			utxo := &ab.Utxos[i]
   528  			if *(*int)(unsafe.Pointer(&utxo.BtxID[0])) == *(*int)(unsafe.Pointer(&u.BtxID[0])) && bytes.Equal(utxo.BtxID, u.BtxID) {
   529  				insert = i
   530  				break
   531  			}
   532  		}
   533  	}
   534  	if insert > -1 {
   535  		// check if it is necessary to insert the utxo into the array
   536  		for i := insert; i < len(ab.Utxos); i++ {
   537  			utxo := &ab.Utxos[i]
   538  			// either the vout is greater than the inserted vout or it is a different tx
   539  			if utxo.Vout > u.Vout || *(*int)(unsafe.Pointer(&utxo.BtxID[0])) != *(*int)(unsafe.Pointer(&u.BtxID[0])) || !bytes.Equal(utxo.BtxID, u.BtxID) {
   540  				// found the right place, insert the utxo
   541  				ab.Utxos = append(ab.Utxos, *u)
   542  				copy(ab.Utxos[i+1:], ab.Utxos[i:])
   543  				ab.Utxos[i] = *u
   544  				// reset utxosMap after insert, the index will have to be rebuilt if needed
   545  				ab.utxosMap = nil
   546  				return
   547  			}
   548  		}
   549  	}
   550  	ab.Utxos = append(ab.Utxos, *u)
   551  	ab.manageUtxoMap(u)
   552  }
   553  
   554  // markUtxoAsSpent finds outpoint btxID:vout in utxos and marks it as spent
    555  // for a small number of utxos a linear search is done, for a larger number a hashmap index is used
    556  // marking is much faster than removing the utxo from the slice, which would cause in-memory reallocations
   557  func (ab *AddrBalance) markUtxoAsSpent(btxID []byte, vout int32) {
   558  	if len(ab.utxosMap) == 0 {
   559  		for i := range ab.Utxos {
   560  			utxo := &ab.Utxos[i]
   561  			if utxo.Vout == vout && *(*int)(unsafe.Pointer(&utxo.BtxID[0])) == *(*int)(unsafe.Pointer(&btxID[0])) && bytes.Equal(utxo.BtxID, btxID) {
   562  				// mark utxo as spent by setting vout=-1
   563  				utxo.Vout = -1
   564  				return
   565  			}
   566  		}
   567  	} else {
   568  		if i, e := ab.utxosMap[string(btxID)]; e {
   569  			l := len(ab.Utxos)
   570  			for ; i < l; i++ {
   571  				utxo := &ab.Utxos[i]
   572  				if utxo.Vout == vout {
   573  					if bytes.Equal(utxo.BtxID, btxID) {
   574  						// mark utxo as spent by setting vout=-1
   575  						utxo.Vout = -1
   576  						return
   577  					}
   578  					break
   579  				}
   580  			}
   581  		}
   582  	}
   583  	glog.Errorf("Utxo %s:%d not found, utxosMap size %d", hex.EncodeToString(btxID), vout, len(ab.utxosMap))
   584  }
   585  
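         // blockTxs holds the packed txid of a transaction in a block together with the outpoints of its inputs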
   586  type blockTxs struct {
   587  	btxID  []byte
   588  	inputs []outpoint
   589  }
   590  
   591  func (d *RocksDB) resetValueSatToZero(valueSat *big.Int, addrDesc bchain.AddressDescriptor, logText string) {
   592  	ad, _, err := d.chainParser.GetAddressesFromAddrDesc(addrDesc)
   593  	if err != nil {
   594  		glog.Warningf("rocksdb: unparsable address hex '%v' reached negative %s %v, resetting to 0. Parser error %v", addrDesc, logText, valueSat.String(), err)
   595  	} else {
   596  		glog.Warningf("rocksdb: address %v hex '%v' reached negative %s %v, resetting to 0", ad, addrDesc, logText, valueSat.String())
   597  	}
   598  	valueSat.SetInt64(0)
   599  }
   600  
   601  // GetAndResetConnectBlockStats gets statistics about cache usage in connect blocks and resets the counters
   602  func (d *RocksDB) GetAndResetConnectBlockStats() string {
   603  	s := fmt.Sprintf("%+v", d.cbs)
   604  	d.cbs = connectBlockStats{}
   605  	return s
   606  }
   607  
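         // processAddressesBitcoinType processes the outputs and inputs of all transactions in the block,
         // updating the addresses map, txAddressesMap and balances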
   608  func (d *RocksDB) processAddressesBitcoinType(block *bchain.Block, addresses addressesMap, txAddressesMap map[string]*TxAddresses, balances map[string]*AddrBalance) error {
   609  	blockTxIDs := make([][]byte, len(block.Txs))
   610  	blockTxAddresses := make([]*TxAddresses, len(block.Txs))
   611  	// first process all outputs so that inputs can refer to txs in this block
   612  	for txi := range block.Txs {
   613  		tx := &block.Txs[txi]
   614  		btxID, err := d.chainParser.PackTxid(tx.Txid)
   615  		if err != nil {
   616  			return err
   617  		}
   618  		blockTxIDs[txi] = btxID
   619  		ta := TxAddresses{Height: block.Height}
   620  		if d.extendedIndex {
   621  			if tx.VSize > 0 {
   622  				ta.VSize = uint32(tx.VSize)
   623  			} else {
   624  				ta.VSize = uint32(len(tx.Hex))
   625  			}
   626  		}
   627  		ta.Outputs = make([]TxOutput, len(tx.Vout))
   628  		txAddressesMap[string(btxID)] = &ta
   629  		blockTxAddresses[txi] = &ta
   630  		for i := range tx.Vout {
   631  			output := &tx.Vout[i]
   632  			tao := &ta.Outputs[i]
   633  			tao.ValueSat = output.ValueSat
   634  			addrDesc, err := d.chainParser.GetAddrDescFromVout(output)
   635  			if err != nil || len(addrDesc) == 0 || len(addrDesc) > maxAddrDescLen {
   636  				if err != nil {
    637  					// do not log ErrAddressMissing, transactions can be without a to address (for example eth contracts)
   638  					if err != bchain.ErrAddressMissing {
   639  						glog.Warningf("rocksdb: addrDesc: %v - height %d, tx %v, output %v, error %v", err, block.Height, tx.Txid, output, err)
   640  					}
   641  				} else {
   642  					glog.V(1).Infof("rocksdb: height %d, tx %v, vout %v, skipping addrDesc of length %d", block.Height, tx.Txid, i, len(addrDesc))
   643  				}
   644  				continue
   645  			}
   646  			tao.AddrDesc = addrDesc
   647  			if d.chainParser.IsAddrDescIndexable(addrDesc) {
   648  				strAddrDesc := string(addrDesc)
   649  				balance, e := balances[strAddrDesc]
   650  				if !e {
   651  					balance, err = d.GetAddrDescBalance(addrDesc, addressBalanceDetailUTXOIndexed)
   652  					if err != nil {
   653  						return err
   654  					}
   655  					if balance == nil {
   656  						balance = &AddrBalance{}
   657  					}
   658  					balances[strAddrDesc] = balance
   659  					d.cbs.balancesMiss++
   660  				} else {
   661  					d.cbs.balancesHit++
   662  				}
   663  				balance.BalanceSat.Add(&balance.BalanceSat, &output.ValueSat)
   664  				balance.addUtxo(&Utxo{
   665  					BtxID:    btxID,
   666  					Vout:     int32(i),
   667  					Height:   block.Height,
   668  					ValueSat: output.ValueSat,
   669  				})
   670  				counted := addToAddressesMap(addresses, strAddrDesc, btxID, int32(i))
   671  				if !counted {
   672  					balance.Txs++
   673  				}
   674  			}
   675  		}
   676  	}
   677  	// process inputs
   678  	for txi := range block.Txs {
   679  		tx := &block.Txs[txi]
   680  		spendingTxid := blockTxIDs[txi]
   681  		ta := blockTxAddresses[txi]
   682  		ta.Inputs = make([]TxInput, len(tx.Vin))
   683  		logged := false
   684  		for i := range tx.Vin {
   685  			input := &tx.Vin[i]
   686  			tai := &ta.Inputs[i]
   687  			btxID, err := d.chainParser.PackTxid(input.Txid)
   688  			if err != nil {
   689  				// do not process inputs without input txid
   690  				if err == bchain.ErrTxidMissing {
   691  					continue
   692  				}
   693  				return err
   694  			}
   695  			stxID := string(btxID)
   696  			ita, e := txAddressesMap[stxID]
   697  			if !e {
   698  				ita, err = d.getTxAddresses(btxID)
   699  				if err != nil {
   700  					return err
   701  				}
   702  				if ita == nil {
    703  					// allow the parser to process an unknown input; some coins may implement special handling, the default is to log a warning
   704  					tai.AddrDesc = d.chainParser.GetAddrDescForUnknownInput(tx, i)
   705  					continue
   706  				}
   707  				txAddressesMap[stxID] = ita
   708  				d.cbs.txAddressesMiss++
   709  			} else {
   710  				d.cbs.txAddressesHit++
   711  			}
   712  			if len(ita.Outputs) <= int(input.Vout) {
   713  				glog.Warningf("rocksdb: height %d, tx %v, input tx %v vout %v is out of bounds of stored tx", block.Height, tx.Txid, input.Txid, input.Vout)
   714  				continue
   715  			}
   716  			spentOutput := &ita.Outputs[int(input.Vout)]
   717  			if spentOutput.Spent {
   718  				glog.Warningf("rocksdb: height %d, tx %v, input tx %v vout %v is double spend", block.Height, tx.Txid, input.Txid, input.Vout)
   719  			}
   720  			tai.AddrDesc = spentOutput.AddrDesc
   721  			tai.ValueSat = spentOutput.ValueSat
   722  			// mark the output as spent in tx
   723  			spentOutput.Spent = true
   724  			if d.extendedIndex {
   725  				spentOutput.SpentTxid = tx.Txid
   726  				spentOutput.SpentIndex = uint32(i)
   727  				spentOutput.SpentHeight = block.Height
   728  				tai.Txid = input.Txid
   729  				tai.Vout = input.Vout
   730  			}
   731  			if len(spentOutput.AddrDesc) == 0 {
   732  				if !logged {
   733  					glog.V(1).Infof("rocksdb: height %d, tx %v, input tx %v vout %v skipping empty address", block.Height, tx.Txid, input.Txid, input.Vout)
   734  					logged = true
   735  				}
   736  				continue
   737  			}
   738  			if d.chainParser.IsAddrDescIndexable(spentOutput.AddrDesc) {
   739  				strAddrDesc := string(spentOutput.AddrDesc)
   740  				balance, e := balances[strAddrDesc]
   741  				if !e {
   742  					balance, err = d.GetAddrDescBalance(spentOutput.AddrDesc, addressBalanceDetailUTXOIndexed)
   743  					if err != nil {
   744  						return err
   745  					}
   746  					if balance == nil {
   747  						balance = &AddrBalance{}
   748  					}
   749  					balances[strAddrDesc] = balance
   750  					d.cbs.balancesMiss++
   751  				} else {
   752  					d.cbs.balancesHit++
   753  				}
   754  				counted := addToAddressesMap(addresses, strAddrDesc, spendingTxid, ^int32(i))
   755  				if !counted {
   756  					balance.Txs++
   757  				}
   758  				balance.BalanceSat.Sub(&balance.BalanceSat, &spentOutput.ValueSat)
   759  				balance.markUtxoAsSpent(btxID, int32(input.Vout))
   760  				if balance.BalanceSat.Sign() < 0 {
   761  					d.resetValueSatToZero(&balance.BalanceSat, spentOutput.AddrDesc, "balance")
   762  				}
   763  				balance.SentSat.Add(&balance.SentSat, &spentOutput.ValueSat)
   764  			}
   765  		}
   766  	}
   767  	return nil
   768  }
   769  
   770  // addToAddressesMap maintains mapping between addresses and transactions in one block
   771  // the method assumes that outputs in the block are processed before the inputs
    772  // the return value is true if the tx was already processed, so that the tx is not counted multiple times
   773  func addToAddressesMap(addresses addressesMap, strAddrDesc string, btxID []byte, index int32) bool {
   774  	// check that the address was already processed in this block
   775  	// if not found, it has certainly not been counted
   776  	at, found := addresses[strAddrDesc]
   777  	if found {
   778  		// if the tx is already in the slice, append the index to the array of indexes
   779  		for i, t := range at {
   780  			if bytes.Equal(btxID, t.btxID) {
   781  				at[i].indexes = append(t.indexes, index)
   782  				return true
   783  			}
   784  		}
   785  	}
   786  	addresses[strAddrDesc] = append(at, txIndexes{
   787  		btxID:   btxID,
   788  		indexes: []int32{index},
   789  	})
   790  	return false
   791  }
   792  
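         // getTxIndexesForAddressAndBlock loads the transactions (with indexes) stored in the addresses column
         // for the given address descriptor and block height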
   793  func (d *RocksDB) getTxIndexesForAddressAndBlock(addrDesc bchain.AddressDescriptor, height uint32) ([]txIndexes, error) {
   794  	key := packAddressKey(addrDesc, height)
   795  	val, err := d.db.GetCF(d.ro, d.cfh[cfAddresses], key)
   796  	if err != nil {
   797  		return nil, err
   798  	}
   799  	defer val.Free()
   800  	// nil data means the key was not found in DB
   801  	if val.Data() == nil {
   802  		return nil, nil
   803  	}
   804  	rv, err := d.unpackTxIndexes(val.Data())
   805  	if err != nil {
   806  		return nil, err
   807  	}
   808  	return rv, nil
   809  }
   810  
   811  func (d *RocksDB) storeAddresses(wb *grocksdb.WriteBatch, height uint32, addresses addressesMap) error {
   812  	for addrDesc, txi := range addresses {
   813  		ba := bchain.AddressDescriptor(addrDesc)
   814  		key := packAddressKey(ba, height)
   815  		val := d.packTxIndexes(txi)
   816  		wb.PutCF(d.cfh[cfAddresses], key, val)
   817  	}
   818  	return nil
   819  }
   820  
   821  func (d *RocksDB) storeTxAddresses(wb *grocksdb.WriteBatch, am map[string]*TxAddresses) error {
   822  	varBuf := make([]byte, maxPackedBigintBytes)
   823  	buf := make([]byte, 1024)
   824  	for txID, ta := range am {
   825  		buf = d.packTxAddresses(ta, buf, varBuf)
   826  		wb.PutCF(d.cfh[cfTxAddresses], []byte(txID), buf)
   827  	}
   828  	return nil
   829  }
   830  
   831  func (d *RocksDB) storeBalances(wb *grocksdb.WriteBatch, abm map[string]*AddrBalance) error {
    832  	// allocate initial buffers
   833  	buf := make([]byte, 1024)
   834  	varBuf := make([]byte, maxPackedBigintBytes)
   835  	for addrDesc, ab := range abm {
   836  		// balance with 0 transactions is removed from db - happens on disconnect
   837  		if ab == nil || ab.Txs <= 0 {
   838  			wb.DeleteCF(d.cfh[cfAddressBalance], bchain.AddressDescriptor(addrDesc))
   839  		} else {
   840  			buf = packAddrBalance(ab, buf, varBuf)
   841  			wb.PutCF(d.cfh[cfAddressBalance], bchain.AddressDescriptor(addrDesc), buf)
   842  		}
   843  	}
   844  	return nil
   845  }
   846  
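         // cleanupBlockTxs deletes blockTxs entries older than the number of blocks configured by KeepBlockAddresses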
   847  func (d *RocksDB) cleanupBlockTxs(wb *grocksdb.WriteBatch, block *bchain.Block) error {
   848  	keep := d.chainParser.KeepBlockAddresses()
    849  	// cleanup old blockTxs entries
   850  	if block.Height > uint32(keep) {
   851  		for rh := block.Height - uint32(keep); rh > 0; rh-- {
   852  			key := packUint(rh)
   853  			val, err := d.db.GetCF(d.ro, d.cfh[cfBlockTxs], key)
   854  			if err != nil {
   855  				return err
   856  			}
   857  			// nil data means the key was not found in DB
   858  			if val.Data() == nil {
   859  				break
   860  			}
   861  			val.Free()
   862  			d.db.DeleteCF(d.wo, d.cfh[cfBlockTxs], key)
   863  		}
   864  	}
   865  	return nil
   866  }
   867  
   868  func (d *RocksDB) storeAndCleanupBlockTxs(wb *grocksdb.WriteBatch, block *bchain.Block) error {
   869  	pl := d.chainParser.PackedTxidLen()
   870  	buf := make([]byte, 0, pl*len(block.Txs))
   871  	varBuf := make([]byte, vlq.MaxLen64)
   872  	zeroTx := make([]byte, pl)
   873  	for i := range block.Txs {
   874  		tx := &block.Txs[i]
   875  		o := make([]outpoint, len(tx.Vin))
   876  		for v := range tx.Vin {
   877  			vin := &tx.Vin[v]
   878  			btxID, err := d.chainParser.PackTxid(vin.Txid)
   879  			if err != nil {
    880  				// inputs without an input txid are stored with a zero txid
   881  				if err == bchain.ErrTxidMissing {
   882  					btxID = zeroTx
   883  				} else {
   884  					return err
   885  				}
   886  			}
   887  			o[v].btxID = btxID
   888  			o[v].index = int32(vin.Vout)
   889  		}
   890  		btxID, err := d.chainParser.PackTxid(tx.Txid)
   891  		if err != nil {
   892  			return err
   893  		}
   894  		buf = append(buf, btxID...)
   895  		l := packVaruint(uint(len(o)), varBuf)
   896  		buf = append(buf, varBuf[:l]...)
   897  		buf = append(buf, d.packOutpoints(o)...)
   898  	}
   899  	key := packUint(block.Height)
   900  	wb.PutCF(d.cfh[cfBlockTxs], key, buf)
   901  	return d.cleanupBlockTxs(wb, block)
   902  }
   903  
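         // getBlockTxs loads the packed transactions (txids with input outpoints) stored in the blockTxs column for the given height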
   904  func (d *RocksDB) getBlockTxs(height uint32) ([]blockTxs, error) {
   905  	pl := d.chainParser.PackedTxidLen()
   906  	val, err := d.db.GetCF(d.ro, d.cfh[cfBlockTxs], packUint(height))
   907  	if err != nil {
   908  		return nil, err
   909  	}
   910  	defer val.Free()
   911  	buf := val.Data()
   912  	bt := make([]blockTxs, 0, 8)
   913  	for i := 0; i < len(buf); {
   914  		if len(buf)-i < pl {
   915  			glog.Error("rocksdb: Inconsistent data in blockTxs ", hex.EncodeToString(buf))
   916  			return nil, errors.New("Inconsistent data in blockTxs")
   917  		}
   918  		txid := append([]byte(nil), buf[i:i+pl]...)
   919  		i += pl
   920  		o, ol, err := d.unpackNOutpoints(buf[i:])
   921  		if err != nil {
   922  			glog.Error("rocksdb: Inconsistent data in blockTxs ", hex.EncodeToString(buf))
   923  			return nil, errors.New("Inconsistent data in blockTxs")
   924  		}
   925  		bt = append(bt, blockTxs{
   926  			btxID:  txid,
   927  			inputs: o,
   928  		})
   929  		i += ol
   930  	}
   931  	return bt, nil
   932  }
   933  
   934  // GetAddrDescBalance returns AddrBalance for given addrDesc
   935  func (d *RocksDB) GetAddrDescBalance(addrDesc bchain.AddressDescriptor, detail AddressBalanceDetail) (*AddrBalance, error) {
   936  	val, err := d.db.GetCF(d.ro, d.cfh[cfAddressBalance], addrDesc)
   937  	if err != nil {
   938  		return nil, err
   939  	}
   940  	defer val.Free()
   941  	buf := val.Data()
   942  	// 3 is minimum length of addrBalance - 1 byte txs, 1 byte sent, 1 byte balance
   943  	if len(buf) < 3 {
   944  		return nil, nil
   945  	}
   946  	return unpackAddrBalance(buf, d.chainParser.PackedTxidLen(), detail)
   947  }
   948  
   949  // GetAddressBalance returns address balance for an address or nil if address not found
   950  func (d *RocksDB) GetAddressBalance(address string, detail AddressBalanceDetail) (*AddrBalance, error) {
   951  	addrDesc, err := d.chainParser.GetAddrDescFromAddress(address)
   952  	if err != nil {
   953  		return nil, err
   954  	}
   955  	return d.GetAddrDescBalance(addrDesc, detail)
   956  }
   957  
   958  func (d *RocksDB) getTxAddresses(btxID []byte) (*TxAddresses, error) {
   959  	val, err := d.db.GetCF(d.ro, d.cfh[cfTxAddresses], btxID)
   960  	if err != nil {
   961  		return nil, err
   962  	}
   963  	defer val.Free()
   964  	buf := val.Data()
    965  	// 3 is the minimum length of txAddresses - 1 byte height, 1 byte inputs len, 1 byte outputs len
   966  	if len(buf) < 3 {
   967  		return nil, nil
   968  	}
   969  	return d.unpackTxAddresses(buf)
   970  }
   971  
   972  // GetTxAddresses returns TxAddresses for given txid or nil if not found
   973  func (d *RocksDB) GetTxAddresses(txid string) (*TxAddresses, error) {
   974  	btxID, err := d.chainParser.PackTxid(txid)
   975  	if err != nil {
   976  		return nil, err
   977  	}
   978  	return d.getTxAddresses(btxID)
   979  }
   980  
    981  // AddrDescForOutpoint returns the address descriptor and value for the given outpoint, or nil if the outpoint is not found
   982  func (d *RocksDB) AddrDescForOutpoint(outpoint bchain.Outpoint) (bchain.AddressDescriptor, *big.Int) {
   983  	ta, err := d.GetTxAddresses(outpoint.Txid)
   984  	if err != nil || ta == nil {
   985  		return nil, nil
   986  	}
   987  	if outpoint.Vout < 0 {
   988  		vin := ^outpoint.Vout
   989  		if len(ta.Inputs) <= int(vin) {
   990  			return nil, nil
   991  		}
   992  		return ta.Inputs[vin].AddrDesc, &ta.Inputs[vin].ValueSat
   993  	}
   994  	if len(ta.Outputs) <= int(outpoint.Vout) {
   995  		return nil, nil
   996  	}
   997  	return ta.Outputs[outpoint.Vout].AddrDesc, &ta.Outputs[outpoint.Vout].ValueSat
   998  }
   999  
  1000  func (d *RocksDB) packTxAddresses(ta *TxAddresses, buf []byte, varBuf []byte) []byte {
  1001  	buf = buf[:0]
  1002  	l := packVaruint(uint(ta.Height), varBuf)
  1003  	buf = append(buf, varBuf[:l]...)
  1004  	if d.extendedIndex {
  1005  		l = packVaruint(uint(ta.VSize), varBuf)
  1006  		buf = append(buf, varBuf[:l]...)
  1007  	}
  1008  	l = packVaruint(uint(len(ta.Inputs)), varBuf)
  1009  	buf = append(buf, varBuf[:l]...)
  1010  	for i := range ta.Inputs {
  1011  		buf = d.appendTxInput(&ta.Inputs[i], buf, varBuf)
  1012  	}
  1013  	l = packVaruint(uint(len(ta.Outputs)), varBuf)
  1014  	buf = append(buf, varBuf[:l]...)
  1015  	for i := range ta.Outputs {
  1016  		buf = d.appendTxOutput(&ta.Outputs[i], buf, varBuf)
  1017  	}
  1018  	return buf
  1019  }
  1020  
  1021  func (d *RocksDB) appendTxInput(txi *TxInput, buf []byte, varBuf []byte) []byte {
  1022  	la := len(txi.AddrDesc)
  1023  	var l int
  1024  	if d.extendedIndex {
  1025  		if txi.Txid == "" {
  1026  			// coinbase transaction
  1027  			la = ^la
  1028  		}
  1029  		l = packVarint(la, varBuf)
  1030  		buf = append(buf, varBuf[:l]...)
  1031  		buf = append(buf, txi.AddrDesc...)
  1032  		l = packBigint(&txi.ValueSat, varBuf)
  1033  		buf = append(buf, varBuf[:l]...)
  1034  		if la >= 0 {
  1035  			btxID, err := d.chainParser.PackTxid(txi.Txid)
  1036  			if err != nil {
  1037  				if err != bchain.ErrTxidMissing {
  1038  					glog.Error("Cannot pack txid ", txi.Txid)
  1039  				}
  1040  				btxID = make([]byte, d.chainParser.PackedTxidLen())
  1041  			}
  1042  			buf = append(buf, btxID...)
  1043  			l = packVaruint(uint(txi.Vout), varBuf)
  1044  			buf = append(buf, varBuf[:l]...)
  1045  		}
  1046  	} else {
  1047  		l = packVaruint(uint(la), varBuf)
  1048  		buf = append(buf, varBuf[:l]...)
  1049  		buf = append(buf, txi.AddrDesc...)
  1050  		l = packBigint(&txi.ValueSat, varBuf)
  1051  		buf = append(buf, varBuf[:l]...)
  1052  	}
  1053  	return buf
  1054  }
  1055  
  1056  func (d *RocksDB) appendTxOutput(txo *TxOutput, buf []byte, varBuf []byte) []byte {
  1057  	la := len(txo.AddrDesc)
  1058  	if txo.Spent {
  1059  		la = ^la
  1060  	}
  1061  	l := packVarint(la, varBuf)
  1062  	buf = append(buf, varBuf[:l]...)
  1063  	buf = append(buf, txo.AddrDesc...)
  1064  	l = packBigint(&txo.ValueSat, varBuf)
  1065  	buf = append(buf, varBuf[:l]...)
  1066  	if d.extendedIndex && txo.Spent {
  1067  		btxID, err := d.chainParser.PackTxid(txo.SpentTxid)
  1068  		if err != nil {
  1069  			if err != bchain.ErrTxidMissing {
  1070  				glog.Error("Cannot pack txid ", txo.SpentTxid)
  1071  			}
  1072  			btxID = make([]byte, d.chainParser.PackedTxidLen())
  1073  		}
  1074  		buf = append(buf, btxID...)
  1075  		l = packVaruint(uint(txo.SpentIndex), varBuf)
  1076  		buf = append(buf, varBuf[:l]...)
  1077  		l = packVaruint(uint(txo.SpentHeight), varBuf)
  1078  		buf = append(buf, varBuf[:l]...)
  1079  	}
  1080  	return buf
  1081  }
  1082  
  1083  func unpackAddrBalance(buf []byte, txidUnpackedLen int, detail AddressBalanceDetail) (*AddrBalance, error) {
  1084  	txs, l := unpackVaruint(buf)
  1085  	sentSat, sl := unpackBigint(buf[l:])
  1086  	balanceSat, bl := unpackBigint(buf[l+sl:])
  1087  	l = l + sl + bl
  1088  	ab := &AddrBalance{
  1089  		Txs:        uint32(txs),
  1090  		SentSat:    sentSat,
  1091  		BalanceSat: balanceSat,
  1092  	}
  1093  	if detail != AddressBalanceDetailNoUTXO {
  1094  		// estimate the size of utxos to avoid reallocation
  1095  		ab.Utxos = make([]Utxo, 0, len(buf[l:])/txidUnpackedLen+3)
  1096  		// ab.utxosMap = make(map[string]int, cap(ab.Utxos))
  1097  		for len(buf[l:]) >= txidUnpackedLen+3 {
  1098  			btxID := append([]byte(nil), buf[l:l+txidUnpackedLen]...)
  1099  			l += txidUnpackedLen
  1100  			vout, ll := unpackVaruint(buf[l:])
  1101  			l += ll
  1102  			height, ll := unpackVaruint(buf[l:])
  1103  			l += ll
  1104  			valueSat, ll := unpackBigint(buf[l:])
  1105  			l += ll
  1106  			u := Utxo{
  1107  				BtxID:    btxID,
  1108  				Vout:     int32(vout),
  1109  				Height:   uint32(height),
  1110  				ValueSat: valueSat,
  1111  			}
  1112  			if detail == AddressBalanceDetailUTXO {
  1113  				ab.Utxos = append(ab.Utxos, u)
  1114  			} else {
  1115  				ab.addUtxo(&u)
  1116  			}
  1117  		}
  1118  	}
  1119  	return ab, nil
  1120  }
  1121  
  1122  func packAddrBalance(ab *AddrBalance, buf, varBuf []byte) []byte {
  1123  	buf = buf[:0]
  1124  	l := packVaruint(uint(ab.Txs), varBuf)
  1125  	buf = append(buf, varBuf[:l]...)
  1126  	l = packBigint(&ab.SentSat, varBuf)
  1127  	buf = append(buf, varBuf[:l]...)
  1128  	l = packBigint(&ab.BalanceSat, varBuf)
  1129  	buf = append(buf, varBuf[:l]...)
  1130  	for _, utxo := range ab.Utxos {
   1131  		// if Vout < 0, the utxo is marked as spent and is not stored in the entry
  1132  		if utxo.Vout >= 0 {
  1133  			buf = append(buf, utxo.BtxID...)
  1134  			l = packVaruint(uint(utxo.Vout), varBuf)
  1135  			buf = append(buf, varBuf[:l]...)
  1136  			l = packVaruint(uint(utxo.Height), varBuf)
  1137  			buf = append(buf, varBuf[:l]...)
  1138  			l = packBigint(&utxo.ValueSat, varBuf)
  1139  			buf = append(buf, varBuf[:l]...)
  1140  		}
  1141  	}
  1142  	return buf
  1143  }
  1144  
  1145  func (d *RocksDB) unpackTxAddresses(buf []byte) (*TxAddresses, error) {
  1146  	ta := TxAddresses{}
  1147  	height, l := unpackVaruint(buf)
  1148  	ta.Height = uint32(height)
  1149  	if d.extendedIndex {
  1150  		vsize, ll := unpackVaruint(buf[l:])
  1151  		ta.VSize = uint32(vsize)
  1152  		l += ll
  1153  	}
  1154  	inputs, ll := unpackVaruint(buf[l:])
  1155  	l += ll
  1156  	ta.Inputs = make([]TxInput, inputs)
  1157  	for i := uint(0); i < inputs; i++ {
  1158  		l += d.unpackTxInput(&ta.Inputs[i], buf[l:])
  1159  	}
  1160  	outputs, ll := unpackVaruint(buf[l:])
  1161  	l += ll
  1162  	ta.Outputs = make([]TxOutput, outputs)
  1163  	for i := uint(0); i < outputs; i++ {
  1164  		l += d.unpackTxOutput(&ta.Outputs[i], buf[l:])
  1165  	}
  1166  	return &ta, nil
  1167  }
  1168  
  1169  func (d *RocksDB) unpackTxInput(ti *TxInput, buf []byte) int {
  1170  	if d.extendedIndex {
  1171  		al, l := unpackVarint(buf)
  1172  		var coinbase bool
  1173  		if al < 0 {
  1174  			coinbase = true
  1175  			al = ^al
  1176  		}
  1177  		ti.AddrDesc = append([]byte(nil), buf[l:l+al]...)
  1178  		al += l
  1179  		ti.ValueSat, l = unpackBigint(buf[al:])
  1180  		al += l
  1181  		if !coinbase {
  1182  			l = d.chainParser.PackedTxidLen()
  1183  			ti.Txid, _ = d.chainParser.UnpackTxid(buf[al : al+l])
  1184  			al += l
  1185  			var i uint
  1186  			i, l = unpackVaruint(buf[al:])
  1187  			ti.Vout = uint32(i)
  1188  			al += l
  1189  		}
  1190  		return al
  1191  	} else {
  1192  		al, l := unpackVaruint(buf)
  1193  		ti.AddrDesc = append([]byte(nil), buf[l:l+int(al)]...)
  1194  		al += uint(l)
  1195  		ti.ValueSat, l = unpackBigint(buf[al:])
  1196  		return l + int(al)
  1197  	}
  1198  }
  1199  
  1200  func (d *RocksDB) unpackTxOutput(to *TxOutput, buf []byte) int {
  1201  	al, l := unpackVarint(buf)
  1202  	if al < 0 {
  1203  		to.Spent = true
  1204  		al = ^al
  1205  	}
  1206  	to.AddrDesc = append([]byte(nil), buf[l:l+al]...)
  1207  	al += l
  1208  	to.ValueSat, l = unpackBigint(buf[al:])
  1209  	al += l
  1210  	if d.extendedIndex && to.Spent {
  1211  		l = d.chainParser.PackedTxidLen()
  1212  		to.SpentTxid, _ = d.chainParser.UnpackTxid(buf[al : al+l])
  1213  		al += l
  1214  		var i uint
  1215  		i, l = unpackVaruint(buf[al:])
  1216  		al += l
  1217  		to.SpentIndex = uint32(i)
  1218  		i, l = unpackVaruint(buf[al:])
  1219  		to.SpentHeight = uint32(i)
  1220  		al += l
  1221  	}
  1222  	return al
  1223  }
  1224  
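         // packTxIndexes serializes the transactions of an address in a block; each index is shifted left by one bit
         // and the lowest bit marks the last index of a tx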
  1225  func (d *RocksDB) packTxIndexes(txi []txIndexes) []byte {
  1226  	buf := make([]byte, 0, 32)
  1227  	bvout := make([]byte, vlq.MaxLen32)
  1228  	// store the txs in reverse order for ordering from newest to oldest
  1229  	for j := len(txi) - 1; j >= 0; j-- {
  1230  		t := &txi[j]
  1231  		buf = append(buf, []byte(t.btxID)...)
  1232  		for i, index := range t.indexes {
  1233  			index <<= 1
  1234  			if i == len(t.indexes)-1 {
  1235  				index |= 1
  1236  			}
  1237  			l := packVarint32(index, bvout)
  1238  			buf = append(buf, bvout[:l]...)
  1239  		}
  1240  	}
  1241  	return buf
  1242  }
  1243  
  1244  func (d *RocksDB) unpackTxIndexes(buf []byte) ([]txIndexes, error) {
  1245  	var retval []txIndexes
  1246  	txidUnpackedLen := d.chainParser.PackedTxidLen()
  1247  	for len(buf) > txidUnpackedLen {
  1248  		btxID := make([]byte, txidUnpackedLen)
  1249  		copy(btxID, buf[:txidUnpackedLen])
  1250  		indexes := make([]int32, 0, 16)
  1251  		buf = buf[txidUnpackedLen:]
  1252  		for {
  1253  			index, l := unpackVarint32(buf)
  1254  			indexes = append(indexes, index>>1)
  1255  			buf = buf[l:]
  1256  			if index&1 == 1 {
  1257  				break
  1258  			}
  1259  		}
  1260  		retval = append(retval, txIndexes{
  1261  			btxID:   btxID,
  1262  			indexes: indexes,
  1263  		})
  1264  	}
   1265  	// reverse the return values, packTxIndexes stores them in reverse order
  1266  	for i, j := 0, len(retval)-1; i < j; i, j = i+1, j-1 {
  1267  		retval[i], retval[j] = retval[j], retval[i]
  1268  	}
  1269  	return retval, nil
  1270  }
  1271  
  1272  func (d *RocksDB) packOutpoints(outpoints []outpoint) []byte {
  1273  	buf := make([]byte, 0, 32)
  1274  	bvout := make([]byte, vlq.MaxLen32)
  1275  	for _, o := range outpoints {
  1276  		l := packVarint32(o.index, bvout)
  1277  		buf = append(buf, []byte(o.btxID)...)
  1278  		buf = append(buf, bvout[:l]...)
  1279  	}
  1280  	return buf
  1281  }
  1282  
  1283  func (d *RocksDB) unpackNOutpoints(buf []byte) ([]outpoint, int, error) {
  1284  	txidUnpackedLen := d.chainParser.PackedTxidLen()
  1285  	n, p := unpackVaruint(buf)
  1286  	outpoints := make([]outpoint, n)
  1287  	for i := uint(0); i < n; i++ {
  1288  		if p+txidUnpackedLen >= len(buf) {
  1289  			return nil, 0, errors.New("Inconsistent data in unpackNOutpoints")
  1290  		}
  1291  		btxID := append([]byte(nil), buf[p:p+txidUnpackedLen]...)
  1292  		p += txidUnpackedLen
  1293  		vout, voutLen := unpackVarint32(buf[p:])
  1294  		p += voutLen
  1295  		outpoints[i] = outpoint{
  1296  			btxID: btxID,
  1297  			index: vout,
  1298  		}
  1299  	}
  1300  	return outpoints, p, nil
  1301  }
  1302  
  1303  // Block index
  1304  
  1305  // BlockInfo holds information about blocks kept in column height
  1306  type BlockInfo struct {
  1307  	Hash   string
  1308  	Time   int64
  1309  	Txs    uint32
  1310  	Size   uint32
  1311  	Height uint32 // Height is not packed!
  1312  }
  1313  
  1314  func (d *RocksDB) packBlockInfo(block *BlockInfo) ([]byte, error) {
  1315  	packed := make([]byte, 0, 64)
  1316  	varBuf := make([]byte, vlq.MaxLen64)
  1317  	b, err := d.chainParser.PackBlockHash(block.Hash)
  1318  	if err != nil {
  1319  		return nil, err
  1320  	}
  1321  	pl := d.chainParser.PackedTxidLen()
  1322  	if len(b) != pl {
  1323  		glog.Warning("Non standard block hash for height ", block.Height, ", hash [", block.Hash, "]")
  1324  		if len(b) > pl {
  1325  			b = b[:pl]
  1326  		} else {
  1327  			b = append(b, make([]byte, pl-len(b))...)
  1328  		}
  1329  	}
  1330  	packed = append(packed, b...)
  1331  	packed = append(packed, packUint(uint32(block.Time))...)
  1332  	l := packVaruint(uint(block.Txs), varBuf)
  1333  	packed = append(packed, varBuf[:l]...)
  1334  	l = packVaruint(uint(block.Size), varBuf)
  1335  	packed = append(packed, varBuf[:l]...)
  1336  	return packed, nil
  1337  }
  1338  
  1339  func (d *RocksDB) unpackBlockInfo(buf []byte) (*BlockInfo, error) {
  1340  	pl := d.chainParser.PackedTxidLen()
  1341  	// minimum length is PackedTxidLen + 4 bytes time + 1 byte txs + 1 byte size
  1342  	if len(buf) < pl+4+2 {
  1343  		return nil, nil
  1344  	}
  1345  	txid, err := d.chainParser.UnpackBlockHash(buf[:pl])
  1346  	if err != nil {
  1347  		return nil, err
  1348  	}
  1349  	t := unpackUint(buf[pl:])
  1350  	txs, l := unpackVaruint(buf[pl+4:])
  1351  	size, _ := unpackVaruint(buf[pl+4+l:])
  1352  	return &BlockInfo{
  1353  		Hash: txid,
  1354  		Time: int64(t),
  1355  		Txs:  uint32(txs),
  1356  		Size: uint32(size),
  1357  	}, nil
  1358  }
  1359  
   1360  // GetBestBlock returns the height and hash of the block with the highest height in the db
  1361  func (d *RocksDB) GetBestBlock() (uint32, string, error) {
  1362  	it := d.db.NewIteratorCF(d.ro, d.cfh[cfHeight])
  1363  	defer it.Close()
  1364  	if it.SeekToLast(); it.Valid() {
  1365  		bestHeight := unpackUint(it.Key().Data())
  1366  		info, err := d.unpackBlockInfo(it.Value().Data())
  1367  		if info != nil {
  1368  			if glog.V(1) {
  1369  				glog.Infof("rocksdb: bestblock %d %+v", bestHeight, info)
  1370  			}
  1371  			return bestHeight, info.Hash, err
  1372  		}
  1373  	}
  1374  	return 0, "", nil
  1375  }
  1376  
  1377  // GetBlockHash returns block hash at given height or empty string if not found
  1378  func (d *RocksDB) GetBlockHash(height uint32) (string, error) {
  1379  	key := packUint(height)
  1380  	val, err := d.db.GetCF(d.ro, d.cfh[cfHeight], key)
  1381  	if err != nil {
  1382  		return "", err
  1383  	}
  1384  	defer val.Free()
  1385  	info, err := d.unpackBlockInfo(val.Data())
  1386  	if info == nil {
  1387  		return "", err
  1388  	}
  1389  	return info.Hash, nil
  1390  }
  1391  
  1392  // GetBlockInfo returns block info stored in db
  1393  func (d *RocksDB) GetBlockInfo(height uint32) (*BlockInfo, error) {
  1394  	key := packUint(height)
  1395  	val, err := d.db.GetCF(d.ro, d.cfh[cfHeight], key)
  1396  	if err != nil {
  1397  		return nil, err
  1398  	}
  1399  	defer val.Free()
  1400  	bi, err := d.unpackBlockInfo(val.Data())
  1401  	if err != nil || bi == nil {
  1402  		return nil, err
  1403  	}
  1404  	bi.Height = height
  1405  	return bi, err
  1406  }
  1407  
  1408  func (d *RocksDB) writeHeightFromBlock(wb *grocksdb.WriteBatch, block *bchain.Block, op int) error {
  1409  	return d.writeHeight(wb, block.Height, &BlockInfo{
  1410  		Hash:   block.Hash,
  1411  		Time:   block.Time,
  1412  		Txs:    uint32(len(block.Txs)),
  1413  		Size:   uint32(block.Size),
  1414  		Height: block.Height,
  1415  	}, op)
  1416  }
  1417  
  1418  func (d *RocksDB) writeHeight(wb *grocksdb.WriteBatch, height uint32, bi *BlockInfo, op int) error {
  1419  	key := packUint(height)
  1420  	switch op {
  1421  	case opInsert:
  1422  		val, err := d.packBlockInfo(bi)
  1423  		if err != nil {
  1424  			return err
  1425  		}
  1426  		wb.PutCF(d.cfh[cfHeight], key, val)
  1427  		d.is.UpdateBestHeight(height)
  1428  	case opDelete:
  1429  		wb.DeleteCF(d.cfh[cfHeight], key)
  1430  		d.is.UpdateBestHeight(height - 1)
  1431  	}
  1432  	return nil
  1433  }
  1434  
  1435  // address alias support
  1436  var cachedAddressAliasRecords = make(map[string]string)
  1437  var cachedAddressAliasRecordsMux sync.Mutex
  1438  
  1439  // InitAddressAliasRecords loads all records to cache
  1440  func (d *RocksDB) InitAddressAliasRecords() (int, error) {
  1441  	count := 0
  1442  	cachedAddressAliasRecordsMux.Lock()
  1443  	defer cachedAddressAliasRecordsMux.Unlock()
  1444  	it := d.db.NewIteratorCF(d.ro, d.cfh[cfAddressAliases])
  1445  	defer it.Close()
  1446  	for it.SeekToFirst(); it.Valid(); it.Next() {
  1447  		address := string(it.Key().Data())
  1448  		name := string(it.Value().Data())
  1449  		if address != "" && name != "" {
  1450  			cachedAddressAliasRecords[address] = d.chainParser.FormatAddressAlias(address, name)
  1451  			count++
  1452  		}
  1453  	}
  1454  	return count, nil
  1455  }
  1456  
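         // GetAddressAlias returns the alias of the given address from the cache, or an empty string if there is none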
  1457  func (d *RocksDB) GetAddressAlias(address string) string {
  1458  	cachedAddressAliasRecordsMux.Lock()
  1459  	name := cachedAddressAliasRecords[address]
  1460  	cachedAddressAliasRecordsMux.Unlock()
  1461  	return name
  1462  }
  1463  
  1464  func (d *RocksDB) storeAddressAliasRecords(wb *grocksdb.WriteBatch, records []bchain.AddressAliasRecord) error {
  1465  	if d.chainParser.UseAddressAliases() {
  1466  		for i := range records {
  1467  			r := &records[i]
  1468  			if len(r.Name) > 0 {
  1469  				wb.PutCF(d.cfh[cfAddressAliases], []byte(r.Address), []byte(r.Name))
  1470  				cachedAddressAliasRecordsMux.Lock()
  1471  				cachedAddressAliasRecords[r.Address] = d.chainParser.FormatAddressAlias(r.Address, r.Name)
  1472  				cachedAddressAliasRecordsMux.Unlock()
  1473  			}
  1474  		}
  1475  	}
  1476  	return nil
  1477  }
  1478  
  1479  // Disconnect blocks
  1480  
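         // disconnectTxAddressesInputs reverts the inputs of a disconnected transaction - it marks the spent outputs
         // as unspent again and restores the balances and utxos of the affected addresses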
  1481  func (d *RocksDB) disconnectTxAddressesInputs(wb *grocksdb.WriteBatch, btxID []byte, inputs []outpoint, txa *TxAddresses, txAddressesToUpdate map[string]*TxAddresses,
  1482  	getAddressBalance func(addrDesc bchain.AddressDescriptor) (*AddrBalance, error),
  1483  	addressFoundInTx func(addrDesc bchain.AddressDescriptor, btxID []byte) bool) error {
  1484  	var err error
  1485  	var balance *AddrBalance
  1486  	for i, t := range txa.Inputs {
  1487  		if len(t.AddrDesc) > 0 {
  1488  			input := &inputs[i]
  1489  			exist := addressFoundInTx(t.AddrDesc, btxID)
  1490  			s := string(input.btxID)
  1491  			sa, found := txAddressesToUpdate[s]
  1492  			if !found {
  1493  				sa, err = d.getTxAddresses(input.btxID)
  1494  				if err != nil {
  1495  					return err
  1496  				}
  1497  				if sa != nil {
  1498  					txAddressesToUpdate[s] = sa
  1499  				}
  1500  			}
  1501  			var inputHeight uint32
  1502  			if sa != nil {
  1503  				sa.Outputs[input.index].Spent = false
  1504  				inputHeight = sa.Height
  1505  			}
  1506  			if d.chainParser.IsAddrDescIndexable(t.AddrDesc) {
  1507  				balance, err = getAddressBalance(t.AddrDesc)
  1508  				if err != nil {
  1509  					return err
  1510  				}
  1511  				if balance != nil {
  1512  					// subtract number of txs only once
  1513  					if !exist {
  1514  						balance.Txs--
  1515  					}
  1516  					balance.SentSat.Sub(&balance.SentSat, &t.ValueSat)
  1517  					if balance.SentSat.Sign() < 0 {
  1518  						d.resetValueSatToZero(&balance.SentSat, t.AddrDesc, "sent amount")
  1519  					}
  1520  					balance.BalanceSat.Add(&balance.BalanceSat, &t.ValueSat)
  1521  					balance.addUtxoInDisconnect(&Utxo{
  1522  						BtxID:    input.btxID,
  1523  						Vout:     input.index,
  1524  						Height:   inputHeight,
  1525  						ValueSat: t.ValueSat,
  1526  					})
  1527  				} else {
  1528  					ad, _, _ := d.chainParser.GetAddressesFromAddrDesc(t.AddrDesc)
  1529  					glog.Warningf("Balance for address %s (%s) not found", ad, t.AddrDesc)
  1530  				}
  1531  			}
  1532  		}
  1533  	}
  1534  	return nil
  1535  }
  1536  
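        // disconnectTxAddressesOutputs reverts the outputs of a transaction: it subtracts the output values from the
        // balances of the affected addresses and marks the corresponding utxos as spent so that they are removed from the balance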
  1537  func (d *RocksDB) disconnectTxAddressesOutputs(wb *grocksdb.WriteBatch, btxID []byte, txa *TxAddresses,
  1538  	getAddressBalance func(addrDesc bchain.AddressDescriptor) (*AddrBalance, error),
  1539  	addressFoundInTx func(addrDesc bchain.AddressDescriptor, btxID []byte) bool) error {
  1540  	for i, t := range txa.Outputs {
  1541  		if len(t.AddrDesc) > 0 {
  1542  			exist := addressFoundInTx(t.AddrDesc, btxID)
  1543  			if d.chainParser.IsAddrDescIndexable(t.AddrDesc) {
  1544  				balance, err := getAddressBalance(t.AddrDesc)
  1545  				if err != nil {
  1546  					return err
  1547  				}
  1548  				if balance != nil {
  1549  					// subtract number of txs only once
  1550  					if !exist {
  1551  						balance.Txs--
  1552  					}
  1553  					balance.BalanceSat.Sub(&balance.BalanceSat, &t.ValueSat)
  1554  					if balance.BalanceSat.Sign() < 0 {
  1555  						d.resetValueSatToZero(&balance.BalanceSat, t.AddrDesc, "balance")
  1556  					}
  1557  					balance.markUtxoAsSpent(btxID, int32(i))
  1558  				} else {
  1559  					ad, _, _ := d.chainParser.GetAddressesFromAddrDesc(t.AddrDesc)
  1560  					glog.Warningf("Balance for address %s (%s) not found", ad, t.AddrDesc)
  1561  				}
  1562  			}
  1563  		}
  1564  	}
  1565  	return nil
  1566  }
  1567  
  1568  func (d *RocksDB) disconnectBlock(height uint32, blockTxs []blockTxs) error {
  1569  	wb := grocksdb.NewWriteBatch()
  1570  	defer wb.Destroy()
  1571  	txAddressesToUpdate := make(map[string]*TxAddresses)
  1572  	txAddresses := make([]*TxAddresses, len(blockTxs))
  1573  	txsToDelete := make(map[string]struct{})
  1574  
  1575  	balances := make(map[string]*AddrBalance)
  1576  	getAddressBalance := func(addrDesc bchain.AddressDescriptor) (*AddrBalance, error) {
  1577  		var err error
  1578  		s := string(addrDesc)
  1579  		b, fb := balances[s]
  1580  		if !fb {
  1581  			b, err = d.GetAddrDescBalance(addrDesc, addressBalanceDetailUTXOIndexed)
  1582  			if err != nil {
  1583  				return nil, err
  1584  			}
  1585  			balances[s] = b
  1586  		}
  1587  		return b, nil
  1588  	}
  1589  
  1590  	// all addresses in the block are stored in blockAddressesTxs, together with a map of transactions where they appear
  1591  	blockAddressesTxs := make(map[string]map[string]struct{})
  1592  	// addressFoundInTx handles updates of the blockAddressesTxs map and returns true if the address+tx was already encountered
  1593  	addressFoundInTx := func(addrDesc bchain.AddressDescriptor, btxID []byte) bool {
  1594  		sAddrDesc := string(addrDesc)
  1595  		sBtxID := string(btxID)
  1596  		a, exist := blockAddressesTxs[sAddrDesc]
  1597  		if !exist {
  1598  			blockAddressesTxs[sAddrDesc] = map[string]struct{}{sBtxID: {}}
  1599  		} else {
  1600  			_, exist = a[sBtxID]
  1601  			if !exist {
  1602  				a[sBtxID] = struct{}{}
  1603  			}
  1604  		}
  1605  		return exist
  1606  	}
  1607  
  1608  	glog.Info("Disconnecting block ", height, " containing ", len(blockTxs), " transactions")
  1609  	// when connecting a block, outputs are processed first
  1610  	// when disconnecting, the inputs must therefore be reverted first
  1611  	for i := range blockTxs {
  1612  		btxID := blockTxs[i].btxID
  1613  		s := string(btxID)
  1614  		txsToDelete[s] = struct{}{}
  1615  		txa, err := d.getTxAddresses(btxID)
  1616  		if err != nil {
  1617  			return err
  1618  		}
  1619  		if txa == nil {
  1620  			ut, _ := d.chainParser.UnpackTxid(btxID)
  1621  			glog.Warning("TxAddress for txid ", ut, " not found")
  1622  			continue
  1623  		}
  1624  		txAddresses[i] = txa
  1625  		if err := d.disconnectTxAddressesInputs(wb, btxID, blockTxs[i].inputs, txa, txAddressesToUpdate, getAddressBalance, addressFoundInTx); err != nil {
  1626  			return err
  1627  		}
  1628  	}
  1629  	for i := range blockTxs {
  1630  		btxID := blockTxs[i].btxID
  1631  		txa := txAddresses[i]
  1632  		if txa == nil {
  1633  			continue
  1634  		}
  1635  		if err := d.disconnectTxAddressesOutputs(wb, btxID, txa, getAddressBalance, addressFoundInTx); err != nil {
  1636  			return err
  1637  		}
  1638  	}
  1639  	for a := range blockAddressesTxs {
  1640  		key := packAddressKey([]byte(a), height)
  1641  		wb.DeleteCF(d.cfh[cfAddresses], key)
  1642  	}
  1643  	key := packUint(height)
  1644  	wb.DeleteCF(d.cfh[cfBlockTxs], key)
  1645  	wb.DeleteCF(d.cfh[cfHeight], key)
  1646  	d.storeTxAddresses(wb, txAddressesToUpdate)
  1647  	d.storeBalancesDisconnect(wb, balances)
  1648  	for s := range txsToDelete {
  1649  		b := []byte(s)
  1650  		wb.DeleteCF(d.cfh[cfTransactions], b)
  1651  		wb.DeleteCF(d.cfh[cfTxAddresses], b)
  1652  	}
  1653  	return d.WriteBatch(wb)
  1654  }
  1655  
  1656  // DisconnectBlockRangeBitcoinType removes all data belonging to blocks in the range lower-higher
  1657  // it can disconnect only blocks for which data are present in the blockTxs column
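        // blocks are disconnected one by one, from the highest height down to the lowest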
  1658  func (d *RocksDB) DisconnectBlockRangeBitcoinType(lower uint32, higher uint32) error {
  1659  	blocks := make([][]blockTxs, higher-lower+1)
  1660  	for height := lower; height <= higher; height++ {
  1661  		blockTxs, err := d.getBlockTxs(height)
  1662  		if err != nil {
  1663  			return err
  1664  		}
  1665  		if len(blockTxs) == 0 {
  1666  			return errors.Errorf("Cannot disconnect blocks with height %v and lower. It is necessary to rebuild index.", height)
  1667  		}
  1668  		blocks[height-lower] = blockTxs
  1669  	}
  1670  	for height := higher; height >= lower; height-- {
  1671  		err := d.disconnectBlock(height, blocks[height-lower])
  1672  		if err != nil {
  1673  			return err
  1674  		}
  1675  	}
  1676  	d.is.RemoveLastBlockTimes(int(higher-lower) + 1)
  1677  	glog.Infof("rocksdb: blocks %d-%d disconnected", lower, higher)
  1678  	return nil
  1679  }
  1680  
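        // storeBalancesDisconnect removes the utxos marked as spent during the disconnect, re-sorts the remaining utxos by block height and stores the updated balances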
  1681  func (d *RocksDB) storeBalancesDisconnect(wb *grocksdb.WriteBatch, balances map[string]*AddrBalance) {
  1682  	for _, b := range balances {
  1683  		if b != nil {
  1684  			// remove spent utxos
  1685  			us := make([]Utxo, 0, len(b.Utxos))
  1686  			for _, u := range b.Utxos {
  1687  				// remove utxos marked as spent
  1688  				if u.Vout >= 0 {
  1689  					us = append(us, u)
  1690  				}
  1691  			}
  1692  			b.Utxos = us
  1693  			// sort utxos by height
  1694  			sort.SliceStable(b.Utxos, func(i, j int) bool {
  1695  				return b.Utxos[i].Height < b.Utxos[j].Height
  1696  			})
  1697  		}
  1698  	}
  1699  	d.storeBalances(wb, balances)
  1700  }
  1701  
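        // dirSize returns the total size in bytes of all files under path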
  1702  func dirSize(path string) (int64, error) {
  1703  	var size int64
  1704  	err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
  1705  		if err == nil {
  1706  			if !info.IsDir() {
  1707  				size += info.Size()
  1708  			}
  1709  		}
  1710  		return err
  1711  	})
  1712  	return size, err
  1713  }
  1714  
  1715  // limit the number of size-on-disk calculations by restricting them to at most once a minute
  1716  var databaseSizeOnDisk int64
  1717  var nextDatabaseSizeOnDisk time.Time
  1718  var databaseSizeOnDiskMux sync.Mutex
  1719  
  1720  // DatabaseSizeOnDisk returns the size of the database on disk in bytes
  1721  func (d *RocksDB) DatabaseSizeOnDisk() int64 {
  1722  	databaseSizeOnDiskMux.Lock()
  1723  	defer databaseSizeOnDiskMux.Unlock()
  1724  	now := time.Now().UTC()
  1725  	if now.Before(nextDatabaseSizeOnDisk) {
  1726  		return databaseSizeOnDisk
  1727  	}
  1728  	size, err := dirSize(d.path)
  1729  	if err != nil {
  1730  		glog.Warning("rocksdb: DatabaseSizeOnDisk: ", err)
  1731  		return 0
  1732  	}
  1733  	databaseSizeOnDisk = size
  1734  	nextDatabaseSizeOnDisk = now.Add(60 * time.Second)
  1735  	return size
  1736  }
  1737  
  1738  // GetTx returns the transaction stored in db and the height of the block containing it
  1739  func (d *RocksDB) GetTx(txid string) (*bchain.Tx, uint32, error) {
  1740  	key, err := d.chainParser.PackTxid(txid)
  1741  	if err != nil {
  1742  		return nil, 0, err
  1743  	}
  1744  	val, err := d.db.GetCF(d.ro, d.cfh[cfTransactions], key)
  1745  	if err != nil {
  1746  		return nil, 0, err
  1747  	}
  1748  	defer val.Free()
  1749  	data := val.Data()
  1750  	if len(data) > 4 {
  1751  		return d.chainParser.UnpackTx(data)
  1752  	}
  1753  	return nil, 0, nil
  1754  }
  1755  
  1756  // PutTx stores a transaction in db
  1757  func (d *RocksDB) PutTx(tx *bchain.Tx, height uint32, blockTime int64) error {
  1758  	key, err := d.chainParser.PackTxid(tx.Txid)
  1759  	if err != nil {
  1760  		return nil
  1761  	}
  1762  	buf, err := d.chainParser.PackTx(tx, height, blockTime)
  1763  	if err != nil {
  1764  		return err
  1765  	}
  1766  	err = d.db.PutCF(d.wo, d.cfh[cfTransactions], key, buf)
  1767  	if err == nil {
  1768  		d.is.AddDBColumnStats(cfTransactions, 1, int64(len(key)), int64(len(buf)))
  1769  	}
  1770  	return err
  1771  }
  1772  
  1773  // DeleteTx removes a transaction from db
  1774  func (d *RocksDB) DeleteTx(txid string) error {
  1775  	key, err := d.chainParser.PackTxid(txid)
  1776  	if err != nil {
  1777  		return nil
  1778  	}
  1779  	// use write batch so that this delete matches other deletes
  1780  	wb := grocksdb.NewWriteBatch()
  1781  	defer wb.Destroy()
  1782  	d.internalDeleteTx(wb, key)
  1783  	return d.WriteBatch(wb)
  1784  }
  1785  
  1786  // internalDeleteTx deletes the tx from db, updating the column statistics if the tx was stored
  1787  func (d *RocksDB) internalDeleteTx(wb *grocksdb.WriteBatch, key []byte) {
  1788  	val, err := d.db.GetCF(d.ro, d.cfh[cfTransactions], key)
  1789  	// ignore error, it is only for statistics
  1790  	if err == nil {
  1791  		l := len(val.Data())
  1792  		if l > 0 {
  1793  			d.is.AddDBColumnStats(cfTransactions, -1, int64(-len(key)), int64(-l))
  1794  		}
  1795  		defer val.Free()
  1796  	}
  1797  	wb.DeleteCF(d.cfh[cfTransactions], key)
  1798  }
  1799  
  1800  // internal state
  1801  const internalStateKey = "internalState"
  1802  
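        // loadBlockTimes reads the block time of every stored height from the height column,
        // filling any gaps in the sequence with the time of the previous block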
  1803  func (d *RocksDB) loadBlockTimes() ([]uint32, error) {
  1804  	var times []uint32
  1805  	it := d.db.NewIteratorCF(d.ro, d.cfh[cfHeight])
  1806  	defer it.Close()
  1807  	counter := uint32(0)
  1808  	time := uint32(0)
  1809  	for it.SeekToFirst(); it.Valid(); it.Next() {
  1810  		height := unpackUint(it.Key().Data())
  1811  		if height > counter {
  1812  			glog.Warning("gap in cfHeight: expecting ", counter, ", got ", height)
  1813  			for ; counter < height; counter++ {
  1814  				times = append(times, time)
  1815  			}
  1816  		}
  1817  		counter++
  1818  		info, err := d.unpackBlockInfo(it.Value().Data())
  1819  		if err != nil {
  1820  			return nil, err
  1821  		}
  1822  		if info != nil {
  1823  			time = uint32(info.Time)
  1824  		}
  1825  		times = append(times, time)
  1826  	}
  1827  	return times, nil
  1828  }
  1829  
  1830  func (d *RocksDB) checkColumns(is *common.InternalState) ([]common.InternalStateColumn, error) {
  1831  	// make sure that column stats match the columns
  1832  	sc := is.DbColumns
  1833  	nc := make([]common.InternalStateColumn, len(cfNames))
  1834  	for i := 0; i < len(nc); i++ {
  1835  		nc[i].Name = cfNames[i]
  1836  		nc[i].Version = dbVersion
  1837  		for j := 0; j < len(sc); j++ {
  1838  			if sc[j].Name == nc[i].Name {
  1839  				// check the version of the column, if it does not match, the db is not compatible
  1840  				if sc[j].Version != dbVersion {
  1841  					// upgrade of DB 5 to 6 for BitcoinType coins is possible
  1842  					// columns transactions and fiatRates must be cleared as they are not compatible
  1843  					if sc[j].Version == 5 && dbVersion == 6 && d.chainParser.GetChainType() == bchain.ChainBitcoinType {
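        						// clear the incompatible column by deleting the whole key range up to a high sentinel key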
  1844  						if nc[i].Name == "transactions" {
  1845  							d.db.DeleteRangeCF(d.wo, d.cfh[cfTransactions], []byte{0}, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff})
  1846  						} else if nc[i].Name == "fiatRates" {
  1847  							d.db.DeleteRangeCF(d.wo, d.cfh[cfFiatRates], []byte{0}, []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff})
  1848  						}
  1849  						glog.Infof("Column %s upgraded from v%d to v%d", nc[i].Name, sc[j].Version, dbVersion)
  1850  					} else {
  1851  						return nil, errors.Errorf("DB version %v of column '%v' does not match the required version %v. DB is not compatible.", sc[j].Version, sc[j].Name, dbVersion)
  1852  					}
  1853  				}
  1854  				nc[i].Rows = sc[j].Rows
  1855  				nc[i].KeyBytes = sc[j].KeyBytes
  1856  				nc[i].ValueBytes = sc[j].ValueBytes
  1857  				nc[i].Updated = sc[j].Updated
  1858  				break
  1859  			}
  1860  		}
  1861  	}
  1862  	return nc, nil
  1863  }
  1864  
  1865  // LoadInternalState loads the internal state from db or initializes a new one if not yet stored
  1866  func (d *RocksDB) LoadInternalState(rpcCoin string) (*common.InternalState, error) {
  1867  	val, err := d.db.GetCF(d.ro, d.cfh[cfDefault], []byte(internalStateKey))
  1868  	if err != nil {
  1869  		return nil, err
  1870  	}
  1871  	defer val.Free()
  1872  	data := val.Data()
  1873  	var is *common.InternalState
  1874  	if len(data) == 0 {
  1875  		is = &common.InternalState{Coin: rpcCoin, UtxoChecked: true, SortedAddressContracts: true, ExtendedIndex: d.extendedIndex}
  1876  	} else {
  1877  		is, err = common.UnpackInternalState(data)
  1878  		if err != nil {
  1879  			return nil, err
  1880  		}
  1881  		// verify that the rpc coin matches DB coin
  1882  		// running with a mismatched coin would corrupt the database
  1883  		if is.Coin == "" {
  1884  			is.Coin = rpcCoin
  1885  		} else if is.Coin != rpcCoin {
  1886  			return nil, errors.Errorf("Coins do not match. DB coin %v, RPC coin %v", is.Coin, rpcCoin)
  1887  		}
  1888  		if is.ExtendedIndex != d.extendedIndex {
  1889  			return nil, errors.Errorf("ExtendedIndex setting does not match. DB extendedIndex %v, extendedIndex in options %v", is.ExtendedIndex, d.extendedIndex)
  1890  		}
  1891  	}
  1892  	nc, err := d.checkColumns(is)
  1893  	if err != nil {
  1894  		return nil, err
  1895  	}
  1896  	is.DbColumns = nc
  1897  	bt, err := d.loadBlockTimes()
  1898  	if err != nil {
  1899  		return nil, err
  1900  	}
  1901  	avg := is.SetBlockTimes(bt)
  1902  	if d.metrics != nil {
  1903  		d.metrics.AvgBlockPeriod.Set(float64(avg))
  1904  	}
  1905  
  1906  	// after load, reset the synchronization data
  1907  	is.IsSynchronized = false
  1908  	is.IsMempoolSynchronized = false
  1909  	var t time.Time
  1910  	is.LastMempoolSync = t
  1911  	is.SyncMode = false
  1912  
  1913  	if d.chainParser.UseAddressAliases() {
  1914  		recordsCount, err := d.InitAddressAliasRecords()
  1915  		if err != nil {
  1916  			return nil, err
  1917  		}
  1918  		glog.Infof("loaded %d address alias records", recordsCount)
  1919  	}
  1920  
  1921  	return is, nil
  1922  }
  1923  
  1924  // SetInconsistentState sets the internal state to DbStateInconsistent or DbStateOpen based on the inconsistent parameter
  1925  // a db left in the DbStateInconsistent state cannot be used and must be recreated
  1926  func (d *RocksDB) SetInconsistentState(inconsistent bool) error {
  1927  	if d.is == nil {
  1928  		return errors.New("Internal state not created")
  1929  	}
  1930  	if inconsistent {
  1931  		d.is.DbState = common.DbStateInconsistent
  1932  	} else {
  1933  		d.is.DbState = common.DbStateOpen
  1934  	}
  1935  	return d.storeState(d.is)
  1936  }
  1937  
  1938  // SetInternalState sets the InternalState to be used by db to collect internal state
  1939  func (d *RocksDB) SetInternalState(is *common.InternalState) {
  1940  	d.is = is
  1941  }
  1942  
  1943  // GetInternalState gets the InternalState
  1944  func (d *RocksDB) GetInternalState() *common.InternalState {
  1945  	return d.is
  1946  }
  1947  
  1948  // StoreInternalState stores the internal state to db
  1949  func (d *RocksDB) StoreInternalState(is *common.InternalState) error {
  1950  	if d.metrics != nil {
  1951  		for c := 0; c < len(cfNames); c++ {
  1952  			rows, keyBytes, valueBytes := d.is.GetDBColumnStatValues(c)
  1953  			d.metrics.DbColumnRows.With(common.Labels{"column": cfNames[c]}).Set(float64(rows))
  1954  			d.metrics.DbColumnSize.With(common.Labels{"column": cfNames[c]}).Set(float64(keyBytes + valueBytes))
  1955  		}
  1956  	}
  1957  	return d.storeState(is)
  1958  }
  1959  
  1960  func (d *RocksDB) storeState(is *common.InternalState) error {
  1961  	buf, err := is.Pack()
  1962  	if err != nil {
  1963  		return err
  1964  	}
  1965  	return d.db.PutCF(d.wo, d.cfh[cfDefault], []byte(internalStateKey), buf)
  1966  }
  1967  
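        // computeColumnSize iterates over the whole column family and returns the number of rows and the total sizes of keys and values,
        // reopening the iterator every refreshIterator rows to release its snapshot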
  1968  func (d *RocksDB) computeColumnSize(col int, stopCompute chan os.Signal) (int64, int64, int64, error) {
  1969  	var rows, keysSum, valuesSum int64
  1970  	var seekKey []byte
  1971  	// do not use cache
  1972  	ro := grocksdb.NewDefaultReadOptions()
  1973  	ro.SetFillCache(false)
  1974  	for {
  1975  		var key []byte
  1976  		it := d.db.NewIteratorCF(ro, d.cfh[col])
  1977  		if rows == 0 {
  1978  			it.SeekToFirst()
  1979  		} else {
  1980  			glog.Info("db: Column ", cfNames[col], ": rows ", rows, ", key bytes ", keysSum, ", value bytes ", valuesSum, ", in progress...")
  1981  			it.Seek(seekKey)
  1982  			it.Next()
  1983  		}
  1984  		for count := 0; it.Valid() && count < refreshIterator; it.Next() {
  1985  			select {
  1986  			case <-stopCompute:
  1987  				return 0, 0, 0, errors.New("Interrupted")
  1988  			default:
  1989  			}
  1990  			key = it.Key().Data()
  1991  			count++
  1992  			rows++
  1993  			keysSum += int64(len(key))
  1994  			valuesSum += int64(len(it.Value().Data()))
  1995  		}
  1996  		seekKey = append([]byte{}, key...)
  1997  		valid := it.Valid()
  1998  		it.Close()
  1999  		if !valid {
  2000  			break
  2001  		}
  2002  	}
  2003  	return rows, keysSum, valuesSum, nil
  2004  }
  2005  
  2006  // ComputeInternalStateColumnStats computes stats of all db columns and stores them in the internal state
  2007  // it can be a very slow operation
  2008  func (d *RocksDB) ComputeInternalStateColumnStats(stopCompute chan os.Signal) error {
  2009  	start := time.Now()
  2010  	glog.Info("db: ComputeInternalStateColumnStats start")
  2011  	for c := 0; c < len(cfNames); c++ {
  2012  		rows, keysSum, valuesSum, err := d.computeColumnSize(c, stopCompute)
  2013  		if err != nil {
  2014  			return err
  2015  		}
  2016  		d.is.SetDBColumnStats(c, rows, keysSum, valuesSum)
  2017  		glog.Info("db: Column ", cfNames[c], ": rows ", rows, ", key bytes ", keysSum, ", value bytes ", valuesSum)
  2018  	}
  2019  	glog.Info("db: ComputeInternalStateColumnStats finished in ", time.Since(start))
  2020  	return nil
  2021  }
  2022  
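        // reorderUtxo sorts the consecutive run of utxos that share the txid of utxos[index] by their vout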
  2023  func reorderUtxo(utxos []Utxo, index int) {
  2024  	var from, to int
  2025  	for from = index; from >= 0; from-- {
  2026  		if !bytes.Equal(utxos[from].BtxID, utxos[index].BtxID) {
  2027  			break
  2028  		}
  2029  	}
  2030  	from++
  2031  	for to = index + 1; to < len(utxos); to++ {
  2032  		if !bytes.Equal(utxos[to].BtxID, utxos[index].BtxID) {
  2033  			break
  2034  		}
  2035  	}
  2036  	toSort := utxos[from:to]
  2037  	sort.SliceStable(toSort, func(i, j int) bool {
  2038  		return toSort[i].Vout < toSort[j].Vout
  2039  	})
  2040  
  2041  }
  2042  
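        // fixUtxo verifies that the utxos of a single address are ordered correctly and that their sum matches the stored balance;
        // if not, it reorders them or rebuilds them from the transactions of the address, returning (fixed, reordered, error)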
  2043  func (d *RocksDB) fixUtxo(addrDesc bchain.AddressDescriptor, ba *AddrBalance) (bool, bool, error) {
  2044  	reorder := false
  2045  	var checksum big.Int
  2046  	var prevUtxo *Utxo
  2047  	for i := range ba.Utxos {
  2048  		utxo := &ba.Utxos[i]
  2049  		checksum.Add(&checksum, &utxo.ValueSat)
  2050  		if prevUtxo != nil {
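        			// fast path: compare the first machine word of the two txids before the full bytes.Equal check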
  2051  			if prevUtxo.Vout > utxo.Vout && *(*int)(unsafe.Pointer(&utxo.BtxID[0])) == *(*int)(unsafe.Pointer(&prevUtxo.BtxID[0])) && bytes.Equal(utxo.BtxID, prevUtxo.BtxID) {
  2052  				reorderUtxo(ba.Utxos, i)
  2053  				reorder = true
  2054  			}
  2055  		}
  2056  		prevUtxo = utxo
  2057  	}
  2058  	if reorder {
  2059  		// get the checksum again after reorder
  2060  		checksum.SetInt64(0)
  2061  		for i := range ba.Utxos {
  2062  			utxo := &ba.Utxos[i]
  2063  			checksum.Add(&checksum, &utxo.ValueSat)
  2064  		}
  2065  	}
  2066  	if checksum.Cmp(&ba.BalanceSat) != 0 {
  2067  		var checksumFromTxs big.Int
  2068  		var utxos []Utxo
  2069  		err := d.GetAddrDescTransactions(addrDesc, 0, ^uint32(0), func(txid string, height uint32, indexes []int32) error {
  2070  			var ta *TxAddresses
  2071  			var err error
  2072  			// sort the indexes so that the utxos are appended in the reverse order
  2073  			sort.Slice(indexes, func(i, j int) bool {
  2074  				return indexes[i] > indexes[j]
  2075  			})
  2076  			for _, index := range indexes {
  2077  				// take only outputs
  2078  				if index < 0 {
  2079  					break
  2080  				}
  2081  				if ta == nil {
  2082  					ta, err = d.GetTxAddresses(txid)
  2083  					if err != nil {
  2084  						return err
  2085  					}
  2086  				}
  2087  				if ta == nil {
  2088  					return errors.New("DB inconsistency:  tx " + txid + ": not found in txAddresses")
  2089  				}
  2090  				if len(ta.Outputs) <= int(index) {
  2091  					glog.Warning("DB inconsistency:  txAddresses " + txid + " does not have enough outputs")
  2092  				} else {
  2093  					tao := &ta.Outputs[index]
  2094  					if !tao.Spent {
  2095  						bTxid, _ := d.chainParser.PackTxid(txid)
  2096  						checksumFromTxs.Add(&checksumFromTxs, &tao.ValueSat)
  2097  						utxos = append(utxos, Utxo{BtxID: bTxid, Height: height, Vout: index, ValueSat: tao.ValueSat})
  2098  						if checksumFromTxs.Cmp(&ba.BalanceSat) == 0 {
  2099  							return &StopIteration{}
  2100  						}
  2101  					}
  2102  				}
  2103  			}
  2104  			return nil
  2105  		})
  2106  		if err != nil {
  2107  			return false, false, err
  2108  		}
  2109  		fixed := false
  2110  		if checksumFromTxs.Cmp(&ba.BalanceSat) == 0 {
  2111  			// reverse the utxos as they are added in descending order by height
  2112  			for i := len(utxos)/2 - 1; i >= 0; i-- {
  2113  				opp := len(utxos) - 1 - i
  2114  				utxos[i], utxos[opp] = utxos[opp], utxos[i]
  2115  			}
  2116  			ba.Utxos = utxos
  2117  			wb := grocksdb.NewWriteBatch()
  2118  			err = d.storeBalances(wb, map[string]*AddrBalance{string(addrDesc): ba})
  2119  			if err == nil {
  2120  				err = d.WriteBatch(wb)
  2121  			}
  2122  			wb.Destroy()
  2123  			if err != nil {
  2124  				return false, false, errors.Errorf("balance %s, checksum %s, from txa %s, txs %d, error storing fixed utxos %v", ba.BalanceSat.String(), checksum.String(), checksumFromTxs.String(), ba.Txs, err)
  2125  			}
  2126  			fixed = true
  2127  		}
  2128  		return fixed, false, errors.Errorf("balance %s, checksum %s, from txa %s, txs %d", ba.BalanceSat.String(), checksum.String(), checksumFromTxs.String(), ba.Txs)
  2129  	} else if reorder {
  2130  		wb := grocksdb.NewWriteBatch()
  2131  		err := d.storeBalances(wb, map[string]*AddrBalance{string(addrDesc): ba})
  2132  		if err == nil {
  2133  			err = d.WriteBatch(wb)
  2134  		}
  2135  		wb.Destroy()
  2136  		if err != nil {
  2137  			return false, false, errors.Errorf("error storing reordered utxos %v", err)
  2138  		}
  2139  	}
  2140  	return false, reorder, nil
  2141  }
  2142  
  2143  // FixUtxos checks and fixes possible inconsistencies between the stored address balances and their utxos
  2144  func (d *RocksDB) FixUtxos(stop chan os.Signal) error {
  2145  	if d.chainParser.GetChainType() != bchain.ChainBitcoinType {
  2146  		glog.Info("FixUtxos: applicable only for bitcoin type coins")
  2147  		return nil
  2148  	}
  2149  	glog.Info("FixUtxos: starting")
  2150  	var row, errorsCount, fixedCount int64
  2151  	var seekKey []byte
  2152  	// do not use cache
  2153  	ro := grocksdb.NewDefaultReadOptions()
  2154  	ro.SetFillCache(false)
  2155  	for {
  2156  		var addrDesc bchain.AddressDescriptor
  2157  		it := d.db.NewIteratorCF(ro, d.cfh[cfAddressBalance])
  2158  		if row == 0 {
  2159  			it.SeekToFirst()
  2160  		} else {
  2161  			glog.Info("FixUtxos: row ", row, ", errors ", errorsCount)
  2162  			it.Seek(seekKey)
  2163  			it.Next()
  2164  		}
  2165  		for count := 0; it.Valid() && count < refreshIterator; it.Next() {
  2166  			select {
  2167  			case <-stop:
  2168  				return errors.New("Interrupted")
  2169  			default:
  2170  			}
  2171  			addrDesc = it.Key().Data()
  2172  			buf := it.Value().Data()
  2173  			count++
  2174  			row++
  2175  			if len(buf) < 3 {
  2176  				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, ", empty data")
  2177  				errorsCount++
  2178  				continue
  2179  			}
  2180  			ba, err := unpackAddrBalance(buf, d.chainParser.PackedTxidLen(), AddressBalanceDetailUTXO)
  2181  			if err != nil {
  2182  				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, ", unpackAddrBalance error ", err)
  2183  				errorsCount++
  2184  				continue
  2185  			}
  2186  			fixed, reordered, err := d.fixUtxo(addrDesc, ba)
  2187  			if err != nil {
  2188  				errorsCount++
  2189  				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, ", error ", err, ", fixed ", fixed)
  2190  				if fixed {
  2191  					fixedCount++
  2192  				}
  2193  			} else if reordered {
  2194  				glog.Error("FixUtxos: row ", row, ", addrDesc ", addrDesc, " reordered")
  2195  				fixedCount++
  2196  			}
  2197  		}
  2198  		seekKey = append([]byte{}, addrDesc...)
  2199  		valid := it.Valid()
  2200  		it.Close()
  2201  		if !valid {
  2202  			break
  2203  		}
  2204  	}
  2205  	glog.Info("FixUtxos: finished, scanned ", row, " rows, found ", errorsCount, " errors, fixed ", fixedCount)
  2206  	return nil
  2207  }
  2208  
  2209  // Helpers
  2210  
  2211  func packAddressKey(addrDesc bchain.AddressDescriptor, height uint32) []byte {
  2212  	buf := make([]byte, len(addrDesc)+packedHeightBytes)
  2213  	copy(buf, addrDesc)
  2214  	// pack height as binary complement to achieve ordering from newest to oldest block
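        	// e.g. height 650000 (0x0009EB10) is stored as ^0x0009EB10 = 0xFFF614EF, so a higher block sorts before a lower one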
  2215  	binary.BigEndian.PutUint32(buf[len(addrDesc):], ^height)
  2216  	return buf
  2217  }
  2218  
  2219  func unpackAddressKey(key []byte) ([]byte, uint32, error) {
  2220  	i := len(key) - packedHeightBytes
  2221  	if i <= 0 {
  2222  		return nil, 0, errors.New("Invalid address key")
  2223  	}
  2224  	// height is packed in binary complement, convert it
  2225  	return key[:i], ^unpackUint(key[i : i+packedHeightBytes]), nil
  2226  }
  2227  
  2228  func packUint(i uint32) []byte {
  2229  	buf := make([]byte, 4)
  2230  	binary.BigEndian.PutUint32(buf, i)
  2231  	return buf
  2232  }
  2233  
  2234  func unpackUint(buf []byte) uint32 {
  2235  	return binary.BigEndian.Uint32(buf)
  2236  }
  2237  
  2238  func packVarint32(i int32, buf []byte) int {
  2239  	return vlq.PutInt(buf, int64(i))
  2240  }
  2241  
  2242  func packVarint(i int, buf []byte) int {
  2243  	return vlq.PutInt(buf, int64(i))
  2244  }
  2245  
  2246  func packVaruint(i uint, buf []byte) int {
  2247  	return vlq.PutUint(buf, uint64(i))
  2248  }
  2249  
  2250  func unpackVarint32(buf []byte) (int32, int) {
  2251  	i, ofs := vlq.Int(buf)
  2252  	return int32(i), ofs
  2253  }
  2254  
  2255  func unpackVarint(buf []byte) (int, int) {
  2256  	i, ofs := vlq.Int(buf)
  2257  	return int(i), ofs
  2258  }
  2259  
  2260  func unpackVaruint(buf []byte) (uint, int) {
  2261  	i, ofs := vlq.Uint(buf)
  2262  	return uint(i), ofs
  2263  }
  2264  
  2265  func packString(s string) []byte {
  2266  	varBuf := make([]byte, vlq.MaxLen64)
  2267  	l := len(s)
  2268  	i := packVaruint(uint(l), varBuf)
  2269  	buf := make([]byte, 0, i+l)
  2270  	buf = append(buf, varBuf[:i]...)
  2271  	buf = append(buf, s...)
  2272  	return buf
  2273  }
  2274  
  2275  func unpackString(buf []byte) (string, int) {
  2276  	sl, l := unpackVaruint(buf)
  2277  	so := l + int(sl)
  2278  	s := string(buf[l:so])
  2279  	return s, so
  2280  }
  2281  
  2282  const (
  2283  	// number of bits in a big.Word
  2284  	wordBits = 32 << (uint64(^big.Word(0)) >> 63)
  2285  	// number of bytes in a big.Word
  2286  	wordBytes = wordBits / 8
  2287  	// max packed bigint words
  2288  	maxPackedBigintWords = (256 - wordBytes) / wordBytes
  2289  	maxPackedBigintBytes = 249
  2290  )
  2291  
  2292  // a big int is packed in BigEndian order, without memory allocation, as a 1 byte length followed by the bytes of the big int
  2293  // the number of written bytes is returned
  2294  // limitation: big ints longer than 248 bytes are truncated to 248 bytes
  2295  // caution: the buffer must be big enough to hold the packed big int; a buffer of 249 bytes is always safe
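        // for example, 0 packs to [0x00], 1 to [0x01 0x01] and 256 to [0x02 0x01 0x00]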
  2296  func packBigint(bi *big.Int, buf []byte) int {
  2297  	w := bi.Bits()
  2298  	lw := len(w)
  2299  	// zero returns only one byte - zero length
  2300  	if lw == 0 {
  2301  		buf[0] = 0
  2302  		return 1
  2303  	}
  2304  	// pack the most significant word in a special way - skip leading zeros
  2305  	w0 := w[lw-1]
  2306  	fb := 8
  2307  	mask := big.Word(0xff) << (wordBits - 8)
  2308  	for w0&mask == 0 {
  2309  		fb--
  2310  		mask >>= 8
  2311  	}
  2312  	for i := fb; i > 0; i-- {
  2313  		buf[i] = byte(w0)
  2314  		w0 >>= 8
  2315  	}
  2316  	// if the big int is too big (>= 2^1984), the number of bytes would not fit into 1 byte
  2317  	// in this case, truncate the number; amounts this big are not expected
  2318  	s := 0
  2319  	if lw > maxPackedBigintWords {
  2320  		s = lw - maxPackedBigintWords
  2321  	}
  2322  	// pack the rest of the words in reverse order
  2323  	for j := lw - 2; j >= s; j-- {
  2324  		d := w[j]
  2325  		for i := fb + wordBytes; i > fb; i-- {
  2326  			buf[i] = byte(d)
  2327  			d >>= 8
  2328  		}
  2329  		fb += wordBytes
  2330  	}
  2331  	buf[0] = byte(fb)
  2332  	return fb + 1
  2333  }
  2334  
  2335  func unpackBigint(buf []byte) (big.Int, int) {
  2336  	var r big.Int
  2337  	l := int(buf[0]) + 1
  2338  	r.SetBytes(buf[1:l])
  2339  	return r, l
  2340  }