github.com/fibonacci-chain/fbc@v0.0.0-20231124064014-c7636198c1e9/x/evm/watcher/watcher.go

package watcher

import (
	"encoding/hex"
	"fmt"
	"math/big"
	"sync"

	"github.com/ethereum/go-ethereum/common"
	ethtypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/fibonacci-chain/fbc/app/rpc/namespaces/eth/state"
	sdk "github.com/fibonacci-chain/fbc/libs/cosmos-sdk/types"
	"github.com/fibonacci-chain/fbc/libs/cosmos-sdk/x/auth"
	"github.com/fibonacci-chain/fbc/libs/tendermint/abci/types"
	"github.com/fibonacci-chain/fbc/libs/tendermint/crypto/tmhash"
	"github.com/fibonacci-chain/fbc/libs/tendermint/libs/log"
	ctypes "github.com/fibonacci-chain/fbc/libs/tendermint/rpc/core/types"
	tmstate "github.com/fibonacci-chain/fbc/libs/tendermint/state"
	tmtypes "github.com/fibonacci-chain/fbc/libs/tendermint/types"
	evmtypes "github.com/fibonacci-chain/fbc/x/evm/types"
	jsoniter "github.com/json-iterator/go"
	"github.com/spf13/viper"
	"github.com/tendermint/go-amino"
)

const version = "v1"

var itjs = jsoniter.ConfigCompatibleWithStandardLibrary
    29  
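// Watcher collects EVM-related write sets (receipts, blocks, accounts,
// contract state, params) for the current block and flushes them to the
// watch store asynchronously. It also packages the per-block write set as
// WatchData so it can be shipped to other nodes as a state delta.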
type Watcher struct {
	store          *WatchStore
	height         uint64
	blockHash      common.Hash
	header         types.Header
	batch          []WatchMessage
	cumulativeGas  map[uint64]uint64
	gasUsed        uint64
	blockTxs       []common.Hash
	blockStdTxs    []common.Hash
	enable         bool
	firstUse       bool
	delayEraseKey  [][]byte
	eraseKeyFilter map[string][]byte
	log            log.Logger
	// for state delta transferring in the network
	watchData     *WatchData
	jobChan       chan func()
	jobDone       *sync.WaitGroup
	evmTxIndex    uint64
	checkWd       bool
	filterMap     map[string]struct{}
	InfuraKeeper  InfuraKeeper
	delAccountMtx sync.Mutex
}
    55  
var (
	watcherEnable  = false
	watcherLruSize = 1000
	onceEnable     sync.Once
	onceLru        sync.Once
)
    62  
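// IsWatcherEnabled reads FlagFastQuery exactly once; the result is cached by
// sync.Once, so later changes to the flag have no effect.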
func IsWatcherEnabled() bool {
	onceEnable.Do(func() {
		watcherEnable = viper.GetBool(FlagFastQuery)
	})
	return watcherEnable
}

func GetWatchLruSize() int {
	onceLru.Do(func() {
		watcherLruSize = viper.GetInt(FlagFastQueryLru)
	})
	return watcherLruSize
}
    76  
func NewWatcher(logger log.Logger) *Watcher {
	return &Watcher{
		store:          InstanceOfWatchStore(),
		cumulativeGas:  make(map[uint64]uint64),
		enable:         IsWatcherEnabled(),
		firstUse:       true,
		delayEraseKey:  make([][]byte, 0),
		watchData:      &WatchData{},
		log:            logger,
		checkWd:        viper.GetBool(FlagCheckWd),
		filterMap:      make(map[string]struct{}),
		eraseKeyFilter: make(map[string][]byte),
	}
}
    90  
func (w *Watcher) IsFirstUse() bool {
	return w.firstUse
}

// SetFirstUse sets firstUse of the Watcher; it is only meant for unit tests.
func (w *Watcher) SetFirstUse(v bool) {
	w.firstUse = v
}

func (w *Watcher) Used() {
	w.firstUse = false
}
   103  
func (w *Watcher) Enabled() bool {
	return w.enable
}

func (w *Watcher) Enable(enable bool) {
	w.enable = enable
}

func (w *Watcher) GetEvmTxIndex() uint64 {
	return w.evmTxIndex
}
   115  
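// NewHeight resets all per-block state (batch, watch data, gas counters,
// tx hash lists) at the beginning of a new block.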
func (w *Watcher) NewHeight(height uint64, blockHash common.Hash, header types.Header) {
	if !w.Enabled() {
		return
	}
	w.header = header
	w.height = height
	w.blockHash = blockHash
	w.batch = []WatchMessage{} // reset batch
	// reset the watch data used for state delta transfer
	w.watchData = &WatchData{}
	w.evmTxIndex = 0
	for k := range w.cumulativeGas {
		delete(w.cumulativeGas, k)
	}
	w.gasUsed = 0
	w.blockTxs = []common.Hash{}
	w.blockStdTxs = []common.Hash{}
}
   134  
func (w *Watcher) SaveTransactionReceipt(status uint32, msg *evmtypes.MsgEthereumTx, txHash common.Hash, txIndex uint64, data *evmtypes.ResultData, gasUsed uint64) {
	if !w.Enabled() {
		return
	}
	w.UpdateCumulativeGas(txIndex, gasUsed)
	tr := newTransactionReceipt(status, msg, txHash, w.blockHash, txIndex, w.height, data, w.cumulativeGas[txIndex], gasUsed)
	if w.InfuraKeeper != nil {
		w.InfuraKeeper.OnSaveTransactionReceipt(tr)
	}
	wMsg := NewMsgTransactionReceipt(tr, txHash)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}
   149  
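// UpdateCumulativeGas accumulates the gas used per transaction index. Note
// that the non-empty branch reads cumulativeGas[txIndex-1], so it appears to
// rely on transactions being recorded with consecutive indices within a block.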
func (w *Watcher) UpdateCumulativeGas(txIndex, gasUsed uint64) {
	if !w.Enabled() {
		return
	}
	if len(w.cumulativeGas) == 0 {
		w.cumulativeGas[txIndex] = gasUsed
	} else {
		w.cumulativeGas[txIndex] = w.cumulativeGas[txIndex-1] + gasUsed
	}
	w.gasUsed += gasUsed
}
   161  
func (w *Watcher) SaveAccount(account auth.Account) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgAccount(account)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}
   171  
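// DeleteAccount does not delete immediately: it queues both the watcher key
// and the RPC-db-prefixed key for the address, and the actual deletion
// happens later in ExecuteDelayEraseKey.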
func (w *Watcher) DeleteAccount(addr sdk.AccAddress) {
	if !w.Enabled() {
		return
	}
	key1 := GetMsgAccountKey(addr.Bytes())
	key2 := append(prefixRpcDb, key1...)
	w.delAccountMtx.Lock()
	w.delayEraseKey = append(w.delayEraseKey, key1)
	w.delayEraseKey = append(w.delayEraseKey, key2)
	w.delAccountMtx.Unlock()
}
   183  
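// DelayEraseKey hands the queued keys over to the job routine, which erases
// them from the store in a single write batch.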
func (w *Watcher) DelayEraseKey() {
	if !w.Enabled() {
		return
	}
	// hold the keys in a temp variable so the async job works on a stable copy
	delayEraseKey := w.delayEraseKey
	w.delayEraseKey = make([][]byte, 0)
	w.dispatchJob(func() {
		w.ExecuteDelayEraseKey(delayEraseKey)
	})
}

func (w *Watcher) ExecuteDelayEraseKey(delayEraseKey [][]byte) {
	if !w.Enabled() || len(delayEraseKey) == 0 {
		return
	}
	// deduplicate the keys via eraseKeyFilter before deleting
	for _, k := range delayEraseKey {
		w.eraseKeyFilter[bytes2Key(k)] = k
	}
	batch := w.store.db.NewBatch()
	defer batch.Close()
	for _, k := range w.eraseKeyFilter {
		batch.Delete(k)
	}
	batch.Write()
	for k := range w.eraseKeyFilter {
		delete(w.eraseKeyFilter, k)
	}
}
   213  
func (w *Watcher) SaveBlock(bloom ethtypes.Bloom) {
	if !w.Enabled() {
		return
	}
	block := newBlock(w.height, bloom, w.blockHash, w.header, uint64(0xffffffff), big.NewInt(int64(w.gasUsed)), w.blockTxs)
	if w.InfuraKeeper != nil {
		w.InfuraKeeper.OnSaveBlock(block)
	}
	wMsg := NewMsgBlock(block)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}

	wInfo := NewMsgBlockInfo(w.height, w.blockHash)
	if wInfo != nil {
		w.batch = append(w.batch, wInfo)
	}
	w.SaveLatestHeight(w.height)
}

func (w *Watcher) SaveBlockStdTxHash() {
	if !w.Enabled() || len(w.blockStdTxs) == 0 {
		return
	}
	wMsg := NewMsgBlockStdTxHash(w.blockStdTxs, w.blockHash)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}
   243  
func (w *Watcher) SaveLatestHeight(height uint64) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgLatestHeight(height)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}

func (w *Watcher) SaveParams(params evmtypes.Params) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgParams(params)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}

func (w *Watcher) SaveContractBlockedListItem(addr sdk.AccAddress) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgContractBlockedListItem(addr)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}

func (w *Watcher) SaveContractMethodBlockedListItem(addr sdk.AccAddress, methods []byte) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgContractMethodBlockedListItem(addr, methods)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}

func (w *Watcher) SaveContractDeploymentWhitelistItem(addr sdk.AccAddress) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgContractDeploymentWhitelistItem(addr)
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}
   293  
func (w *Watcher) CommitStateToRpcDb(addr common.Address, key, value []byte) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgState(addr, key, value)
	if wMsg != nil {
		w.store.Set(append(prefixRpcDb, wMsg.GetKey()...), []byte(wMsg.GetValue()))
	}
}

func (w *Watcher) CommitAccountToRpcDb(account auth.Account) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgAccount(account)
	if wMsg != nil {
		key := append(prefixRpcDb, wMsg.GetKey()...)
		w.store.Set(key, []byte(wMsg.GetValue()))
	}
}

func (w *Watcher) CommitCodeHashToDb(hash []byte, code []byte) {
	if !w.Enabled() {
		return
	}
	wMsg := NewMsgCodeByHash(hash, code)
	if wMsg != nil {
		w.store.Set(wMsg.GetKey(), []byte(wMsg.GetValue()))
	}
}
   324  
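// Commit snapshots the current batch and commits it on the job routine, so
// the caller does not block on disk I/O. When deltas are uploaded instead
// (tmtypes.UploadDelta), nothing is written to the local db.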
func (w *Watcher) Commit() {
	if !w.Enabled() {
		return
	}
	// No need to write to the local db when uploading deltas is enabled.
	if tmtypes.UploadDelta {
		return
	}
	// hold the batch in a temp variable so the async job works on a stable copy
	batch := w.batch
	w.dispatchJob(func() {
		w.commitBatch(batch)
	})
}
   339  
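// CommitWatchData applies a WatchData received from a delta producer:
// key/value batches, dirty-list deletions and bloom data are written to the
// local stores, and delayed erase keys are adopted for later execution.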
func (w *Watcher) CommitWatchData(data WatchData) {
	if data.Size() == 0 {
		return
	}
	if data.Batches != nil {
		w.commitCenterBatch(data.Batches)
	}
	if data.DirtyList != nil {
		w.delDirtyList(data.DirtyList)
	}
	if data.BloomData != nil {
		w.commitBloomData(data.BloomData)
	}
	w.delayEraseKey = data.DelayEraseKey

	if w.checkWd {
		keys := make([][]byte, len(data.Batches))
		for i := range data.Batches {
			keys[i] = data.Batches[i].Key
		}
		w.CheckWatchDB(keys, "consumer")
	}
}

func isDuplicated(key []byte, filterMap map[string]struct{}) bool {
	filterKey := bytes2Key(key)
	if _, exist := filterMap[filterKey]; exist {
		return true
	}
	filterMap[filterKey] = struct{}{}
	return false
}
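
// commitBatch writes a deduplicated batch to the watch store. Besides the
// raw key/value writes, it keeps the in-memory EVM params cache and the
// state LRU in sync with what was written.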
func (w *Watcher) commitBatch(batch []WatchMessage) {
	dbBatch := w.store.db.NewBatch()
	defer dbBatch.Close()
	// iterate the batch from the end to the start, so the latest message for
	// each key is kept and earlier duplicates are skipped
	for i := len(batch) - 1; i >= 0; i-- {
		b := batch[i]
		key := b.GetKey()
		if isDuplicated(key, w.filterMap) {
			continue
		}
		value := []byte(b.GetValue())
		typeValue := b.GetType()
		if typeValue == TypeDelete {
			dbBatch.Delete(key)
		} else {
			dbBatch.Set(key, value)
			// the params cache needs to be updated as well
			if typeValue == TypeEvmParams {
				msgParams := b.(*MsgParams)
				w.store.SetEvmParams(msgParams.Params)
			}
			if typeValue == TypeState {
				state.SetStateToLru(key, value)
			}
		}
	}
	dbBatch.Write()
	for k := range w.filterMap {
		delete(w.filterMap, k)
	}
	if w.checkWd {
		keys := make([][]byte, len(batch))
		for i := range batch {
			keys[i] = batch[i].GetKey()
		}
		w.CheckWatchDB(keys, "producer")
	}
}
   410  
func (w *Watcher) commitCenterBatch(batch []*Batch) {
	dbBatch := w.store.db.NewBatch()
	defer dbBatch.Close()
	for _, b := range batch {
		if b.TypeValue == TypeDelete {
			dbBatch.Delete(b.Key)
		} else {
			dbBatch.Set(b.Key, b.Value)
			if b.TypeValue == TypeState {
				state.SetStateToLru(b.Key, b.Value)
			}
		}
	}
	dbBatch.Write()
}
   426  
func (w *Watcher) delDirtyList(list [][]byte) {
	for _, key := range list {
		w.store.Delete(key)
	}
}

func (w *Watcher) commitBloomData(bloomData []*evmtypes.KV) {
	db := evmtypes.GetIndexer().GetDB()
	batch := db.NewBatch()
	defer batch.Close()
	for _, bd := range bloomData {
		batch.Set(bd.Key, bd.Value)
	}
	batch.Write()
}
   442  
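// CreateWatchDataGenerator captures the current block's watch data and batch
// and returns a closure that serializes them (deduplicated via filterCopy)
// with amino, for use as an uploadable state delta.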
func (w *Watcher) CreateWatchDataGenerator() func() ([]byte, error) {
	value := w.watchData
	value.DelayEraseKey = w.delayEraseKey

	// hold the batch in a temp variable so the generator works on a stable copy
	batch := w.batch
	return func() ([]byte, error) {
		ddsBatch := make([]*Batch, len(batch))
		for i, b := range batch {
			ddsBatch[i] = &Batch{b.GetKey(), []byte(b.GetValue()), b.GetType()}
		}
		value.Batches = ddsBatch

		filterWatcher := filterCopy(value)
		valueByte, err := filterWatcher.MarshalToAmino(nil)
		if err != nil {
			return nil, err
		}
		return valueByte, nil
	}
}
   464  
func (w *Watcher) UnmarshalWatchData(wdByte []byte) (interface{}, error) {
	if len(wdByte) == 0 {
		return nil, fmt.Errorf("failed to unmarshal watch data: empty data")
	}
	wd := WatchData{}
	if err := wd.UnmarshalFromAmino(nil, wdByte); err != nil {
		return nil, err
	}
	return wd, nil
}
   475  
func (w *Watcher) ApplyWatchData(watchData interface{}) {
	wd, ok := watchData.(WatchData)
	if !ok {
		panic("failed to apply watch data: type assertion to WatchData failed")
	}
	w.dispatchJob(func() { w.CommitWatchData(wd) })
}
   483  
func (w *Watcher) SetWatchDataManager() {
	go w.jobRoutine()
	tmstate.SetEvmWatchDataManager(w)
}

func (w *Watcher) GetBloomDataPoint() *[]*evmtypes.KV {
	return &w.watchData.BloomData
}
   492  
func (w *Watcher) CheckWatchDB(keys [][]byte, mode string) {
	output := make(map[string]string, len(keys))
	kvHash := tmhash.New()
	for _, key := range keys {
		value, err := w.store.Get(key)
		if err != nil {
			continue
		}
		kvHash.Write(key)
		kvHash.Write(value)
		output[hex.EncodeToString(key)] = string(value)
	}

	w.log.Info("watchDB delta", "mode", mode, "height", w.height, "hash", hex.EncodeToString(kvHash.Sum(nil)), "kv", output)
}
   508  
func bytes2Key(keyBytes []byte) string {
	return amino.BytesToStr(keyBytes)
}
   512  
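// filterCopy deduplicates every component of the WatchData by key. Since the
// dedup goes through maps, the relative order of entries is not preserved.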
func filterCopy(origin *WatchData) *WatchData {
	return &WatchData{
		Batches:       filterBatch(origin.Batches),
		DelayEraseKey: filterDelayEraseKey(origin.DelayEraseKey),
		BloomData:     filterBloomData(origin.BloomData),
		DirtyList:     filterDirtyList(origin.DirtyList),
	}
}
   521  
func filterBatch(datas []*Batch) []*Batch {
	if len(datas) == 0 {
		return nil
	}

	filterBatch := make(map[string]*Batch)
	for _, b := range datas {
		filterBatch[bytes2Key(b.Key)] = b
	}

	ret := make([]*Batch, len(filterBatch))
	i := 0
	for _, b := range filterBatch {
		ret[i] = b
		i++
	}

	return ret
}
   541  
func filterDelayEraseKey(datas [][]byte) [][]byte {
	if len(datas) == 0 {
		return nil
	}

	filterDelayEraseKey := make(map[string][]byte)
	for _, b := range datas {
		filterDelayEraseKey[bytes2Key(b)] = b
	}

	ret := make([][]byte, len(filterDelayEraseKey))
	i := 0
	for _, k := range filterDelayEraseKey {
		ret[i] = k
		i++
	}

	return ret
}

func filterBloomData(datas []*evmtypes.KV) []*evmtypes.KV {
	if len(datas) == 0 {
		return nil
	}

	filterBloomData := make(map[string]*evmtypes.KV)
	for _, k := range datas {
		filterBloomData[bytes2Key(k.Key)] = k
	}

	ret := make([]*evmtypes.KV, len(filterBloomData))
	i := 0
	for _, k := range filterBloomData {
		ret[i] = k
		i++
	}

	return ret
}
   580  
func filterDirtyList(datas [][]byte) [][]byte {
	if len(datas) == 0 {
		return nil
	}

	filterDirtyList := make(map[string][]byte)
	for _, k := range datas {
		filterDirtyList[bytes2Key(k)] = k
	}

	ret := make([][]byte, len(filterDirtyList))
	i := 0
	for _, k := range filterDirtyList {
		ret[i] = k
		i++
	}

	return ret
}
   600  
/////////// job
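// jobRoutine is the single worker goroutine behind jobChan: it executes
// queued jobs sequentially until Stop closes the channel, then signals
// jobDone.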
func (w *Watcher) jobRoutine() {
	if !w.Enabled() {
		return
	}

	w.lazyInitialization()
	for job := range w.jobChan {
		job()
	}
	w.jobDone.Done()
}
   613  
func (w *Watcher) lazyInitialization() {
	// lazy initialization: the channel memory is only allocated once the
	// routine starts. Capacity 5*3 covers the three producers
	// (watcherCommitJob, DelayEraseKey and commitBatchJob), with headroom
	// just in case.
	w.jobChan = make(chan func(), 5*3)
	w.jobDone = new(sync.WaitGroup)
	w.jobDone.Add(1)
}

func (w *Watcher) Stop() {
	if !w.Enabled() {
		return
	}
	close(w.jobChan)
	w.jobDone.Wait()
}

func (w *Watcher) dispatchJob(f func()) {
	// If jobRoutine is too slow to write data to disk, this send blocks and we
	// have to wait. That should only happen when something has gone wrong,
	// e.g. a db panic because the disk is full, since ApplyWatchData runs
	// roughly every 4 seconds (the block schedule).
	w.jobChan <- f
}
   636  
func (w *Watcher) Height() uint64 {
	return w.height
}
   640  
func (w *Watcher) Collect(watchers ...sdk.IWatcher) {
	if !w.enable {
		return
	}
	for _, watcher := range watchers {
		batch := watcher.Destruct()
		w.batch = append(w.batch, batch...)
	}
}
   650  
func (w *Watcher) saveStdTxResponse(result *ctypes.ResultTx) {
	wMsg := NewStdTransactionResponse(result, w.header.Time, common.BytesToHash(result.Hash))
	if wMsg != nil {
		w.batch = append(w.batch, wMsg)
	}
}