github.com/fibonacci-chain/fbc@v0.0.0-20231124064014-c7636198c1e9/libs/cosmos-sdk/baseapp/baseapp_parallel.go

     1  package baseapp
     2  
     3  import (
     4  	"bytes"
     5  	"runtime"
     6  	"sync"
     7  
     8  	"github.com/fibonacci-chain/fbc/libs/cosmos-sdk/store/types"
     9  	sdk "github.com/fibonacci-chain/fbc/libs/cosmos-sdk/types"
    10  	sdkerrors "github.com/fibonacci-chain/fbc/libs/cosmos-sdk/types/errors"
    11  	abci "github.com/fibonacci-chain/fbc/libs/tendermint/abci/types"
    12  	sm "github.com/fibonacci-chain/fbc/libs/tendermint/state"
    13  	"github.com/spf13/viper"
    14  )
    15  
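         // Tunables for parallel tx execution: the capacity of the result channel, the number of
         // worker goroutines used for preprocessing and cache recycling (one per CPU), and how
         // often (in blocks) the chain-level caches are cleared.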
    16  var (
    17  	maxTxResultInChan           = 20000
    18  	maxGoroutineNumberInParaTx  = runtime.NumCPU()
    19  	multiCacheListClearInterval = int64(100)
    20  )
    21  
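         // extraDataForTx holds what getExtraDataByTxs extracts from a raw tx: its fee, sender,
         // recipient, the decoded tx, whether it is an EVM tx, and any decode error.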
    22  type extraDataForTx struct {
    23  	fee       sdk.Coins
    24  	isEvm     bool
    25  	from      string
    26  	to        string
    27  	stdTx     sdk.Tx
    28  	decodeErr error
    29  }
    30  
    31  type txWithIndex struct {
    32  	index   int
    33  	txBytes []byte
    34  }
    35  
     36  // getExtraDataByTxs preprocesses the block's txs concurrently: each tx is decoded (or fetched from the mempool) and its sender, recipient and fee are extracted.
    37  func (app *BaseApp) getExtraDataByTxs(txs [][]byte) {
    38  	para := app.parallelTxManage
    39  
    40  	var wg sync.WaitGroup
    41  	wg.Add(len(txs))
    42  	jobChan := make(chan txWithIndex, len(txs))
    43  	for groupIndex := 0; groupIndex < maxGoroutineNumberInParaTx; groupIndex++ {
    44  		go func(ch chan txWithIndex) {
    45  			for j := range ch {
    46  				index := j.index
    47  				txBytes := j.txBytes
    48  				var tx sdk.Tx
    49  				var err error
    50  
    51  				if mem := GetGlobalMempool(); mem != nil {
    52  					tx, _ = mem.ReapEssentialTx(txBytes).(sdk.Tx)
    53  				}
    54  				if tx == nil {
    55  					tx, err = app.txDecoder(txBytes)
    56  					if err != nil {
    57  						para.extraTxsInfo[index] = &extraDataForTx{
    58  							decodeErr: err,
    59  						}
    60  						wg.Done()
    61  						continue
    62  					}
    63  				}
    64  				if tx != nil {
    65  					app.blockDataCache.SetTx(txBytes, tx)
    66  				}
    67  
    68  				coin, isEvm, s, toAddr, _ := app.getTxFeeAndFromHandler(app.getContextForTx(runTxModeDeliver, txBytes), tx)
    69  				para.extraTxsInfo[index] = &extraDataForTx{
    70  					fee:   coin,
    71  					isEvm: isEvm,
    72  					from:  s,
    73  					to:    toAddr,
    74  					stdTx: tx,
    75  				}
    76  				wg.Done()
    77  			}
    78  		}(jobChan)
    79  	}
    80  
    81  	for index, v := range txs {
    82  		jobChan <- txWithIndex{
    83  			index:   index,
    84  			txBytes: v,
    85  		}
    86  	}
    87  	close(jobChan)
    88  	wg.Wait()
    89  }
    90  
    91  var (
    92  	rootAddr = make(map[string]string, 0)
    93  )
    94  
     95  // Find returns the root address of x in the union-find structure, compressing the path on the way.
    96  func Find(x string) string {
    97  	if rootAddr[x] != x {
    98  		rootAddr[x] = Find(rootAddr[x])
    99  	}
   100  	return rootAddr[x]
   101  }
   102  
    103  // Union merges the sets containing x (from) and yString (to); an empty to address only registers x.
   104  func Union(x string, yString string) {
   105  	if _, ok := rootAddr[x]; !ok {
   106  		rootAddr[x] = x
   107  	}
   108  	if yString == "" {
   109  		return
   110  	}
   111  	if _, ok := rootAddr[yString]; !ok {
   112  		rootAddr[yString] = yString
   113  	}
   114  	fx := Find(x)
   115  	fy := Find(yString)
   116  	if fx != fy {
   117  		rootAddr[fy] = fx
   118  	}
   119  }
   120  
    121  // calGroup partitions the block's EVM txs into groups with union-find over their (from, to)
         // addresses: txs that touch the same address chain run sequentially inside one group, while
         // different groups run in parallel; non-EVM txs get a placeholder result and are rerun serially.
   122  func (app *BaseApp) calGroup() {
   123  
   124  	para := app.parallelTxManage
   125  
   126  	rootAddr = make(map[string]string, 0)
   127  	for index, tx := range para.extraTxsInfo {
   128  		if tx.isEvm { //evmTx
   129  			Union(tx.from, tx.to)
   130  		} else {
   131  			para.haveCosmosTxInBlock = true
   132  			app.parallelTxManage.putResult(index, &executeResult{paraMsg: &sdk.ParaMsg{}, msIsNil: true})
   133  		}
   134  	}
   135  
   136  	addrToID := make(map[string]int, 0)
   137  
   138  	for index, txInfo := range para.extraTxsInfo {
   139  		if !txInfo.isEvm {
   140  			continue
   141  		}
   142  		rootAddr := Find(txInfo.from)
   143  		id, exist := addrToID[rootAddr]
   144  		if !exist {
   145  			id = len(para.groupList)
   146  			addrToID[rootAddr] = id
   147  
   148  		}
   149  		para.groupList[id] = append(para.groupList[id], index)
   150  		para.txIndexWithGroup[index] = id
   151  	}
   152  
   153  	groupSize := len(para.groupList)
   154  	for groupIndex := 0; groupIndex < groupSize; groupIndex++ {
   155  		list := para.groupList[groupIndex]
   156  		for index := 0; index < len(list); index++ {
   157  			if index+1 <= len(list)-1 {
   158  				app.parallelTxManage.nextTxInGroup[list[index]] = list[index+1]
   159  			}
   160  			if index-1 >= 0 {
   161  				app.parallelTxManage.preTxInGroup[list[index]] = list[index-1]
   162  			}
   163  		}
   164  	}
   165  }
   166  
    167  // ParallelTxs executes the block's txs in parallel and returns a DeliverTx response for each.
   168  func (app *BaseApp) ParallelTxs(txs [][]byte, onlyCalSender bool) []*abci.ResponseDeliverTx {
   169  	txSize := len(txs)
   170  
   171  	if txSize == 0 {
   172  		return make([]*abci.ResponseDeliverTx, 0)
   173  	}
   174  
   175  	pm := app.parallelTxManage
   176  	pm.init(txs, app.deliverState.ctx.BlockHeight(), app.deliverState.ms)
   177  
   178  	app.getExtraDataByTxs(txs)
   179  
   180  	app.calGroup()
   181  
   182  	return app.runTxs()
   183  }
   184  
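         // fixFeeCollector writes the fees collected so far (currTxFee) to the fee collector
         // account on the manager's cache store; it is called before serially rerunning a
         // non-EVM tx so that the rerun observes an up-to-date collector balance.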
   185  func (app *BaseApp) fixFeeCollector() {
   186  	ctx, _ := app.cacheTxContext(app.getContextForTx(runTxModeDeliver, []byte{}), []byte{})
   187  
   188  	ctx.SetMultiStore(app.parallelTxManage.cms)
    189  	// Fee splitting is only processed at EndBlock.
   190  	if err := app.updateFeeCollectorAccHandler(ctx, app.parallelTxManage.currTxFee, nil); err != nil {
   191  		panic(err)
   192  	}
   193  }
   194  
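         // runTxs schedules one goroutine per tx group, commits results in tx order, reruns
         // conflicting or gas-overflowing txs serially, and returns the DeliverTx responses.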
   195  func (app *BaseApp) runTxs() []*abci.ResponseDeliverTx {
   196  	maxGas := app.getMaximumBlockGas()
   197  	currentGas := uint64(0)
   198  	overFlow := func(sumGas uint64, currGas int64, maxGas uint64) bool {
   199  		if maxGas <= 0 {
   200  			return false
   201  		}
   202  		if sumGas+uint64(currGas) >= maxGas { // TODO : fix later
   203  			return true
   204  		}
   205  		return false
   206  	}
   207  	signal := make(chan int, 1)
   208  	rerunIdx := 0
   209  
   210  	pm := app.parallelTxManage
   211  
   212  	asyncCb := func(receiveTxIndex int) {
   213  		if pm.alreadyEnd {
   214  			return
   215  		}
    216  		// skip results for tx indexes that are already committed or out of range
   217  		if receiveTxIndex < pm.upComingTxIndex || receiveTxIndex >= pm.txSize {
   218  			return
   219  		}
   220  
    221  		for {
   222  			res := pm.getTxResult(pm.upComingTxIndex)
   223  			if res == nil {
   224  				break
   225  			}
   226  			isReRun := false
   227  			if pm.isConflict(res) || overFlow(currentGas, res.resp.GasUsed, maxGas) {
   228  				rerunIdx++
   229  				isReRun = true
    230  				// conflict or block-gas overflow: rerun the tx serially
   231  				if !pm.extraTxsInfo[pm.upComingTxIndex].isEvm {
   232  					app.fixFeeCollector()
   233  				}
   234  				res = app.deliverTxWithCache(pm.upComingTxIndex)
   235  			}
   236  			if res.paraMsg.AnteErr != nil {
   237  				res.msIsNil = true
   238  			}
   239  
   240  			pm.deliverTxs[pm.upComingTxIndex] = &res.resp
   241  			pm.finalResult[pm.upComingTxIndex] = res
   242  
   243  			pm.blockGasMeterMu.Lock()
    244  			// Note: the ErrorGasOverflow case is deliberately not handled here
   245  			app.deliverState.ctx.BlockGasMeter().ConsumeGas(sdk.Gas(res.resp.GasUsed), "unexpected error")
   246  			pm.blockGasMeterMu.Unlock()
   247  
   248  			pm.SetCurrentIndex(pm.upComingTxIndex, res)
   249  			currentGas += uint64(res.resp.GasUsed)
   250  
   251  			if isReRun {
   252  				if pm.nextTxInGroup[pm.upComingTxIndex] != 0 {
   253  					pm.groupTasks[pm.txIndexWithGroup[pm.upComingTxIndex]].addRerun(pm.upComingTxIndex)
   254  				}
   255  			}
   256  			pm.upComingTxIndex++
   257  
   258  			if pm.upComingTxIndex == pm.txSize {
   259  				app.logger.Info("Paralleled-tx", "blockHeight", app.deliverState.ctx.BlockHeight(), "len(txs)", pm.txSize,
   260  					"Parallel run", pm.txSize-rerunIdx, "ReRun", rerunIdx, "len(group)", len(pm.groupList))
   261  				signal <- 0
   262  				return
   263  			}
   264  		}
   265  	}
   266  
   267  	pm.resultCb = asyncCb
   268  	pm.StartResultHandle()
   269  	for index := 0; index < len(pm.groupList); index++ {
   270  		pm.groupTasks = append(pm.groupTasks, newGroupTask(len(pm.groupList[index]), pm.addMultiCache, pm.nextTxInThisGroup, app.asyncDeliverTx, pm.putResult))
   271  		pm.groupTasks[index].addTask(pm.groupList[index][0])
   272  	}
   273  	if len(pm.groupList) == 0 {
   274  		pm.resultCh <- 0
   275  	}
   276  
    277  	// wait for the callback to signal that every tx has been finalized
   278  	<-signal
   279  
   280  	for _, v := range pm.groupTasks {
   281  		v.stopChan <- struct{}{}
   282  	}
   283  	pm.alreadyEnd = true
   284  	pm.stop <- struct{}{}
   285  
    286  	// fix the fee collector and receipt logs
   287  	app.feeChanged = true
   288  	app.feeCollector = app.parallelTxManage.currTxFee
   289  	receiptsLogs := app.endParallelTxs(pm.txSize)
   290  	for index, v := range receiptsLogs {
   291  		if len(v) != 0 { // only update evm tx result
   292  			pm.deliverTxs[index].Data = v
   293  		}
   294  	}
   295  
   296  	pm.cms.Write()
   297  	return pm.deliverTxs
   298  }
   299  
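         // nextTxInThisGroup returns the index of the tx that follows txindex within its group, if any.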
   300  func (pm *parallelTxManager) nextTxInThisGroup(txindex int) (int, bool) {
   301  	if pm.alreadyEnd {
   302  		return 0, false
   303  	}
   304  	data, ok := pm.nextTxInGroup[txindex]
   305  	return data, ok
   306  }
   307  
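         // endParallelTxs gathers per-tx watchers, fee-split info and ante errors, clears the
         // manager, and returns the fixed receipt logs for each tx.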
   308  func (app *BaseApp) endParallelTxs(txSize int) [][]byte {
   309  
    310  	// gather per-tx data needed to fix the receipt logs
   311  	logIndex := make([]int, txSize)
   312  	errs := make([]error, txSize)
   313  	hasEnterEvmTx := make([]bool, txSize)
   314  	resp := make([]abci.ResponseDeliverTx, txSize)
   315  	watchers := make([]sdk.IWatcher, txSize)
   316  	txs := make([]sdk.Tx, txSize)
   317  	app.FeeSplitCollector = make([]*sdk.FeeSplitInfo, 0)
   318  	for index := 0; index < txSize; index++ {
   319  		txRes := app.parallelTxManage.finalResult[index]
   320  		logIndex[index] = txRes.paraMsg.LogIndex
   321  		errs[index] = txRes.paraMsg.AnteErr
   322  		hasEnterEvmTx[index] = txRes.paraMsg.HasRunEvmTx
   323  		resp[index] = txRes.resp
   324  		watchers[index] = txRes.watcher
   325  		txs[index] = app.parallelTxManage.extraTxsInfo[index].stdTx
   326  
   327  		if txRes.FeeSpiltInfo.HasFee {
   328  			app.FeeSplitCollector = append(app.FeeSplitCollector, txRes.FeeSpiltInfo)
   329  		}
   330  	}
   331  	app.watcherCollector(watchers...)
   332  	app.parallelTxManage.clear()
   333  
   334  	return app.logFix(txs, logIndex, hasEnterEvmTx, errs, resp)
   335  }
   336  
    337  // deliverTxWithCache reruns a single tx serially; it reuses the nonce that was changed by the last async run.
    338  // Whether a tx whose previous ante handler failed needs to be rerun is still an open question.
   339  func (app *BaseApp) deliverTxWithCache(txIndex int) *executeResult {
   340  	app.parallelTxManage.currentRerunIndex = txIndex
   341  	defer func() {
   342  		app.parallelTxManage.currentRerunIndex = -1
   343  	}()
   344  	txStatus := app.parallelTxManage.extraTxsInfo[txIndex]
   345  
   346  	if txStatus.stdTx == nil {
   347  		asyncExe := newExecuteResult(sdkerrors.ResponseDeliverTx(txStatus.decodeErr,
   348  			0, 0, app.trace), nil, uint32(txIndex), nil, 0, sdk.EmptyWatcher{}, nil, app.parallelTxManage, nil)
   349  		return asyncExe
   350  	}
   351  	var (
   352  		resp abci.ResponseDeliverTx
   353  		mode runTxMode
   354  	)
   355  	mode = runTxModeDeliverInAsync
   356  	info, errM := app.runTxWithIndex(txIndex, mode, app.parallelTxManage.txs[txIndex], txStatus.stdTx, LatestSimulateTxHeight)
   357  	if errM != nil {
   358  		resp = sdkerrors.ResponseDeliverTx(errM, info.gInfo.GasWanted, info.gInfo.GasUsed, app.trace)
   359  	} else {
   360  		resp = abci.ResponseDeliverTx{
   361  			GasWanted: int64(info.gInfo.GasWanted), // TODO: Should type accept unsigned ints?
   362  			GasUsed:   int64(info.gInfo.GasUsed),   // TODO: Should type accept unsigned ints?
   363  			Log:       info.result.Log,
   364  			Data:      info.result.Data,
   365  			Events:    info.result.Events.ToABCIEvents(),
   366  		}
   367  	}
   368  
   369  	asyncExe := newExecuteResult(resp, info.msCacheAnte, uint32(txIndex), info.ctx.ParaMsg(),
   370  		0, info.runMsgCtx.GetWatcher(), info.tx.GetMsgs(), app.parallelTxManage, info.ctx.GetFeeSplitInfo())
   371  	app.parallelTxManage.addMultiCache(info.msCacheAnte, info.msCache)
   372  	return asyncExe
   373  }
   374  
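         // executeResult is the outcome of one parallel tx execution: its ABCI response, the cache
         // store it wrote to, the read/write set used for conflict detection, and related metadata.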
   375  type executeResult struct {
   376  	resp         abci.ResponseDeliverTx
   377  	ms           sdk.CacheMultiStore
   378  	msIsNil      bool // TODO delete it
   379  	counter      uint32
   380  	paraMsg      *sdk.ParaMsg
   381  	blockHeight  int64
   382  	watcher      sdk.IWatcher
   383  	msgs         []sdk.Msg
   384  	FeeSpiltInfo *sdk.FeeSplitInfo
   385  
   386  	rwSet types.MsRWSet
   387  }
   388  
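         // newExecuteResult wraps a DeliverTx response, snapshots the read/write set of ms for
         // conflict detection, and fills in empty ParaMsg/FeeSplitInfo values when they are nil.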
   389  func newExecuteResult(r abci.ResponseDeliverTx, ms sdk.CacheMultiStore, counter uint32,
   390  	paraMsg *sdk.ParaMsg, height int64, watcher sdk.IWatcher, msgs []sdk.Msg, para *parallelTxManager, feeSpiltInfo *sdk.FeeSplitInfo) *executeResult {
   391  
   392  	rwSet := para.chainMpCache.GetRWSet()
   393  	if ms != nil {
   394  		ms.GetRWSet(rwSet)
   395  	}
   396  	para.blockMpCache.PutRwSet(rwSet)
   397  
   398  	if feeSpiltInfo == nil {
   399  		feeSpiltInfo = &sdk.FeeSplitInfo{}
   400  	}
   401  	ans := &executeResult{
   402  		resp:         r,
   403  		ms:           ms,
   404  		msIsNil:      ms == nil,
   405  		counter:      counter,
   406  		paraMsg:      paraMsg,
   407  		blockHeight:  height,
   408  		watcher:      watcher,
   409  		msgs:         msgs,
   410  		rwSet:        rwSet,
   411  		FeeSpiltInfo: feeSpiltInfo,
   412  	}
   413  
   414  	if paraMsg == nil {
   415  		ans.paraMsg = &sdk.ParaMsg{}
   416  	}
   417  	return ans
   418  }
   419  
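         // parallelTxManager coordinates one block's parallel execution: tx grouping, per-tx
         // results, the shared cache store, the conflict-check write set, and reusable caches.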
   420  type parallelTxManager struct {
   421  	blockHeight         int64
   422  	groupTasks          []*groupTask
   423  	blockGasMeterMu     sync.Mutex
   424  	haveCosmosTxInBlock bool
   425  	isAsyncDeliverTx    bool
   426  	txs                 [][]byte
   427  	txSize              int
   428  	alreadyEnd          bool
   429  
   430  	resultCh chan int
   431  	resultCb func(data int)
   432  	stop     chan struct{}
   433  
   434  	groupList        map[int][]int
   435  	nextTxInGroup    map[int]int
   436  	preTxInGroup     map[int]int
   437  	txIndexWithGroup map[int]int
   438  
   439  	currentRerunIndex int
   440  	upComingTxIndex   int
   441  	currTxFee         sdk.Coins
   442  	cms               sdk.CacheMultiStore
   443  	conflictCheck     types.MsRWSet
   444  
   445  	blockMpCache     *cacheRWSetList
   446  	chainMpCache     *cacheRWSetList
   447  	blockMultiStores *cacheMultiStoreList
   448  	chainMultiStores *cacheMultiStoreList
   449  
   450  	extraTxsInfo []*extraDataForTx
   451  	txReps       []*executeResult
   452  	finalResult  []*executeResult
   453  	deliverTxs   []*abci.ResponseDeliverTx
   454  }
   455  
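         // putResult stores a tx's result and notifies the result loop; a nil result invalidates
         // a previously queued one.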
   456  func (pm *parallelTxManager) putResult(txIndex int, res *executeResult) {
   457  	if pm.alreadyEnd {
   458  		return
   459  	}
   460  
   461  	pm.txReps[txIndex] = res
   462  	if res != nil {
   463  		pm.resultCh <- txIndex
   464  	}
   465  }
   466  
   467  func (pm *parallelTxManager) getTxResult(txIndex int) *executeResult {
   468  	if pm.alreadyEnd {
   469  		return nil
   470  	}
   471  	return pm.txReps[txIndex]
   472  }
   473  
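         // StartResultHandle starts the goroutine that feeds finished tx indexes to resultCb until stopped.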
   474  func (pm *parallelTxManager) StartResultHandle() {
   475  	go func() {
   476  		for {
   477  			select {
   478  			case exec := <-pm.resultCh:
   479  				pm.resultCb(exec)
   480  
   481  			case <-pm.stop:
   482  				return
   483  			}
   484  		}
   485  	}()
   486  }
   487  
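         // groupTask executes the txs of one group sequentially on its own goroutine, chaining
         // each tx's cache store onto the group store and rescheduling the remainder of the
         // group whenever one of its txs has to be rerun.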
   488  type groupTask struct {
   489  	addMultiCache func(msAnte types.CacheMultiStore, msCache types.CacheMultiStore)
   490  	mu            sync.Mutex
   491  	groupIndex    map[int]int
   492  
   493  	nextTx    func(int) (int, bool)
   494  	taskRun   func(int) *executeResult
   495  	putResult func(index int, txResult *executeResult)
   496  
   497  	txChan    chan int
   498  	reRunChan chan int
   499  	stopChan  chan struct{}
   500  	ms        sdk.CacheMultiStore
   501  }
   502  
   503  func newGroupTask(txSizeInGroup int, addMultiCache func(msAnte types.CacheMultiStore, msCache types.CacheMultiStore), nextTx func(int2 int) (int, bool), task func(int2 int) *executeResult, putResult func(index int, txResult *executeResult)) *groupTask {
   504  	g := &groupTask{
   505  		addMultiCache: addMultiCache,
   506  		mu:            sync.Mutex{},
   507  		nextTx:        nextTx,
   508  		taskRun:       task,
   509  		txChan:        make(chan int, txSizeInGroup),
   510  		reRunChan:     make(chan int, 1000),
   511  		stopChan:      make(chan struct{}, 1),
   512  		putResult:     putResult,
   513  	}
   514  	go g.run()
   515  	return g
   516  }
   517  
   518  func (g *groupTask) addTask(txIndex int) {
   519  	g.txChan <- txIndex
   520  }
   521  
   522  func (g *groupTask) addRerun(txIndex int) {
   523  	g.mu.Lock()
   524  	defer g.mu.Unlock()
   525  
   526  	g.clearResultChan(txIndex)
   527  	g.reRunChan <- txIndex
   528  }
   529  
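         // clearResultChan invalidates the queued results of every tx that follows rerunIndex in the same group.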
   530  func (g *groupTask) clearResultChan(rerunIndex int) {
    531  	for {
   532  		next, ok := g.nextTx(rerunIndex) // TODO add currIndex
   533  		if ok {
   534  			g.putResult(next, nil)
   535  		} else {
   536  			return
   537  		}
   538  		rerunIndex = next
   539  	}
   540  }
   541  
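         // run is the group's worker loop: it executes scheduled txs, publishes their results,
         // and on a rerun drops the now-stale downstream work before continuing with the group.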
   542  func (g *groupTask) run() {
   543  
    544  	for {
   545  		select {
   546  		case txIndex := <-g.txChan:
   547  			g.mu.Lock()
   548  			res := g.taskRun(txIndex)
   549  			if res.paraMsg.UseCurrentState {
   550  				g.addMultiCache(g.ms, nil)
   551  				g.ms = res.ms.CacheMultiStore()
   552  			} else {
   553  				if res.ms != nil {
   554  					res.ms.Write()
   555  				}
   556  			}
   557  
   558  			if len(g.reRunChan) == 0 {
   559  				g.putResult(int(res.counter), res)
   560  				if n, ok := g.nextTx(txIndex); ok {
   561  					g.addTask(n)
   562  				}
   563  			}
   564  			g.mu.Unlock()
   565  
   566  		case rerunIndex := <-g.reRunChan:
   567  			g.clearResultChan(rerunIndex)
   568  			g.addMultiCache(g.ms, nil)
   569  			g.ms = nil
   570  			size := len(g.txChan)
   571  			for index := 0; index < size; index++ {
   572  				<-g.txChan
   573  			}
   574  
   575  			if n, ok := g.nextTx(rerunIndex); ok {
   576  				g.addTask(n)
   577  			}
   578  		case <-g.stopChan:
   579  			return
   580  		}
   581  	}
   582  }
   583  
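         // newParallelTxManager builds an empty manager; parallel delivery is considered enabled
         // when the node is configured with DeliverTxsExecModeParallel.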
   584  func newParallelTxManager() *parallelTxManager {
   585  	isAsync := sm.DeliverTxsExecMode(viper.GetInt(sm.FlagDeliverTxsExecMode)) == sm.DeliverTxsExecModeParallel
   586  	para := &parallelTxManager{
   587  		blockGasMeterMu:  sync.Mutex{},
   588  		isAsyncDeliverTx: isAsync,
   589  		stop:             make(chan struct{}, 1),
   590  
   591  		conflictCheck: make(types.MsRWSet),
   592  
   593  		groupList:        make(map[int][]int),
   594  		nextTxInGroup:    make(map[int]int),
   595  		preTxInGroup:     make(map[int]int),
   596  		txIndexWithGroup: make(map[int]int),
   597  		resultCh:         make(chan int, maxTxResultInChan),
   598  
   599  		blockMpCache:     newCacheRWSetList(),
   600  		chainMpCache:     newCacheRWSetList(),
   601  		blockMultiStores: newCacheMultiStoreList(),
   602  		chainMultiStores: newCacheMultiStoreList(),
   603  	}
   604  	return para
   605  }
   606  
   607  func (pm *parallelTxManager) addMultiCache(ms1 types.CacheMultiStore, ms2 types.CacheMultiStore) {
   608  	if ms1 != nil {
   609  		pm.blockMultiStores.PushStore(ms1)
   610  	}
   611  
   612  	if ms2 != nil {
   613  		pm.blockMultiStores.PushStore(ms2)
   614  	}
   615  }
   616  
   617  func shouldCleanChainCache(height int64) bool {
   618  	return height%multiCacheListClearInterval == 0
   619  }
   620  
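         // addMpCacheToChainCache recycles this block's read/write-set buffers into the chain-level
         // pool (clearing each buffer first); at the periodic clean-up height the chain pool is
         // emptied and the buffers are simply dropped.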
   621  func (pm *parallelTxManager) addMpCacheToChainCache() {
   622  	if shouldCleanChainCache(pm.blockHeight) {
   623  		pm.chainMpCache.Clear()
   624  	} else {
   625  		jobChan := make(chan types.MsRWSet, pm.blockMpCache.Len())
   626  		go func() {
   627  			for index := 0; index < maxGoroutineNumberInParaTx; index++ {
   628  				go func(ch chan types.MsRWSet) {
   629  					for j := range ch {
   630  						types.ClearMsRWSet(j)
   631  						pm.chainMpCache.PutRwSet(j)
   632  					}
   633  				}(jobChan)
   634  			}
   635  
   636  		}()
   637  
   638  		pm.blockMpCache.Range(func(c types.MsRWSet) {
   639  			jobChan <- c
   640  		})
   641  		close(jobChan)
   642  	}
   643  	pm.blockMpCache.Clear()
   644  
   645  }
   646  
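         // addBlockCacheToChainCache recycles the block's cache multistores into the chain-level pool in the same way.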
   647  func (pm *parallelTxManager) addBlockCacheToChainCache() {
   648  	if shouldCleanChainCache(pm.blockHeight) {
   649  		pm.chainMultiStores.Clear()
   650  	} else {
   651  		jobChan := make(chan types.CacheMultiStore, pm.blockMultiStores.Len())
   652  		go func() {
   653  			for index := 0; index < maxGoroutineNumberInParaTx; index++ {
   654  				go func(ch chan types.CacheMultiStore) {
   655  					for j := range ch {
   656  						j.Clear()
   657  						pm.chainMultiStores.PushStore(j)
   658  					}
   659  				}(jobChan)
   660  			}
   661  		}()
   662  
   663  		pm.blockMultiStores.Range(func(c types.CacheMultiStore) {
   664  			jobChan <- c
   665  		})
   666  		close(jobChan)
   667  	}
   668  	pm.blockMultiStores.Clear()
   669  }
   670  
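         // isConflict reports whether a tx result must be rerun: its store is missing, or it read a
         // key whose value differs from what an earlier tx in this block wrote.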
   671  func (pm *parallelTxManager) isConflict(e *executeResult) bool {
   672  	if e.msIsNil {
   673  		return true //TODO fix later
   674  	}
   675  	for storeKey, rw := range e.rwSet {
   676  
   677  		for key, value := range rw.Read {
   678  			if data, ok := pm.conflictCheck[storeKey].Write[key]; ok {
   679  				if !bytes.Equal(data.Value, value) {
   680  					return true
   681  				}
   682  			}
   683  		}
   684  	}
   685  	return false
   686  }
   687  
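         // clear recycles the block-level caches into the chain pools and resets the grouping and conflict-check state.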
   688  func (pm *parallelTxManager) clear() {
   689  
   690  	pm.addBlockCacheToChainCache()
   691  	pm.addMpCacheToChainCache()
   692  
   693  	for key := range pm.groupList {
   694  		delete(pm.groupList, key)
   695  	}
   696  	for key := range pm.preTxInGroup {
   697  		delete(pm.preTxInGroup, key)
   698  	}
   699  	for key := range pm.txIndexWithGroup {
   700  		delete(pm.txIndexWithGroup, key)
   701  	}
   702  
   703  	for _, v := range pm.conflictCheck {
   704  		for k := range v.Read {
   705  			delete(v.Read, k)
   706  		}
   707  		for k := range v.Write {
   708  			delete(v.Write, k)
   709  		}
   710  	}
   711  }
   712  
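         // init resets the manager for a new block: it caches the deliver state's multistore,
         // grows the result channel if needed, and reallocates the per-block bookkeeping.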
   713  func (pm *parallelTxManager) init(txs [][]byte, blockHeight int64, deliverStateMs sdk.CacheMultiStore) {
   714  
   715  	txSize := len(txs)
   716  	pm.blockHeight = blockHeight
   717  	pm.groupTasks = make([]*groupTask, 0)
   718  	pm.haveCosmosTxInBlock = false
   719  	pm.isAsyncDeliverTx = true
   720  	pm.txs = txs
   721  	pm.txSize = txSize
   722  	pm.alreadyEnd = false
   723  
   724  	pm.currentRerunIndex = -1
   725  	pm.upComingTxIndex = 0
   726  	pm.currTxFee = sdk.Coins{}
   727  	pm.cms = deliverStateMs.CacheMultiStore()
   728  	pm.cms.DisableCacheReadList()
   729  	deliverStateMs.DisableCacheReadList()
   730  
   731  	if txSize > cap(pm.resultCh) {
   732  		pm.resultCh = make(chan int, txSize)
   733  	}
   734  
   735  	pm.nextTxInGroup = make(map[int]int)
   736  
   737  	pm.extraTxsInfo = make([]*extraDataForTx, txSize)
   738  	pm.txReps = make([]*executeResult, txSize)
   739  	pm.finalResult = make([]*executeResult, txSize)
   740  	pm.deliverTxs = make([]*abci.ResponseDeliverTx, txSize)
   741  }
   742  
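         // getParentMsByTxIndex picks the store a tx should execute against: the group's chained
         // store while the tx's in-group predecessor is still uncommitted, otherwise the block-level
         // store (reported by the returned bool); indexes that are already committed get nil.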
   743  func (pm *parallelTxManager) getParentMsByTxIndex(txIndex int) (sdk.CacheMultiStore, bool) {
   744  
   745  	if txIndex <= pm.upComingTxIndex-1 {
   746  		return nil, false
   747  	}
   748  
   749  	useCurrent := false
   750  	var ms types.CacheMultiStore
   751  	if pm.currentRerunIndex != txIndex && pm.preTxInGroup[txIndex] > pm.upComingTxIndex-1 {
   752  		if groupMs := pm.groupTasks[pm.txIndexWithGroup[txIndex]].ms; groupMs != nil {
   753  			ms = pm.chainMultiStores.GetStoreWithParent(groupMs)
   754  		}
   755  	}
   756  
   757  	if ms == nil {
   758  		useCurrent = true
   759  		ms = pm.chainMultiStores.GetStoreWithParent(pm.cms)
   760  	}
   761  	return ms, useCurrent
   762  }
   763  
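         // SetCurrentIndex commits a finished tx: its write set is replayed onto the shared block
         // store and recorded for conflict detection, and its fee (minus any refund) is accumulated.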
   764  func (pm *parallelTxManager) SetCurrentIndex(txIndex int, res *executeResult) {
   765  	if res.msIsNil {
   766  		return
   767  	}
   768  
   769  	for storeKey, rw := range res.rwSet {
   770  		if _, ok := pm.conflictCheck[storeKey]; !ok {
   771  			pm.conflictCheck[storeKey] = types.NewCacheKvRWSet()
   772  		}
   773  
   774  		ms := pm.cms.GetKVStore(storeKey)
   775  		for key, value := range rw.Write {
   776  			if value.Deleted {
   777  				ms.Delete([]byte(key))
   778  			} else {
   779  				ms.Set([]byte(key), value.Value)
   780  			}
   781  			pm.conflictCheck[storeKey].Write[key] = value
   782  		}
   783  	}
   784  	pm.currTxFee = pm.currTxFee.Add(pm.extraTxsInfo[txIndex].fee.Sub(pm.finalResult[txIndex].paraMsg.RefundFee)...)
   785  }