github.com/nspcc-dev/neo-go@v0.105.2-0.20240517133400-6be757af3eba/pkg/core/blockchain.go

     1  package core
     2  
     3  import (
     4  	"bytes"
     5  	"encoding/binary"
     6  	"errors"
     7  	"fmt"
     8  	"math"
     9  	"math/big"
    10  	"sort"
    11  	"sync"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	json "github.com/nspcc-dev/go-ordered-json"
    16  	"github.com/nspcc-dev/neo-go/pkg/config"
    17  	"github.com/nspcc-dev/neo-go/pkg/config/limits"
    18  	"github.com/nspcc-dev/neo-go/pkg/core/block"
    19  	"github.com/nspcc-dev/neo-go/pkg/core/dao"
    20  	"github.com/nspcc-dev/neo-go/pkg/core/interop"
    21  	"github.com/nspcc-dev/neo-go/pkg/core/interop/contract"
    22  	"github.com/nspcc-dev/neo-go/pkg/core/mempool"
    23  	"github.com/nspcc-dev/neo-go/pkg/core/mpt"
    24  	"github.com/nspcc-dev/neo-go/pkg/core/native"
    25  	"github.com/nspcc-dev/neo-go/pkg/core/native/noderoles"
    26  	"github.com/nspcc-dev/neo-go/pkg/core/state"
    27  	"github.com/nspcc-dev/neo-go/pkg/core/stateroot"
    28  	"github.com/nspcc-dev/neo-go/pkg/core/statesync"
    29  	"github.com/nspcc-dev/neo-go/pkg/core/storage"
    30  	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
    31  	"github.com/nspcc-dev/neo-go/pkg/crypto/hash"
    32  	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
    33  	"github.com/nspcc-dev/neo-go/pkg/encoding/fixedn"
    34  	"github.com/nspcc-dev/neo-go/pkg/io"
    35  	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
    36  	"github.com/nspcc-dev/neo-go/pkg/smartcontract/callflag"
    37  	"github.com/nspcc-dev/neo-go/pkg/smartcontract/manifest"
    38  	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
    39  	"github.com/nspcc-dev/neo-go/pkg/util"
    40  	"github.com/nspcc-dev/neo-go/pkg/vm"
    41  	"github.com/nspcc-dev/neo-go/pkg/vm/stackitem"
    42  	"github.com/nspcc-dev/neo-go/pkg/vm/vmstate"
    43  	"go.uber.org/zap"
    44  )
    45  
    46  // Tuning parameters.
    47  const (
    48  	version = "0.2.12"
    49  
    50  	// DefaultInitialGAS is the default amount of GAS emitted to the standby validators
    51  	// multisignature account during native GAS contract initialization.
    52  	DefaultInitialGAS                      = 52000000_00000000
    53  	defaultGCPeriod                        = 10000
    54  	defaultMemPoolSize                     = 50000
    55  	defaultP2PNotaryRequestPayloadPoolSize = 1000
    56  	defaultMaxBlockSize                    = 262144
    57  	defaultMaxBlockSystemFee               = 900000000000
    58  	defaultMaxTraceableBlocks              = 2102400 // 1 year of 15s blocks
    59  	defaultMaxTransactionsPerBlock         = 512
    60  	defaultTimePerBlock                    = 15 * time.Second
    61  	// HeaderVerificationGasLimit is the maximum amount of GAS for block header verification.
    62  	HeaderVerificationGasLimit = 3_00000000 // 3 GAS
    63  	defaultStateSyncInterval   = 40000
    64  )
    65  
    66  // stateChangeStage denotes the stage of state modification process.
    67  type stateChangeStage byte
    68  
    69  // A set of stages used to split state jump / state reset into atomic operations.
    70  const (
    71  	// none means that no state jump or state reset process was initiated yet.
    72  	none stateChangeStage = 1 << iota
    73  	// stateJumpStarted means that state jump was just initiated, but outdated storage items
    74  	// were not yet removed.
    75  	stateJumpStarted
    76  	// newStorageItemsAdded means that contract storage items are up-to-date with the current
    77  	// state.
    78  	newStorageItemsAdded
    79  	// staleBlocksRemoved means that state corresponding to the stale blocks (genesis block
    80  	// in case of state jump) was removed from the storage.
    81  	staleBlocksRemoved
    82  	// headersReset denotes that stale SYS-prefixed and IX-prefixed information was removed from
    83  	// the storage (applicable to state reset only).
    84  	headersReset
    85  	// transfersReset denotes that NEP transfers were successfully updated (applicable to state reset only).
    86  	transfersReset
    87  	// stateResetBit represents a bit identifier for state reset process. If this bit is not set, then
    88  	// it's an unfinished state jump.
    89  	stateResetBit byte = 1 << 7
    90  )
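
// For illustration (based on how these values are combined in the code below):
// during an interrupted state reset the value stored under the
// storage.SYSStateChangeStage key is stateResetBit|byte(stateJumpStarted),
// while a plain byte(stateJumpStarted) with the reset bit unset denotes an
// unfinished state jump at the same stage.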
    91  
    92  var (
    93  	// ErrAlreadyExists is returned when trying to add some transaction
    94  	// that already exists on chain.
    95  	ErrAlreadyExists = errors.New("already exists in blockchain")
    96  	// ErrAlreadyInPool is returned when trying to add some already existing
    97  	// transaction into the mempool.
    98  	ErrAlreadyInPool = errors.New("already exists in mempool")
     99  	// ErrOOM is returned when a transaction can't be added to the memory pool
    100  	// because the pool has reached its full capacity.
    101  	ErrOOM = errors.New("no space left in the memory pool")
    102  	// ErrPolicy is returned on an attempt to add a transaction that doesn't
    103  	// comply with the node's configured policy to the mempool.
    104  	ErrPolicy = errors.New("not allowed by policy")
    105  	// ErrInvalidBlockIndex is returned when trying to add a block with an index
    106  	// other than the expected height of the blockchain.
    107  	ErrInvalidBlockIndex = errors.New("invalid block index")
    108  	// ErrHasConflicts is returned when trying to add a transaction which
    109  	// conflicts with another transaction in the chain or pool according to
    110  	// the Conflicts attribute.
    111  	ErrHasConflicts = errors.New("has conflicts")
   112  )
   113  var (
   114  	persistInterval = 1 * time.Second
   115  )
   116  
    117  // Blockchain represents the blockchain. It maintains internal state representing
   118  // the state of the ledger that can be accessed in various ways and changed by
   119  // adding new blocks or headers.
   120  type Blockchain struct {
   121  	HeaderHashes
   122  
   123  	config config.Blockchain
   124  
   125  	// The only way chain state changes is by adding blocks, so we can't
   126  	// allow concurrent block additions. It differs from the next lock in
   127  	// that it's only for AddBlock method itself, the chain state is
   128  	// protected by the lock below, but holding it during all of AddBlock
   129  	// is too expensive (because the state only changes when persisting
   130  	// change cache).
   131  	addLock sync.Mutex
   132  
   133  	// This lock ensures blockchain immutability for operations that need
   134  	// that while performing their tasks. It's mostly used as a read lock
   135  	// with the only writer being the block addition logic.
   136  	lock sync.RWMutex
   137  
   138  	// Data access object for CRUD operations around storage. It's write-cached.
   139  	dao *dao.Simple
   140  
   141  	// persistent is the same DB as dao, but we never write to it, so all reads
   142  	// are directly from underlying persistent store.
   143  	persistent *dao.Simple
   144  
   145  	// Underlying persistent store.
   146  	store storage.Store
   147  
   148  	// Current index/height of the highest block.
   149  	// Read access should always be called by BlockHeight().
   150  	// Write access should only happen in storeBlock().
   151  	blockHeight uint32
   152  
   153  	// Current top Block wrapped in an atomic.Value for safe access.
   154  	topBlock atomic.Value
   155  
   156  	// Current persisted block count.
   157  	persistedHeight uint32
   158  
   159  	// Stop synchronization mechanisms.
   160  	stopCh      chan struct{}
   161  	runToExitCh chan struct{}
   162  	// isRunning denotes whether blockchain routines are currently running.
   163  	isRunning atomic.Value
   164  
   165  	memPool *mempool.Pool
   166  
    167  	// postBlock is a set of callback methods which should be run under the Blockchain lock after a new block is persisted.
   168  	// Block's transactions are passed via mempool.
   169  	postBlock []func(func(*transaction.Transaction, *mempool.Pool, bool) bool, *mempool.Pool, *block.Block)
   170  
   171  	log *zap.Logger
   172  
   173  	lastBatch *storage.MemBatch
   174  
   175  	contracts native.Contracts
   176  
   177  	extensible atomic.Value
   178  
   179  	// knownValidatorsCount is the latest known validators count used
   180  	// for defaultBlockWitness.
   181  	knownValidatorsCount atomic.Value
   182  	// defaultBlockWitness stores transaction.Witness with m out of n multisig,
   183  	// where n = knownValidatorsCount.
   184  	defaultBlockWitness atomic.Value
   185  
   186  	stateRoot *stateroot.Module
   187  
   188  	// Notification subsystem.
   189  	events  chan bcEvent
   190  	subCh   chan any
   191  	unsubCh chan any
   192  }
   193  
   194  // StateRoot represents local state root module.
   195  type StateRoot interface {
   196  	CurrentLocalHeight() uint32
   197  	CurrentLocalStateRoot() util.Uint256
   198  	CurrentValidatedHeight() uint32
   199  	FindStates(root util.Uint256, prefix, start []byte, max int) ([]storage.KeyValue, error)
   200  	SeekStates(root util.Uint256, prefix []byte, f func(k, v []byte) bool)
   201  	GetState(root util.Uint256, key []byte) ([]byte, error)
   202  	GetStateProof(root util.Uint256, key []byte) ([][]byte, error)
   203  	GetStateRoot(height uint32) (*state.MPTRoot, error)
   204  	GetLatestStateHeight(root util.Uint256) (uint32, error)
   205  }
   206  
   207  // bcEvent is an internal event generated by the Blockchain and then
   208  // broadcasted to other parties. It joins the new block and associated
   209  // invocation logs, all the other events visible from outside can be produced
   210  // from this combination.
   211  type bcEvent struct {
   212  	block          *block.Block
   213  	appExecResults []*state.AppExecResult
   214  }
   215  
   216  // transferData is used for transfer caching during storeBlock.
   217  type transferData struct {
   218  	Info  state.TokenTransferInfo
   219  	Log11 state.TokenTransferLog
   220  	Log17 state.TokenTransferLog
   221  }
   222  
    223  // NewBlockchain returns a new blockchain object that will use the
   224  // given Store as its underlying storage. For it to work correctly you need
   225  // to spawn a goroutine for its Run method after this initialization.
   226  func NewBlockchain(s storage.Store, cfg config.Blockchain, log *zap.Logger) (*Blockchain, error) {
   227  	if log == nil {
   228  		return nil, errors.New("empty logger")
   229  	}
   230  
   231  	// Protocol configuration fixups/checks.
   232  	if cfg.InitialGASSupply <= 0 {
   233  		cfg.InitialGASSupply = fixedn.Fixed8(DefaultInitialGAS)
   234  		log.Info("initial gas supply is not set or wrong, setting default value", zap.Stringer("InitialGASSupply", cfg.InitialGASSupply))
   235  	}
   236  	if cfg.MemPoolSize <= 0 {
   237  		cfg.MemPoolSize = defaultMemPoolSize
   238  		log.Info("mempool size is not set or wrong, setting default value", zap.Int("MemPoolSize", cfg.MemPoolSize))
   239  	}
   240  	if cfg.P2PSigExtensions && cfg.P2PNotaryRequestPayloadPoolSize <= 0 {
   241  		cfg.P2PNotaryRequestPayloadPoolSize = defaultP2PNotaryRequestPayloadPoolSize
   242  		log.Info("P2PNotaryRequestPayloadPool size is not set or wrong, setting default value", zap.Int("P2PNotaryRequestPayloadPoolSize", cfg.P2PNotaryRequestPayloadPoolSize))
   243  	}
   244  	if cfg.MaxBlockSize == 0 {
   245  		cfg.MaxBlockSize = defaultMaxBlockSize
   246  		log.Info("MaxBlockSize is not set or wrong, setting default value", zap.Uint32("MaxBlockSize", cfg.MaxBlockSize))
   247  	}
   248  	if cfg.MaxBlockSystemFee <= 0 {
   249  		cfg.MaxBlockSystemFee = defaultMaxBlockSystemFee
   250  		log.Info("MaxBlockSystemFee is not set or wrong, setting default value", zap.Int64("MaxBlockSystemFee", cfg.MaxBlockSystemFee))
   251  	}
   252  	if cfg.MaxTraceableBlocks == 0 {
   253  		cfg.MaxTraceableBlocks = defaultMaxTraceableBlocks
   254  		log.Info("MaxTraceableBlocks is not set or wrong, using default value", zap.Uint32("MaxTraceableBlocks", cfg.MaxTraceableBlocks))
   255  	}
   256  	if cfg.MaxTransactionsPerBlock == 0 {
   257  		cfg.MaxTransactionsPerBlock = defaultMaxTransactionsPerBlock
   258  		log.Info("MaxTransactionsPerBlock is not set or wrong, using default value",
   259  			zap.Uint16("MaxTransactionsPerBlock", cfg.MaxTransactionsPerBlock))
   260  	}
   261  	if cfg.TimePerBlock <= 0 {
   262  		cfg.TimePerBlock = defaultTimePerBlock
   263  		log.Info("TimePerBlock is not set or wrong, using default value",
   264  			zap.Duration("TimePerBlock", cfg.TimePerBlock))
   265  	}
   266  	if cfg.MaxValidUntilBlockIncrement == 0 {
   267  		const timePerDay = 24 * time.Hour
   268  
   269  		cfg.MaxValidUntilBlockIncrement = uint32(timePerDay / cfg.TimePerBlock)
   270  		log.Info("MaxValidUntilBlockIncrement is not set or wrong, using default value",
   271  			zap.Uint32("MaxValidUntilBlockIncrement", cfg.MaxValidUntilBlockIncrement))
   272  	}
   273  	if cfg.P2PStateExchangeExtensions {
   274  		if !cfg.StateRootInHeader {
    275  			return nil, errors.New("P2PStateExchangeExtensions are enabled, but StateRootInHeader is off")
   276  		}
   277  		if cfg.KeepOnlyLatestState && !cfg.RemoveUntraceableBlocks {
   278  			return nil, errors.New("P2PStateExchangeExtensions can be enabled either on MPT-complete node (KeepOnlyLatestState=false) or on light GC-enabled node (RemoveUntraceableBlocks=true)")
   279  		}
   280  		if cfg.StateSyncInterval <= 0 {
   281  			cfg.StateSyncInterval = defaultStateSyncInterval
   282  			log.Info("StateSyncInterval is not set or wrong, using default value",
   283  				zap.Int("StateSyncInterval", cfg.StateSyncInterval))
   284  		}
   285  	}
   286  	if cfg.Hardforks == nil {
   287  		cfg.Hardforks = map[string]uint32{}
   288  		for _, hf := range config.Hardforks {
   289  			cfg.Hardforks[hf.String()] = 0
   290  		}
   291  		log.Info("Hardforks are not set, using default value")
   292  	} else if len(cfg.Hardforks) != 0 {
   293  		// Explicitly set the height of all old omitted hardforks to 0 for proper
   294  		// IsHardforkEnabled behaviour.
   295  		for _, hf := range config.Hardforks {
   296  			if _, ok := cfg.Hardforks[hf.String()]; !ok {
   297  				cfg.Hardforks[hf.String()] = 0
   298  				continue
   299  			}
   300  			break
   301  		}
   302  	}
   303  
   304  	// Local config consistency checks.
   305  	if cfg.Ledger.RemoveUntraceableBlocks && cfg.Ledger.GarbageCollectionPeriod == 0 {
   306  		cfg.Ledger.GarbageCollectionPeriod = defaultGCPeriod
   307  		log.Info("GarbageCollectionPeriod is not set or wrong, using default value", zap.Uint32("GarbageCollectionPeriod", cfg.Ledger.GarbageCollectionPeriod))
   308  	}
   309  	bc := &Blockchain{
   310  		config:      cfg,
   311  		dao:         dao.NewSimple(s, cfg.StateRootInHeader),
   312  		persistent:  dao.NewSimple(s, cfg.StateRootInHeader),
   313  		store:       s,
   314  		stopCh:      make(chan struct{}),
   315  		runToExitCh: make(chan struct{}),
   316  		memPool:     mempool.New(cfg.MemPoolSize, 0, false, updateMempoolMetrics),
   317  		log:         log,
   318  		events:      make(chan bcEvent),
   319  		subCh:       make(chan any),
   320  		unsubCh:     make(chan any),
   321  		contracts:   *native.NewContracts(cfg.ProtocolConfiguration),
   322  	}
   323  
   324  	bc.stateRoot = stateroot.NewModule(cfg, bc.VerifyWitness, bc.log, bc.dao.Store)
   325  	bc.contracts.Designate.StateRootService = bc.stateRoot
   326  
   327  	if err := bc.init(); err != nil {
   328  		return nil, err
   329  	}
   330  
   331  	bc.isRunning.Store(false)
   332  	return bc, nil
   333  }
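
// A minimal construction sketch (cfg is assumed to be a valid config.Blockchain,
// e.g. built from a node configuration file; error handling is elided):
//
//	store := storage.NewMemoryStore()
//	bc, err := NewBlockchain(store, cfg, zap.NewNop())
//	if err != nil {
//		// handle initialization error
//	}
//	go bc.Run()      // Run must be spawned as a goroutine, see its doc comment.
//	defer bc.Close() // Close stops the chain loop and releases the storage.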
   334  
   335  // GetDesignatedByRole returns a set of designated public keys for the given role
   336  // relevant for the next block.
   337  func (bc *Blockchain) GetDesignatedByRole(r noderoles.Role) (keys.PublicKeys, uint32, error) {
   338  	// Retrieve designated nodes starting from the next block, because the current
    339  	// block is already stored, thus, dependent services can't use PostPersist callback
   340  	// to fetch relevant information at their start.
   341  	res, h, err := bc.contracts.Designate.GetDesignatedByRole(bc.dao, r, bc.BlockHeight()+1)
   342  	return res, h, err
   343  }
   344  
    345  // getCurrentHF returns the latest currently enabled hardfork. If no hardforks are enabled, the
   346  // default config.Hardfork(0) value is returned.
   347  func (bc *Blockchain) getCurrentHF() config.Hardfork {
   348  	var (
   349  		height  = bc.BlockHeight()
   350  		current config.Hardfork
   351  	)
   352  	// Rely on the fact that hardforks list is continuous.
   353  	for _, hf := range config.Hardforks {
   354  		enableHeight, ok := bc.config.Hardforks[hf.String()]
   355  		if !ok || height < enableHeight {
   356  			break
   357  		}
   358  		current = hf
   359  	}
   360  	return current
   361  }
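
// For example (hypothetical heights): with cfg.Hardforks set to
// {"Aspidochelone": 0, "Basilisk": 100} and the chain at height 50, the loop
// above stops at Basilisk (not yet enabled) and returns config.HFAspidochelone.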
   362  
   363  // SetOracle sets oracle module. It can safely be called on the running blockchain.
   364  // To unregister Oracle service use SetOracle(nil).
   365  func (bc *Blockchain) SetOracle(mod native.OracleService) {
   366  	orc := bc.contracts.Oracle
   367  	currentHF := bc.getCurrentHF()
   368  	if mod != nil {
   369  		orcMd := orc.HFSpecificContractMD(&currentHF)
   370  		md, ok := orcMd.GetMethod(manifest.MethodVerify, -1)
   371  		if !ok {
   372  			panic(fmt.Errorf("%s method not found", manifest.MethodVerify))
   373  		}
   374  		mod.UpdateNativeContract(orcMd.NEF.Script, orc.GetOracleResponseScript(),
   375  			orc.Hash, md.MD.Offset)
   376  		keys, _, err := bc.GetDesignatedByRole(noderoles.Oracle)
   377  		if err != nil {
   378  			bc.log.Error("failed to get oracle key list")
   379  			return
   380  		}
   381  		mod.UpdateOracleNodes(keys)
   382  		reqs, err := bc.contracts.Oracle.GetRequests(bc.dao)
   383  		if err != nil {
   384  			bc.log.Error("failed to get current oracle request list")
   385  			return
   386  		}
   387  		mod.AddRequests(reqs)
   388  	}
   389  	orc.Module.Store(&mod)
   390  	bc.contracts.Designate.OracleService.Store(&mod)
   391  }
   392  
   393  // SetNotary sets notary module. It may safely be called on the running blockchain.
   394  // To unregister Notary service use SetNotary(nil).
   395  func (bc *Blockchain) SetNotary(mod native.NotaryService) {
   396  	if mod != nil {
   397  		keys, _, err := bc.GetDesignatedByRole(noderoles.P2PNotary)
   398  		if err != nil {
   399  			bc.log.Error("failed to get notary key list")
   400  			return
   401  		}
   402  		mod.UpdateNotaryNodes(keys)
   403  	}
   404  	bc.contracts.Designate.NotaryService.Store(&mod)
   405  }
   406  
   407  func (bc *Blockchain) init() error {
   408  	// If we could not find the version in the Store, we know that there is nothing stored.
   409  	ver, err := bc.dao.GetVersion()
   410  	if err != nil {
   411  		bc.log.Info("no storage version found! creating genesis block")
   412  		ver = dao.Version{
   413  			StoragePrefix:              storage.STStorage,
   414  			StateRootInHeader:          bc.config.StateRootInHeader,
   415  			P2PSigExtensions:           bc.config.P2PSigExtensions,
   416  			P2PStateExchangeExtensions: bc.config.P2PStateExchangeExtensions,
   417  			KeepOnlyLatestState:        bc.config.Ledger.KeepOnlyLatestState,
   418  			Magic:                      uint32(bc.config.Magic),
   419  			Value:                      version,
   420  		}
   421  		bc.dao.PutVersion(ver)
   422  		bc.dao.Version = ver
   423  		bc.persistent.Version = ver
   424  		genesisBlock, err := CreateGenesisBlock(bc.config.ProtocolConfiguration)
   425  		if err != nil {
   426  			return err
   427  		}
   428  		bc.HeaderHashes.initGenesis(bc.dao, genesisBlock.Hash())
   429  		if err := bc.stateRoot.Init(0); err != nil {
   430  			return fmt.Errorf("can't init MPT: %w", err)
   431  		}
   432  		return bc.storeBlock(genesisBlock, nil)
   433  	}
   434  	if ver.Value != version {
   435  		return fmt.Errorf("storage version mismatch (expected=%s, actual=%s)", version, ver.Value)
   436  	}
   437  	if ver.StateRootInHeader != bc.config.StateRootInHeader {
   438  		return fmt.Errorf("StateRootInHeader setting mismatch (config=%t, db=%t)",
   439  			bc.config.StateRootInHeader, ver.StateRootInHeader)
   440  	}
   441  	if ver.P2PSigExtensions != bc.config.P2PSigExtensions {
   442  		return fmt.Errorf("P2PSigExtensions setting mismatch (old=%t, new=%t)",
   443  			ver.P2PSigExtensions, bc.config.P2PSigExtensions)
   444  	}
   445  	if ver.P2PStateExchangeExtensions != bc.config.P2PStateExchangeExtensions {
   446  		return fmt.Errorf("P2PStateExchangeExtensions setting mismatch (old=%t, new=%t)",
   447  			ver.P2PStateExchangeExtensions, bc.config.P2PStateExchangeExtensions)
   448  	}
   449  	if ver.KeepOnlyLatestState != bc.config.Ledger.KeepOnlyLatestState {
   450  		return fmt.Errorf("KeepOnlyLatestState setting mismatch (old=%v, new=%v)",
   451  			ver.KeepOnlyLatestState, bc.config.Ledger.KeepOnlyLatestState)
   452  	}
   453  	if ver.Magic != uint32(bc.config.Magic) {
   454  		return fmt.Errorf("protocol configuration Magic mismatch (old=%v, new=%v)",
   455  			ver.Magic, bc.config.Magic)
   456  	}
   457  	bc.dao.Version = ver
   458  	bc.persistent.Version = ver
   459  
    460  	// At this point a compatible version was found in the storage, which
    461  	// means it already contains a previously initialized chain, so restore
    462  	// the blockchain state from it.
   463  	bc.log.Info("restoring blockchain", zap.String("version", version))
   464  
   465  	err = bc.HeaderHashes.init(bc.dao)
   466  	if err != nil {
   467  		return err
   468  	}
   469  
    470  	// Check whether the state change stage is in the storage and continue the interrupted state jump / state reset if so.
   471  	stateChStage, err := bc.dao.Store.Get([]byte{byte(storage.SYSStateChangeStage)})
   472  	if err == nil {
   473  		if len(stateChStage) != 1 {
   474  			return fmt.Errorf("invalid state jump stage format")
   475  		}
   476  		// State jump / state reset wasn't finished yet, thus continue it.
   477  		stateSyncPoint, err := bc.dao.GetStateSyncPoint()
   478  		if err != nil {
   479  			return fmt.Errorf("failed to get state sync point from the storage")
   480  		}
   481  		if (stateChStage[0] & stateResetBit) != 0 {
   482  			return bc.resetStateInternal(stateSyncPoint, stateChangeStage(stateChStage[0]&(^stateResetBit)))
   483  		}
   484  		if !(bc.config.P2PStateExchangeExtensions && bc.config.Ledger.RemoveUntraceableBlocks) {
   485  			return errors.New("state jump was not completed, but P2PStateExchangeExtensions are disabled or archival node capability is on. " +
   486  				"To start an archival node drop the database manually and restart the node")
   487  		}
   488  		return bc.jumpToStateInternal(stateSyncPoint, stateChangeStage(stateChStage[0]))
   489  	}
   490  
   491  	bHeight, err := bc.dao.GetCurrentBlockHeight()
   492  	if err != nil {
   493  		return fmt.Errorf("failed to retrieve current block height: %w", err)
   494  	}
   495  	bc.blockHeight = bHeight
   496  	bc.persistedHeight = bHeight
   497  
   498  	bc.log.Debug("initializing caches", zap.Uint32("blockHeight", bHeight))
   499  	if err = bc.stateRoot.Init(bHeight); err != nil {
   500  		return fmt.Errorf("can't init MPT at height %d: %w", bHeight, err)
   501  	}
   502  
   503  	err = bc.initializeNativeCache(bc.blockHeight, bc.dao)
   504  	if err != nil {
   505  		return fmt.Errorf("can't init natives cache: %w", err)
   506  	}
   507  
   508  	// Check autogenerated native contracts' manifests and NEFs against the stored ones.
    509  	// This needs to be done after native Management cache initialization to be able to get
   510  	// contract state from DAO via high-level bc API.
   511  	var current = bc.getCurrentHF()
   512  	for _, c := range bc.contracts.Contracts {
   513  		md := c.Metadata()
   514  		storedCS := bc.GetContractState(md.Hash)
   515  		// Check that contract was deployed.
   516  		if !bc.isHardforkEnabled(c.ActiveIn(), bHeight) {
   517  			if storedCS != nil {
   518  				return fmt.Errorf("native contract %s is already stored, but marked as inactive for height %d in config", md.Name, bHeight)
   519  			}
   520  			continue
   521  		}
   522  		if storedCS == nil {
   523  			return fmt.Errorf("native contract %s is not stored, but should be active at height %d according to config", md.Name, bHeight)
   524  		}
   525  		storedCSBytes, err := stackitem.SerializeConvertible(storedCS)
   526  		if err != nil {
   527  			return fmt.Errorf("failed to check native %s state against autogenerated one: %w", md.Name, err)
   528  		}
   529  		hfMD := md.HFSpecificContractMD(&current)
   530  		autogenCS := &state.Contract{
   531  			ContractBase:  hfMD.ContractBase,
   532  			UpdateCounter: storedCS.UpdateCounter, // it can be restored only from the DB, so use the stored value.
   533  		}
   534  		autogenCSBytes, err := stackitem.SerializeConvertible(autogenCS)
   535  		if err != nil {
   536  			return fmt.Errorf("failed to check native %s state against autogenerated one: %w", md.Name, err)
   537  		}
   538  		if !bytes.Equal(storedCSBytes, autogenCSBytes) {
   539  			storedJ, _ := json.Marshal(storedCS)
   540  			autogenJ, _ := json.Marshal(autogenCS)
   541  			return fmt.Errorf("native %s: version mismatch for the latest hardfork %s (stored contract state differs from autogenerated one), "+
   542  				"try to resynchronize the node from the genesis: %s vs %s", md.Name, current, string(storedJ), string(autogenJ))
   543  		}
   544  	}
   545  
   546  	updateBlockHeightMetric(bHeight)
   547  	updatePersistedHeightMetric(bHeight)
   548  	updateHeaderHeightMetric(bc.HeaderHeight())
   549  
   550  	return bc.updateExtensibleWhitelist(bHeight)
   551  }
   552  
   553  // jumpToState is an atomic operation that changes Blockchain state to the one
   554  // specified by the state sync point p. All the data needed for the jump must be
   555  // collected by the state sync module.
   556  func (bc *Blockchain) jumpToState(p uint32) error {
   557  	bc.addLock.Lock()
   558  	bc.lock.Lock()
   559  	defer bc.lock.Unlock()
   560  	defer bc.addLock.Unlock()
   561  
   562  	return bc.jumpToStateInternal(p, none)
   563  }
   564  
   565  // jumpToStateInternal is an internal representation of jumpToState callback that
   566  // changes Blockchain state to the one specified by state sync point p and state
   567  // jump stage. All the data needed for the jump must be in the DB, otherwise an
   568  // error is returned. It is not protected by mutex.
   569  func (bc *Blockchain) jumpToStateInternal(p uint32, stage stateChangeStage) error {
   570  	if p >= bc.HeaderHeight() {
    571  		return fmt.Errorf("invalid state sync point %d: headerHeight is %d", p, bc.HeaderHeight())
   572  	}
   573  
   574  	bc.log.Info("jumping to state sync point", zap.Uint32("state sync point", p))
   575  
   576  	jumpStageKey := []byte{byte(storage.SYSStateChangeStage)}
   577  	switch stage {
   578  	case none:
   579  		bc.dao.Store.Put(jumpStageKey, []byte{byte(stateJumpStarted)})
   580  		fallthrough
   581  	case stateJumpStarted:
   582  		newPrefix := statesync.TemporaryPrefix(bc.dao.Version.StoragePrefix)
   583  		v, err := bc.dao.GetVersion()
   584  		if err != nil {
   585  			return fmt.Errorf("failed to get dao.Version: %w", err)
   586  		}
   587  		v.StoragePrefix = newPrefix
   588  		bc.dao.PutVersion(v)
   589  		bc.persistent.Version = v
   590  
   591  		bc.dao.Store.Put(jumpStageKey, []byte{byte(newStorageItemsAdded)})
   592  
   593  		fallthrough
   594  	case newStorageItemsAdded:
   595  		cache := bc.dao.GetPrivate()
   596  		prefix := statesync.TemporaryPrefix(bc.dao.Version.StoragePrefix)
   597  		bc.dao.Store.Seek(storage.SeekRange{Prefix: []byte{byte(prefix)}}, func(k, _ []byte) bool {
   598  			// #1468, but don't need to copy here, because it is done by Store.
   599  			cache.Store.Delete(k)
   600  			return true
   601  		})
   602  
    603  		// After the current state is updated, we need to remove outdated state-related data if any.
   604  		// The only outdated data we might have is genesis-related data, so check it.
   605  		if p-bc.config.MaxTraceableBlocks > 0 {
   606  			err := cache.DeleteBlock(bc.GetHeaderHash(0))
   607  			if err != nil {
   608  				return fmt.Errorf("failed to remove outdated state data for the genesis block: %w", err)
   609  			}
   610  			prefixes := []byte{byte(storage.STNEP11Transfers), byte(storage.STNEP17Transfers), byte(storage.STTokenTransferInfo)}
   611  			for i := range prefixes {
   612  				cache.Store.Seek(storage.SeekRange{Prefix: prefixes[i : i+1]}, func(k, v []byte) bool {
   613  					cache.Store.Delete(k)
   614  					return true
   615  				})
   616  			}
   617  		}
   618  		// Update SYS-prefixed info.
   619  		block, err := bc.dao.GetBlock(bc.GetHeaderHash(p))
   620  		if err != nil {
   621  			return fmt.Errorf("failed to get current block: %w", err)
   622  		}
   623  		cache.StoreAsCurrentBlock(block)
   624  		cache.Store.Put(jumpStageKey, []byte{byte(staleBlocksRemoved)})
   625  		_, err = cache.Persist()
   626  		if err != nil {
   627  			return fmt.Errorf("failed to persist old items removal: %w", err)
   628  		}
   629  	case staleBlocksRemoved:
   630  		// there's nothing to do after that, so just continue with common operations
   631  		// and remove state jump stage in the end.
   632  	default:
   633  		return fmt.Errorf("unknown state jump stage: %d", stage)
   634  	}
   635  	block, err := bc.dao.GetBlock(bc.GetHeaderHash(p + 1))
   636  	if err != nil {
   637  		return fmt.Errorf("failed to get block to init MPT: %w", err)
   638  	}
   639  	bc.stateRoot.JumpToState(&state.MPTRoot{
   640  		Index: p,
   641  		Root:  block.PrevStateRoot,
   642  	})
   643  
   644  	bc.dao.Store.Delete(jumpStageKey)
   645  
   646  	err = bc.resetRAMState(p, false)
   647  	if err != nil {
   648  		return fmt.Errorf("failed to update in-memory blockchain data: %w", err)
   649  	}
   650  	return nil
   651  }
   652  
   653  // resetRAMState resets in-memory cached info.
   654  func (bc *Blockchain) resetRAMState(height uint32, resetHeaders bool) error {
   655  	if resetHeaders {
   656  		err := bc.HeaderHashes.init(bc.dao)
   657  		if err != nil {
   658  			return err
   659  		}
   660  	}
   661  	block, err := bc.dao.GetBlock(bc.GetHeaderHash(height))
   662  	if err != nil {
   663  		return fmt.Errorf("failed to get current block: %w", err)
   664  	}
   665  	bc.topBlock.Store(block)
   666  	atomic.StoreUint32(&bc.blockHeight, height)
   667  	atomic.StoreUint32(&bc.persistedHeight, height)
   668  
   669  	err = bc.initializeNativeCache(block.Index, bc.dao)
   670  	if err != nil {
   671  		return fmt.Errorf("failed to initialize natives cache: %w", err)
   672  	}
   673  
   674  	if err := bc.updateExtensibleWhitelist(height); err != nil {
   675  		return fmt.Errorf("failed to update extensible whitelist: %w", err)
   676  	}
   677  
   678  	updateBlockHeightMetric(height)
   679  	updatePersistedHeightMetric(height)
   680  	updateHeaderHeightMetric(bc.HeaderHeight())
   681  	return nil
   682  }
   683  
   684  // Reset resets chain state to the specified height if possible. This method
   685  // performs direct DB changes and can be called on non-running Blockchain only.
   686  func (bc *Blockchain) Reset(height uint32) error {
   687  	if bc.isRunning.Load().(bool) {
   688  		return errors.New("can't reset state of the running blockchain")
   689  	}
   690  	bc.dao.PutStateSyncPoint(height)
   691  	return bc.resetStateInternal(height, none)
   692  }
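
// A usage sketch (the Blockchain must not be running, i.e. Run was never
// started or has already been stopped via Close; the target height is
// arbitrary here):
//
//	if err := bc.Reset(1_000_000); err != nil {
//		// the chain is running or lacks the state required for this height
//	}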
   693  
   694  func (bc *Blockchain) resetStateInternal(height uint32, stage stateChangeStage) error {
   695  	// Cache isn't yet initialized, so retrieve block height right from DAO.
   696  	currHeight, err := bc.dao.GetCurrentBlockHeight()
   697  	if err != nil {
   698  		return fmt.Errorf("failed to retrieve current block height: %w", err)
   699  	}
    700  	// Headers are already initialized by this moment, thus we may use the chain's API.
   701  	hHeight := bc.HeaderHeight()
   702  	// State reset may already be started by this moment, so perform these checks only if it wasn't.
   703  	if stage == none {
   704  		if height > currHeight {
   705  			return fmt.Errorf("current block height is %d, can't reset state to height %d", currHeight, height)
   706  		}
   707  		if height == currHeight && hHeight == currHeight {
   708  			bc.log.Info("chain is at the proper state", zap.Uint32("height", height))
   709  			return nil
   710  		}
   711  		if bc.config.Ledger.KeepOnlyLatestState {
   712  			return fmt.Errorf("KeepOnlyLatestState is enabled, state for height %d is outdated and removed from the storage", height)
   713  		}
   714  		if bc.config.Ledger.RemoveUntraceableBlocks && currHeight >= bc.config.MaxTraceableBlocks {
   715  			return fmt.Errorf("RemoveUntraceableBlocks is enabled, a necessary batch of traceable blocks has already been removed")
   716  		}
   717  	}
   718  
   719  	// Retrieve necessary state before the DB modification.
   720  	b, err := bc.GetBlock(bc.GetHeaderHash(height))
   721  	if err != nil {
   722  		return fmt.Errorf("failed to retrieve block %d: %w", height, err)
   723  	}
   724  	sr, err := bc.stateRoot.GetStateRoot(height)
   725  	if err != nil {
   726  		return fmt.Errorf("failed to retrieve stateroot for height %d: %w", height, err)
   727  	}
   728  	v := bc.dao.Version
   729  	// dao is MemCachedStore over DB, we use dao directly to persist cached changes
   730  	// right to the underlying DB.
   731  	cache := bc.dao
   732  	// upperCache is a private MemCachedStore over cache. During each of the state
   733  	// sync stages we put the data inside the upperCache; in the end of each stage
   734  	// we persist changes from upperCache to cache. Changes from cache are persisted
   735  	// directly to the underlying persistent storage (boltDB, levelDB, etc.).
   736  	// upperCache/cache segregation is needed to keep the DB state clean and to
   737  	// persist data from different stages separately.
   738  	upperCache := cache.GetPrivate()
   739  
   740  	bc.log.Info("initializing state reset", zap.Uint32("target height", height))
   741  	start := time.Now()
   742  	p := start
   743  
    744  	// Start the batch persisting routine; it will be used for persisting batches of blocks/txs/AERs/storage items.
   745  	type postPersist func(persistedKeys int, err error) error
   746  	var (
   747  		persistCh       = make(chan postPersist)
   748  		persistToExitCh = make(chan struct{})
   749  	)
   750  	go func() {
   751  		for {
   752  			f, ok := <-persistCh
   753  			if !ok {
   754  				break
   755  			}
   756  			persistErr := f(cache.Persist())
   757  			if persistErr != nil {
   758  				bc.log.Fatal("persist failed", zap.Error(persistErr))
   759  				panic(persistErr)
   760  			}
   761  		}
   762  		close(persistToExitCh)
   763  	}()
   764  	defer func() {
   765  		close(persistCh)
   766  		<-persistToExitCh
   767  		bc.log.Info("reset finished successfully", zap.Duration("took", time.Since(start)))
   768  	}()
   769  
   770  	resetStageKey := []byte{byte(storage.SYSStateChangeStage)}
   771  	switch stage {
   772  	case none:
   773  		upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(stateJumpStarted)})
   774  		// Technically, there's no difference between Persist() and PersistSync() for the private
   775  		// MemCached storage, but we'd better use the sync version in case of some further code changes.
   776  		_, uerr := upperCache.PersistSync()
   777  		if uerr != nil {
   778  			panic(uerr)
   779  		}
   780  		upperCache = cache.GetPrivate()
   781  		persistCh <- func(persistedKeys int, err error) error {
   782  			if err != nil {
   783  				return fmt.Errorf("failed to persist state reset start marker to the DB: %w", err)
   784  			}
   785  			return nil
   786  		}
   787  		fallthrough
   788  	case stateJumpStarted:
   789  		bc.log.Debug("trying to reset blocks, transactions and AERs")
   790  		// Remove blocks/transactions/aers from currHeight down to height (not including height itself).
   791  		// Keep headers for now, they'll be removed later. It's hard to handle the whole set of changes in
   792  		// one stage, so persist periodically.
   793  		const persistBatchSize = 100 * headerBatchCount // count blocks only, should be enough to avoid OOM killer even for large blocks
   794  		var (
   795  			pBlocksStart        = p
   796  			blocksCnt, batchCnt int
   797  			keysCnt             = new(int)
   798  		)
   799  		for i := height + 1; i <= currHeight; i++ {
   800  			err := upperCache.DeleteBlock(bc.GetHeaderHash(i))
   801  			if err != nil {
   802  				return fmt.Errorf("error while removing block %d: %w", i, err)
   803  			}
   804  			blocksCnt++
   805  			if blocksCnt == persistBatchSize {
   806  				blocksCnt = 0
   807  				batchCnt++
   808  				bc.log.Info("intermediate batch of removed blocks, transactions and AERs is collected",
   809  					zap.Int("batch", batchCnt),
   810  					zap.Duration("took", time.Since(p)))
   811  
   812  				persistStart := time.Now()
   813  				persistBatch := batchCnt
   814  				_, uerr := upperCache.PersistSync()
   815  				if uerr != nil {
   816  					panic(uerr)
   817  				}
   818  				upperCache = cache.GetPrivate()
   819  				persistCh <- func(persistedKeys int, err error) error {
   820  					if err != nil {
   821  						return fmt.Errorf("failed to persist intermediate batch of removed blocks, transactions and AERs: %w", err)
   822  					}
   823  					*keysCnt += persistedKeys
   824  					bc.log.Debug("intermediate batch of removed blocks, transactions and AERs is persisted",
   825  						zap.Int("batch", persistBatch),
   826  						zap.Duration("took", time.Since(persistStart)),
   827  						zap.Int("keys", persistedKeys))
   828  					return nil
   829  				}
   830  				p = time.Now()
   831  			}
   832  		}
   833  		upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(staleBlocksRemoved)})
   834  		batchCnt++
   835  		bc.log.Info("last batch of removed blocks, transactions and AERs is collected",
   836  			zap.Int("batch", batchCnt),
   837  			zap.Duration("took", time.Since(p)))
    838  		bc.log.Info("blocks, transactions and AERs are reset", zap.Duration("took", time.Since(pBlocksStart)))
   839  
   840  		persistStart := time.Now()
   841  		persistBatch := batchCnt
   842  		_, uerr := upperCache.PersistSync()
   843  		if uerr != nil {
   844  			panic(uerr)
   845  		}
   846  		upperCache = cache.GetPrivate()
   847  		persistCh <- func(persistedKeys int, err error) error {
   848  			if err != nil {
    849  				return fmt.Errorf("failed to persist last batch of removed blocks, transactions and AERs: %w", err)
   850  			}
   851  			*keysCnt += persistedKeys
   852  			bc.log.Debug("last batch of removed blocks, transactions and AERs is persisted",
   853  				zap.Int("batch", persistBatch),
   854  				zap.Duration("took", time.Since(persistStart)),
   855  				zap.Int("keys", persistedKeys))
   856  			return nil
   857  		}
   858  		p = time.Now()
   859  		fallthrough
   860  	case staleBlocksRemoved:
   861  		// Completely remove contract IDs to update them later.
   862  		bc.log.Debug("trying to reset contract storage items")
   863  		pStorageStart := p
   864  
   865  		p = time.Now()
   866  		var mode = mpt.ModeAll
   867  		if bc.config.Ledger.RemoveUntraceableBlocks {
   868  			mode |= mpt.ModeGCFlag
   869  		}
   870  		trieStore := mpt.NewTrieStore(sr.Root, mode, upperCache.Store)
   871  		oldStoragePrefix := v.StoragePrefix
   872  		newStoragePrefix := statesync.TemporaryPrefix(oldStoragePrefix)
   873  
   874  		const persistBatchSize = 200000
   875  		var cnt, storageItmsCnt, batchCnt int
   876  		trieStore.Seek(storage.SeekRange{Prefix: []byte{byte(oldStoragePrefix)}}, func(k, v []byte) bool {
   877  			if cnt >= persistBatchSize {
   878  				cnt = 0
   879  				batchCnt++
   880  				bc.log.Info("intermediate batch of contract storage items and IDs is collected",
   881  					zap.Int("batch", batchCnt),
   882  					zap.Duration("took", time.Since(p)))
   883  
   884  				persistStart := time.Now()
   885  				persistBatch := batchCnt
   886  				_, uerr := upperCache.PersistSync()
   887  				if uerr != nil {
   888  					panic(uerr)
   889  				}
   890  				upperCache = cache.GetPrivate()
   891  				persistCh <- func(persistedKeys int, err error) error {
   892  					if err != nil {
   893  						return fmt.Errorf("failed to persist intermediate batch of contract storage items: %w", err)
   894  					}
   895  					bc.log.Debug("intermediate batch of contract storage items is persisted",
   896  						zap.Int("batch", persistBatch),
   897  						zap.Duration("took", time.Since(persistStart)),
   898  						zap.Int("keys", persistedKeys))
   899  					return nil
   900  				}
   901  				p = time.Now()
   902  			}
   903  			// May safely omit KV copying.
   904  			k[0] = byte(newStoragePrefix)
   905  			upperCache.Store.Put(k, v)
   906  			cnt++
   907  			storageItmsCnt++
   908  
   909  			return true
   910  		})
   911  		trieStore.Close()
   912  
   913  		upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(newStorageItemsAdded)})
   914  		batchCnt++
   915  		persistBatch := batchCnt
   916  		bc.log.Info("last batch of contract storage items is collected", zap.Int("batch", batchCnt), zap.Duration("took", time.Since(p)))
   917  		bc.log.Info("contract storage items are reset", zap.Duration("took", time.Since(pStorageStart)),
   918  			zap.Int("keys", storageItmsCnt))
   919  
   920  		lastStart := time.Now()
   921  		_, uerr := upperCache.PersistSync()
   922  		if uerr != nil {
   923  			panic(uerr)
   924  		}
   925  		upperCache = cache.GetPrivate()
   926  		persistCh <- func(persistedKeys int, err error) error {
   927  			if err != nil {
   928  				return fmt.Errorf("failed to persist contract storage items and IDs changes to the DB: %w", err)
   929  			}
   930  			bc.log.Debug("last batch of contract storage items and IDs is persisted", zap.Int("batch", persistBatch), zap.Duration("took", time.Since(lastStart)), zap.Int("keys", persistedKeys))
   931  			return nil
   932  		}
   933  		p = time.Now()
   934  		fallthrough
   935  	case newStorageItemsAdded:
   936  		// Reset SYS-prefixed and IX-prefixed information.
   937  		bc.log.Debug("trying to reset headers information")
   938  		for i := height + 1; i <= hHeight; i++ {
   939  			upperCache.PurgeHeader(bc.GetHeaderHash(i))
   940  		}
   941  		upperCache.DeleteHeaderHashes(height+1, headerBatchCount)
   942  		upperCache.StoreAsCurrentBlock(b)
   943  		upperCache.PutCurrentHeader(b.Hash(), height)
   944  		v.StoragePrefix = statesync.TemporaryPrefix(v.StoragePrefix)
   945  		upperCache.PutVersion(v)
   946  		// It's important to manually change the cache's Version at this stage, so that native cache
   947  		// can be properly initialized (with the correct contract storage data prefix) at the final
   948  		// stage of the state reset. At the same time, DB's SYSVersion-prefixed data will be persisted
   949  		// from upperCache to cache in a standard way (several lines below).
   950  		cache.Version = v
   951  		bc.persistent.Version = v
   952  
   953  		upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(headersReset)})
   954  		bc.log.Info("headers information is reset", zap.Duration("took", time.Since(p)))
   955  
   956  		persistStart := time.Now()
   957  		_, uerr := upperCache.PersistSync()
   958  		if uerr != nil {
   959  			panic(uerr)
   960  		}
   961  		upperCache = cache.GetPrivate()
   962  		persistCh <- func(persistedKeys int, err error) error {
   963  			if err != nil {
   964  				return fmt.Errorf("failed to persist headers changes to the DB: %w", err)
   965  			}
   966  			bc.log.Debug("headers information is persisted", zap.Duration("took", time.Since(persistStart)), zap.Int("keys", persistedKeys))
   967  			return nil
   968  		}
   969  		p = time.Now()
   970  		fallthrough
   971  	case headersReset:
   972  		// Reset MPT.
   973  		bc.log.Debug("trying to reset state root information and NEP transfers")
   974  		err = bc.stateRoot.ResetState(height, upperCache.Store)
   975  		if err != nil {
   976  			return fmt.Errorf("failed to rollback MPT state: %w", err)
   977  		}
   978  
   979  		// Reset transfers.
   980  		err = bc.resetTransfers(upperCache, height)
   981  		if err != nil {
   982  			return fmt.Errorf("failed to strip transfer log / transfer info: %w", err)
   983  		}
   984  
   985  		upperCache.Store.Put(resetStageKey, []byte{stateResetBit | byte(transfersReset)})
   986  		bc.log.Info("state root information and NEP transfers are reset", zap.Duration("took", time.Since(p)))
   987  
   988  		persistStart := time.Now()
   989  		_, uerr := upperCache.PersistSync()
   990  		if uerr != nil {
   991  			panic(uerr)
   992  		}
   993  		upperCache = cache.GetPrivate()
   994  		persistCh <- func(persistedKeys int, err error) error {
   995  			if err != nil {
   996  				return fmt.Errorf("failed to persist contract storage items changes to the DB: %w", err)
   997  			}
   998  
   999  			bc.log.Debug("state root information and NEP transfers are persisted", zap.Duration("took", time.Since(persistStart)), zap.Int("keys", persistedKeys))
  1000  			return nil
  1001  		}
  1002  		p = time.Now()
  1003  		fallthrough
  1004  	case transfersReset:
  1005  		// there's nothing to do after that, so just continue with common operations
  1006  		// and remove state reset stage in the end.
  1007  	default:
  1008  		return fmt.Errorf("unknown state reset stage: %d", stage)
  1009  	}
  1010  
  1011  	// Direct (cache-less) DB operation: remove stale storage items.
  1012  	bc.log.Debug("trying to remove stale storage items")
  1013  	keys := 0
  1014  	err = bc.store.SeekGC(storage.SeekRange{
  1015  		Prefix: []byte{byte(statesync.TemporaryPrefix(v.StoragePrefix))},
  1016  	}, func(_, _ []byte) bool {
  1017  		keys++
  1018  		return false
  1019  	})
  1020  	if err != nil {
  1021  		return fmt.Errorf("failed to remove stale storage items from DB: %w", err)
  1022  	}
  1023  	bc.log.Info("stale storage items are reset", zap.Duration("took", time.Since(p)), zap.Int("keys", keys))
  1024  	p = time.Now()
  1025  
  1026  	bc.log.Debug("trying to remove state reset point")
  1027  	upperCache.Store.Delete(resetStageKey)
  1028  	// Unlike the state jump, state sync point must be removed as we have complete state for this height.
  1029  	upperCache.Store.Delete([]byte{byte(storage.SYSStateSyncPoint)})
  1030  	bc.log.Info("state reset point is removed", zap.Duration("took", time.Since(p)))
  1031  
  1032  	persistStart := time.Now()
  1033  	_, uerr := upperCache.PersistSync()
  1034  	if uerr != nil {
  1035  		panic(uerr)
  1036  	}
  1037  	persistCh <- func(persistedKeys int, err error) error {
  1038  		if err != nil {
  1039  			return fmt.Errorf("failed to persist state reset stage to DAO: %w", err)
  1040  		}
  1041  		bc.log.Info("state reset point information is persisted", zap.Duration("took", time.Since(persistStart)), zap.Int("keys", persistedKeys))
  1042  		return nil
  1043  	}
  1044  	p = time.Now()
  1045  
  1046  	err = bc.resetRAMState(height, true)
  1047  	if err != nil {
  1048  		return fmt.Errorf("failed to update in-memory blockchain data: %w", err)
  1049  	}
  1050  	return nil
  1051  }
  1052  
  1053  func (bc *Blockchain) initializeNativeCache(blockHeight uint32, d *dao.Simple) error {
  1054  	for _, c := range bc.contracts.Contracts {
  1055  		// Check that contract was deployed.
  1056  		if !bc.isHardforkEnabled(c.ActiveIn(), blockHeight) {
  1057  			continue
  1058  		}
  1059  		err := c.InitializeCache(blockHeight, d)
  1060  		if err != nil {
  1061  			return fmt.Errorf("failed to initialize cache for %s: %w", c.Metadata().Name, err)
  1062  		}
  1063  	}
  1064  	return nil
  1065  }
  1066  
  1067  // isHardforkEnabled returns true if the specified hardfork is enabled at the
  1068  // given height. nil hardfork is treated as always enabled.
  1069  func (bc *Blockchain) isHardforkEnabled(hf *config.Hardfork, blockHeight uint32) bool {
  1070  	hfs := bc.config.Hardforks
  1071  	if hf != nil {
  1072  		start, ok := hfs[hf.String()]
  1073  		if !ok || start > blockHeight {
  1074  			return false
  1075  		}
  1076  	}
  1077  	return true
  1078  }
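
// Semantics sketch (hypothetical activation height):
//
//	// cfg.Hardforks = map[string]uint32{"Aspidochelone": 100}
//	hf := config.HFAspidochelone
//	bc.isHardforkEnabled(&hf, 99)  // false, activation height not reached yet
//	bc.isHardforkEnabled(&hf, 100) // true
//	bc.isHardforkEnabled(nil, 7)   // true, nil hardfork is always enabled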
  1079  
  1080  // Run runs chain loop, it needs to be run as goroutine and executing it is
  1081  // critical for correct Blockchain operation.
  1082  func (bc *Blockchain) Run() {
  1083  	bc.isRunning.Store(true)
  1084  	persistTimer := time.NewTimer(persistInterval)
  1085  	defer func() {
  1086  		persistTimer.Stop()
  1087  		if _, err := bc.persist(true); err != nil {
  1088  			bc.log.Warn("failed to persist", zap.Error(err))
  1089  		}
  1090  		if err := bc.dao.Store.Close(); err != nil {
  1091  			bc.log.Warn("failed to close db", zap.Error(err))
  1092  		}
  1093  		bc.isRunning.Store(false)
  1094  		close(bc.runToExitCh)
  1095  	}()
  1096  	go bc.notificationDispatcher()
  1097  	var nextSync bool
  1098  	for {
  1099  		select {
  1100  		case <-bc.stopCh:
  1101  			return
  1102  		case <-persistTimer.C:
  1103  			var oldPersisted uint32
  1104  			var gcDur time.Duration
  1105  
  1106  			if bc.config.Ledger.RemoveUntraceableBlocks {
  1107  				oldPersisted = atomic.LoadUint32(&bc.persistedHeight)
  1108  			}
  1109  			dur, err := bc.persist(nextSync)
  1110  			if err != nil {
  1111  				bc.log.Warn("failed to persist blockchain", zap.Error(err))
  1112  			}
  1113  			if bc.config.Ledger.RemoveUntraceableBlocks {
  1114  				gcDur = bc.tryRunGC(oldPersisted)
  1115  			}
  1116  			nextSync = dur > persistInterval*2
  1117  			interval := persistInterval - dur - gcDur
  1118  			if interval <= 0 {
  1119  				interval = time.Microsecond // Reset doesn't work with zero value
  1120  			}
  1121  			persistTimer.Reset(interval)
  1122  		}
  1123  	}
  1124  }
  1125  
  1126  func (bc *Blockchain) tryRunGC(oldHeight uint32) time.Duration {
  1127  	var dur time.Duration
  1128  
  1129  	newHeight := atomic.LoadUint32(&bc.persistedHeight)
  1130  	var tgtBlock = int64(newHeight)
  1131  
  1132  	tgtBlock -= int64(bc.config.MaxTraceableBlocks)
  1133  	if bc.config.P2PStateExchangeExtensions {
  1134  		syncP := newHeight / uint32(bc.config.StateSyncInterval)
  1135  		syncP--
  1136  		syncP *= uint32(bc.config.StateSyncInterval)
  1137  		if tgtBlock > int64(syncP) {
  1138  			tgtBlock = int64(syncP)
  1139  		}
  1140  	}
  1141  	// Always round to the GCP.
  1142  	tgtBlock /= int64(bc.config.Ledger.GarbageCollectionPeriod)
  1143  	tgtBlock *= int64(bc.config.Ledger.GarbageCollectionPeriod)
  1144  	// Count periods.
  1145  	oldHeight /= bc.config.Ledger.GarbageCollectionPeriod
  1146  	newHeight /= bc.config.Ledger.GarbageCollectionPeriod
  1147  	if tgtBlock > int64(bc.config.Ledger.GarbageCollectionPeriod) && newHeight != oldHeight {
  1148  		tgtBlock /= int64(bc.config.Ledger.GarbageCollectionPeriod)
  1149  		tgtBlock *= int64(bc.config.Ledger.GarbageCollectionPeriod)
  1150  		dur = bc.stateRoot.GC(uint32(tgtBlock), bc.store)
  1151  		dur += bc.removeOldTransfers(uint32(tgtBlock))
  1152  	}
  1153  	return dur
  1154  }
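
// Worked example (hypothetical values): with MaxTraceableBlocks=2102400,
// GarbageCollectionPeriod=10000 and a persisted height of 2200000, tgtBlock
// starts at 2200000-2102400=97600 and is rounded down to 90000, so MPT nodes
// and transfer batches older than block 90000 become GC candidates (provided
// the persisted height crossed a GC period boundary since the previous run).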
  1155  
  1156  // resetTransfers is a helper function that strips the top newest NEP17 and NEP11 transfer logs
  1157  // down to the given height (not including the height itself) and updates corresponding token
  1158  // transfer info.
  1159  func (bc *Blockchain) resetTransfers(cache *dao.Simple, height uint32) error {
  1160  	// Completely remove transfer info, updating it takes too much effort. We'll gather new
  1161  	// transfer info on-the-fly later.
  1162  	cache.Store.Seek(storage.SeekRange{
  1163  		Prefix: []byte{byte(storage.STTokenTransferInfo)},
  1164  	}, func(k, v []byte) bool {
  1165  		cache.Store.Delete(k)
  1166  		return true
  1167  	})
  1168  
  1169  	// Look inside each transfer batch and iterate over the batch transfers, picking those that
  1170  	// are not newer than the given height. Also, for each suitable transfer, update the transfer
  1171  	// info, flushing the changes once the whole set of the account's transfers is processed.
  1172  	prefixes := []byte{byte(storage.STNEP11Transfers), byte(storage.STNEP17Transfers)}
  1173  	for i := range prefixes {
  1174  		var (
  1175  			acc             util.Uint160
  1176  			trInfo          *state.TokenTransferInfo
  1177  			removeFollowing bool
  1178  			seekErr         error
  1179  		)
  1180  
  1181  		cache.Store.Seek(storage.SeekRange{
  1182  			Prefix:    prefixes[i : i+1],
  1183  			Backwards: false, // From oldest to newest batch.
  1184  		}, func(k, v []byte) bool {
  1185  			var batchAcc util.Uint160
  1186  			copy(batchAcc[:], k[1:])
  1187  
  1188  			if batchAcc != acc { // Some new account we're iterating over.
  1189  				if trInfo != nil {
  1190  					seekErr = cache.PutTokenTransferInfo(acc, trInfo)
  1191  					if seekErr != nil {
  1192  						return false
  1193  					}
  1194  				}
  1195  				acc = batchAcc
  1196  				trInfo = nil
  1197  				removeFollowing = false
  1198  			} else if removeFollowing {
  1199  				cache.Store.Delete(bytes.Clone(k))
  1200  				return seekErr == nil
  1201  			}
  1202  
  1203  			r := io.NewBinReaderFromBuf(v[1:])
  1204  			l := len(v)
  1205  			bytesRead := 1 // 1 is for batch size byte which is read by default.
  1206  			var (
  1207  				oldBatchSize = v[0]
  1208  				newBatchSize byte
  1209  			)
  1210  			for i := byte(0); i < v[0]; i++ { // From oldest to newest transfer of the batch.
  1211  				var t *state.NEP17Transfer
  1212  				if k[0] == byte(storage.STNEP11Transfers) {
  1213  					tr := new(state.NEP11Transfer)
  1214  					tr.DecodeBinary(r)
  1215  					t = &tr.NEP17Transfer
  1216  				} else {
  1217  					t = new(state.NEP17Transfer)
  1218  					t.DecodeBinary(r)
  1219  				}
  1220  				if r.Err != nil {
  1221  					seekErr = fmt.Errorf("failed to decode subsequent transfer: %w", r.Err)
  1222  					break
  1223  				}
  1224  
  1225  				if t.Block > height {
  1226  					break
  1227  				}
  1228  				bytesRead = l - r.Len() // Including batch size byte.
  1229  				newBatchSize++
  1230  				if trInfo == nil {
  1231  					var err error
  1232  					trInfo, err = cache.GetTokenTransferInfo(batchAcc)
  1233  					if err != nil {
  1234  						seekErr = fmt.Errorf("failed to retrieve token transfer info for %s: %w", batchAcc.StringLE(), err)
  1235  						return false
  1236  					}
  1237  				}
  1238  				appendTokenTransferInfo(trInfo, t.Asset, t.Block, t.Timestamp, k[0] == byte(storage.STNEP11Transfers), newBatchSize >= state.TokenTransferBatchSize)
  1239  			}
  1240  			if newBatchSize == oldBatchSize {
  1241  				// The batch is already in storage and doesn't need to be changed.
  1242  				return seekErr == nil
  1243  			}
  1244  			if newBatchSize > 0 {
  1245  				v[0] = newBatchSize
  1246  				cache.Store.Put(k, v[:bytesRead])
  1247  			} else {
  1248  				cache.Store.Delete(k)
  1249  				removeFollowing = true
  1250  			}
  1251  			return seekErr == nil
  1252  		})
  1253  		if seekErr != nil {
  1254  			return seekErr
  1255  		}
  1256  		if trInfo != nil {
  1257  			// Flush the last batch of transfer info changes.
  1258  			err := cache.PutTokenTransferInfo(acc, trInfo)
  1259  			if err != nil {
  1260  				return err
  1261  			}
  1262  		}
  1263  	}
  1264  	return nil
  1265  }
  1266  
  1267  // appendTokenTransferInfo is a helper for resetTransfers that updates token transfer info
  1268  // wrt the given transfer that was added to the subsequent transfer batch.
  1269  func appendTokenTransferInfo(transferData *state.TokenTransferInfo,
  1270  	token int32, bIndex uint32, bTimestamp uint64, isNEP11 bool, lastTransferInBatch bool) {
  1271  	var (
  1272  		newBatch      *bool
  1273  		nextBatch     *uint32
  1274  		currTimestamp *uint64
  1275  	)
  1276  	if !isNEP11 {
  1277  		newBatch = &transferData.NewNEP17Batch
  1278  		nextBatch = &transferData.NextNEP17Batch
  1279  		currTimestamp = &transferData.NextNEP17NewestTimestamp
  1280  	} else {
  1281  		newBatch = &transferData.NewNEP11Batch
  1282  		nextBatch = &transferData.NextNEP11Batch
  1283  		currTimestamp = &transferData.NextNEP11NewestTimestamp
  1284  	}
  1285  	transferData.LastUpdated[token] = bIndex
  1286  	*newBatch = lastTransferInBatch
  1287  	if *newBatch {
  1288  		*nextBatch++
  1289  		*currTimestamp = bTimestamp
  1290  	}
  1291  }
  1292  
  1293  func (bc *Blockchain) removeOldTransfers(index uint32) time.Duration {
  1294  	bc.log.Info("starting transfer data garbage collection", zap.Uint32("index", index))
  1295  	start := time.Now()
  1296  	h, err := bc.GetHeader(bc.GetHeaderHash(index))
  1297  	if err != nil {
  1298  		dur := time.Since(start)
  1299  		bc.log.Error("failed to find block header for transfer GC", zap.Duration("time", dur), zap.Error(err))
  1300  		return dur
  1301  	}
  1302  	var removed, kept int64
  1303  	var ts = h.Timestamp
  1304  	prefixes := []byte{byte(storage.STNEP11Transfers), byte(storage.STNEP17Transfers)}
  1305  
  1306  	for i := range prefixes {
  1307  		var acc util.Uint160
  1308  		var canDrop bool
  1309  
  1310  		err = bc.store.SeekGC(storage.SeekRange{
  1311  			Prefix:    prefixes[i : i+1],
  1312  			Backwards: true, // From new to old.
  1313  		}, func(k, v []byte) bool {
  1314  			// We don't look inside the batches since that requires too much effort;
  1315  			// instead, we drop batches that are confirmed to contain outdated entries.
  1316  			var batchAcc util.Uint160
  1317  			var batchTs = binary.BigEndian.Uint64(k[1+util.Uint160Size:])
  1318  			copy(batchAcc[:], k[1:])
  1319  
  1320  			if batchAcc != acc { // Some new account we're iterating over.
  1321  				acc = batchAcc
  1322  			} else if canDrop { // We've seen this account and all entries in this batch are guaranteed to be outdated.
  1323  				removed++
  1324  				return false
  1325  			}
  1326  			// We don't know what's inside, so keep the current
  1327  			// batch anyway, but allow older ones to be dropped.
  1328  			canDrop = batchTs <= ts
  1329  			kept++
  1330  			return true
  1331  		})
  1332  		if err != nil {
  1333  			break
  1334  		}
  1335  	}
  1336  	dur := time.Since(start)
  1337  	if err != nil {
  1338  		bc.log.Error("failed to flush transfer data GC changeset", zap.Duration("time", dur), zap.Error(err))
  1339  	} else {
  1340  		bc.log.Info("finished transfer data garbage collection",
  1341  			zap.Int64("removed", removed),
  1342  			zap.Int64("kept", kept),
  1343  			zap.Duration("time", dur))
  1344  	}
  1345  	return dur
  1346  }
  1347  
  1348  // notificationDispatcher manages subscription to events and broadcasts new events.
  1349  func (bc *Blockchain) notificationDispatcher() {
  1350  	var (
  1351  		// These are just sets of subscribers, though modelled as maps
  1352  		// for ease of management (not a lot of subscriptions are really
  1353  		// expected, but maps are convenient for adding/deleting elements).
  1354  		blockFeed        = make(map[chan *block.Block]bool)
  1355  		headerFeed       = make(map[chan *block.Header]bool)
  1356  		txFeed           = make(map[chan *transaction.Transaction]bool)
  1357  		notificationFeed = make(map[chan *state.ContainedNotificationEvent]bool)
  1358  		executionFeed    = make(map[chan *state.AppExecResult]bool)
  1359  	)
  1360  	for {
  1361  		select {
  1362  		case <-bc.stopCh:
  1363  			return
  1364  		case sub := <-bc.subCh:
  1365  			switch ch := sub.(type) {
  1366  			case chan *block.Header:
  1367  				headerFeed[ch] = true
  1368  			case chan *block.Block:
  1369  				blockFeed[ch] = true
  1370  			case chan *transaction.Transaction:
  1371  				txFeed[ch] = true
  1372  			case chan *state.ContainedNotificationEvent:
  1373  				notificationFeed[ch] = true
  1374  			case chan *state.AppExecResult:
  1375  				executionFeed[ch] = true
  1376  			default:
  1377  				panic(fmt.Sprintf("bad subscription: %T", sub))
  1378  			}
  1379  		case unsub := <-bc.unsubCh:
  1380  			switch ch := unsub.(type) {
  1381  			case chan *block.Header:
  1382  				delete(headerFeed, ch)
  1383  			case chan *block.Block:
  1384  				delete(blockFeed, ch)
  1385  			case chan *transaction.Transaction:
  1386  				delete(txFeed, ch)
  1387  			case chan *state.ContainedNotificationEvent:
  1388  				delete(notificationFeed, ch)
  1389  			case chan *state.AppExecResult:
  1390  				delete(executionFeed, ch)
  1391  			default:
  1392  				panic(fmt.Sprintf("bad unsubscription: %T", unsub))
  1393  			}
  1394  		case event := <-bc.events:
  1395  			// We don't want to waste time looping through transactions when there are no
  1396  			// subscribers.
  1397  			if len(txFeed) != 0 || len(notificationFeed) != 0 || len(executionFeed) != 0 {
  1398  				aer := event.appExecResults[0]
  1399  				if !aer.Container.Equals(event.block.Hash()) {
  1400  					panic("inconsistent application execution results")
  1401  				}
  1402  				for ch := range executionFeed {
  1403  					ch <- aer
  1404  				}
  1405  				for i := range aer.Events {
  1406  					for ch := range notificationFeed {
  1407  						ch <- &state.ContainedNotificationEvent{
  1408  							Container:         aer.Container,
  1409  							NotificationEvent: aer.Events[i],
  1410  						}
  1411  					}
  1412  				}
  1413  
  1414  				aerIdx := 1
  1415  				for _, tx := range event.block.Transactions {
  1416  					aer := event.appExecResults[aerIdx]
  1417  					if !aer.Container.Equals(tx.Hash()) {
  1418  						panic("inconsistent application execution results")
  1419  					}
  1420  					aerIdx++
  1421  					for ch := range executionFeed {
  1422  						ch <- aer
  1423  					}
  1424  					if aer.VMState == vmstate.Halt {
  1425  						for i := range aer.Events {
  1426  							for ch := range notificationFeed {
  1427  								ch <- &state.ContainedNotificationEvent{
  1428  									Container:         aer.Container,
  1429  									NotificationEvent: aer.Events[i],
  1430  								}
  1431  							}
  1432  						}
  1433  					}
  1434  					for ch := range txFeed {
  1435  						ch <- tx
  1436  					}
  1437  				}
  1438  
  1439  				aer = event.appExecResults[aerIdx]
  1440  				if !aer.Container.Equals(event.block.Hash()) {
  1441  					panic("inconsistent application execution results")
  1442  				}
  1443  				for ch := range executionFeed {
  1444  					ch <- aer
  1445  				}
  1446  				for i := range aer.Events {
  1447  					for ch := range notificationFeed {
  1448  						ch <- &state.ContainedNotificationEvent{
  1449  							Container:         aer.Container,
  1450  							NotificationEvent: aer.Events[i],
  1451  						}
  1452  					}
  1453  				}
  1454  			}
  1455  			for ch := range headerFeed {
  1456  				ch <- &event.block.Header
  1457  			}
  1458  			for ch := range blockFeed {
  1459  				ch <- event.block
  1460  			}
  1461  		}
  1462  	}
  1463  }
  1464  
  1465  // Close stops Blockchain's internal loop, syncs changes to persistent storage
  1466  // and closes it. The Blockchain is no longer functional after the call to Close.
  1467  func (bc *Blockchain) Close() {
  1468  	// If there is a block addition in progress, wait for it to finish and
  1469  	// don't allow new ones.
  1470  	bc.addLock.Lock()
  1471  	close(bc.stopCh)
  1472  	<-bc.runToExitCh
  1473  	bc.addLock.Unlock()
  1474  	_ = bc.log.Sync()
  1475  }
  1476  
  1477  // AddBlock accepts a successive block for the Blockchain, verifies it and
  1478  // stores it internally. Eventually it will be persisted to the backing storage.
  1479  func (bc *Blockchain) AddBlock(block *block.Block) error {
  1480  	bc.addLock.Lock()
  1481  	defer bc.addLock.Unlock()
  1482  
  1483  	var mp *mempool.Pool
  1484  	expectedHeight := bc.BlockHeight() + 1
  1485  	if expectedHeight != block.Index {
  1486  		return fmt.Errorf("expected %d, got %d: %w", expectedHeight, block.Index, ErrInvalidBlockIndex)
  1487  	}
  1488  	if bc.config.StateRootInHeader != block.StateRootEnabled {
  1489  		return fmt.Errorf("%w: %v != %v",
  1490  			ErrHdrStateRootSetting, bc.config.StateRootInHeader, block.StateRootEnabled)
  1491  	}
  1492  
  1493  	if block.Index == bc.HeaderHeight()+1 {
  1494  		err := bc.addHeaders(!bc.config.SkipBlockVerification, &block.Header)
  1495  		if err != nil {
  1496  			return err
  1497  		}
  1498  	}
  1499  	if !bc.config.SkipBlockVerification {
  1500  		merkle := block.ComputeMerkleRoot()
  1501  		if !block.MerkleRoot.Equals(merkle) {
  1502  			return errors.New("invalid block: MerkleRoot mismatch")
  1503  		}
  1504  		mp = mempool.New(len(block.Transactions), 0, false, nil)
  1505  		for _, tx := range block.Transactions {
  1506  			var err error
  1507  			// Transactions are verified before adding them
  1508  			// into the pool, so there is no point in doing
  1509  			// it again even if we're verifying in-block transactions.
  1510  			if bc.memPool.ContainsKey(tx.Hash()) {
  1511  				err = mp.Add(tx, bc)
  1512  				if err == nil {
  1513  					continue
  1514  				}
  1515  			} else {
  1516  				err = bc.verifyAndPoolTx(tx, mp, bc)
  1517  			}
  1518  			if err != nil {
  1519  				if bc.config.VerifyTransactions {
  1520  					return fmt.Errorf("transaction %s failed to verify: %w", tx.Hash().StringLE(), err)
  1521  				}
  1522  				bc.log.Warn(fmt.Sprintf("transaction %s failed to verify: %s", tx.Hash().StringLE(), err))
  1523  			}
  1524  		}
  1525  	}
  1526  	return bc.storeBlock(block, mp)
  1527  }
  1528  
  1529  // AddHeaders processes the given headers and adds them to the
  1530  // HeaderHashList. It expects headers to be sorted by index.
  1531  func (bc *Blockchain) AddHeaders(headers ...*block.Header) error {
  1532  	return bc.addHeaders(!bc.config.SkipBlockVerification, headers...)
  1533  }
  1534  
  1535  // addHeaders is an internal implementation of AddHeaders (`verify` parameter
  1536  // tells it whether to verify the given headers).
  1537  func (bc *Blockchain) addHeaders(verify bool, headers ...*block.Header) error {
  1538  	var (
  1539  		start = time.Now()
  1540  		err   error
  1541  	)
  1542  
  1543  	if len(headers) > 0 {
  1544  		var i int
  1545  		curHeight := bc.HeaderHeight()
  1546  		for i = range headers {
  1547  			if headers[i].Index > curHeight {
  1548  				break
  1549  			}
  1550  		}
  1551  		headers = headers[i:]
  1552  	}
  1553  
  1554  	if len(headers) == 0 {
  1555  		return nil
  1556  	} else if verify {
  1557  		// Verify that the chain of the headers is consistent.
  1558  		var lastHeader *block.Header
  1559  		if lastHeader, err = bc.GetHeader(headers[0].PrevHash); err != nil {
  1560  			return fmt.Errorf("previous header was not found: %w", err)
  1561  		}
  1562  		for _, h := range headers {
  1563  			if err = bc.verifyHeader(h, lastHeader); err != nil {
  1564  				return err
  1565  			}
  1566  			lastHeader = h
  1567  		}
  1568  	}
  1569  	res := bc.HeaderHashes.addHeaders(headers...)
  1570  	if res == nil {
  1571  		bc.log.Debug("done processing headers",
  1572  			zap.Uint32("headerIndex", bc.HeaderHeight()),
  1573  			zap.Uint32("blockHeight", bc.BlockHeight()),
  1574  			zap.Duration("took", time.Since(start)))
  1575  	}
  1576  	return res
  1577  }
  1578  
  1579  // GetStateRoot returns state root for the given height.
  1580  func (bc *Blockchain) GetStateRoot(height uint32) (*state.MPTRoot, error) {
  1581  	return bc.stateRoot.GetStateRoot(height)
  1582  }
  1583  
  1584  // GetStateModule returns state root service instance.
  1585  func (bc *Blockchain) GetStateModule() StateRoot {
  1586  	return bc.stateRoot
  1587  }
  1588  
  1589  // GetStateSyncModule returns new state sync service instance.
  1590  func (bc *Blockchain) GetStateSyncModule() *statesync.Module {
  1591  	return statesync.NewModule(bc, bc.stateRoot, bc.log, bc.dao, bc.jumpToState)
  1592  }
  1593  
  1594  // storeBlock performs chain update using the given block: it executes all
  1595  // transactions with all appropriate side-effects and updates Blockchain state.
  1596  // This is the only way to change Blockchain state.
  1597  func (bc *Blockchain) storeBlock(block *block.Block, txpool *mempool.Pool) error {
  1598  	var (
  1599  		cache          = bc.dao.GetPrivate()
  1600  		aerCache       = bc.dao.GetPrivate()
  1601  		appExecResults = make([]*state.AppExecResult, 0, 2+len(block.Transactions))
  1602  		aerchan        = make(chan *state.AppExecResult, len(block.Transactions)/8) // Tested 8 and 4 with no practical difference, but feel free to test more and tune.
  1603  		aerdone        = make(chan error)
  1604  	)
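        	// The goroutine below drains aerchan concurrently with transaction execution:
        	// it stores application execution results (block-level ones via StoreAsBlock,
        	// per-transaction ones via StoreAsTransaction), processes Transfer notifications
        	// into transCache and signals completion by closing aerdone (or sends the first
        	// error into it).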
  1605  	go func() {
  1606  		var (
  1607  			kvcache      = aerCache
  1608  			err          error
  1609  			txCnt        int
  1610  			baer1, baer2 *state.AppExecResult
  1611  			transCache   = make(map[util.Uint160]transferData)
  1612  		)
  1613  		kvcache.StoreAsCurrentBlock(block)
  1614  		if bc.config.Ledger.RemoveUntraceableBlocks {
  1615  			var start, stop uint32
  1616  			if bc.config.P2PStateExchangeExtensions {
  1617  				// remove batch of old blocks starting from P2-MaxTraceableBlocks-StateSyncInterval up to P2-MaxTraceableBlocks
  1618  				if block.Index >= 2*uint32(bc.config.StateSyncInterval) &&
  1619  					block.Index >= uint32(bc.config.StateSyncInterval)+bc.config.MaxTraceableBlocks && // check this in case MaxTraceableBlocks > StateSyncInterval
  1620  					int(block.Index)%bc.config.StateSyncInterval == 0 {
  1621  					stop = block.Index - uint32(bc.config.StateSyncInterval) - bc.config.MaxTraceableBlocks
  1622  					if stop > uint32(bc.config.StateSyncInterval) {
  1623  						start = stop - uint32(bc.config.StateSyncInterval)
  1624  					}
  1625  				}
  1626  			} else if block.Index > bc.config.MaxTraceableBlocks {
  1627  				start = block.Index - bc.config.MaxTraceableBlocks // is at least 1
  1628  				stop = start + 1
  1629  			}
  1630  			for index := start; index < stop; index++ {
  1631  				err := kvcache.DeleteBlock(bc.GetHeaderHash(index))
  1632  				if err != nil {
  1633  					bc.log.Warn("error while removing old block",
  1634  						zap.Uint32("index", index),
  1635  						zap.Error(err))
  1636  				}
  1637  			}
  1638  		}
  1639  		for aer := range aerchan {
  1640  			if aer.Container == block.Hash() {
  1641  				if baer1 == nil {
  1642  					baer1 = aer
  1643  				} else {
  1644  					baer2 = aer
  1645  				}
  1646  			} else {
  1647  				err = kvcache.StoreAsTransaction(block.Transactions[txCnt], block.Index, aer)
  1648  				txCnt++
  1649  			}
  1650  			if err != nil {
  1651  				err = fmt.Errorf("failed to store exec result: %w", err)
  1652  				break
  1653  			}
  1654  			if aer.Execution.VMState == vmstate.Halt {
  1655  				for j := range aer.Execution.Events {
  1656  					bc.handleNotification(&aer.Execution.Events[j], kvcache, transCache, block, aer.Container)
  1657  				}
  1658  			}
  1659  		}
  1660  		if err != nil {
  1661  			aerdone <- err
  1662  			return
  1663  		}
  1664  		if err := kvcache.StoreAsBlock(block, baer1, baer2); err != nil {
  1665  			aerdone <- err
  1666  			return
  1667  		}
  1668  		for acc, trData := range transCache {
  1669  			err = kvcache.PutTokenTransferInfo(acc, &trData.Info)
  1670  			if err != nil {
  1671  				aerdone <- err
  1672  				return
  1673  			}
  1674  			if !trData.Info.NewNEP11Batch {
  1675  				kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP11NewestTimestamp, trData.Info.NextNEP11Batch, true, &trData.Log11)
  1676  			}
  1677  			if !trData.Info.NewNEP17Batch {
  1678  				kvcache.PutTokenTransferLog(acc, trData.Info.NextNEP17NewestTimestamp, trData.Info.NextNEP17Batch, false, &trData.Log17)
  1679  			}
  1680  		}
  1681  		close(aerdone)
  1682  	}()
  1683  	_ = cache.GetItemCtx() // Prime serialization context cache (it'll be reused by upper layer DAOs).
  1684  	aer, v, err := bc.runPersist(bc.contracts.GetPersistScript(), block, cache, trigger.OnPersist, nil)
  1685  	if err != nil {
  1686  		// Release goroutines, don't care about errors, we already have one.
  1687  		close(aerchan)
  1688  		<-aerdone
  1689  		return fmt.Errorf("onPersist failed: %w", err)
  1690  	}
  1691  	appExecResults = append(appExecResults, aer)
  1692  	aerchan <- aer
  1693  
  1694  	for _, tx := range block.Transactions {
  1695  		systemInterop := bc.newInteropContext(trigger.Application, cache, block, tx)
  1696  		systemInterop.ReuseVM(v)
  1697  		v.LoadScriptWithFlags(tx.Script, callflag.All)
  1698  		v.GasLimit = tx.SystemFee
  1699  
  1700  		err := systemInterop.Exec()
  1701  		var faultException string
  1702  		if !v.HasFailed() {
  1703  			_, err := systemInterop.DAO.Persist()
  1704  			if err != nil {
  1705  				// Release goroutines, don't care about errors, we already have one.
  1706  				close(aerchan)
  1707  				<-aerdone
  1708  				return fmt.Errorf("failed to persist invocation results: %w", err)
  1709  			}
  1710  		} else {
  1711  			bc.log.Warn("contract invocation failed",
  1712  				zap.String("tx", tx.Hash().StringLE()),
  1713  				zap.Uint32("block", block.Index),
  1714  				zap.Error(err))
  1715  			faultException = err.Error()
  1716  		}
  1717  		aer := &state.AppExecResult{
  1718  			Container: tx.Hash(),
  1719  			Execution: state.Execution{
  1720  				Trigger:        trigger.Application,
  1721  				VMState:        v.State(),
  1722  				GasConsumed:    v.GasConsumed(),
  1723  				Stack:          v.Estack().ToArray(),
  1724  				Events:         systemInterop.Notifications,
  1725  				FaultException: faultException,
  1726  			},
  1727  		}
  1728  		appExecResults = append(appExecResults, aer)
  1729  		aerchan <- aer
  1730  	}
  1731  
  1732  	aer, _, err = bc.runPersist(bc.contracts.GetPostPersistScript(), block, cache, trigger.PostPersist, v)
  1733  	if err != nil {
  1734  		// Release goroutines, don't care about errors, we already have one.
  1735  		close(aerchan)
  1736  		<-aerdone
  1737  		return fmt.Errorf("postPersist failed: %w", err)
  1738  	}
  1739  	appExecResults = append(appExecResults, aer)
  1740  	aerchan <- aer
  1741  	close(aerchan)
  1742  	b := mpt.MapToMPTBatch(cache.Store.GetStorageChanges())
  1743  	mpt, sr, err := bc.stateRoot.AddMPTBatch(block.Index, b, cache.Store)
  1744  	if err != nil {
  1745  		// Release goroutines, don't care about errors, we already have one.
  1746  		<-aerdone
  1747  		// Here MPT can be left in a half-applied state.
  1748  		// However, if this error occurs, it is a bug somewhere in the code,
  1749  		// because the changes applied are the ones from HALTed transactions.
  1750  		return fmt.Errorf("error while trying to apply MPT changes: %w", err)
  1751  	}
  1752  	if bc.config.StateRootInHeader && bc.HeaderHeight() > sr.Index {
  1753  		h, err := bc.GetHeader(bc.GetHeaderHash(sr.Index + 1))
  1754  		if err != nil {
  1755  			err = fmt.Errorf("failed to get next header: %w", err)
  1756  		} else if h.PrevStateRoot != sr.Root {
  1757  			err = fmt.Errorf("local stateroot and next header's PrevStateRoot mismatch: %s vs %s", sr.Root.StringBE(), h.PrevStateRoot.StringBE())
  1758  		}
  1759  		if err != nil {
  1760  			// Release goroutines, don't care about errors, we already have one.
  1761  			<-aerdone
  1762  			return err
  1763  		}
  1764  	}
  1765  
  1766  	if bc.config.Ledger.SaveStorageBatch {
  1767  		bc.lastBatch = cache.GetBatch()
  1768  	}
  1769  	// Every persist cycle we also compact our in-memory MPT. It's flushed
  1770  	// already in AddMPTBatch, so collapsing it is safe.
  1771  	persistedHeight := atomic.LoadUint32(&bc.persistedHeight)
  1772  	if persistedHeight == block.Index-1 {
  1773  		// 10 is good and roughly estimated to fit remaining trie into 1M of memory.
  1774  		mpt.Collapse(10)
  1775  	}
  1776  
  1777  	aererr := <-aerdone
  1778  	if aererr != nil {
  1779  		return aererr
  1780  	}
  1781  
  1782  	bc.lock.Lock()
  1783  	_, err = aerCache.Persist()
  1784  	if err != nil {
  1785  		bc.lock.Unlock()
  1786  		return err
  1787  	}
  1788  	_, err = cache.Persist()
  1789  	if err != nil {
  1790  		bc.lock.Unlock()
  1791  		return err
  1792  	}
  1793  
  1794  	mpt.Store = bc.dao.Store
  1795  	bc.stateRoot.UpdateCurrentLocal(mpt, sr)
  1796  	bc.topBlock.Store(block)
  1797  	atomic.StoreUint32(&bc.blockHeight, block.Index)
  1798  	bc.memPool.RemoveStale(func(tx *transaction.Transaction) bool { return bc.IsTxStillRelevant(tx, txpool, false) }, bc)
  1799  	for _, f := range bc.postBlock {
  1800  		f(bc.IsTxStillRelevant, txpool, block)
  1801  	}
  1802  	if err := bc.updateExtensibleWhitelist(block.Index); err != nil {
  1803  		bc.lock.Unlock()
  1804  		return err
  1805  	}
  1806  	bc.lock.Unlock()
  1807  
  1808  	updateBlockHeightMetric(block.Index)
  1809  	// Genesis block is stored when Blockchain is not yet running, so there
  1810  	// is no one to read this event. And it doesn't make much sense as an
  1811  	// event anyway.
  1812  	if block.Index != 0 {
  1813  		bc.events <- bcEvent{block, appExecResults}
  1814  	}
  1815  	return nil
  1816  }
  1817  
  1818  func (bc *Blockchain) updateExtensibleWhitelist(height uint32) error {
  1819  	updateCommittee := bc.config.ShouldUpdateCommitteeAt(height)
  1820  	stateVals, sh, err := bc.contracts.Designate.GetDesignatedByRole(bc.dao, noderoles.StateValidator, height)
  1821  	if err != nil {
  1822  		return err
  1823  	}
  1824  
  1825  	if bc.extensible.Load() != nil && !updateCommittee && sh != height {
  1826  		return nil
  1827  	}
  1828  
  1829  	newList := []util.Uint160{bc.contracts.NEO.GetCommitteeAddress(bc.dao)}
  1830  	nextVals := bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao)
  1831  	script, err := smartcontract.CreateDefaultMultiSigRedeemScript(nextVals)
  1832  	if err != nil {
  1833  		return err
  1834  	}
  1835  	newList = append(newList, hash.Hash160(script))
  1836  	bc.updateExtensibleList(&newList, bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao))
  1837  
  1838  	if len(stateVals) > 0 {
  1839  		h, err := bc.contracts.Designate.GetLastDesignatedHash(bc.dao, noderoles.StateValidator)
  1840  		if err != nil {
  1841  			return err
  1842  		}
  1843  		newList = append(newList, h)
  1844  		bc.updateExtensibleList(&newList, stateVals)
  1845  	}
  1846  
  1847  	sort.Slice(newList, func(i, j int) bool {
  1848  		return newList[i].Less(newList[j])
  1849  	})
  1850  	bc.extensible.Store(newList)
  1851  	return nil
  1852  }
  1853  
  1854  func (bc *Blockchain) updateExtensibleList(s *[]util.Uint160, pubs keys.PublicKeys) {
  1855  	for _, pub := range pubs {
  1856  		*s = append(*s, pub.GetScriptHash())
  1857  	}
  1858  }
  1859  
  1860  // IsExtensibleAllowed determines if script hash is allowed to send extensible payloads.
  1861  func (bc *Blockchain) IsExtensibleAllowed(u util.Uint160) bool {
  1862  	us := bc.extensible.Load().([]util.Uint160)
  1863  	n := sort.Search(len(us), func(i int) bool { return !us[i].Less(u) })
  1864  	return n < len(us) && us[n].Equals(u)
  1865  }
  1866  
  1867  func (bc *Blockchain) runPersist(script []byte, block *block.Block, cache *dao.Simple, trig trigger.Type, v *vm.VM) (*state.AppExecResult, *vm.VM, error) {
  1868  	systemInterop := bc.newInteropContext(trig, cache, block, nil)
  1869  	if v == nil {
  1870  		v = systemInterop.SpawnVM()
  1871  	} else {
  1872  		systemInterop.ReuseVM(v)
  1873  	}
  1874  	v.LoadScriptWithFlags(script, callflag.All)
  1875  	if err := systemInterop.Exec(); err != nil {
  1876  		return nil, v, fmt.Errorf("VM has failed: %w", err)
  1877  	} else if _, err := systemInterop.DAO.Persist(); err != nil {
  1878  		return nil, v, fmt.Errorf("can't save changes: %w", err)
  1879  	}
  1880  	return &state.AppExecResult{
  1881  		Container: block.Hash(), // application logs can be retrieved by block hash
  1882  		Execution: state.Execution{
  1883  			Trigger:     trig,
  1884  			VMState:     v.State(),
  1885  			GasConsumed: v.GasConsumed(),
  1886  			Stack:       v.Estack().ToArray(),
  1887  			Events:      systemInterop.Notifications,
  1888  		},
  1889  	}, v, nil
  1890  }
  1891  
  1892  func (bc *Blockchain) handleNotification(note *state.NotificationEvent, d *dao.Simple,
  1893  	transCache map[util.Uint160]transferData, b *block.Block, h util.Uint256) {
  1894  	if note.Name != "Transfer" {
  1895  		return
  1896  	}
  1897  	arr, ok := note.Item.Value().([]stackitem.Item)
  1898  	if !ok || !(len(arr) == 3 || len(arr) == 4) {
  1899  		return
  1900  	}
  1901  	from, err := parseUint160(arr[0])
  1902  	if err != nil {
  1903  		return
  1904  	}
  1905  	to, err := parseUint160(arr[1])
  1906  	if err != nil {
  1907  		return
  1908  	}
  1909  	amount, err := arr[2].TryInteger()
  1910  	if err != nil {
  1911  		return
  1912  	}
  1913  	var id []byte
  1914  	if len(arr) == 4 {
  1915  		id, err = arr[3].TryBytes()
  1916  		if err != nil || len(id) > limits.MaxStorageKeyLen {
  1917  			return
  1918  		}
  1919  	}
  1920  	bc.processTokenTransfer(d, transCache, h, b, note.ScriptHash, from, to, amount, id)
  1921  }
  1922  
  1923  func parseUint160(itm stackitem.Item) (util.Uint160, error) {
  1924  	_, ok := itm.(stackitem.Null) // Minting or burning.
  1925  	if ok {
  1926  		return util.Uint160{}, nil
  1927  	}
  1928  	bytes, err := itm.TryBytes()
  1929  	if err != nil {
  1930  		return util.Uint160{}, err
  1931  	}
  1932  	return util.Uint160DecodeBytesBE(bytes)
  1933  }
  1934  
  1935  func (bc *Blockchain) processTokenTransfer(cache *dao.Simple, transCache map[util.Uint160]transferData,
  1936  	h util.Uint256, b *block.Block, sc util.Uint160, from util.Uint160, to util.Uint160,
  1937  	amount *big.Int, tokenID []byte) {
  1938  	var id int32
  1939  	nativeContract := bc.contracts.ByHash(sc)
  1940  	if nativeContract != nil {
  1941  		id = nativeContract.Metadata().ID
  1942  	} else {
  1943  		assetContract, err := native.GetContract(cache, sc)
  1944  		if err != nil {
  1945  			return
  1946  		}
  1947  		id = assetContract.ID
  1948  	}
  1949  	var transfer io.Serializable
  1950  	var nep17xfer *state.NEP17Transfer
  1951  	var isNEP11 = (tokenID != nil)
  1952  	if !isNEP11 {
  1953  		nep17xfer = &state.NEP17Transfer{
  1954  			Asset:        id,
  1955  			Amount:       amount,
  1956  			Block:        b.Index,
  1957  			Counterparty: to,
  1958  			Timestamp:    b.Timestamp,
  1959  			Tx:           h,
  1960  		}
  1961  		transfer = nep17xfer
  1962  	} else {
  1963  		nep11xfer := &state.NEP11Transfer{
  1964  			NEP17Transfer: state.NEP17Transfer{
  1965  				Asset:        id,
  1966  				Amount:       amount,
  1967  				Block:        b.Index,
  1968  				Counterparty: to,
  1969  				Timestamp:    b.Timestamp,
  1970  				Tx:           h,
  1971  			},
  1972  			ID: tokenID,
  1973  		}
  1974  		transfer = nep11xfer
  1975  		nep17xfer = &nep11xfer.NEP17Transfer
  1976  	}
  1977  	if !from.Equals(util.Uint160{}) {
  1978  		_ = nep17xfer.Amount.Neg(nep17xfer.Amount)
  1979  		err := appendTokenTransfer(cache, transCache, from, transfer, id, b.Index, b.Timestamp, isNEP11)
  1980  		_ = nep17xfer.Amount.Neg(nep17xfer.Amount)
  1981  		if err != nil {
  1982  			return
  1983  		}
  1984  	}
  1985  	if !to.Equals(util.Uint160{}) {
  1986  		nep17xfer.Counterparty = from
  1987  		_ = appendTokenTransfer(cache, transCache, to, transfer, id, b.Index, b.Timestamp, isNEP11) // Nothing useful we can do.
  1988  	}
  1989  }
  1990  
  1991  func appendTokenTransfer(cache *dao.Simple, transCache map[util.Uint160]transferData, addr util.Uint160, transfer io.Serializable,
  1992  	token int32, bIndex uint32, bTimestamp uint64, isNEP11 bool) error {
  1993  	transferData, ok := transCache[addr]
  1994  	if !ok {
  1995  		balances, err := cache.GetTokenTransferInfo(addr)
  1996  		if err != nil {
  1997  			return err
  1998  		}
  1999  		if !balances.NewNEP11Batch {
  2000  			trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP11NewestTimestamp, balances.NextNEP11Batch, true)
  2001  			if err != nil {
  2002  				return err
  2003  			}
  2004  			transferData.Log11 = *trLog
  2005  		}
  2006  		if !balances.NewNEP17Batch {
  2007  			trLog, err := cache.GetTokenTransferLog(addr, balances.NextNEP17NewestTimestamp, balances.NextNEP17Batch, false)
  2008  			if err != nil {
  2009  				return err
  2010  			}
  2011  			transferData.Log17 = *trLog
  2012  		}
  2013  		transferData.Info = *balances
  2014  	}
  2015  	var (
  2016  		log           *state.TokenTransferLog
  2017  		nextBatch     uint32
  2018  		currTimestamp uint64
  2019  	)
  2020  	if !isNEP11 {
  2021  		log = &transferData.Log17
  2022  		nextBatch = transferData.Info.NextNEP17Batch
  2023  		currTimestamp = transferData.Info.NextNEP17NewestTimestamp
  2024  	} else {
  2025  		log = &transferData.Log11
  2026  		nextBatch = transferData.Info.NextNEP11Batch
  2027  		currTimestamp = transferData.Info.NextNEP11NewestTimestamp
  2028  	}
  2029  	err := log.Append(transfer)
  2030  	if err != nil {
  2031  		return err
  2032  	}
  2033  	newBatch := log.Size() >= state.TokenTransferBatchSize
  2034  	if newBatch {
  2035  		cache.PutTokenTransferLog(addr, currTimestamp, nextBatch, isNEP11, log)
  2036  		// Put makes a copy of it anyway.
  2037  		log.Reset()
  2038  	}
  2039  	appendTokenTransferInfo(&transferData.Info, token, bIndex, bTimestamp, isNEP11, newBatch)
  2040  	transCache[addr] = transferData
  2041  	return nil
  2042  }
  2043  
  2044  // ForEachNEP17Transfer executes f for each NEP-17 transfer in the log, starting from
  2045  // the transfer with the newest timestamp up to the oldest transfer. It continues
  2046  // iteration until false is returned from f. The last non-nil error is returned.
  2047  func (bc *Blockchain) ForEachNEP17Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP17Transfer) (bool, error)) error {
  2048  	return bc.dao.SeekNEP17TransferLog(acc, newestTimestamp, f)
  2049  }
  2050  
  2051  // ForEachNEP11Transfer executes f for each NEP-11 transfer in the log, starting from
  2052  // the transfer with the newest timestamp up to the oldest transfer. It continues
  2053  // iteration until false is returned from f. The last non-nil error is returned.
  2054  func (bc *Blockchain) ForEachNEP11Transfer(acc util.Uint160, newestTimestamp uint64, f func(*state.NEP11Transfer) (bool, error)) error {
  2055  	return bc.dao.SeekNEP11TransferLog(acc, newestTimestamp, f)
  2056  }
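
        // An illustrative (non-authoritative) usage sketch for the iterators above:
        // counting up to ten of the most recent NEP-17 transfers of an account,
        // assuming acc is some util.Uint160 and that math.MaxUint64 is passed to
        // start from the newest stored batch.
        //
        //	var count int
        //	err := bc.ForEachNEP17Transfer(acc, math.MaxUint64, func(tr *state.NEP17Transfer) (bool, error) {
        //		count++
        //		return count < 10, nil // Stop after ten transfers.
        //	})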
  2057  
  2058  // GetNEP17Contracts returns the list of deployed NEP-17 contracts.
  2059  func (bc *Blockchain) GetNEP17Contracts() []util.Uint160 {
  2060  	return bc.contracts.Management.GetNEP17Contracts(bc.dao)
  2061  }
  2062  
  2063  // GetNEP11Contracts returns the list of deployed NEP-11 contracts.
  2064  func (bc *Blockchain) GetNEP11Contracts() []util.Uint160 {
  2065  	return bc.contracts.Management.GetNEP11Contracts(bc.dao)
  2066  }
  2067  
  2068  // GetTokenLastUpdated returns a set of contract IDs with the corresponding last updated
  2069  // block indexes. In case of an empty account, the latest stored state synchronisation point
  2070  // is returned under the math.MinInt32 key.
  2071  func (bc *Blockchain) GetTokenLastUpdated(acc util.Uint160) (map[int32]uint32, error) {
  2072  	info, err := bc.dao.GetTokenTransferInfo(acc)
  2073  	if err != nil {
  2074  		return nil, err
  2075  	}
  2076  	if bc.config.P2PStateExchangeExtensions && bc.config.Ledger.RemoveUntraceableBlocks {
  2077  		if _, ok := info.LastUpdated[bc.contracts.NEO.ID]; !ok {
  2078  			nBalance, lub := bc.contracts.NEO.BalanceOf(bc.dao, acc)
  2079  			if nBalance.Sign() != 0 {
  2080  				info.LastUpdated[bc.contracts.NEO.ID] = lub
  2081  			}
  2082  		}
  2083  	}
  2084  	stateSyncPoint, err := bc.dao.GetStateSyncPoint()
  2085  	if err == nil {
  2086  		info.LastUpdated[math.MinInt32] = stateSyncPoint
  2087  	}
  2088  	return info.LastUpdated, nil
  2089  }
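
        // A minimal sketch (illustrative, not part of the original code) of reading the
        // state synchronisation point entry described above, assuming acc is a util.Uint160:
        //
        //	lu, err := bc.GetTokenLastUpdated(acc)
        //	if err == nil {
        //		if p, ok := lu[math.MinInt32]; ok {
        //			_ = p // p is the latest stored state synchronisation point.
        //		}
        //	}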
  2090  
  2091  // GetUtilityTokenBalance returns utility token (GAS) balance for the acc.
  2092  func (bc *Blockchain) GetUtilityTokenBalance(acc util.Uint160) *big.Int {
  2093  	bs := bc.contracts.GAS.BalanceOf(bc.dao, acc)
  2094  	if bs == nil {
  2095  		return big.NewInt(0)
  2096  	}
  2097  	return bs
  2098  }
  2099  
  2100  // GetGoverningTokenBalance returns governing token (NEO) balance and the height
  2101  // of the last balance change for the account.
  2102  func (bc *Blockchain) GetGoverningTokenBalance(acc util.Uint160) (*big.Int, uint32) {
  2103  	return bc.contracts.NEO.BalanceOf(bc.dao, acc)
  2104  }
  2105  
  2106  // GetNotaryBalance returns Notary deposit amount for the specified account.
  2107  func (bc *Blockchain) GetNotaryBalance(acc util.Uint160) *big.Int {
  2108  	return bc.contracts.Notary.BalanceOf(bc.dao, acc)
  2109  }
  2110  
  2111  // GetNotaryServiceFeePerKey returns a NotaryAssisted transaction attribute fee
  2112  // per key, which is a reward per notary request key for designated notary nodes.
  2113  func (bc *Blockchain) GetNotaryServiceFeePerKey() int64 {
  2114  	return bc.contracts.Policy.GetAttributeFeeInternal(bc.dao, transaction.NotaryAssistedT)
  2115  }
  2116  
  2117  // GetNotaryContractScriptHash returns Notary native contract hash.
  2118  func (bc *Blockchain) GetNotaryContractScriptHash() util.Uint160 {
  2119  	if bc.P2PSigExtensionsEnabled() {
  2120  		return bc.contracts.Notary.Hash
  2121  	}
  2122  	return util.Uint160{}
  2123  }
  2124  
  2125  // GetNotaryDepositExpiration returns Notary deposit expiration height for the specified account.
  2126  func (bc *Blockchain) GetNotaryDepositExpiration(acc util.Uint160) uint32 {
  2127  	return bc.contracts.Notary.ExpirationOf(bc.dao, acc)
  2128  }
  2129  
  2130  // LastBatch returns last persisted storage batch.
  2131  func (bc *Blockchain) LastBatch() *storage.MemBatch {
  2132  	return bc.lastBatch
  2133  }
  2134  
  2135  // persist flushes current in-memory Store contents to the persistent storage.
  2136  func (bc *Blockchain) persist(isSync bool) (time.Duration, error) {
  2137  	var (
  2138  		start     = time.Now()
  2139  		duration  time.Duration
  2140  		persisted int
  2141  		err       error
  2142  	)
  2143  
  2144  	if isSync {
  2145  		persisted, err = bc.dao.PersistSync()
  2146  	} else {
  2147  		persisted, err = bc.dao.Persist()
  2148  	}
  2149  	if err != nil {
  2150  		return 0, err
  2151  	}
  2152  	if persisted > 0 {
  2153  		bHeight, err := bc.persistent.GetCurrentBlockHeight()
  2154  		if err != nil {
  2155  			return 0, err
  2156  		}
  2157  		oldHeight := atomic.SwapUint32(&bc.persistedHeight, bHeight)
  2158  		diff := bHeight - oldHeight
  2159  
  2160  		storedHeaderHeight, _, err := bc.persistent.GetCurrentHeaderHeight()
  2161  		if err != nil {
  2162  			return 0, err
  2163  		}
  2164  		duration = time.Since(start)
  2165  		bc.log.Info("persisted to disk",
  2166  			zap.Uint32("blocks", diff),
  2167  			zap.Int("keys", persisted),
  2168  			zap.Uint32("headerHeight", storedHeaderHeight),
  2169  			zap.Uint32("blockHeight", bHeight),
  2170  			zap.Duration("took", duration))
  2171  
  2172  		// update monitoring metrics.
  2173  		updatePersistedHeightMetric(bHeight)
  2174  	}
  2175  
  2176  	return duration, nil
  2177  }
  2178  
  2179  // GetTransaction returns a TX and its height by the given hash. The height is MaxUint32 if tx is in the mempool.
  2180  func (bc *Blockchain) GetTransaction(hash util.Uint256) (*transaction.Transaction, uint32, error) {
  2181  	if tx, ok := bc.memPool.TryGetValue(hash); ok {
  2182  		return tx, math.MaxUint32, nil // the height is not actually defined for memPool transaction.
  2183  	}
  2184  	return bc.dao.GetTransaction(hash)
  2185  }
  2186  
  2187  // GetAppExecResults returns application execution results with the specified trigger by the given
  2188  // tx hash or block hash.
  2189  func (bc *Blockchain) GetAppExecResults(hash util.Uint256, trig trigger.Type) ([]state.AppExecResult, error) {
  2190  	return bc.dao.GetAppExecResults(hash, trig)
  2191  }
  2192  
  2193  // GetStorageItem returns an item from storage.
  2194  func (bc *Blockchain) GetStorageItem(id int32, key []byte) state.StorageItem {
  2195  	return bc.dao.GetStorageItem(id, key)
  2196  }
  2197  
  2198  // SeekStorage performs a seek operation over contract storage. Prefix is trimmed in the resulting pair's key.
  2199  func (bc *Blockchain) SeekStorage(id int32, prefix []byte, cont func(k, v []byte) bool) {
  2200  	bc.dao.Seek(id, storage.SeekRange{Prefix: prefix}, cont)
  2201  }
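
        // A small usage sketch (hypothetical contract ID and prefix values): enumerating
        // storage items of a contract; keys arrive with the prefix already trimmed.
        //
        //	bc.SeekStorage(contractID, []byte{0x01}, func(k, v []byte) bool {
        //		return true // Continue iteration until the prefix is exhausted.
        //	})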
  2202  
  2203  // GetBlock returns a Block by the given hash.
  2204  func (bc *Blockchain) GetBlock(hash util.Uint256) (*block.Block, error) {
  2205  	topBlock := bc.topBlock.Load()
  2206  	if topBlock != nil {
  2207  		tb := topBlock.(*block.Block)
  2208  		if tb.Hash().Equals(hash) {
  2209  			return tb, nil
  2210  		}
  2211  	}
  2212  
  2213  	block, err := bc.dao.GetBlock(hash)
  2214  	if err != nil {
  2215  		return nil, err
  2216  	}
  2217  	if !block.MerkleRoot.Equals(util.Uint256{}) && len(block.Transactions) == 0 {
  2218  		return nil, errors.New("only header is found")
  2219  	}
  2220  	for _, tx := range block.Transactions {
  2221  		stx, _, err := bc.dao.GetTransaction(tx.Hash())
  2222  		if err != nil {
  2223  			return nil, err
  2224  		}
  2225  		*tx = *stx
  2226  	}
  2227  	return block, nil
  2228  }
  2229  
  2230  // GetHeader returns the block header identified by the given hash value.
  2231  func (bc *Blockchain) GetHeader(hash util.Uint256) (*block.Header, error) {
  2232  	topBlock := bc.topBlock.Load()
  2233  	if topBlock != nil {
  2234  		tb := topBlock.(*block.Block)
  2235  		if tb.Hash().Equals(hash) {
  2236  			return &tb.Header, nil
  2237  		}
  2238  	}
  2239  	block, err := bc.dao.GetBlock(hash)
  2240  	if err != nil {
  2241  		return nil, err
  2242  	}
  2243  	return &block.Header, nil
  2244  }
  2245  
  2246  // HasBlock returns true if the blockchain contains the given
  2247  // block hash.
  2248  func (bc *Blockchain) HasBlock(hash util.Uint256) bool {
  2249  	if bc.HeaderHashes.haveRecentHash(hash, bc.BlockHeight()) {
  2250  		return true
  2251  	}
  2252  
  2253  	if header, err := bc.GetHeader(hash); err == nil {
  2254  		return header.Index <= bc.BlockHeight()
  2255  	}
  2256  	return false
  2257  }
  2258  
  2259  // CurrentBlockHash returns the highest processed block hash.
  2260  func (bc *Blockchain) CurrentBlockHash() util.Uint256 {
  2261  	topBlock := bc.topBlock.Load()
  2262  	if topBlock != nil {
  2263  		tb := topBlock.(*block.Block)
  2264  		return tb.Hash()
  2265  	}
  2266  	return bc.GetHeaderHash(bc.BlockHeight())
  2267  }
  2268  
  2269  // BlockHeight returns the height/index of the highest block.
  2270  func (bc *Blockchain) BlockHeight() uint32 {
  2271  	return atomic.LoadUint32(&bc.blockHeight)
  2272  }
  2273  
  2274  // GetContractState returns contract by its script hash.
  2275  func (bc *Blockchain) GetContractState(hash util.Uint160) *state.Contract {
  2276  	contract, err := native.GetContract(bc.dao, hash)
  2277  	if contract == nil && !errors.Is(err, storage.ErrKeyNotFound) {
  2278  		bc.log.Warn("failed to get contract state", zap.Error(err))
  2279  	}
  2280  	return contract
  2281  }
  2282  
  2283  // GetContractScriptHash returns contract script hash by its ID.
  2284  func (bc *Blockchain) GetContractScriptHash(id int32) (util.Uint160, error) {
  2285  	return native.GetContractScriptHash(bc.dao, id)
  2286  }
  2287  
  2288  // GetNativeContractScriptHash returns native contract script hash by its name.
  2289  func (bc *Blockchain) GetNativeContractScriptHash(name string) (util.Uint160, error) {
  2290  	c := bc.contracts.ByName(name)
  2291  	if c != nil {
  2292  		return c.Metadata().Hash, nil
  2293  	}
  2294  	return util.Uint160{}, errors.New("unknown native contract")
  2295  }
  2296  
  2297  // GetNatives returns list of native contracts.
  2298  func (bc *Blockchain) GetNatives() []state.Contract {
  2299  	res := make([]state.Contract, 0, len(bc.contracts.Contracts))
  2300  	current := bc.getCurrentHF()
  2301  	for _, c := range bc.contracts.Contracts {
  2302  		activeIn := c.ActiveIn()
  2303  		if !(activeIn == nil || activeIn.Cmp(current) <= 0) {
  2304  			continue
  2305  		}
  2306  
  2307  		st := bc.GetContractState(c.Metadata().Hash)
  2308  		if st != nil { // Should never happen, but better safe than sorry.
  2309  			res = append(res, *st)
  2310  		}
  2311  	}
  2312  	return res
  2313  }
  2314  
  2315  // GetConfig returns the config stored in the blockchain.
  2316  func (bc *Blockchain) GetConfig() config.Blockchain {
  2317  	return bc.config
  2318  }
  2319  
  2320  // SubscribeForBlocks adds given channel to new block event broadcasting, so when
  2321  // there is a new block added to the chain you'll receive it via this channel.
  2322  // Make sure it's read from regularly as not reading these events might affect
  2323  // other Blockchain functions. Make sure you're not changing the received blocks,
  2324  // as it may affect the functionality of Blockchain and other subscribers.
  2325  func (bc *Blockchain) SubscribeForBlocks(ch chan *block.Block) {
  2326  	bc.subCh <- ch
  2327  }
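
        // A hedged usage sketch (not from the original source): subscribing for blocks
        // with a buffered channel and a dedicated reader goroutine, which keeps the
        // dispatcher from blocking as the doc comment above requires.
        //
        //	blockCh := make(chan *block.Block, 16)
        //	bc.SubscribeForBlocks(blockCh)
        //	go func() {
        //		for b := range blockCh {
        //			_ = b // Handle the block; do not modify it.
        //		}
        //	}()
        //	// ...
        //	bc.UnsubscribeFromBlocks(blockCh)
        //	close(blockCh)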
  2328  
  2329  // SubscribeForHeadersOfAddedBlocks adds given channel to new header event broadcasting, so
  2330  // when there is a new block added to the chain you'll receive its header via this
  2331  // channel. Make sure it's read from regularly as not reading these events might
  2332  // affect other Blockchain functions. Make sure you're not changing the received
  2333  // headers, as it may affect the functionality of Blockchain and other
  2334  // subscribers.
  2335  func (bc *Blockchain) SubscribeForHeadersOfAddedBlocks(ch chan *block.Header) {
  2336  	bc.subCh <- ch
  2337  }
  2338  
  2339  // SubscribeForTransactions adds given channel to new transaction event
  2340  // broadcasting, so when there is a new transaction added to the chain (in a
  2341  // block) you'll receive it via this channel. Make sure it's read from regularly
  2342  // as not reading these events might affect other Blockchain functions. Make sure
  2343  // you're not changing the received transactions, as it may affect the
  2344  // functionality of Blockchain and other subscribers.
  2345  func (bc *Blockchain) SubscribeForTransactions(ch chan *transaction.Transaction) {
  2346  	bc.subCh <- ch
  2347  }
  2348  
  2349  // SubscribeForNotifications adds given channel to new notifications event
  2350  // broadcasting, so when an in-block transaction execution generates a
  2351  // notification you'll receive it via this channel. Only notifications from
  2352  // successful transactions are broadcasted; if you're interested in failed
  2353  // transactions use SubscribeForExecutions instead. Make sure this channel is
  2354  // read from regularly as not reading these events might affect other Blockchain
  2355  // functions. Make sure you're not changing the received notification events, as
  2356  // it may affect the functionality of Blockchain and other subscribers.
  2357  func (bc *Blockchain) SubscribeForNotifications(ch chan *state.ContainedNotificationEvent) {
  2358  	bc.subCh <- ch
  2359  }
  2360  
  2361  // SubscribeForExecutions adds given channel to new transaction execution event
  2362  // broadcasting, so when an in-block transaction execution happens you'll receive
  2363  // the result of it via this channel. Make sure it's read from regularly as not
  2364  // reading these events might affect other Blockchain functions. Make sure you're
  2365  // not changing the received execution results, as it may affect the
  2366  // functionality of Blockchain and other subscribers.
  2367  func (bc *Blockchain) SubscribeForExecutions(ch chan *state.AppExecResult) {
  2368  	bc.subCh <- ch
  2369  }
  2370  
  2371  // UnsubscribeFromBlocks unsubscribes given channel from new block notifications,
  2372  // you can close it afterwards. Passing non-subscribed channel is a no-op, but
  2373  // the method can read from this channel (discarding any read data).
  2374  func (bc *Blockchain) UnsubscribeFromBlocks(ch chan *block.Block) {
  2375  unsubloop:
  2376  	for {
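        		// Keep draining ch while trying to send the unsubscription request:
        		// the dispatcher may be blocked on sending into ch, so reading here
        		// avoids a deadlock between the two goroutines. The same pattern is
        		// used by the other Unsubscribe* helpers below.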
  2377  		select {
  2378  		case <-ch:
  2379  		case bc.unsubCh <- ch:
  2380  			break unsubloop
  2381  		}
  2382  	}
  2383  }
  2384  
  2385  // UnsubscribeFromHeadersOfAddedBlocks unsubscribes given channel from new
  2386  // block's header notifications, you can close it afterwards. Passing
  2387  // non-subscribed channel is a no-op, but the method can read from this
  2388  // channel (discarding any read data).
  2389  func (bc *Blockchain) UnsubscribeFromHeadersOfAddedBlocks(ch chan *block.Header) {
  2390  unsubloop:
  2391  	for {
  2392  		select {
  2393  		case <-ch:
  2394  		case bc.unsubCh <- ch:
  2395  			break unsubloop
  2396  		}
  2397  	}
  2398  }
  2399  
  2400  // UnsubscribeFromTransactions unsubscribes given channel from new transaction
  2401  // notifications, you can close it afterwards. Passing non-subscribed channel is
  2402  // a no-op, but the method can read from this channel (discarding any read data).
  2403  func (bc *Blockchain) UnsubscribeFromTransactions(ch chan *transaction.Transaction) {
  2404  unsubloop:
  2405  	for {
  2406  		select {
  2407  		case <-ch:
  2408  		case bc.unsubCh <- ch:
  2409  			break unsubloop
  2410  		}
  2411  	}
  2412  }
  2413  
  2414  // UnsubscribeFromNotifications unsubscribes given channel from new
  2415  // execution-generated notifications, you can close it afterwards. Passing
  2416  // non-subscribed channel is a no-op, but the method can read from this channel
  2417  // (discarding any read data).
  2418  func (bc *Blockchain) UnsubscribeFromNotifications(ch chan *state.ContainedNotificationEvent) {
  2419  unsubloop:
  2420  	for {
  2421  		select {
  2422  		case <-ch:
  2423  		case bc.unsubCh <- ch:
  2424  			break unsubloop
  2425  		}
  2426  	}
  2427  }
  2428  
  2429  // UnsubscribeFromExecutions unsubscribes given channel from new execution
  2430  // notifications, you can close it afterwards. Passing non-subscribed channel is
  2431  // a no-op, but the method can read from this channel (discarding any read data).
  2432  func (bc *Blockchain) UnsubscribeFromExecutions(ch chan *state.AppExecResult) {
  2433  unsubloop:
  2434  	for {
  2435  		select {
  2436  		case <-ch:
  2437  		case bc.unsubCh <- ch:
  2438  			break unsubloop
  2439  		}
  2440  	}
  2441  }
  2442  
  2443  // CalculateClaimable calculates the amount of GAS generated by owning specified
  2444  // amount of NEO between specified blocks.
  2445  func (bc *Blockchain) CalculateClaimable(acc util.Uint160, endHeight uint32) (*big.Int, error) {
  2446  	nextBlock, err := bc.getFakeNextBlock(bc.BlockHeight() + 1)
  2447  	if err != nil {
  2448  		return nil, err
  2449  	}
  2450  	ic := bc.newInteropContext(trigger.Application, bc.dao, nextBlock, nil)
  2451  	return bc.contracts.NEO.CalculateBonus(ic, acc, endHeight)
  2452  }
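
        // An illustrative sketch (assuming acc is a util.Uint160 holding NEO): the
        // GAS claimable as of the next block can be estimated like this.
        //
        //	gas, err := bc.CalculateClaimable(acc, bc.BlockHeight()+1)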
  2453  
  2454  // FeePerByte returns transaction network fee per byte.
  2455  func (bc *Blockchain) FeePerByte() int64 {
  2456  	return bc.contracts.Policy.GetFeePerByteInternal(bc.dao)
  2457  }
  2458  
  2459  // GetMemPool returns the memory pool of the blockchain.
  2460  func (bc *Blockchain) GetMemPool() *mempool.Pool {
  2461  	return bc.memPool
  2462  }
  2463  
  2464  // ApplyPolicyToTxSet applies configured policies to the given transaction set. It
  2465  // expects the slice to be ordered by fee and returns a subslice of it.
  2466  func (bc *Blockchain) ApplyPolicyToTxSet(txes []*transaction.Transaction) []*transaction.Transaction {
  2467  	maxTx := bc.config.MaxTransactionsPerBlock
  2468  	if maxTx != 0 && len(txes) > int(maxTx) {
  2469  		txes = txes[:maxTx]
  2470  	}
  2471  	maxBlockSize := bc.config.MaxBlockSize
  2472  	maxBlockSysFee := bc.config.MaxBlockSystemFee
  2473  	oldVC := bc.knownValidatorsCount.Load()
  2474  	defaultWitness := bc.defaultBlockWitness.Load()
  2475  	curVC := bc.config.GetNumOfCNs(bc.BlockHeight() + 1)
  2476  	if oldVC == nil || oldVC != curVC {
  2477  		m := smartcontract.GetDefaultHonestNodeCount(curVC)
  2478  		verification, _ := smartcontract.CreateDefaultMultiSigRedeemScript(bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao))
  2479  		defaultWitness = transaction.Witness{
  2480  			InvocationScript:   make([]byte, 66*m),
  2481  			VerificationScript: verification,
  2482  		}
  2483  		bc.knownValidatorsCount.Store(curVC)
  2484  		bc.defaultBlockWitness.Store(defaultWitness)
  2485  	}
  2486  	var (
  2487  		b           = &block.Block{Header: block.Header{Script: defaultWitness.(transaction.Witness)}}
  2488  		blockSize   = uint32(b.GetExpectedBlockSizeWithoutTransactions(len(txes)))
  2489  		blockSysFee int64
  2490  	)
  2491  	for i, tx := range txes {
  2492  		blockSize += uint32(tx.Size())
  2493  		blockSysFee += tx.SystemFee
  2494  		if blockSize > maxBlockSize || blockSysFee > maxBlockSysFee {
  2495  			txes = txes[:i]
  2496  			break
  2497  		}
  2498  	}
  2499  	return txes
  2500  }
  2501  
  2502  // Various errors that could be returned upon header verification.
  2503  var (
  2504  	ErrHdrHashMismatch     = errors.New("previous header hash doesn't match")
  2505  	ErrHdrIndexMismatch    = errors.New("previous header index doesn't match")
  2506  	ErrHdrInvalidTimestamp = errors.New("block is not newer than the previous one")
  2507  	ErrHdrStateRootSetting = errors.New("state root setting mismatch")
  2508  	ErrHdrInvalidStateRoot = errors.New("state root for previous block is invalid")
  2509  )
  2510  
  2511  func (bc *Blockchain) verifyHeader(currHeader, prevHeader *block.Header) error {
  2512  	if bc.config.StateRootInHeader {
  2513  		if bc.stateRoot.CurrentLocalHeight() == prevHeader.Index {
  2514  			if sr := bc.stateRoot.CurrentLocalStateRoot(); currHeader.PrevStateRoot != sr {
  2515  				return fmt.Errorf("%w: %s != %s",
  2516  					ErrHdrInvalidStateRoot, currHeader.PrevStateRoot.StringLE(), sr.StringLE())
  2517  			}
  2518  		}
  2519  	}
  2520  	if prevHeader.Hash() != currHeader.PrevHash {
  2521  		return ErrHdrHashMismatch
  2522  	}
  2523  	if prevHeader.Index+1 != currHeader.Index {
  2524  		return ErrHdrIndexMismatch
  2525  	}
  2526  	if prevHeader.Timestamp >= currHeader.Timestamp {
  2527  		return ErrHdrInvalidTimestamp
  2528  	}
  2529  	return bc.verifyHeaderWitnesses(currHeader, prevHeader)
  2530  }
  2531  
  2532  // Various errors that could be returned upon verification.
  2533  var (
  2534  	ErrTxExpired         = errors.New("transaction has expired")
  2535  	ErrInsufficientFunds = errors.New("insufficient funds")
  2536  	ErrTxSmallNetworkFee = errors.New("too small network fee")
  2537  	ErrTxTooBig          = errors.New("too big transaction")
  2538  	ErrMemPoolConflict   = errors.New("invalid transaction due to conflicts with the memory pool")
  2539  	ErrInvalidScript     = errors.New("invalid script")
  2540  	ErrInvalidAttribute  = errors.New("invalid attribute")
  2541  )
  2542  
  2543  // verifyAndPoolTx verifies whether a transaction is bona fide and tries
  2544  // to add it to the given mempool.
  2545  func (bc *Blockchain) verifyAndPoolTx(t *transaction.Transaction, pool *mempool.Pool, feer mempool.Feer, data ...any) error {
  2546  	// This code can technically be moved out of here, because it doesn't
  2547  	// really require a chain lock.
  2548  	err := vm.IsScriptCorrect(t.Script, nil)
  2549  	if err != nil {
  2550  		return fmt.Errorf("%w: %w", ErrInvalidScript, err)
  2551  	}
  2552  
  2553  	height := bc.BlockHeight()
  2554  	isPartialTx := data != nil
  2555  	if t.ValidUntilBlock <= height || !isPartialTx && t.ValidUntilBlock > height+bc.config.MaxValidUntilBlockIncrement {
  2556  		return fmt.Errorf("%w: ValidUntilBlock = %d, current height = %d", ErrTxExpired, t.ValidUntilBlock, height)
  2557  	}
  2558  	// Policying.
  2559  	if err := bc.contracts.Policy.CheckPolicy(bc.dao, t); err != nil {
  2560  		// Wrap both the sentinel and the specific policy error.
  2561  		return fmt.Errorf("%w: %w", ErrPolicy, err)
  2562  	}
  2563  	if t.SystemFee > bc.config.MaxBlockSystemFee {
  2564  		return fmt.Errorf("%w: too big system fee (%d > MaxBlockSystemFee %d)", ErrPolicy, t.SystemFee, bc.config.MaxBlockSystemFee)
  2565  	}
  2566  	size := t.Size()
  2567  	if size > transaction.MaxTransactionSize {
  2568  		return fmt.Errorf("%w: (%d > MaxTransactionSize %d)", ErrTxTooBig, size, transaction.MaxTransactionSize)
  2569  	}
  2570  	needNetworkFee := int64(size)*bc.FeePerByte() + bc.CalculateAttributesFee(t)
  2571  	netFee := t.NetworkFee - needNetworkFee
  2572  	if netFee < 0 {
  2573  		return fmt.Errorf("%w: net fee is %v, need %v", ErrTxSmallNetworkFee, t.NetworkFee, needNetworkFee)
  2574  	}
  2575  	// Check that the current tx wasn't included in the Conflicts attributes of some other transaction that is already on the chain.
  2576  	if err := bc.dao.HasTransaction(t.Hash(), t.Signers, height, bc.config.MaxTraceableBlocks); err != nil {
  2577  		switch {
  2578  		case errors.Is(err, dao.ErrAlreadyExists):
  2579  			return ErrAlreadyExists
  2580  		case errors.Is(err, dao.ErrHasConflicts):
  2581  			return fmt.Errorf("blockchain: %w", ErrHasConflicts)
  2582  		default:
  2583  			return err
  2584  		}
  2585  	}
  2586  	err = bc.verifyTxWitnesses(t, nil, isPartialTx, netFee)
  2587  	if err != nil {
  2588  		return err
  2589  	}
  2590  	if err := bc.verifyTxAttributes(bc.dao, t, isPartialTx); err != nil {
  2591  		return err
  2592  	}
  2593  	err = pool.Add(t, feer, data...)
  2594  	if err != nil {
  2595  		switch {
  2596  		case errors.Is(err, mempool.ErrConflict):
  2597  			return ErrMemPoolConflict
  2598  		case errors.Is(err, mempool.ErrDup):
  2599  			return ErrAlreadyInPool
  2600  		case errors.Is(err, mempool.ErrInsufficientFunds):
  2601  			return ErrInsufficientFunds
  2602  		case errors.Is(err, mempool.ErrOOM):
  2603  			return ErrOOM
  2604  		case errors.Is(err, mempool.ErrConflictsAttribute):
  2605  			return fmt.Errorf("mempool: %w: %w", ErrHasConflicts, err)
  2606  		default:
  2607  			return err
  2608  		}
  2609  	}
  2610  
  2611  	return nil
  2612  }
  2613  
  2614  // CalculateAttributesFee returns network fee for all transaction attributes that should be
  2615  // paid according to native Policy.
  2616  func (bc *Blockchain) CalculateAttributesFee(tx *transaction.Transaction) int64 {
  2617  	var feeSum int64
  2618  	for _, attr := range tx.Attributes {
  2619  		base := bc.contracts.Policy.GetAttributeFeeInternal(bc.dao, attr.Type)
  2620  		switch attr.Type {
  2621  		case transaction.ConflictsT:
  2622  			feeSum += base * int64(len(tx.Signers))
  2623  		case transaction.NotaryAssistedT:
  2624  			if bc.P2PSigExtensionsEnabled() {
  2625  				na := attr.Value.(*transaction.NotaryAssisted)
  2626  				feeSum += base * (int64(na.NKeys) + 1)
  2627  			}
  2628  		default:
  2629  			feeSum += base
  2630  		}
  2631  	}
  2632  	return feeSum
  2633  }
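
        // A worked example (illustrative numbers, not from the source): if the base
        // Conflicts fee is F, a transaction with three signers and one Conflicts
        // attribute contributes 3*F to the sum above, while a NotaryAssisted attribute
        // with NKeys=4 contributes (4+1) times its base fee when P2PSigExtensions are
        // enabled; any other attribute type simply adds its base fee once.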
  2634  
  2635  func (bc *Blockchain) verifyTxAttributes(d *dao.Simple, tx *transaction.Transaction, isPartialTx bool) error {
  2636  	for i := range tx.Attributes {
  2637  		switch attrType := tx.Attributes[i].Type; attrType {
  2638  		case transaction.HighPriority:
  2639  			h := bc.contracts.NEO.GetCommitteeAddress(d)
  2640  			if !tx.HasSigner(h) {
  2641  				return fmt.Errorf("%w: high priority tx is not signed by committee", ErrInvalidAttribute)
  2642  			}
  2643  		case transaction.OracleResponseT:
  2644  			h, err := bc.contracts.Oracle.GetScriptHash(bc.dao)
  2645  			if err != nil || h.Equals(util.Uint160{}) {
  2646  				return fmt.Errorf("%w: %w", ErrInvalidAttribute, err)
  2647  			}
  2648  			hasOracle := false
  2649  			for i := range tx.Signers {
  2650  				if tx.Signers[i].Scopes != transaction.None {
  2651  					return fmt.Errorf("%w: oracle tx has invalid signer scope", ErrInvalidAttribute)
  2652  				}
  2653  				if tx.Signers[i].Account.Equals(h) {
  2654  					hasOracle = true
  2655  				}
  2656  			}
  2657  			if !hasOracle {
  2658  				return fmt.Errorf("%w: oracle tx is not signed by oracle nodes", ErrInvalidAttribute)
  2659  			}
  2660  			if !bytes.Equal(tx.Script, bc.contracts.Oracle.GetOracleResponseScript()) {
  2661  				return fmt.Errorf("%w: oracle tx has invalid script", ErrInvalidAttribute)
  2662  			}
  2663  			resp := tx.Attributes[i].Value.(*transaction.OracleResponse)
  2664  			req, err := bc.contracts.Oracle.GetRequestInternal(bc.dao, resp.ID)
  2665  			if err != nil {
  2666  				return fmt.Errorf("%w: oracle tx points to invalid request: %w", ErrInvalidAttribute, err)
  2667  			}
  2668  			if uint64(tx.NetworkFee+tx.SystemFee) < req.GasForResponse {
  2669  				return fmt.Errorf("%w: oracle tx has insufficient gas", ErrInvalidAttribute)
  2670  			}
  2671  		case transaction.NotValidBeforeT:
  2672  			nvb := tx.Attributes[i].Value.(*transaction.NotValidBefore).Height
  2673  			curHeight := bc.BlockHeight()
  2674  			if isPartialTx {
  2675  				maxNVBDelta, err := bc.GetMaxNotValidBeforeDelta()
  2676  				if err != nil {
  2677  					return fmt.Errorf("%w: failed to retrieve MaxNotValidBeforeDelta value from native Notary contract: %w", ErrInvalidAttribute, err)
  2678  				}
  2679  				if curHeight+maxNVBDelta < nvb {
  2680  					return fmt.Errorf("%w: NotValidBefore (%d) bigger than MaxNVBDelta (%d) allows at height %d", ErrInvalidAttribute, nvb, maxNVBDelta, curHeight)
  2681  				}
  2682  				if nvb+maxNVBDelta < tx.ValidUntilBlock {
  2683  					return fmt.Errorf("%w: NotValidBefore (%d) set more than MaxNVBDelta (%d) away from VUB (%d)", ErrInvalidAttribute, nvb, maxNVBDelta, tx.ValidUntilBlock)
  2684  				}
  2685  			} else {
  2686  				if curHeight < nvb {
  2687  					return fmt.Errorf("%w: transaction is not yet valid: NotValidBefore = %d, current height = %d", ErrInvalidAttribute, nvb, curHeight)
  2688  				}
  2689  			}
  2690  		case transaction.ConflictsT:
  2691  			conflicts := tx.Attributes[i].Value.(*transaction.Conflicts)
  2692  			// Only a fully-qualified dao.ErrAlreadyExists error matters here, thus we
  2693  			// can safely omit the signers, current index and MTB arguments of the HasTransaction call to improve performance a bit.
  2694  			if err := bc.dao.HasTransaction(conflicts.Hash, nil, 0, 0); errors.Is(err, dao.ErrAlreadyExists) {
  2695  				return fmt.Errorf("%w: conflicting transaction %s is already on chain", ErrInvalidAttribute, conflicts.Hash.StringLE())
  2696  			}
  2697  		case transaction.NotaryAssistedT:
  2698  			if !bc.config.P2PSigExtensions {
  2699  				return fmt.Errorf("%w: NotaryAssisted attribute was found, but P2PSigExtensions are disabled", ErrInvalidAttribute)
  2700  			}
  2701  			if !tx.HasSigner(bc.contracts.Notary.Hash) {
  2702  				return fmt.Errorf("%w: NotaryAssisted attribute was found, but transaction is not signed by the Notary native contract", ErrInvalidAttribute)
  2703  			}
  2704  		default:
  2705  			if !bc.config.ReservedAttributes && attrType >= transaction.ReservedLowerBound && attrType <= transaction.ReservedUpperBound {
  2706  				return fmt.Errorf("%w: attribute of reserved type was found, but ReservedAttributes are disabled", ErrInvalidAttribute)
  2707  			}
  2708  		}
  2709  	}
  2710  	return nil
  2711  }
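
// Illustrative numbers for the NotValidBefore checks above in the partially-filled
// transaction case, assuming a hypothetical MaxNotValidBeforeDelta of 140:
//
//	curHeight := uint32(1000)
//	nvb := uint32(1100) // accepted: curHeight+140 = 1140 >= 1100
//	vub := uint32(1200) // accepted: nvb+140 = 1240 >= 1200
//	// nvb = 1150 would be rejected (1140 < 1150), and vub = 1300 would be
//	// rejected as well (1240 < 1300).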
  2712  
  2713  // IsTxStillRelevant is a callback for mempool transaction filtering after the
  2714  // new block addition. It returns false for transactions added by the new block
  2715  // (passed via txpool) and does witness reverification for non-standard
  2716  // contracts. It operates under the assumption that full transaction verification
  2717  // was already done so we don't need to check basic things like size, input/output
  2718  // correctness, presence in blocks before the new one, etc.
  2719  func (bc *Blockchain) IsTxStillRelevant(t *transaction.Transaction, txpool *mempool.Pool, isPartialTx bool) bool {
  2720  	var (
  2721  		recheckWitness bool
  2722  		curheight      = bc.BlockHeight()
  2723  	)
  2724  
  2725  	if t.ValidUntilBlock <= curheight {
  2726  		return false
  2727  	}
  2728  	if txpool == nil {
  2729  		if bc.dao.HasTransaction(t.Hash(), t.Signers, curheight, bc.config.MaxTraceableBlocks) != nil {
  2730  			return false
  2731  		}
  2732  	} else if txpool.HasConflicts(t, bc) {
  2733  		return false
  2734  	}
  2735  	if err := bc.verifyTxAttributes(bc.dao, t, isPartialTx); err != nil {
  2736  		return false
  2737  	}
  2738  	for i := range t.Scripts {
  2739  		if !vm.IsStandardContract(t.Scripts[i].VerificationScript) {
  2740  			recheckWitness = true
  2741  			break
  2742  		}
  2743  	}
  2744  	if recheckWitness {
  2745  		return bc.verifyTxWitnesses(t, nil, isPartialTx) == nil
  2746  	}
  2747  	return true
  2748  }
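
// A minimal re-filtering sketch, assuming bc is an initialized *Blockchain,
// newBlockPool contains the transactions of the freshly persisted block and
// candidates holds previously pooled transactions to be re-checked:
//
//	for _, tx := range candidates {
//		if !bc.IsTxStillRelevant(tx, newBlockPool, false) {
//			// tx can no longer make it into a block, drop it from the pool.
//		}
//	}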
  2749  
  2750  // VerifyTx verifies whether the transaction is bona fide relative to the
  2751  // current blockchain state. Note that this verification is completely isolated
  2752  // from the main node's mempool.
  2753  func (bc *Blockchain) VerifyTx(t *transaction.Transaction) error {
  2754  	var mp = mempool.New(1, 0, false, nil)
  2755  	bc.lock.RLock()
  2756  	defer bc.lock.RUnlock()
  2757  	return bc.verifyAndPoolTx(t, mp, bc)
  2758  }
  2759  
  2760  // PoolTx verifies and tries to add the given transaction into the mempool. If no
  2761  // pool is given, the default mempool is used. Passing more than one pool is not supported.
  2762  func (bc *Blockchain) PoolTx(t *transaction.Transaction, pools ...*mempool.Pool) error {
  2763  	var pool = bc.memPool
  2764  
  2765  	bc.lock.RLock()
  2766  	defer bc.lock.RUnlock()
  2767  	// Programmer error.
  2768  	if len(pools) > 1 {
  2769  		panic("too many pools given")
  2770  	}
  2771  	if len(pools) == 1 {
  2772  		pool = pools[0]
  2773  	}
  2774  	return bc.verifyAndPoolTx(t, pool, bc)
  2775  }
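
// A minimal submission sketch, assuming bc is an initialized *Blockchain and tx is
// a fully signed transaction (ErrAlreadyExists, ErrAlreadyInPool and the other
// mapped errors are package-level values):
//
//	switch err := bc.PoolTx(tx); {
//	case err == nil:
//		// tx was accepted into the default mempool.
//	case errors.Is(err, ErrAlreadyExists), errors.Is(err, ErrAlreadyInPool):
//		// Already known, safe to ignore.
//	default:
//		// Rejected: fees, conflicts, OOM, policy, etc.
//	}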
  2776  
  2777  // PoolTxWithData verifies and tries to add the given transaction with additional data into the mempool.
  2778  func (bc *Blockchain) PoolTxWithData(t *transaction.Transaction, data any, mp *mempool.Pool, feer mempool.Feer, verificationFunction func(tx *transaction.Transaction, data any) error) error {
  2779  	bc.lock.RLock()
  2780  	defer bc.lock.RUnlock()
  2781  
  2782  	if verificationFunction != nil {
  2783  		err := verificationFunction(t, data)
  2784  		if err != nil {
  2785  			return err
  2786  		}
  2787  	}
  2788  	return bc.verifyAndPoolTx(t, mp, feer, data)
  2789  }
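
// A minimal sketch of pooling a transaction together with auxiliary data (the
// shape used, for example, for P2P notary requests), assuming mp and feer were
// prepared by the caller:
//
//	err := bc.PoolTxWithData(mainTx, extraData, mp, feer,
//		func(tx *transaction.Transaction, data any) error {
//			// Caller-specific checks of data against tx go here.
//			return nil
//		})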
  2790  
  2791  // GetCommittee returns the sorted list of public keys of nodes in committee.
  2792  func (bc *Blockchain) GetCommittee() (keys.PublicKeys, error) {
  2793  	pubs := bc.contracts.NEO.GetCommitteeMembers(bc.dao)
  2794  	sort.Sort(pubs)
  2795  	return pubs, nil
  2796  }
  2797  
  2798  // ComputeNextBlockValidators returns the current validators. The validators list
  2799  // returned from this method is updated once per CommitteeSize number of blocks.
  2800  // For the last block of a dBFT epoch this method returns the list of validators
  2801  // recalculated from the latest relevant information about NEO votes; in this case
  2802  // the list of validators may differ from the one returned by GetNextBlockValidators.
  2803  // For any other block of a dBFT epoch this method returns the same list as
  2804  // GetNextBlockValidators.
  2805  func (bc *Blockchain) ComputeNextBlockValidators() []*keys.PublicKey {
  2806  	return bc.contracts.NEO.ComputeNextBlockValidators(bc.dao)
  2807  }
  2808  
  2809  // GetNextBlockValidators returns the next block validators. The validators list
  2810  // returned from this method contains the sorted top NumOfCNs public keys from
  2811  // the committee of the current dBFT round (that was calculated once per
  2812  // CommitteeSize number of blocks); thus, the validators list returned from this
  2813  // method is updated once per committee-size number of blocks, but not on
  2814  // every block.
  2815  func (bc *Blockchain) GetNextBlockValidators() ([]*keys.PublicKey, error) {
  2816  	return bc.contracts.NEO.GetNextBlockValidatorsInternal(bc.dao), nil
  2817  }
  2818  
  2819  // GetEnrollments returns all registered validators.
  2820  func (bc *Blockchain) GetEnrollments() ([]state.Validator, error) {
  2821  	return bc.contracts.NEO.GetCandidates(bc.dao)
  2822  }
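
// Read-only governance queries, assuming bc is an initialized *Blockchain:
//
//	committee, _ := bc.GetCommittee()            // sorted committee keys
//	validators, _ := bc.GetNextBlockValidators() // validators of the current dBFT round
//	candidates, _ := bc.GetEnrollments()         // all registered candidates with their votes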
  2823  
  2824  // GetTestVM returns an interop context with VM set up for a test run.
  2825  func (bc *Blockchain) GetTestVM(t trigger.Type, tx *transaction.Transaction, b *block.Block) (*interop.Context, error) {
  2826  	if b == nil {
  2827  		var err error
  2828  		h := bc.BlockHeight() + 1
  2829  		b, err = bc.getFakeNextBlock(h)
  2830  		if err != nil {
  2831  			return nil, fmt.Errorf("failed to create fake block for height %d: %w", h, err)
  2832  		}
  2833  	}
  2834  	systemInterop := bc.newInteropContext(t, bc.dao, b, tx)
  2835  	_ = systemInterop.SpawnVM() // All the other code assumes that the VM is ready.
  2836  	return systemInterop, nil
  2837  }
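
// A test-invocation sketch, assuming bc is an initialized *Blockchain and script
// is a NeoVM script to be executed against the current state (LoadScriptWithFlags
// is the regular vm.VM loader):
//
//	ic, err := bc.GetTestVM(trigger.Application, nil, nil)
//	if err == nil {
//		ic.VM.LoadScriptWithFlags(script, callflag.All)
//		err = ic.Exec() // the result stays on ic.VM.Estack(), GAS spent is ic.VM.GasConsumed()
//	}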
  2838  
  2839  // GetTestHistoricVM returns an interop context with VM set up for a test run.
  2840  func (bc *Blockchain) GetTestHistoricVM(t trigger.Type, tx *transaction.Transaction, nextBlockHeight uint32) (*interop.Context, error) {
  2841  	if bc.config.Ledger.KeepOnlyLatestState {
  2842  		return nil, errors.New("only latest state is supported")
  2843  	}
  2844  	b, err := bc.getFakeNextBlock(nextBlockHeight)
  2845  	if err != nil {
  2846  		return nil, fmt.Errorf("failed to create fake block for height %d: %w", nextBlockHeight, err)
  2847  	}
  2848  	var mode = mpt.ModeAll
  2849  	if bc.config.Ledger.RemoveUntraceableBlocks {
  2850  		if b.Index < bc.BlockHeight()-bc.config.MaxTraceableBlocks {
  2851  			return nil, fmt.Errorf("state for height %d is outdated and removed from the storage", b.Index)
  2852  		}
  2853  		mode |= mpt.ModeGCFlag
  2854  	}
  2855  	if b.Index < 1 || b.Index > bc.BlockHeight()+1 {
  2856  		return nil, fmt.Errorf("unsupported historic chain's height: requested state for %d, chain height %d", b.Index, bc.blockHeight)
  2857  	}
  2858  	// Assuming that the N-th block is being processed during the historic call, the historic invocation should be based on the storage state of height N-1.
  2859  	sr, err := bc.stateRoot.GetStateRoot(b.Index - 1)
  2860  	if err != nil {
  2861  		return nil, fmt.Errorf("failed to retrieve stateroot for height %d: %w", b.Index, err)
  2862  	}
  2863  	s := mpt.NewTrieStore(sr.Root, mode, storage.NewPrivateMemCachedStore(bc.dao.Store))
  2864  	dTrie := dao.NewSimple(s, bc.config.StateRootInHeader)
  2865  	dTrie.Version = bc.dao.Version
  2866  	// Initialize native cache before passing DAO to interop context constructor, because
  2867  	// the constructor will call BaseExecFee/StoragePrice policy methods on the passed DAO.
  2868  	err = bc.initializeNativeCache(b.Index, dTrie)
  2869  	if err != nil {
  2870  		return nil, fmt.Errorf("failed to initialize native cache backed by historic DAO: %w", err)
  2871  	}
  2872  	systemInterop := bc.newInteropContext(t, dTrie, b, tx)
  2873  	_ = systemInterop.SpawnVM() // All the other code assumes that the VM is ready.
  2874  	return systemInterop, nil
  2875  }
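
// A historic-invocation sketch, assuming we want to run script against the state
// that block 100 would have been executed upon (i.e. the stateroot of height 99):
//
//	ic, err := bc.GetTestHistoricVM(trigger.Application, nil, 100)
//	if err == nil {
//		ic.VM.LoadScriptWithFlags(script, callflag.All)
//		err = ic.Exec()
//	}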
  2876  
  2877  // getFakeNextBlock returns a fake block with the specified index and a pre-filled Timestamp field.
  2878  func (bc *Blockchain) getFakeNextBlock(nextBlockHeight uint32) (*block.Block, error) {
  2879  	b := block.New(bc.config.StateRootInHeader)
  2880  	b.Index = nextBlockHeight
  2881  	hdr, err := bc.GetHeader(bc.GetHeaderHash(nextBlockHeight - 1))
  2882  	if err != nil {
  2883  		return nil, err
  2884  	}
  2885  	b.Timestamp = hdr.Timestamp + uint64(bc.config.TimePerBlock/time.Millisecond)
  2886  	return b, nil
  2887  }
  2888  
  2889  // Various witness verification errors.
  2890  var (
  2891  	ErrWitnessHashMismatch         = errors.New("witness hash mismatch")
  2892  	ErrNativeContractWitness       = errors.New("native contract witness must have empty verification script")
  2893  	ErrVerificationFailed          = errors.New("signature check failed")
  2894  	ErrInvalidInvocationScript     = errors.New("invalid invocation script")
  2895  	ErrInvalidSignature            = fmt.Errorf("%w: invalid signature", ErrVerificationFailed)
  2896  	ErrInvalidVerificationScript   = errors.New("invalid verification script")
  2897  	ErrUnknownVerificationContract = errors.New("unknown verification contract")
  2898  	ErrInvalidVerificationContract = errors.New("verification contract is missing `verify` method or `verify` method has unexpected return value")
  2899  )
  2900  
  2901  // InitVerificationContext initializes context for witness check.
  2902  func (bc *Blockchain) InitVerificationContext(ic *interop.Context, hash util.Uint160, witness *transaction.Witness) error {
  2903  	if len(witness.VerificationScript) != 0 {
  2904  		if witness.ScriptHash() != hash {
  2905  			return ErrWitnessHashMismatch
  2906  		}
  2907  		if bc.contracts.ByHash(hash) != nil {
  2908  			return ErrNativeContractWitness
  2909  		}
  2910  		err := vm.IsScriptCorrect(witness.VerificationScript, nil)
  2911  		if err != nil {
  2912  			return fmt.Errorf("%w: %w", ErrInvalidVerificationScript, err)
  2913  		}
  2914  		ic.VM.LoadScriptWithHash(witness.VerificationScript, hash, callflag.ReadOnly)
  2915  	} else {
  2916  		cs, err := ic.GetContract(hash)
  2917  		if err != nil {
  2918  			return ErrUnknownVerificationContract
  2919  		}
  2920  		md := cs.Manifest.ABI.GetMethod(manifest.MethodVerify, -1)
  2921  		if md == nil || md.ReturnType != smartcontract.BoolType {
  2922  			return ErrInvalidVerificationContract
  2923  		}
  2924  		verifyOffset := md.Offset
  2925  		initOffset := -1
  2926  		md = cs.Manifest.ABI.GetMethod(manifest.MethodInit, 0)
  2927  		if md != nil {
  2928  			initOffset = md.Offset
  2929  		}
  2930  		ic.Invocations[cs.Hash]++
  2931  		ic.VM.LoadNEFMethod(&cs.NEF, util.Uint160{}, hash, callflag.ReadOnly,
  2932  			true, verifyOffset, initOffset, nil)
  2933  	}
  2934  	if len(witness.InvocationScript) != 0 {
  2935  		err := vm.IsScriptCorrect(witness.InvocationScript, nil)
  2936  		if err != nil {
  2937  			return fmt.Errorf("%w: %w", ErrInvalidInvocationScript, err)
  2938  		}
  2939  		ic.VM.LoadScript(witness.InvocationScript)
  2940  	}
  2941  	return nil
  2942  }
  2943  
  2944  // VerifyWitness checks that w is a correct witness for c signed by h. It returns
  2945  // the amount of GAS consumed during verification and an error.
  2946  func (bc *Blockchain) VerifyWitness(h util.Uint160, c hash.Hashable, w *transaction.Witness, gas int64) (int64, error) {
  2947  	ic := bc.newInteropContext(trigger.Verification, bc.dao, nil, nil)
  2948  	ic.Container = c
  2949  	if tx, ok := c.(*transaction.Transaction); ok {
  2950  		ic.Tx = tx
  2951  	}
  2952  	return bc.verifyHashAgainstScript(h, w, ic, gas)
  2953  }
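
// A standalone witness check sketch, assuming tx is a signed transaction whose
// first signer's witness should be verified within the Policy-bounded GAS limit:
//
//	gasSpent, err := bc.VerifyWitness(tx.Signers[0].Account, tx,
//		&tx.Scripts[0], bc.GetMaxVerificationGAS())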
  2954  
  2955  // verifyHashAgainstScript verifies the given hash against the given witness and returns the amount of GAS consumed.
  2956  func (bc *Blockchain) verifyHashAgainstScript(hash util.Uint160, witness *transaction.Witness, interopCtx *interop.Context, gas int64) (int64, error) {
  2957  	gasPolicy := bc.contracts.Policy.GetMaxVerificationGas(interopCtx.DAO)
  2958  	if gas > gasPolicy {
  2959  		gas = gasPolicy
  2960  	}
  2961  
  2962  	vm := interopCtx.SpawnVM()
  2963  	vm.GasLimit = gas
  2964  	if err := bc.InitVerificationContext(interopCtx, hash, witness); err != nil {
  2965  		return 0, err
  2966  	}
  2967  	err := interopCtx.Exec()
  2968  	if vm.HasFailed() {
  2969  		return 0, fmt.Errorf("%w: vm execution has failed: %w", ErrVerificationFailed, err)
  2970  	}
  2971  	estack := vm.Estack()
  2972  	if estack.Len() > 0 {
  2973  		resEl := estack.Pop()
  2974  		res, err := resEl.Item().TryBool()
  2975  		if err != nil {
  2976  			return 0, fmt.Errorf("%w: invalid return value", ErrVerificationFailed)
  2977  		}
  2978  		if vm.Estack().Len() != 0 {
  2979  			return 0, fmt.Errorf("%w: expected exactly one returned value", ErrVerificationFailed)
  2980  		}
  2981  		if !res {
  2982  			return vm.GasConsumed(), ErrInvalidSignature
  2983  		}
  2984  	} else {
  2985  		return 0, fmt.Errorf("%w: no result returned from the script", ErrVerificationFailed)
  2986  	}
  2987  	return vm.GasConsumed(), nil
  2988  }
  2989  
  2990  // verifyTxWitnesses verifies the scripts (witnesses) that come with a given
  2991  // transaction. It can reorder them by ScriptHash, because that's required to
  2992  // match a slice of script hashes from the Blockchain. Block parameter
  2993  // is used for easy interop access and can be omitted for transactions that are
  2994  // not yet added into any block. verificationFee argument can be provided to
  2995  // restrict the maximum amount of GAS allowed to spend on transaction
  2996  // verification.
  2997  // Golang implementation of VerifyWitnesses method in C# (https://github.com/neo-project/neo/blob/master/neo/SmartContract/Helper.cs#L87).
  2998  func (bc *Blockchain) verifyTxWitnesses(t *transaction.Transaction, block *block.Block, isPartialTx bool, verificationFee ...int64) error {
  2999  	interopCtx := bc.newInteropContext(trigger.Verification, bc.dao, block, t)
  3000  	var gasLimit int64
  3001  	if len(verificationFee) == 0 {
  3002  		gasLimit = t.NetworkFee - int64(t.Size())*bc.FeePerByte() - bc.CalculateAttributesFee(t)
  3003  	} else {
  3004  		gasLimit = verificationFee[0]
  3005  	}
  3006  	for i := range t.Signers {
  3007  		gasConsumed, err := bc.verifyHashAgainstScript(t.Signers[i].Account, &t.Scripts[i], interopCtx, gasLimit)
  3008  		if err != nil &&
  3009  			!(i == 0 && isPartialTx && errors.Is(err, ErrInvalidSignature)) { // it's OK for partially-filled transaction with dummy first witness.
  3010  			return fmt.Errorf("witness #%d: %w", i, err)
  3011  		}
  3012  		gasLimit -= gasConsumed
  3013  	}
  3014  
  3015  	return nil
  3016  }
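
// Illustrative budget arithmetic for the default (no verificationFee) branch
// above, with purely hypothetical numbers:
//
//	networkFee := int64(2_000_000)
//	size := int64(250)        // t.Size()
//	feePerByte := int64(1000) // bc.FeePerByte()
//	attrFee := int64(0)       // bc.CalculateAttributesFee(t)
//	gasLimit := networkFee - size*feePerByte - attrFee // 1_750_000 left for all
//	// witnesses, consumed witness by witness in the loop above.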
  3017  
  3018  // verifyHeaderWitnesses is a block-specific implementation of VerifyWitnesses logic.
  3019  func (bc *Blockchain) verifyHeaderWitnesses(currHeader, prevHeader *block.Header) error {
  3020  	var hash util.Uint160
  3021  	if prevHeader == nil && currHeader.PrevHash.Equals(util.Uint256{}) {
  3022  		hash = currHeader.Script.ScriptHash()
  3023  	} else {
  3024  		hash = prevHeader.NextConsensus
  3025  	}
  3026  	_, err := bc.VerifyWitness(hash, currHeader, &currHeader.Script, HeaderVerificationGasLimit)
  3027  	return err
  3028  }
  3029  
  3030  // GoverningTokenHash returns the governing token (NEO) native contract hash.
  3031  func (bc *Blockchain) GoverningTokenHash() util.Uint160 {
  3032  	return bc.contracts.NEO.Hash
  3033  }
  3034  
  3035  // UtilityTokenHash returns the utility token (GAS) native contract hash.
  3036  func (bc *Blockchain) UtilityTokenHash() util.Uint160 {
  3037  	return bc.contracts.GAS.Hash
  3038  }
  3039  
  3040  // ManagementContractHash returns management contract's hash.
  3041  func (bc *Blockchain) ManagementContractHash() util.Uint160 {
  3042  	return bc.contracts.Management.Hash
  3043  }
  3044  
  3045  func (bc *Blockchain) newInteropContext(trigger trigger.Type, d *dao.Simple, block *block.Block, tx *transaction.Transaction) *interop.Context {
  3046  	baseExecFee := int64(interop.DefaultBaseExecFee)
  3047  	if block == nil || block.Index != 0 {
  3048  		// Use provided dao instead of Blockchain's one to fetch possible ExecFeeFactor
  3049  		// changes that were not yet persisted to Blockchain's dao.
  3050  		baseExecFee = bc.contracts.Policy.GetExecFeeFactorInternal(d)
  3051  	}
  3052  	baseStorageFee := int64(native.DefaultStoragePrice)
  3053  	if block == nil || block.Index != 0 {
  3054  		// Use provided dao instead of Blockchain's one to fetch possible StoragePrice
  3055  		// changes that were not yet persisted to Blockchain's dao.
  3056  		baseStorageFee = bc.contracts.Policy.GetStoragePriceInternal(d)
  3057  	}
  3058  	ic := interop.NewContext(trigger, bc, d, baseExecFee, baseStorageFee, native.GetContract, bc.contracts.Contracts, contract.LoadToken, block, tx, bc.log)
  3059  	ic.Functions = systemInterops
  3060  	switch {
  3061  	case tx != nil:
  3062  		ic.Container = tx
  3063  	case block != nil:
  3064  		ic.Container = block
  3065  	}
  3066  	ic.InitNonceData()
  3067  	return ic
  3068  }
  3069  
  3070  // P2PSigExtensionsEnabled defines whether P2P signature extensions are enabled.
  3071  func (bc *Blockchain) P2PSigExtensionsEnabled() bool {
  3072  	return bc.config.P2PSigExtensions
  3073  }
  3074  
  3075  // RegisterPostBlock appends the provided function to the list of functions which should be run after a new
  3076  // block is stored.
  3077  func (bc *Blockchain) RegisterPostBlock(f func(func(*transaction.Transaction, *mempool.Pool, bool) bool, *mempool.Pool, *block.Block)) {
  3078  	bc.postBlock = append(bc.postBlock, f)
  3079  }
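
// A registration sketch, assuming the first callback argument is wired to a
// relevancy check shaped like IsTxStillRelevant above:
//
//	bc.RegisterPostBlock(func(relevancyCheck func(*transaction.Transaction, *mempool.Pool, bool) bool,
//		pool *mempool.Pool, b *block.Block) {
//		// Inspect b and/or re-filter pool using relevancyCheck here.
//	})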
  3080  
  3081  // GetBaseExecFee returns the execution price for `NOP`.
  3082  func (bc *Blockchain) GetBaseExecFee() int64 {
  3083  	if bc.BlockHeight() == 0 {
  3084  		return interop.DefaultBaseExecFee
  3085  	}
  3086  	return bc.contracts.Policy.GetExecFeeFactorInternal(bc.dao)
  3087  }
  3088  
  3089  // GetMaxVerificationGAS returns maximum verification GAS Policy limit.
  3090  func (bc *Blockchain) GetMaxVerificationGAS() int64 {
  3091  	return bc.contracts.Policy.GetMaxVerificationGas(bc.dao)
  3092  }
  3093  
  3094  // GetMaxNotValidBeforeDelta returns maximum NotValidBeforeDelta Notary limit.
  3095  func (bc *Blockchain) GetMaxNotValidBeforeDelta() (uint32, error) {
  3096  	if !bc.config.P2PSigExtensions {
  3097  		panic("disallowed call to Notary") // critical error, thus panic.
  3098  	}
  3099  	if !bc.isHardforkEnabled(bc.contracts.Notary.ActiveIn(), bc.BlockHeight()) {
  3100  		return 0, fmt.Errorf("native Notary is active starting from %s", bc.contracts.Notary.ActiveIn().String())
  3101  	}
  3102  	return bc.contracts.Notary.GetMaxNotValidBeforeDelta(bc.dao), nil
  3103  }
  3104  
  3105  // GetStoragePrice returns current storage price.
  3106  func (bc *Blockchain) GetStoragePrice() int64 {
  3107  	if bc.BlockHeight() == 0 {
  3108  		return native.DefaultStoragePrice
  3109  	}
  3110  	return bc.contracts.Policy.GetStoragePriceInternal(bc.dao)
  3111  }