github.com/nspcc-dev/neo-go@v0.105.2-0.20240517133400-6be757af3eba/pkg/core/blockchain_core_test.go

package core

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/nspcc-dev/neo-go/internal/testchain"
	"github.com/nspcc-dev/neo-go/pkg/config"
	"github.com/nspcc-dev/neo-go/pkg/core/block"
	"github.com/nspcc-dev/neo-go/pkg/core/state"
	"github.com/nspcc-dev/neo-go/pkg/core/storage"
	"github.com/nspcc-dev/neo-go/pkg/core/transaction"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract"
	"github.com/nspcc-dev/neo-go/pkg/smartcontract/trigger"
	"github.com/nspcc-dev/neo-go/pkg/util"
	"github.com/nspcc-dev/neo-go/pkg/vm/opcode"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"
)

// TestVerifyHeader checks that verifyHeader rejects headers with a mismatching
// previous hash, index or timestamp and accepts a well-formed successor header.
func TestVerifyHeader(t *testing.T) {
	bc := newTestChain(t)
	prev := bc.topBlock.Load().(*block.Block).Header
	t.Run("Invalid", func(t *testing.T) {
		t.Run("Hash", func(t *testing.T) {
			h := prev.Hash()
			h[0] = ^h[0]
			hdr := newBlock(bc.config.ProtocolConfiguration, 1, h).Header
			require.ErrorIs(t, bc.verifyHeader(&hdr, &prev), ErrHdrHashMismatch)
		})
		t.Run("Index", func(t *testing.T) {
			hdr := newBlock(bc.config.ProtocolConfiguration, 3, prev.Hash()).Header
			require.ErrorIs(t, bc.verifyHeader(&hdr, &prev), ErrHdrIndexMismatch)
		})
		t.Run("Timestamp", func(t *testing.T) {
			hdr := newBlock(bc.config.ProtocolConfiguration, 1, prev.Hash()).Header
			hdr.Timestamp = 0
			require.ErrorIs(t, bc.verifyHeader(&hdr, &prev), ErrHdrInvalidTimestamp)
		})
	})
	t.Run("Valid", func(t *testing.T) {
		hdr := newBlock(bc.config.ProtocolConfiguration, 1, prev.Hash()).Header
		require.NoError(t, bc.verifyHeader(&hdr, &prev))
	})
}

// TestAddBlock generates several blocks, persists them and checks that they are
// stored in the DB and that the chain and header heights are updated accordingly.
func TestAddBlock(t *testing.T) {
	const size = 3
	bc := newTestChain(t)
	blocks, err := bc.genBlocks(size)
	require.NoError(t, err)

	lastBlock := blocks[len(blocks)-1]
	assert.Equal(t, lastBlock.Index, bc.HeaderHeight())
	assert.Equal(t, lastBlock.Hash(), bc.CurrentHeaderHash())

	// This one tests persisting blocks, so it does need to persist().
	_, err = bc.persist(false)
	require.NoError(t, err)

	key := make([]byte, 1+util.Uint256Size)
	key[0] = byte(storage.DataExecutable)
	for _, block := range blocks {
		copy(key[1:], block.Hash().BytesBE())
		_, err := bc.dao.Store.Get(key)
		require.NoErrorf(t, err, "block %s not persisted", block.Hash())
	}

	assert.Equal(t, lastBlock.Index, bc.BlockHeight())
	assert.Equal(t, lastBlock.Hash(), bc.CurrentHeaderHash())
}

// TestRemoveOldTransfers checks removeOldTransfers() behaviour on a DB emulating
// both stale and fresh token transfer log entries.
func TestRemoveOldTransfers(t *testing.T) {
	// Creating proper number of transfers/blocks takes unnecessary time, so emulate
	// some DB with stale entries.
	bc := newTestChain(t)
	h, err := bc.GetHeader(bc.GetHeaderHash(0))
	require.NoError(t, err)
	older := h.Timestamp - 1000
	newer := h.Timestamp + 1000
	acc1 := util.Uint160{1}
	acc2 := util.Uint160{2}
	acc3 := util.Uint160{3}
	ttl := state.TokenTransferLog{Raw: []byte{1}} // It's incorrect, but who cares.

	for i := uint32(0); i < 3; i++ {
		bc.dao.PutTokenTransferLog(acc1, older, i, false, &ttl)
	}
	for i := uint32(0); i < 3; i++ {
		bc.dao.PutTokenTransferLog(acc2, newer, i, false, &ttl)
	}
	for i := uint32(0); i < 2; i++ {
		bc.dao.PutTokenTransferLog(acc3, older, i, true, &ttl)
	}
	for i := uint32(0); i < 2; i++ {
		bc.dao.PutTokenTransferLog(acc3, newer, i, true, &ttl)
	}

	_, err = bc.dao.Persist()
	require.NoError(t, err)
	_ = bc.removeOldTransfers(0)

	for i := uint32(0); i < 2; i++ {
		log, err := bc.dao.GetTokenTransferLog(acc1, older, i, false)
		require.NoError(t, err)
		require.Equal(t, 0, len(log.Raw))
	}

	log, err := bc.dao.GetTokenTransferLog(acc1, older, 2, false)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(log.Raw))

	for i := uint32(0); i < 3; i++ {
		log, err = bc.dao.GetTokenTransferLog(acc2, newer, i, false)
		require.NoError(t, err)
		require.NotEqual(t, 0, len(log.Raw))
	}

	log, err = bc.dao.GetTokenTransferLog(acc3, older, 0, true)
	require.NoError(t, err)
	require.Equal(t, 0, len(log.Raw))

	log, err = bc.dao.GetTokenTransferLog(acc3, older, 1, true)
	require.NoError(t, err)
	require.NotEqual(t, 0, len(log.Raw))

	for i := uint32(0); i < 2; i++ {
		log, err = bc.dao.GetTokenTransferLog(acc3, newer, i, true)
		require.NoError(t, err)
		require.NotEqual(t, 0, len(log.Raw))
	}
}

// TestBlockchain_InitWithIncompleteStateJump checks how NewBlockchain() handles
// a storage that was left in the middle of a state jump.
func TestBlockchain_InitWithIncompleteStateJump(t *testing.T) {
	var (
		stateSyncInterval        = 4
		maxTraceable      uint32 = 6
	)
	spountCfg := func(c *config.Config) {
		c.ApplicationConfiguration.RemoveUntraceableBlocks = true
		c.ProtocolConfiguration.StateRootInHeader = true
		c.ProtocolConfiguration.P2PStateExchangeExtensions = true
		c.ProtocolConfiguration.StateSyncInterval = stateSyncInterval
		c.ProtocolConfiguration.MaxTraceableBlocks = maxTraceable
		c.ApplicationConfiguration.KeepOnlyLatestState = true
	}
	bcSpout := newTestChainWithCustomCfg(t, spountCfg)

	// Generate some content.
	for i := 0; i < len(bcSpout.GetConfig().StandbyCommittee); i++ {
		require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
	}

	// Reach the next state sync point and pretend that we've just restored.
	stateSyncPoint := (int(bcSpout.BlockHeight())/stateSyncInterval + 1) * stateSyncInterval
	for i := bcSpout.BlockHeight() + 1; i <= uint32(stateSyncPoint); i++ {
		require.NoError(t, bcSpout.AddBlock(bcSpout.newBlock()))
	}
	require.Equal(t, uint32(stateSyncPoint), bcSpout.BlockHeight())
	b := bcSpout.newBlock()
	require.NoError(t, bcSpout.AddHeaders(&b.Header))

	// Put storage items with the temporary prefix.
	batch := storage.NewMemCachedStore(bcSpout.dao.Store)
	tempPrefix := storage.STTempStorage
	if bcSpout.dao.Version.StoragePrefix == tempPrefix {
		tempPrefix = storage.STStorage
	}
	bPrefix := make([]byte, 1)
	bPrefix[0] = byte(bcSpout.dao.Version.StoragePrefix)
	bcSpout.dao.Store.Seek(storage.SeekRange{Prefix: bPrefix}, func(k, v []byte) bool {
		key := bytes.Clone(k)
		key[0] = byte(tempPrefix)
		value := bytes.Clone(v)
		batch.Put(key, value)
		return true
	})
	_, err := batch.Persist()
	require.NoError(t, err)

	checkNewBlockchainErr := func(t *testing.T, cfg func(c *config.Config), store storage.Store, errText string) {
		unitTestNetCfg, err := config.Load("../../config", testchain.Network())
		require.NoError(t, err)
		cfg(&unitTestNetCfg)
		log := zaptest.NewLogger(t)
		_, err = NewBlockchain(store, unitTestNetCfg.Blockchain(), log)
		if len(errText) != 0 {
			require.Error(t, err)
			require.True(t, strings.Contains(err.Error(), errText))
		} else {
			require.NoError(t, err)
		}
	}
	boltCfg := func(c *config.Config) {
		spountCfg(c)
		c.ApplicationConfiguration.KeepOnlyLatestState = true
	}
	// Manually store the state jump stage to check the state jump recovery process.
	bPrefix[0] = byte(storage.SYSStateChangeStage)
	t.Run("invalid state jump stage format", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{0x01, 0x02})
		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "invalid state jump stage format")
	})
	t.Run("missing state sync point", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "failed to get state sync point from the storage")
	})
	t.Run("invalid RemoveUntraceableBlocks setting", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
		point := make([]byte, 4)
		binary.LittleEndian.PutUint32(point, uint32(stateSyncPoint))
		bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
		checkNewBlockchainErr(t, func(c *config.Config) {
			boltCfg(c)
			c.ApplicationConfiguration.RemoveUntraceableBlocks = false
		}, bcSpout.dao.Store, "P2PStateExchangeExtensions can be enabled either on MPT-complete node")
	})
	t.Run("invalid state sync point", func(t *testing.T) {
		bcSpout.dao.Store.Put(bPrefix, []byte{byte(stateJumpStarted)})
		point := make([]byte, 4)
		binary.LittleEndian.PutUint32(point, bcSpout.lastHeaderIndex()+1)
		bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
		checkNewBlockchainErr(t, boltCfg, bcSpout.dao.Store, "invalid state sync point")
	})
	for _, stage := range []stateChangeStage{stateJumpStarted, newStorageItemsAdded, staleBlocksRemoved, 0x03} {
		t.Run(fmt.Sprintf("state jump stage %d", stage), func(t *testing.T) {
			bcSpout.dao.Store.Put(bPrefix, []byte{byte(stage)})
			point := make([]byte, 4)
			binary.LittleEndian.PutUint32(point, uint32(stateSyncPoint))
			bcSpout.dao.Store.Put([]byte{byte(storage.SYSStateSyncPoint)}, point)
			var errText string
			if stage == 0x03 {
				errText = "unknown state jump stage"
			}
			checkNewBlockchainErr(t, spountCfg, bcSpout.dao.Store, errText)
		})
	}
}

// TestChainWithVolatileNumOfValidators checks block processing under CommitteeHistory
// and ValidatorsHistory settings that change the committee and validators count over time.
func TestChainWithVolatileNumOfValidators(t *testing.T) {
	bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
		c.ProtocolConfiguration.ValidatorsCount = 0
		c.ProtocolConfiguration.CommitteeHistory = map[uint32]uint32{
			0:  1,
			4:  4,
			24: 6,
		}
		c.ProtocolConfiguration.ValidatorsHistory = map[uint32]uint32{
			0: 1,
			4: 4,
		}
		require.NoError(t, c.ProtocolConfiguration.Validate())
	})
	require.Equal(t, uint32(0), bc.BlockHeight())

	priv0 := testchain.PrivateKeyByID(0)

	vals := bc.ComputeNextBlockValidators()
	script, err := smartcontract.CreateDefaultMultiSigRedeemScript(vals)
	require.NoError(t, err)
	curWit := transaction.Witness{
		VerificationScript: script,
	}
	for i := 1; i < 26; i++ {
		comm, err := bc.GetCommittee()
		require.NoError(t, err)
		if i < 5 {
			require.Equal(t, 1, len(comm))
		} else if i < 25 {
			require.Equal(t, 4, len(comm))
		} else {
			require.Equal(t, 6, len(comm))
		}
		// Mimic consensus.
		if bc.config.ShouldUpdateCommitteeAt(uint32(i)) {
			vals = bc.ComputeNextBlockValidators()
		} else {
			vals, err = bc.GetNextBlockValidators()
		}
		require.NoError(t, err)
		if i < 4 {
			require.Equalf(t, 1, len(vals), "at %d", i)
		} else {
			require.Equalf(t, 4, len(vals), "at %d", i)
		}
		require.NoError(t, err)
		script, err := smartcontract.CreateDefaultMultiSigRedeemScript(vals)
		require.NoError(t, err)
		nextWit := transaction.Witness{
			VerificationScript: script,
		}
		b := &block.Block{
			Header: block.Header{
				NextConsensus: nextWit.ScriptHash(),
				Script:        curWit,
			},
		}
		curWit = nextWit
		b.PrevHash = bc.GetHeaderHash(uint32(i) - 1)
		b.Timestamp = uint64(time.Now().UTC().Unix())*1000 + uint64(i)
		b.Index = uint32(i)
		b.RebuildMerkleRoot()
		if i < 5 {
			signa := priv0.SignHashable(uint32(bc.config.Magic), b)
			b.Script.InvocationScript = append([]byte{byte(opcode.PUSHDATA1), byte(len(signa))}, signa...)
		} else {
			b.Script.InvocationScript = testchain.Sign(b)
		}
		err = bc.AddBlock(b)
		require.NoErrorf(t, err, "at %d", i)
	}
}

// setSigner sets a single Global-scoped signer with the given account on the transaction.
func setSigner(tx *transaction.Transaction, h util.Uint160) {
	tx.Signers = []transaction.Signer{{
		Account: h,
		Scopes:  transaction.Global,
	}}
}

// This test checks that the values of BaseExecFee and StoragePrice returned from the
// corresponding Blockchain methods match the ones provided to the constructor of a new
// interop context.
func TestBlockchain_BaseExecFeeBaseStoragePrice_Compat(t *testing.T) {
	bc := newTestChain(t)

	check := func(t *testing.T) {
		ic := bc.newInteropContext(trigger.Application, bc.dao, bc.topBlock.Load().(*block.Block), nil)
		require.Equal(t, bc.GetBaseExecFee(), ic.BaseExecFee())
		require.Equal(t, bc.GetStoragePrice(), ic.BaseStorageFee())
	}
	t.Run("zero block", func(t *testing.T) {
		check(t)
	})
	t.Run("non-zero block", func(t *testing.T) {
		require.NoError(t, bc.AddBlock(bc.newBlock()))
		check(t)
	})
}

// TestBlockchain_IsRunning checks that the isRunning flag is false before Run(),
// true while the chain is running and false again after Close().
func TestBlockchain_IsRunning(t *testing.T) {
	chain := initTestChain(t, nil, nil)
	require.False(t, chain.isRunning.Load().(bool))
	oldPersisted := atomic.LoadUint32(&chain.persistedHeight)

	go chain.Run()
	require.NoError(t, chain.AddBlock(chain.newBlock()))
	require.Eventually(t, func() bool {
		persisted := atomic.LoadUint32(&chain.persistedHeight)
		return persisted > oldPersisted
	}, 2*persistInterval, 100*time.Millisecond)
	require.True(t, chain.isRunning.Load().(bool))

	chain.Close()
	require.False(t, chain.isRunning.Load().(bool))
}

// TestNewBlockchain_InitHardforks checks that the Hardforks map is properly
// initialized from nil, empty and partially or fully filled configurations.
func TestNewBlockchain_InitHardforks(t *testing.T) {
	t.Run("nil set", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = nil
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 0,
			config.HFBasilisk.String():      0,
			config.HFCockatrice.String():    0,
		}, bc.GetConfig().Hardforks)
	})
	t.Run("empty set", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{}, bc.GetConfig().Hardforks)
	})
	t.Run("missing old", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{config.HFBasilisk.String(): 5}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 0,
			config.HFBasilisk.String():      5,
		}, bc.GetConfig().Hardforks)
	})
	t.Run("missing new", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{config.HFAspidochelone.String(): 5}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 5,
		}, bc.GetConfig().Hardforks)
	})
	t.Run("all present", func(t *testing.T) {
		bc := newTestChainWithCustomCfg(t, func(c *config.Config) {
			c.ProtocolConfiguration.Hardforks = map[string]uint32{config.HFAspidochelone.String(): 5, config.HFBasilisk.String(): 10, config.HFCockatrice.String(): 15}
			require.NoError(t, c.ProtocolConfiguration.Validate())
		})
		require.Equal(t, map[string]uint32{
			config.HFAspidochelone.String(): 5,
			config.HFBasilisk.String():      10,
			config.HFCockatrice.String():    15,
		}, bc.GetConfig().Hardforks)
	})
}