github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/common/ledger/blkstorage/snapshot_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package blkstorage

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"testing"

	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/peer"
	"github.com/osdi23p228/fabric/common/ledger/snapshot"
	"github.com/osdi23p228/fabric/common/ledger/testutil"
	"github.com/osdi23p228/fabric/internal/pkg/txflags"
	"github.com/osdi23p228/fabric/protoutil"
	"github.com/stretchr/testify/require"
)

type testBlockDetails struct {
	txIDs           []string
	validationCodes []peer.TxValidationCode
}

func TestBootstrapFromSnapshot(t *testing.T) {
	var testDir string
	var env *testEnv
	var blocksDetailsBeforeSnapshot, blocksDetailsAfterSnapshot []*testBlockDetails
	var blocksBeforeSnapshot, blocksAfterSnapshot []*common.Block
	var blocksGenerator *testutil.BlockGenerator

	var snapshotDir string
	var snapshotInfo *SnapshotInfo
	var bootstrappedBlockStore *BlockStore

	bootstrappedLedgerName := "bootstrappedLedger"

	setup := func() {
		testDir = testPath()
		env = newTestEnv(t, NewConf(testDir, 0))
		snapshotDir = filepath.Join(testDir, "snapshot")
		require.NoError(t, os.Mkdir(snapshotDir, 0755))

		bg, genesisBlock := testutil.NewBlockGenerator(t, "testLedger", false)
		blocksGenerator = bg
		originalBlockStore, err := env.provider.Open("originalLedger")
		require.NoError(t, err)
		txIDGenesisTx, err := protoutil.GetOrComputeTxIDFromEnvelope(genesisBlock.Data.Data[0])
		require.NoError(t, err)

		blocksDetailsBeforeSnapshot = []*testBlockDetails{
			{
				txIDs: []string{txIDGenesisTx},
			},
			{
				txIDs: []string{"txid1", "txid2"},
			},
			{
				txIDs: []string{"txid3", "txid4", "txid5"},
			},
		}

		blocksBeforeSnapshot = []*common.Block{
			genesisBlock,
			generateNextTestBlock(blocksGenerator, blocksDetailsBeforeSnapshot[1]),
			generateNextTestBlock(blocksGenerator, blocksDetailsBeforeSnapshot[2]),
		}

		blocksDetailsAfterSnapshot = []*testBlockDetails{
			{
				txIDs: []string{"txid7", "txid8"},
			},
			{
				txIDs: []string{"txid9", "txid10", "txid11"},
				validationCodes: []peer.TxValidationCode{
					peer.TxValidationCode_BAD_CHANNEL_HEADER,
					peer.TxValidationCode_BAD_CREATOR_SIGNATURE,
				},
			},
		}

		blocksAfterSnapshot = []*common.Block{
			generateNextTestBlock(blocksGenerator, blocksDetailsAfterSnapshot[0]),
			generateNextTestBlock(blocksGenerator, blocksDetailsAfterSnapshot[1]),
		}

		for _, b := range blocksBeforeSnapshot {
			err := originalBlockStore.AddBlock(b)
			require.NoError(t, err)
		}
		_, err = originalBlockStore.ExportTxIds(snapshotDir, testNewHashFunc)
		require.NoError(t, err)
		lastBlockInSnapshot := blocksBeforeSnapshot[len(blocksBeforeSnapshot)-1]

		snapshotInfo = &SnapshotInfo{
			LedgerID:          bootstrappedLedgerName,
			LastBlockHash:     protoutil.BlockHeaderHash(lastBlockInSnapshot.Header),
			LastBlockNum:      lastBlockInSnapshot.Header.Number,
			PreviousBlockHash: lastBlockInSnapshot.Header.PreviousHash,
		}

		// bootstrap another blockstore from the snapshot and verify its APIs
		importTxIDsBatchSize = uint64(2) // smaller batch size for testing

		bootstrappedBlockStore, err = env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.NoError(t, err)
	}

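	// Helpers shared by the sub-tests below: cleanup releases the test environment,
	// closeBlockStore closes the provider, and reopenBlockStore recreates the test
	// env on the same config and reopens the bootstrapped ledger.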
	cleanup := func() {
		env.Cleanup()
	}

	closeBlockStore := func() {
		env.provider.Close()
	}

	reopenBlockStore := func() error {
		env = newTestEnv(t, env.provider.conf)
		s, err := env.provider.Open(bootstrappedLedgerName)
		bootstrappedBlockStore = s
		return err
	}

	t.Run("query-just-after-bootstrap", func(t *testing.T) {
		setup()
		defer cleanup()

		verifyQueriesOnBlocksPriorToSnapshot(
			t,
			bootstrappedBlockStore,
			&common.BlockchainInfo{
				Height:            snapshotInfo.LastBlockNum + 1,
				CurrentBlockHash:  snapshotInfo.LastBlockHash,
				PreviousBlockHash: snapshotInfo.PreviousBlockHash,
			},
			blocksDetailsBeforeSnapshot,
			blocksBeforeSnapshot,
		)
	})

	t.Run("add-more-blocks", func(t *testing.T) {
		setup()
		defer cleanup()

		require.EqualError(t,
			bootstrappedBlockStore.AddBlock(blocksAfterSnapshot[1]),
			"block number should have been 3 but was 4",
		)

		for _, b := range blocksAfterSnapshot {
			require.NoError(t, bootstrappedBlockStore.AddBlock(b))
		}
		finalBlock := blocksAfterSnapshot[len(blocksAfterSnapshot)-1]
		expectedBCInfo := &common.BlockchainInfo{
			Height:            finalBlock.Header.Number + 1,
			CurrentBlockHash:  protoutil.BlockHeaderHash(finalBlock.Header),
			PreviousBlockHash: finalBlock.Header.PreviousHash,
		}
		verifyQueriesOnBlocksPriorToSnapshot(t,
			bootstrappedBlockStore,
			expectedBCInfo,
			blocksDetailsBeforeSnapshot,
			blocksBeforeSnapshot,
		)
		verifyQueriesOnBlocksAddedAfterBootstrapping(t,
			bootstrappedBlockStore,
			expectedBCInfo,
			blocksDetailsAfterSnapshot,
			blocksAfterSnapshot,
		)
	})

	t.Run("close-and-reopen", func(t *testing.T) {
		setup()
		defer cleanup()

		closeBlockStore()
		require.NoError(t, reopenBlockStore())
		verifyQueriesOnBlocksPriorToSnapshot(t,
			bootstrappedBlockStore,
			&common.BlockchainInfo{
				Height:            snapshotInfo.LastBlockNum + 1,
				CurrentBlockHash:  snapshotInfo.LastBlockHash,
				PreviousBlockHash: snapshotInfo.PreviousBlockHash,
			},
			blocksDetailsBeforeSnapshot,
			blocksBeforeSnapshot,
		)

		for _, b := range blocksAfterSnapshot {
			require.NoError(t, bootstrappedBlockStore.AddBlock(b))
		}
		closeBlockStore()
		require.NoError(t, reopenBlockStore())
		finalBlock := blocksAfterSnapshot[len(blocksAfterSnapshot)-1]
		expectedBCInfo := &common.BlockchainInfo{
			Height:            finalBlock.Header.Number + 1,
			CurrentBlockHash:  protoutil.BlockHeaderHash(finalBlock.Header),
			PreviousBlockHash: finalBlock.Header.PreviousHash,
		}
		verifyQueriesOnBlocksAddedAfterBootstrapping(t,
			bootstrappedBlockStore,
			expectedBCInfo,
			blocksDetailsAfterSnapshot,
			blocksAfterSnapshot,
		)
	})

	t.Run("export-txids", func(t *testing.T) {
		setup()
		defer cleanup()

		anotherSnapshotDir := filepath.Join(testDir, "anotherSnapshot")
		require.NoError(t, os.Mkdir(anotherSnapshotDir, 0755))

		for _, b := range blocksAfterSnapshot {
			require.NoError(t, bootstrappedBlockStore.AddBlock(b))
		}

		fileHashes, err := bootstrappedBlockStore.ExportTxIds(anotherSnapshotDir, testNewHashFunc)
		require.NoError(t, err)
		expectedTxIDs := []string{}
		for _, b := range append(blocksDetailsBeforeSnapshot, blocksDetailsAfterSnapshot...) {
			expectedTxIDs = append(expectedTxIDs, b.txIDs...)
		}
		sort.Slice(expectedTxIDs, func(i, j int) bool {
			ith := expectedTxIDs[i]
			jth := expectedTxIDs[j]
			if len(ith) == len(jth) {
				return ith < jth
			}
			return len(ith) < len(jth)
		})
		verifyExportedTxIDs(t, anotherSnapshotDir, fileHashes, expectedTxIDs...)
	})

	t.Run("sync-up-indexes", func(t *testing.T) {
		setup()
		defer cleanup()

		blockDetails := []*testBlockDetails{}
		blocks := []*common.Block{}
		for i, blockDetail := range blocksDetailsAfterSnapshot {
			block := blocksAfterSnapshot[i]
			blockDetails = append(blockDetails, blockDetail)
			blocks = append(blocks, block)

			// redirect index writes to a throwaway DB, add this block, and then set the original index back
			blkfileMgr := bootstrappedBlockStore.fileMgr
			originalIndexDB := blkfileMgr.index.db
			bootstrappedBlockStore.fileMgr.index.db = env.provider.leveldbProvider.GetDBHandle(filepath.Join(testDir, "someRandomPlace"))
			require.NoError(t, blkfileMgr.addBlock(block))
			blkfileMgr.index.db = originalIndexDB

			// before we test index sync-up, verify that the block just added is not indexed in the original index
			_, err := blkfileMgr.retrieveBlockByNumber(block.Header.Number)
			require.Exactly(t, ErrNotFoundInIndex, err)

			// closing and reopening the block store should sync up the index
			closeBlockStore()
			require.NoError(t, reopenBlockStore())

			finalBlock := blocks[len(blocks)-1]
			verifyQueriesOnBlocksAddedAfterBootstrapping(
				t,
				bootstrappedBlockStore,
				&common.BlockchainInfo{
					Height:            finalBlock.Header.Number + 1,
					CurrentBlockHash:  protoutil.BlockHeaderHash(finalBlock.Header),
					PreviousBlockHash: finalBlock.Header.PreviousHash,
				},
				blockDetails,
				blocks,
			)
		}
	})

	t.Run("error-when-indexes-deleted", func(t *testing.T) {
		setup()
		defer cleanup()

		closeBlockStore()
		require.NoError(t, os.RemoveAll(env.provider.conf.getIndexDir()))
		err := reopenBlockStore()
		require.EqualError(t, err,
			fmt.Sprintf(
				"cannot sync index with block files. blockstore is bootstrapped from a snapshot and first available block=[%d]",
				len(blocksBeforeSnapshot),
			),
		)
	})
}

func TestBootstrapFromSnapshotErrorPaths(t *testing.T) {
	testPath := testPath()
	env := newTestEnv(t, NewConf(testPath, 0))
	defer func() {
		env.Cleanup()
	}()
	snapshotDir := filepath.Join(testPath, "snapshot")
	metadataFile := filepath.Join(snapshotDir, snapshotMetadataFileName)
	dataFile := filepath.Join(snapshotDir, snapshotDataFileName)
	ledgerDir := filepath.Join(testPath, "chains", "bootstrappedLedger")
	bootstrappingSnapshotInfoFile := filepath.Join(ledgerDir, bootstrappingSnapshotInfoFile)

	snapshotInfo := &SnapshotInfo{
		LedgerID:          "bootstrappedLedger",
		LastBlockHash:     []byte("LastBlockHash"),
		LastBlockNum:      5,
		PreviousBlockHash: []byte("PreviousBlockHash"),
	}

	cleanupDirs := func() {
		require.NoError(t, os.RemoveAll(ledgerDir))
		require.NoError(t, os.RemoveAll(snapshotDir))
		require.NoError(t, os.Mkdir(snapshotDir, 0755))
	}

	createSnapshotMetadataFile := func(content uint64) {
		mf, err := snapshot.CreateFile(metadataFile, snapshotFileFormat, testNewHashFunc)
		require.NoError(t, err)
		require.NoError(t, mf.EncodeUVarint(content))
		_, err = mf.Done()
		require.NoError(t, err)
	}
	createSnapshotDataFile := func(content ...string) {
		df, err := snapshot.CreateFile(dataFile, snapshotFileFormat, testNewHashFunc)
		require.NoError(t, err)
		for _, c := range content {
			require.NoError(t, df.EncodeString(c))
		}
		_, err = df.Done()
		require.NoError(t, err)
	}

	t.Run("metadata-file-missing", func(t *testing.T) {
		cleanupDirs()
		_, err := env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.Contains(t, err.Error(), "error while opening the snapshot file: "+metadataFile)
	})

	t.Run("bootstrapping-more-than-once", func(t *testing.T) {
		cleanupDirs()
		env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		_, err := env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.EqualError(t, err, "dir "+ledgerDir+" not empty")
	})

	t.Run("metadata-file-corrupted", func(t *testing.T) {
		cleanupDirs()
		mf, err := snapshot.CreateFile(metadataFile, snapshotFileFormat, testNewHashFunc)
		require.NoError(t, err)
		require.NoError(t, mf.Close())
		_, err = env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.Contains(t, err.Error(), "error while reading from the snapshot file: "+metadataFile)
	})

	t.Run("data-file-missing", func(t *testing.T) {
		cleanupDirs()
		createSnapshotMetadataFile(1)
		_, err := env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.Contains(t, err.Error(), "error while opening the snapshot file: "+dataFile)
	})

	t.Run("data-file-corrupt", func(t *testing.T) {
		cleanupDirs()
		createSnapshotMetadataFile(2)
		createSnapshotDataFile("single-tx-id")
		_, err := env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.Contains(t, err.Error(), "error while reading from snapshot file: "+dataFile)
	})

	t.Run("db-error", func(t *testing.T) {
		cleanupDirs()
		createSnapshotMetadataFile(1)
		createSnapshotDataFile("single-tx-id")
		env.provider.Close()
		defer func() {
			env = newTestEnv(t, NewConf(testPath, 0))
		}()
		_, err := env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.Contains(t, err.Error(), "error writing batch to leveldb")
	})

	t.Run("bootstrappedsnapshotInfo-file-corrupt", func(t *testing.T) {
		cleanupDirs()
		createSnapshotMetadataFile(1)
		createSnapshotDataFile("single-tx-id")
		_, err := env.provider.BootstrapFromSnapshottedTxIDs(snapshotDir, snapshotInfo)
		require.NoError(t, err)
		env.provider.Close()
		env = newTestEnv(t, NewConf(testPath, 0))
		require.NoError(t, ioutil.WriteFile(bootstrappingSnapshotInfoFile, []byte("junk-data"), 0644))
		_, err = env.provider.Open(snapshotInfo.LedgerID)
		require.Contains(t, err.Error(), "error while unmarshalling bootstrappingSnapshotInfo")
	})
}

func generateNextTestBlock(bg *testutil.BlockGenerator, d *testBlockDetails) *common.Block {
	txContents := [][]byte{}
	for _, txID := range d.txIDs {
		txContents = append(txContents, []byte("dummy content for txid = "+txID))
	}
	block := bg.NextBlockWithTxid(txContents, d.txIDs)
	for i, validationCode := range d.validationCodes {
		txflags.ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER]).SetFlag(i, validationCode)
	}
	return block
}

func verifyQueriesOnBlocksPriorToSnapshot(
	t *testing.T,
	bootstrappedBlockStore *BlockStore,
	expectedBCInfo *common.BlockchainInfo,
	blocksDetailsBeforeSnapshot []*testBlockDetails,
	blocksBeforeSnapshot []*common.Block,
) {
	bci, err := bootstrappedBlockStore.GetBlockchainInfo()
	require.NoError(t, err)
	require.Equal(t, expectedBCInfo, bci)

	for _, b := range blocksBeforeSnapshot {
		blockNum := b.Header.Number
		blockHash := protoutil.BlockHeaderHash(b.Header)
		expectedErrStr := fmt.Sprintf(
			"cannot serve block [%d]. The ledger is bootstrapped from a snapshot. First available block = [%d]",
			blockNum, len(blocksBeforeSnapshot),
		)

		_, err := bootstrappedBlockStore.RetrieveBlockByNumber(blockNum)
		require.EqualError(t, err, expectedErrStr)

		_, err = bootstrappedBlockStore.RetrieveBlocks(blockNum)
		require.EqualError(t, err, expectedErrStr)

		_, err = bootstrappedBlockStore.RetrieveTxByBlockNumTranNum(blockNum, 0)
		require.EqualError(t, err, expectedErrStr)

		_, err = bootstrappedBlockStore.RetrieveBlockByHash(blockHash)
		require.Equal(t, ErrNotFoundInIndex, err)
	}

	bootstrappingSnapshotHeight := uint64(len(blocksDetailsBeforeSnapshot))
	for _, d := range blocksDetailsBeforeSnapshot {
		for _, txID := range d.txIDs {
			expectedErrorStr := fmt.Sprintf(
				"details for the TXID [%s] not available. Ledger bootstrapped from a snapshot. First available block = [%d]",
				txID, bootstrappingSnapshotHeight,
			)
			_, err := bootstrappedBlockStore.RetrieveBlockByTxID(txID)
			require.EqualError(t, err, expectedErrorStr)

			_, err = bootstrappedBlockStore.RetrieveTxByID(txID)
			require.EqualError(t, err, expectedErrorStr)

			_, err = bootstrappedBlockStore.RetrieveTxValidationCodeByTxID(txID)
			require.EqualError(t, err, expectedErrorStr)
		}
	}
}

func verifyQueriesOnBlocksAddedAfterBootstrapping(t *testing.T,
	bootstrappedBlockStore *BlockStore,
	expectedBCInfo *common.BlockchainInfo,
	blocksDetailsAfterSnapshot []*testBlockDetails,
	blocksAfterSnapshot []*common.Block,
) {
	bci, err := bootstrappedBlockStore.GetBlockchainInfo()
	require.NoError(t, err)
	require.Equal(t, expectedBCInfo, bci)

	for _, b := range blocksAfterSnapshot {
		retrievedBlock, err := bootstrappedBlockStore.RetrieveBlockByNumber(b.Header.Number)
		require.NoError(t, err)
		require.Equal(t, b, retrievedBlock)

		retrievedBlock, err = bootstrappedBlockStore.RetrieveBlockByHash(protoutil.BlockHeaderHash(b.Header))
		require.NoError(t, err)
		require.Equal(t, b, retrievedBlock)

		itr, err := bootstrappedBlockStore.RetrieveBlocks(b.Header.Number)
		require.NoError(t, err)
		blk, err := itr.Next()
		require.NoError(t, err)
		require.Equal(t, b, blk)
		itr.Close()

		retrievedTxEnv, err := bootstrappedBlockStore.RetrieveTxByBlockNumTranNum(b.Header.Number, 0)
		require.NoError(t, err)
		expectedTxEnv, err := protoutil.GetEnvelopeFromBlock(b.Data.Data[0])
		require.NoError(t, err)
		require.Equal(t, expectedTxEnv, retrievedTxEnv)
	}

	for i, d := range blocksDetailsAfterSnapshot {
		block := blocksAfterSnapshot[i]
		for j, txID := range d.txIDs {
			retrievedBlock, err := bootstrappedBlockStore.RetrieveBlockByTxID(txID)
			require.NoError(t, err)
			require.Equal(t, block, retrievedBlock)

			retrievedTxEnv, err := bootstrappedBlockStore.RetrieveTxByID(txID)
			require.NoError(t, err)
			expectedTxEnv, err := protoutil.GetEnvelopeFromBlock(block.Data.Data[j])
			require.NoError(t, err)
			require.Equal(t, expectedTxEnv, retrievedTxEnv)
		}

		for j, validationCode := range d.validationCodes {
			retrievedValidationCode, err := bootstrappedBlockStore.RetrieveTxValidationCodeByTxID(d.txIDs[j])
			require.NoError(t, err)
			require.Equal(t, validationCode, retrievedValidationCode)
		}
	}
}