github.com/sykesm/fabric@v1.1.0-preview.0.20200129034918-2aa12b1a0181/common/ledger/blkstorage/fsblkstorage/reset_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package fsblkstorage

import (
	"io/ioutil"
	"os"
	"path"
	"testing"

	"github.com/davecgh/go-spew/spew"
	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric/common/ledger/blkstorage"
	"github.com/hyperledger/fabric/common/ledger/testutil"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/stretchr/testify/require"
)

func TestResetToGenesisBlkSingleBlkFile(t *testing.T) {
	blockStoreRootDir := "/tmp/testBlockStoreReset"
	require.NoError(t, os.RemoveAll(blockStoreRootDir))
	env := newTestEnv(t, NewConf(blockStoreRootDir, 0))
	defer env.Cleanup()
	provider := env.provider
	store, err := provider.CreateBlockStore("ledger1")
	require.NoError(t, err)

	// Add 50 blocks and shut down the block store
	blocks := testutil.ConstructTestBlocks(t, 50)
	for _, b := range blocks {
		require.NoError(t, store.AddBlock(b))
	}
	store.Shutdown()
	provider.Close()

	ledgerDir := (&Conf{blockStorageDir: blockStoreRootDir}).getLedgerBlockDir("ledger1")

	_, lastOffsetOriginal, numBlocksOriginal, err := scanForLastCompleteBlock(ledgerDir, 0, 0)
	t.Logf("lastOffsetOriginal=%d", lastOffsetOriginal)
	require.NoError(t, err)
	require.Equal(t, 50, numBlocksOriginal)
	fileInfo, err := os.Stat(deriveBlockfilePath(ledgerDir, 0))
	require.NoError(t, err)
	require.Equal(t, fileInfo.Size(), lastOffsetOriginal)

	resetToGenesisBlk(ledgerDir)
	assertBlocksDirOnlyFileWithGenesisBlock(t, ledgerDir, blocks[0])
}

func TestResetToGenesisBlkMultipleBlkFiles(t *testing.T) {
	blockStoreRootDir := "/tmp/testBlockStoreReset"
	require.NoError(t, os.RemoveAll(blockStoreRootDir))
	blocks := testutil.ConstructTestBlocks(t, 20) // 20 blocks persisted in ~5 block files
	blocksPerFile := 20 / 5
	env := newTestEnv(t, NewConf(blockStoreRootDir, 0))
	defer env.Cleanup()
	provider := env.provider
	store, err := provider.CreateBlockStore("ledger1")
	require.NoError(t, err)
	for i, b := range blocks {
		require.NoError(t, store.AddBlock(b))
		if i != 0 && i%blocksPerFile == 0 {
			// block ranges in files [(0, 4):file0, (5,8):file1, (9,12):file2, (13, 16):file3, (17,19):file4]
			store.(*fsBlockStore).fileMgr.moveToNextFile()
		}
	}
	store.Shutdown()
	provider.Close()

	ledgerDir := (&Conf{blockStorageDir: blockStoreRootDir}).getLedgerBlockDir("ledger1")
	files, err := ioutil.ReadDir(ledgerDir)
	require.NoError(t, err)
	require.Len(t, files, 5)
	resetToGenesisBlk(ledgerDir)
	assertBlocksDirOnlyFileWithGenesisBlock(t, ledgerDir, blocks[0])
}

func TestResetBlockStore(t *testing.T) {
	blockStoreRootDir := "/tmp/testBlockStoreReset"
	require.NoError(t, os.RemoveAll(blockStoreRootDir))
	blocks1 := testutil.ConstructTestBlocks(t, 20) // 20 blocks persisted in ~5 block files
	blocks2 := testutil.ConstructTestBlocks(t, 40) // 40 blocks persisted in ~10 block files
	maxFileSize := int(0.2 * float64(testutilEstimateTotalSizeOnDisk(t, blocks1)))

	env := newTestEnv(t, NewConf(blockStoreRootDir, maxFileSize))
	defer env.Cleanup()
	provider := env.provider
	store1, err := provider.OpenBlockStore("ledger1")
	require.NoError(t, err)
	store2, _ := provider.CreateBlockStore("ledger2")

	for _, b := range blocks1 {
		store1.AddBlock(b)
	}

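	// commit the second set of blocks to ledger2 before shutting both stores down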
	for _, b := range blocks2 {
		store2.AddBlock(b)
	}

	store1.Shutdown()
	store2.Shutdown()
	provider.Close()

	require.NoError(t, ResetBlockStore(blockStoreRootDir))
	// test load and clear preResetHeight for ledger1 and ledger2
	ledgerIDs := []string{"ledger1", "ledger2"}
	h, err := LoadPreResetHeight(blockStoreRootDir, ledgerIDs)
	require.NoError(t, err)
	require.Equal(t,
		map[string]uint64{
			"ledger1": 20,
			"ledger2": 40,
		},
		h,
	)

	env = newTestEnv(t, NewConf(blockStoreRootDir, maxFileSize))
	provider = env.provider
	store1, _ = provider.OpenBlockStore("ledger1")
	store2, _ = provider.CreateBlockStore("ledger2")
	assertBlockStorePostReset(t, store1, blocks1)
	assertBlockStorePostReset(t, store2, blocks2)

	require.NoError(t, ClearPreResetHeight(blockStoreRootDir, ledgerIDs))
	h, err = LoadPreResetHeight(blockStoreRootDir, ledgerIDs)
	require.NoError(t, err)
	require.Equal(t,
		map[string]uint64{},
		h,
	)

	// reset again to test load and clear preResetHeight for ledger2 only
	require.NoError(t, ResetBlockStore(blockStoreRootDir))
	ledgerIDs = []string{"ledger2"}
	h, err = LoadPreResetHeight(blockStoreRootDir, ledgerIDs)
	require.NoError(t, err)
	require.Equal(t,
		map[string]uint64{
			"ledger2": 40,
		},
		h,
	)
	require.NoError(t, ClearPreResetHeight(blockStoreRootDir, ledgerIDs))
	// verify that ledger1's preResetHeight file is not deleted
	h, err = LoadPreResetHeight(blockStoreRootDir, []string{"ledger1", "ledger2"})
	require.NoError(t, err)
	require.Equal(t,
		map[string]uint64{
			"ledger1": 20,
		},
		h,
	)
}

func TestRecordHeight(t *testing.T) {
	blockStoreRootDir := "/tmp/testBlockStoreReset"
	require.NoError(t, os.RemoveAll(blockStoreRootDir))
	env := newTestEnv(t, NewConf(blockStoreRootDir, 0))
	defer env.Cleanup()
	provider := env.provider
	store, err := provider.CreateBlockStore("ledger1")
	require.NoError(t, err)

	blocks := testutil.ConstructTestBlocks(t, 60)

	// Add 50 blocks, record, and require that the current height is recorded
	for _, b := range blocks[:50] {
		require.NoError(t, store.AddBlock(b))
	}
	ledgerDir := (&Conf{blockStorageDir: blockStoreRootDir}).getLedgerBlockDir("ledger1")
	require.NoError(t, recordHeightIfGreaterThanPreviousRecording(ledgerDir))
	assertRecordedHeight(t, ledgerDir, "50")

	// Add 10 more blocks and record again; the previously recorded height
	// is overwritten with the new current height
	for _, b := range blocks[50:] {
		require.NoError(t, store.AddBlock(b))
	}
	require.NoError(t, recordHeightIfGreaterThanPreviousRecording(ledgerDir))
	assertRecordedHeight(t, ledgerDir, "60")

	// Truncate the most recent block file to half its size and record again;
	// the previously recorded height is NOT overwritten because the current
	// height is now less than the previously recorded height
	lastFileNum, err := retrieveLastFileSuffix(ledgerDir)
	require.NoError(t, err)
	lastFile := deriveBlockfilePath(ledgerDir, lastFileNum)
	fileInfo, err := os.Stat(lastFile)
	require.NoError(t, err)
	require.NoError(t, os.Truncate(lastFile, fileInfo.Size()/2))
	checkpointInfo, err := constructCheckpointInfoFromBlockFiles(ledgerDir)
	require.NoError(t, err)
	require.True(t, checkpointInfo.lastBlockNumber < 59)
	require.NoError(t, recordHeightIfGreaterThanPreviousRecording(ledgerDir))
	assertRecordedHeight(t, ledgerDir, "60")
}

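// assertBlocksDirOnlyFileWithGenesisBlock verifies that, after a reset to the genesis
// block, the ledger's block directory contains only the genesis block backup file and a
// single block file holding just the genesis block.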
func assertBlocksDirOnlyFileWithGenesisBlock(t *testing.T, ledgerDir string, genesisBlock *common.Block) {
	files, err := ioutil.ReadDir(ledgerDir)
	require.NoError(t, err)
	require.Len(t, files, 2)
	require.Equal(t, "__backupGenesisBlockBytes", files[0].Name())
	require.Equal(t, "blockfile_000000", files[1].Name())
	blockBytes, lastOffset, numBlocks, err := scanForLastCompleteBlock(ledgerDir, 0, 0)
	require.NoError(t, err)
	t.Logf("lastOffset=%d", lastOffset)
	require.Equal(t, 1, numBlocks)
	block0, err := deserializeBlock(blockBytes)
	require.NoError(t, err)
	require.Equal(t, genesisBlock, block0)
	fileInfo, err := os.Stat(deriveBlockfilePath(ledgerDir, 0))
	require.NoError(t, err)
	require.Equal(t, fileInfo.Size(), lastOffset)
}

// assertBlockStorePostReset verifies that a reset store reports a height of 1
// (genesis block only) and that the originally committed blocks can be
// re-committed and retrieved.
func assertBlockStorePostReset(t *testing.T, store blkstorage.BlockStore, originallyCommittedBlocks []*common.Block) {
	bcInfo, _ := store.GetBlockchainInfo()
	t.Logf("bcInfo = %s", spew.Sdump(bcInfo))
	require.Equal(t,
		&common.BlockchainInfo{
			Height:            1,
			CurrentBlockHash:  protoutil.BlockHeaderHash(originallyCommittedBlocks[0].Header),
			PreviousBlockHash: nil,
		},
		bcInfo)

	blk, err := store.RetrieveBlockByNumber(0)
	require.NoError(t, err)
	require.Equal(t, originallyCommittedBlocks[0], blk)

	blk, err = store.RetrieveBlockByNumber(1)
	require.Error(t, err)
	require.Equal(t, err, blkstorage.ErrNotFoundInIndex)

	err = store.AddBlock(originallyCommittedBlocks[0])
	require.EqualError(t, err, "block number should have been 1 but was 0")

	for _, b := range originallyCommittedBlocks[1:] {
		require.NoError(t, store.AddBlock(b))
	}

	for i := 0; i < len(originallyCommittedBlocks); i++ {
		blk, err := store.RetrieveBlockByNumber(uint64(i))
		require.NoError(t, err)
		require.Equal(t, originallyCommittedBlocks[i], blk)
	}
}

// assertRecordedHeight verifies the height recorded in the pre-reset height file.
func assertRecordedHeight(t *testing.T, ledgerDir, expectedRecordedHt string) {
	bytes, err := ioutil.ReadFile(path.Join(ledgerDir, fileNamePreRestHt))
	require.NoError(t, err)
	require.Equal(t, expectedRecordedHt, string(bytes))
}

// testutilEstimateTotalSizeOnDisk estimates the on-disk size of the given blocks:
// the serialized block bytes plus the varint length prefix written before each block.
func testutilEstimateTotalSizeOnDisk(t *testing.T, blocks []*common.Block) int {
	size := 0
	for _, block := range blocks {
		by, _, err := serializeBlock(block)
		require.NoError(t, err)
		blockBytesSize := len(by)
		encodedLen := proto.EncodeVarint(uint64(blockBytesSize))
		size += blockBytesSize + len(encodedLen)
	}
	return size
}