// Modifications Copyright 2022 The klaytn Authors
// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from cmd/utils/nodecmd/snapshot.go (2022/07/08).
// Modified and improved for the klaytn development.

package nodecmd

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/klaytn/klaytn/blockchain/state"
	"github.com/klaytn/klaytn/cmd/utils"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/snapshot"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/klaytn/klaytn/storage/statedb"
	"github.com/urfave/cli/v2"
)

// SnapshotCommand groups the snapshot-related subcommands:
// verify-state, trace-trie and iterate-triedb.
var SnapshotCommand = &cli.Command{
	Name:        "snapshot",
	Usage:       "A set of commands based on the snapshot",
	Description: "",
	Subcommands: []*cli.Command{
		{
			Name:      "verify-state",
			Usage:     "Recalculate state hash based on the snapshot for verification",
			ArgsUsage: "<root>",
			Action:    utils.MigrateFlags(verifyState),
			Flags:     utils.SnapshotFlags,
			Description: `
klay snapshot verify-state <state-root>
will traverse the whole accounts and storages set based on the specified
snapshot and recalculate the root hash of state for verification.
In other words, this command does the snapshot to trie conversion.
`,
		},
		{
			Name:      "trace-trie",
			Usage:     "trace all trie nodes for verification",
			ArgsUsage: "<root>",
			Action:    utils.MigrateFlags(traceTrie),
			Flags:     utils.SnapshotFlags,
			Description: `
klaytn statedb trace-trie <state-root>
trace all account and storage nodes to find missing data
during the migration process.
Start tracing from the state root of the last block,
reading all nodes and logging the missing nodes.
`,
		},
		{
			Name:      "iterate-triedb",
			Usage:     "Iterate StateTrie DB for node count",
			ArgsUsage: "<root>",
			Action:    utils.MigrateFlags(iterateTrie),
			Flags:     utils.SnapshotFlags,
			Description: `
klaytn statedb iterate-triedb
Count the number of nodes in the state-trie db.
`,
		},
	},
}

// Package-level counters shared by the concurrent trie-tracing goroutines.
// They are bucketed by the node-iterator's Type string in doTraceTrie;
// mutex guards every read and write of them.
var (
	midAccountCnt  = uint64(0) // nodes reported as type "state"
	midStorageCnt  = uint64(0) // nodes reported as type "storage"
	codeCnt        = uint64(0) // nodes reported as type "code"
	leafAccountCnt = uint64(0) // nodes reported as type "state_leaf"
	leafStorageCnt = uint64(0) // nodes reported as type "storage_leaf"
	unknownCnt     = uint64(0) // nodes of any other type
	mutex          = &sync.Mutex{}
)

// getConfig returns a database config with the given context.
96 func getConfig(ctx *cli.Context) *database.DBConfig { 97 return &database.DBConfig{ 98 Dir: "chaindata", 99 DBType: database.DBType(ctx.String(utils.DbTypeFlag.Name)).ToValid(), 100 SingleDB: ctx.Bool(utils.SingleDBFlag.Name), 101 NumStateTrieShards: ctx.Uint(utils.NumStateTrieShardsFlag.Name), 102 OpenFilesLimit: database.GetOpenFilesLimit(), 103 104 LevelDBCacheSize: ctx.Int(utils.LevelDBCacheSizeFlag.Name), 105 LevelDBCompression: database.LevelDBCompressionType(ctx.Int(utils.LevelDBCompressionTypeFlag.Name)), 106 EnableDBPerfMetrics: !ctx.Bool(utils.DBNoPerformanceMetricsFlag.Name), 107 108 DynamoDBConfig: &database.DynamoDBConfig{ 109 TableName: ctx.String(utils.DynamoDBTableNameFlag.Name), 110 Region: ctx.String(utils.DynamoDBRegionFlag.Name), 111 IsProvisioned: ctx.Bool(utils.DynamoDBIsProvisionedFlag.Name), 112 ReadCapacityUnits: ctx.Int64(utils.DynamoDBReadCapacityFlag.Name), 113 WriteCapacityUnits: ctx.Int64(utils.DynamoDBWriteCapacityFlag.Name), 114 PerfCheck: !ctx.Bool(utils.DBNoPerformanceMetricsFlag.Name), 115 }, 116 117 RocksDBConfig: &database.RocksDBConfig{ 118 CacheSize: ctx.Uint64(utils.RocksDBCacheSizeFlag.Name), 119 DumpMallocStat: ctx.Bool(utils.RocksDBDumpMallocStatFlag.Name), 120 DisableMetrics: ctx.Bool(utils.RocksDBDisableMetricsFlag.Name), 121 Secondary: ctx.Bool(utils.RocksDBSecondaryFlag.Name), 122 CompressionType: ctx.String(utils.RocksDBCompressionTypeFlag.Name), 123 BottommostCompressionType: ctx.String(utils.RocksDBBottommostCompressionTypeFlag.Name), 124 FilterPolicy: ctx.String(utils.RocksDBFilterPolicyFlag.Name), 125 MaxOpenFiles: ctx.Int(utils.RocksDBMaxOpenFilesFlag.Name), 126 CacheIndexAndFilter: ctx.Bool(utils.RocksDBCacheIndexAndFilterFlag.Name), 127 }, 128 } 129 } 130 131 // parseRoot parse the given hex string to hash. 
132 func parseRoot(input string) (common.Hash, error) { 133 var h common.Hash 134 if err := h.UnmarshalText([]byte(input)); err != nil { 135 return h, err 136 } 137 return h, nil 138 } 139 140 // verifyState verifies if the stored snapshot data is correct or not. 141 // if a root hash isn't given, the root hash of current block is investigated. 142 func verifyState(ctx *cli.Context) error { 143 stack := MakeFullNode(ctx) 144 db := stack.OpenDatabase(getConfig(ctx)) 145 head := db.ReadHeadBlockHash() 146 if head == (common.Hash{}) { 147 // Corrupt or empty database, init from scratch 148 return errors.New("empty database") 149 } 150 // Make sure the entire head block is available 151 headBlock := db.ReadBlockByHash(head) 152 if headBlock == nil { 153 return fmt.Errorf("head block missing: %v", head.String()) 154 } 155 156 snaptree, err := snapshot.New(db, statedb.NewDatabase(db), 256, headBlock.Root(), false, false, false) 157 if err != nil { 158 logger.Error("Failed to open snapshot tree", "err", err) 159 return err 160 } 161 if ctx.NArg() > 1 { 162 logger.Error("Too many arguments given") 163 return errors.New("too many arguments") 164 } 165 root := headBlock.Root() 166 if ctx.NArg() == 1 { 167 root, err = parseRoot(ctx.Args().First()) 168 if err != nil { 169 logger.Error("Failed to resolve state root", "err", err) 170 return err 171 } 172 } 173 if err := snaptree.Verify(root); err != nil { 174 logger.Error("Failed to verify state", "root", root, "err", err) 175 return err 176 } 177 logger.Info("Verified the state", "root", root) 178 return nil 179 } 180 181 func traceTrie(ctx *cli.Context) error { 182 var childWait, logWait sync.WaitGroup 183 184 stack := MakeFullNode(ctx) 185 dbm := stack.OpenDatabase(getConfig(ctx)) 186 head := dbm.ReadHeadBlockHash() 187 if head == (common.Hash{}) { 188 // Corrupt or empty database, init from scratch 189 return errors.New("empty database") 190 } 191 // Make sure the entire head block is available 192 tmpHeadBlock := 
dbm.ReadBlockByHash(head) 193 if tmpHeadBlock == nil { 194 return fmt.Errorf("tmp head block missing: %v", head.String()) 195 } 196 197 blockNumber := (tmpHeadBlock.NumberU64() / 128) * 128 198 headBlock := dbm.ReadBlockByNumber(blockNumber) 199 if headBlock == nil { 200 return fmt.Errorf("head block missing: %v", head.String()) 201 } 202 203 root := headBlock.Root() 204 if root == (common.Hash{}) { 205 // Corrupt or empty database, init from scratch 206 return errors.New("empty root") 207 } 208 209 logger.Info("Trace Start", "BlockNum", blockNumber) 210 211 sdb, err := state.New(root, state.NewDatabase(dbm), nil, nil) 212 if err != nil { 213 return fmt.Errorf("Failed to open newDB trie : %v", err) 214 } 215 trieDB := sdb.Database().TrieDB() 216 217 // Get root-node childrens to create goroutine by number of childrens 218 children, err := trieDB.NodeChildren(root.ExtendZero()) 219 if err != nil { 220 return fmt.Errorf("Fail get childrens of root : %v", err) 221 } 222 223 midAccountCnt, midStorageCnt, codeCnt, leafAccountCnt, leafStorageCnt, unknownCnt = 0, 0, 0, 0, 0, 0 224 endFlag := false 225 226 childWait.Add(len(children)) 227 logWait.Add(1) 228 // create logging goroutine 229 go func() { 230 defer logWait.Done() 231 for !endFlag { 232 time.Sleep(time.Second * 5) 233 logger.Info("Trie Tracer", "AccNode", midAccountCnt, "AccLeaf", leafAccountCnt, "StrgNode", midStorageCnt, "StrgLeaf", leafStorageCnt, "Unknown", unknownCnt, "CodeAcc", codeCnt) 234 } 235 logger.Info("Trie Tracer Finished", "AccNode", midAccountCnt, "AccLeaf", leafAccountCnt, "StrgNode", midStorageCnt, "StrgLeaf", leafStorageCnt, "Unknown", unknownCnt, "CodeAcc", codeCnt) 236 }() 237 238 // Create goroutine by number of childrens 239 for _, child := range children { 240 go func(child common.Hash) { 241 defer childWait.Done() 242 doTraceTrie(sdb.Database(), child) 243 }(child.Unextend()) 244 } 245 246 childWait.Wait() 247 endFlag = true 248 logWait.Wait() 249 return nil 250 } 251 252 func 
doTraceTrie(db state.Database, root common.Hash) (resultErr error) {
	logger.Info("Trie Tracer Start", "Hash Root", root)
	// Create and iterate a state trie rooted in a sub-node.
	subState, err := state.New(root, db, nil, nil)
	if err != nil {
		// NOTE(review): this previously logged with a malformed key/value list
		// and then panicked, killing the whole process from inside a worker
		// goroutine. Report the error and stop this worker instead.
		logger.Error("Cannot open trie DB", "root", root, "err", err)
		return err
	}

	it := state.NewNodeIterator(subState)
	for it.Next() {
		// Bucket each visited node by its reported type; the counters are
		// package-level and shared with the other tracer goroutines.
		mutex.Lock()
		switch it.Type {
		case "state":
			midAccountCnt++
		case "storage":
			midStorageCnt++
		case "code":
			codeCnt++
		case "state_leaf":
			leafAccountCnt++
		case "storage_leaf":
			leafStorageCnt++
		default:
			unknownCnt++
		}
		mutex.Unlock()
	}
	if it.Error != nil {
		// Propagate the iterator failure instead of silently returning nil.
		logger.Error("Error Finished", "Root Hash", root, "Message", it.Error)
		resultErr = it.Error
	}
	mutex.Lock()
	logger.Info("Trie Tracer Finished", "Root Hash", root, "AccNode", midAccountCnt, "AccLeaf", leafAccountCnt, "StrgNode", midStorageCnt, "StrgLeaf", leafStorageCnt, "Unknown", unknownCnt, "CodeAcc", codeCnt)
	mutex.Unlock()
	return resultErr
}

// iterateTrie walks every key/value pair of the state-trie disk database,
// counting all entries and the entries whose key or value is nil/empty,
// while a helper goroutine logs progress every 5 seconds.
//
// NOTE(review): the previous version leaked the logging goroutine (it looped
// forever) and read cnt/nilCnt without synchronization; it is now stopped via
// a done channel and the counters are mutex-guarded.
func iterateTrie(ctx *cli.Context) error {
	stack := MakeFullNode(ctx)
	dbm := stack.OpenDatabase(getConfig(ctx))
	sdb, err := state.New(common.Hash{}, state.NewDatabase(dbm), nil, nil)
	if err != nil {
		return fmt.Errorf("failed to open the state trie: %v", err)
	}

	logger.Info("TrieDB Iterator Start", "info", "node count : all node count, nil node count : key or value is nil node count")
	cnt, nilCnt := uint64(0), uint64(0)
	done := make(chan struct{})
	var logWait sync.WaitGroup
	logWait.Add(1)
	// Progress logger; terminated via the done channel once iteration ends.
	go func() {
		defer logWait.Done()
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				mutex.Lock()
				logger.Info("TrieDB Iterator", "node count", cnt, "nil node count", nilCnt)
				mutex.Unlock()
			}
		}
	}()

	it := sdb.Database().TrieDB().DiskDB().GetStateTrieDB().NewIterator(nil, nil)
	defer it.Release()
	for it.Next() {
		mutex.Lock()
		cnt++
		if it.Key() == nil || it.Value() == nil || bytes.Equal(it.Key(), []byte("")) || bytes.Equal(it.Value(), []byte("")) {
			nilCnt++
		}
		mutex.Unlock()
	}
	close(done)
	logWait.Wait()
	logger.Info("TrieDB Iterator finished", "total node count", cnt, "nil node count", nilCnt)
	return nil
}