// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	// importBatchSize is the number of blocks decoded from the RLP stream and
	// handed to InsertChain per batch in ImportChain.
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		// If both streams point at the same file, write only once (to stderr)
		// to avoid duplicating the message.
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

// StartNode boots up the given protocol stack and spawns a goroutine that
// initiates shutdown on the first SIGINT/SIGTERM. Ten further interrupts
// force a loud panic so a wedged shutdown can be diagnosed from the stack
// trace.
func StartNode(stack *node.Node) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)
		<-sigc
		log.Info("Got interrupt, shutting down...")
		// Stop asynchronously so additional interrupts can still be counted
		// below while the shutdown is in progress.
		go stack.Stop()
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
			}
		}
		debug.Exit() // ensure trace and CPU profile data is flushed.
		debug.LoudPanic("boom")
	}()
}

// ImportChain reads RLP-encoded blocks from the file fn (transparently
// gunzipping when the name ends in ".gz") and inserts them into chain in
// batches of importBatchSize blocks. A SIGINT/SIGTERM received while the
// import is running stops it cleanly at the next batch boundary.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	// Closing interrupt (after signal.Stop above has detached it) wakes the
	// goroutine below with ok == false, guaranteeing it exits.
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	// checkInterrupt reports, without blocking, whether a stop was requested.
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0 // total blocks decoded so far; used for error reporting
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import first block (the genesis block is already in the
			// database; decrementing i reuses this batch slot)
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

// missingBlocks returns the suffix of blocks starting at the first block the
// chain does not yet contain; nil means every block is already present.
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block, state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}
190 func ExportChain(blockchain *core.BlockChain, fn string) error { 191 log.Info("Exporting blockchain", "file", fn) 192 193 // Open the file handle and potentially wrap with a gzip stream 194 fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) 195 if err != nil { 196 return err 197 } 198 defer fh.Close() 199 200 var writer io.Writer = fh 201 if strings.HasSuffix(fn, ".gz") { 202 writer = gzip.NewWriter(writer) 203 defer writer.(*gzip.Writer).Close() 204 } 205 // Iterate over the blocks and export them 206 if err := blockchain.Export(writer); err != nil { 207 return err 208 } 209 log.Info("Exported blockchain", "file", fn) 210 211 return nil 212 } 213 214 // ExportAppendChain exports a blockchain into the specified file, appending to 215 // the file if data already exists in it. 216 func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error { 217 log.Info("Exporting blockchain", "file", fn) 218 219 // Open the file handle and potentially wrap with a gzip stream 220 fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) 221 if err != nil { 222 return err 223 } 224 defer fh.Close() 225 226 var writer io.Writer = fh 227 if strings.HasSuffix(fn, ".gz") { 228 writer = gzip.NewWriter(writer) 229 defer writer.(*gzip.Writer).Close() 230 } 231 // Iterate over the blocks and export them 232 if err := blockchain.ExportN(writer, first, last); err != nil { 233 return err 234 } 235 log.Info("Exported blockchain to", "file", fn) 236 return nil 237 } 238 239 // ImportPreimages imports a batch of exported hash preimages into the database. 
240 func ImportPreimages(db *ethdb.LDBDatabase, fn string) error { 241 log.Info("Importing preimages", "file", fn) 242 243 // Open the file handle and potentially unwrap the gzip stream 244 fh, err := os.Open(fn) 245 if err != nil { 246 return err 247 } 248 defer fh.Close() 249 250 var reader io.Reader = fh 251 if strings.HasSuffix(fn, ".gz") { 252 if reader, err = gzip.NewReader(reader); err != nil { 253 return err 254 } 255 } 256 stream := rlp.NewStream(reader, 0) 257 258 // Import the preimages in batches to prevent disk trashing 259 preimages := make(map[common.Hash][]byte) 260 261 for { 262 // Read the next entry and ensure it's not junk 263 var blob []byte 264 265 if err := stream.Decode(&blob); err != nil { 266 if err == io.EOF { 267 break 268 } 269 return err 270 } 271 // Accumulate the preimages and flush when enough ws gathered 272 preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob) 273 if len(preimages) > 1024 { 274 if err := core.WritePreimages(db, 0, preimages); err != nil { 275 return err 276 } 277 preimages = make(map[common.Hash][]byte) 278 } 279 } 280 // Flush the last batch preimage data 281 if len(preimages) > 0 { 282 return core.WritePreimages(db, 0, preimages) 283 } 284 return nil 285 } 286 287 // ExportPreimages exports all known hash preimages into the specified file, 288 // truncating any data already present in the file. 
289 func ExportPreimages(db *ethdb.LDBDatabase, fn string) error { 290 log.Info("Exporting preimages", "file", fn) 291 292 // Open the file handle and potentially wrap with a gzip stream 293 fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) 294 if err != nil { 295 return err 296 } 297 defer fh.Close() 298 299 var writer io.Writer = fh 300 if strings.HasSuffix(fn, ".gz") { 301 writer = gzip.NewWriter(writer) 302 defer writer.(*gzip.Writer).Close() 303 } 304 // Iterate over the preimages and export them 305 it := db.NewIteratorWithPrefix([]byte("secure-key-")) 306 for it.Next() { 307 if err := rlp.Encode(writer, it.Value()); err != nil { 308 return err 309 } 310 } 311 log.Info("Exported preimages", "file", fn) 312 return nil 313 }