// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	crand "crypto/rand"
	"errors"
	"fmt"
	"math"
	"math/big"
	mrand "math/rand"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/consensus"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/params"
)

const (
	headerCacheLimit = 512  // Maximum number of recent headers to keep cached
	tdCacheLimit     = 1024 // Maximum number of recent total difficulties to keep cached
	numberCacheLimit = 2048 // Maximum number of recent hash->number mappings to keep cached
)

// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
//
// HeaderChain is responsible for maintaining the header chain including the
// header query and updating.
//
// The components maintained by headerchain includes: (1) total difficulty
// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
// and (5) head header flag.
//
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
	config *params.ChainConfig

	chainDb       ethdb.Database
	genesisHeader *types.Header

	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache // Cache for the most recent block headers
	tdCache     *lru.Cache // Cache for the most recent block total difficulties
	numberCache *lru.Cache // Cache for the most recent block numbers

	procInterrupt func() bool // Callback polled to abort long-running operations early

	rand   *mrand.Rand      // Crypto-seeded PRNG used for sampling seal checks
	engine consensus.Engine // Consensus engine used for header verification
}

// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	numberCache, _ := lru.New(numberCacheLimit)

	// Seed a fast but crypto originating random generator
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}

	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   headerCache,
		tdCache:       tdCache,
		numberCache:   numberCache,
		procInterrupt: procInterrupt,
		rand:          mrand.New(mrand.NewSource(seed.Int64())),
		engine:        engine,
	}

	// The genesis header must already be present in the database; a missing
	// genesis means the chain was never initialized.
	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}

	// Start at genesis, then fast-forward to the stored head block if it
	// resolves to a known header.
	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())

	return hc, nil
}

// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database. Returns nil if the mapping is unknown.
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
	if cached, ok := hc.numberCache.Get(hash); ok {
		number := cached.(uint64)
		return &number
	}
	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
	if number != nil {
		hc.numberCache.Add(hash, *number)
	}
	return number
}

// headerWriteResult summarizes the outcome of a writeHeaders call.
type headerWriteResult struct {
	status     WriteStatus   // Canonical/side/none status of the inserted chain
	ignored    int           // Count of headers skipped because already known
	imported   int           // Count of headers actually written
	lastHash   common.Hash   // Hash of the last header in the supplied batch
	lastHeader *types.Header // Last header in the supplied batch
}

// WriteHeaders writes a chain of headers into the local chain, given that the parents
// are already known. If the total difficulty of the newly inserted chain becomes
// greater than the current known TD, the canonical chain is reorged.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
	if len(headers) == 0 {
		return &headerWriteResult{}, nil
	}
	// The parent of the first header must be known, otherwise we cannot
	// compute total difficulties for the batch.
	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
	if ptd == nil {
		return &headerWriteResult{}, consensus.ErrUnknownAncestor
	}
	var (
		lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number
		lastHash   = headers[0].ParentHash          // Last imported header hash
		newTD      = new(big.Int).Set(ptd)          // Total difficulty of inserted chain

		lastHeader    *types.Header
		inserted      []numberHash // Ephemeral lookup of number/hash for the chain
		firstInserted = -1         // Index of the first non-ignored header
	)

	batch := hc.chainDb.NewBatch()
	parentKnown := true // Set to true to force hc.HasHeader check the first iteration
	for i, header := range headers {
		var hash common.Hash
		// The headers have already been validated at this point, so we already
		// know that it's a contiguous chain, where
		// headers[i].Hash() == headers[i+1].ParentHash
		if i < len(headers)-1 {
			hash = headers[i+1].ParentHash
		} else {
			hash = header.Hash()
		}
		number := header.Number.Uint64()
		newTD.Add(newTD, header.Difficulty)

		// If the parent was not present, store it
		// If the header is already known, skip it, otherwise store
		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
		if !alreadyKnown {
			// Irrelevant of the canonical status, write the TD and header to the database.
			rawdb.WriteTd(batch, hash, number, newTD)
			hc.tdCache.Add(hash, new(big.Int).Set(newTD))

			rawdb.WriteHeader(batch, header)
			inserted = append(inserted, numberHash{number, hash})
			hc.headerCache.Add(hash, header)
			hc.numberCache.Add(hash, number)
			if firstInserted < 0 {
				firstInserted = i
			}
		}
		parentKnown = alreadyKnown
		lastHeader, lastHash, lastNumber = header, hash, number
	}

	// Skip the slow disk write of all headers if interrupted.
	if hc.procInterrupt() {
		log.Debug("Premature abort during headers import")
		return &headerWriteResult{}, errors.New("aborted")
	}
	// Commit to disk!
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write headers", "error", err)
	}
	batch.Reset()

	var (
		head    = hc.CurrentHeader().Number.Uint64()
		localTD = hc.GetTd(hc.currentHeaderHash, head)
		status  = SideStatTy
	)
	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := newTD.Cmp(localTD) > 0
	if !reorg && newTD.Cmp(localTD) == 0 {
		if lastNumber < head {
			reorg = true
		} else if lastNumber == head {
			// Equal TD and equal height: flip a coin to decide, which
			// randomizes the selfish-mining advantage away.
			reorg = mrand.Float64() < 0.5
		}
	}
	// If the parent of the (first) block is already the canon header,
	// we don't have to go backwards to delete canon blocks, but
	// simply pile them onto the existing chain
	chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
	if reorg {
		// If the header can be added into canonical chain, adjust the
		// header chain markers(canonical indexes and head header flag).
		//
		// Note all markers should be written atomically.
		markerBatch := batch // we can reuse the batch to keep allocs down
		if !chainAlreadyCanon {
			// Delete any canonical number assignments above the new head
			for i := lastNumber + 1; ; i++ {
				hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
				if hash == (common.Hash{}) {
					break
				}
				rawdb.DeleteCanonicalHash(markerBatch, i)
			}
			// Overwrite any stale canonical number assignments, going
			// backwards from the first header in this import
			var (
				headHash   = headers[0].ParentHash          // inserted[0].parent?
				headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
				headHeader = hc.GetHeader(headHash, headNumber)
			)
			for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
				rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
				headHash = headHeader.ParentHash
				headNumber = headHeader.Number.Uint64() - 1
				headHeader = hc.GetHeader(headHash, headNumber)
			}
			// If some of the older headers were already known, but obtained canon-status
			// during this import batch, then we need to write that now
			// Further down, we continue writing the status for the ones that
			// were not already known
			for i := 0; i < firstInserted; i++ {
				hash := headers[i].Hash()
				num := headers[i].Number.Uint64()
				rawdb.WriteCanonicalHash(markerBatch, hash, num)
				rawdb.WriteHeadHeaderHash(markerBatch, hash)
			}
		}
		// Extend the canonical chain with the new headers
		for _, hn := range inserted {
			rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
			rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
		}
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to write header markers into disk", "err", err)
		}
		markerBatch.Reset()
		// Last step update all in-memory head header markers
		hc.currentHeaderHash = lastHash
		hc.currentHeader.Store(types.CopyHeader(lastHeader))
		headHeaderGauge.Update(lastHeader.Number.Int64())

		// Chain status is canonical since this insert was a reorg.
		// Note that all inserts which have higher TD than existing are 'reorg'.
		status = CanonStatTy
	}

	if len(inserted) == 0 {
		status = NonStatTy
	}
	return &headerWriteResult{
		status:     status,
		ignored:    len(headers) - len(inserted),
		imported:   len(inserted),
		lastHash:   lastHash,
		lastHeader: lastHeader,
	}, nil
}

// ValidateHeaderChain sanity-checks ordering/linkage of the given headers and
// runs (sampled) consensus verification on them. Returns the index of the
// first failing header together with the error.
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
			hash := chain[i].Hash()
			parentHash := chain[i-1].Hash()
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
		}
		// If the header is a banned one, straight out abort
		if BadHashes[chain[i].ParentHash] {
			return i - 1, ErrBannedHash
		}
		// If it's the last header in the chunk, we need to check it too
		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
			return i, ErrBannedHash
		}
	}

	// Generate the list of seal verification requests, and start the parallel verifier
	seals := make([]bool, len(chain))
	if checkFreq != 0 {
		// In case of checkFreq == 0 all seals are left false.
		// Pick roughly one random index per checkFreq-sized window.
		for i := 0; i <= len(seals)/checkFreq; i++ {
			index := i*checkFreq + hc.rand.Intn(checkFreq)
			if index >= len(seals) {
				index = len(seals) - 1
			}
			seals[index] = true
		}
		// Last should always be verified to avoid junk.
		seals[len(seals)-1] = true
	}

	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}

	return 0, nil
}

// InsertHeaderChain inserts the given headers.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written,
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
371 func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) { 372 if hc.procInterrupt() { 373 return 0, errors.New("aborted") 374 } 375 res, err := hc.writeHeaders(chain) 376 377 // Report some public statistics so the user has a clue what's going on 378 context := []interface{}{ 379 "count", res.imported, 380 "elapsed", common.PrettyDuration(time.Since(start)), 381 } 382 if err != nil { 383 context = append(context, "err", err) 384 } 385 if last := res.lastHeader; last != nil { 386 context = append(context, "number", last.Number, "hash", res.lastHash) 387 if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { 388 context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) 389 } 390 } 391 if res.ignored > 0 { 392 context = append(context, []interface{}{"ignored", res.ignored}...) 393 } 394 log.Info("Imported new block headers", context...) 395 return res.status, err 396 } 397 398 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 399 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 400 // number of blocks to be individually checked before we reach the canonical chain. 401 // 402 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			// NOTE(review): the canonical hash at `number` is re-read after
			// fetching the ancestor hash, presumably to detect a concurrent
			// rewind happening between the two reads — confirm intent.
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		// Not on the canonical chain (yet): step back one block at a time,
		// bounded by the caller-supplied non-canonical budget.
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
	// Short circuit if the td's already in the cache, retrieve otherwise
	if cached, ok := hc.tdCache.Get(hash); ok {
		return cached.(*big.Int)
	}
	td := rawdb.ReadTd(hc.chainDb, hash, number)
	if td == nil {
		return nil
	}
	// Cache the found body for next time and return
	hc.tdCache.Add(hash, td)
	return td
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
455 func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { 456 // Short circuit if the header's already in the cache, retrieve otherwise 457 if header, ok := hc.headerCache.Get(hash); ok { 458 return header.(*types.Header) 459 } 460 header := rawdb.ReadHeader(hc.chainDb, hash, number) 461 if header == nil { 462 return nil 463 } 464 // Cache the found header for next time and return 465 hc.headerCache.Add(hash, header) 466 return header 467 } 468 469 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 470 // found. 471 func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header { 472 number := hc.GetBlockNumber(hash) 473 if number == nil { 474 return nil 475 } 476 return hc.GetHeader(hash, *number) 477 } 478 479 // HasHeader checks if a block header is present in the database or not. 480 // In theory, if header is present in the database, all relative components 481 // like td and hash->number should be present too. 482 func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { 483 if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) { 484 return true 485 } 486 return rawdb.HasHeader(hc.chainDb, hash, number) 487 } 488 489 // GetHeaderByNumber retrieves a block header from the database by number, 490 // caching it (associated with its hash) if found. 491 func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { 492 hash := rawdb.ReadCanonicalHash(hc.chainDb, number) 493 if hash == (common.Hash{}) { 494 return nil 495 } 496 return hc.GetHeader(hash, number) 497 } 498 499 func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash { 500 return rawdb.ReadCanonicalHash(hc.chainDb, number) 501 } 502 503 // CurrentHeader retrieves the current head header of the canonical chain. The 504 // header is retrieved from the HeaderChain's internal cache. 
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load().(*types.Header)
}

// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}

type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before head header is updated. The method will return the actual block it
	// updated the head to (missing state) and a flag if setHead should continue
	// rewinding till that forcefully (exceeded ancient limits)
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)

// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true // true only on the first loop iteration
	)
	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind block chain to new head.
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = parent.Hash()

		// Notably, since geth has the possibility for setting the head to a low
		// height which is even lower than ancient head.
		// In order to ensure that the head is always no higher than the data in
		// the database (ancient store or active store), we need to update head
		// first then remove the relative data from the database.
		//
		// Update head first(head fast block, head full block) before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && newHead < head {
				log.Warn("Force rewinding till ancient limit", "head", newHead)
				head = newHead
			}
		}
		// Update head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling data in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				if delFn != nil {
					delFn(batch, hash, num)
				}
				rawdb.DeleteHeader(batch, hash, num)
				rawdb.DeleteTd(batch, hash, num)
			}
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rewind block", "error", err)
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
}

// SetGenesis sets a new genesis block header for the chain
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}

// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }

// Engine retrieves the header chain's consensus engine.
func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }

// GetBlock implements consensus.ChainReader, and returns nil for every input as
// a header chain does not have blocks available for retrieval.
func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return nil
}