github.com/shrimpyuk/bor@v0.2.15-0.20220224151350-fb4ec6020bae/core/rawdb/accessors_chain.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
	data, _ := db.Ancient(freezerHashTable, number)
	if len(data) == 0 {
		data, _ = db.Get(headerHashKey(number))
		// In the background, the freezer is moving data from leveldb to flat files.
		// So during the first check for the ancient db, the data is not yet in there,
		// but by the time we reach into leveldb, the data has already been moved.
		// That would result in a not-found error.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerHashTable, number)
		}
	}
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}

// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

type NumberHash struct {
	Number uint64
	Hash   common.Hash
}

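// Illustrative sketch, not part of the original file: it combines ReadAllHashes
// and ReadCanonicalHash (both defined above) to list the non-canonical sibling
// hashes stored at a given height. The helper name readForkedHashes is
// hypothetical; ethdb.Database is used because both an Iteratee and a Reader
// are needed.
func readForkedHashes(db ethdb.Database, number uint64) []common.Hash {
	canonical := ReadCanonicalHash(db, number)

	var forked []common.Hash
	for _, hash := range ReadAllHashes(db, number) {
		if hash != canonical {
			forked = append(forked, hash)
		}
	}
	return forked
}
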
// ReadAllHashesInRange retrieves all the hashes assigned to blocks in a certain
// range of heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
	var (
		start     = encodeBlockNumber(first)
		keyLength = len(headerPrefix) + 8 + 32
		hashes    = make([]*NumberHash, 0, 1+last-first)
		it        = db.NewIterator(headerPrefix, start)
	)
	defer it.Release()
	for it.Next() {
		key := it.Key()
		if len(key) != keyLength {
			continue
		}
		num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
		if num > last {
			break
		}
		hash := common.BytesToHash(key[len(key)-32:])
		hashes = append(hashes, &NumberHash{num, hash})
	}
	return hashes
}

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in
// the given chain range. If the number of accumulated entries reaches the given
// threshold, abort the iteration and return the partial result.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the number of accumulated entries reaches the limit, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}

// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes the hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}

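// Illustrative sketch, not part of the original file: paging through the
// canonical chain with ReadAllCanonicalHashes (above), using its limit
// parameter to bound each batch. The helper name forEachCanonicalHash and the
// batchSize parameter are hypothetical; the range is treated as half-open,
// matching how ReadAllCanonicalHashes stops at the key for `to`.
func forEachCanonicalHash(db ethdb.Iteratee, from, to uint64, batchSize int, fn func(number uint64, hash common.Hash)) {
	for from < to {
		numbers, hashes := ReadAllCanonicalHashes(db, from, to, batchSize)
		if len(numbers) == 0 {
			return // nothing (more) stored in this range
		}
		for i := range numbers {
			fn(numbers[i], hashes[i])
		}
		// Continue right after the last entry returned by this batch.
		from = numbers[len(numbers)-1] + 1
	}
}
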
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}

// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// is fully synced, the last pivot will always be nil.
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
	data, _ := db.Get(fastTrieProgressKey)
	if len(data) == 0 {
		return 0
	}
	return new(big.Int).SetBytes(data).Uint64()
}

// WriteFastTrieProgress stores the fast sync trie progress counter to support
// retrieving it across restarts.
func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
		log.Crit("Failed to store fast sync trie progress", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest indexed block whose
// transaction indices have been indexed. If the corresponding entry is
// non-existent in the database, it means the indexing has been finished.
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block into the
// database.
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}

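// Illustrative sketch, not part of the original file: headers are indexed both
// by hash->number (ReadHeaderNumber) and by number->hash (ReadCanonicalHash),
// so checking whether a given hash is still canonical is a two-step lookup.
// The helper name isCanonical is hypothetical.
func isCanonical(db ethdb.Reader, hash common.Hash) bool {
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return false // hash unknown to this database
	}
	return ReadCanonicalHash(db, *number) == hash
}
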
// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}

// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background, the freezer is moving data from leveldb to flat files.
	// So during the first check for the ancient db, the data is not yet in there,
	// but by the time we reach into leveldb, the data has already been moved.
	// That would result in a not-found error.
	data, _ = db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	return nil // Can't find the data anywhere.
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}

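// Illustrative sketch, not part of the original file: headers are keyed by
// (number, hash), so reading a header back when only the hash is known means
// resolving the number via ReadHeaderNumber first. The helper name
// readHeaderByHash is hypothetical.
func readHeaderByHash(db ethdb.Reader, hash common.Hash) *types.Header {
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return nil // no hash->number mapping stored
	}
	return ReadHeader(db, hash, *number)
}
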
// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockBodyKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background, the freezer is moving data from leveldb to flat files.
	// So during the first check for the ancient db, the data is not yet in there,
	// but by the time we reach into leveldb, the data has already been moved.
	// That would result in a not-found error.
	data, _ = db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
	// If it's an ancient one, we don't need the canonical hash
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) == 0 {
		// Need to get the hash
		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
		// In the background, the freezer is moving data from leveldb to flat files.
		// So during the first check for the ancient db, the data is not yet in there,
		// but by the time we reach into leveldb, the data has already been moved.
		// That would result in a not-found error.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerBodiesTable, number)
		}
	}
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

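// Illustrative sketch, not part of the original file: decoding the raw bytes
// returned by ReadCanonicalBodyRLP (above) into a types.Body, mirroring what
// ReadBody does for a specific hash further below. The helper name
// readCanonicalBody is hypothetical.
func readCanonicalBody(db ethdb.Reader, number uint64) *types.Body {
	data := ReadCanonicalBodyRLP(db, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid canonical block body RLP", "number", number, "err", err)
		return nil
	}
	return body
}
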
// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}

// WriteBody stores a block body into the database.
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerTDKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background, the freezer is moving data from leveldb to flat files.
	// So during the first check for the ancient db, the data is not yet in there,
	// but by the time we reach into leveldb, the data has already been moved.
	// That would result in a not-found error.
	data, _ = db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}

// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

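// Illustrative sketch, not part of the original file: total difficulty is
// stored per block, and a child's TD is the parent's stored TD plus the
// child's own difficulty (the same accumulation WriteAncientBlocks performs
// below). The helper name writeChildTd is hypothetical; ethdb.Database is used
// because both read and write access are needed.
func writeChildTd(db ethdb.Database, header *types.Header) error {
	parentTd := ReadTd(db, header.ParentHash, header.Number.Uint64()-1)
	if parentTd == nil {
		return fmt.Errorf("total difficulty of parent %x missing", header.ParentHash)
	}
	WriteTd(db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(parentTd, header.Difficulty))
	return nil
}
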
// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockReceiptsKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background, the freezer is moving data from leveldb to flat files.
	// So during the first check for the ancient db, the data is not yet in there,
	// but by the time we reach into leveldb, the data has already been moved.
	// That would result in a not-found error.
	data, _ = db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}

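// Illustrative sketch, not part of the original file: the receipts returned by
// ReadRawReceipts (above) carry consensus fields only, which is already enough
// for simple aggregates such as counting a block's logs without deriving any
// metadata. The helper name countBlockLogs is hypothetical.
func countBlockLogs(db ethdb.Reader, hash common.Hash, number uint64) int {
	count := 0
	for _, receipt := range ReadRawReceipts(db, hash, number) {
		count += len(receipt.Logs)
	}
	return count
}
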
// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, so retrieve it alongside the receipts
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}

// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}

// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// storedReceiptRLP is the storage encoding of a receipt.
// Re-definition in core/types/receipt.go.
type storedReceiptRLP struct {
	PostStateOrStatus []byte
	CumulativeGasUsed uint64
	Logs              []*types.LogForStorage
}

// receiptLogs is a barebones version of ReceiptForStorage which only keeps
// the list of logs. When decoding a stored receipt into this object we
// avoid creating the bloom filter.
type receiptLogs struct {
	Logs []*types.Log
}

// DecodeRLP implements rlp.Decoder.
func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
	var stored storedReceiptRLP
	if err := s.Decode(&stored); err != nil {
		return err
	}
	r.Logs = make([]*types.Log, len(stored.Logs))
	for i, log := range stored.Logs {
		r.Logs[i] = (*types.Log)(log)
	}
	return nil
}

// deriveLogFields fills the logs in receiptLogs with information such as block number, tx hash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
	logIndex := uint(0)
	if len(txs) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for i := 0; i < len(receipts); i++ {
		txHash := txs[i].Hash()
		// The derived log fields can simply be set from the block and transaction
		for j := 0; j < len(receipts[i].Logs); j++ {
			receipts[i].Logs[j].BlockNumber = number
			receipts[i].Logs[j].BlockHash = hash
			receipts[i].Logs[j].TxHash = txHash
			receipts[i].Logs[j].TxIndex = uint(i)
			receipts[i].Logs[j].Index = logIndex
			logIndex++
		}
	}
	return nil
}

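// Illustrative sketch, not part of the original file: ReadReceipts (above)
// needs the chain config to derive the non-consensus fields; looking a single
// receipt up by transaction index is then just a bounds-checked slice access.
// The helper name readReceiptAt is hypothetical.
func readReceiptAt(db ethdb.Reader, hash common.Hash, number uint64, txIndex int, config *params.ChainConfig) *types.Receipt {
	receipts := ReadReceipts(db, hash, number, config)
	if txIndex < 0 || txIndex >= len(receipts) {
		return nil
	}
	return receipts[txIndex]
}
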
// ReadLogs retrieves the logs for all transactions in a block. The log fields
// are populated with metadata. In case the receipts or the block body
// are not found, nil is returned.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64) [][]*types.Log {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	receipts := []*receiptLogs{}
	if err := rlp.DecodeBytes(data, &receipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}

	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved, nil is returned.
//
// Note: due to concurrent download of the header and block body, the header (and
// thus the canonical hash) can be stored in the database while the body data is
// not (yet).
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}

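// Illustrative sketch, not part of the original file: ReadLogs (above) avoids
// decoding full receipts, so it is the cheaper choice when only a block's logs
// are needed, e.g. for address filtering. The helper name readLogsByAddress
// and the addr parameter are hypothetical.
func readLogsByAddress(db ethdb.Reader, hash common.Hash, number uint64, addr common.Address) []*types.Log {
	var matched []*types.Log
	for _, txLogs := range ReadLogs(db, hash, number) {
		for _, l := range txLogs {
			if l.Address == addr {
				matched = append(matched, l)
			}
		}
	}
	return matched
}
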
// WriteAncientBlocks writes entire block data into the ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, borReceipts []types.Receipts, td *big.Int) (int64, error) {
	var (
		tdSum         = new(big.Int).Set(td)
		stReceipts    []*types.ReceiptForStorage
		borStReceipts []*types.ReceiptForStorage
	)
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i, block := range blocks {
			// Convert receipts to storage format and sum up total difficulty.
			stReceipts = stReceipts[:0]
			for _, receipt := range receipts[i] {
				stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
			}

			// Convert bor receipts to storage format.
			borStReceipts = borStReceipts[:0]
			for _, borReceipt := range borReceipts[i] {
				borStReceipts = append(borStReceipts, (*types.ReceiptForStorage)(borReceipt))
			}

			header := block.Header()
			if i > 0 {
				tdSum.Add(tdSum, header.Difficulty)
			}
			if err := writeAncientBlock(op, block, header, stReceipts, borStReceipts, tdSum); err != nil {
				return err
			}
		}
		return nil
	})
}

func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, borReceipts []*types.ReceiptForStorage, td *big.Int) error {
	num := block.NumberU64()
	if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
		return fmt.Errorf("can't add block %d hash: %v", num, err)
	}
	if err := op.Append(freezerHeaderTable, num, header); err != nil {
		return fmt.Errorf("can't append block header %d: %v", num, err)
	}
	if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
		return fmt.Errorf("can't append block body %d: %v", num, err)
	}
	if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
		return fmt.Errorf("can't append block %d receipts: %v", num, err)
	}
	if err := op.Append(freezerDifficultyTable, num, td); err != nil {
		return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
	}
	if err := op.Append(freezerBorReceiptTable, num, borReceipts); err != nil {
		return fmt.Errorf("can't append block %d borReceipts: %v", num, err)
	}
	return nil
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)

	// delete bor receipt
	DeleteBorReceipt(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)

	// delete bor receipt
	DeleteBorReceipt(db, hash, number)
}

const badBlockToKeep = 10

type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// badBlockList implements the sort interface to allow sorting a list of
// bad blocks by their number in reverse order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	for _, bad := range badBlocks {
		if bad.Header.Hash() == hash {
			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
		}
	}
	return nil
}

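// Illustrative sketch, not part of the original file: ReadBadBlock (above)
// returns nil for unknown hashes, so a presence check plus number lookup is a
// thin wrapper around it. The helper name badBlockNumber is hypothetical.
func badBlockNumber(db ethdb.Reader, hash common.Hash) (uint64, bool) {
	bad := ReadBadBlock(db, hash)
	if bad == nil {
		return 0, false
	}
	return bad.NumberU64(), true
}
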
// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	var blocks []*types.Block
	for _, bad := range badBlocks {
		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
	}
	return blocks
}

// WriteBadBlock serializes the bad block into the database. If the number of
// accumulated bad blocks exceeds the limit, the oldest will be dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		log.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks badBlockList
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			log.Crit("Failed to decode old bad blocks", "error", err)
		}
	}
	for _, b := range badBlocks {
		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
			return
		}
	}
	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	sort.Sort(sort.Reverse(badBlocks))
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		log.Crit("Failed to encode bad blocks", "err", err)
	}
	if err := db.Put(badBlockKey, data); err != nil {
		log.Crit("Failed to write bad blocks", "err", err)
	}
}

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
	if err := db.Delete(badBlockKey); err != nil {
		log.Crit("Failed to delete bad blocks", "err", err)
	}
}

// FindCommonAncestor returns the last common ancestor of two block headers.
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}

// ReadHeadHeader returns the current canonical head header.
func ReadHeadHeader(db ethdb.Reader) *types.Header {
	headHeaderHash := ReadHeadHeaderHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil
	}
	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil
	}
	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
}

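// Illustrative sketch, not part of the original file: FindCommonAncestor
// (above) yields the fork point of two headers, from which the depth of a
// reorg on the old chain can be derived. The helper name reorgDepth is
// hypothetical; the boolean reports whether the ancestor could be resolved.
func reorgDepth(db ethdb.Reader, oldHead, newHead *types.Header) (uint64, bool) {
	ancestor := FindCommonAncestor(db, oldHead, newHead)
	if ancestor == nil {
		return 0, false
	}
	return oldHead.Number.Uint64() - ancestor.Number.Uint64(), true
}
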
// ReadHeadBlock returns the current canonical head block.
func ReadHeadBlock(db ethdb.Reader) *types.Block {
	headBlockHash := ReadHeadBlockHash(db)
	if headBlockHash == (common.Hash{}) {
		return nil
	}
	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
	if headBlockNumber == nil {
		return nil
	}
	return ReadBlock(db, headBlockHash, *headBlockNumber)
}
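
// Illustrative sketch, not part of the original file: the fast-sync head
// pointer can be resolved to a block number the same way ReadHeadBlock (above)
// resolves the full head. The helper name fastHeadNumber is hypothetical.
func fastHeadNumber(db ethdb.KeyValueReader) *uint64 {
	hash := ReadHeadFastBlockHash(db)
	if hash == (common.Hash{}) {
		return nil // no fast-sync head pointer stored
	}
	return ReadHeaderNumber(db, hash)
}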