github.com/carter-ya/go-ethereum@v0.0.0-20230628080049-d2309be3983b/core/rawdb/accessors_chain.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"sort"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		data, _ = reader.Ancient(chainFreezerHashTable, number)
		if len(data) == 0 {
			// Get it by hash from leveldb
			data, _ = db.Get(headerHashKey(number))
		}
		return nil
	})
	return common.BytesToHash(data)
}

// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}

// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

type NumberHash struct {
	Number uint64
	Hash   common.Hash
}
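
// Illustrative usage sketch (not part of the original file): ReadAllHashes can
// be combined with ReadCanonicalHash to enumerate the non-canonical (reorged)
// blocks stored at a given height, assuming db implements both interfaces
// (e.g. a full ethdb.Database):
//
//	canon := ReadCanonicalHash(db, number)
//	for _, h := range ReadAllHashes(db, number) {
//		if h != canon {
//			// h belongs to a stale fork at this height.
//		}
//	}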

// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
	var (
		start     = encodeBlockNumber(first)
		keyLength = len(headerPrefix) + 8 + 32
		hashes    = make([]*NumberHash, 0, 1+last-first)
		it        = db.NewIterator(headerPrefix, start)
	)
	defer it.Release()
	for it.Next() {
		key := it.Key()
		if len(key) != keyLength {
			continue
		}
		num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
		if num > last {
			break
		}
		hash := common.BytesToHash(key[len(key)-32:])
		hashes = append(hashes, &NumberHash{num, hash})
	}
	return hashes
}

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in
// the given chain range. If the accumulated entries reach the given threshold,
// abort the iteration and return the partial result.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reach the limit threshold, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}

// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}
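
// Illustrative sketch (not part of the original file): ReadAllCanonicalHashes
// iterates from 'from' up to, but not including, 'to', and stops early once
// 'limit' entries have been collected, so a caller scanning the canonical
// chain in batches might look roughly like this (the numbers are arbitrary):
//
//	numbers, hashes := ReadAllCanonicalHashes(db, 1_000_000, 1_000_128, 128)
//	for i, n := range numbers {
//		_ = n         // canonical block number
//		_ = hashes[i] // hash assigned to that number
//	}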

// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}

// ReadFinalizedBlockHash retrieves the hash of the finalized block.
func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFinalizedBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteFinalizedBlockHash stores the hash of the finalized block.
func WriteFinalizedBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFinalizedBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last finalized block's hash", "err", err)
	}
}

// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// is fully synced, the last pivot will always be nil.
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest indexed block
// whose transaction indices have been indexed.
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block
// into the database.
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}
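
// Rough usage sketch (illustrative only, not part of the original file): the
// head markers above can diverge while a node is syncing. The head header is
// typically at or ahead of the head block, and the pivot marker is only
// present until the initial sync completes:
//
//	headHeader := ReadHeadHeaderHash(db)
//	headBlock := ReadHeadBlockHash(db)
//	if pivot := ReadLastPivotNumber(db); pivot != nil {
//		// Initial sync has not finished; headBlock may lag behind headHeader.
//	}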

// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the txlookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}

// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
// backwards towards genesis. This method assumes that the caller already has
// placed a cap on count, to prevent DoS issues.
// Since this method operates in head-towards-genesis mode, it will return an empty
// slice in case the head ('number') is missing. Hence, the caller must ensure that
// the head ('number') argument is actually an existing header.
//
// N.B: Since the input is a number, as opposed to a hash, it's implicit that
// this method only operates on canon headers.
func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue {
	var rlpHeaders []rlp.RawValue
	if count == 0 {
		return rlpHeaders
	}
	i := number
	if count-1 > number {
		// It's ok to request block 0, 1 item
		count = number + 1
	}
	limit, _ := db.Ancients()
	// First read live blocks
	if i >= limit {
		// If we need to read live blocks, we need to figure out the hash first
		hash := ReadCanonicalHash(db, number)
		for ; i >= limit && count > 0; i-- {
			if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 {
				rlpHeaders = append(rlpHeaders, data)
				// Get the parent hash for next query
				hash = types.HeaderParentHashFromRLP(data)
			} else {
				break // Maybe got moved to ancients
			}
			count--
		}
	}
	if count == 0 {
		return rlpHeaders
	}
	// read remaining from ancients
	max := count * 700
	data, err := db.AncientRange(chainFreezerHeaderTable, i+1-count, count, max)
	if err == nil && uint64(len(data)) == count {
		// the data is in the order [h, h+1, .., n] -- reordering needed
		for i := range data {
			rlpHeaders = append(rlpHeaders, data[len(data)-1-i])
		}
	}
	return rlpHeaders
}

// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// First try to look up the data in ancient database. Extra hash
		// comparison is necessary since ancient database only maintains
		// the canonical data.
		data, _ = reader.Ancient(chainFreezerHeaderTable, number)
		if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(headerKey(number, hash))
		return nil
	})
	return data
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}
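
// Illustrative sketch (not part of the original file): ReadHeaderRange walks
// from a known canonical head downwards, so callers usually verify the head
// exists first and cap the count themselves. Assuming a hypothetical cap of
// 1024 headers:
//
//	head := ReadHeadHeaderHash(db)
//	if number := ReadHeaderNumber(db, head); number != nil {
//		headers := ReadHeaderRange(db, *number, 1024)
//		// headers[0] is the head; headers[len(headers)-1] is the oldest returned.
//		_ = headers
//	}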

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}

// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
	h, err := reader.Ancient(chainFreezerHashTable, number)
	if err != nil {
		return false
	}
	return bytes.Equal(h, hash[:])
}

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in ancient database. Extra hash
	// comparison is necessary since ancient database only maintains
	// the canonical data.
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(chainFreezerBodiesTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(blockBodyKey(number, hash))
		return nil
	})
	return data
}

// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		data, _ = reader.Ancient(chainFreezerBodiesTable, number)
		if len(data) > 0 {
			return nil
		}
		// Block is not in ancients, read from leveldb by hash and number.
		// Note: ReadCanonicalHash cannot be used here because it also
		// calls ReadAncients internally.
		hash, _ := db.Get(headerHashKey(number))
		data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
		return nil
	})
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}

// WriteBody stores a block body into the database.
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(chainFreezerDifficultyTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(headerTDKey(number, hash))
		return nil
	})
	return data
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}

// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}
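
// Sketch of a typical non-freezer write path (illustrative only, not part of
// this file): the header, body and total difficulty of an imported block are
// persisted with the separate accessors above, usually through a write batch.
// The batch comes from a batch-capable store (e.g. a full ethdb.Database), and
// 'block'/'td' are assumed to be supplied by the caller:
//
//	batch := db.NewBatch()
//	WriteHeader(batch, block.Header())
//	WriteBody(batch, block.Hash(), block.NumberU64(), block.Body())
//	WriteTd(batch, block.Hash(), block.NumberU64(), td)
//	if err := batch.Write(); err != nil {
//		log.Crit("Failed to write block data", "err", err)
//	}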

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(chainFreezerReceiptTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(blockReceiptsKey(number, hash))
		return nil
	})
	return data
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}

// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, retrieve it alongside the receipts
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}
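
// Illustrative sketch (not part of the original file): looking up the receipts
// of a canonical block requires the hash, the number and the chain config so
// that DeriveFields can fill in metadata such as the transaction hashes. The
// 'number' value and the mainnet config are only used here for illustration:
//
//	hash := ReadCanonicalHash(db, number)
//	receipts := ReadReceipts(db, hash, number, params.MainnetChainConfig)
//	for _, r := range receipts {
//		_ = r.TxHash // populated by DeriveFields, unlike with ReadRawReceipts
//	}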

// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}

// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// storedReceiptRLP is the storage encoding of a receipt.
// Re-definition in core/types/receipt.go.
type storedReceiptRLP struct {
	PostStateOrStatus []byte
	CumulativeGasUsed uint64
	Logs              []*types.LogForStorage
}

// receiptLogs is a bare-bones version of ReceiptForStorage which only keeps
// the list of logs. When decoding a stored receipt into this object we
// avoid creating the bloom filter.
type receiptLogs struct {
	Logs []*types.Log
}

// DecodeRLP implements rlp.Decoder.
func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
	var stored storedReceiptRLP
	if err := s.Decode(&stored); err != nil {
		return err
	}
	r.Logs = make([]*types.Log, len(stored.Logs))
	for i, log := range stored.Logs {
		r.Logs[i] = (*types.Log)(log)
	}
	return nil
}

// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
	logIndex := uint(0)
	if len(txs) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for i := 0; i < len(receipts); i++ {
		txHash := txs[i].Hash()
		// The derived log fields can simply be set from the block and transaction
		for j := 0; j < len(receipts[i].Logs); j++ {
			receipts[i].Logs[j].BlockNumber = number
			receipts[i].Logs[j].BlockHash = hash
			receipts[i].Logs[j].TxHash = txHash
			receipts[i].Logs[j].TxIndex = uint(i)
			receipts[i].Logs[j].Index = logIndex
			logIndex++
		}
	}
	return nil
}

// ReadLogs retrieves the logs for all transactions in a block. The log fields
// are populated with metadata. In case the receipts or the block body
// are not found, nil is returned.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	receipts := []*receiptLogs{}
	if err := rlp.DecodeBytes(data, &receipts); err != nil {
		// Receipts might be in the legacy format, try decoding that.
		// TODO: to be removed after users migrated
		if logs := readLegacyLogs(db, hash, number, config); logs != nil {
			return logs
		}
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}

	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}

// readLegacyLogs is a temporary workaround for reading logs from a block
// which has its receipts stored in the legacy format. It will be removed
// after users have migrated their freezer databases.
func readLegacyLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
	receipts := ReadReceipts(db, hash, number, config)
	if receipts == nil {
		return nil
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note: due to the concurrent download of the header and block body, the header
// (and thus the canonical hash) can be stored in the database while the body
// data is not (yet).
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}

// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
	var (
		tdSum      = new(big.Int).Set(td)
		stReceipts []*types.ReceiptForStorage
	)
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i, block := range blocks {
			// Convert receipts to storage format and sum up total difficulty.
			stReceipts = stReceipts[:0]
			for _, receipt := range receipts[i] {
				stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
			}
			header := block.Header()
			if i > 0 {
				tdSum.Add(tdSum, header.Difficulty)
			}
			if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
				return err
			}
		}
		return nil
	})
}

func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
	num := block.NumberU64()
	if err := op.AppendRaw(chainFreezerHashTable, num, block.Hash().Bytes()); err != nil {
		return fmt.Errorf("can't add block %d hash: %v", num, err)
	}
	if err := op.Append(chainFreezerHeaderTable, num, header); err != nil {
		return fmt.Errorf("can't append block header %d: %v", num, err)
	}
	if err := op.Append(chainFreezerBodiesTable, num, block.Body()); err != nil {
		return fmt.Errorf("can't append block body %d: %v", num, err)
	}
	if err := op.Append(chainFreezerReceiptTable, num, receipts); err != nil {
		return fmt.Errorf("can't append block %d receipts: %v", num, err)
	}
	if err := op.Append(chainFreezerDifficultyTable, num, td); err != nil {
		return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
	}
	return nil
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

const badBlockToKeep = 10

type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// badBlockList implements the sort interface to allow sorting a list of
// bad blocks by their number in the reverse order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	for _, bad := range badBlocks {
		if bad.Header.Hash() == hash {
			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
		}
	}
	return nil
}
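
// Illustrative sketch (not part of the original file): as the loop in
// WriteAncientBlocks shows, the 'td' argument is the total difficulty of the
// first block in the batch (including that block's own difficulty); the
// difficulties of subsequent blocks are accumulated internally. Assuming db is
// a full ethdb.Database and 'blocks'/'receipts' are supplied by the caller:
//
//	firstTD := ReadTd(db, blocks[0].Hash(), blocks[0].NumberU64())
//	if size, err := WriteAncientBlocks(db, blocks, receipts, firstTD); err == nil {
//		log.Info("Migrated blocks to ancients", "size", size)
//	}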

// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	var blocks []*types.Block
	for _, bad := range badBlocks {
		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
	}
	return blocks
}

// WriteBadBlock serializes the bad block into the database. If the number of
// accumulated bad blocks exceeds the limit, the oldest will be dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		log.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks badBlockList
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			log.Crit("Failed to decode old bad blocks", "error", err)
		}
	}
	for _, b := range badBlocks {
		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
			return
		}
	}
	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	sort.Sort(sort.Reverse(badBlocks))
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		log.Crit("Failed to encode bad blocks", "err", err)
	}
	if err := db.Put(badBlockKey, data); err != nil {
		log.Crit("Failed to write bad blocks", "err", err)
	}
}

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
	if err := db.Delete(badBlockKey); err != nil {
		log.Crit("Failed to delete bad blocks", "err", err)
	}
}

// FindCommonAncestor returns the last common ancestor of two block headers.
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}

// ReadHeadHeader returns the current canonical head header.
func ReadHeadHeader(db ethdb.Reader) *types.Header {
	headHeaderHash := ReadHeadHeaderHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil
	}
	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil
	}
	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
}

// ReadHeadBlock returns the current canonical head block.
func ReadHeadBlock(db ethdb.Reader) *types.Block {
	headBlockHash := ReadHeadBlockHash(db)
	if headBlockHash == (common.Hash{}) {
		return nil
	}
	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
	if headBlockNumber == nil {
		return nil
	}
	return ReadBlock(db, headBlockHash, *headBlockNumber)
}
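
// Closing usage sketch (illustrative only, not part of the original file):
// during a reorg, the accessors above are typically combined to locate the
// fork point between the current head and an incoming header. 'newHeader'
// stands for a header obtained elsewhere (e.g. from the downloader) and is
// only named for illustration:
//
//	head := ReadHeadHeader(db)
//	if head != nil {
//		if ancestor := FindCommonAncestor(db, head, newHeader); ancestor != nil {
//			// Blocks above 'ancestor' on the old chain are candidates for rewinding.
//		}
//	}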