github.com/electroneum/electroneum-sc@v0.0.0-20230105223411-3bc1d078281e/core/rawdb/accessors_chain.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math/big"
	"sort"

	"github.com/electroneum/electroneum-sc/common"
	"github.com/electroneum/electroneum-sc/core/types"
	"github.com/electroneum/electroneum-sc/ethdb"
	"github.com/electroneum/electroneum-sc/log"
	"github.com/electroneum/electroneum-sc/params"
	"github.com/electroneum/electroneum-sc/rlp"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		data, _ = reader.Ancient(freezerHashTable, number)
		if len(data) == 0 {
			// Get it by hash from leveldb
			data, _ = db.Get(headerHashKey(number))
		}
		return nil
	})
	return common.BytesToHash(data)
}

// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}

// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

type NumberHash struct {
	Number uint64
	Hash   common.Hash
}
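// exampleCanonicalRoundTrip is an illustrative sketch only (not part of this
// file upstream): it shows how the canonical-hash accessors above pair up by
// writing a number->hash mapping and reading it straight back. The function
// name and signature are assumptions made purely for demonstration.
func exampleCanonicalRoundTrip(db ethdb.Database, number uint64, hash common.Hash) bool {
	WriteCanonicalHash(db, hash, number)
	return ReadCanonicalHash(db, number) == hash
}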
// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
// heights, both canonical and reorged forks included.
// This method considers both limits to be _inclusive_.
func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
	var (
		start     = encodeBlockNumber(first)
		keyLength = len(headerPrefix) + 8 + 32
		hashes    = make([]*NumberHash, 0, 1+last-first)
		it        = db.NewIterator(headerPrefix, start)
	)
	defer it.Release()
	for it.Next() {
		key := it.Key()
		if len(key) != keyLength {
			continue
		}
		num := binary.BigEndian.Uint64(key[len(headerPrefix) : len(headerPrefix)+8])
		if num > last {
			break
		}
		hash := common.BytesToHash(key[len(key)-32:])
		hashes = append(hashes, &NumberHash{num, hash})
	}
	return hashes
}

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in
// the given chain range. If the accumulated entries reach the given threshold,
// the iteration is aborted and the partial result is returned.
func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reach the limit threshold, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}

// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes the hash->number mapping.
func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}
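// exampleCanonicalRange is an illustrative sketch only (not upstream code): it
// demonstrates a bounded scan over the canonical index with
// ReadAllCanonicalHashes above. The scan covers block numbers in [from, to) and
// stops early once the (hypothetical) 1024-entry cap is reached.
func exampleCanonicalRange(db ethdb.Database, from, to uint64) map[uint64]common.Hash {
	numbers, hashes := ReadAllCanonicalHashes(db, from, to, 1024)
	out := make(map[uint64]common.Hash, len(numbers))
	for i, n := range numbers {
		out[n] = hashes[i]
	}
	return out
}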
// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}

// ReadFinalizedBlockHash retrieves the hash of the finalized block.
func ReadFinalizedBlockHash(db ethdb.KeyValueReader) common.Hash {
	data, _ := db.Get(headFinalizedBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteFinalizedBlockHash stores the hash of the finalized block.
func WriteFinalizedBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFinalizedBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last finalized block's hash", "err", err)
	}
}

// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// is fully synced, the last pivot will always be nil.
func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest block whose transaction
// indices have been indexed. If the corresponding entry is non-existent in the
// database, it means the indexing has been finished.
func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block
// into the database.
func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}
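// exampleTxIndexProgress is an illustrative sketch only (not upstream code):
// per the comment on ReadTxIndexTail above, a missing tail entry means the
// transaction indexing has already finished; otherwise the tail is the oldest
// indexed block number.
func exampleTxIndexProgress(db ethdb.Database) (tail *uint64, finished bool) {
	tail = ReadTxIndexTail(db)
	return tail, tail == nil
}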
// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}

// ReadHeaderRange returns the rlp-encoded headers, starting at 'number', and going
// backwards towards genesis. This method assumes that the caller already has
// placed a cap on count, to prevent DoS issues.
// Since this method operates in head-towards-genesis mode, it will return an empty
// slice in case the head ('number') is missing. Hence, the caller must ensure that
// the head ('number') argument is actually an existing header.
//
// N.B: Since the input is a number, as opposed to a hash, it's implicit that
// this method only operates on canon headers.
func ReadHeaderRange(db ethdb.Reader, number uint64, count uint64) []rlp.RawValue {
	var rlpHeaders []rlp.RawValue
	if count == 0 {
		return rlpHeaders
	}
	i := number
	if count-1 > number {
		// It's ok to request block 0, 1 item
		count = number + 1
	}
	limit, _ := db.Ancients()
	// First read live blocks
	if i >= limit {
		// If we need to read live blocks, we need to figure out the hash first
		hash := ReadCanonicalHash(db, number)
		for ; i >= limit && count > 0; i-- {
			if data, _ := db.Get(headerKey(i, hash)); len(data) > 0 {
				rlpHeaders = append(rlpHeaders, data)
				// Get the parent hash for next query
				hash = types.HeaderParentHashFromRLP(data)
			} else {
				break // Maybe got moved to ancients
			}
			count--
		}
	}
	if count == 0 {
		return rlpHeaders
	}
	// Read the remaining headers from ancients.
	// Expect a max of 24 validators at 128 bytes (97 bytes to be exact) each
	// for Vanity+Seal, plus 928 bytes for the other header fields.
	max := count * 4000
	data, err := db.AncientRange(freezerHeaderTable, i+1-count, count, max)
	if err == nil && uint64(len(data)) == count {
		// the data is on the order [h, h+1, .., n] -- reordering needed
		for i := range data {
			rlpHeaders = append(rlpHeaders, data[len(data)-1-i])
		}
	}
	return rlpHeaders
}

// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	err := db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// First try to look up the data in ancient database. Extra hash
		// comparison is necessary since ancient database only maintains
		// the canonical data.
		data, _ = reader.Ancient(freezerHeaderTable, number)
		if len(data) > 0 {
			var header *types.Header
			err := rlp.DecodeBytes(data, &header)
			if err != nil {
				return err
			}
			if header != nil && header.Hash() == hash {
				return nil
			}
		}
		// If not, try reading from leveldb
		data, _ = db.Get(headerKey(number, hash))
		return nil
	})
	if err != nil {
		return nil
	}
	return data
}
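// exampleHeaderRange is an illustrative sketch only (not upstream code) of the
// calling contract documented on ReadHeaderRange above: the caller verifies
// that the head number actually exists and caps the count itself. The 64-header
// cap here is a hypothetical value chosen purely for the example.
func exampleHeaderRange(db ethdb.Database, head uint64) []rlp.RawValue {
	const maxHeaders = 64 // caller-side cap to avoid unbounded reads
	if ReadCanonicalHash(db, head) == (common.Hash{}) {
		return nil // the head must exist, per the ReadHeaderRange contract
	}
	return ReadHeaderRange(db, head, maxHeaders)
}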
// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}

// isCanon is an internal utility method, to check whether the given number/hash
// is part of the ancient (canon) set.
func isCanon(reader ethdb.AncientReaderOp, number uint64, hash common.Hash) bool {
	h, err := reader.Ancient(freezerHashTable, number)
	if err != nil {
		return false
	}
	return bytes.Equal(h, hash[:])
}

// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in ancient database. Extra hash
	// comparison is necessary since ancient database only maintains
	// the canonical data.
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(freezerBodiesTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(blockBodyKey(number, hash))
		return nil
	})
	return data
}
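// exampleHeaderByNumber is an illustrative sketch only (not upstream code)
// combining the accessors above: resolve the canonical hash for a number, then
// decode the header, returning nil if either lookup misses.
func exampleHeaderByNumber(db ethdb.Database, number uint64) *types.Header {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return ReadHeader(db, hash, number)
}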
// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		data, _ = reader.Ancient(freezerBodiesTable, number)
		if len(data) > 0 {
			return nil
		}
		// Block is not in ancients, read from leveldb by hash and number.
		// Note: ReadCanonicalHash cannot be used here because it also
		// calls ReadAncients internally.
		hash, _ := db.Get(headerHashKey(number))
		data, _ = db.Get(blockBodyKey(number, common.BytesToHash(hash)))
		return nil
	})
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}

// WriteBody stores a block body into the database.
func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}

// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(freezerDifficultyTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(headerTDKey(number, hash))
		return nil
	})
	return data
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}
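// exampleChildTd is an illustrative sketch only (not upstream code) of the
// usual total-difficulty bookkeeping done by callers of ReadTd/WriteTd: a
// child's TD is the parent's stored TD plus the child header's own difficulty.
func exampleChildTd(db ethdb.Database, header *types.Header) *big.Int {
	parentTd := ReadTd(db, header.ParentHash, header.Number.Uint64()-1)
	if parentTd == nil {
		return nil
	}
	return new(big.Int).Add(parentTd, header.Difficulty)
}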
// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
	if isCanon(db, number, hash) {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
	var data []byte
	db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
		// Check if the data is in ancients
		if isCanon(reader, number, hash) {
			data, _ = reader.Ancient(freezerReceiptTable, number)
			return nil
		}
		// If not, try reading from leveldb
		data, _ = db.Get(blockReceiptsKey(number, hash))
		return nil
	})
	return data
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}
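// exampleReceiptLookup is an illustrative sketch only (not upstream code)
// contrasting the two receipt accessors: ReadRawReceipts above returns only the
// consensus fields, while ReadReceipts below additionally derives metadata
// (block hash, tx hash, log indices) from the block body and chain config.
func exampleReceiptLookup(db ethdb.Database, hash common.Hash, number uint64, config *params.ChainConfig, withMetadata bool) types.Receipts {
	if withMetadata {
		return ReadReceipts(db, hash, number, config)
	}
	return ReadRawReceipts(db, hash, number)
}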
// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, retrieve beside the receipt
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}

// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}

// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// storedReceiptRLP is the storage encoding of a receipt.
// Re-definition in core/types/receipt.go.
type storedReceiptRLP struct {
	PostStateOrStatus []byte
	CumulativeGasUsed uint64
	Logs              []*types.LogForStorage
}

// receiptLogs is a barebone version of ReceiptForStorage which only keeps
// the list of logs. When decoding a stored receipt into this object we
// avoid creating the bloom filter.
type receiptLogs struct {
	Logs []*types.Log
}

// DecodeRLP implements rlp.Decoder.
func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
	var stored storedReceiptRLP
	if err := s.Decode(&stored); err != nil {
		return err
	}
	r.Logs = make([]*types.Log, len(stored.Logs))
	for i, log := range stored.Logs {
		r.Logs[i] = (*types.Log)(log)
	}
	return nil
}

// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
	logIndex := uint(0)
	if len(txs) != len(receipts) {
		return errors.New("transaction and receipt count mismatch")
	}
	for i := 0; i < len(receipts); i++ {
		txHash := txs[i].Hash()
		// The derived log fields can simply be set from the block and transaction
		for j := 0; j < len(receipts[i].Logs); j++ {
			receipts[i].Logs[j].BlockNumber = number
			receipts[i].Logs[j].BlockHash = hash
			receipts[i].Logs[j].TxHash = txHash
			receipts[i].Logs[j].TxIndex = uint(i)
			receipts[i].Logs[j].Index = logIndex
			logIndex++
		}
	}
	return nil
}
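// exampleDecodeStoredLogs is an illustrative sketch only (not upstream code) of
// the lightweight log path used by ReadLogs below: decode stored receipts into
// the barebone receiptLogs form (skipping bloom reconstruction), then fill in
// the block-derived fields via deriveLogFields above.
func exampleDecodeStoredLogs(data rlp.RawValue, hash common.Hash, number uint64, txs types.Transactions) ([]*receiptLogs, error) {
	var receipts []*receiptLogs
	if err := rlp.DecodeBytes(data, &receipts); err != nil {
		return nil, err
	}
	if err := deriveLogFields(receipts, hash, number, txs); err != nil {
		return nil, err
	}
	return receipts, nil
}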
// ReadLogs retrieves the logs for all transactions in a block. The log fields
// are populated with metadata. In case the receipts or the block body
// are not found, a nil is returned.
func ReadLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	receipts := []*receiptLogs{}
	if err := rlp.DecodeBytes(data, &receipts); err != nil {
		// Receipts might be in the legacy format, try decoding that.
		// TODO: to be removed after users migrated
		if logs := readLegacyLogs(db, hash, number, config); logs != nil {
			return logs
		}
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}

	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := deriveLogFields(receipts, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}

// readLegacyLogs is a temporary workaround for when trying to read logs
// from a block which has its receipt stored in the legacy format. It'll
// be removed after users have migrated their freezer databases.
func readLegacyLogs(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) [][]*types.Log {
	receipts := ReadReceipts(db, hash, number, config)
	if receipts == nil {
		return nil
	}
	logs := make([][]*types.Log, len(receipts))
	for i, receipt := range receipts {
		logs[i] = receipt.Logs
	}
	return logs
}

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}
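// exampleInsertBlock is an illustrative sketch only (not upstream code) of the
// write sequence a caller would typically perform when extending the chain with
// the helpers in this file: block data, total difficulty, receipts and the
// canonical number->hash mapping. The real insertion logic lives in
// core.BlockChain, which batches these writes.
func exampleInsertBlock(db ethdb.KeyValueWriter, block *types.Block, receipts types.Receipts, td *big.Int) {
	WriteBlock(db, block)
	WriteTd(db, block.Hash(), block.NumberU64(), td)
	WriteReceipts(db, block.Hash(), block.NumberU64(), receipts)
	WriteCanonicalHash(db, block.Hash(), block.NumberU64())
}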
// WriteAncientBlocks writes entire block data into ancient store and returns the total written size.
func WriteAncientBlocks(db ethdb.AncientWriter, blocks []*types.Block, receipts []types.Receipts, td *big.Int) (int64, error) {
	var (
		tdSum      = new(big.Int).Set(td)
		stReceipts []*types.ReceiptForStorage
	)
	return db.ModifyAncients(func(op ethdb.AncientWriteOp) error {
		for i, block := range blocks {
			// Convert receipts to storage format and sum up total difficulty.
			stReceipts = stReceipts[:0]
			for _, receipt := range receipts[i] {
				stReceipts = append(stReceipts, (*types.ReceiptForStorage)(receipt))
			}
			header := block.Header()
			if i > 0 {
				tdSum.Add(tdSum, header.Difficulty)
			}
			if err := writeAncientBlock(op, block, header, stReceipts, tdSum); err != nil {
				return err
			}
		}
		return nil
	})
}

func writeAncientBlock(op ethdb.AncientWriteOp, block *types.Block, header *types.Header, receipts []*types.ReceiptForStorage, td *big.Int) error {
	num := block.NumberU64()
	if err := op.AppendRaw(freezerHashTable, num, block.Hash().Bytes()); err != nil {
		return fmt.Errorf("can't add block %d hash: %v", num, err)
	}
	if err := op.Append(freezerHeaderTable, num, header); err != nil {
		return fmt.Errorf("can't append block header %d: %v", num, err)
	}
	if err := op.Append(freezerBodiesTable, num, block.Body()); err != nil {
		return fmt.Errorf("can't append block body %d: %v", num, err)
	}
	if err := op.Append(freezerReceiptTable, num, receipts); err != nil {
		return fmt.Errorf("can't append block %d receipts: %v", num, err)
	}
	if err := op.Append(freezerDifficultyTable, num, td); err != nil {
		return fmt.Errorf("can't append block %d total difficulty: %v", num, err)
	}
	return nil
}

// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

const badBlockToKeep = 10

type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// badBlockList implements the sort interface to allow sorting a list of
// bad blocks by their number in the reverse order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// ReadBadBlock retrieves the bad block with the corresponding block hash.
func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	for _, bad := range badBlocks {
		if bad.Header.Hash() == hash {
			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
		}
	}
	return nil
}
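// exampleFreezeBlock is an illustrative sketch only (not upstream code) of
// calling WriteAncientBlocks above for a single block: the 'td' argument is the
// total difficulty of that first (and here only) block, and the returned value
// is the total written size as documented on WriteAncientBlocks.
func exampleFreezeBlock(db ethdb.Database, block *types.Block, receipts types.Receipts, td *big.Int) (int64, error) {
	return WriteAncientBlocks(db, []*types.Block{block}, []types.Receipts{receipts}, td)
}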
// ReadAllBadBlocks retrieves all the bad blocks in the database.
// All returned blocks are sorted in reverse order by number.
func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		return nil
	}
	var badBlocks badBlockList
	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
		return nil
	}
	var blocks []*types.Block
	for _, bad := range badBlocks {
		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
	}
	return blocks
}

// WriteBadBlock serializes the bad block into the database. If the cumulated
// bad blocks exceed the limit, the oldest will be dropped.
func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
	blob, err := db.Get(badBlockKey)
	if err != nil {
		log.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks badBlockList
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			log.Crit("Failed to decode old bad blocks", "error", err)
		}
	}
	for _, b := range badBlocks {
		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
			return
		}
	}
	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	sort.Sort(sort.Reverse(badBlocks))
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		log.Crit("Failed to encode bad blocks", "err", err)
	}
	if err := db.Put(badBlockKey, data); err != nil {
		log.Crit("Failed to write bad blocks", "err", err)
	}
}

// DeleteBadBlocks deletes all the bad blocks from the database.
func DeleteBadBlocks(db ethdb.KeyValueWriter) {
	if err := db.Delete(badBlockKey); err != nil {
		log.Crit("Failed to delete bad blocks", "err", err)
	}
}

// HasBadBlock returns whether the block with the hash is a bad block. dep: Istanbul
func HasBadBlock(db ethdb.Reader, hash common.Hash) bool {
	return ReadBadBlock(db, hash) != nil
}

// FindCommonAncestor returns the last common ancestor of two block headers.
func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}

// ReadHeadHeader returns the current canonical head header.
func ReadHeadHeader(db ethdb.Reader) *types.Header {
	headHeaderHash := ReadHeadHeaderHash(db)
	if headHeaderHash == (common.Hash{}) {
		return nil
	}
	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
	if headHeaderNumber == nil {
		return nil
	}
	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
}
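// exampleReorgDepth is an illustrative sketch only (not upstream code) using
// FindCommonAncestor above: given the old and new heads around a reorg, it
// computes how many blocks the old chain loses.
func exampleReorgDepth(db ethdb.Database, oldHead, newHead *types.Header) (uint64, bool) {
	ancestor := FindCommonAncestor(db, oldHead, newHead)
	if ancestor == nil {
		return 0, false
	}
	return oldHead.Number.Uint64() - ancestor.Number.Uint64(), true
}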
// ReadHeadBlock returns the current canonical head block.
func ReadHeadBlock(db ethdb.Reader) *types.Block {
	headBlockHash := ReadHeadBlockHash(db)
	if headBlockHash == (common.Hash{}) {
		return nil
	}
	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
	if headBlockNumber == nil {
		return nil
	}
	return ReadBlock(db, headBlockHash, *headBlockNumber)
}
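// exampleFinalizedHeader is an illustrative sketch only (not upstream code)
// mirroring ReadHeadHeader/ReadHeadBlock above for the finalized pointer:
// resolve the finalized hash, then its number, then the header itself.
func exampleFinalizedHeader(db ethdb.Reader) *types.Header {
	hash := ReadFinalizedBlockHash(db)
	if hash == (common.Hash{}) {
		return nil
	}
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return nil
	}
	return ReadHeader(db, hash, *number)
}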