github.com/ccm-chain/ccmchain@v1.0.0/core/rawdb/accessors_chain.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"math/big"

	"github.com/ccm-chain/ccmchain/common"
	"github.com/ccm-chain/ccmchain/core/types"
	"github.com/ccm-chain/ccmchain/crypto"
	"github.com/ccm-chain/ccmchain/database"
	"github.com/ccm-chain/ccmchain/log"
	"github.com/ccm-chain/ccmchain/params"
	"github.com/ccm-chain/ccmchain/rlp"
)

// ReadCanonicalHash retrieves the hash assigned to a canonical block number.
func ReadCanonicalHash(db database.Reader, number uint64) common.Hash {
	data, _ := db.Ancient(freezerHashTable, number)
	if len(data) == 0 {
		data, _ = db.Get(headerHashKey(number))
		// In the background the freezer is moving data from leveldb into flat files.
		// During the first ancient db check the data may not be there yet, but by the
		// time we look into leveldb it may already have been moved, so retry the
		// ancient store to avoid a spurious not-found.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerHashTable, number)
		}
	}
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteCanonicalHash stores the hash assigned to a canonical block number.
func WriteCanonicalHash(db database.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
		log.Crit("Failed to store number to hash mapping", "err", err)
	}
}

// DeleteCanonicalHash removes the number to hash canonical mapping.
func DeleteCanonicalHash(db database.KeyValueWriter, number uint64) {
	if err := db.Delete(headerHashKey(number)); err != nil {
		log.Crit("Failed to delete number to hash mapping", "err", err)
	}
}
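// readCanonicalBlockSketch is a minimal usage sketch (not one of the original
// accessors): it resolves the canonical hash stored for a height and, if one
// exists, loads the full block behind it via ReadBlock further below. A zero
// hash means the height has no canonical entry (yet).
func readCanonicalBlockSketch(db database.Reader, number uint64) *types.Block {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil // no canonical block known at this height
	}
	return ReadBlock(db, hash, number)
}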
// ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
// both canonical and reorged forks included.
func ReadAllHashes(db database.Iteratee, number uint64) []common.Hash {
	prefix := headerKeyPrefix(number)

	hashes := make([]common.Hash, 0, 1)
	it := db.NewIterator(prefix, nil)
	defer it.Release()

	for it.Next() {
		if key := it.Key(); len(key) == len(prefix)+32 {
			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
		}
	}
	return hashes
}

// ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
// given chain range. If the number of accumulated entries reaches the given
// threshold, the iteration is aborted and the partial result returned.
func ReadAllCanonicalHashes(db database.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
	// Short circuit if the limit is 0.
	if limit == 0 {
		return nil, nil
	}
	var (
		numbers []uint64
		hashes  []common.Hash
	)
	// Construct the key prefix of the start point.
	start, end := headerHashKey(from), headerHashKey(to)
	it := db.NewIterator(nil, start)
	defer it.Release()

	for it.Next() {
		if bytes.Compare(it.Key(), end) >= 0 {
			break
		}
		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
			hashes = append(hashes, common.BytesToHash(it.Value()))
			// If the accumulated entries reach the limit threshold, return.
			if len(numbers) >= limit {
				break
			}
		}
	}
	return numbers, hashes
}

// ReadHeaderNumber returns the header number assigned to a hash.
func ReadHeaderNumber(db database.KeyValueReader, hash common.Hash) *uint64 {
	data, _ := db.Get(headerNumberKey(hash))
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteHeaderNumber stores the hash->number mapping.
func WriteHeaderNumber(db database.KeyValueWriter, hash common.Hash, number uint64) {
	key := headerNumberKey(hash)
	enc := encodeBlockNumber(number)
	if err := db.Put(key, enc); err != nil {
		log.Crit("Failed to store hash to number mapping", "err", err)
	}
}

// DeleteHeaderNumber removes the hash->number mapping.
func DeleteHeaderNumber(db database.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// ReadHeadHeaderHash retrieves the hash of the current canonical head header.
func ReadHeadHeaderHash(db database.KeyValueReader) common.Hash {
	data, _ := db.Get(headHeaderKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadHeaderHash stores the hash of the current canonical head header.
func WriteHeadHeaderHash(db database.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last header's hash", "err", err)
	}
}

// ReadHeadBlockHash retrieves the hash of the current canonical head block.
func ReadHeadBlockHash(db database.KeyValueReader) common.Hash {
	data, _ := db.Get(headBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadBlockHash stores the head block's hash.
func WriteHeadBlockHash(db database.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last block's hash", "err", err)
	}
}

// ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
func ReadHeadFastBlockHash(db database.KeyValueReader) common.Hash {
	data, _ := db.Get(headFastBlockKey)
	if len(data) == 0 {
		return common.Hash{}
	}
	return common.BytesToHash(data)
}

// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
func WriteHeadFastBlockHash(db database.KeyValueWriter, hash common.Hash) {
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		log.Crit("Failed to store last fast block's hash", "err", err)
	}
}
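// headBlockSketch is a minimal usage sketch (not one of the original
// accessors): it resolves the current canonical head block by combining the
// head pointer with the hash->number mapping. It relies on database.Reader
// covering the plain key-value getters used throughout this file.
func headBlockSketch(db database.Reader) *types.Block {
	hash := ReadHeadBlockHash(db)
	if hash == (common.Hash{}) {
		return nil // empty or freshly initialised database
	}
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return nil // head hash present but the number mapping is missing
	}
	return ReadBlock(db, hash, *number)
}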
// ReadLastPivotNumber retrieves the number of the last pivot block. If the node
// is fully synced, the last pivot will always be nil.
func ReadLastPivotNumber(db database.KeyValueReader) *uint64 {
	data, _ := db.Get(lastPivotKey)
	if len(data) == 0 {
		return nil
	}
	var pivot uint64
	if err := rlp.DecodeBytes(data, &pivot); err != nil {
		log.Error("Invalid pivot block number in database", "err", err)
		return nil
	}
	return &pivot
}

// WriteLastPivotNumber stores the number of the last pivot block.
func WriteLastPivotNumber(db database.KeyValueWriter, pivot uint64) {
	enc, err := rlp.EncodeToBytes(pivot)
	if err != nil {
		log.Crit("Failed to encode pivot block number", "err", err)
	}
	if err := db.Put(lastPivotKey, enc); err != nil {
		log.Crit("Failed to store pivot block number", "err", err)
	}
}

// ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
// reporting correct numbers across restarts.
func ReadFastTrieProgress(db database.KeyValueReader) uint64 {
	data, _ := db.Get(fastTrieProgressKey)
	if len(data) == 0 {
		return 0
	}
	return new(big.Int).SetBytes(data).Uint64()
}

// WriteFastTrieProgress stores the fast sync trie progress counter to support
// retrieving it across restarts.
func WriteFastTrieProgress(db database.KeyValueWriter, count uint64) {
	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
		log.Crit("Failed to store fast sync trie progress", "err", err)
	}
}

// ReadTxIndexTail retrieves the number of the oldest block whose transaction
// indices have been indexed. If the corresponding entry is non-existent in the
// database, it means the indexing has been finished.
func ReadTxIndexTail(db database.KeyValueReader) *uint64 {
	data, _ := db.Get(txIndexTailKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteTxIndexTail stores the number of the oldest indexed block into the
// database.
func WriteTxIndexTail(db database.KeyValueWriter, number uint64) {
	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store the transaction index tail", "err", err)
	}
}

// ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
func ReadFastTxLookupLimit(db database.KeyValueReader) *uint64 {
	data, _ := db.Get(fastTxLookupLimitKey)
	if len(data) != 8 {
		return nil
	}
	number := binary.BigEndian.Uint64(data)
	return &number
}

// WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
func WriteFastTxLookupLimit(db database.KeyValueWriter, number uint64) {
	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
	}
}
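// txIndexTailSketch is a minimal usage sketch (not one of the original
// accessors) of the tail marker semantics documented above: a missing entry
// means transaction indexing has finished, otherwise the value is the oldest
// block whose transactions are indexed.
func txIndexTailSketch(db database.KeyValueReader) (oldest uint64, finished bool) {
	if tail := ReadTxIndexTail(db); tail != nil {
		return *tail, false
	}
	return 0, true
}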
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
func ReadHeaderRLP(db database.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first ancient db check the data may not be there yet, but by the
	// time we look into leveldb it may already have been moved, so retry the
	// ancient store to avoid a spurious not-found.
	data, _ = db.Ancient(freezerHeaderTable, number)
	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
		return data
	}
	return nil // Can't find the data anywhere.
}

// HasHeader verifies the existence of a block header corresponding to the hash.
func HasHeader(db database.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadHeader retrieves the block header corresponding to the hash.
func ReadHeader(db database.Reader, hash common.Hash, number uint64) *types.Header {
	data := ReadHeaderRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	header := new(types.Header)
	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
		log.Error("Invalid block header RLP", "hash", hash, "err", err)
		return nil
	}
	return header
}

// WriteHeader stores a block header into the database and also stores the hash-
// to-number mapping.
func WriteHeader(db database.KeyValueWriter, header *types.Header) {
	var (
		hash   = header.Hash()
		number = header.Number.Uint64()
	)
	// Write the hash -> number mapping
	WriteHeaderNumber(db, hash, number)

	// Write the encoded header
	data, err := rlp.EncodeToBytes(header)
	if err != nil {
		log.Crit("Failed to RLP encode header", "err", err)
	}
	key := headerKey(number, hash)
	if err := db.Put(key, data); err != nil {
		log.Crit("Failed to store header", "err", err)
	}
}

// DeleteHeader removes all block header data associated with a hash.
func DeleteHeader(db database.KeyValueWriter, hash common.Hash, number uint64) {
	deleteHeaderWithoutNumber(db, hash, number)
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		log.Crit("Failed to delete hash to number mapping", "err", err)
	}
}

// deleteHeaderWithoutNumber removes only the block header but does not remove
// the hash to number mapping.
func deleteHeaderWithoutNumber(db database.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerKey(number, hash)); err != nil {
		log.Crit("Failed to delete header", "err", err)
	}
}
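// headerReadWriter exists only for the sketch below; it bundles the two
// interfaces from this file that the example needs. Any full database handle
// is expected to satisfy both.
type headerReadWriter interface {
	database.Reader
	database.KeyValueWriter
}

// headerRoundTripSketch is a minimal usage sketch (not one of the original
// accessors): WriteHeader persists both the header and the hash->number
// mapping, so a header written here can be located again knowing only its hash.
func headerRoundTripSketch(db headerReadWriter, header *types.Header) *types.Header {
	WriteHeader(db, header)

	hash := header.Hash()
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return nil // mapping missing; not expected right after WriteHeader
	}
	return ReadHeader(db, hash, *number)
}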
// ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
func ReadBodyRLP(db database.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockBodyKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first ancient db check the data may not be there yet, but by the
	// time we look into leveldb it may already have been moved, so retry the
	// ancient store to avoid a spurious not-found.
	data, _ = db.Ancient(freezerBodiesTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
// block at number, in RLP encoding.
func ReadCanonicalBodyRLP(db database.Reader, number uint64) rlp.RawValue {
	// If it's an ancient one, we don't need the canonical hash
	data, _ := db.Ancient(freezerBodiesTable, number)
	if len(data) == 0 {
		// Need to get the hash
		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
		// In the background the freezer is moving data from leveldb into flat files.
		// During the first ancient db check the data may not be there yet, but by the
		// time we look into leveldb it may already have been moved, so retry the
		// ancient store to avoid a spurious not-found.
		if len(data) == 0 {
			data, _ = db.Ancient(freezerBodiesTable, number)
		}
	}
	return data
}

// WriteBodyRLP stores an RLP encoded block body into the database.
func WriteBodyRLP(db database.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
		log.Crit("Failed to store block body", "err", err)
	}
}

// HasBody verifies the existence of a block body corresponding to the hash.
func HasBody(db database.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}

// ReadBody retrieves the block body corresponding to the hash.
func ReadBody(db database.Reader, hash common.Hash, number uint64) *types.Body {
	data := ReadBodyRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	body := new(types.Body)
	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
		log.Error("Invalid block body RLP", "hash", hash, "err", err)
		return nil
	}
	return body
}

// WriteBody stores a block body into the database.
func WriteBody(db database.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
	data, err := rlp.EncodeToBytes(body)
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	WriteBodyRLP(db, hash, number, data)
}
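// canonicalTxCountSketch is a minimal usage sketch (not one of the original
// accessors): bodies are keyed by (number, hash), so the canonical hash has to
// be resolved first; a nil body is normal while the body download still lags
// behind the header.
func canonicalTxCountSketch(db database.Reader, number uint64) (int, bool) {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return 0, false // nothing canonical at this height
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return 0, false // body not (yet) stored
	}
	return len(body.Transactions), true
}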
// DeleteBody removes all block body data associated with a hash.
func DeleteBody(db database.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		log.Crit("Failed to delete block body", "err", err)
	}
}

// ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
func ReadTdRLP(db database.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(headerTDKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first ancient db check the data may not be there yet, but by the
	// time we look into leveldb it may already have been moved, so retry the
	// ancient store to avoid a spurious not-found.
	data, _ = db.Ancient(freezerDifficultyTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadTd retrieves a block's total difficulty corresponding to the hash.
func ReadTd(db database.Reader, hash common.Hash, number uint64) *big.Int {
	data := ReadTdRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	td := new(big.Int)
	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
		return nil
	}
	return td
}

// WriteTd stores the total difficulty of a block into the database.
func WriteTd(db database.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
	data, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	if err := db.Put(headerTDKey(number, hash), data); err != nil {
		log.Crit("Failed to store block total difficulty", "err", err)
	}
}

// DeleteTd removes all block total difficulty data associated with a hash.
func DeleteTd(db database.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		log.Crit("Failed to delete block total difficulty", "err", err)
	}
}

// HasReceipts verifies the existence of all the transaction receipts belonging
// to a block.
func HasReceipts(db database.Reader, hash common.Hash, number uint64) bool {
	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
		return true
	}
	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
		return false
	}
	return true
}
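// childTdSketch is a minimal usage sketch (not one of the original accessors)
// of how the total-difficulty records chain together: a child's TD is its
// parent's stored TD plus the child's own difficulty. Genesis (number 0) is
// assumed to be handled elsewhere.
func childTdSketch(db database.Reader, w database.KeyValueWriter, block *types.Block) bool {
	parentTd := ReadTd(db, block.ParentHash(), block.NumberU64()-1)
	if parentTd == nil {
		return false // parent TD unknown, the chain cannot be extended here
	}
	WriteTd(w, block.Hash(), block.NumberU64(), new(big.Int).Add(parentTd, block.Difficulty()))
	return true
}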
// ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
func ReadReceiptsRLP(db database.Reader, hash common.Hash, number uint64) rlp.RawValue {
	// First try to look up the data in the ancient database. The extra hash
	// comparison is necessary since the ancient database only maintains
	// the canonical data.
	data, _ := db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	// Then try to look up the data in leveldb.
	data, _ = db.Get(blockReceiptsKey(number, hash))
	if len(data) > 0 {
		return data
	}
	// In the background the freezer is moving data from leveldb into flat files.
	// During the first ancient db check the data may not be there yet, but by the
	// time we look into leveldb it may already have been moved, so retry the
	// ancient store to avoid a spurious not-found.
	data, _ = db.Ancient(freezerReceiptTable, number)
	if len(data) > 0 {
		h, _ := db.Ancient(freezerHashTable, number)
		if common.BytesToHash(h) == hash {
			return data
		}
	}
	return nil // Can't find the data anywhere.
}

// ReadRawReceipts retrieves all the transaction receipts belonging to a block.
// The receipt metadata fields are not guaranteed to be populated, so they
// should not be used. Use ReadReceipts instead if the metadata is needed.
func ReadRawReceipts(db database.Reader, hash common.Hash, number uint64) types.Receipts {
	// Retrieve the flattened receipt slice
	data := ReadReceiptsRLP(db, hash, number)
	if len(data) == 0 {
		return nil
	}
	// Convert the receipts from their storage form to their internal representation
	storageReceipts := []*types.ReceiptForStorage{}
	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
		return nil
	}
	receipts := make(types.Receipts, len(storageReceipts))
	for i, storageReceipt := range storageReceipts {
		receipts[i] = (*types.Receipt)(storageReceipt)
	}
	return receipts
}

// ReadReceipts retrieves all the transaction receipts belonging to a block, including
// its corresponding metadata fields. If it is unable to populate these metadata
// fields then nil is returned.
//
// The current implementation populates these metadata fields by reading the receipts'
// corresponding block body, so if the block body is not found it will return nil even
// if the receipt itself is stored.
func ReadReceipts(db database.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
	// We're deriving many fields from the block body, so retrieve it beside the receipts
	receipts := ReadRawReceipts(db, hash, number)
	if receipts == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		log.Error("Missing body but have receipt", "hash", hash, "number", number)
		return nil
	}
	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
		return nil
	}
	return receipts
}
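// receiptsWithMetadataSketch is a minimal usage sketch (not one of the original
// accessors) of the contract spelled out on ReadReceipts: the stored receipts
// lack derived metadata (transaction hash, block position, contract address,
// ...), so the block body must be available before ReadReceipts can succeed.
func receiptsWithMetadataSketch(db database.Reader, hash common.Hash, number uint64, config *params.ChainConfig) (types.Receipts, bool) {
	if !HasBody(db, hash, number) {
		return nil, false // body missing, metadata cannot be derived yet
	}
	receipts := ReadReceipts(db, hash, number, config)
	return receipts, receipts != nil
}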
// WriteReceipts stores all the transaction receipts belonging to a block.
func WriteReceipts(db database.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
	// Convert the receipts into their storage form and serialize them
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	bytes, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to encode block receipts", "err", err)
	}
	// Store the flattened receipt slice
	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
		log.Crit("Failed to store block receipts", "err", err)
	}
}

// DeleteReceipts removes all receipt data associated with a block hash.
func DeleteReceipts(db database.KeyValueWriter, hash common.Hash, number uint64) {
	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
		log.Crit("Failed to delete block receipts", "err", err)
	}
}

// ReadBlock retrieves an entire block corresponding to the hash, assembling it
// back from the stored header and body. If either the header or body could not
// be retrieved nil is returned.
//
// Note, due to concurrent download of header and block body the header and thus
// canonical hash can be stored in the database but the body data not (yet).
func ReadBlock(db database.Reader, hash common.Hash, number uint64) *types.Block {
	header := ReadHeader(db, hash, number)
	if header == nil {
		return nil
	}
	body := ReadBody(db, hash, number)
	if body == nil {
		return nil
	}
	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
}

// WriteBlock serializes a block into the database, header and body separately.
func WriteBlock(db database.KeyValueWriter, block *types.Block) {
	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
	WriteHeader(db, block.Header())
}

// WriteAncientBlock writes entire block data into the ancient store and returns the total written size.
func WriteAncientBlock(db database.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
	// Encode all block components to RLP format.
	headerBlob, err := rlp.EncodeToBytes(block.Header())
	if err != nil {
		log.Crit("Failed to RLP encode block header", "err", err)
	}
	bodyBlob, err := rlp.EncodeToBytes(block.Body())
	if err != nil {
		log.Crit("Failed to RLP encode body", "err", err)
	}
	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
	for i, receipt := range receipts {
		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
	}
	receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
	if err != nil {
		log.Crit("Failed to RLP encode block receipts", "err", err)
	}
	tdBlob, err := rlp.EncodeToBytes(td)
	if err != nil {
		log.Crit("Failed to RLP encode block total difficulty", "err", err)
	}
	// Write all blobs to the flat files.
	err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
	if err != nil {
		log.Crit("Failed to write block data to ancient store", "err", err)
	}
	return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
}
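// writeCanonicalBlockSketch is a minimal usage sketch (not one of the original
// accessors) of the writes that accompany accepting a new canonical block into
// the key-value store. The real chain insertion code batches these writes,
// derives the total difficulty from the parent and also maintains the
// fast-sync head; the point here is only which accessors are involved.
func writeCanonicalBlockSketch(db database.KeyValueWriter, block *types.Block, receipts types.Receipts, td *big.Int) {
	WriteBlock(db, block)
	WriteTd(db, block.Hash(), block.NumberU64(), td)
	WriteReceipts(db, block.Hash(), block.NumberU64(), receipts)
	WriteCanonicalHash(db, block.Hash(), block.NumberU64())
	WriteHeadHeaderHash(db, block.Hash())
	WriteHeadBlockHash(db, block.Hash())
}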
// DeleteBlock removes all block data associated with a hash.
func DeleteBlock(db database.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	DeleteHeader(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// DeleteBlockWithoutNumber removes all block data associated with a hash, except
// the hash to number mapping.
func DeleteBlockWithoutNumber(db database.KeyValueWriter, hash common.Hash, number uint64) {
	DeleteReceipts(db, hash, number)
	deleteHeaderWithoutNumber(db, hash, number)
	DeleteBody(db, hash, number)
	DeleteTd(db, hash, number)
}

// FindCommonAncestor returns the last common ancestor of two block headers.
func FindCommonAncestor(db database.Reader, a, b *types.Header) *types.Header {
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	for a.Hash() != b.Hash() {
		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}
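// reorgDepthSketch is a minimal usage sketch (not one of the original
// accessors) of FindCommonAncestor: during a reorg the common ancestor of the
// old and the new head marks the fork point, and the distance from the old
// head to it is the number of blocks rolled back.
func reorgDepthSketch(db database.Reader, oldHead, newHead *types.Header) (uint64, bool) {
	ancestor := FindCommonAncestor(db, oldHead, newHead)
	if ancestor == nil {
		return 0, false // one of the branches is not fully stored
	}
	return oldHead.Number.Uint64() - ancestor.Number.Uint64(), true
}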