github.com/ethereum/go-ethereum@v1.16.1/triedb/pathdb/history.go

// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pathdb

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"maps"
	"slices"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
)

// State history records the state changes involved in executing a block. The
// state can be reverted to the previous version by applying the associated
// history object (state reverse diff). State history objects are kept to
// guarantee that the system can perform state rollbacks in case of deep reorg.
//
// Each state transition will generate a state history object. Note that not
// every block has a corresponding state history object: if a block performs
// no state changes whatsoever, no history is created for it. Each state
// history has a sequentially increasing number acting as its unique identifier.
//
// The state history is written to disk (ancient store) when the corresponding
// diff layer is merged into the disk layer. At the same time, the system can
// prune the oldest histories according to config.
//
//                                                       Disk State
//                                                           ^
//                                                           |
//   +------------+     +---------+     +---------+     +---------+
//   | Init State |---->| State 1 |---->|   ...   |---->| State n |
//   +------------+     +---------+     +---------+     +---------+
//
//                      +-----------+     +------+     +-----------+
//                      | History 1 |---->| ...  |---->| History n |
//                      +-----------+     +------+     +-----------+
//
// # Rollback
//
// If the system wants to roll back to a previous state n, it needs to ensure
// that all history objects from n+1 up to the current disk layer exist. The
// history objects are applied to the state in reverse order, starting from the
// current disk layer.

const (
	accountIndexSize = common.AddressLength + 13 // The length of encoded account index
	slotIndexSize    = common.HashLength + 5     // The length of encoded slot index
	historyMetaSize  = 9 + 2*common.HashLength   // The length of encoded history meta

	stateHistoryV0 = uint8(0)       // initial version of state history structure
	stateHistoryV1 = uint8(1)       // use the storage slot raw key as the identifier instead of the key hash
	historyVersion = stateHistoryV1 // the default state history version
)
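
// The three size constants above follow directly from the encoded layouts
// defined later in this file: an account index is address (20 bytes) +
// length (1) + offset (4) + storageOffset (4) + storageSlots (4) = 33 bytes;
// a slot index is id (32) + length (1) + offset (4) = 37 bytes; the meta is
// version (1) + parent (32) + root (32) + block (8) = 73 bytes.
//
// The declarations below are an illustrative sketch, not part of the upstream
// file: they pin each constant to that layout arithmetic at compile time,
// since an assignment between array types fails to compile if the lengths
// differ.
var (
	_ [accountIndexSize]byte = [common.AddressLength + 1 + 4 + 4 + 4]byte{}
	_ [slotIndexSize]byte    = [common.HashLength + 1 + 4]byte{}
	_ [historyMetaSize]byte  = [1 + 2*common.HashLength + 8]byte{}
)
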
// Each state history entry consists of five elements:
//
// # metadata
// This object contains a few meta fields, such as the associated state root,
// block number, version tag and so on. This object may contain an extra
// accountHash list, which means that the storage changes belonging to these
// accounts are incomplete due to a large contract destruction. Such an
// incomplete history cannot be used for rollback or for serving archive
// state requests.
//
// # account index
// This object contains the index information of each account. For example,
// offset and length indicate the location of the data belonging to the
// account. Besides, storageOffset and storageSlots indicate the location of
// the storage modifications belonging to the account.
//
// The size of each account index is *fixed*, and all indexes are sorted
// lexicographically. Thus a binary search can be performed to quickly locate
// a specific account.
//
// # account data
// Account data is a concatenated byte stream composed of all account data.
// A single account's data can be resolved using the offset and length info
// indicated by the corresponding account index.
//
//	       fixed size
//	      ^          ^
//	     /            \
//	    +-----------------+-----------------+----------------+-----------------+
//	    | Account index 1 | Account index 2 |       ...      | Account index N |
//	    +-----------------+-----------------+----------------+-----------------+
//	    |
//	    |     length
//	offset |----------------+
//	    v  v                v
//	    +----------------+----------------+----------------+----------------+
//	    | Account data 1 | Account data 2 |       ...      | Account data N |
//	    +----------------+----------------+----------------+----------------+
//
// # storage index
// This object is similar to the account index. It is also fixed in size and
// contains the location info of the storage slot data.
//
// # storage data
// Storage data is a concatenated byte stream composed of all storage slot data.
// A single storage slot's data can be resolved using the location info
// indicated by the corresponding account index and storage slot index.
//
//	                      fixed size
//	                     ^          ^
//	                    /            \
//	+-----------------+-----------------+----------------+-----------------+
//	| Account index 1 | Account index 2 |       ...      | Account index N |
//	+-----------------+-----------------+----------------+-----------------+
//	                  |
//	                  |                  storage slots
//	   storage offset |-----------------------------------------------------+
//	                  v                                                     v
//	                  +-----------------+-----------------+-----------------+
//	                  | storage index 1 | storage index 2 | storage index 3 |
//	                  +-----------------+-----------------+-----------------+
//	                  |     length
//	           offset |-------------+
//	                  v             v
//	                  +-------------+
//	                  | slot data 1 |
//	                  +-------------+

// accountIndex describes the metadata belonging to an account.
type accountIndex struct {
	address       common.Address // The address of account
	length        uint8          // The length of account data, size limited to 255 bytes
	offset        uint32         // The offset of item in account data table
	storageOffset uint32         // The offset of storage index in storage index table
	storageSlots  uint32         // The number of mutated storage slots belonging to the account
}
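
// locateAccountData is an illustrative sketch, not part of the upstream file:
// it shows how the two-table layout pictured above is meant to be consumed.
// Given the concatenated index table and data table, the blob of the pos'th
// account is found by decoding its fixed-size index entry and slicing the
// data table with the recorded offset and length.
func locateAccountData(accountIndexes, accountData []byte, pos int) ([]byte, error) {
	if (pos+1)*accountIndexSize > len(accountIndexes) {
		return nil, errors.New("account index out of range")
	}
	var idx accountIndex
	idx.decode(accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])

	end := idx.offset + uint32(idx.length)
	if uint32(len(accountData)) < end {
		return nil, errors.New("account data out of range")
	}
	return accountData[idx.offset:end], nil
}
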
// encode packs account index into byte stream.
func (i *accountIndex) encode() []byte {
	var buf [accountIndexSize]byte
	copy(buf[:], i.address.Bytes())
	buf[common.AddressLength] = i.length
	binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset)
	binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset)
	binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots)
	return buf[:]
}

// decode unpacks account index from byte stream.
func (i *accountIndex) decode(blob []byte) {
	i.address = common.BytesToAddress(blob[:common.AddressLength])
	i.length = blob[common.AddressLength]
	i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:])
	i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:])
	i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:])
}

// slotIndex describes the metadata belonging to a storage slot.
type slotIndex struct {
	// id is the identifier of the storage slot. Specifically:
	// in v0, it's the hash of the raw storage slot key (32 bytes);
	// in v1, it's the raw storage slot key itself (32 bytes).
	id     common.Hash
	length uint8  // The length of storage slot, up to 32 bytes as defined in the protocol
	offset uint32 // The offset of item in storage slot data table
}

// encode packs slot index into byte stream.
func (i *slotIndex) encode() []byte {
	var buf [slotIndexSize]byte
	copy(buf[:common.HashLength], i.id.Bytes())
	buf[common.HashLength] = i.length
	binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset)
	return buf[:]
}

// decode unpacks slot index from the byte stream.
func (i *slotIndex) decode(blob []byte) {
	i.id = common.BytesToHash(blob[:common.HashLength])
	i.length = blob[common.HashLength]
	i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:])
}

// meta describes the metadata of a state history object.
type meta struct {
	version uint8       // version tag of history object
	parent  common.Hash // prev-state root before the state transition
	root    common.Hash // post-state root after the state transition
	block   uint64      // associated block number
}

// encode packs the meta object into byte stream.
func (m *meta) encode() []byte {
	buf := make([]byte, historyMetaSize)
	buf[0] = m.version
	copy(buf[1:1+common.HashLength], m.parent.Bytes())
	copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes())
	binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block)
	return buf
}

// decode unpacks the meta object from byte stream.
func (m *meta) decode(blob []byte) error {
	if len(blob) < 1 {
		return errors.New("no version tag")
	}
	switch blob[0] {
	case stateHistoryV0, stateHistoryV1:
		if len(blob) != historyMetaSize {
			return fmt.Errorf("invalid state history meta, len: %d", len(blob))
		}
		m.version = blob[0]
		m.parent = common.BytesToHash(blob[1 : 1+common.HashLength])
		m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength])
		m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize])
		return nil
	default:
		return fmt.Errorf("unknown version %d", blob[0])
	}
}
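
// exampleMetaRoundTrip is an illustrative sketch, not part of the upstream
// file: it demonstrates that meta.encode and meta.decode are inverses for a
// well-formed 73-byte blob, and that decode rejects truncated input.
func exampleMetaRoundTrip() error {
	in := meta{
		version: stateHistoryV1,
		parent:  common.HexToHash("0x01"),
		root:    common.HexToHash("0x02"),
		block:   42,
	}
	var out meta
	if err := out.decode(in.encode()); err != nil {
		return err
	}
	if out != in {
		return errors.New("meta round-trip mismatch")
	}
	// A truncated blob must be rejected by the length check.
	if err := out.decode(in.encode()[:10]); err == nil {
		return errors.New("expected error for truncated meta")
	}
	return nil
}
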
// history represents a set of state changes belonging to a block along with
// the metadata including the state roots involved in the state transition.
// State history objects on disk are linked to each other by a unique id
// (an 8-byte integer); the oldest state history objects can be pruned on
// demand in order to control the storage size.
type history struct {
	meta        *meta                                      // Metadata of the history object
	accounts    map[common.Address][]byte                  // Account data keyed by account address
	accountList []common.Address                           // Sorted list of account addresses
	storages    map[common.Address]map[common.Hash][]byte  // Storage data keyed by account address and slot identifier
	storageList map[common.Address][]common.Hash           // Sorted slot identifier lists, keyed by account address
}

// newHistory constructs the state history object with the provided state change set.
func newHistory(root common.Hash, parent common.Hash, block uint64, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, rawStorageKey bool) *history {
	var (
		accountList = slices.SortedFunc(maps.Keys(accounts), common.Address.Cmp)
		storageList = make(map[common.Address][]common.Hash)
	)
	for addr, slots := range storages {
		storageList[addr] = slices.SortedFunc(maps.Keys(slots), common.Hash.Cmp)
	}
	version := historyVersion
	if !rawStorageKey {
		version = stateHistoryV0
	}
	return &history{
		meta: &meta{
			version: version,
			parent:  parent,
			root:    root,
			block:   block,
		},
		accounts:    accounts,
		accountList: accountList,
		storages:    storages,
		storageList: storageList,
	}
}

// stateSet returns the state set, keyed by the hash of the account address
// and the hash of the storage slot key.
func (h *history) stateSet() (map[common.Hash][]byte, map[common.Hash]map[common.Hash][]byte) {
	var (
		accounts = make(map[common.Hash][]byte)
		storages = make(map[common.Hash]map[common.Hash][]byte)
	)
	for addr, blob := range h.accounts {
		addrHash := crypto.Keccak256Hash(addr.Bytes())
		accounts[addrHash] = blob

		storage, exist := h.storages[addr]
		if !exist {
			continue
		}
		if h.meta.version == stateHistoryV0 {
			// In v0 the slot identifiers are already key hashes.
			storages[addrHash] = storage
		} else {
			// In v1 the slot identifiers are raw keys and must be hashed.
			subset := make(map[common.Hash][]byte)
			for key, slot := range storage {
				subset[crypto.Keccak256Hash(key.Bytes())] = slot
			}
			storages[addrHash] = subset
		}
	}
	return accounts, storages
}
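
// exampleNewHistory is an illustrative sketch, not part of the upstream file
// (the addresses, keys and values are made up): it builds a minimal history
// for a block that modified one account and one of its storage slots. The
// maps hold the *previous* values, since a history is a reverse diff, and
// rawStorageKey=true selects the v1 format with raw slot keys as identifiers.
func exampleNewHistory() *history {
	var (
		addr     = common.HexToAddress("0x1111111111111111111111111111111111111111")
		slotKey  = common.HexToHash("0x01")
		accounts = map[common.Address][]byte{addr: {0x01, 0x02}} // prior account blob
		storages = map[common.Address]map[common.Hash][]byte{
			addr: {slotKey: {0x03}}, // prior slot value
		}
	)
	return newHistory(common.HexToHash("0xaa"), common.HexToHash("0xbb"), 100, accounts, storages, true)
}
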
// encode serializes the state history and returns four byte streams representing
// the concatenated account/storage data and account/storage indexes respectively.
func (h *history) encode() ([]byte, []byte, []byte, []byte) {
	var (
		slotNumber     uint32 // the number of processed slots
		accountData    []byte // the buffer for concatenated account data
		storageData    []byte // the buffer for concatenated storage data
		accountIndexes []byte // the buffer for concatenated account index
		storageIndexes []byte // the buffer for concatenated storage index
	)
	for _, addr := range h.accountList {
		accIndex := accountIndex{
			address: addr,
			length:  uint8(len(h.accounts[addr])),
			offset:  uint32(len(accountData)),
		}
		slots, exist := h.storages[addr]
		if exist {
			// Encode storage slots in order
			for _, slotHash := range h.storageList[addr] {
				sIndex := slotIndex{
					id:     slotHash,
					length: uint8(len(slots[slotHash])),
					offset: uint32(len(storageData)),
				}
				storageData = append(storageData, slots[slotHash]...)
				storageIndexes = append(storageIndexes, sIndex.encode()...)
			}
			// Fill up the storage meta in account index
			accIndex.storageOffset = slotNumber
			accIndex.storageSlots = uint32(len(slots))
			slotNumber += uint32(len(slots))
		}
		accountData = append(accountData, h.accounts[addr]...)
		accountIndexes = append(accountIndexes, accIndex.encode()...)
	}
	return accountData, storageData, accountIndexes, storageIndexes
}

// decoder wraps the byte streams for decoding with extra meta fields.
type decoder struct {
	accountData    []byte // the buffer for concatenated account data
	storageData    []byte // the buffer for concatenated storage data
	accountIndexes []byte // the buffer for concatenated account index
	storageIndexes []byte // the buffer for concatenated storage index

	lastAccount       *common.Address // the address of last resolved account
	lastAccountRead   uint32          // the read-cursor position of account data
	lastSlotIndexRead uint32          // the read-cursor position of storage slot index
	lastSlotDataRead  uint32          // the read-cursor position of storage slot data
}

// verify validates the provided byte streams for decoding state history. A few
// checks will be performed to quickly detect data corruption. The byte stream
// is regarded as corrupted if:
//
//   - the account index buffer is empty (an empty state set is invalid)
//   - the account index/storage index buffer is not aligned
//
// Note, these situations are allowed:
//
//   - empty account data: none of the accounts were previously present
//   - empty storage set: no slots were modified
func (r *decoder) verify() error {
	if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 {
		return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes))
	}
	if len(r.storageIndexes)%slotIndexSize != 0 {
		return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes))
	}
	return nil
}

// readAccount parses the account from the byte stream at the specified position.
func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) {
	// Decode account index from the index byte stream.
	var index accountIndex
	if (pos+1)*accountIndexSize > len(r.accountIndexes) {
		return accountIndex{}, nil, errors.New("account data buffer is corrupted")
	}
	index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize])

	// Perform validation before parsing account data, ensure
	// - accounts are sorted in order in the byte stream
	// - the account data is strictly encoded with no gap inside
	// - the account data is not out-of-slice
	if r.lastAccount != nil { // zero address is possible
		if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 {
			return accountIndex{}, nil, errors.New("account is not in order")
		}
	}
	if index.offset != r.lastAccountRead {
		return accountIndex{}, nil, errors.New("account data buffer is gapped")
	}
	last := index.offset + uint32(index.length)
	if uint32(len(r.accountData)) < last {
		return accountIndex{}, nil, errors.New("account data buffer is corrupted")
	}
	data := r.accountData[index.offset:last]

	r.lastAccount = &index.address
	r.lastAccountRead = last

	return index, data, nil
}
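
// exampleVerify is an illustrative sketch, not part of the upstream file: it
// shows which byte streams pass the quick corruption checks above. An empty
// account index table is invalid, a misaligned one is corrupted, while an
// empty storage index table is perfectly fine.
func exampleVerify() {
	ok := &decoder{accountIndexes: make([]byte, 2*accountIndexSize)}
	fmt.Println(ok.verify()) // <nil>: two aligned account indexes, no storage

	bad := &decoder{accountIndexes: make([]byte, accountIndexSize+1)}
	fmt.Println(bad.verify()) // error: buffer not aligned to 33-byte entries
}
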
// readStorage parses the storage slots from the byte stream for the specified account.
func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) {
	var (
		last    *common.Hash
		count   = int(accIndex.storageSlots)
		list    = make([]common.Hash, 0, count)
		storage = make(map[common.Hash][]byte, count)
	)
	for j := 0; j < count; j++ {
		var (
			index slotIndex
			start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize)
			end   = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize)
		)
		// Perform validation before parsing storage slot data, ensure
		// - the slot index is not out-of-slice
		// - the slot data is not out-of-slice
		// - slots are sorted in order in the byte stream
		// - the slot indexes are strictly encoded with no gap inside
		// - the slot data is strictly encoded with no gap inside
		if start != r.lastSlotIndexRead {
			return nil, nil, errors.New("storage index buffer is gapped")
		}
		if uint32(len(r.storageIndexes)) < end {
			return nil, nil, errors.New("storage index buffer is corrupted")
		}
		index.decode(r.storageIndexes[start:end])

		if last != nil {
			if bytes.Compare(last.Bytes(), index.id.Bytes()) >= 0 {
				return nil, nil, fmt.Errorf("storage slot is not in order, last: %x, current: %x", *last, index.id)
			}
		}
		if index.offset != r.lastSlotDataRead {
			return nil, nil, errors.New("storage data buffer is gapped")
		}
		sEnd := index.offset + uint32(index.length)
		if uint32(len(r.storageData)) < sEnd {
			return nil, nil, errors.New("storage data buffer is corrupted")
		}
		storage[index.id] = r.storageData[r.lastSlotDataRead:sEnd]
		list = append(list, index.id)

		last = &index.id
		r.lastSlotIndexRead = end
		r.lastSlotDataRead = sEnd
	}
	return list, storage, nil
}

// decode deserializes the account and storage data from the provided byte streams.
func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error {
	var (
		count       = len(accountIndexes) / accountIndexSize
		accounts    = make(map[common.Address][]byte, count)
		storages    = make(map[common.Address]map[common.Hash][]byte)
		accountList = make([]common.Address, 0, count)
		storageList = make(map[common.Address][]common.Hash)

		r = &decoder{
			accountData:    accountData,
			storageData:    storageData,
			accountIndexes: accountIndexes,
			storageIndexes: storageIndexes,
		}
	)
	if err := r.verify(); err != nil {
		return err
	}
	for i := 0; i < count; i++ {
		// Resolve account first
		accIndex, accData, err := r.readAccount(i)
		if err != nil {
			return err
		}
		accounts[accIndex.address] = accData
		accountList = append(accountList, accIndex.address)

		// Resolve storage slots
		slotList, slotData, err := r.readStorage(accIndex)
		if err != nil {
			return err
		}
		if len(slotList) > 0 {
			storageList[accIndex.address] = slotList
			storages[accIndex.address] = slotData
		}
	}
	h.accounts = accounts
	h.accountList = accountList
	h.storages = storages
	h.storageList = storageList
	return nil
}
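
// exampleHistoryRoundTrip is an illustrative sketch, not part of the upstream
// file: it serializes a history into the four byte streams produced by encode
// and feeds them back through decode, which must reproduce the same change
// set. The metadata is carried over directly, as it is persisted separately.
func exampleHistoryRoundTrip(h *history) error {
	accountData, storageData, accountIndexes, storageIndexes := h.encode()

	dec := history{meta: h.meta}
	if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
		return err
	}
	if len(dec.accounts) != len(h.accounts) || len(dec.storages) != len(h.storages) {
		return errors.New("change set mismatch after round-trip")
	}
	return nil
}
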
// readHistory reads and decodes the state history object by the given id.
func readHistory(reader ethdb.AncientReader, id uint64) (*history, error) {
	mData, accountIndexes, storageIndexes, accountData, storageData, err := rawdb.ReadStateHistory(reader, id)
	if err != nil {
		return nil, err
	}
	var m meta
	if err := m.decode(mData); err != nil {
		return nil, err
	}
	h := history{meta: &m}
	if err := h.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil {
		return nil, err
	}
	return &h, nil
}

// readHistories reads and decodes a list of state histories within the
// specified history range.
func readHistories(freezer ethdb.AncientReader, start uint64, count uint64) ([]*history, error) {
	var histories []*history
	metaList, aIndexList, sIndexList, aDataList, sDataList, err := rawdb.ReadStateHistoryList(freezer, start, count)
	if err != nil {
		return nil, err
	}
	for i := 0; i < len(metaList); i++ {
		var m meta
		if err := m.decode(metaList[i]); err != nil {
			return nil, err
		}
		h := history{meta: &m}
		if err := h.decode(aDataList[i], sDataList[i], aIndexList[i], sIndexList[i]); err != nil {
			return nil, err
		}
		histories = append(histories, &h)
	}
	return histories, nil
}

// writeHistory persists the state history with the provided state set.
func writeHistory(writer ethdb.AncientWriter, dl *diffLayer) error {
	// Short circuit if the state set is not available.
	if dl.states == nil {
		return errors.New("state change set is not available")
	}
	var (
		start   = time.Now()
		history = newHistory(dl.rootHash(), dl.parentLayer().rootHash(), dl.block, dl.states.accountOrigin, dl.states.storageOrigin, dl.states.rawStorageKey)
	)
	accountData, storageData, accountIndex, storageIndex := history.encode()
	dataSize := common.StorageSize(len(accountData) + len(storageData))
	indexSize := common.StorageSize(len(accountIndex) + len(storageIndex))

	// Write the history data into the five freezer tables respectively.
	if err := rawdb.WriteStateHistory(writer, dl.stateID(), history.meta.encode(), accountIndex, storageIndex, accountData, storageData); err != nil {
		return err
	}
	historyDataBytesMeter.Mark(int64(dataSize))
	historyIndexBytesMeter.Mark(int64(indexSize))
	historyBuildTimeMeter.UpdateSince(start)
	log.Debug("Stored state history", "id", dl.stateID(), "block", dl.block, "data", dataSize, "index", indexSize, "elapsed", common.PrettyDuration(time.Since(start)))

	return nil
}

// checkHistories retrieves a batch of meta objects within the specified range
// and performs the callback on each item.
func checkHistories(reader ethdb.AncientReader, start, count uint64, check func(*meta) error) error {
	for count > 0 {
		number := count
		if number > 10000 {
			number = 10000 // split the big read into small chunks
		}
		blobs, err := rawdb.ReadStateHistoryMetaList(reader, start, number)
		if err != nil {
			return err
		}
		for _, blob := range blobs {
			var dec meta
			if err := dec.decode(blob); err != nil {
				return err
			}
			if err := check(&dec); err != nil {
				return err
			}
		}
		count -= uint64(len(blobs))
		start += uint64(len(blobs))
	}
	return nil
}
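
// exampleCheckContinuity is an illustrative sketch, not part of the upstream
// file: it uses checkHistories to verify that the stored histories form an
// unbroken chain, i.e. every history's parent root equals the post-state
// root of the history preceding it.
func exampleCheckContinuity(reader ethdb.AncientReader, start, count uint64) error {
	var prev *common.Hash
	return checkHistories(reader, start, count, func(m *meta) error {
		if prev != nil && m.parent != *prev {
			return fmt.Errorf("broken history chain at block %d", m.block)
		}
		root := m.root
		prev = &root
		return nil
	})
}
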
// truncateFromHead removes the extra state histories from the head with the
// given parameters. It returns the number of items removed from the head.
func truncateFromHead(db ethdb.Batcher, store ethdb.AncientStore, nhead uint64) (int, error) {
	ohead, err := store.Ancients()
	if err != nil {
		return 0, err
	}
	otail, err := store.Tail()
	if err != nil {
		return 0, err
	}
	// Ensure that the truncation target falls within the specified range.
	if ohead < nhead || nhead < otail {
		return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, nhead)
	}
	// Short circuit if nothing to truncate.
	if ohead == nhead {
		return 0, nil
	}
	// Load the meta objects in range [nhead+1, ohead]
	blobs, err := rawdb.ReadStateHistoryMetaList(store, nhead+1, ohead-nhead)
	if err != nil {
		return 0, err
	}
	batch := db.NewBatch()
	for _, blob := range blobs {
		var m meta
		if err := m.decode(blob); err != nil {
			return 0, err
		}
		rawdb.DeleteStateID(batch, m.root)
	}
	if err := batch.Write(); err != nil {
		return 0, err
	}
	ohead, err = store.TruncateHead(nhead)
	if err != nil {
		return 0, err
	}
	return int(ohead - nhead), nil
}

// truncateFromTail removes the extra state histories from the tail with the
// given parameters. It returns the number of items removed from the tail.
func truncateFromTail(db ethdb.Batcher, store ethdb.AncientStore, ntail uint64) (int, error) {
	ohead, err := store.Ancients()
	if err != nil {
		return 0, err
	}
	otail, err := store.Tail()
	if err != nil {
		return 0, err
	}
	// Ensure that the truncation target falls within the specified range.
	if otail > ntail || ntail > ohead {
		return 0, fmt.Errorf("out of range, tail: %d, head: %d, target: %d", otail, ohead, ntail)
	}
	// Short circuit if nothing to truncate.
	if otail == ntail {
		return 0, nil
	}
	// Load the meta objects in range [otail+1, ntail]
	blobs, err := rawdb.ReadStateHistoryMetaList(store, otail+1, ntail-otail)
	if err != nil {
		return 0, err
	}
	batch := db.NewBatch()
	for _, blob := range blobs {
		var m meta
		if err := m.decode(blob); err != nil {
			return 0, err
		}
		rawdb.DeleteStateID(batch, m.root)
	}
	if err := batch.Write(); err != nil {
		return 0, err
	}
	otail, err = store.TruncateTail(ntail)
	if err != nil {
		return 0, err
	}
	return int(ntail - otail), nil
}
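
// examplePruneToLimit is an illustrative sketch, not part of the upstream
// file: it drives truncateFromTail to enforce a simple retention policy,
// dropping the oldest histories once more than limit of them are stored.
// It assumes the history ids are dense, so the item count reported by
// Ancients can serve as the head position.
func examplePruneToLimit(db ethdb.Batcher, store ethdb.AncientStore, limit uint64) (int, error) {
	head, err := store.Ancients()
	if err != nil {
		return 0, err
	}
	if head <= limit {
		return 0, nil // within the retention limit, nothing to prune
	}
	return truncateFromTail(db, store, head-limit)
}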