github.com/decred/dcrd/blockchain@v1.2.1/upgrade.go

// Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"time"

	"github.com/decred/dcrd/blockchain/internal/progresslog"
	"github.com/decred/dcrd/blockchain/stake"
	"github.com/decred/dcrd/blockchain/standalone"
	"github.com/decred/dcrd/chaincfg"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/database"
	"github.com/decred/dcrd/dcrutil"
	"github.com/decred/dcrd/wire"
)

// errInterruptRequested indicates that an operation was cancelled due to a
// user-requested interrupt.
var errInterruptRequested = errors.New("interrupt requested")

// errBatchFinished indicates that a foreach database loop was exited due to
// reaching the maximum batch size.
var errBatchFinished = errors.New("batch finished")

// interruptRequested returns true when the provided channel has been closed.
// This simplifies early shutdown slightly since the caller can just use an if
// statement instead of a select.
func interruptRequested(interrupted <-chan struct{}) bool {
	select {
	case <-interrupted:
		return true
	default:
	}

	return false
}

// deserializeDatabaseInfoV2 deserializes a database information struct from
// the passed serialized byte slice according to the legacy version 2 format.
//
// The legacy format is as follows:
//
//   Field    Type     Size     Description
//   version  uint32   4 bytes  The version of the database
//   compVer  uint32   4 bytes  The script compression version of the database
//   created  uint32   4 bytes  The date of the creation of the database
//
// The high bit (0x80000000) is used on version to indicate that an upgrade
// is in progress and is used to confirm the database fidelity on start up.
func deserializeDatabaseInfoV2(dbInfoBytes []byte) (*databaseInfo, error) {
	// upgradeStartedBit is the bit flag for whether or not a database
	// upgrade is in progress. It is used to determine if the database
	// is in an inconsistent state from the update.
	const upgradeStartedBit = 0x80000000

	byteOrder := binary.LittleEndian

	rawVersion := byteOrder.Uint32(dbInfoBytes[0:4])
	upgradeStarted := (upgradeStartedBit & rawVersion) > 0
	version := rawVersion &^ upgradeStartedBit
	compVer := byteOrder.Uint32(dbInfoBytes[4:8])
	ts := byteOrder.Uint32(dbInfoBytes[8:12])

	if upgradeStarted {
		return nil, AssertError("database is in the upgrade started " +
			"state before resumable upgrades were supported - " +
			"delete the database and resync the blockchain")
	}

	return &databaseInfo{
		version: version,
		compVer: compVer,
		created: time.Unix(int64(ts), 0),
	}, nil
}
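
// As a point of reference, the inverse of the legacy layout documented above
// is sketched below. The helper name is illustrative only; current database
// info serialization is handled elsewhere in this package (see
// dbPutDatabaseInfo).
func serializeDatabaseInfoV2Sketch(version, compVer uint32, created time.Time, upgradeStarted bool) []byte {
	const upgradeStartedBit = 0x80000000

	// Flag an in-progress upgrade by setting the high bit of the version.
	if upgradeStarted {
		version |= upgradeStartedBit
	}

	// version (4 bytes) || compVer (4 bytes) || created (4 bytes), all
	// little endian.
	serialized := make([]byte, 12)
	byteOrder := binary.LittleEndian
	byteOrder.PutUint32(serialized[0:4], version)
	byteOrder.PutUint32(serialized[4:8], compVer)
	byteOrder.PutUint32(serialized[8:12], uint32(created.Unix()))
	return serialized
}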

// ticketsVotedInBlock fetches a list of tickets that were voted in the
// block.
func ticketsVotedInBlock(bl *dcrutil.Block) []chainhash.Hash {
	var tickets []chainhash.Hash
	for _, stx := range bl.MsgBlock().STransactions {
		if stake.IsSSGen(stx) {
			tickets = append(tickets, stx.TxIn[1].PreviousOutPoint.Hash)
		}
	}

	return tickets
}

// ticketsRevokedInBlock fetches a list of tickets that were revoked in the
// block.
func ticketsRevokedInBlock(bl *dcrutil.Block) []chainhash.Hash {
	var tickets []chainhash.Hash
	for _, stx := range bl.MsgBlock().STransactions {
		if stake.DetermineTxType(stx) == stake.TxTypeSSRtx {
			tickets = append(tickets, stx.TxIn[0].PreviousOutPoint.Hash)
		}
	}

	return tickets
}

// upgradeToVersion2 upgrades a version 1 blockchain to version 2, allowing
// use of the new on-disk ticket database.
func upgradeToVersion2(db database.DB, chainParams *chaincfg.Params, dbInfo *databaseInfo) error {
	// Hardcoded so updates to the global values do not affect old upgrades.
	byteOrder := binary.LittleEndian
	chainStateKeyName := []byte("chainstate")
	heightIdxBucketName := []byte("heightidx")

	// These are legacy functions that relied on information in the database
	// that is no longer available in more recent code.
	dbFetchHashByHeight := func(dbTx database.Tx, height int64) (*chainhash.Hash, error) {
		var serializedHeight [4]byte
		byteOrder.PutUint32(serializedHeight[:], uint32(height))

		meta := dbTx.Metadata()
		heightIndex := meta.Bucket(heightIdxBucketName)
		hashBytes := heightIndex.Get(serializedHeight[:])
		if hashBytes == nil {
			str := fmt.Sprintf("no block at height %d exists", height)
			return nil, errNotInMainChain(str)
		}

		var hash chainhash.Hash
		copy(hash[:], hashBytes)
		return &hash, nil
	}
	dbFetchBlockByHeight := func(dbTx database.Tx, height int64) (*dcrutil.Block, error) {
		// First find the hash associated with the provided height in the
		// index.
		hash, err := dbFetchHashByHeight(dbTx, height)
		if err != nil {
			return nil, err
		}

		// Load the raw block bytes from the database.
		blockBytes, err := dbTx.FetchBlock(hash)
		if err != nil {
			return nil, err
		}

		// Create the encapsulated block.
		block, err := dcrutil.NewBlockFromBytes(blockBytes)
		if err != nil {
			return nil, err
		}

		return block, nil
	}

	log.Infof("Initializing upgrade to database version 2")
	progressLogger := progresslog.NewBlockProgressLogger("Upgraded", log)

	// The upgrade is atomic, so there is no need to set the flag that
	// the database is undergoing an upgrade here. Get the stake node
	// for the genesis block, and then begin connecting stake nodes
	// incrementally.
	err := db.Update(func(dbTx database.Tx) error {
		// Fetch the stored best chain state from the database metadata.
		serializedData := dbTx.Metadata().Get(chainStateKeyName)
		best, err := deserializeBestChainState(serializedData)
		if err != nil {
			return err
		}

		bestStakeNode, errLocal := stake.InitDatabaseState(dbTx, chainParams)
		if errLocal != nil {
			return errLocal
		}

		parent, errLocal := dbFetchBlockByHeight(dbTx, 0)
		if errLocal != nil {
			return errLocal
		}

		for i := int64(1); i <= int64(best.height); i++ {
			block, errLocal := dbFetchBlockByHeight(dbTx, i)
			if errLocal != nil {
				return errLocal
			}

			// If we need the tickets, fetch them too.
			var newTickets []chainhash.Hash
			if i >= chainParams.StakeEnabledHeight {
				matureHeight := i - int64(chainParams.TicketMaturity)
				matureBlock, errLocal := dbFetchBlockByHeight(dbTx, matureHeight)
				if errLocal != nil {
					return errLocal
				}
				for _, stx := range matureBlock.MsgBlock().STransactions {
					if stake.IsSStx(stx) {
						h := stx.TxHash()
						newTickets = append(newTickets, h)
					}
				}
			}

			// Iteratively connect the stake nodes in memory.
			header := block.MsgBlock().Header
			hB, errLocal := header.Bytes()
			if errLocal != nil {
				return errLocal
			}
			bestStakeNode, errLocal = bestStakeNode.ConnectNode(
				stake.CalcHash256PRNGIV(hB), ticketsVotedInBlock(block),
				ticketsRevokedInBlock(block), newTickets)
			if errLocal != nil {
				return errLocal
			}

			// Write the top block stake node to the database.
			errLocal = stake.WriteConnectedBestNode(dbTx, bestStakeNode,
				best.hash)
			if errLocal != nil {
				return errLocal
			}

			progressLogger.LogBlockHeight(block.MsgBlock(), parent.MsgBlock())
			parent = block
		}

		// Write the new database version.
		dbInfo.version = 2
		return dbPutDatabaseInfo(dbTx, dbInfo)
	})
	if err != nil {
		return err
	}

	log.Infof("Upgrade to new stake database was successful!")

	return nil
}

// migrateBlockIndex migrates all block entries from the v1 block index bucket
// managed by ffldb to the v2 bucket managed by this package. The v1 bucket
// stored all block entries keyed by block hash, whereas the v2 bucket stores
// them keyed by block height + hash. Also, the old block index only stored the
// header, while the new one stores all info needed to recreate block nodes.
//
// The new block index is guaranteed to be fully updated if this returns
// without failure.
func migrateBlockIndex(db database.DB, interrupt <-chan struct{}) error {
	// blkHdrOffset defines the offset into a v1 block index row for the block
	// header.
	//
	// The serialized block index row format is:
	//   <blocklocation><blockheader>
	const blkHdrOffset = 12

	// blkHdrHeightStart is the offset of the height in the serialized block
	// header bytes as it existed at the time of this migration. It is hard
	// coded here so potential future changes do not affect old upgrades.
	const blkHdrHeightStart = 128
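
	// For reference, offset 128 corresponds to the serialized header fields
	// that precede the height at the time of this migration: version (4),
	// prev block (32), merkle root (32), stake root (32), vote bits (2),
	// final state (6), voters (2), fresh stake (1), revocations (1),
	// pool size (4), bits (4), and sbits (8).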

	// Hardcoded bucket names so updates to the global values do not affect old
	// upgrades.
	v1BucketName := []byte("ffldb-blockidx")
	v2BucketName := []byte("blockidx")
	hashIdxBucketName := []byte("hashidx")

	log.Info("Reindexing block information in the database. This will take " +
		"a while...")
	start := time.Now()

	// Create the new block index bucket as needed.
	err := db.Update(func(dbTx database.Tx) error {
		_, err := dbTx.Metadata().CreateBucketIfNotExists(v2BucketName)
		return err
	})
	if err != nil {
		return err
	}

	// doBatch contains the primary logic for upgrading the block index from
	// version 1 to 2 in batches. This is done because attempting to migrate in
	// a single database transaction could result in massive memory usage and
	// could potentially crash on many systems due to ulimits.
	//
	// It returns the number of entries processed.
	const maxEntries = 20000
	var resumeOffset uint32
	doBatch := func(dbTx database.Tx) (uint32, error) {
		meta := dbTx.Metadata()
		v1BlockIdxBucket := meta.Bucket(v1BucketName)
		if v1BlockIdxBucket == nil {
			return 0, fmt.Errorf("bucket %s does not exist", v1BucketName)
		}

		v2BlockIdxBucket := meta.Bucket(v2BucketName)
		if v2BlockIdxBucket == nil {
			return 0, fmt.Errorf("bucket %s does not exist", v2BucketName)
		}

		hashIdxBucket := meta.Bucket(hashIdxBucketName)
		if hashIdxBucket == nil {
			return 0, fmt.Errorf("bucket %s does not exist", hashIdxBucketName)
		}

		// Migrate block index entries so long as the max number of entries for
		// this batch has not been exceeded.
		var numMigrated, numIterated uint32
		err := v1BlockIdxBucket.ForEach(func(hashBytes, blockRow []byte) error {
			if numMigrated >= maxEntries {
				return errBatchFinished
			}

			// Skip entries that have already been migrated in previous batches.
			numIterated++
			if numIterated-1 < resumeOffset {
				return nil
			}
			resumeOffset++

			// Skip entries that have already been migrated in previous
			// interrupted upgrades.
			var blockHash chainhash.Hash
			copy(blockHash[:], hashBytes)
			endOffset := blkHdrOffset + blockHdrSize
			headerBytes := blockRow[blkHdrOffset:endOffset:endOffset]
			heightBytes := headerBytes[blkHdrHeightStart : blkHdrHeightStart+4]
			height := binary.LittleEndian.Uint32(heightBytes)
			key := blockIndexKey(&blockHash, height)
			if v2BlockIdxBucket.Get(key) != nil {
				return nil
			}

			// Load the raw full block from the database.
			blockBytes, err := dbTx.FetchBlock(&blockHash)
			if err != nil {
				return err
			}

			// Deserialize the block bytes.
			var block wire.MsgBlock
			err = block.Deserialize(bytes.NewReader(blockBytes))
			if err != nil {
				return err
			}

			// Mark the block as valid if it's part of the main chain. While it
			// is possible side chain blocks were validated too, there was
			// previously no tracking of that information, so there is no way to
			// know for sure. It's better to be safe and just assume side chain
			// blocks were never validated.
			status := statusDataStored
			if hashIdxBucket.Get(blockHash[:]) != nil {
				status |= statusValid
			}

			// Write the serialized block index entry to the new bucket keyed by
			// its hash and height.
			ticketInfo := stake.FindSpentTicketsInBlock(&block)
			serialized, err := serializeBlockIndexEntry(&blockIndexEntry{
				header:         block.Header,
				status:         status,
				voteInfo:       ticketInfo.Votes,
				ticketsVoted:   ticketInfo.VotedTickets,
				ticketsRevoked: ticketInfo.RevokedTickets,
			})
			if err != nil {
				return err
			}
			err = v2BlockIdxBucket.Put(key, serialized)
			if err != nil {
				return err
			}

			numMigrated++

			if interruptRequested(interrupt) {
				return errInterruptRequested
			}

			return nil
		})
		return numMigrated, err
	}

	// Migrate all entries in batches for the reasons mentioned above.
	var totalMigrated uint64
	for {
		var numMigrated uint32
		err := db.Update(func(dbTx database.Tx) error {
			var err error
			numMigrated, err = doBatch(dbTx)
			if err == errInterruptRequested || err == errBatchFinished {
				// No error here so the database transaction is not cancelled
				// and therefore outstanding work is written to disk. The
				// outer function will exit with an interrupted error below due
				// to another interrupted check.
				err = nil
			}
			return err
		})
		if err != nil {
			return err
		}

		if interruptRequested(interrupt) {
			return errInterruptRequested
		}

		if numMigrated == 0 {
			break
		}

		totalMigrated += uint64(numMigrated)
		log.Infof("Migrated %d entries (%d total)", numMigrated, totalMigrated)
	}

	seconds := int64(time.Since(start) / time.Second)
	log.Infof("Done upgrading block index. Total entries: %d in %d seconds",
		totalMigrated, seconds)
	return nil
}
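
// blockIndexKey (defined elsewhere in this package) builds the v2 keys used by
// the migration above. A minimal sketch of the "height + hash" scheme is shown
// below for reference; the big-endian height prefix is an assumption made here
// so that entries iterate in height order, and the function name is
// illustrative.
func v2BlockIndexKeySketch(blockHash *chainhash.Hash, blockHeight uint32) []byte {
	// 4-byte height prefix followed by the 32-byte block hash.
	key := make([]byte, 4+chainhash.HashSize)
	binary.BigEndian.PutUint32(key[0:4], blockHeight)
	copy(key[4:], blockHash[:])
	return key
}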

// upgradeToVersion3 upgrades a version 2 blockchain to version 3 along with
// upgrading the block index to version 2.
func upgradeToVersion3(db database.DB, dbInfo *databaseInfo, interrupt <-chan struct{}) error {
	if err := migrateBlockIndex(db, interrupt); err != nil {
		return err
	}

	// Update and persist the updated database versions.
	dbInfo.version = 3
	dbInfo.bidxVer = 2
	return db.Update(func(dbTx database.Tx) error {
		return dbPutDatabaseInfo(dbTx, dbInfo)
	})
}

// removeMainChainIndex removes the main chain hash index and height index
// buckets. These are no longer needed due to using the full block index in
// memory.
//
// The database is guaranteed to be fully updated if this returns without
// failure.
func removeMainChainIndex(db database.DB, interrupt <-chan struct{}) error {
	// Hardcoded bucket names so updates to the global values do not affect old
	// upgrades.
	hashIdxBucketName := []byte("hashidx")
	heightIdxBucketName := []byte("heightidx")

	log.Info("Removing unneeded indexes in the database...")
	start := time.Now()

	// Delete the main chain index buckets.
	err := db.Update(func(dbTx database.Tx) error {
		// Delete the main chain hash to height index.
		meta := dbTx.Metadata()
		hashIdxBucket := meta.Bucket(hashIdxBucketName)
		if hashIdxBucket != nil {
			if err := meta.DeleteBucket(hashIdxBucketName); err != nil {
				return err
			}
			log.Infof("Removed hash index.")
		}

		if interruptRequested(interrupt) {
			// No error here so the database transaction is not cancelled
			// and therefore outstanding work is written to disk. The
			// outer function will exit with an interrupted error below due
			// to another interrupted check.
			return nil
		}

		// Delete the main chain height to hash index.
		heightIdxBucket := meta.Bucket(heightIdxBucketName)
		if heightIdxBucket != nil {
			if err := meta.DeleteBucket(heightIdxBucketName); err != nil {
				return err
			}
			log.Infof("Removed height index.")
		}

		return nil
	})
	if err != nil {
		return err
	}

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	elapsed := time.Since(start).Round(time.Millisecond)
	log.Infof("Done upgrading database in %v.", elapsed)
	return nil
}

// upgradeToVersion4 upgrades a version 3 blockchain database to version 4.
func upgradeToVersion4(db database.DB, dbInfo *databaseInfo, interrupt <-chan struct{}) error {
	if err := removeMainChainIndex(db, interrupt); err != nil {
		return err
	}

	// Update and persist the updated database versions.
	dbInfo.version = 4
	return db.Update(func(dbTx database.Tx) error {
		return dbPutDatabaseInfo(dbTx, dbInfo)
	})
}

// incrementalFlatDrop uses multiple database updates to remove key/value pairs
// saved to a flat bucket.
func incrementalFlatDrop(db database.DB, bucketKey []byte, humanName string, interrupt <-chan struct{}) error {
	const maxDeletions = 2000000
	var totalDeleted uint64
	for numDeleted := maxDeletions; numDeleted == maxDeletions; {
		numDeleted = 0
		err := db.Update(func(dbTx database.Tx) error {
			bucket := dbTx.Metadata().Bucket(bucketKey)
			cursor := bucket.Cursor()
			for ok := cursor.First(); ok; ok = cursor.Next() &&
				numDeleted < maxDeletions {

				if err := cursor.Delete(); err != nil {
					return err
				}
				numDeleted++
			}
			return nil
		})
		if err != nil {
			return err
		}

		if numDeleted > 0 {
			totalDeleted += uint64(numDeleted)
			log.Infof("Deleted %d keys (%d total) from %s", numDeleted,
				totalDeleted, humanName)
		}

		if interruptRequested(interrupt) {
			return errInterruptRequested
		}
	}
	return nil
}

// upgradeToVersion5 upgrades a version 4 blockchain database to version 5.
func upgradeToVersion5(db database.DB, chainParams *chaincfg.Params, dbInfo *databaseInfo, interrupt <-chan struct{}) error {
	// Hardcoded bucket and key names so updates to the global values do not
	// affect old upgrades.
	utxoSetBucketName := []byte("utxoset")
	spendJournalBucketName := []byte("spendjournal")
	chainStateKeyName := []byte("chainstate")
	v5ReindexTipKeyName := []byte("v5reindextip")

	log.Info("Clearing database utxoset and spend journal for upgrade...")
	start := time.Now()

	// Clear the utxoset.
	err := incrementalFlatDrop(db, utxoSetBucketName, "utxoset", interrupt)
	if err != nil {
		return err
	}
	log.Infof("Cleared utxoset.")

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	// Clear the spend journal.
	err = incrementalFlatDrop(db, spendJournalBucketName, "spend journal",
		interrupt)
	if err != nil {
		return err
	}
	log.Infof("Cleared spend journal.")

	if interruptRequested(interrupt) {
		return errInterruptRequested
	}

	err = db.Update(func(dbTx database.Tx) error {
		// Reset the ticket database to the genesis block.
		log.Infof("Resetting the ticket database. This might take a while...")
		if err := stake.ResetDatabase(dbTx, chainParams); err != nil {
			return err
		}

		// Fetch the stored best chain state from the database metadata.
		meta := dbTx.Metadata()
		serializedData := meta.Get(chainStateKeyName)
		best, err := deserializeBestChainState(serializedData)
		if err != nil {
			return err
		}

		// Store the current best chain tip as the reindex target.
		if err := meta.Put(v5ReindexTipKeyName, best.hash[:]); err != nil {
			return err
		}

		// Reset the state related to the best block to the genesis block.
		genesisBlock := chainParams.GenesisBlock
		numTxns := uint64(len(genesisBlock.Transactions))
		serializedData = serializeBestChainState(bestChainState{
			hash:         genesisBlock.BlockHash(),
			height:       0,
			totalTxns:    numTxns,
			totalSubsidy: 0,
			workSum:      standalone.CalcWork(genesisBlock.Header.Bits),
		})
		err = meta.Put(chainStateKeyName, serializedData)
		if err != nil {
			return err
		}

		// Update and persist the updated database versions.
		dbInfo.version = 5
		return dbPutDatabaseInfo(dbTx, dbInfo)
	})
	if err != nil {
		return err
	}

	elapsed := time.Since(start).Round(time.Millisecond)
	log.Infof("Done upgrading database in %v.", elapsed)
	return nil
}

// maybeFinishV5Upgrade potentially reindexes the chain due to a version 5
// database upgrade. It will resume previously uncompleted attempts.
func (b *BlockChain) maybeFinishV5Upgrade() error {
	// Nothing to do if the database is not version 5.
	if b.dbInfo.version != 5 {
		return nil
	}

	// Hardcoded key name so updates to the global values do not affect old
	// upgrades.
	v5ReindexTipKeyName := []byte("v5reindextip")

	// Finish the version 5 reindex as needed.
	var v5ReindexTipHash *chainhash.Hash
	err := b.db.View(func(dbTx database.Tx) error {
		hash := dbTx.Metadata().Get(v5ReindexTipKeyName)
		if hash != nil {
			v5ReindexTipHash = new(chainhash.Hash)
			copy(v5ReindexTipHash[:], hash)
		}
		return nil
	})
	if err != nil {
		return err
	}
	if v5ReindexTipHash != nil {
		// Look up the final target tip to reindex to in the block index.
		targetTip := b.index.LookupNode(v5ReindexTipHash)
		if targetTip == nil {
			return AssertError(fmt.Sprintf("maybeFinishV5Upgrade: cannot find "+
				"chain tip %s in block index", v5ReindexTipHash))
		}

		// Ensure all ancestors of the current best chain tip are marked as
		// valid. This is necessary due to older software versions not marking
		// nodes before the final checkpoint as valid.
		for node := targetTip; node != nil; node = node.parent {
			b.index.SetStatusFlags(node, statusValid)
		}
		if err := b.index.flush(); err != nil {
			return err
		}

		// Disable notifications during the reindex.
		ntfnCallback := b.notifications
		b.notifications = nil
		defer func() {
			b.notifications = ntfnCallback
		}()

		tip := b.bestChain.Tip()
		for tip != targetTip {
			if interruptRequested(b.interrupt) {
				return errInterruptRequested
			}

			// Limit to a reasonable number of blocks at a time.
			const maxReindexBlocks = 250
			intermediateTip := targetTip
			if intermediateTip.height-tip.height > maxReindexBlocks {
				intermediateTip = intermediateTip.Ancestor(tip.height +
					maxReindexBlocks)
			}

			log.Infof("Reindexing to height %d of %d (progress %.2f%%)...",
				intermediateTip.height, targetTip.height,
				float64(intermediateTip.height)/float64(targetTip.height)*100)
			b.chainLock.Lock()
			if err := b.reorganizeChainInternal(intermediateTip); err != nil {
				b.chainLock.Unlock()
				return err
			}
			b.chainLock.Unlock()

			tip = b.bestChain.Tip()
		}

		// Mark the v5 reindex as complete by removing the associated key.
		err := b.db.Update(func(dbTx database.Tx) error {
			return dbTx.Metadata().Delete(v5ReindexTipKeyName)
		})
		if err != nil {
			return err
		}
	}

	return nil
}

// upgradeDB upgrades old database versions to the newest version by applying
// all possible upgrades iteratively.
//
// NOTE: The passed database info will be updated with the latest versions.
func upgradeDB(db database.DB, chainParams *chaincfg.Params, dbInfo *databaseInfo, interrupt <-chan struct{}) error {
	if dbInfo.version == 1 {
		if err := upgradeToVersion2(db, chainParams, dbInfo); err != nil {
			return err
		}
	}

	// Migrate to the new v2 block index format if needed. That database
	// version was bumped because prior versions of the software did not have
	// a block index version.
	if dbInfo.version == 2 && dbInfo.bidxVer < 2 {
		if err := upgradeToVersion3(db, dbInfo, interrupt); err != nil {
			return err
		}
	}

	// Remove the main chain index from the database if needed.
	if dbInfo.version == 3 {
		if err := upgradeToVersion4(db, dbInfo, interrupt); err != nil {
			return err
		}
	}

	// Clear the utxoset, clear the spend journal, reset the best chain back to
	// the genesis block, and mark that a v5 reindex is required if needed.
	if dbInfo.version == 4 {
		err := upgradeToVersion5(db, chainParams, dbInfo, interrupt)
		if err != nil {
			return err
		}
	}

	return nil
}
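
// The sketch below illustrates the intended call pattern for the upgrades in
// this file: upgradeDB is run once at startup with the deserialized database
// info and a shutdown channel, and it updates dbInfo in place as each step
// completes, so a previously interrupted upgrade resumes from the correct
// point on the next run. The function name is illustrative and how dbInfo is
// loaded and persisted is assumed here; in this package that wiring happens
// during chain initialization, and the version 5 reindex itself is finished
// later by maybeFinishV5Upgrade once the BlockChain instance exists.
func runDatabaseUpgradesSketch(db database.DB, chainParams *chaincfg.Params, dbInfo *databaseInfo, interrupt <-chan struct{}) error {
	// Apply all applicable upgrades in order. Each upgrade persists its new
	// version before returning, so dbInfo reflects the latest versions on
	// success.
	if err := upgradeDB(db, chainParams, dbInfo, interrupt); err != nil {
		return err
	}

	// Normal chain initialization can proceed from here using the updated
	// dbInfo.version and dbInfo.bidxVer.
	return nil
}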