github.com/lbryio/lbcd@v0.22.119/blockchain/chainio.go (about)

// Copyright (c) 2015-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package blockchain

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/big"
	"sync"
	"time"

	"github.com/lbryio/lbcd/chaincfg/chainhash"
	"github.com/lbryio/lbcd/database"
	"github.com/lbryio/lbcd/wire"
	btcutil "github.com/lbryio/lbcutil"
)

const (
	// blockHdrSize is the size of a block header. This is simply the
	// constant from wire and is only provided here for convenience since
	// wire.MaxBlockHeaderPayload is quite long.
	blockHdrSize = wire.MaxBlockHeaderPayload

	// latestUtxoSetBucketVersion is the current version of the utxo set
	// bucket that is used to track all unspent outputs.
	latestUtxoSetBucketVersion = 2

	// latestSpendJournalBucketVersion is the current version of the spend
	// journal bucket that is used to track all spent transactions for use
	// in reorgs.
	latestSpendJournalBucketVersion = 1
)

var (
	// blockIndexBucketName is the name of the db bucket used to house the
	// block headers and contextual information.
	blockIndexBucketName = []byte("blockheaderidx")

	// hashIndexBucketName is the name of the db bucket used to house the
	// block hash -> block height index.
	hashIndexBucketName = []byte("hashidx")

	// heightIndexBucketName is the name of the db bucket used to house the
	// block height -> block hash index.
	heightIndexBucketName = []byte("heightidx")

	// chainStateKeyName is the name of the db key used to store the best
	// chain state.
	chainStateKeyName = []byte("chainstate")

	// spendJournalVersionKeyName is the name of the db key used to store
	// the version of the spend journal currently in the database.
	spendJournalVersionKeyName = []byte("spendjournalversion")

	// spendJournalBucketName is the name of the db bucket used to house
	// transaction outputs that are spent in each block.
	spendJournalBucketName = []byte("spendjournal")

	// utxoSetVersionKeyName is the name of the db key used to store the
	// version of the utxo set currently in the database.
	utxoSetVersionKeyName = []byte("utxosetversion")

	// utxoSetBucketName is the name of the db bucket used to house the
	// unspent transaction output set.
	utxoSetBucketName = []byte("utxosetv2")

	// byteOrder is the preferred byte order used for serializing numeric
	// fields for storage in the database.
	byteOrder = binary.LittleEndian
)

// errNotInMainChain signifies that a block hash or height that is not in the
// main chain was requested.
type errNotInMainChain string

// Error implements the error interface.
func (e errNotInMainChain) Error() string {
	return string(e)
}

// isNotInMainChainErr returns whether or not the passed error is an
// errNotInMainChain error.
func isNotInMainChainErr(err error) bool {
	_, ok := err.(errNotInMainChain)
	return ok
}

// errDeserialize signifies that a problem was encountered when deserializing
// data.
type errDeserialize string

// Error implements the error interface.
func (e errDeserialize) Error() string {
	return string(e)
}

// isDeserializeErr returns whether or not the passed error is an errDeserialize
// error.
func isDeserializeErr(err error) bool {
	_, ok := err.(errDeserialize)
	return ok
}

// isDbBucketNotFoundErr returns whether or not the passed error is a
// database.Error with an error code of database.ErrBucketNotFound.
func isDbBucketNotFoundErr(err error) bool {
	dbErr, ok := err.(database.Error)
	return ok && dbErr.ErrorCode == database.ErrBucketNotFound
}

// dbFetchVersion fetches an individual version with the given key from the
// metadata bucket. It is primarily used to track versions on entities such as
// buckets. It returns zero if the provided key does not exist.
func dbFetchVersion(dbTx database.Tx, key []byte) uint32 {
	serialized := dbTx.Metadata().Get(key)
	if serialized == nil {
		return 0
	}

	return byteOrder.Uint32(serialized)
}

// dbPutVersion uses an existing database transaction to update the provided
// key in the metadata bucket to the given version. It is primarily used to
// track versions on entities such as buckets.
func dbPutVersion(dbTx database.Tx, key []byte, version uint32) error {
	var serialized [4]byte
	byteOrder.PutUint32(serialized[:], version)
	return dbTx.Metadata().Put(key, serialized[:])
}

// dbFetchOrCreateVersion uses an existing database transaction to attempt to
// fetch the provided key from the metadata bucket as a version and in the case
// it doesn't exist, it adds the entry with the provided default version and
// returns that. This is useful during upgrades to automatically handle loading
// and adding version keys as necessary.
func dbFetchOrCreateVersion(dbTx database.Tx, key []byte, defaultVersion uint32) (uint32, error) {
	version := dbFetchVersion(dbTx, key)
	if version == 0 {
		version = defaultVersion
		err := dbPutVersion(dbTx, key, version)
		if err != nil {
			return 0, err
		}
	}

	return version, nil
}
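// NOTE: The example below is an editorial sketch and is not part of the
// original file. It shows how the version helpers above are intended to be
// used under a writable database transaction: fetch the stored version for a
// bucket, writing the default when no entry exists yet. The function name
// exampleBucketVersion is hypothetical.
func exampleBucketVersion(db database.DB) (uint32, error) {
	var version uint32
	err := db.Update(func(dbTx database.Tx) error {
		var err error

		// Fetch the stored utxo set version, creating the entry with
		// the latest version if it does not exist yet.
		version, err = dbFetchOrCreateVersion(dbTx, utxoSetVersionKeyName,
			latestUtxoSetBucketVersion)
		return err
	})
	return version, err
}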
// -----------------------------------------------------------------------------
// The transaction spend journal consists of an entry for each block connected
// to the main chain which contains the transaction outputs the block spends
// serialized such that the order is the reverse of the order they were spent.
//
// This is required because reorganizing the chain necessarily entails
// disconnecting blocks to get back to the point of the fork which implies
// unspending all of the transaction outputs that each block previously spent.
// Since the utxo set, by definition, only contains unspent transaction outputs,
// the spent transaction outputs must be resurrected from somewhere. There is
// more than one way this could be done, however this is the most straight
// forward method that does not require having a transaction index and unpruned
// blockchain.
//
// NOTE: This format is NOT self describing. The additional details such as
// the number of entries (transaction inputs) are expected to come from the
// block itself and the utxo set (for legacy entries). The rationale in doing
// this is to save space. This is also the reason the spent outputs are
// serialized in the reverse order they are spent because later transactions are
// allowed to spend outputs from earlier ones in the same block.
//
// The reserved field below used to keep track of the version of the containing
// transaction when the height in the header code was non-zero, however the
// height is always non-zero now, but keeping the extra reserved field allows
// backwards compatibility.
//
// The serialized format is:
//
//   [<header code><reserved><compressed txout>],...
//
//   Field                Type     Size
//   header code          VLQ      variable
//   reserved             byte     1
//   compressed txout
//     compressed amount  VLQ      variable
//     compressed script  []byte   variable
//
// The serialized header code format is:
//
//   bit 0 - containing transaction is a coinbase
//   bits 1-x - height of the block that contains the spent txout
//
// Example 1:
// From block 170 in main blockchain.
//
//   1300320511db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c
//   <><><------------------------------------------------------------------>
//    | |                                  |
//    | reserved                  compressed txout
//    header code
//
//   - header code: 0x13 (coinbase, height 9)
//   - reserved: 0x00
//   - compressed txout 0:
//     - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
//     - 0x05: special script type pay-to-pubkey
//     - 0x11...5c: x-coordinate of the pubkey
//
// Example 2:
// Adapted from block 100025 in main blockchain.
//
//   8b99700091f20f006edbc6c4d31bae9f1ccc38538a114bf42de65e868b99700086c64700b2fb57eadf61e106a100a7445a8c3f67898841ec
//   <----><><----------------------------------------------><----><><---------------------------------------------->
//      |   |                        |                           |   |                        |
//      |   reserved        compressed txout                     |   reserved        compressed txout
//   header code                                              header code
//
//   - Last spent output:
//     - header code: 0x8b9970 (not coinbase, height 100024)
//     - reserved: 0x00
//     - compressed txout:
//       - 0x91f20f: VLQ-encoded compressed amount for 34405000000 (344.05 BTC)
//       - 0x00: special script type pay-to-pubkey-hash
//       - 0x6e...86: pubkey hash
//   - Second to last spent output:
//     - header code: 0x8b9970 (not coinbase, height 100024)
//     - reserved: 0x00
//     - compressed txout:
//       - 0x86c647: VLQ-encoded compressed amount for 13761000000 (137.61 BTC)
//       - 0x00: special script type pay-to-pubkey-hash
//       - 0xb2...ec: pubkey hash
// -----------------------------------------------------------------------------

// SpentTxOut contains a spent transaction output and potentially additional
// contextual information such as whether or not it was contained in a coinbase
// transaction, the version of the transaction it was contained in, and which
// block height the containing transaction was included in. As described in
// the comments above, the additional contextual information will only be valid
// when this spent txout is spending the last unspent output of the containing
// transaction.
type SpentTxOut struct {
	// Amount is the amount of the output.
	Amount int64

	// PkScript is the public key script for the output.
	PkScript []byte

	// Height is the height of the block containing the creating tx.
	Height int32

	// IsCoinBase denotes if the creating tx is a coinbase.
	IsCoinBase bool
}
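// NOTE: The example below is an editorial sketch and is not part of the
// original file. It works through the header code from Example 1 above:
// packing a coinbase spend at height 9 yields 9<<1 | 1 = 0x13, and decoding
// simply reverses the shift and mask. The function name
// exampleSpentTxOutHeaderCode is hypothetical.
func exampleSpentTxOutHeaderCode() {
	stxo := SpentTxOut{Height: 9, IsCoinBase: true}

	// Pack the height and coinbase flag into a single header code.
	code := spentTxOutHeaderCode(&stxo)
	fmt.Printf("header code: %#x\n", code) // 0x13, as in Example 1 above

	// Unpack it again: bit 0 is the coinbase flag, the remaining bits are
	// the height of the block containing the spent txout.
	isCoinBase := code&0x01 != 0
	height := int32(code >> 1)
	fmt.Println(isCoinBase, height) // true 9
}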
// FetchSpendJournal attempts to retrieve the spend journal, or the set of
// outputs spent for the target block. This provides a view of all the outputs
// that will be consumed once the target block is connected to the end of the
// main chain.
//
// This function is safe for concurrent access.
func (b *BlockChain) FetchSpendJournal(targetBlock *btcutil.Block) ([]SpentTxOut, error) {
	b.chainLock.RLock()
	defer b.chainLock.RUnlock()

	var spendEntries []SpentTxOut
	err := b.db.View(func(dbTx database.Tx) error {
		var err error

		spendEntries, err = dbFetchSpendJournalEntry(dbTx, targetBlock)
		return err
	})
	if err != nil {
		return nil, err
	}

	return spendEntries, nil
}

// spentTxOutHeaderCode returns the calculated header code to be used when
// serializing the provided stxo entry.
func spentTxOutHeaderCode(stxo *SpentTxOut) uint64 {
	// As described in the serialization format comments, the header code
	// encodes the height shifted over one bit and the coinbase flag in the
	// lowest bit.
	headerCode := uint64(stxo.Height) << 1
	if stxo.IsCoinBase {
		headerCode |= 0x01
	}

	return headerCode
}

// spentTxOutSerializeSize returns the number of bytes it would take to
// serialize the passed stxo according to the format described above.
func spentTxOutSerializeSize(stxo *SpentTxOut) int {
	size := serializeSizeVLQ(spentTxOutHeaderCode(stxo))
	if stxo.Height > 0 {
		// The legacy v1 spend journal format conditionally tracked the
		// containing transaction version when the height was non-zero,
		// so this is required for backwards compat.
		size += serializeSizeVLQ(0)
	}
	return size + compressedTxOutSize(uint64(stxo.Amount), stxo.PkScript)
}

// putSpentTxOut serializes the passed stxo according to the format described
// above directly into the passed target byte slice. The target byte slice must
// be at least large enough to handle the number of bytes returned by the
// spentTxOutSerializeSize function or it will panic.
func putSpentTxOut(target []byte, stxo *SpentTxOut) int {
	headerCode := spentTxOutHeaderCode(stxo)
	offset := putVLQ(target, headerCode)
	if stxo.Height > 0 {
		// The legacy v1 spend journal format conditionally tracked the
		// containing transaction version when the height was non-zero,
		// so this is required for backwards compat.
		offset += putVLQ(target[offset:], 0)
	}
	return offset + putCompressedTxOut(target[offset:], uint64(stxo.Amount),
		stxo.PkScript)
}
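// NOTE: The example below is an editorial sketch and is not part of the
// original file. It round-trips a single stxo through putSpentTxOut and
// decodeSpentTxOut, which is the per-entry building block the spend journal
// serialization uses. The function name exampleSpentTxOutRoundTrip and the
// placeholder script are hypothetical.
func exampleSpentTxOutRoundTrip() error {
	original := SpentTxOut{
		Amount:     1000000,
		PkScript:   []byte{0x51}, // illustrative placeholder script (OP_TRUE)
		Height:     123456,
		IsCoinBase: false,
	}

	// Allocate exactly the number of bytes the serialization needs and
	// write the entry into the slice.
	serialized := make([]byte, spentTxOutSerializeSize(&original))
	putSpentTxOut(serialized, &original)

	// Read it back. decodeSpentTxOut also reports how many bytes it
	// consumed, which is how a packed journal entry is walked.
	var decoded SpentTxOut
	if _, err := decodeSpentTxOut(serialized, &decoded); err != nil {
		return err
	}
	if decoded.Height != original.Height || decoded.Amount != original.Amount {
		return fmt.Errorf("round trip mismatch: %+v != %+v", decoded, original)
	}
	return nil
}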
// decodeSpentTxOut decodes the passed serialized stxo entry, possibly followed
// by other data, into the passed stxo struct. It returns the number of bytes
// read.
func decodeSpentTxOut(serialized []byte, stxo *SpentTxOut) (int, error) {
	// Ensure there are bytes to decode.
	if len(serialized) == 0 {
		return 0, errDeserialize("no serialized bytes")
	}

	// Deserialize the header code.
	code, offset := deserializeVLQ(serialized)
	if offset >= len(serialized) {
		return offset, errDeserialize("unexpected end of data after " +
			"header code")
	}

	// Decode the header code.
	//
	// Bit 0 indicates containing transaction is a coinbase.
	// Bits 1-x encode height of containing transaction.
	stxo.IsCoinBase = code&0x01 != 0
	stxo.Height = int32(code >> 1)
	if stxo.Height > 0 {
		// The legacy v1 spend journal format conditionally tracked the
		// containing transaction version when the height was non-zero,
		// so this is required for backwards compat.
		_, bytesRead := deserializeVLQ(serialized[offset:])
		offset += bytesRead
		if offset >= len(serialized) {
			return offset, errDeserialize("unexpected end of data " +
				"after reserved")
		}
	}

	// Decode the compressed txout.
	amount, pkScript, bytesRead, err := decodeCompressedTxOut(
		serialized[offset:])
	offset += bytesRead
	if err != nil {
		return offset, errDeserialize(fmt.Sprintf("unable to decode "+
			"txout: %v", err))
	}
	stxo.Amount = int64(amount)
	stxo.PkScript = pkScript
	return offset, nil
}

// deserializeSpendJournalEntry decodes the passed serialized byte slice into a
// slice of spent txouts according to the format described in detail above.
//
// Since the serialization format is not self describing, as noted in the
// format comments, this function also requires the transactions that spend the
// txouts.
func deserializeSpendJournalEntry(serialized []byte, txns []*wire.MsgTx) ([]SpentTxOut, error) {
	// Calculate the total number of stxos.
	var numStxos int
	for _, tx := range txns {
		numStxos += len(tx.TxIn)
	}

	// When a block has no spent txouts there is nothing to serialize.
	if len(serialized) == 0 {
		// Ensure the block actually has no stxos. This should never
		// happen unless there is database corruption or an empty entry
		// erroneously made its way into the database.
		if numStxos != 0 {
			return nil, AssertError(fmt.Sprintf("mismatched spend "+
				"journal serialization - no serialization for "+
				"expected %d stxos", numStxos))
		}

		return nil, nil
	}

	// Loop backwards through all transactions so everything is read in
	// reverse order to match the serialization order.
	stxoIdx := numStxos - 1
	offset := 0
	stxos := make([]SpentTxOut, numStxos)
	for txIdx := len(txns) - 1; txIdx > -1; txIdx-- {
		tx := txns[txIdx]

		// Loop backwards through all of the transaction inputs and read
		// the associated stxo.
		for txInIdx := len(tx.TxIn) - 1; txInIdx > -1; txInIdx-- {
			txIn := tx.TxIn[txInIdx]
			stxo := &stxos[stxoIdx]
			stxoIdx--

			n, err := decodeSpentTxOut(serialized[offset:], stxo)
			offset += n
			if err != nil {
				return nil, errDeserialize(fmt.Sprintf("unable "+
					"to decode stxo for %v: %v",
					txIn.PreviousOutPoint, err))
			}
		}
	}

	return stxos, nil
}
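// NOTE: The example below is an editorial sketch and is not part of the
// original file. Because the journal format is not self describing, decoding
// requires the spending transactions of the block so the number of entries is
// known. The sketch serializes a one-entry journal and decodes it back using a
// minimal spending transaction with a single input. The function name
// exampleSpendJournalEntryRoundTrip and the placeholder values are
// hypothetical.
func exampleSpendJournalEntryRoundTrip() error {
	// One spending transaction with one input implies exactly one stxo in
	// the journal entry for the block.
	spendTx := &wire.MsgTx{TxIn: []*wire.TxIn{{}}}
	stxos := []SpentTxOut{{
		Amount:   1000,
		PkScript: []byte{0x51}, // illustrative placeholder script
		Height:   2000,
	}}

	serialized := serializeSpendJournalEntry(stxos)
	decoded, err := deserializeSpendJournalEntry(serialized, []*wire.MsgTx{spendTx})
	if err != nil {
		return err
	}
	if len(decoded) != 1 || decoded[0].Height != stxos[0].Height {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}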
// serializeSpendJournalEntry serializes all of the passed spent txouts into a
// single byte slice according to the format described in detail above.
func serializeSpendJournalEntry(stxos []SpentTxOut) []byte {
	if len(stxos) == 0 {
		return nil
	}

	// Calculate the size needed to serialize the entire journal entry.
	var size int
	for i := range stxos {
		size += spentTxOutSerializeSize(&stxos[i])
	}
	serialized := make([]byte, size)

	// Serialize each individual stxo directly into the slice in reverse
	// order one after the other.
	var offset int
	for i := len(stxos) - 1; i > -1; i-- {
		offset += putSpentTxOut(serialized[offset:], &stxos[i])
	}

	return serialized
}

// dbFetchSpendJournalEntry fetches the spend journal entry for the passed block
// and deserializes it into a slice of spent txout entries.
//
// NOTE: Legacy entries will not have the coinbase flag or height set unless it
// was the final output spend in the containing transaction. It is up to the
// caller to handle this properly by looking the information up in the utxo set.
func dbFetchSpendJournalEntry(dbTx database.Tx, block *btcutil.Block) ([]SpentTxOut, error) {
	// Exclude the coinbase transaction since it can't spend anything.
	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
	serialized := spendBucket.Get(block.Hash()[:])
	blockTxns := block.MsgBlock().Transactions[1:]
	stxos, err := deserializeSpendJournalEntry(serialized, blockTxns)
	if err != nil {
		// Ensure any deserialization errors are returned as database
		// corruption errors.
		if isDeserializeErr(err) {
			return nil, database.Error{
				ErrorCode: database.ErrCorruption,
				Description: fmt.Sprintf("corrupt spend "+
					"information for %v: %v", block.Hash(),
					err),
			}
		}

		return nil, err
	}

	return stxos, nil
}

// dbPutSpendJournalEntry uses an existing database transaction to update the
// spend journal entry for the given block hash using the provided slice of
// spent txouts. The spent txouts slice must contain an entry for every txout
// the transactions in the block spend in the order they are spent.
func dbPutSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash, stxos []SpentTxOut) error {
	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
	serialized := serializeSpendJournalEntry(stxos)
	return spendBucket.Put(blockHash[:], serialized)
}

// dbRemoveSpendJournalEntry uses an existing database transaction to remove the
// spend journal entry for the passed block hash.
func dbRemoveSpendJournalEntry(dbTx database.Tx, blockHash *chainhash.Hash) error {
	spendBucket := dbTx.Metadata().Bucket(spendJournalBucketName)
	return spendBucket.Delete(blockHash[:])
}

// -----------------------------------------------------------------------------
// The unspent transaction output (utxo) set consists of an entry for each
// unspent output using a format that is optimized to reduce space using domain
// specific compression algorithms. This format is a slightly modified version
// of the format used in Bitcoin Core.
//
// Each entry is keyed by an outpoint as specified below. It is important to
// note that the key encoding uses a VLQ, which employs an MSB encoding so
// iteration of utxos when doing byte-wise comparisons will produce them in
// order.
//
// The serialized key format is:
//
//   <hash><output index>
//
//   Field           Type             Size
//   hash            chainhash.Hash   chainhash.HashSize
//   output index    VLQ              variable
//
// The serialized value format is:
//
//   <header code><compressed txout>
//
//   Field                Type     Size
//   header code          VLQ      variable
//   compressed txout
//     compressed amount  VLQ      variable
//     compressed script  []byte   variable
//
// The serialized header code format is:
//
//   bit 0 - containing transaction is a coinbase
//   bits 1-x - height of the block that contains the unspent txout
//
// Example 1:
// From tx in main blockchain:
// Blk 1, 0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098:0
//
//   03320496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52
//   <><------------------------------------------------------------------>
//    |                                  |
//   header code                compressed txout
//
//   - header code: 0x03 (coinbase, height 1)
//   - compressed txout:
//     - 0x32: VLQ-encoded compressed amount for 5000000000 (50 BTC)
//     - 0x04: special script type pay-to-pubkey
//     - 0x96...52: x-coordinate of the pubkey
//
// Example 2:
// From tx in main blockchain:
// Blk 113931, 4a16969aa4764dd7507fc1de7f0baa4850a246de90c45e59a3207f9a26b5036f:2
//
//   8cf316800900b8025be1b3efc63b0ad48e7f9f10e87544528d58
//   <----><------------------------------------------>
//     |                         |
//   header code         compressed txout
//
//   - header code: 0x8cf316 (not coinbase, height 113931)
//   - compressed txout:
//     - 0x8009: VLQ-encoded compressed amount for 15000000 (0.15 BTC)
//     - 0x00: special script type pay-to-pubkey-hash
//     - 0xb8...58: pubkey hash
//
// Example 3:
// From tx in main blockchain:
// Blk 338156, 1b02d1c8cfef60a189017b9a420c682cf4a0028175f2f563209e4ff61c8c3620:22
//
//   a8a2588ba5b9e763011dd46a006572d820e448e12d2bbb38640bc718e6
//   <----><-------------------------------------------------->
//     |                             |
//   header code             compressed txout
//
//   - header code: 0xa8a258 (not coinbase, height 338156)
//   - compressed txout:
//     - 0x8ba5b9e763: VLQ-encoded compressed amount for 366875659 (3.66875659 BTC)
//     - 0x01: special script type pay-to-script-hash
//     - 0x1d...e6: script hash
// -----------------------------------------------------------------------------

// maxUint32VLQSerializeSize is the maximum number of bytes a max uint32 takes
// to serialize as a VLQ.
var maxUint32VLQSerializeSize = serializeSizeVLQ(1<<32 - 1)

// outpointKeyPool defines a concurrent safe free list of byte slices used to
// provide temporary buffers for outpoint database keys.
var outpointKeyPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, chainhash.HashSize+maxUint32VLQSerializeSize)
		return &b // Pointer to slice to avoid boxing alloc.
	},
}
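// NOTE: The example below is an editorial sketch and is not part of the
// original file. It demonstrates the key ordering property described in the
// comments above: because the output index is encoded with an MSB-based VLQ,
// byte-wise comparison of serialized keys for the same hash follows numeric
// index order, so cursor iteration visits outpoints in order. The function
// name exampleOutpointKeyOrder is hypothetical.
func exampleOutpointKeyOrder(hash chainhash.Hash) bool {
	low := outpointKey(wire.OutPoint{Hash: hash, Index: 1})
	high := outpointKey(wire.OutPoint{Hash: hash, Index: 300})

	// bytes.Compare on the serialized keys agrees with the numeric order
	// of the output indexes.
	inOrder := bytes.Compare(*low, *high) < 0 // true

	// Return the buffers to the free list since they are no longer needed.
	recycleOutpointKey(low)
	recycleOutpointKey(high)
	return inOrder
}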
// outpointKey returns a key suitable for use as a database key in the utxo set
// while making use of a free list. A new buffer is allocated if there are not
// already any available on the free list. The returned byte slice should be
// returned to the free list by using the recycleOutpointKey function when the
// caller is done with it _unless_ the slice will need to live for longer than
// the caller can calculate such as when used to write to the database.
func outpointKey(outpoint wire.OutPoint) *[]byte {
	// A VLQ employs an MSB encoding, so they are useful not only to reduce
	// the amount of storage space, but also so iteration of utxos when
	// doing byte-wise comparisons will produce them in order.
	key := outpointKeyPool.Get().(*[]byte)
	idx := uint64(outpoint.Index)
	*key = (*key)[:chainhash.HashSize+serializeSizeVLQ(idx)]
	copy(*key, outpoint.Hash[:])
	putVLQ((*key)[chainhash.HashSize:], idx)
	return key
}

// recycleOutpointKey puts the provided byte slice, which should have been
// obtained via the outpointKey function, back on the free list.
func recycleOutpointKey(key *[]byte) {
	outpointKeyPool.Put(key)
}

// utxoEntryHeaderCode returns the calculated header code to be used when
// serializing the provided utxo entry.
func utxoEntryHeaderCode(entry *UtxoEntry) (uint64, error) {
	if entry.IsSpent() {
		return 0, AssertError("attempt to serialize spent utxo header")
	}

	// As described in the serialization format comments, the header code
	// encodes the height shifted over one bit and the coinbase flag in the
	// lowest bit.
	headerCode := uint64(entry.BlockHeight()) << 1
	if entry.IsCoinBase() {
		headerCode |= 0x01
	}

	return headerCode, nil
}

// serializeUtxoEntry returns the entry serialized to a format that is suitable
// for long-term storage. The format is described in detail above.
func serializeUtxoEntry(entry *UtxoEntry) ([]byte, error) {
	// Spent outputs have no serialization.
	if entry.IsSpent() {
		return nil, nil
	}

	// Encode the header code.
	headerCode, err := utxoEntryHeaderCode(entry)
	if err != nil {
		return nil, err
	}

	// Calculate the size needed to serialize the entry.
	size := serializeSizeVLQ(headerCode) +
		compressedTxOutSize(uint64(entry.Amount()), entry.PkScript())

	// Serialize the header code followed by the compressed unspent
	// transaction output.
	serialized := make([]byte, size)
	offset := putVLQ(serialized, headerCode)
	offset += putCompressedTxOut(serialized[offset:], uint64(entry.Amount()),
		entry.PkScript())

	return serialized, nil
}
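// NOTE: The example below is an editorial sketch and is not part of the
// original file. It round-trips a utxo entry through serializeUtxoEntry and
// deserializeUtxoEntry. The entry is constructed directly with the same
// unexported fields this file uses; in real code entries come from a
// UtxoViewpoint. The function name exampleUtxoEntryRoundTrip and the
// placeholder script are hypothetical.
func exampleUtxoEntryRoundTrip() error {
	entry := &UtxoEntry{
		amount:      5000000000,
		pkScript:    []byte{0x51}, // illustrative placeholder script
		blockHeight: 1,
		packedFlags: tfCoinBase, // mark the output as a coinbase
	}

	serialized, err := serializeUtxoEntry(entry)
	if err != nil {
		return err
	}

	decoded, err := deserializeUtxoEntry(serialized)
	if err != nil {
		return err
	}
	if decoded.BlockHeight() != entry.BlockHeight() || !decoded.IsCoinBase() {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}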
// deserializeUtxoEntry decodes a utxo entry from the passed serialized byte
// slice into a new UtxoEntry using a format that is suitable for long-term
// storage. The format is described in detail above.
func deserializeUtxoEntry(serialized []byte) (*UtxoEntry, error) {
	// Deserialize the header code.
	code, offset := deserializeVLQ(serialized)
	if offset >= len(serialized) {
		return nil, errDeserialize("unexpected end of data after header")
	}

	// Decode the header code.
	//
	// Bit 0 indicates whether the containing transaction is a coinbase.
	// Bits 1-x encode height of containing transaction.
	isCoinBase := code&0x01 != 0
	blockHeight := int32(code >> 1)

	// Decode the compressed unspent transaction output.
	amount, pkScript, _, err := decodeCompressedTxOut(serialized[offset:])
	if err != nil {
		return nil, errDeserialize(fmt.Sprintf("unable to decode "+
			"utxo: %v", err))
	}

	entry := &UtxoEntry{
		amount:      int64(amount),
		pkScript:    pkScript,
		blockHeight: blockHeight,
		packedFlags: 0,
	}
	if isCoinBase {
		entry.packedFlags |= tfCoinBase
	}

	return entry, nil
}

// dbFetchUtxoEntryByHash attempts to find and fetch a utxo for the given hash.
// It uses a cursor and seek to try and do this as efficiently as possible.
//
// When there are no entries for the provided hash, nil will be returned for
// both the entry and the error.
func dbFetchUtxoEntryByHash(dbTx database.Tx, hash *chainhash.Hash) (*UtxoEntry, error) {
	// Attempt to find an entry by seeking for the hash along with a zero
	// index. Due to the fact the keys are serialized as <hash><index>,
	// where the index uses an MSB encoding, if there are any entries for
	// the hash at all, one will be found.
	cursor := dbTx.Metadata().Bucket(utxoSetBucketName).Cursor()
	key := outpointKey(wire.OutPoint{Hash: *hash, Index: 0})
	ok := cursor.Seek(*key)
	recycleOutpointKey(key)
	if !ok {
		return nil, nil
	}

	// An entry was found, but it could just be an entry with the next
	// highest hash after the requested one, so make sure the hashes
	// actually match.
	cursorKey := cursor.Key()
	if len(cursorKey) < chainhash.HashSize {
		return nil, nil
	}
	if !bytes.Equal(hash[:], cursorKey[:chainhash.HashSize]) {
		return nil, nil
	}

	return deserializeUtxoEntry(cursor.Value())
}

// dbFetchUtxoEntry uses an existing database transaction to fetch the specified
// transaction output from the utxo set.
//
// When there is no entry for the provided output, nil will be returned for both
// the entry and the error.
func dbFetchUtxoEntry(dbTx database.Tx, outpoint wire.OutPoint) (*UtxoEntry, error) {
	// Fetch the unspent transaction output information for the passed
	// transaction output. Return now when there is no entry.
	key := outpointKey(outpoint)
	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
	serializedUtxo := utxoBucket.Get(*key)
	recycleOutpointKey(key)
	if serializedUtxo == nil {
		return nil, nil
	}

	// A non-nil zero-length entry means there is an entry in the database
	// for a spent transaction output which should never be the case.
	if len(serializedUtxo) == 0 {
		return nil, AssertError(fmt.Sprintf("database contains entry "+
			"for spent tx output %v", outpoint))
	}

	// Deserialize the utxo entry and return it.
	entry, err := deserializeUtxoEntry(serializedUtxo)
	if err != nil {
		// Ensure any deserialization errors are returned as database
		// corruption errors.
		if isDeserializeErr(err) {
			return nil, database.Error{
				ErrorCode: database.ErrCorruption,
				Description: fmt.Sprintf("corrupt utxo entry "+
					"for %v: %v", outpoint, err),
			}
		}

		return nil, err
	}

	return entry, nil
}
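// NOTE: The example below is an editorial sketch and is not part of the
// original file. It shows the intended usage pattern for the unexported db
// helpers in this file: wrap them in a read-only database transaction via
// db.View. A nil entry with a nil error means the output is not in the utxo
// set. The function name exampleFetchUtxo is hypothetical.
func exampleFetchUtxo(db database.DB, outpoint wire.OutPoint) (*UtxoEntry, error) {
	var entry *UtxoEntry
	err := db.View(func(dbTx database.Tx) error {
		var err error
		entry, err = dbFetchUtxoEntry(dbTx, outpoint)
		return err
	})
	return entry, err
}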
// dbPutUtxoView uses an existing database transaction to update the utxo set
// in the database based on the provided utxo view contents and state. In
// particular, only the entries that have been marked as modified are written
// to the database.
func dbPutUtxoView(dbTx database.Tx, view *UtxoViewpoint) error {
	utxoBucket := dbTx.Metadata().Bucket(utxoSetBucketName)
	for outpoint, entry := range view.entries {
		// No need to update the database if the entry was not modified.
		if entry == nil || !entry.isModified() {
			continue
		}

		// Remove the utxo entry if it is spent.
		if entry.IsSpent() {
			key := outpointKey(outpoint)
			err := utxoBucket.Delete(*key)
			recycleOutpointKey(key)
			if err != nil {
				return err
			}

			continue
		}

		// Serialize and store the utxo entry.
		serialized, err := serializeUtxoEntry(entry)
		if err != nil {
			return err
		}
		key := outpointKey(outpoint)
		err = utxoBucket.Put(*key, serialized)
		// NOTE: The key is intentionally not recycled here since the
		// database interface contract prohibits modifications. It will
		// be garbage collected normally when the database is done with
		// it.
		if err != nil {
			return err
		}
	}

	return nil
}

// -----------------------------------------------------------------------------
// The block index consists of two buckets with an entry for every block in the
// main chain. One bucket is for the hash to height mapping and the other is
// for the height to hash mapping.
//
// The serialized format for values in the hash to height bucket is:
//
//   <height>
//
//   Field      Type     Size
//   height     uint32   4 bytes
//
// The serialized format for values in the height to hash bucket is:
//
//   <hash>
//
//   Field      Type             Size
//   hash       chainhash.Hash   chainhash.HashSize
// -----------------------------------------------------------------------------

// dbPutBlockIndex uses an existing database transaction to update or add the
// block index entries for the hash to height and height to hash mappings for
// the provided values.
func dbPutBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error {
	// Serialize the height for use in the index entries.
	var serializedHeight [4]byte
	byteOrder.PutUint32(serializedHeight[:], uint32(height))

	// Add the block hash to height mapping to the index.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(hashIndexBucketName)
	if err := hashIndex.Put(hash[:], serializedHeight[:]); err != nil {
		return err
	}

	// Add the block height to hash mapping to the index.
	heightIndex := meta.Bucket(heightIndexBucketName)
	return heightIndex.Put(serializedHeight[:], hash[:])
}

// dbRemoveBlockIndex uses an existing database transaction to remove block
// index entries from the hash to height and height to hash mappings for the
// provided values.
func dbRemoveBlockIndex(dbTx database.Tx, hash *chainhash.Hash, height int32) error {
	// Remove the block hash to height mapping.
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(hashIndexBucketName)
	if err := hashIndex.Delete(hash[:]); err != nil {
		return err
	}

	// Remove the block height to hash mapping.
	var serializedHeight [4]byte
	byteOrder.PutUint32(serializedHeight[:], uint32(height))
	heightIndex := meta.Bucket(heightIndexBucketName)
	return heightIndex.Delete(serializedHeight[:])
}
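// NOTE: The example below is an editorial sketch and is not part of the
// original file. It writes both directions of the block index mapping and
// reads them back within the same transaction. It assumes a writable
// transaction (from db.Update) against a database whose index buckets already
// exist, as created by createChainState. The function name
// exampleBlockIndexMappings is hypothetical.
func exampleBlockIndexMappings(dbTx database.Tx, hash *chainhash.Hash, height int32) error {
	// Write both the hash -> height and height -> hash mappings.
	if err := dbPutBlockIndex(dbTx, hash, height); err != nil {
		return err
	}

	// Read them back through either direction of the index.
	gotHeight, err := dbFetchHeightByHash(dbTx, hash)
	if err != nil {
		return err
	}
	gotHash, err := dbFetchHashByHeight(dbTx, gotHeight)
	if err != nil {
		return err
	}
	if !gotHash.IsEqual(hash) {
		return fmt.Errorf("block index mismatch for %v", hash)
	}
	return nil
}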
// dbFetchHeightByHash uses an existing database transaction to retrieve the
// height for the provided hash from the index.
func dbFetchHeightByHash(dbTx database.Tx, hash *chainhash.Hash) (int32, error) {
	meta := dbTx.Metadata()
	hashIndex := meta.Bucket(hashIndexBucketName)
	serializedHeight := hashIndex.Get(hash[:])
	if serializedHeight == nil {
		str := fmt.Sprintf("block %s is not in the main chain", hash)
		return 0, errNotInMainChain(str)
	}

	return int32(byteOrder.Uint32(serializedHeight)), nil
}

// dbFetchHashByHeight uses an existing database transaction to retrieve the
// hash for the provided height from the index.
func dbFetchHashByHeight(dbTx database.Tx, height int32) (*chainhash.Hash, error) {
	var serializedHeight [4]byte
	byteOrder.PutUint32(serializedHeight[:], uint32(height))

	meta := dbTx.Metadata()
	heightIndex := meta.Bucket(heightIndexBucketName)
	hashBytes := heightIndex.Get(serializedHeight[:])
	if hashBytes == nil {
		str := fmt.Sprintf("no block at height %d exists", height)
		return nil, errNotInMainChain(str)
	}

	var hash chainhash.Hash
	copy(hash[:], hashBytes)
	return &hash, nil
}

// -----------------------------------------------------------------------------
// The best chain state consists of the best block hash and height, the total
// number of transactions up to and including those in the best block, and the
// accumulated work sum up to and including the best block.
//
// The serialized format is:
//
//   <block hash><block height><total txns><work sum length><work sum>
//
//   Field             Type             Size
//   block hash        chainhash.Hash   chainhash.HashSize
//   block height      uint32           4 bytes
//   total txns        uint64           8 bytes
//   work sum length   uint32           4 bytes
//   work sum          big.Int          work sum length
// -----------------------------------------------------------------------------

// bestChainState represents the data to be stored in the database for the
// current best chain state.
type bestChainState struct {
	hash      chainhash.Hash
	height    uint32
	totalTxns uint64
	workSum   *big.Int
}

// serializeBestChainState returns the serialization of the passed block best
// chain state. This is data to be stored in the chain state bucket.
func serializeBestChainState(state bestChainState) []byte {
	// Calculate the full size needed to serialize the chain state.
	workSumBytes := state.workSum.Bytes()
	workSumBytesLen := uint32(len(workSumBytes))
	serializedLen := chainhash.HashSize + 4 + 8 + 4 + workSumBytesLen

	// Serialize the chain state.
	serializedData := make([]byte, serializedLen)
	copy(serializedData[0:chainhash.HashSize], state.hash[:])
	offset := uint32(chainhash.HashSize)
	byteOrder.PutUint32(serializedData[offset:], state.height)
	offset += 4
	byteOrder.PutUint64(serializedData[offset:], state.totalTxns)
	offset += 8
	byteOrder.PutUint32(serializedData[offset:], workSumBytesLen)
	offset += 4
	copy(serializedData[offset:], workSumBytes)
	return serializedData
}
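// NOTE: The example below is an editorial sketch and is not part of the
// original file. It round-trips a best chain state value through
// serializeBestChainState and deserializeBestChainState, exercising the
// <hash><height><total txns><work sum length><work sum> layout described
// above. The function name exampleBestChainStateRoundTrip and the sample
// values are hypothetical.
func exampleBestChainStateRoundTrip() error {
	original := bestChainState{
		height:    100000,
		totalTxns: 250000,
		workSum:   big.NewInt(1 << 40),
	}

	serialized := serializeBestChainState(original)

	decoded, err := deserializeBestChainState(serialized)
	if err != nil {
		return err
	}
	if decoded.height != original.height || decoded.totalTxns != original.totalTxns ||
		decoded.workSum.Cmp(original.workSum) != 0 {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}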
// deserializeBestChainState deserializes the passed serialized best chain
// state. This is data stored in the chain state bucket and is updated after
// every block is connected or disconnected from the main chain.
func deserializeBestChainState(serializedData []byte) (bestChainState, error) {
	// Ensure the serialized data has enough bytes to properly deserialize
	// the hash, height, total transactions, and work sum length.
	if len(serializedData) < chainhash.HashSize+16 {
		return bestChainState{}, database.Error{
			ErrorCode:   database.ErrCorruption,
			Description: "corrupt best chain state",
		}
	}

	state := bestChainState{}
	copy(state.hash[:], serializedData[0:chainhash.HashSize])
	offset := uint32(chainhash.HashSize)
	state.height = byteOrder.Uint32(serializedData[offset : offset+4])
	offset += 4
	state.totalTxns = byteOrder.Uint64(serializedData[offset : offset+8])
	offset += 8
	workSumBytesLen := byteOrder.Uint32(serializedData[offset : offset+4])
	offset += 4

	// Ensure the serialized data has enough bytes to deserialize the work
	// sum.
	if uint32(len(serializedData[offset:])) < workSumBytesLen {
		return bestChainState{}, database.Error{
			ErrorCode:   database.ErrCorruption,
			Description: "corrupt best chain state",
		}
	}
	workSumBytes := serializedData[offset : offset+workSumBytesLen]
	state.workSum = new(big.Int).SetBytes(workSumBytes)

	return state, nil
}

// dbPutBestState uses an existing database transaction to update the best chain
// state with the given parameters.
func dbPutBestState(dbTx database.Tx, snapshot *BestState, workSum *big.Int) error {
	// Serialize the current best chain state.
	serializedData := serializeBestChainState(bestChainState{
		hash:      snapshot.Hash,
		height:    uint32(snapshot.Height),
		totalTxns: snapshot.TotalTxns,
		workSum:   workSum,
	})

	// Store the current best chain state into the database.
	return dbTx.Metadata().Put(chainStateKeyName, serializedData)
}

// createChainState initializes both the database and the chain state to the
// genesis block. This includes creating the necessary buckets and inserting
// the genesis block, so it must only be called on an uninitialized database.
func (b *BlockChain) createChainState() error {
	// Create a new node from the genesis block and set it as the best node.
	genesisBlock := btcutil.NewBlock(b.chainParams.GenesisBlock)
	genesisBlock.SetHeight(0)
	header := &genesisBlock.MsgBlock().Header
	node := newBlockNode(header, nil)
	node.status = statusDataStored | statusValid
	b.bestChain.SetTip(node)

	// Add the new node to the index which is used for faster lookups.
	b.index.addNode(node)

	// Initialize the state related to the best block. Since it is the
	// genesis block, use its timestamp for the median time.
	numTxns := uint64(len(genesisBlock.MsgBlock().Transactions))
	blockSize := uint64(genesisBlock.MsgBlock().SerializeSize())
	blockWeight := uint64(GetBlockWeight(genesisBlock))
	b.stateSnapshot = newBestState(node, blockSize, blockWeight, numTxns,
		numTxns, time.Unix(node.timestamp, 0))

	// Create the initial database chain state including creating the
	// necessary index buckets and inserting the genesis block.
	err := b.db.Update(func(dbTx database.Tx) error {
		meta := dbTx.Metadata()

		// Create the bucket that houses the block index data.
		_, err := meta.CreateBucket(blockIndexBucketName)
		if err != nil {
			return err
		}

		// Create the bucket that houses the chain block hash to height
		// index.
		_, err = meta.CreateBucket(hashIndexBucketName)
		if err != nil {
			return err
		}

		// Create the bucket that houses the chain block height to hash
		// index.
		_, err = meta.CreateBucket(heightIndexBucketName)
		if err != nil {
			return err
		}

		// Create the bucket that houses the spend journal data and
		// store its version.
		_, err = meta.CreateBucket(spendJournalBucketName)
		if err != nil {
			return err
		}
		err = dbPutVersion(dbTx, spendJournalVersionKeyName,
			latestSpendJournalBucketVersion)
		if err != nil {
			return err
		}

		// Create the bucket that houses the utxo set and store its
		// version. Note that the genesis block coinbase transaction is
		// intentionally not inserted here since it is not spendable by
		// consensus rules.
		_, err = meta.CreateBucket(utxoSetBucketName)
		if err != nil {
			return err
		}
		err = dbPutVersion(dbTx, utxoSetVersionKeyName,
			latestUtxoSetBucketVersion)
		if err != nil {
			return err
		}

		// Save the genesis block to the block index database.
		err = dbStoreBlockNode(dbTx, node)
		if err != nil {
			return err
		}

		// Add the genesis block hash to height and height to hash
		// mappings to the index.
		err = dbPutBlockIndex(dbTx, &node.hash, node.height)
		if err != nil {
			return err
		}

		// Store the current best chain state into the database.
		err = dbPutBestState(dbTx, b.stateSnapshot, node.workSum)
		if err != nil {
			return err
		}

		// Store the genesis block into the database.
		return dbStoreBlock(dbTx, genesisBlock)
	})
	return err
}

// initChainState attempts to load and initialize the chain state from the
// database. When the db does not yet contain any chain state, both it and the
// chain state are initialized to the genesis block.
func (b *BlockChain) initChainState() error {
	// Determine the state of the chain database. We may need to initialize
	// everything from scratch or upgrade certain buckets.
	var initialized, hasBlockIndex bool
	err := b.db.View(func(dbTx database.Tx) error {
		initialized = dbTx.Metadata().Get(chainStateKeyName) != nil
		hasBlockIndex = dbTx.Metadata().Bucket(blockIndexBucketName) != nil
		return nil
	})
	if err != nil {
		return err
	}

	if !initialized {
		// At this point the database has not already been initialized, so
		// initialize both it and the chain state to the genesis block.
		return b.createChainState()
	}

	if !hasBlockIndex {
		err := migrateBlockIndex(b.db)
		if err != nil {
			return err
		}
	}

	// Attempt to load the chain state from the database.
	err = b.db.View(func(dbTx database.Tx) error {
		// Fetch the stored chain state from the database metadata.
		// When it doesn't exist, it means the database hasn't been
		// initialized for use with chain yet, so break out now to allow
		// that to happen under a writable database transaction.
		serializedData := dbTx.Metadata().Get(chainStateKeyName)
		log.Tracef("Serialized chain state: %x", serializedData)
		state, err := deserializeBestChainState(serializedData)
		if err != nil {
			return err
		}

		// Load all of the headers from the data for the known best
		// chain and construct the block index accordingly. Since the
		// number of nodes is already known, perform a single alloc
		// for them versus a whole bunch of little ones to reduce
		// pressure on the GC.
		log.Infof("Loading block index...")

		blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)

		var i int32
		var lastNode *blockNode
		cursor := blockIndexBucket.Cursor()
		for ok := cursor.First(); ok; ok = cursor.Next() {
			header, status, err := deserializeBlockRow(cursor.Value())
			if err != nil {
				return err
			}

			// Determine the parent block node. Since we iterate block headers
			// in order of height, if the blocks are mostly linear there is a
			// very good chance the previous header processed is the parent.
			var parent *blockNode
			if lastNode == nil {
				blockHash := header.BlockHash()
				if !blockHash.IsEqual(b.chainParams.GenesisHash) {
					return AssertError(fmt.Sprintf("initChainState: Expected "+
						"first entry in block index to be genesis block, "+
						"found %s", blockHash))
				}
			} else if header.PrevBlock == lastNode.hash {
				// Since we iterate block headers in order of height, if the
				// blocks are mostly linear there is a very good chance the
				// previous header processed is the parent.
				parent = lastNode
			} else {
				parent = b.index.LookupNode(&header.PrevBlock)
				if parent == nil {
					return AssertError(fmt.Sprintf("initChainState: Could "+
						"not find parent for block %s", header.BlockHash()))
				}
			}

			// Initialize the block node for the block, connect it,
			// and add it to the block index.
			node := new(blockNode)
			initBlockNode(node, header, parent)
			node.status = status
			b.index.addNode(node)

			lastNode = node
			i++
		}

		// Set the best chain view to the stored best state.
		tip := b.index.LookupNode(&state.hash)
		if tip == nil {
			return AssertError(fmt.Sprintf("initChainState: cannot find "+
				"chain tip %s in block index", state.hash))
		}
		b.bestChain.SetTip(tip)

		// Load the raw block bytes for the best block.
		blockBytes, err := dbTx.FetchBlock(&state.hash)
		if err != nil {
			return err
		}
		var block wire.MsgBlock
		err = block.Deserialize(bytes.NewReader(blockBytes))
		if err != nil {
			return err
		}

		// As a final consistency check, we'll run through all the
		// nodes which are ancestors of the current chain tip, and mark
		// them as valid if they aren't already marked as such. This
		// is a safe assumption as all the blocks before the current tip
		// are valid by definition.
		for iterNode := tip; iterNode != nil; iterNode = iterNode.parent {
			// If this isn't already marked as valid in the index, then
			// we'll mark it as valid now to ensure consistency once
			// we're up and running.
			if !iterNode.status.KnownValid() {
				log.Infof("Block %v (height=%v) ancestor of "+
					"chain tip not marked as valid, "+
					"upgrading to valid for consistency",
					iterNode.hash, iterNode.height)

				b.index.SetStatusFlags(iterNode, statusValid)
			}
		}

		// Initialize the state related to the best block.
		blockSize := uint64(len(blockBytes))
		blockWeight := uint64(GetBlockWeight(btcutil.NewBlock(&block)))
		numTxns := uint64(len(block.Transactions))
		b.stateSnapshot = newBestState(tip, blockSize, blockWeight,
			numTxns, state.totalTxns, tip.CalcPastMedianTime())

		return nil
	})
	if err != nil {
		return err
	}

	// As we might have updated the index after it was loaded, we'll
	// attempt to flush the index to the DB. This will only result in a
	// write if the elements are dirty, so it'll usually be a noop.
	return b.index.flushToDB()
}

// deserializeBlockRow parses a value in the block index bucket into a block
// header and block status bitfield.
func deserializeBlockRow(blockRow []byte) (*wire.BlockHeader, blockStatus, error) {
	buffer := bytes.NewReader(blockRow)

	var header wire.BlockHeader
	err := header.Deserialize(buffer)
	if err != nil {
		return nil, statusNone, err
	}

	statusByte, err := buffer.ReadByte()
	if err != nil {
		return nil, statusNone, err
	}

	return &header, blockStatus(statusByte), nil
}

// dbFetchHeaderByHash uses an existing database transaction to retrieve the
// block header for the provided hash.
func dbFetchHeaderByHash(dbTx database.Tx, hash *chainhash.Hash) (*wire.BlockHeader, error) {
	headerBytes, err := dbTx.FetchBlockHeader(hash)
	if err != nil {
		return nil, err
	}

	var header wire.BlockHeader
	err = header.Deserialize(bytes.NewReader(headerBytes))
	if err != nil {
		return nil, err
	}

	return &header, nil
}

// dbFetchHeaderByHeight uses an existing database transaction to retrieve the
// block header for the provided height.
func dbFetchHeaderByHeight(dbTx database.Tx, height int32) (*wire.BlockHeader, error) {
	hash, err := dbFetchHashByHeight(dbTx, height)
	if err != nil {
		return nil, err
	}

	return dbFetchHeaderByHash(dbTx, hash)
}

// dbFetchBlockByNode uses an existing database transaction to retrieve the
// raw block for the provided node, deserialize it, and return a btcutil.Block
// with the height set.
func dbFetchBlockByNode(dbTx database.Tx, node *blockNode) (*btcutil.Block, error) {
	// Load the raw block bytes from the database.
	blockBytes, err := dbTx.FetchBlock(&node.hash)
	if err != nil {
		return nil, err
	}

	// Create the encapsulated block and set the height appropriately.
	block, err := btcutil.NewBlockFromBytes(blockBytes)
	if err != nil {
		return nil, err
	}
	block.SetHeight(node.height)

	return block, nil
}
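// NOTE: The example below is an editorial sketch and is not part of the
// original file. It illustrates why blockIndexKey encodes the height as a
// big-endian prefix: byte-wise key comparison, and therefore cursor iteration
// over the block index bucket, follows ascending block height. The function
// name exampleBlockIndexKeyOrder is hypothetical.
func exampleBlockIndexKeyOrder(hash *chainhash.Hash) bool {
	keyLow := blockIndexKey(hash, 9)
	keyHigh := blockIndexKey(hash, 10)

	// The big-endian 4-byte height prefix sorts numerically lower heights
	// before higher ones.
	return bytes.Compare(keyLow, keyHigh) < 0 // true
}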
// dbStoreBlockNode stores the block header and validation status to the block
// index bucket. This overwrites the current entry if there exists one.
func dbStoreBlockNode(dbTx database.Tx, node *blockNode) error {
	// Serialize block data to be stored.
	w := bytes.NewBuffer(make([]byte, 0, blockHdrSize+1))
	header := node.Header()
	err := header.Serialize(w)
	if err != nil {
		return err
	}
	err = w.WriteByte(byte(node.status))
	if err != nil {
		return err
	}
	value := w.Bytes()

	// Write block header data to block index bucket.
	blockIndexBucket := dbTx.Metadata().Bucket(blockIndexBucketName)
	key := blockIndexKey(&node.hash, uint32(node.height))
	return blockIndexBucket.Put(key, value)
}

// dbStoreBlock stores the provided block in the database if it is not already
// there. The full block data is written to ffldb.
func dbStoreBlock(dbTx database.Tx, block *btcutil.Block) error {
	hasBlock, err := dbTx.HasBlock(block.Hash())
	if err != nil {
		return err
	}
	if hasBlock {
		return nil
	}
	return dbTx.StoreBlock(block)
}

// blockIndexKey generates the binary key for an entry in the block index
// bucket. The key is composed of the block height encoded as a big-endian
// 32-bit unsigned int followed by the 32 byte block hash.
func blockIndexKey(blockHash *chainhash.Hash, blockHeight uint32) []byte {
	indexKey := make([]byte, chainhash.HashSize+4)
	binary.BigEndian.PutUint32(indexKey[0:4], blockHeight)
	copy(indexKey[4:chainhash.HashSize+4], blockHash[:])
	return indexKey
}

// BlockByHeight returns the block at the given height in the main chain.
//
// This function is safe for concurrent access.
func (b *BlockChain) BlockByHeight(blockHeight int32) (*btcutil.Block, error) {
	// Lookup the block height in the best chain.
	node := b.bestChain.NodeByHeight(blockHeight)
	if node == nil {
		str := fmt.Sprintf("no block at height %d exists", blockHeight)
		return nil, errNotInMainChain(str)
	}

	// Load the block from the database and return it.
	var block *btcutil.Block
	err := b.db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByNode(dbTx, node)
		return err
	})
	return block, err
}

// BlockByHash returns the block from the main chain with the given hash with
// the appropriate chain height set.
//
// This function is safe for concurrent access.
func (b *BlockChain) BlockByHash(hash *chainhash.Hash) (*btcutil.Block, error) {
	// Lookup the block hash in block index and ensure it is in the best
	// chain.
	node := b.index.LookupNode(hash)
	if node == nil || !b.bestChain.Contains(node) {
		str := fmt.Sprintf("block %s is not in the main chain", hash)
		return nil, errNotInMainChain(str)
	}

	// Load the block from the database and return it.
	var block *btcutil.Block
	err := b.db.View(func(dbTx database.Tx) error {
		var err error
		block, err = dbFetchBlockByNode(dbTx, node)
		return err
	})
	return block, err
}
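// NOTE: The example below is an editorial sketch and is not part of the
// original file. It shows the two exported lookup paths returning the same
// block: a height lookup against the best chain followed by a hash lookup of
// the block that was just returned. The function name exampleBlockLookups is
// hypothetical.
func exampleBlockLookups(b *BlockChain) error {
	// Fetch the genesis block by height, then confirm the hash-based
	// lookup resolves to the same block with its height set.
	blockByHeight, err := b.BlockByHeight(0)
	if err != nil {
		return err
	}
	blockByHash, err := b.BlockByHash(blockByHeight.Hash())
	if err != nil {
		return err
	}
	if blockByHash.Height() != blockByHeight.Height() {
		return fmt.Errorf("lookup mismatch")
	}
	return nil
}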