github.com/decred/dcrd/blockchain@v1.2.1/indexers/addrindex.go

// Copyright (c) 2016 The btcsuite developers
// Copyright (c) 2016-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package indexers

import (
	"errors"
	"fmt"
	"sync"

	"github.com/decred/dcrd/blockchain"
	"github.com/decred/dcrd/blockchain/stake"
	"github.com/decred/dcrd/chaincfg"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/database"
	"github.com/decred/dcrd/dcrec"
	"github.com/decred/dcrd/dcrutil"
	"github.com/decred/dcrd/txscript"
	"github.com/decred/dcrd/wire"
)

const (
	// addrIndexName is the human-readable name for the index.
	addrIndexName = "address index"

	// addrIndexVersion is the current version of the address index.
	addrIndexVersion = 2

	// level0MaxEntries is the maximum number of transactions that are
	// stored in level 0 of an address index entry. Subsequent levels store
	// 2^n * level0MaxEntries entries, or in other words, double the
	// maximum of the previous level.
	level0MaxEntries = 8

	// addrKeySize is the number of bytes an address key consumes in the
	// index. It consists of 1 byte address type + 20 bytes hash160.
	addrKeySize = 1 + 20

	// levelKeySize is the number of bytes a level key in the address index
	// consumes. It consists of the address key + 1 byte for the level.
	levelKeySize = addrKeySize + 1

	// levelOffset is the offset in the level key which identifies the level.
	levelOffset = levelKeySize - 1

	// addrKeyTypePubKeyHash is the address type in an address key which
	// represents both a pay-to-pubkey-hash and a pay-to-pubkey address.
	// This is done because both are identical for the purposes of the
	// address index.
	addrKeyTypePubKeyHash = 0

	// addrKeyTypePubKeyHashEdwards is the address type in an address key
	// which represents both a pay-to-pubkey-hash and a pay-to-pubkey-alt
	// address using Ed25519 signatures. This is done because both are
	// identical for the purposes of the address index.
	addrKeyTypePubKeyHashEdwards = 1

	// addrKeyTypePubKeyHashSchnorr is the address type in an address key
	// which represents both a pay-to-pubkey-hash and a pay-to-pubkey-alt
	// address using Schnorr signatures over the secp256k1 curve. This is
	// done because both are identical for the purposes of the address
	// index.
	addrKeyTypePubKeyHashSchnorr = 2

	// addrKeyTypeScriptHash is the address type in an address key which
	// represents a pay-to-script-hash address. This is necessary because
	// the hash of a pubkey address might be the same as that of a script
	// hash.
	addrKeyTypeScriptHash = 3
)

var (
	// addrIndexKey is the key of the address index and the db bucket used
	// to house it.
	addrIndexKey = []byte("txbyaddridx")

	// errUnsupportedAddressType is an error that is used to signal an
	// unsupported address type has been used.
	errUnsupportedAddressType = errors.New("address type is not supported " +
		"by the address index")
)

// -----------------------------------------------------------------------------
// The address index maps addresses referenced in the blockchain to a list of
// all the transactions involving that address. Transactions are stored
// according to their order of appearance in the blockchain. That is to say
// first by block height and then by offset inside the block. It is also
// important to note that this implementation requires the transaction index
// since it is needed in order to catch up old blocks, as the spent outputs
// will already have been pruned from the utxo set.
//
// The approach used to store the index is similar to a log-structured merge
// tree (LSM tree) and is thus similar to how leveldb works internally.
//
// Every address consists of one or more entries identified by a level starting
// from 0 where each level holds a maximum number of entries such that each
// subsequent level holds double the maximum of the previous one. In equation
// form, the number of entries each level holds is 2^n * level0MaxEntries.
//
// New transactions are appended to level 0 until it becomes full, at which
// point the entire level 0 entry is appended to the level 1 entry and level 0
// is cleared. This process continues until level 1 becomes full, at which
// point it will be appended to level 2 and cleared, and so on.
//
// The result of this is that the lower levels contain newer transactions and
// the transactions within each level are ordered from oldest to newest.
//
// The intent of this approach is to provide a balance between space efficiency
// and indexing cost. Storing one entry per transaction would have the lowest
// indexing cost, but would waste a lot of space because the same address hash
// would be duplicated for every transaction key. On the other hand, storing a
// single entry with all transactions would be the most space efficient, but
// would cause indexing cost to grow quadratically with the number of
// transactions involving the same address. The approach used here provides
// logarithmic insertion and retrieval.
//
// The serialized key format is:
//
//   <addr type><addr hash><level>
//
//   Field          Type      Size
//   addr type      uint8     1 byte
//   addr hash      hash160   20 bytes
//   level          uint8     1 byte
//   -----
//   Total: 22 bytes
//
// The serialized value format is:
//
//   [<block id><start offset><tx length><block index>,...]
//
//   Field          Type      Size
//   block id       uint32    4 bytes
//   start offset   uint32    4 bytes
//   tx length      uint32    4 bytes
//   block index    uint32    4 bytes
//   -----
//   Total: 16 bytes per indexed tx
// -----------------------------------------------------------------------------

// fetchBlockHashFunc defines a callback function to use in order to convert a
// serialized block ID to an associated block hash.
type fetchBlockHashFunc func(serializedID []byte) (*chainhash.Hash, error)

// serializeAddrIndexEntry serializes the provided block id and transaction
// location according to the format described in detail above.
func serializeAddrIndexEntry(blockID uint32, txLoc wire.TxLoc, blockIndex uint32) []byte {
	// Serialize the entry.
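	// The byte layout below restates the value format documented above;
	// each field is written with byteOrder, the package-level endianness
	// used for all numeric index fields:
	//
	//   serialized[0:4]   block id
	//   serialized[4:8]   start offset within the serialized block
	//   serialized[8:12]  tx length
	//   serialized[12:16] block index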
	serialized := make([]byte, txEntrySize)
	byteOrder.PutUint32(serialized, blockID)
	byteOrder.PutUint32(serialized[4:], uint32(txLoc.TxStart))
	byteOrder.PutUint32(serialized[8:], uint32(txLoc.TxLen))
	byteOrder.PutUint32(serialized[12:], blockIndex)
	return serialized
}

// deserializeAddrIndexEntry decodes the passed serialized byte slice into the
// provided region struct according to the format described in detail above and
// uses the passed block hash fetching function in order to convert the block ID
// to the associated block hash.
func deserializeAddrIndexEntry(serialized []byte, entry *TxIndexEntry, fetchBlockHash fetchBlockHashFunc) error {
	// Ensure there are enough bytes to decode.
	if len(serialized) < txEntrySize {
		return errDeserialize("unexpected end of data")
	}

	hash, err := fetchBlockHash(serialized[0:4])
	if err != nil {
		return err
	}
	region := &entry.BlockRegion
	region.Hash = hash
	region.Offset = byteOrder.Uint32(serialized[4:8])
	region.Len = byteOrder.Uint32(serialized[8:12])
	entry.BlockIndex = byteOrder.Uint32(serialized[12:16])
	return nil
}

// keyForLevel returns the key for a specific address and level in the address
// index entry.
func keyForLevel(addrKey [addrKeySize]byte, level uint8) [levelKeySize]byte {
	var key [levelKeySize]byte
	copy(key[:], addrKey[:])
	key[levelOffset] = level
	return key
}

// dbPutAddrIndexEntry updates the address index to include the provided entry
// according to the level-based scheme described in detail above.
func dbPutAddrIndexEntry(bucket internalBucket, addrKey [addrKeySize]byte, blockID uint32, txLoc wire.TxLoc, blockIndex uint32) error {
	// Start with level 0 and its initial max number of entries.
	curLevel := uint8(0)
	maxLevelBytes := level0MaxEntries * txEntrySize

	// Simply append the new entry to level 0 and return now when it will
	// fit. This is the most common path.
	newData := serializeAddrIndexEntry(blockID, txLoc, blockIndex)
	level0Key := keyForLevel(addrKey, 0)
	level0Data := bucket.Get(level0Key[:])
	if len(level0Data)+len(newData) <= maxLevelBytes {
		mergedData := newData
		if len(level0Data) > 0 {
			mergedData = make([]byte, len(level0Data)+len(newData))
			copy(mergedData, level0Data)
			copy(mergedData[len(level0Data):], newData)
		}
		return bucket.Put(level0Key[:], mergedData)
	}

	// At this point, level 0 is full, so merge each level into higher
	// levels as many times as needed to free up level 0.
	prevLevelData := level0Data
	for {
		// Each new level holds twice as much as the previous one.
		curLevel++
		maxLevelBytes *= 2

		// Move to the next level as long as the current level is full.
		curLevelKey := keyForLevel(addrKey, curLevel)
		curLevelData := bucket.Get(curLevelKey[:])
		if len(curLevelData) == maxLevelBytes {
			prevLevelData = curLevelData
			continue
		}

		// The current level has room for the data in the previous one,
		// so merge the data from the previous level into it.
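		// As a worked example of the scheme, with level0MaxEntries = 8:
		// appending the 9th entry for an address finds level 0 full, so
		// its 8 entries are merged into the empty level 1 and the new
		// entry starts a fresh level 0. Appending the 25th entry finds
		// both level 0 (8 entries) and level 1 (16 entries) full, so
		// level 1 is merged into level 2, level 0 is shifted up into
		// level 1, and the new entry again starts a fresh level 0.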
		mergedData := prevLevelData
		if len(curLevelData) > 0 {
			mergedData = make([]byte, len(curLevelData)+
				len(prevLevelData))
			copy(mergedData, curLevelData)
			copy(mergedData[len(curLevelData):], prevLevelData)
		}
		err := bucket.Put(curLevelKey[:], mergedData)
		if err != nil {
			return err
		}

		// Move all of the levels before the previous one up a level.
		for mergeLevel := curLevel - 1; mergeLevel > 0; mergeLevel-- {
			mergeLevelKey := keyForLevel(addrKey, mergeLevel)
			prevLevelKey := keyForLevel(addrKey, mergeLevel-1)
			prevData := bucket.Get(prevLevelKey[:])
			err := bucket.Put(mergeLevelKey[:], prevData)
			if err != nil {
				return err
			}
		}
		break
	}

	// Finally, insert the new entry into level 0 now that it is empty.
	return bucket.Put(level0Key[:], newData)
}

// dbFetchAddrIndexEntries returns block regions for transactions referenced by
// the given address key and the number of entries skipped, since it could have
// been less in the case where there are fewer total entries than the requested
// number of entries to skip.
func dbFetchAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, numToSkip, numRequested uint32, reverse bool, fetchBlockHash fetchBlockHashFunc) ([]TxIndexEntry, uint32, error) {
	// When the reverse flag is not set, all levels need to be fetched
	// because numToSkip and numRequested are counted from the oldest
	// transactions (highest level) and thus the total count is needed.
	// However, when the reverse flag is set, only enough records to satisfy
	// the requested amount are needed.
	var level uint8
	var serialized []byte
	for !reverse || len(serialized) < int(numToSkip+numRequested)*txEntrySize {
		curLevelKey := keyForLevel(addrKey, level)
		levelData := bucket.Get(curLevelKey[:])
		if levelData == nil {
			// Stop when there are no more levels.
			break
		}

		// Higher levels contain older transactions, so prepend them.
		prepended := make([]byte, len(serialized)+len(levelData))
		copy(prepended, levelData)
		copy(prepended[len(levelData):], serialized)
		serialized = prepended
		level++
	}

	// When the requested number of entries to skip is larger than the
	// number available, skip them all and return now with the actual number
	// skipped.
	numEntries := uint32(len(serialized) / txEntrySize)
	if numToSkip >= numEntries {
		return nil, numEntries, nil
	}

	// Nothing more to do when there are no requested entries.
	if numRequested == 0 {
		return nil, numToSkip, nil
	}

	// Limit the number to load based on the number of available entries,
	// the number to skip, and the number requested.
	numToLoad := numEntries - numToSkip
	if numToLoad > numRequested {
		numToLoad = numRequested
	}

	// Start the offset after all skipped entries and load the calculated
	// number.
	results := make([]TxIndexEntry, numToLoad)
	for i := uint32(0); i < numToLoad; i++ {
		// Calculate the read offset according to the reverse flag.
		var offset uint32
		if reverse {
			offset = (numEntries - numToSkip - i - 1) * txEntrySize
		} else {
			offset = (numToSkip + i) * txEntrySize
		}

		// Deserialize and populate the result.
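		// For example, with 10 available entries, numToSkip = 2, and
		// numToLoad = 3, the forward pass reads entries 2, 3, and 4
		// (oldest first), while the reverse pass reads entries 7, 6,
		// and 5 (newest first).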
		err := deserializeAddrIndexEntry(serialized[offset:],
			&results[i], fetchBlockHash)
		if err != nil {
			// Ensure any deserialization errors are returned as
			// database corruption errors.
			if isDeserializeErr(err) {
				err = database.Error{
					ErrorCode: database.ErrCorruption,
					Description: fmt.Sprintf("failed to "+
						"deserialize address index "+
						"for key %x: %v", addrKey, err),
				}
			}

			return nil, 0, err
		}
	}

	return results, numToSkip, nil
}

// minEntriesToReachLevel returns the minimum number of entries that are
// required to reach the given address index level.
func minEntriesToReachLevel(level uint8) int {
	maxEntriesForLevel := level0MaxEntries
	minRequired := 1
	for l := uint8(1); l <= level; l++ {
		minRequired += maxEntriesForLevel
		maxEntriesForLevel *= 2
	}
	return minRequired
}

// maxEntriesForLevel returns the maximum number of entries allowed for the
// given address index level.
func maxEntriesForLevel(level uint8) int {
	numEntries := level0MaxEntries
	for l := level; l > 0; l-- {
		numEntries *= 2
	}
	return numEntries
}

// dbRemoveAddrIndexEntries removes the specified number of entries from the
// address index for the provided key. An assertion error will be returned
// if the count exceeds the total number of entries in the index.
func dbRemoveAddrIndexEntries(bucket internalBucket, addrKey [addrKeySize]byte, count int) error {
	// Nothing to do if no entries are being deleted.
	if count <= 0 {
		return nil
	}

	// Make use of a local map to track pending updates and define a closure
	// to apply it to the database. This is done in order to reduce the
	// number of database reads and because there is more than one exit
	// path that needs to apply the updates.
	pendingUpdates := make(map[uint8][]byte)
	applyPending := func() error {
		for level, data := range pendingUpdates {
			curLevelKey := keyForLevel(addrKey, level)
			if len(data) == 0 {
				err := bucket.Delete(curLevelKey[:])
				if err != nil {
					return err
				}
				continue
			}
			err := bucket.Put(curLevelKey[:], data)
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Loop forwards through the levels while removing entries until the
	// specified number has been removed. This will potentially result in
	// entirely empty lower levels which will be backfilled below.
	var highestLoadedLevel uint8
	numRemaining := count
	for level := uint8(0); numRemaining > 0; level++ {
		// Load the data for the level from the database.
		curLevelKey := keyForLevel(addrKey, level)
		curLevelData := bucket.Get(curLevelKey[:])
		if len(curLevelData) == 0 && numRemaining > 0 {
			return AssertError(fmt.Sprintf("dbRemoveAddrIndexEntries "+
				"not enough entries for address key %x to "+
				"delete %d entries", addrKey, count))
		}
		pendingUpdates[level] = curLevelData
		highestLoadedLevel = level

		// Delete the entire level as needed.
		numEntries := len(curLevelData) / txEntrySize
		if numRemaining >= numEntries {
			pendingUpdates[level] = nil
			numRemaining -= numEntries
			continue
		}

		// Remove remaining entries to delete from the level.
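		// Entries within a level are ordered oldest to newest and the
		// newest entries for the address live in the lowest levels, so
		// trimming numRemaining entries from the tail of this level
		// removes exactly the most recently indexed transactions, which
		// is what a block disconnect requires.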
		offsetEnd := len(curLevelData) - (numRemaining * txEntrySize)
		pendingUpdates[level] = curLevelData[:offsetEnd]
		break
	}

	// When not all of the elements in level 0 were removed, there is
	// nothing left to do other than updating the database.
	if len(pendingUpdates[0]) != 0 {
		return applyPending()
	}

	// At this point there are one or more empty levels before the current
	// level which need to be backfilled and the current level might have
	// had some entries deleted from it as well. Since all levels after
	// level 0 are required to either be empty, half full, or completely
	// full, the current level must be adjusted accordingly by backfilling
	// each previous level in a way which satisfies the requirements. Any
	// entries that are left are assigned to level 0 after the loop as they
	// are guaranteed to fit by the logic in the loop. In other words, this
	// effectively squashes all remaining entries in the current level into
	// the lowest possible levels while following the level rules.
	//
	// Note that the level after the current level might also have entries
	// and gaps are not allowed, so this also keeps track of the lowest
	// empty level so the code below knows how far to backfill in case it is
	// required.
	lowestEmptyLevel := uint8(255)
	curLevelData := pendingUpdates[highestLoadedLevel]
	curLevelMaxEntries := maxEntriesForLevel(highestLoadedLevel)
	for level := highestLoadedLevel; level > 0; level-- {
		// When there are not enough entries left in the current level
		// for the number that would be required to reach it, clear the
		// current level which effectively moves them all up to the
		// previous level on the next iteration. Otherwise, there are
		// sufficient entries, so update the current level to
		// contain as many entries as possible while still leaving
		// enough remaining entries required to reach the level.
		numEntries := len(curLevelData) / txEntrySize
		prevLevelMaxEntries := curLevelMaxEntries / 2
		minPrevRequired := minEntriesToReachLevel(level - 1)
		if numEntries < prevLevelMaxEntries+minPrevRequired {
			lowestEmptyLevel = level
			pendingUpdates[level] = nil
		} else {
			// This level can only be completely full or half full,
			// so choose the appropriate offset to ensure enough
			// entries remain to reach the level.
			var offset int
			if numEntries-curLevelMaxEntries >= minPrevRequired {
				offset = curLevelMaxEntries * txEntrySize
			} else {
				offset = prevLevelMaxEntries * txEntrySize
			}
			pendingUpdates[level] = curLevelData[:offset]
			curLevelData = curLevelData[offset:]
		}

		curLevelMaxEntries = prevLevelMaxEntries
	}
	pendingUpdates[0] = curLevelData
	if len(curLevelData) == 0 {
		lowestEmptyLevel = 0
	}

	// When the highest loaded level is empty, it's possible the level after
	// it still has data and thus that data needs to be backfilled as well.
	for len(pendingUpdates[highestLoadedLevel]) == 0 {
		// When the next level is empty too, there is no data left to
		// continue backfilling, so there is nothing left to do.
		// Otherwise, populate the pending updates map with the newly
		// loaded data and update the highest loaded level accordingly.
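		// As a worked example with level0MaxEntries = 8: removing 21
		// entries from levels holding (5, 16, 32) entries empties
		// levels 0 and 1 entirely, so the 32 full entries loaded from
		// level 2 are repeatedly split in half below, leaving the
		// valid arrangement (8, 8, 16).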
		level := highestLoadedLevel + 1
		curLevelKey := keyForLevel(addrKey, level)
		levelData := bucket.Get(curLevelKey[:])
		if len(levelData) == 0 {
			break
		}
		pendingUpdates[level] = levelData
		highestLoadedLevel = level

		// At this point the highest level is not empty, but it might
		// be half full. When that is the case, move it up a level to
		// simplify the code below which backfills all lower levels that
		// are still empty. This also means the current level will be
		// empty, so the loop will perform another iteration to
		// potentially backfill this level with data from the next one.
		curLevelMaxEntries := maxEntriesForLevel(level)
		if len(levelData)/txEntrySize != curLevelMaxEntries {
			pendingUpdates[level] = nil
			pendingUpdates[level-1] = levelData
			level--
			curLevelMaxEntries /= 2
		}

		// Backfill all lower levels that are still empty by iteratively
		// halving the data until the lowest empty level is filled.
		for level > lowestEmptyLevel {
			offset := (curLevelMaxEntries / 2) * txEntrySize
			pendingUpdates[level] = levelData[:offset]
			levelData = levelData[offset:]
			pendingUpdates[level-1] = levelData
			level--
			curLevelMaxEntries /= 2
		}

		// The lowest possible empty level is now the highest loaded
		// level.
		lowestEmptyLevel = highestLoadedLevel
	}

	// Apply the pending updates.
	return applyPending()
}

// addrToKey converts known address types to an addrindex key. An error is
// returned for unsupported types.
func addrToKey(addr dcrutil.Address, params *chaincfg.Params) ([addrKeySize]byte, error) {
	switch addr := addr.(type) {
	case *dcrutil.AddressPubKeyHash:
		switch addr.DSA(params) {
		case dcrec.STEcdsaSecp256k1:
			var result [addrKeySize]byte
			result[0] = addrKeyTypePubKeyHash
			copy(result[1:], addr.Hash160()[:])
			return result, nil
		case dcrec.STEd25519:
			var result [addrKeySize]byte
			result[0] = addrKeyTypePubKeyHashEdwards
			copy(result[1:], addr.Hash160()[:])
			return result, nil
		case dcrec.STSchnorrSecp256k1:
			var result [addrKeySize]byte
			result[0] = addrKeyTypePubKeyHashSchnorr
			copy(result[1:], addr.Hash160()[:])
			return result, nil
		}

	case *dcrutil.AddressScriptHash:
		var result [addrKeySize]byte
		result[0] = addrKeyTypeScriptHash
		copy(result[1:], addr.Hash160()[:])
		return result, nil

	case *dcrutil.AddressSecpPubKey:
		var result [addrKeySize]byte
		result[0] = addrKeyTypePubKeyHash
		copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
		return result, nil

	case *dcrutil.AddressEdwardsPubKey:
		var result [addrKeySize]byte
		result[0] = addrKeyTypePubKeyHashEdwards
		copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
		return result, nil

	case *dcrutil.AddressSecSchnorrPubKey:
		var result [addrKeySize]byte
		result[0] = addrKeyTypePubKeyHashSchnorr
		copy(result[1:], addr.AddressPubKeyHash().Hash160()[:])
		return result, nil
	}

	return [addrKeySize]byte{}, errUnsupportedAddressType
}

// AddrIndex implements a transaction by address index. That is to say, it
// supports querying all transactions that reference a given address because
// they are either crediting or debiting the address. The returned transactions
// are ordered according to their order of appearance in the blockchain. In
// other words, first by block height and then by offset inside the block.
//
// In addition, support is provided for a memory-only index of unconfirmed
// transactions such as those which are kept in the memory pool before inclusion
// in a block.
type AddrIndex struct {
	// The following fields are set when the instance is created and can't
	// be changed afterwards, so there is no need to protect them with a
	// separate mutex.
	db          database.DB
	chainParams *chaincfg.Params

	// The following fields are used to quickly link transactions and
	// addresses that have not been included into a block yet when an
	// address index is being maintained. They are protected by the
	// unconfirmedLock field.
	//
	// The txnsByAddr field is used to keep an index of all transactions
	// which either create an output to a given address or spend from a
	// previous output to it keyed by the address.
	//
	// The addrsByTx field is essentially the reverse and is used to
	// keep an index of all addresses which a given transaction involves.
	// This allows fairly efficient updates when transactions are removed
	// once they are included into a block.
	unconfirmedLock sync.RWMutex
	txnsByAddr      map[[addrKeySize]byte]map[chainhash.Hash]*dcrutil.Tx
	addrsByTx       map[chainhash.Hash]map[[addrKeySize]byte]struct{}
}

// Ensure the AddrIndex type implements the Indexer interface.
var _ Indexer = (*AddrIndex)(nil)

// Ensure the AddrIndex type implements the NeedsInputser interface.
var _ NeedsInputser = (*AddrIndex)(nil)

// NeedsInputs signals that the index requires the referenced inputs in order
// to properly create the index.
//
// This implements the NeedsInputser interface.
func (idx *AddrIndex) NeedsInputs() bool {
	return true
}

// Init is only provided to satisfy the Indexer interface as there is nothing to
// initialize for this index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Init() error {
	// Nothing to do.
	return nil
}

// Key returns the database key to use for the index as a byte slice.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Key() []byte {
	return addrIndexKey
}

// Name returns the human-readable name of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Name() string {
	return addrIndexName
}

// Version returns the current version of the index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Version() uint32 {
	return addrIndexVersion
}

// Create is invoked when the indexer manager determines the index needs
// to be created for the first time. It creates the bucket for the address
// index.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) Create(dbTx database.Tx) error {
	_, err := dbTx.Metadata().CreateBucket(addrIndexKey)
	return err
}

// writeIndexData represents the address index data to be written for one block.
// It consists of the address mapped to an ordered list of the transactions
// that involve the address in the block. It is ordered so the transactions can
// be stored in the order they appear in the block.
type writeIndexData map[[addrKeySize]byte][]int
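// For example, if an address appears in regular transactions 0 and 3 of a
// block, its serialized address key in the map would be associated with the
// slice [0, 3], preserving the order of appearance within the block.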
// indexPkScript extracts all standard addresses from the passed public key
// script and maps each of them to the associated transaction using the passed
// map.
func (idx *AddrIndex) indexPkScript(data writeIndexData, scriptVersion uint16, pkScript []byte, txIdx int, isSStx bool) {
	// Nothing to index if the script is non-standard or otherwise doesn't
	// contain any addresses.
	class, addrs, _, err := txscript.ExtractPkScriptAddrs(scriptVersion, pkScript,
		idx.chainParams)
	if err != nil {
		return
	}

	if isSStx && class == txscript.NullDataTy {
		addr, err := stake.AddrFromSStxPkScrCommitment(pkScript, idx.chainParams)
		if err != nil {
			return
		}

		addrs = append(addrs, addr)
	}

	if len(addrs) == 0 {
		return
	}

	for _, addr := range addrs {
		addrKey, err := addrToKey(addr, idx.chainParams)
		if err != nil {
			// Ignore unsupported address types.
			continue
		}

		// Avoid inserting the transaction more than once. Since the
		// transactions are indexed serially, any duplicates will be
		// indexed in a row, so checking the most recent entry for the
		// address is enough to detect duplicates.
		indexedTxns := data[addrKey]
		numTxns := len(indexedTxns)
		if numTxns > 0 && indexedTxns[numTxns-1] == txIdx {
			continue
		}
		indexedTxns = append(indexedTxns, txIdx)
		data[addrKey] = indexedTxns
	}
}

// indexBlock extracts all of the standard addresses from all of the
// regular and stake transactions in the passed block and maps each of them to
// the associated transaction using the passed map.
func (idx *AddrIndex) indexBlock(data writeIndexData, block *dcrutil.Block, view *blockchain.UtxoViewpoint) {
	regularTxns := block.Transactions()
	for txIdx, tx := range regularTxns {
		// Coinbases do not reference any inputs. Since the block is
		// required to have already gone through full validation, it has
		// already been proven that the first transaction in the block
		// is a coinbase.
		if txIdx != 0 {
			for _, txIn := range tx.MsgTx().TxIn {
				// The view should always have the input since
				// the index contract requires it; however, be
				// safe and simply ignore any missing entries.
				origin := &txIn.PreviousOutPoint
				entry := view.LookupEntry(&origin.Hash)
				if entry == nil {
					log.Warnf("Missing input %v for tx %v while "+
						"indexing block %v (height %v)\n", origin.Hash,
						tx.Hash(), block.Hash(), block.Height())
					continue
				}

				version := entry.ScriptVersionByIndex(origin.Index)
				pkScript := entry.PkScriptByIndex(origin.Index)
				txType := entry.TransactionType()
				idx.indexPkScript(data, version, pkScript, txIdx,
					txType == stake.TxTypeSStx)
			}
		}

		for _, txOut := range tx.MsgTx().TxOut {
			idx.indexPkScript(data, txOut.Version, txOut.PkScript, txIdx,
				false)
		}
	}

	for txIdx, tx := range block.STransactions() {
		msgTx := tx.MsgTx()
		thisTxOffset := txIdx + len(regularTxns)

		isSSGen := stake.IsSSGen(msgTx)
		for i, txIn := range msgTx.TxIn {
			// Skip stakebases.
			if isSSGen && i == 0 {
				continue
			}

			// The view should always have the input since
			// the index contract requires it; however, be
			// safe and simply ignore any missing entries.
			origin := &txIn.PreviousOutPoint
			entry := view.LookupEntry(&origin.Hash)
			if entry == nil {
				log.Warnf("Missing input %v for tx %v while "+
					"indexing block %v (height %v)\n", origin.Hash,
					tx.Hash(), block.Hash(), block.Height())
				continue
			}

			version := entry.ScriptVersionByIndex(origin.Index)
			pkScript := entry.PkScriptByIndex(origin.Index)
			txType := entry.TransactionType()
			idx.indexPkScript(data, version, pkScript, thisTxOffset,
				txType == stake.TxTypeSStx)
		}

		isSStx := stake.IsSStx(msgTx)
		for _, txOut := range msgTx.TxOut {
			idx.indexPkScript(data, txOut.Version, txOut.PkScript,
				thisTxOffset, isSStx)
		}
	}
}

// ConnectBlock is invoked by the index manager when a new block has been
// connected to the main chain. This indexer adds a mapping for each address
// the transactions in the block involve.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) ConnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// NOTE: The fact that the block can disapprove the regular tree of the
	// previous block is ignored for this index because even though the
	// disapproved transactions no longer apply spend semantics, they still
	// exist within the block and thus have to be processed before the next
	// block disapproves them.

	// The offset and length of the transactions within the serialized block.
	txLocs, stakeTxLocs, err := block.TxLoc()
	if err != nil {
		return err
	}

	// Get the internal block ID associated with the block.
	blockID, err := dbFetchBlockIDByHash(dbTx, block.Hash())
	if err != nil {
		return err
	}

	// Build all of the address to transaction mappings in a local map.
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, view)

	// Add all of the index entries for each address.
	stakeIdxsStart := len(txLocs)
	addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		for _, txIdx := range txIdxs {
			// Adjust the block index and slice of transaction
			// locations to use based on the regular or stake tree.
			txLocations := txLocs
			blockIndex := txIdx
			if txIdx >= stakeIdxsStart {
				txLocations = stakeTxLocs
				blockIndex -= stakeIdxsStart
			}

			err := dbPutAddrIndexEntry(addrIdxBucket, addrKey, blockID,
				txLocations[blockIndex], uint32(blockIndex))
			if err != nil {
				return err
			}
		}
	}

	return nil
}

// DisconnectBlock is invoked by the index manager when a block has been
// disconnected from the main chain. This indexer removes the address mappings
// each transaction in the block involves.
//
// This is part of the Indexer interface.
func (idx *AddrIndex) DisconnectBlock(dbTx database.Tx, block, parent *dcrutil.Block, view *blockchain.UtxoViewpoint) error {
	// NOTE: The fact that the block can disapprove the regular tree of the
	// previous block is ignored for this index because even though the
	// disapproved transactions no longer apply spend semantics, they still
	// exist within the block and thus have to be processed before the next
	// block disapproves them.

	// Build all of the address to transaction mappings in a local map.
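	// Because indexBlock regenerates the same per-address transaction
	// lists that were produced when the block was connected, the counts
	// passed to dbRemoveAddrIndexEntries below match the number of entries
	// that were originally inserted for each address.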
	addrsToTxns := make(writeIndexData)
	idx.indexBlock(addrsToTxns, block, view)

	// Remove all of the index entries for each address.
	bucket := dbTx.Metadata().Bucket(addrIndexKey)
	for addrKey, txIdxs := range addrsToTxns {
		err := dbRemoveAddrIndexEntries(bucket, addrKey, len(txIdxs))
		if err != nil {
			return err
		}
	}

	return nil
}

// EntriesForAddress returns a slice of details which identify each transaction,
// including a block region, that involves the passed address according to the
// specified number to skip, number requested, and whether or not the results
// should be reversed. It also returns the number actually skipped since it
// could be less in the case where there are not enough entries.
//
// NOTE: These results only include transactions confirmed in blocks. See the
// UnconfirmedTxnsForAddress method for obtaining unconfirmed transactions
// that involve a given address.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) EntriesForAddress(dbTx database.Tx, addr dcrutil.Address, numToSkip, numRequested uint32, reverse bool) ([]TxIndexEntry, uint32, error) {
	addrKey, err := addrToKey(addr, idx.chainParams)
	if err != nil {
		return nil, 0, err
	}

	var entries []TxIndexEntry
	var skipped uint32
	err = idx.db.View(func(dbTx database.Tx) error {
		// Create closure to lookup the block hash given the ID using
		// the database transaction.
		fetchBlockHash := func(id []byte) (*chainhash.Hash, error) {
			return dbFetchBlockHashBySerializedID(dbTx, id)
		}

		var err error
		addrIdxBucket := dbTx.Metadata().Bucket(addrIndexKey)
		entries, skipped, err = dbFetchAddrIndexEntries(addrIdxBucket,
			addrKey, numToSkip, numRequested, reverse,
			fetchBlockHash)
		return err
	})

	return entries, skipped, err
}

// indexUnconfirmedAddresses modifies the unconfirmed (memory-only) address
// index to include mappings for the addresses encoded by the passed public key
// script to the transaction.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) indexUnconfirmedAddresses(scriptVersion uint16, pkScript []byte, tx *dcrutil.Tx, isSStx bool) {
	// The error is ignored here since the only reason it can fail is if the
	// script fails to parse and it was already validated before being
	// admitted to the mempool.
	class, addresses, _, _ := txscript.ExtractPkScriptAddrs(scriptVersion,
		pkScript, idx.chainParams)

	if isSStx && class == txscript.NullDataTy {
		addr, err := stake.AddrFromSStxPkScrCommitment(pkScript, idx.chainParams)
		if err != nil {
			// Give up when the commitment fails to decode, though
			// it should always be decodable for scripts that were
			// already validated by the mempool.
			return
		}

		addresses = append(addresses, addr)
	}

	for _, addr := range addresses {
		// Ignore unsupported address types.
		addrKey, err := addrToKey(addr, idx.chainParams)
		if err != nil {
			continue
		}

		// Add a mapping from the address to the transaction.
		idx.unconfirmedLock.Lock()
		addrIndexEntry := idx.txnsByAddr[addrKey]
		if addrIndexEntry == nil {
			addrIndexEntry = make(map[chainhash.Hash]*dcrutil.Tx)
			idx.txnsByAddr[addrKey] = addrIndexEntry
		}
		addrIndexEntry[*tx.Hash()] = tx

		// Add a mapping from the transaction to the address.
		addrsByTxEntry := idx.addrsByTx[*tx.Hash()]
		if addrsByTxEntry == nil {
			addrsByTxEntry = make(map[[addrKeySize]byte]struct{})
			idx.addrsByTx[*tx.Hash()] = addrsByTxEntry
		}
		addrsByTxEntry[addrKey] = struct{}{}
		idx.unconfirmedLock.Unlock()
	}
}

// AddUnconfirmedTx adds all addresses related to the transaction to the
// unconfirmed (memory-only) address index.
//
// NOTE: This transaction MUST have already been validated by the memory pool
// before calling this function with it and have all of the inputs available in
// the provided utxo view. Failure to do so could result in some or all
// addresses not being indexed.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) AddUnconfirmedTx(tx *dcrutil.Tx, utxoView *blockchain.UtxoViewpoint) {
	// Index addresses of all referenced previous transaction outputs.
	//
	// The existence checks are elided since this is only called after the
	// transaction has already been validated and thus all inputs are
	// already known to exist.
	msgTx := tx.MsgTx()
	isSSGen := stake.IsSSGen(msgTx)
	for i, txIn := range msgTx.TxIn {
		// Skip stakebases.
		if i == 0 && isSSGen {
			continue
		}

		entry := utxoView.LookupEntry(&txIn.PreviousOutPoint.Hash)
		if entry == nil {
			// Ignore missing entries. This should never happen
			// in practice since the function comments specifically
			// call out all inputs must be available.
			continue
		}
		version := entry.ScriptVersionByIndex(txIn.PreviousOutPoint.Index)
		pkScript := entry.PkScriptByIndex(txIn.PreviousOutPoint.Index)
		txType := entry.TransactionType()
		idx.indexUnconfirmedAddresses(version, pkScript, tx,
			txType == stake.TxTypeSStx)
	}

	// Index addresses of all created outputs.
	isSStx := stake.IsSStx(msgTx)
	for _, txOut := range msgTx.TxOut {
		idx.indexUnconfirmedAddresses(txOut.Version, txOut.PkScript, tx,
			isSStx)
	}
}

// RemoveUnconfirmedTx removes the passed transaction from the unconfirmed
// (memory-only) address index.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) RemoveUnconfirmedTx(hash *chainhash.Hash) {
	idx.unconfirmedLock.Lock()
	defer idx.unconfirmedLock.Unlock()

	// Remove all address references to the transaction from the address
	// index and remove the entry for the address altogether if it no longer
	// references any transactions.
	for addrKey := range idx.addrsByTx[*hash] {
		delete(idx.txnsByAddr[addrKey], *hash)
		if len(idx.txnsByAddr[addrKey]) == 0 {
			delete(idx.txnsByAddr, addrKey)
		}
	}

	// Remove the entry from the transaction to address lookup map as well.
	delete(idx.addrsByTx, *hash)
}

// UnconfirmedTxnsForAddress returns all transactions currently in the
// unconfirmed (memory-only) address index that involve the passed address.
// Unsupported address types are ignored and produce no results.
//
// This function is safe for concurrent access.
func (idx *AddrIndex) UnconfirmedTxnsForAddress(addr dcrutil.Address) []*dcrutil.Tx {
	// Ignore unsupported address types.
	addrKey, err := addrToKey(addr, idx.chainParams)
	if err != nil {
		return nil
	}

	// Protect concurrent access.
	idx.unconfirmedLock.RLock()
	defer idx.unconfirmedLock.RUnlock()

	// Return a new slice with the results if there are any. This ensures
	// safe concurrency.
	if txns, exists := idx.txnsByAddr[addrKey]; exists {
		addressTxns := make([]*dcrutil.Tx, 0, len(txns))
		for _, tx := range txns {
			addressTxns = append(addressTxns, tx)
		}
		return addressTxns
	}

	return nil
}

// NewAddrIndex returns a new instance of an indexer that is used to create a
// mapping of all addresses in the blockchain to the respective transactions
// that involve them.
//
// It implements the Indexer interface which plugs into the IndexManager that in
// turn is used by the blockchain package. This allows the index to be
// seamlessly maintained along with the chain.
func NewAddrIndex(db database.DB, chainParams *chaincfg.Params) *AddrIndex {
	return &AddrIndex{
		db:          db,
		chainParams: chainParams,
		txnsByAddr:  make(map[[addrKeySize]byte]map[chainhash.Hash]*dcrutil.Tx),
		addrsByTx:   make(map[chainhash.Hash]map[[addrKeySize]byte]struct{}),
	}
}

// DropAddrIndex drops the address index from the provided database if it
// exists.
func DropAddrIndex(db database.DB, interrupt <-chan struct{}) error {
	return dropFlatIndex(db, addrIndexKey, addrIndexName, interrupt)
}

// DropIndex drops the address index from the provided database if it exists.
func (*AddrIndex) DropIndex(db database.DB, interrupt <-chan struct{}) error {
	return DropAddrIndex(db, interrupt)
}
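// The following self-contained program is a minimal sketch, not part of this
// package, that mirrors the unexported level-capacity helpers above (assuming
// the same level0MaxEntries = 8). It can be useful when reasoning about the
// merge and backfill logic:
//
//	package main
//
//	import "fmt"
//
//	const level0MaxEntries = 8
//
//	// maxEntriesForLevel mirrors the helper above: level n holds
//	// 2^n * level0MaxEntries entries.
//	func maxEntriesForLevel(level uint8) int {
//		numEntries := level0MaxEntries
//		for l := level; l > 0; l-- {
//			numEntries *= 2
//		}
//		return numEntries
//	}
//
//	// minEntriesToReachLevel mirrors the helper above: one entry in
//	// level 0 plus all of the full levels below the target level.
//	func minEntriesToReachLevel(level uint8) int {
//		maxEntries := level0MaxEntries
//		minRequired := 1
//		for l := uint8(1); l <= level; l++ {
//			minRequired += maxEntries
//			maxEntries *= 2
//		}
//		return minRequired
//	}
//
//	func main() {
//		// Prints: level 0 holds 8 (reached at 1), level 1 holds 16
//		// (reached at 9), level 2 holds 32 (reached at 25), and so on.
//		for level := uint8(0); level < 5; level++ {
//			fmt.Printf("level %d: capacity %d, min entries to reach %d\n",
//				level, maxEntriesForLevel(level), minEntriesToReachLevel(level))
//		}
//	}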