github.com/decred/dcrlnd@v0.7.6/chainntnfs/chainscannotify/csnotify.go

package csnotify

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/chaincfg/v3"
	"github.com/decred/dcrd/dcrutil/v4"
	"github.com/decred/dcrd/gcs/v4"
	"github.com/decred/dcrd/wire"
	"github.com/decred/dcrlnd/chainntnfs"
	"github.com/decred/dcrlnd/chainscan"
	"github.com/decred/dcrlnd/queue"
)

var (
	// ErrChainNotifierShuttingDown is returned when an attempt is made to
	// register for a spend notification after the notifier has already
	// been stopped.
	ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
		"while attempting to register for spend notification")
)

// ChainSource abstracts the chain backend that provides the notifier with
// block, header and committed filter data.
type ChainSource interface {
	GetBlock(context.Context, *chainhash.Hash) (*wire.MsgBlock, error)
	CurrentTip(context.Context) (*chainhash.Hash, int32, error)

	ChainEvents(context.Context) <-chan chainscan.ChainEvent

	GetCFilter(context.Context, int32) (*chainhash.Hash, [16]byte, *gcs.FilterV2, error)

	GetBlockHash(context.Context, int32) (*chainhash.Hash, error)
	GetBlockHeader(context.Context, *chainhash.Hash) (*wire.BlockHeader, error)
	StoresReorgedHeaders() bool

	Run(context.Context) error
}

// chainConn adapts a ChainSource (plus a context) into a chainntnfs.ChainConn.
type chainConn struct {
	c            ChainSource
	ctx          context.Context
	storesReorgs bool
}

// GetBlockHash returns the hash of the main chain block at the given height.
func (c *chainConn) GetBlockHash(height int64) (*chainhash.Hash, error) {
	return c.c.GetBlockHash(c.ctx, int32(height))
}

// GetBlockHeader returns the header of the block identified by the given hash.
func (c *chainConn) GetBlockHeader(hash *chainhash.Hash) (*wire.BlockHeader, error) {
	return c.c.GetBlockHeader(c.ctx, hash)
}
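
// A compile-time assertion could document this adapter relationship
// explicitly. This is a sketch, and assumes chainntnfs.ChainConn is satisfied
// by exactly the two methods implemented above:
//
//	var _ chainntnfs.ChainConn = (*chainConn)(nil)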

// ChainscanNotifier implements the ChainNotifier interface by using the
// chainscan package components. Multiple concurrent clients are supported.
// All notifications are achieved via non-blocking sends on client channels.
//
// NOTE: This assumes for the moment that the backing chain source is a
// dcrwallet instance (either embedded or remote), so it makes assumptions
// about the kinds of things the wallet stores and how it behaves.
type ChainscanNotifier struct {
	epochClientCounter uint64 // To be used atomically.

	start   sync.Once
	active  int32 // To be used atomically.
	stopped int32 // To be used atomically.

	ctx       context.Context
	cancelCtx func()

	tipTxsMtx     sync.Mutex
	tipWatcherTxs map[chainhash.Hash]map[*wire.MsgTx]chainscan.Event

	historical  *chainscan.Historical
	tipWatcher  *chainscan.TipWatcher
	chainConn   *chainConn
	chainEvents <-chan chainscan.ChainEvent

	chainParams *chaincfg.Params

	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	txNotifier *chainntnfs.TxNotifier

	blockEpochClients map[uint64]*blockEpochRegistration

	bestBlock chainntnfs.BlockEpoch

	chainUpdates chan *filteredBlock

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	wg   sync.WaitGroup
	quit chan struct{}
}

// Ensure ChainscanNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*ChainscanNotifier)(nil)

// New returns a new ChainscanNotifier instance backed by the given
// ChainSource. This function does not start any of the underlying components;
// Start must be called before the notifier becomes functional.
func New(chainSrc ChainSource,
	chainParams *chaincfg.Params, spendHintCache chainntnfs.SpendHintCache,
	confirmHintCache chainntnfs.ConfirmHintCache) (*ChainscanNotifier, error) {

	ctx, cancel := context.WithCancel(context.Background())
	chainConn := &chainConn{
		c:            chainSrc,
		ctx:          ctx,
		storesReorgs: chainSrc.StoresReorgedHeaders(),
	}

	historical := chainscan.NewHistorical(chainSrc)
	tipWatcher := chainscan.NewTipWatcher(chainSrc)

	notifier := &ChainscanNotifier{
		chainParams: chainParams,

		historical:    historical,
		tipWatcher:    tipWatcher,
		chainConn:     chainConn,
		tipWatcherTxs: make(map[chainhash.Hash]map[*wire.MsgTx]chainscan.Event),

		ctx:       ctx,
		cancelCtx: cancel,

		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		chainUpdates: make(chan *filteredBlock, 100),

		spendHintCache:   spendHintCache,
		confirmHintCache: confirmHintCache,

		quit: make(chan struct{}),
	}

	return notifier, nil
}
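
// A minimal lifecycle sketch (assumes `src` is a concrete ChainSource
// implementation and that the hint caches come from the caller's database
// layer; the names are illustrative only):
//
//	notifier, err := csnotify.New(src, chainParams, spendHints, confHints)
//	if err != nil {
//		return err
//	}
//	if err := notifier.Start(); err != nil {
//		return err
//	}
//	defer notifier.Stop()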

// runAndLogOnError runs f in its own goroutine and logs any returned error,
// unless the context has already been canceled, in which case the error is
// assumed to be a byproduct of the shutdown and is discarded.
func runAndLogOnError(ctx context.Context, f func(context.Context) error, name string) {
	go func() {
		err := f(ctx)
		select {
		case <-ctx.Done():
			// Any errors were due to the context being done, so
			// they are safe to ignore.
			return
		default:
		}
		if err != nil {
			chainntnfs.Log.Errorf("CSNotify error while running %s: %v", name, err)
		}
	}()
}
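
// The same helper composes with inline closures when a component needs extra
// arguments; a hypothetical sketch (someComponent and opts are illustrative):
//
//	runAndLogOnError(ctx, func(ctx context.Context) error {
//		return someComponent.Run(ctx, opts)
//	}, "someComponent")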

// Start launches the backing chain source, the tip watcher and the historical
// scanner, registers for chain events, and finally launches all related
// helper goroutines.
func (n *ChainscanNotifier) Start() error {
	var startErr error
	n.start.Do(func() {
		startErr = n.startNotifier()
	})
	return startErr
}

func (n *ChainscanNotifier) startNotifier() error {
	chainntnfs.Log.Infof("Starting chainscan notifier")

	runAndLogOnError(n.ctx, n.chainConn.c.Run, "chainConn")
	runAndLogOnError(n.ctx, n.tipWatcher.Run, "tipWatcher")
	runAndLogOnError(n.ctx, n.historical.Run, "historical")

	n.chainEvents = n.tipWatcher.ChainEvents(n.ctx)

	currentHash, currentHeight, err := n.chainConn.c.CurrentTip(n.ctx)
	if err != nil {
		return err
	}

	currentHeader, err := n.chainConn.c.GetBlockHeader(n.ctx, currentHash)
	if err != nil {
		return err
	}

	chainntnfs.Log.Debugf("Starting txnotifier at height %d hash %s",
		currentHeight, currentHash)

	n.txNotifier = chainntnfs.NewTxNotifier(
		uint32(currentHeight), chainntnfs.ReorgSafetyLimit,
		n.confirmHintCache, n.spendHintCache, n.chainParams,
	)

	n.bestBlock = chainntnfs.BlockEpoch{
		Height:      currentHeight,
		Hash:        currentHash,
		BlockHeader: currentHeader,
	}

	n.wg.Add(2)
	go n.notificationDispatcher()
	go n.handleChainEvents()

	// Set the active flag now that we've completed the full startup.
	atomic.StoreInt32(&n.active, 1)

	return nil
}

// Started returns true if this instance has been started, and false otherwise.
func (n *ChainscanNotifier) Started() bool {
	return atomic.LoadInt32(&n.active) != 0
}

// Stop shuts down the ChainscanNotifier.
func (n *ChainscanNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&n.stopped, 1) != 1 {
		return nil
	}

	chainntnfs.Log.Debug("ChainscanNotifier shutting down")

	// Cancel any outstanding request.
	n.cancelCtx()

	close(n.quit)
	n.wg.Wait()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, epochClient := range n.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	n.txNotifier.TearDown()

	chainntnfs.Log.Info("ChainscanNotifier shut down")

	return nil
}

// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
	hash     *chainhash.Hash
	height   int32
	prevHash *chainhash.Hash
	header   *wire.BlockHeader

	txns []*dcrutil.Tx

	// connect is true if this update is for a newly connected block and
	// false if it is for a disconnected block.
	connect bool
}

// foundAtTip is called by the tipWatcher whenever a watched target matches.
func (n *ChainscanNotifier) foundAtTip(e chainscan.Event, _ chainscan.FindFunc) {
	chainntnfs.Log.Tracef("Found at tip bh %s: %s", e.BlockHash, e)
	n.tipTxsMtx.Lock()
	txs, ok := n.tipWatcherTxs[e.BlockHash]
	if !ok {
		txs = make(map[*wire.MsgTx]chainscan.Event)
		n.tipWatcherTxs[e.BlockHash] = txs
	}
	txs[e.Tx] = e
	n.tipTxsMtx.Unlock()
}

// drainTipWatcherTxs removes and returns, as wrapped dcrutil transactions, all
// tip watcher matches recorded for the given block.
func (n *ChainscanNotifier) drainTipWatcherTxs(blockHash *chainhash.Hash) []*dcrutil.Tx {
	n.tipTxsMtx.Lock()
	txs := n.tipWatcherTxs[*blockHash]
	utxs := make([]*dcrutil.Tx, 0, len(txs))
	for tx, etx := range txs {
		utx := dcrutil.NewTx(tx)
		utx.SetTree(etx.Tree)
		utx.SetIndex(int(etx.TxIndex))
		utxs = append(utxs, utx)
	}

	delete(n.tipWatcherTxs, *blockHash)
	n.tipTxsMtx.Unlock()
	return utxs
}

// handleChainEvents consumes chain events from the tip watcher and converts
// them into filteredBlock updates for the notification dispatcher.
func (n *ChainscanNotifier) handleChainEvents() {
	defer n.wg.Done()

	for {
		var e chainscan.ChainEvent
		select {
		case <-n.ctx.Done():
			return
		case e = <-n.chainEvents:
		}

		fb := &filteredBlock{
			hash:     e.BlockHash(),
			height:   e.BlockHeight(),
			prevHash: e.PrevBlockHash(),
			header:   e.BlockHeader(),
		}

		if _, ok := e.(chainscan.BlockConnectedEvent); ok {
			fb.connect = true
			fb.txns = n.drainTipWatcherTxs(e.BlockHash())
		}

		select {
		case n.chainUpdates <- fb:
		case <-n.ctx.Done():
			return
		}
	}
}
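
// foundAtTip, drainTipWatcherTxs and handleChainEvents form a hand-off
// pipeline: foundAtTip is invoked synchronously by the TipWatcher for every
// match in a block and stashes the match under the block's hash, while
// handleChainEvents later drains those matches when the corresponding
// BlockConnectedEvent arrives. Roughly:
//
//	tip match -> foundAtTip -> tipWatcherTxs[blockHash]
//	BlockConnectedEvent -> drainTipWatcherTxs(blockHash) -> filteredBlock.txns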

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (n *ChainscanNotifier) notificationDispatcher() {
out:
	for {
		select {
		case cancelMsg := <-n.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll lookup the original
				// registration in order to stop the active
				// queue goroutine.
				reg := n.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(n.blockEpochClients[msg.epochID].cancelChan)
				n.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// canceled.
				close(n.blockEpochClients[msg.epochID].epochChan)
				delete(n.blockEpochClients, msg.epochID)
			}

		case registerMsg := <-n.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")

				n.blockEpochClients[msg.epochID] = msg

				// If the client did not provide their best
				// known block, then we'll immediately dispatch
				// a notification for the current tip.
				if msg.bestBlock == nil {
					n.notifyBlockEpochClient(
						msg, n.bestBlock.Height,
						n.bestBlock.Hash,
						n.bestBlock.BlockHeader,
					)

					msg.errorChan <- nil
					continue
				}

				// Otherwise, we'll attempt to deliver the
				// backlog of notifications from their best
				// known block.
				missedBlocks, err := chainntnfs.GetClientMissedBlocks(
					n.chainConn, msg.bestBlock,
					n.bestBlock.Height, n.chainConn.storesReorgs,
				)
				if err != nil {
					msg.errorChan <- err
					continue
				}

				for _, block := range missedBlocks {
					n.notifyBlockEpochClient(
						msg, block.Height, block.Hash, block.BlockHeader,
					)
				}

				msg.errorChan <- nil
			}

		case update := <-n.chainUpdates:
			if update.connect {
				if *update.prevHash != *n.bestBlock.Hash {
					// Handle the case where the notifier
					// missed some blocks from its chain
					// backend.
					chainntnfs.Log.Infof("Missed blocks, " +
						"attempting to catch up")
					newBestBlock, missedBlocks, err :=
						chainntnfs.HandleMissedBlocks(
							n.chainConn,
							n.txNotifier,
							n.bestBlock,
							update.height,
							n.chainConn.storesReorgs,
						)
					if err != nil {
						// Set the bestBlock here in case
						// a catch up partially completed.
						n.bestBlock = newBestBlock
						chainntnfs.Log.Error(err)
						continue
					}

					n.handleMissedBlocks(newBestBlock, missedBlocks)
				}

				if err := n.handleBlockConnected(update); err != nil {
					chainntnfs.Log.Error(err)
				}
				continue
			}

			if update.height != n.bestBlock.Height {
				chainntnfs.Log.Infof("Missed disconnected " +
					"blocks, attempting to catch up")
			}

			newBestBlock, err := chainntnfs.RewindChain(
				n.chainConn, n.txNotifier, n.bestBlock,
				update.height-1,
			)
			if err != nil {
				chainntnfs.Log.Errorf("Unable to rewind chain "+
					"from height %d to height %d: %v",
					n.bestBlock.Height, update.height-1, err)
			}

			// Set the bestBlock here in case a chain rewind
			// partially completed.
			n.bestBlock = newBestBlock

		case <-n.quit:
			break out
		}
	}
	n.wg.Done()
}

// handleMissedBlocks forces the tip watcher to rescan blocks the notifier
// missed and replays them through the regular block-connected path. Errors
// abort the catch-up early; the prevHash mismatch check in the dispatcher
// will trigger another attempt on the next connected block.
func (n *ChainscanNotifier) handleMissedBlocks(newBestBlock chainntnfs.BlockEpoch, missed []chainntnfs.BlockEpoch) {
	// Track the previous block hash to fill in the data.
	prevHash := newBestBlock.Hash

	for _, m := range missed {
		bh, cfkey, filter, err := n.chainConn.c.GetCFilter(n.ctx, m.Height)
		if err != nil {
			return
		}

		if *bh != *m.Hash {
			chainntnfs.Log.Warnf("Missed block hash (%s) different than "+
				"mainchain block hash (%s)", m.Hash, bh)
		}

		header, err := n.chainConn.c.GetBlockHeader(n.ctx, bh)
		if err != nil {
			return
		}

		e := chainscan.BlockConnectedEvent{
			Height:   m.Height,
			Hash:     *bh,
			CFKey:    cfkey,
			Filter:   filter,
			PrevHash: *prevHash,
			Header:   header,
		}
		if err := n.tipWatcher.ForceRescan(n.ctx, &e); err != nil {
			return
		}

		fb := &filteredBlock{
			hash:     e.BlockHash(),
			height:   e.BlockHeight(),
			prevHash: prevHash,
			txns:     n.drainTipWatcherTxs(e.BlockHash()),
			header:   e.BlockHeader(),
		}
		prevHash = bh

		if err := n.handleBlockConnected(fb); err != nil {
			chainntnfs.Log.Error(err)
		}
	}
}
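
// The catch-up above effectively replays the connected-block pipeline for
// every block the notifier never saw:
//
//	GetCFilter -> BlockConnectedEvent -> ForceRescan -> drainTipWatcherTxs -> handleBlockConnected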

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
func (n *ChainscanNotifier) handleBlockConnected(newBlock *filteredBlock) error {
	// We'll then extend the txNotifier's height with the information of
	// this new block, which will handle all of the notification logic for
	// us.
	newBlockHash := newBlock.hash
	newBlockHeight := uint32(newBlock.height)
	err := n.txNotifier.ConnectTip(
		newBlockHash, newBlockHeight, newBlock.txns,
	)
	if err != nil {
		return fmt.Errorf("unable to connect tip: %v", err)
	}

	chainntnfs.Log.Infof("New block: height=%v, hash=%v, txs=%d", newBlockHeight,
		newBlockHash, len(newBlock.txns))

	// Now that we've guaranteed the new block extends the txNotifier's
	// current tip, we'll proceed to dispatch notifications to all of our
	// registered clients who have had notifications fulfilled. Before
	// doing so, we'll make sure to update our in-memory state in order to
	// satisfy any client requests based upon the new block.
	n.bestBlock.Hash = newBlockHash
	n.bestBlock.Height = int32(newBlockHeight)
	n.bestBlock.BlockHeader = newBlock.header

	n.notifyBlockEpochs(int32(newBlockHeight), newBlockHash, newBlock.header)

	// Delay spend/confirm notifications until the block epoch ntfn has
	// (likely) been processed. This helps prevent classes of errors that
	// happen due to racing the spend/conf ntfn and tracking the current
	// block height in some subsystems.
	select {
	case <-time.After(5 * time.Millisecond):
	case <-n.quit:
	}

	return n.txNotifier.NotifyHeight(newBlockHeight)
}

// notifyBlockEpochs notifies all registered block epoch clients of the newly
// connected block to the main chain.
func (n *ChainscanNotifier) notifyBlockEpochs(newHeight int32, newHash *chainhash.Hash,
	newHeader *wire.BlockHeader) {

	for _, client := range n.blockEpochClients {
		n.notifyBlockEpochClient(client, newHeight, newHash, newHeader)
	}
}

// notifyBlockEpochClient sends a registered block epoch client a notification
// about a specific block.
func (n *ChainscanNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
	height int32, hash *chainhash.Hash, header *wire.BlockHeader) {

	epoch := &chainntnfs.BlockEpoch{
		Height:      height,
		Hash:        hash,
		BlockHeader: header,
	}

	select {
	case epochClient.epochQueue.ChanIn() <- epoch:
	case <-epochClient.cancelChan:
	case <-n.quit:
	}
}
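
// Delivery is decoupled from the dispatcher: the epoch is handed to the
// client's ConcurrentQueue above, and the per-client goroutine started in
// RegisterBlockEpochNtfn forwards queued epochs to epochChan, so a slow
// client cannot stall notifyBlockEpochs.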

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint/output script has been spent by a transaction on-chain. When
// intending to be notified of the spend of an output script, a nil outpoint
// must be used. The heightHint should represent the earliest height in the
// chain of the transaction that spent the outpoint/output script.
//
// Once a spend has been detected, the details of the spending event will be
// sent across the 'Spend' channel.
func (n *ChainscanNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

	// Register the spend notification with the TxNotifier. A non-nil
	// HistoricalDispatch will be returned if we are required to perform a
	// manual scan for the spend. Otherwise the notifier will begin
	// watching at tip for the outpoint/script to be spent.
	ntfn, err := n.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
	if err != nil {
		return nil, err
	}

	// Normalize to the zero outpoint so we don't trigger panics.
	if outpoint == nil {
		outpoint = &chainntnfs.ZeroOutPoint
	}

	// Set the TipWatcher to scan for spends of this output.
	var tipTarget, histTarget chainscan.Target
	scriptVersion := uint16(0)
	switch {
	case *outpoint == chainntnfs.ZeroOutPoint:
		tipTarget = chainscan.SpentScript(scriptVersion, pkScript)
		histTarget = chainscan.SpentScript(scriptVersion, pkScript)
	default:
		tipTarget = chainscan.SpentOutPoint(*outpoint, scriptVersion, pkScript)
		histTarget = chainscan.SpentOutPoint(*outpoint, scriptVersion, pkScript)
	}

	startHeightChan := make(chan int32)

	// TODO: handle cancelling this target after it is found and the
	// txnotifier's reorgSafetyLimit has been reached.
	n.tipWatcher.Find(
		tipTarget,
		chainscan.WithFoundCallback(n.foundAtTip),
		chainscan.WithStartWatchHeightChan(startHeightChan),
		// TODO: Add and verify safety of using
		// WithStartHeight(ntfn.Height)
	)

	// Determine when the TipWatcher actually started watching for this tx.
	var startWatchHeight int32
	select {
	case <-n.ctx.Done():
		return nil, n.ctx.Err()
	case startWatchHeight = <-startHeightChan:
	}

	// We can only exit early if the TipWatcher and the TxNotifier were in
	// sync. Otherwise we might need to force a historical search to
	// prevent any missed txs.
	if ntfn.HistoricalDispatch == nil && uint32(startWatchHeight) <= ntfn.Height {
		return ntfn.Event, nil
	}

	if ntfn.HistoricalDispatch == nil {
		// Ignore the error since problems would have been returned by
		// RegisterSpend().
		spendReq, _ := chainntnfs.NewSpendRequest(outpoint, pkScript)

		// The TipWatcher and txnotifier were out of sync, so even
		// though originally a historical search was not needed, we
		// force one to ensure there are no gaps in our search history.
		ntfn.HistoricalDispatch = &chainntnfs.HistoricalSpendDispatch{
			SpendRequest: spendReq,
			StartHeight:  ntfn.Height,
			EndHeight:    uint32(startWatchHeight),
		}
		chainntnfs.Log.Infof("Forcing historical search for %s between %d and %d",
			spendReq, ntfn.HistoricalDispatch.StartHeight,
			ntfn.HistoricalDispatch.EndHeight)
	} else if uint32(startWatchHeight) > ntfn.HistoricalDispatch.EndHeight {
		// We started watching after the txnotifier's currentHeight, so
		// update the historical search to include the extra blocks.
		chainntnfs.Log.Infof("Modifying historical search EndHeight "+
			"for %s from %d to %d", ntfn.HistoricalDispatch.SpendRequest,
			ntfn.HistoricalDispatch.EndHeight,
			startWatchHeight)
		ntfn.HistoricalDispatch.EndHeight = uint32(startWatchHeight)
	}

	// Handle the historical scan in a goroutine.
	go func() {
		var details *chainntnfs.SpendDetail
		completeChan := make(chan struct{})
		cancelChan := make(chan struct{})
		foundCb := func(e chainscan.Event, _ chainscan.FindFunc) {
			details = &chainntnfs.SpendDetail{
				SpentOutPoint:     outpoint,
				SpenderTxHash:     e.Tx.CachedTxHash(),
				SpendingTx:        e.Tx,
				SpenderInputIndex: uint32(e.Index),
				SpendingHeight:    e.BlockHeight,
			}
			close(cancelChan)
		}
		hist := ntfn.HistoricalDispatch
		n.historical.Find(
			histTarget,
			chainscan.WithFoundCallback(foundCb),
			chainscan.WithCompleteChan(completeChan),
			chainscan.WithCancelChan(cancelChan),
			chainscan.WithStartHeight(int32(hist.StartHeight)),
			chainscan.WithEndHeight(int32(hist.EndHeight)),
			// TODO: force to notify during the historical only if
			// the confirmation was approved or on the last block?
			// This is to handle
			// https://github.com/decred/dcrlnd/issues/69
		)

		select {
		case <-n.quit:
			close(cancelChan)
			return
		case <-completeChan:
		case <-cancelChan:
		}

		// We will invoke UpdateSpendDetails even if none were found.
		// This allows the notifier to begin safely updating the height
		// hint cache at tip, since any pending rescans have now
		// completed.
		err := n.txNotifier.UpdateSpendDetails(hist.SpendRequest, details)
		if err != nil {
			chainntnfs.Log.Error(err)
		}
	}()

	return ntfn.Event, nil
}
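
// A registration sketch (op, pkScript and hint are assumed to come from the
// caller's channel state; error handling elided):
//
//	spendEvent, err := notifier.RegisterSpendNtfn(&op, pkScript, hint)
//	if err != nil {
//		return err
//	}
//	select {
//	case details := <-spendEvent.Spend:
//		// details.SpendingTx spent the watched outpoint.
//	case <-quit:
//	}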

// RegisterConfirmationsNtfn registers an intent to be notified once the target
// txid/output script has reached numConfs confirmations on-chain. When
// intending to be notified of the confirmation of an output script, a nil txid
// must be used. The heightHint should represent the earliest height at which
// the txid/output script could have been included in the chain.
//
// Progress on the number of confirmations left can be read from the 'Updates'
// channel. Once it has reached all of its confirmations, a notification will
// be sent across the 'Confirmed' channel.
func (n *ChainscanNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	pkScript []byte,
	numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {

	// Register the conf notification with the TxNotifier. A non-nil
	// HistoricalDispatch will be returned if we are required to perform a
	// manual scan for the confirmation. Otherwise the notifier will begin
	// watching at tip for the transaction to confirm.
	ntfn, err := n.txNotifier.RegisterConf(
		txid, pkScript, numConfs, heightHint,
	)
	if err != nil {
		return nil, err
	}

	// Set the TipWatcher to scan for this confirmation.
	var tipTarget, histTarget chainscan.Target
	scriptVersion := uint16(0)
	switch {
	case txid == nil || *txid == chainntnfs.ZeroHash:
		tipTarget = chainscan.ConfirmedScript(scriptVersion, pkScript)
		histTarget = chainscan.ConfirmedScript(scriptVersion, pkScript)
	default:
		tipTarget = chainscan.ConfirmedTransaction(*txid, scriptVersion, pkScript)
		histTarget = chainscan.ConfirmedTransaction(*txid, scriptVersion, pkScript)
	}

	startHeightChan := make(chan int32)

	// TODO: handle cancelling this target after it is found and the
	// txnotifier's reorgSafetyLimit has been reached.
	n.tipWatcher.Find(
		tipTarget,
		chainscan.WithFoundCallback(n.foundAtTip),
		chainscan.WithStartWatchHeightChan(startHeightChan),
		// TODO: Add and verify safety of using
		// WithStartHeight(ntfn.Height)
	)

	// Determine when the TipWatcher actually started watching for this tx.
	var startWatchHeight int32
	select {
	case <-n.ctx.Done():
		return nil, n.ctx.Err()
	case startWatchHeight = <-startHeightChan:
	}

	// We can only exit early if the TipWatcher and the TxNotifier were in
	// sync. Otherwise we might need to force a historical search to
	// prevent any missed txs.
	if ntfn.HistoricalDispatch == nil && uint32(startWatchHeight) <= ntfn.Height {
		return ntfn.Event, nil
	}

	if ntfn.HistoricalDispatch == nil {
		// Ignore errors since they would've been triggered by
		// RegisterConf() above.
		confReq, _ := chainntnfs.NewConfRequest(txid, pkScript)

		// The TipWatcher and txnotifier were out of sync, so even
		// though originally a historical search was not needed, we
		// force one to ensure there are no gaps in our search history.
		ntfn.HistoricalDispatch = &chainntnfs.HistoricalConfDispatch{
			ConfRequest: confReq,
			StartHeight: ntfn.Height,
			EndHeight:   uint32(startWatchHeight),
		}
		chainntnfs.Log.Infof("Forcing historical search for %s between %d and %d",
			confReq, ntfn.HistoricalDispatch.StartHeight,
			ntfn.HistoricalDispatch.EndHeight)
	} else if uint32(startWatchHeight) > ntfn.HistoricalDispatch.EndHeight {
		// We started watching after the txnotifier's currentHeight, so
		// update the historical search to include the extra blocks.
		chainntnfs.Log.Infof("Modifying historical search EndHeight "+
			"for %s from %d to %d", ntfn.HistoricalDispatch.ConfRequest,
			ntfn.HistoricalDispatch.EndHeight,
			startWatchHeight)
		ntfn.HistoricalDispatch.EndHeight = uint32(startWatchHeight)
	}

	// Handle the historical scan in a goroutine.
	go func() {
		var txconf *chainntnfs.TxConfirmation
		completeChan := make(chan struct{})
		cancelChan := make(chan struct{})
		foundCb := func(e chainscan.Event, _ chainscan.FindFunc) {
			txconf = &chainntnfs.TxConfirmation{
				Tx:          e.Tx,
				BlockHash:   &e.BlockHash,
				BlockHeight: uint32(e.BlockHeight),
				TxIndex:     uint32(e.TxIndex),
			}
			close(cancelChan)
		}
		hist := ntfn.HistoricalDispatch
		n.historical.Find(
			histTarget,
			chainscan.WithFoundCallback(foundCb),
			chainscan.WithCompleteChan(completeChan),
			chainscan.WithCancelChan(cancelChan),
			chainscan.WithStartHeight(int32(hist.StartHeight)),
			chainscan.WithEndHeight(int32(hist.EndHeight)),
			// TODO: force to notify during the historical only if
			// the confirmation was approved or on the last block?
			// This is to handle
			// https://github.com/decred/dcrlnd/issues/69
		)

		select {
		case <-n.quit:
			close(cancelChan)
			return
		case <-cancelChan:
		case <-completeChan:
		}

		// We will invoke UpdateConfDetails even if none were found.
		// This allows the notifier to begin safely updating the height
		// hint cache at tip, since any pending rescans have now
		// completed.
		err := n.txNotifier.UpdateConfDetails(
			hist.ConfRequest, txconf,
		)
		if err != nil {
			chainntnfs.Log.Error(err)
		}
	}()

	return ntfn.Event, nil
}
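
// A registration sketch (txid, pkScript and hint are assumed to come from the
// caller; here waiting for 6 confirmations, error handling elided):
//
//	confEvent, err := notifier.RegisterConfirmationsNtfn(&txid, pkScript, 6, hint)
//	if err != nil {
//		return err
//	}
//	select {
//	case conf := <-confEvent.Confirmed:
//		// conf.BlockHeight is the height of the confirming block.
//	case <-quit:
//	}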

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	epochID uint64

	epochChan chan *chainntnfs.BlockEpoch

	epochQueue *queue.ConcurrentQueue

	bestBlock *chainntnfs.BlockEpoch

	errorChan chan error

	cancelChan chan struct{}

	wg sync.WaitGroup
}

// epochCancel is a message sent to the ChainscanNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up.
// If they do not provide one, then a notification will be dispatched
// immediately for the current tip of the chain upon a successful registration.
func (n *ChainscanNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	reg := &blockEpochRegistration{
		epochQueue: queue.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}

	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-n.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-n.quit:
				return
			}
		}
	}()

	select {
	case <-n.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification")
	case n.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit the epoch cancellation to the
				// notification dispatcher.
				select {
				case n.notificationCancels <- cancel:
					// Cancellation is being handled, drain
					// the epoch channel until it is closed
					// before yielding to the caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-n.quit:
							return
						}
					}
				case <-n.quit:
				}
			},
		}, nil
	}
}
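
// A subscription sketch (passing nil requests an immediate notification for
// the current tip; error handling elided):
//
//	epochEvent, err := notifier.RegisterBlockEpochNtfn(nil)
//	if err != nil {
//		return err
//	}
//	defer epochEvent.Cancel()
//	for epoch := range epochEvent.Epochs {
//		chainntnfs.Log.Debugf("new tip %v at height %d",
//			epoch.Hash, epoch.Height)
//	}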