// Copyright (c) 2018-2021 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.

package spv

import (
	"context"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"decred.org/dcrwallet/v3/errors"
	"decred.org/dcrwallet/v3/lru"
	"decred.org/dcrwallet/v3/p2p"
	"decred.org/dcrwallet/v3/validate"
	"decred.org/dcrwallet/v3/wallet"
	"github.com/decred/dcrd/addrmgr/v2"
	"github.com/decred/dcrd/blockchain/stake/v5"
	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrec/secp256k1/v4/schnorr"
	"github.com/decred/dcrd/gcs/v4/blockcf2"
	"github.com/decred/dcrd/txscript/v4"
	"github.com/decred/dcrd/wire"
	"golang.org/x/sync/errgroup"
)

// reqSvcs defines the services that must be supported by outbounded peers.
// After fetching more addresses (if needed), peers are disconnected from if
// they do not provide each of these services.
const reqSvcs = wire.SFNodeNetwork

// Syncer implements wallet synchronization services over the Decred wire
// protocol using Simplified Payment Verification (SPV) with compact filters.
type Syncer struct {
	// atomics
	atomicCatchUpTryLock uint32 // CAS (entered=1) to perform discovery/rescan
	atomicWalletSynced   uint32 // CAS (synced=1) when wallet syncing complete

	wallet *wallet.Wallet
	lp     *p2p.LocalPeer

	// Protected by atomicCatchUpTryLock
	discoverAccounts bool
	loadedFilters    bool

	// persistentPeers, when non-empty, replaces DNS seeding and peer
	// discovery with a fixed set of peer addresses.
	persistentPeers []string

	// connectingRemotes tracks addresses with in-flight connection
	// attempts; remotes holds fully established peers.  Both are keyed by
	// the peer's address key and guarded by remotesMu.
	connectingRemotes map[string]struct{}
	remotes           map[string]*p2p.RemotePeer
	remotesMu         sync.Mutex

	// Data filters
	//
	// TODO: Replace precise rescan filter with wallet db accesses to avoid
	// needing to keep all relevant data in memory.
	rescanFilter *wallet.RescanFilter
	filterData   blockcf2.Entries
	filterMu     sync.Mutex

	// seenTxs records hashes of received inventoried transactions.  Once a
	// transaction is fetched and processed from one peer, the hash is added to
	// this cache to avoid fetching it again from other peers that announce the
	// transaction.
	seenTxs lru.Cache

	// Sidechain management
	sidechains  wallet.SidechainForest
	sidechainMu sync.Mutex

	// currentLocators caches the block locators used when requesting
	// headers; locatorGeneration increments whenever they are regenerated.
	currentLocators   []*chainhash.Hash
	locatorGeneration uint
	locatorMu         sync.Mutex

	// Holds all potential callbacks used to notify clients
	notifications *Notifications

	// Mempool for non-wallet-relevant transactions.
	mempool     sync.Map // k=chainhash.Hash v=*wire.MsgTx
	mempoolAdds chan *chainhash.Hash
}

// Notifications struct to contain all of the upcoming callbacks that will
// be used to update the rpc streams for syncing.
type Notifications struct {
	Synced                       func(sync bool)
	PeerConnected                func(peerCount int32, addr string)
	PeerDisconnected             func(peerCount int32, addr string)
	FetchMissingCFiltersStarted  func()
	FetchMissingCFiltersProgress func(startCFiltersHeight, endCFiltersHeight int32)
	FetchMissingCFiltersFinished func()
	FetchHeadersStarted          func()
	FetchHeadersProgress         func(lastHeaderHeight int32, lastHeaderTime int64)
	FetchHeadersFinished         func()
	DiscoverAddressesStarted     func()
	DiscoverAddressesFinished    func()
	RescanStarted                func()
	RescanProgress               func(rescannedThrough int32)
	RescanFinished               func()

	// MempoolTxs is called whenever new relevant unmined transactions are
	// observed and saved.
	MempoolTxs func(txs []*wire.MsgTx)

	// TipChanged is called when the main chain tip block changes.
	// When reorgDepth is zero, the new block is a direct child of the previous tip.
	// If non-zero, one or more blocks described by the parameter were removed from
	// the previous main chain.
	// txs contains all relevant transactions mined in each attached block in
	// unspecified order.
	// reorgDepth is guaranteed to be non-negative.
	TipChanged func(tip *wire.BlockHeader, reorgDepth int32, txs []*wire.MsgTx)
}

// NewSyncer creates a Syncer that will sync the wallet using SPV.
func NewSyncer(w *wallet.Wallet, lp *p2p.LocalPeer) *Syncer {
	return &Syncer{
		wallet:            w,
		discoverAccounts:  !w.Locked(),
		connectingRemotes: make(map[string]struct{}),
		remotes:           make(map[string]*p2p.RemotePeer),
		rescanFilter:      wallet.NewRescanFilter(nil, nil),
		seenTxs:           lru.NewCache(2000),
		lp:                lp,
		mempoolAdds:       make(chan *chainhash.Hash),
	}
}

// SetPersistentPeers sets each peer as a persistent peer and disables DNS
// seeding and peer discovery.
func (s *Syncer) SetPersistentPeers(peers []string) {
	s.persistentPeers = peers
}

// SetNotifications sets the possible various callbacks that are used
// to notify interested parties to the syncing progress.
func (s *Syncer) SetNotifications(ntfns *Notifications) {
	s.notifications = ntfns
}

// DisableDiscoverAccounts disables account discovery.  This has an effect only
// if called before the main Run() executes the account discovery process.
func (s *Syncer) DisableDiscoverAccounts() {
	s.discoverAccounts = false
}

// synced checks the atomic that controls wallet syncness and if previously
// unsynced, updates to synced and notifies the callback, if set.
func (s *Syncer) synced() {
	if atomic.CompareAndSwapUint32(&s.atomicWalletSynced, 0, 1) &&
		s.notifications != nil &&
		s.notifications.Synced != nil {
		s.notifications.Synced(true)
	}
}

// Synced returns whether this wallet is completely synced to the network.
159 func (s *Syncer) Synced() bool { 160 return atomic.LoadUint32(&s.atomicWalletSynced) == 1 161 } 162 163 // EstimateMainChainTip returns an estimated height for the current tip of the 164 // blockchain. The estimate is made by comparing the initial height reported by 165 // all connected peers and the wallet's current tip. The highest of these values 166 // is estimated to be the mainchain's tip height. 167 func (s *Syncer) EstimateMainChainTip(ctx context.Context) int32 { 168 _, chainTip := s.wallet.MainChainTip(ctx) 169 s.forRemotes(func(rp *p2p.RemotePeer) error { 170 if rp.InitialHeight() > chainTip { 171 chainTip = rp.InitialHeight() 172 } 173 return nil 174 }) 175 return chainTip 176 } 177 178 // GetRemotePeers returns a map of connected remote peers. 179 func (s *Syncer) GetRemotePeers() map[string]*p2p.RemotePeer { 180 s.remotesMu.Lock() 181 defer s.remotesMu.Unlock() 182 183 remotes := make(map[string]*p2p.RemotePeer, len(s.remotes)) 184 for k, rp := range s.remotes { 185 remotes[k] = rp 186 } 187 return remotes 188 } 189 190 // unsynced checks the atomic that controls wallet syncness and if previously 191 // synced, updates to unsynced and notifies the callback, if set. 192 func (s *Syncer) unsynced() { 193 if atomic.CompareAndSwapUint32(&s.atomicWalletSynced, 1, 0) && 194 s.notifications != nil && 195 s.notifications.Synced != nil { 196 s.notifications.Synced(false) 197 } 198 } 199 200 // peerConnected updates the notification for peer count, if set. 201 func (s *Syncer) peerConnected(remotesCount int, addr string) { 202 if s.notifications != nil && s.notifications.PeerConnected != nil { 203 s.notifications.PeerConnected(int32(remotesCount), addr) 204 } 205 } 206 207 // peerDisconnected updates the notification for peer count, if set. 
208 func (s *Syncer) peerDisconnected(remotesCount int, addr string) { 209 if s.notifications != nil && s.notifications.PeerDisconnected != nil { 210 s.notifications.PeerDisconnected(int32(remotesCount), addr) 211 } 212 } 213 214 func (s *Syncer) fetchMissingCfiltersStart() { 215 if s.notifications != nil && s.notifications.FetchMissingCFiltersStarted != nil { 216 s.notifications.FetchMissingCFiltersStarted() 217 } 218 } 219 220 func (s *Syncer) fetchMissingCfiltersProgress(startMissingCFilterHeight, endMissinCFilterHeight int32) { 221 if s.notifications != nil && s.notifications.FetchMissingCFiltersProgress != nil { 222 s.notifications.FetchMissingCFiltersProgress(startMissingCFilterHeight, endMissinCFilterHeight) 223 } 224 } 225 226 func (s *Syncer) fetchMissingCfiltersFinished() { 227 if s.notifications != nil && s.notifications.FetchMissingCFiltersFinished != nil { 228 s.notifications.FetchMissingCFiltersFinished() 229 } 230 } 231 232 func (s *Syncer) fetchHeadersStart() { 233 if s.notifications != nil && s.notifications.FetchHeadersStarted != nil { 234 s.notifications.FetchHeadersStarted() 235 } 236 } 237 238 func (s *Syncer) fetchHeadersProgress(lastHeader *wire.BlockHeader) { 239 if s.notifications != nil && s.notifications.FetchHeadersProgress != nil { 240 s.notifications.FetchHeadersProgress(int32(lastHeader.Height), lastHeader.Timestamp.Unix()) 241 } 242 } 243 244 func (s *Syncer) fetchHeadersFinished() { 245 if s.notifications != nil && s.notifications.FetchHeadersFinished != nil { 246 s.notifications.FetchHeadersFinished() 247 } 248 } 249 func (s *Syncer) discoverAddressesStart() { 250 if s.notifications != nil && s.notifications.DiscoverAddressesStarted != nil { 251 s.notifications.DiscoverAddressesStarted() 252 } 253 } 254 255 func (s *Syncer) discoverAddressesFinished() { 256 if s.notifications != nil && s.notifications.DiscoverAddressesFinished != nil { 257 s.notifications.DiscoverAddressesFinished() 258 } 259 } 260 261 func (s *Syncer) 
rescanStart() { 262 if s.notifications != nil && s.notifications.RescanStarted != nil { 263 s.notifications.RescanStarted() 264 } 265 } 266 267 func (s *Syncer) rescanProgress(rescannedThrough int32) { 268 if s.notifications != nil && s.notifications.RescanProgress != nil { 269 s.notifications.RescanProgress(rescannedThrough) 270 } 271 } 272 273 func (s *Syncer) rescanFinished() { 274 if s.notifications != nil && s.notifications.RescanFinished != nil { 275 s.notifications.RescanFinished() 276 } 277 } 278 279 func (s *Syncer) mempoolTxs(txs []*wire.MsgTx) { 280 if s.notifications != nil && s.notifications.MempoolTxs != nil { 281 s.notifications.MempoolTxs(txs) 282 } 283 } 284 285 func (s *Syncer) tipChanged(tip *wire.BlockHeader, reorgDepth int32, matchingTxs map[chainhash.Hash][]*wire.MsgTx) { 286 if s.notifications != nil && s.notifications.TipChanged != nil { 287 var txs []*wire.MsgTx 288 for _, matching := range matchingTxs { 289 txs = append(txs, matching...) 290 } 291 s.notifications.TipChanged(tip, reorgDepth, txs) 292 } 293 } 294 295 // setRequiredHeight sets the required height a peer must advertise as their 296 // last height. Initial height 6 blocks below the current chain tip height 297 // result in a handshake error. 298 func (s *Syncer) setRequiredHeight(tipHeight int32) { 299 requireHeight := tipHeight 300 if requireHeight > 6 { 301 requireHeight -= 6 302 } 303 s.lp.RequirePeerHeight(requireHeight) 304 } 305 306 // Run synchronizes the wallet, returning when synchronization fails or the 307 // context is cancelled. 
func (s *Syncer) Run(ctx context.Context) error {
	tipHash, tipHeight := s.wallet.MainChainTip(ctx)
	s.setRequiredHeight(tipHeight)
	rescanPoint, err := s.wallet.RescanPoint(ctx)
	if err != nil {
		return err
	}
	log.Infof("Headers synced through block %v height %d", &tipHash, tipHeight)
	if rescanPoint != nil {
		h, err := s.wallet.BlockHeader(ctx, rescanPoint)
		if err != nil {
			return err
		}
		// The rescan point is the first block that does not have synced
		// transactions, so we are synced with the parent.
		log.Infof("Transactions synced through block %v height %d", &h.PrevBlock, h.Height-1)
	} else {
		log.Infof("Transactions synced through block %v height %d", &tipHash, tipHeight)
	}

	locators, err := s.wallet.BlockLocators(ctx, nil)
	if err != nil {
		return err
	}
	s.currentLocators = locators

	s.lp.AddrManager().Start()
	defer func() {
		err := s.lp.AddrManager().Stop()
		if err != nil {
			log.Errorf("Failed to cleanly stop address manager: %v", err)
		}
	}()

	// Seed peers over DNS when not disabled by persistent peers.
	if len(s.persistentPeers) == 0 {
		s.lp.SeedPeers(ctx, wire.SFNodeNetwork)
	}

	// Start background handlers to read received messages from remote peers
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error { return s.receiveGetData(ctx) })
	g.Go(func() error { return s.receiveInv(ctx) })
	g.Go(func() error { return s.receiveHeadersAnnouncements(ctx) })
	s.lp.AddHandledMessages(p2p.MaskGetData | p2p.MaskInv)

	// Either maintain connections to the configured persistent peers, or
	// discover and connect to candidate peers.
	if len(s.persistentPeers) != 0 {
		for i := range s.persistentPeers {
			raddr := s.persistentPeers[i]
			g.Go(func() error { return s.connectToPersistent(ctx, raddr) })
		}
	} else {
		g.Go(func() error { return s.connectToCandidates(ctx) })
	}

	g.Go(func() error { return s.handleMempool(ctx) })

	s.wallet.SetNetworkBackend(s)
	defer s.wallet.SetNetworkBackend(nil)

	// Wait until cancellation or a handler errors.
	return g.Wait()
}

// peerCandidate returns a peer address that we shall attempt to connect to.
// Only peers not already remotes or in the process of connecting are returned.
// Any address returned is marked in s.connectingRemotes before returning.
func (s *Syncer) peerCandidate(svcs wire.ServiceFlag) (*addrmgr.NetAddress, error) {
	// Try to obtain peer candidates at random, decreasing the requirements
	// as more tries are performed.
	for tries := 0; tries < 100; tries++ {
		kaddr := s.lp.AddrManager().GetAddress()
		if kaddr == nil {
			break
		}
		na := kaddr.NetAddress()

		k := na.Key()
		s.remotesMu.Lock()
		_, isConnecting := s.connectingRemotes[k]
		_, isRemote := s.remotes[k]

		switch {
		// Skip peer if already connected, or in process of connecting
		// TODO: this should work with network blocks, not exact addresses.
		case isConnecting || isRemote:
			fallthrough
		// Only allow recent nodes (10mins) after we failed 30 times
		case tries < 30 && time.Since(kaddr.LastAttempt()) < 10*time.Minute:
			fallthrough
		// Skip peers without matching service flags for the first 50 tries.
		case tries < 50 && kaddr.NetAddress().Services&svcs != svcs:
			s.remotesMu.Unlock()
			continue
		}

		s.connectingRemotes[k] = struct{}{}
		s.remotesMu.Unlock()

		return na, nil
	}
	return nil, errors.New("no addresses")
}

// connectToPersistent maintains a connection to a single persistent peer,
// reconnecting (with a 5s backoff) whenever the peer is lost, until the
// context is cancelled.
func (s *Syncer) connectToPersistent(ctx context.Context, raddr string) error {
	for {
		func() {
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			rp, err := s.lp.ConnectOutbound(ctx, raddr, reqSvcs)
			if err != nil {
				if ctx.Err() == nil {
					log.Errorf("Peering attempt failed: %v", err)
				}
				return
			}
			log.Infof("New peer %v %v %v", raddr, rp.UA(), rp.Services())

			k := rp.NA().Key()
			s.remotesMu.Lock()
			s.remotes[k] = rp
			n := len(s.remotes)
			s.remotesMu.Unlock()
			s.peerConnected(n, k)

			// Run the initial sync in the background; rp.Err blocks
			// until the peer connection terminates.
			wait := make(chan struct{})
			go func() {
				err := s.startupSync(ctx, rp)
				if err != nil {
					rp.Disconnect(err)
				}
				wait <- struct{}{}
			}()

			err = rp.Err()
			s.remotesMu.Lock()
			delete(s.remotes, k)
			n = len(s.remotes)
			s.remotesMu.Unlock()
			s.peerDisconnected(n, k)
			<-wait
			if ctx.Err() != nil {
				return
			}
			log.Warnf("Lost peer %v: %v", raddr, err)
		}()

		if err := ctx.Err(); err != nil {
			return err
		}

		time.Sleep(5 * time.Second)
	}
}

// connectToCandidates discovers and maintains outbound connections to up to 8
// candidate peers concurrently (bounded by the sem channel), until the context
// is cancelled.
func (s *Syncer) connectToCandidates(ctx context.Context) error {
	var wg sync.WaitGroup
	defer wg.Wait()

	sem := make(chan struct{}, 8)
	for {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		select {
		case sem <- struct{}{}:
		case <-ctx.Done():
			return ctx.Err()
		}
		na, err := s.peerCandidate(reqSvcs)
		if err != nil {
			// No candidates available; release the slot and retry
			// after a short delay.
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(5 * time.Second):
				<-sem
				continue
			}
		}

		wg.Add(1)
		go func() {
			ctx, cancel := context.WithCancel(ctx)
			defer func() {
				cancel()
				wg.Done()
				<-sem
			}()

			// Make outbound connections to remote peers.
			raddr := na.String()
			rp, err := s.lp.ConnectOutbound(ctx, raddr, reqSvcs)
			if err != nil {
				s.remotesMu.Lock()
				delete(s.connectingRemotes, raddr)
				s.remotesMu.Unlock()
				if ctx.Err() == nil {
					log.Warnf("Peering attempt failed: %v", err)
				}
				return
			}
			log.Infof("New peer %v %v %v", raddr, rp.UA(), rp.Services())

			// Promote from connecting to connected.
			s.remotesMu.Lock()
			delete(s.connectingRemotes, raddr)
			s.remotes[raddr] = rp
			n := len(s.remotes)
			s.remotesMu.Unlock()
			s.peerConnected(n, raddr)

			wait := make(chan struct{})
			go func() {
				err := s.startupSync(ctx, rp)
				if err != nil {
					rp.Disconnect(err)
				}
				wait <- struct{}{}
			}()

			err = rp.Err()
			if ctx.Err() != context.Canceled {
				log.Warnf("Lost peer %v: %v", raddr, err)
			}

			<-wait
			s.remotesMu.Lock()
			delete(s.remotes, raddr)
			n = len(s.remotes)
			s.remotesMu.Unlock()
			s.peerDisconnected(n, raddr)
		}()
	}
}

// forRemotes invokes f, under the remotes mutex, for each connected remote
// peer, stopping early on the first error.  Returns a NoPeers error when no
// peers are connected.
func (s *Syncer) forRemotes(f func(rp *p2p.RemotePeer) error) error {
	defer s.remotesMu.Unlock()
	s.remotesMu.Lock()
	if len(s.remotes) == 0 {
		return errors.E(errors.NoPeers)
	}
	for _, rp := range s.remotes {
		err := f(rp)
		if err != nil {
			return err
		}
	}
	return nil
}

// pickRemote returns the first connected remote peer for which pick returns
// true, or a NoPeers error when none matches.
func (s *Syncer) pickRemote(pick func(*p2p.RemotePeer) bool) (*p2p.RemotePeer, error) {
	defer s.remotesMu.Unlock()
	s.remotesMu.Lock()

	for _, rp := range s.remotes {
		if pick(rp) {
			return rp, nil
		}
	}
	return nil, errors.E(errors.NoPeers)
}

// receiveGetData handles all received getdata requests from peers.  An inv
// message declaring knowledge of the data must have been previously sent to the
// peer, or a notfound message reports the data as missing.  Only transactions
// may be queried by a peer.
func (s *Syncer) receiveGetData(ctx context.Context) error {
	var wg sync.WaitGroup
	for {
		rp, msg, err := s.lp.ReceiveGetData(ctx)
		if err != nil {
			wg.Wait()
			return err
		}
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Ensure that the data was (recently) announced using an inv.
			var txHashes []*chainhash.Hash
			var notFound []*wire.InvVect
			for _, inv := range msg.InvList {
				if !rp.InvsSent().Contains(inv.Hash) {
					notFound = append(notFound, inv)
					continue
				}
				switch inv.Type {
				case wire.InvTypeTx:
					txHashes = append(txHashes, &inv.Hash)
				default:
					notFound = append(notFound, inv)
				}
			}

			// Search for requested transactions
			var foundTxs []*wire.MsgTx
			if len(txHashes) != 0 {
				var missing []*wire.InvVect
				var err error
				foundTxs, missing, err = s.wallet.GetTransactionsByHashes(ctx, txHashes)
				if err != nil && !errors.Is(err, errors.NotExist) {
					log.Warnf("Failed to look up transactions for getdata reply to peer %v: %v",
						rp.RemoteAddr(), err)
					return
				}

				// For the missing ones, attempt to search in
				// the non-wallet-relevant syncer mempool.
				for _, miss := range missing {
					if v, ok := s.mempool.Load(miss.Hash); ok {
						tx := v.(*wire.MsgTx)
						foundTxs = append(foundTxs, tx)
						continue
					}
					notFound = append(notFound, miss)
				}
			}

			// Send all found transactions
			for _, tx := range foundTxs {
				err := rp.SendMessage(ctx, tx)
				if ctx.Err() != nil {
					return
				}
				if err != nil {
					log.Warnf("Failed to send getdata reply to peer %v: %v",
						rp.RemoteAddr(), err)
				}
			}

			// Send notfound message for all missing or unannounced data.
			if len(notFound) != 0 {
				err := rp.SendMessage(ctx, &wire.MsgNotFound{InvList: notFound})
				if ctx.Err() != nil {
					return
				}
				if err != nil {
					log.Warnf("Failed to send notfound reply to peer %v: %v",
						rp.RemoteAddr(), err)
				}
			}
		}()
	}
}

// receiveInv receives all inv messages from peers and starts goroutines to
// handle block and tx announcements.
func (s *Syncer) receiveInv(ctx context.Context) error {
	var wg sync.WaitGroup
	for {
		rp, msg, err := s.lp.ReceiveInv(ctx)
		if err != nil {
			wg.Wait()
			return err
		}

		wg.Add(1)
		go func() {
			defer wg.Done()

			var blocks []*chainhash.Hash
			var txs []*chainhash.Hash

			for _, inv := range msg.InvList {
				switch inv.Type {
				case wire.InvTypeBlock:
					blocks = append(blocks, &inv.Hash)
				case wire.InvTypeTx:
					txs = append(txs, &inv.Hash)
				}
			}

			if len(blocks) != 0 {
				wg.Add(1)
				go func() {
					defer wg.Done()

					err := s.handleBlockInvs(ctx, rp, blocks)
					if ctx.Err() != nil {
						return
					}
					if errors.Is(err, errors.Protocol) || errors.Is(err, errors.Consensus) {
						log.Warnf("Disconnecting peer %v: %v", rp, err)
						rp.Disconnect(err)
						return
					}
					if err != nil {
						log.Warnf("Failed to handle blocks inventoried by %v: %v", rp, err)
					}
				}()
			}
			if len(txs) != 0 {
				wg.Add(1)
				go func() {
					s.handleTxInvs(ctx, rp, txs)
					wg.Done()
				}()
			}
		}()
	}
}

// verifyTSpendSignature verifies that the provided signature and public key
// were the ones that signed the provided message transaction.
func (s *Syncer) verifyTSpendSignature(msgTx *wire.MsgTx, signature, pubKey []byte) error {
	// Calculate signature hash.
	sigHash, err := txscript.CalcSignatureHash(nil,
		txscript.SigHashAll, msgTx, 0, nil)
	if err != nil {
		return errors.Errorf("CalcSignatureHash: %w", err)
	}

	// Lift Signature from bytes.
	sig, err := schnorr.ParseSignature(signature)
	if err != nil {
		return errors.Errorf("ParseSignature: %w", err)
	}

	// Lift public PI key from bytes.
	pk, err := schnorr.ParsePubKey(pubKey)
	if err != nil {
		return errors.Errorf("ParsePubKey: %w", err)
	}

	// Verify transaction was properly signed.
	if !sig.Verify(sigHash, pk) {
		return errors.Errorf("Verify failed")
	}

	return nil
}

// checkTSpend returns whether tx is an unexpired treasury spend signed by a
// sanctioned Pi key with a valid signature.
func (s *Syncer) checkTSpend(ctx context.Context, tx *wire.MsgTx) bool {
	var (
		isTSpend          bool
		signature, pubKey []byte
		err               error
	)
	signature, pubKey, err = stake.CheckTSpend(tx)
	isTSpend = err == nil

	if !isTSpend {
		log.Debugf("Tx is not a TSpend")
		return false
	}

	_, height := s.wallet.MainChainTip(ctx)
	if uint32(height) > tx.Expiry {
		log.Debugf("TSpend has been expired")
		return false
	}

	// If we have a TSpend verify the signature.
	// Check if this is a sanctioned PI key.
	if !s.wallet.ChainParams().PiKeyExists(pubKey) {
		log.Errorf("Unknown Pi Key: %x", pubKey)
		return false
	}

	// Verify that the signature is valid and corresponds to the
	// provided public key.
	err = s.verifyTSpendSignature(tx, signature, pubKey)
	if err != nil {
		log.Errorf("Could not verify TSpend signature: %v", err)
		return false
	}

	return true
}

// GetInitState requests the init state, then using the tspend hashes requests
// all unseen tspend txs, validates them, and adds them to the tspends cache.
779 func (s *Syncer) GetInitState(ctx context.Context, rp *p2p.RemotePeer) error { 780 msg := wire.NewMsgGetInitState() 781 msg.AddTypes(wire.InitStateTSpends) 782 783 initState, err := rp.GetInitState(ctx, msg) 784 if err != nil { 785 return err 786 } 787 788 unseenTSpends := make([]*chainhash.Hash, 0) 789 for h := range initState.TSpendHashes { 790 if !s.wallet.IsTSpendCached(&initState.TSpendHashes[h]) { 791 unseenTSpends = append(unseenTSpends, &initState.TSpendHashes[h]) 792 } 793 } 794 795 if len(unseenTSpends) == 0 { 796 return nil 797 } 798 799 tspendTxs, err := rp.Transactions(ctx, unseenTSpends) 800 if errors.Is(err, errors.NotExist) { 801 err = nil 802 // Remove notfound txs. 803 prevTxs := tspendTxs 804 tspendTxs = tspendTxs[:0] 805 for _, tx := range prevTxs { 806 if tx != nil { 807 tspendTxs = append(tspendTxs, tx) 808 } 809 } 810 } 811 if err != nil { 812 return nil 813 } 814 815 for _, v := range tspendTxs { 816 if s.checkTSpend(ctx, v) { 817 s.wallet.AddTSpend(*v) 818 } 819 } 820 return nil 821 } 822 823 func (s *Syncer) handleBlockInvs(ctx context.Context, rp *p2p.RemotePeer, hashes []*chainhash.Hash) error { 824 const opf = "spv.handleBlockInvs(%v)" 825 826 // We send a sendheaders msg at the end of our startup stage. Ignore 827 // any invs sent before that happens, since we'll still be performing 828 // an initial sync with the peer. 
829 if !rp.SendHeadersSent() { 830 log.Debugf("Ignoring block invs from %v before "+ 831 "sendheaders is sent", rp) 832 return nil 833 } 834 835 blocks, err := rp.Blocks(ctx, hashes) 836 if err != nil { 837 op := errors.Opf(opf, rp) 838 return errors.E(op, err) 839 } 840 headers := make([]*wire.BlockHeader, len(blocks)) 841 bmap := make(map[chainhash.Hash]*wire.MsgBlock) 842 for i, block := range blocks { 843 bmap[block.BlockHash()] = block 844 h := block.Header 845 headers[i] = &h 846 } 847 848 return s.handleBlockAnnouncements(ctx, rp, headers, bmap) 849 } 850 851 // handleTxInvs responds to the inv message created by rp by fetching 852 // all unseen transactions announced by the peer. Any transactions 853 // that are relevant to the wallet are saved as unconfirmed 854 // transactions. Transaction invs are ignored when a rescan is 855 // necessary or ongoing. 856 func (s *Syncer) handleTxInvs(ctx context.Context, rp *p2p.RemotePeer, hashes []*chainhash.Hash) { 857 const opf = "spv.handleTxInvs(%v)" 858 859 rpt, err := s.wallet.RescanPoint(ctx) 860 if err != nil { 861 op := errors.Opf(opf, rp.RemoteAddr()) 862 log.Warn(errors.E(op, err)) 863 return 864 } 865 if rpt != nil { 866 return 867 } 868 869 // Ignore already-processed transactions 870 unseen := hashes[:0] 871 for _, h := range hashes { 872 if !s.seenTxs.Contains(*h) { 873 unseen = append(unseen, h) 874 } 875 } 876 if len(unseen) == 0 { 877 return 878 } 879 880 txs, err := rp.Transactions(ctx, unseen) 881 if errors.Is(err, errors.NotExist) { 882 err = nil 883 // Remove notfound txs. 
884 prevTxs, prevUnseen := txs, unseen 885 txs, unseen = txs[:0], unseen[:0] 886 for i, tx := range prevTxs { 887 if tx != nil { 888 txs = append(txs, tx) 889 unseen = append(unseen, prevUnseen[i]) 890 } 891 } 892 } 893 if err != nil { 894 if ctx.Err() == nil { 895 op := errors.Opf(opf, rp.RemoteAddr()) 896 err := errors.E(op, err) 897 log.Warn(err) 898 } 899 return 900 } 901 902 // Mark transactions as processed so they are not queried from other nodes 903 // who announce them in the future. 904 for _, h := range unseen { 905 s.seenTxs.Add(*h) 906 } 907 908 for _, tx := range txs { 909 if s.checkTSpend(ctx, tx) { 910 s.wallet.AddTSpend(*tx) 911 } 912 } 913 914 // Save any relevant transaction. 915 relevant := s.filterRelevant(txs) 916 for _, tx := range relevant { 917 if s.wallet.ManualTickets() && stake.IsSStx(tx) { 918 continue 919 } 920 err := s.wallet.AddTransaction(ctx, tx, nil) 921 if err != nil { 922 op := errors.Opf(opf, rp.RemoteAddr()) 923 log.Warn(errors.E(op, err)) 924 } 925 } 926 s.mempoolTxs(relevant) 927 } 928 929 // receiveHeaderAnnouncements receives all block announcements through pushed 930 // headers messages messages from peers and starts goroutines to handle the 931 // announced header. 932 func (s *Syncer) receiveHeadersAnnouncements(ctx context.Context) error { 933 for { 934 rp, headers, err := s.lp.ReceiveHeadersAnnouncement(ctx) 935 if err != nil { 936 return err 937 } 938 939 go func() { 940 err := s.handleBlockAnnouncements(ctx, rp, headers, nil) 941 if err != nil { 942 if ctx.Err() != nil { 943 return 944 } 945 946 if errors.Is(err, errors.Protocol) || errors.Is(err, errors.Consensus) { 947 log.Warnf("Disconnecting peer %v: %v", rp, err) 948 rp.Disconnect(err) 949 return 950 } 951 952 log.Warnf("Failed to handle headers announced by %v: %v", rp, err) 953 } 954 }() 955 } 956 } 957 958 // scanChain checks for matching filters of chain and returns a map of 959 // relevant wallet transactions keyed by block hash. 
bmap is queried 960 // for the block first with fallback to querying rp using getdata. 961 func (s *Syncer) scanChain(ctx context.Context, rp *p2p.RemotePeer, chain []*wallet.BlockNode, 962 bmap map[chainhash.Hash]*wire.MsgBlock) (map[chainhash.Hash][]*wire.MsgTx, error) { 963 964 found := make(map[chainhash.Hash][]*wire.MsgTx) 965 966 s.filterMu.Lock() 967 filterData := s.filterData 968 s.filterMu.Unlock() 969 970 fetched := make([]*wire.MsgBlock, len(chain)) 971 if bmap != nil { 972 for i := range chain { 973 if b, ok := bmap[*chain[i].Hash]; ok { 974 fetched[i] = b 975 } 976 } 977 } 978 979 idx := 0 980 FilterLoop: 981 for idx < len(chain) { 982 var fmatches []*chainhash.Hash 983 var fmatchidx []int 984 var fmatchMu sync.Mutex 985 986 // Scan remaining filters with up to ncpu workers 987 c := make(chan int) 988 var wg sync.WaitGroup 989 worker := func() { 990 for i := range c { 991 n := chain[i] 992 f := n.FilterV2 993 k := blockcf2.Key(&n.Header.MerkleRoot) 994 if f.N() != 0 && f.MatchAny(k, filterData) { 995 fmatchMu.Lock() 996 fmatches = append(fmatches, n.Hash) 997 fmatchidx = append(fmatchidx, i) 998 fmatchMu.Unlock() 999 } 1000 } 1001 wg.Done() 1002 } 1003 nworkers := 0 1004 for i := idx; i < len(chain); i++ { 1005 if fetched[i] != nil { 1006 continue // Already have block 1007 } 1008 select { 1009 case c <- i: 1010 default: 1011 if nworkers < runtime.NumCPU() { 1012 nworkers++ 1013 wg.Add(1) 1014 go worker() 1015 } 1016 c <- i 1017 } 1018 } 1019 close(c) 1020 wg.Wait() 1021 1022 if len(fmatches) != 0 { 1023 blocks, err := rp.Blocks(ctx, fmatches) 1024 if err != nil { 1025 return nil, err 1026 } 1027 for j, b := range blocks { 1028 i := fmatchidx[j] 1029 1030 // Perform context-free validation on the block. 1031 // Disconnect peer when invalid. 
				// Validate each fetched block's merkle root, falling
				// back to the DCP0005 combined-subtree algorithm when
				// the original calculation does not match.
				err := validate.MerkleRoots(b)
				if err != nil {
					err = validate.DCP0005MerkleRoot(b)
				}
				if err != nil {
					// A bad merkle root is a protocol violation;
					// drop the peer.
					rp.Disconnect(err)
					return nil, err
				}

				fetched[i] = b
			}
		}

		if err := ctx.Err(); err != nil {
			return nil, err
		}

		// Rescan the fetched blocks in order.  If a block's scan adds
		// new data filters, restart the filter matching from the next
		// block so the updated filters are applied to the remainder.
		for i := idx; i < len(chain); i++ {
			b := fetched[i]
			if b == nil {
				continue
			}
			matches, fadded := s.rescanBlock(b)
			found[*chain[i].Hash] = matches
			if len(fadded) != 0 {
				idx = i + 1
				filterData = fadded
				continue FilterLoop
			}
		}
		return found, nil
	}
	return found, nil
}

// handleBlockAnnouncements handles blocks announced through block invs or
// headers messages by rp. bmap should contain the full blocks of any
// inventoried blocks, but may be nil in case the blocks were announced through
// headers.
func (s *Syncer) handleBlockAnnouncements(ctx context.Context, rp *p2p.RemotePeer, headers []*wire.BlockHeader,
	bmap map[chainhash.Hash]*wire.MsgBlock) (err error) {

	// Annotate any returned error with the operation and peer address,
	// unless the error is due to our own context being canceled.
	const opf = "spv.handleBlockAnnouncements(%v)"
	defer func() {
		if err != nil && ctx.Err() == nil {
			op := errors.Opf(opf, rp.RemoteAddr())
			err = errors.E(op, err)
		}
	}()

	if len(headers) == 0 {
		return nil
	}

	firstHeader := headers[0]

	// Disconnect if the peer announced a header that is significantly
	// behind our main chain height.
	const maxAnnHeaderTipDelta = int32(256)
	_, tipHeight := s.wallet.MainChainTip(ctx)
	if int32(firstHeader.Height) < tipHeight && tipHeight-int32(firstHeader.Height) > maxAnnHeaderTipDelta {
		err = errors.E(errors.Protocol, "peer announced old header")
		return err
	}

	newBlocks := make([]*wallet.BlockNode, 0, len(headers))
	var bestChain []*wallet.BlockNode
	var matchingTxs map[chainhash.Hash][]*wire.MsgTx
	cnet := s.wallet.ChainParams().Net
	// The closure below runs with sidechainMu held for its entire
	// duration; it writes bestChain and matchingTxs, which are read
	// after the closure returns.
	err = func() error {
		defer s.sidechainMu.Unlock()
		s.sidechainMu.Lock()

		// Determine if the peer sent a header that connects to an
		// unknown sidechain (i.e. an orphan chain). In that case,
		// re-request headers to hopefully find the missing ones.
		//
		// The header is an orphan if its parent block is not in the
		// mainchain nor on a previously known side chain.
		prevInMainChain, _, err := s.wallet.BlockInMainChain(ctx, &firstHeader.PrevBlock)
		if err != nil {
			return err
		}
		if !prevInMainChain && !s.sidechains.HasSideChainBlock(&firstHeader.PrevBlock) {
			if err := rp.ReceivedOrphanHeader(); err != nil {
				return err
			}

			locators, err := s.wallet.BlockLocators(ctx, nil)
			if err != nil {
				return err
			}
			if err := rp.HeadersAsync(ctx, locators, &hashStop); err != nil {
				return err
			}

			// We requested async headers, so return early and wait
			// for the next headers msg.
			//
			// newBlocks and bestChain are empty at this point, so
			// the rest of this function continues without
			// producing side effects.
			return nil
		}

		for i := range headers {
			hash := headers[i].BlockHash()

			// Skip the first blocks sent if they are already in
			// the mainchain or on a known side chain. We only skip
			// those at the start of the list to ensure every block
			// in newBlocks still connects in sequence.
			if len(newBlocks) == 0 {
				haveBlock, _, err := s.wallet.BlockInMainChain(ctx, &hash)
				if err != nil {
					return err
				}

				if haveBlock || s.sidechains.HasSideChainBlock(&hash) {
					continue
				}
			}

			n := wallet.NewBlockNode(headers[i], &hash, nil)
			newBlocks = append(newBlocks, n)
		}

		if len(newBlocks) == 0 {
			// Peer did not send any headers we didn't already
			// have.
			return nil
		}

		// Validate difficulties over the full side chain the new
		// nodes extend, then add them to the sidechain forest.
		fullsc, err := s.sidechains.FullSideChain(newBlocks)
		if err != nil {
			return err
		}
		_, err = s.wallet.ValidateHeaderChainDifficulties(ctx, fullsc, 0)
		if err != nil {
			return err
		}

		for _, n := range newBlocks {
			s.sidechains.AddBlockNode(n)
		}

		bestChain, err = s.wallet.EvaluateBestChain(ctx, &s.sidechains)
		if err != nil {
			return err
		}

		if len(bestChain) == 0 {
			return nil
		}

		bestChainHashes := make([]*chainhash.Hash, len(bestChain))
		for i, n := range bestChain {
			bestChainHashes[i] = n.Hash
		}

		filters, err := rp.CFiltersV2(ctx, bestChainHashes)
		if err != nil {
			if ctx.Err() != nil {
				return ctx.Err()
			}
			return err
		}

		// Validate each returned cfilter against the header's
		// commitment before attaching it to the chain node.
		for i, cf := range filters {
			filter, proofIndex, proof := cf.Filter, cf.ProofIndex, cf.Proof

			err = validate.CFilterV2HeaderCommitment(cnet,
				bestChain[i].Header, filter, proofIndex, proof)
			if err != nil {
				return err
			}

			bestChain[i].FilterV2 = filter
		}

		// Only scan the new chain for wallet-relevant transactions
		// when there is no pending rescan point (a rescan would cover
		// these blocks anyway).
		rpt, err := s.wallet.RescanPoint(ctx)
		if err != nil {
			return err
		}
		if rpt == nil {
			matchingTxs, err = s.scanChain(ctx, rp, bestChain, bmap)
			if err != nil {
				return err
			}
		}

		prevChain, err := s.wallet.ChainSwitch(ctx, &s.sidechains, bestChain, matchingTxs)
		if err != nil {
			return err
		}
		if len(prevChain) != 0 {
			// Return the now-unwound blocks to the sidechain
			// forest so a future reorg back is possible.
			log.Infof("Reorganize from %v to %v (total %d block(s) reorged)",
				prevChain[len(prevChain)-1].Hash, bestChain[len(bestChain)-1].Hash, len(prevChain))
			for _, n := range prevChain {
				s.sidechains.AddBlockNode(n)
			}
		}
		tipHeader := bestChain[len(bestChain)-1].Header
		s.setRequiredHeight(int32(tipHeader.Height))
		s.tipChanged(tipHeader, int32(len(prevChain)), matchingTxs)

		return nil
	}()
	if err != nil {
		return err
	}

	// The main chain tip moved, so any cached locators are stale;
	// invalidate them and bump the generation counter.
	if len(bestChain) != 0 {
		s.locatorMu.Lock()
		s.currentLocators = nil
		s.locatorGeneration++
		s.locatorMu.Unlock()
	}

	// Log connected blocks.
	for _, n := range bestChain {
		log.Infof("Connected block %v, height %d, %d wallet transaction(s)",
			n.Hash, n.Header.Height, len(matchingTxs[*n.Hash]))
	}
	// Announced blocks not in the main chain are logged as sidechain or orphan
	// blocks.
	for _, n := range newBlocks {
		haveBlock, _, err := s.wallet.BlockInMainChain(ctx, n.Hash)
		if err != nil {
			return err
		}
		if haveBlock {
			continue
		}
		log.Infof("Received sidechain or orphan block %v, height %v",
			n.Hash, n.Header.Height)
	}

	return nil
}

// hashStop is a zero value stop hash for fetching all possible data using
// locators.
var hashStop chainhash.Hash

// getHeaders iteratively fetches headers from rp using the latest locators.
// Returns when no more headers are available. A sendheaders message is pushed
// to the peer when there are no more headers to fetch.
func (s *Syncer) getHeaders(ctx context.Context, rp *p2p.RemotePeer) error {
	var locators []*chainhash.Hash
	var generation uint
	var err error
	// Use the cached locators when available, recording the generation
	// so concurrent invalidations (by other peers' announcements) can be
	// detected below.
	s.locatorMu.Lock()
	locators = s.currentLocators
	generation = s.locatorGeneration
	if len(locators) == 0 {
		locators, err = s.wallet.BlockLocators(ctx, nil)
		if err != nil {
			s.locatorMu.Unlock()
			return err
		}
		s.currentLocators = locators
		s.locatorGeneration++
	}
	s.locatorMu.Unlock()

	var lastHeight int32
	cnet := s.wallet.ChainParams().Net

	for {
		headers, err := rp.Headers(ctx, locators, &hashStop)
		if err != nil {
			return err
		}

		if len(headers) == 0 {
			// Ensure that the peer provided headers through the height
			// advertised during handshake.
			if lastHeight < rp.InitialHeight() {
				// Peer may not have provided any headers if our own locators
				// were up to date. Compare the best locator hash with the
				// advertised height.
				h, err := s.wallet.BlockHeader(ctx, locators[0])
				if err == nil && int32(h.Height) < rp.InitialHeight() {
					return errors.E(errors.Protocol, "peer did not provide "+
						"headers through advertised height")
				}
			}

			return rp.SendHeaders(ctx)
		}

		lastHeight = int32(headers[len(headers)-1].Height)

		// Fetch and validate the v2 cfilter for every header
		// concurrently; each node is written to its own index of
		// nodes, so the goroutines do not race with each other.
		nodes := make([]*wallet.BlockNode, len(headers))
		g, ctx := errgroup.WithContext(ctx)
		for i := range headers {
			i := i // capture per-iteration value for the closure
			g.Go(func() error {
				header := headers[i]
				hash := header.BlockHash()
				filter, proofIndex, proof, err := rp.CFilterV2(ctx, &hash)
				if err != nil {
					return err
				}

				err = validate.CFilterV2HeaderCommitment(cnet, header,
					filter, proofIndex, proof)
				if err != nil {
					return err
				}

				nodes[i] = wallet.NewBlockNode(header, &hash, filter)
				if wallet.BadCheckpoint(cnet, &hash, int32(header.Height)) {
					nodes[i].BadCheckpoint()
				}
				return nil
			})
		}
		err = g.Wait()
		if err != nil {
			return err
		}

		// Add any headers we do not already have to the sidechain
		// forest.  sidechainMu remains held through the best-chain
		// evaluation and chain switch below.
		var added int
		s.sidechainMu.Lock()
		for _, n := range nodes {
			haveBlock, _, _ := s.wallet.BlockInMainChain(ctx, n.Hash)
			if haveBlock {
				continue
			}
			if s.sidechains.AddBlockNode(n) {
				added++
			}
		}
		if added == 0 {
			s.sidechainMu.Unlock()

			// Nothing new; refresh locators (another goroutine may
			// have invalidated them) and request again.
			s.locatorMu.Lock()
			if s.locatorGeneration > generation {
				locators = s.currentLocators
			}
			if len(locators) == 0 {
				locators, err = s.wallet.BlockLocators(ctx, nil)
				if err != nil {
					s.locatorMu.Unlock()
					return err
				}
				s.currentLocators = locators
				s.locatorGeneration++
				generation = s.locatorGeneration
			}
			s.locatorMu.Unlock()
			continue
		}
		s.fetchHeadersProgress(headers[len(headers)-1])
		log.Debugf("Fetched %d new header(s) ending at height %d from %v",
			added, nodes[len(nodes)-1].Header.Height, rp)

		bestChain, err := s.wallet.EvaluateBestChain(ctx, &s.sidechains)
		if err != nil {
			s.sidechainMu.Unlock()
			return err
		}
		if len(bestChain) == 0 {
			s.sidechainMu.Unlock()
			continue
		}

		_, err = s.wallet.ValidateHeaderChainDifficulties(ctx, bestChain, 0)
		if err != nil {
			s.sidechainMu.Unlock()
			return err
		}

		prevChain, err := s.wallet.ChainSwitch(ctx, &s.sidechains, bestChain, nil)
		if err != nil {
			s.sidechainMu.Unlock()
			return err
		}

		if len(prevChain) != 0 {
			// Keep the unwound blocks in the forest in case of a
			// reorg back to them.
			log.Infof("Reorganize from %v to %v (total %d block(s) reorged)",
				prevChain[len(prevChain)-1].Hash, bestChain[len(bestChain)-1].Hash, len(prevChain))
			for _, n := range prevChain {
				s.sidechains.AddBlockNode(n)
			}
		}
		tip := bestChain[len(bestChain)-1]
		if len(bestChain) == 1 {
			log.Infof("Connected block %v, height %d", tip.Hash, tip.Header.Height)
		} else {
			log.Infof("Connected %d blocks, new tip %v, height %d, date %v",
				len(bestChain), tip.Hash, tip.Header.Height, tip.Header.Timestamp)
		}

		s.sidechainMu.Unlock()

		// Generate new locators
		s.locatorMu.Lock()
		locators, err = s.wallet.BlockLocators(ctx, nil)
		if err != nil {
			s.locatorMu.Unlock()
			return err
		}
		s.currentLocators = locators
		s.locatorGeneration++
		s.locatorMu.Unlock()
	}
}

func (s *Syncer) startupSync(ctx context.Context, rp *p2p.RemotePeer) error {
	// Disconnect from the peer if their advertised block height is
	// significantly behind the wallet's.
1446 _, tipHeight := s.wallet.MainChainTip(ctx) 1447 if rp.InitialHeight() < tipHeight-6 { 1448 return errors.E("peer is not synced") 1449 } 1450 s.fetchMissingCfiltersStart() 1451 progress := make(chan wallet.MissingCFilterProgress, 1) 1452 go s.wallet.FetchMissingCFiltersWithProgress(ctx, rp, progress) 1453 1454 for p := range progress { 1455 if p.Err != nil { 1456 return p.Err 1457 } 1458 s.fetchMissingCfiltersProgress(p.BlockHeightStart, p.BlockHeightEnd) 1459 } 1460 s.fetchMissingCfiltersFinished() 1461 1462 // Fetch any unseen headers from the peer. 1463 s.fetchHeadersStart() 1464 log.Debugf("Fetching headers from %v", rp.RemoteAddr()) 1465 err := s.getHeaders(ctx, rp) 1466 if err != nil { 1467 return err 1468 } 1469 s.fetchHeadersFinished() 1470 1471 if atomic.CompareAndSwapUint32(&s.atomicCatchUpTryLock, 0, 1) { 1472 err = func() error { 1473 rescanPoint, err := s.wallet.RescanPoint(ctx) 1474 if err != nil { 1475 return err 1476 } 1477 if rescanPoint == nil { 1478 if !s.loadedFilters { 1479 err = s.wallet.LoadActiveDataFilters(ctx, s, true) 1480 if err != nil { 1481 return err 1482 } 1483 s.loadedFilters = true 1484 } 1485 1486 s.synced() 1487 1488 return nil 1489 } 1490 // RescanPoint is != nil so we are not synced to the peer and 1491 // check to see if it was previously synced 1492 s.unsynced() 1493 1494 s.discoverAddressesStart() 1495 err = s.wallet.DiscoverActiveAddresses(ctx, rp, rescanPoint, s.discoverAccounts, s.wallet.GapLimit()) 1496 if err != nil { 1497 return err 1498 } 1499 s.discoverAddressesFinished() 1500 s.discoverAccounts = false 1501 1502 err = s.wallet.LoadActiveDataFilters(ctx, s, true) 1503 if err != nil { 1504 return err 1505 } 1506 s.loadedFilters = true 1507 1508 s.rescanStart() 1509 1510 rescanBlock, err := s.wallet.BlockHeader(ctx, rescanPoint) 1511 if err != nil { 1512 return err 1513 } 1514 progress := make(chan wallet.RescanProgress, 1) 1515 go s.wallet.RescanProgressFromHeight(ctx, s, int32(rescanBlock.Height), progress) 1516 
1517 for p := range progress { 1518 if p.Err != nil { 1519 return p.Err 1520 } 1521 s.rescanProgress(p.ScannedThrough) 1522 } 1523 s.rescanFinished() 1524 1525 s.synced() 1526 1527 return nil 1528 }() 1529 atomic.StoreUint32(&s.atomicCatchUpTryLock, 0) 1530 if err != nil { 1531 return err 1532 } 1533 } 1534 1535 if rp.Pver() >= wire.InitStateVersion { 1536 err = s.GetInitState(ctx, rp) 1537 if err != nil { 1538 log.Errorf("Failed to get init state", err) 1539 } 1540 } 1541 1542 unminedTxs, err := s.wallet.UnminedTransactions(ctx) 1543 if err != nil { 1544 log.Errorf("Cannot load unmined transactions for resending: %v", err) 1545 return nil 1546 } 1547 if len(unminedTxs) == 0 { 1548 return nil 1549 } 1550 err = rp.PublishTransactions(ctx, unminedTxs...) 1551 if err != nil { 1552 // TODO: Transactions should be removed if this is a double spend. 1553 log.Errorf("Failed to resent one or more unmined transactions: %v", err) 1554 } 1555 return nil 1556 } 1557 1558 // handleMempool handles eviction from the local mempool of non-wallet-backed 1559 // transactions. It MUST be run as a goroutine. 1560 func (s *Syncer) handleMempool(ctx context.Context) error { 1561 const mempoolEvictionTimeout = 60 * time.Minute 1562 1563 for { 1564 select { 1565 case txHash := <-s.mempoolAdds: 1566 go func() { 1567 select { 1568 case <-ctx.Done(): 1569 case <-time.After(mempoolEvictionTimeout): 1570 s.mempool.Delete(*txHash) 1571 } 1572 }() 1573 case <-ctx.Done(): 1574 return ctx.Err() 1575 } 1576 } 1577 }