github.com/mit-dci/lit@v0.0.0-20221102210550-8c3d3b49f2ce/uspv/eight333.go

package uspv

import (
	"fmt"
	"io"
	"os"

	"github.com/mit-dci/lit/logging"

	"github.com/mit-dci/lit/btcutil/bloom"
	"github.com/mit-dci/lit/btcutil/chaincfg/chainhash"
	"github.com/mit-dci/lit/lnutil"
	"github.com/mit-dci/lit/wire"
)

const (
	// keyFileName and headerFileName don't seem to be referenced in this
	// file. -- takaya
	keyFileName    = "testseed.hex"
	headerFileName = "headers.bin"

	// VERSION hardcoded for now, probably ok...?
	// 70012 is for segnet... make this an init var?
	VERSION = 70012
)

// GimmeFilter builds a bloom filter from all tracked addresses and
// outpoints (...or I'm gonna fade away).
func (s *SPVCon) GimmeFilter() (*bloom.Filter, error) {

	s.TrackingAdrsMtx.Lock()
	defer s.TrackingAdrsMtx.Unlock()
	s.TrackingOPsMtx.Lock()
	defer s.TrackingOPsMtx.Unlock()

	filterElements := uint32(len(s.TrackingAdrs) + len(s.TrackingOPs))

	f := bloom.NewFilter(filterElements, 0, 0.000001, wire.BloomUpdateAll)

	// note there could be false positives since we're just looking
	// for the 20-byte PKH without the opcodes.
	for a160 := range s.TrackingAdrs { // add 20-byte pubkeyhash
		//		logging.Infof("adding address hash %x\n", a160)
		f.Add(a160[:])
	}
	//	for _, u := range allUtxos {
	//		f.AddOutPoint(&u.Op)
	//	}

	// actually... we should monitor addresses, not txids, right?
	// or no...?
	for wop := range s.TrackingOPs {
		// try just outpoints, not the txids as well
		f.AddOutPoint(&wop)
	}
	// still some problem with the filter?  When a counterparty broadcasts a
	// close that doesn't send anything to us, sometimes we don't see it and
	// think the channel is still open.  So are we not monitoring the channel
	// outpoint properly, here or in ingest()?

	logging.Infof("made %d element filter\n", filterElements)
	return f, nil
}
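
// Usage sketch (not in the original file): btcutil-style bloom filters
// expose MsgFilterLoad(), which wraps the filter in a wire message the
// remote node can apply.  Whether this fork keeps that exact method, and
// whether the caller pushes it on outMsgQueue like this, is an assumption.
//
//	f, err := s.GimmeFilter()
//	if err != nil {
//		return err
//	}
//	s.outMsgQueue <- f.MsgFilterLoad() // remote node swaps in the new filter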

// MatchTx queries whether a tx matches registered addresses and outpoints.
func (s *SPVCon) MatchTx(tx *wire.MsgTx) bool {
	gain := false
	txid := tx.TxHash()

	// get lock for adrs / outpoints
	s.TrackingAdrsMtx.Lock()
	defer s.TrackingAdrsMtx.Unlock()
	s.TrackingOPsMtx.Lock()
	defer s.TrackingOPsMtx.Unlock()

	// start with optimism.  We may gain money.  Iterate through all output scripts.
	for i, out := range tx.TxOut {
		// create outpoint of what we're looking at
		op := wire.NewOutPoint(&txid, uint32(i))

		// 20-byte pubkey hash of this txout (if any)
		var adr20 [20]byte
		copy(adr20[:], lnutil.KeyHashFromPkScript(out.PkScript))

		// when we gain a utxo, set gain so we can return a match, but
		// also register the new outpoint so we keep tracking it
		//		logging.Infof("got output key %x ", adr20)
		if s.TrackingAdrs[adr20] {
			gain = true
			s.TrackingOPs[*op] = true
		}

		// this output may confirm an outpoint we're watching.  Check that here.
		if s.TrackingOPs[*op] {
			// not quite "gain", more like confirm, but same idea.
			gain = true
		}
	}

	// No need to check for loss if we have a gain
	if gain {
		return true
	}

	// next, pessimism.  Iterate through inputs, matching tracked outpoints
	for _, in := range tx.TxIn {
		if s.TrackingOPs[in.PreviousOutPoint] {
			return true
		}
	}

	return false
}
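
// Caller sketch (illustrative only; the real ingest path lives elsewhere):
// a matching tx gets its txid recorded at whatever height the caller
// already knows, so the `height` variable here is an assumption.
//
//	txid := tx.TxHash()
//	if s.MatchTx(tx) {
//		if err := s.OKTxid(&txid, height); err != nil {
//			logging.Errorf("Txid store error: %s\n", err.Error())
//		}
//	}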

// OKTxid assigns a height to a txid, meaning the txid exists at that
// height with whatever assurance that height implies (for height 0,
// no assurance at all).
func (s *SPVCon) OKTxid(txid *chainhash.Hash, height int32) error {
	if txid == nil {
		return fmt.Errorf("tried to add nil txid")
	}
	logging.Infof("added %s to OKTxids at height %d\n", txid.String(), height)
	s.OKMutex.Lock()
	s.OKTxids[*txid] = height
	s.OKMutex.Unlock()
	return nil
}
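
// Lookup sketch: OKTxids maps txid -> height, so a consumer (elsewhere in
// this package; this exact snippet is an assumption) can check whether a
// tx has been vouched for, and at what height:
//
//	s.OKMutex.Lock()
//	height, ok := s.OKTxids[txid]
//	s.OKMutex.Unlock()
//	if ok {
//		// tx is known at `height` (0 means unconfirmed / no assurance)
//	}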

// AskForTx requests a tx we heard about from an inv message.
// It's one at a time, but should be fast enough.
// I don't like this function because SPV shouldn't even ask...
func (s *SPVCon) AskForTx(txid chainhash.Hash) {
	gdata := wire.NewMsgGetData()
	inv := wire.NewInvVect(wire.InvTypeTx, &txid)
	// no longer get wit txs if in hardmode... don't need to, right?
	//	if s.HardMode {
	//		inv.Type = wire.InvTypeWitnessTx
	//	}
	gdata.AddInvVect(inv)
	logging.Infof("asking for tx %s\n", txid.String())
	s.outMsgQueue <- gdata
}
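
// Caller sketch (hedged; the real inv handler lives elsewhere in this
// package): on an inv message advertising txs, ask for each one.
//
//	for _, iv := range invMsg.InvList {
//		if iv.Type == wire.InvTypeTx {
//			s.AskForTx(iv.Hash)
//		}
//	}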

// HashAndHeight is needed instead of just a height in case a fullnode
// responds abnormally (?) by sending out-of-order merkleblocks.
// We cache a merkleroot:height pair in the queue so we don't have to
// look them up from disk.
// It's also used when inv messages indicate blocks, so we can add the header
// and parse the txs in one request instead of requesting headers first.
type HashAndHeight struct {
	blockhash chainhash.Hash
	height    int32
	final     bool // indicates this is the last merkleblock requested
}

// NewRootAndHeight saves like 2 lines.
func NewRootAndHeight(b chainhash.Hash, h int32) (hah HashAndHeight) {
	hah.blockhash = b
	hah.height = h
	return
}
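
// Queue contract sketch: AskForBlocks (at the bottom of this file) is the
// producer and IngestMerkleBlock (below) is the consumer of s.blockQueue,
// so entries pop in the same order the merkleblocks were requested:
//
//	s.blockQueue <- NewRootAndHeight(hdr.BlockHash(), reqHeight) // producer
//	hah := <-s.blockQueue                                        // consumer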

// IngestMerkleBlock checks a merkle block against the queue of requested
// blocks, and marks the txids it proves as OK at that height.
func (s *SPVCon) IngestMerkleBlock(m *wire.MsgMerkleBlock) {

	txids, err := checkMBlock(m) // check self-consistency
	if err != nil {
		logging.Errorf("Merkle block error: %s\n", err.Error())
		return
	}
	var hah HashAndHeight
	select { // select here so we don't block on an unrequested mblock
	case hah = <-s.blockQueue: // pop height off mblock queue
	default:
		logging.Errorf("Unrequested merkle block")
		return
	}

	// this verifies order, and also that the returned header fits
	// into our SPV header file
	newMerkBlockSha := m.Header.BlockHash()
	if !hah.blockhash.IsEqual(&newMerkBlockSha) {
		logging.Errorf("merkle block out of order: got %s expected %s",
			newMerkBlockSha.String(), hah.blockhash.String())
		logging.Errorf("has %d hashes, %d txs, flags: %x",
			len(m.Hashes), m.Transactions, m.Flags)
		return
	}

	for _, txid := range txids {
		err = s.OKTxid(txid, hah.height)
		if err != nil {
			logging.Errorf("Txid store error: %s\n", err.Error())
			return
		}
	}

	// CurrentHeightChan is "how we tell the wallet that a block has come in",
	// so I guess this applies here as well.
	for i := range s.HeightDistribute {
		s.HeightDistribute[i] <- hah.height
	}

	// actually we should do this AFTER sending all the txs...
	s.CurrentHeightChan <- hah.height

	if hah.final {
		// don't set waitstate; instead, ask for headers again!
		// this way the only thing that triggers waitstate is asking for headers,
		// getting 0, calling AskForMerkBlocks(), and seeing you don't need any.
		// that way you are pretty sure you're synced up.
		err = s.AskForHeaders()
		if err != nil {
			logging.Errorf("Merkle block error: %s\n", err.Error())
		}
	}
}

// IngestHeaders takes in a bunch of headers, checks them,
// and if they're OK, appends them to the local header file.
// If there are no headers, it assumes we're done and returns false.
// Otherwise it assumes there's more to request and returns true.
func (s *SPVCon) IngestHeaders(m *wire.MsgHeaders) (bool, error) {

	// headerChainLength is how many headers we give to the
	// verification function.  In bitcoin you never need more than 2016 previous
	// headers to figure out the validity of the next; some altcoins need more
	// though, like 4K or so.
	//	headerChainLength := 4096

	gotNum := int64(len(m.Headers))
	if gotNum > 0 {
		logging.Infof("got %d headers. Range:\n%s - %s\n",
			gotNum, m.Headers[0].BlockHash().String(),
			m.Headers[len(m.Headers)-1].BlockHash().String())
	} else {
		logging.Infof("got 0 headers, we're probably synced up")
		return false, nil
	}

	s.headerMutex.Lock()
	// even though we will be doing a bunch without writing, it should be
	// OK performance-wise to keep it locked for this function's duration,
	// because verification is pretty quick.
	defer s.headerMutex.Unlock()

	reorgHeight, err := CheckHeaderChain(s.headerFile, m.Headers, s.Param)
	if err != nil {
		// an insufficient-depth reorg means we're still trying to sync up?
		// really, the reorg hasn't been proven; if the remote node
		// provides us with a new block we'll ask again.
		if reorgHeight == -1 {
			logging.Errorf("Header error: %s\n", err.Error())
			return false, nil
		}
		// some other error
		return false, err
	}

	// truncate the header file if a reorg happened
	if reorgHeight != 0 {
		fileHeight := reorgHeight - s.Param.StartHeight
		err = s.headerFile.Truncate(int64(fileHeight) * 80)
		if err != nil {
			return false, err
		}

		// CurrentHeightChan is "how we tell the wallet that a block has come
		// in", so I guess this applies here as well.
		for i := range s.HeightDistribute {
			s.HeightDistribute[i] <- reorgHeight
		}

		// also we need to tell the upstream modules that a reorg happened
		s.CurrentHeightChan <- reorgHeight
		s.syncHeight = reorgHeight
	}

	// a header message is all or nothing; if we think there's something
	// wrong with it, we don't take any of their headers
	for _, resphdr := range m.Headers {
		// write to end of file
		err = resphdr.Serialize(s.headerFile)
		if err != nil {
			return false, err
		}
	}
	logging.Infof("Added %d headers OK.", len(m.Headers))
	return true, nil
}
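
// Worked example of the truncation arithmetic above (headers are a fixed
// 80 bytes on disk): with Param.StartHeight = 500000 and a reorg at
// height 500010, 10 headers survive, covering heights 500000-500009.
// The concrete numbers here are illustrative, not from the original file.
//
//	fileHeight := int32(500010 - 500000)                  // 10 headers remain
//	err := s.headerFile.Truncate(int64(fileHeight) * 80) // file is now 800 bytes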

// AskForHeaders sends a getheaders message with block locator hashes
// built back from our current header tip.
func (s *SPVCon) AskForHeaders() error {
	ghdr := wire.NewMsgGetHeaders()
	ghdr.ProtocolVersion = s.localVersion

	tipheight := s.GetHeaderTipHeight()
	logging.Infof("got header tip height %d\n", tipheight)
	// get the tip header, as well as a few older ones (inefficient...?)
	// yes, inefficient; really we should use "getheaders" and skip some of this

	tipheader, err := s.GetHeaderAtHeight(tipheight)
	if err != nil {
		logging.Errorf("AskForHeaders GetHeaderAtHeight error\n")
		return err
	}

	tHash := tipheader.BlockHash()
	err = ghdr.AddBlockLocatorHash(&tHash)
	if err != nil {
		return err
	}

	backnum := int32(1)

	// add more blockhashes in there if we're high enough
	for tipheight > s.Param.StartHeight+backnum {
		backhdr, err := s.GetHeaderAtHeight(tipheight - backnum)
		if err != nil {
			return err
		}
		backhash := backhdr.BlockHash()

		err = ghdr.AddBlockLocatorHash(&backhash)
		if err != nil {
			return err
		}

		// send the most recent ~10 blockhashes, then get sparse
		if backnum > 10 {
			backnum <<= 2
		} else {
			backnum++
		}
	}

	logging.Infof("getheaders message has %d header hashes, first one is %s\n",
		len(ghdr.BlockLocatorHashes), ghdr.BlockLocatorHashes[0].String())

	s.outMsgQueue <- ghdr
	return nil
}
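
// Locator spacing sketch: the loop above emits dense recent offsets, then
// sparse older ones.  Replaying the backnum update rule shows the offsets
// below the tip:
//
//	for backnum := int32(1); backnum < 3000; {
//		fmt.Println(backnum) // 1, 2, ..., 10, 11, 44, 176, 704, 2816
//		if backnum > 10 {
//			backnum <<= 2
//		} else {
//			backnum++
//		}
//	}
//
// so a peer that forked off recently finds a close shared hash, while a
// badly diverged one still matches one of the sparse deep hashes.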

// AskForBlocks requests blocks from the current sync height to the header
// tip.  Right now this asks for 1 block per getData message.
// Maybe it's faster to ask for many in each message?
func (s *SPVCon) AskForBlocks() error {
	var hdr wire.BlockHeader

	s.headerMutex.Lock() // lock just to check filesize
	stat, err := os.Stat(s.headerFile.Name())
	if err != nil {
		return err
	}
	s.headerMutex.Unlock() // checked, unlock
	endPos := stat.Size()

	// the tip is the last header in the file: header count plus start
	// height, minus 1
	headerTip := int32(endPos/80) + (s.headerStartHeight - 1)

	logging.Infof("syncHeight %d headerTip %d\n", s.syncHeight, headerTip)
	if s.syncHeight > headerTip {
		return fmt.Errorf("error: db longer than headers! shouldn't happen")
	}
	if s.syncHeight == headerTip {
		// nothing to ask for; set wait state and return
		logging.Infof("no blocks to request, entering wait state\n")
		logging.Infof("%d bytes received\n", s.RBytes)
		s.inWaitState <- true

		// check if we can grab outputs
		// Do this on wallit level instead
		//		err = s.GrabAll()
		//		if err != nil {
		//			return err
		//		}
		// also advertise any unconfirmed txs here
		//		s.Rebroadcast()
		// ask for mempool each time...?  put something in to only ask the
		// first time we sync...?
		//		if !s.Ironman {
		//			s.AskForMempool()
		//		}
		return nil
	}

	logging.Debugf("will request blocks %d to %d\n", s.syncHeight+1, headerTip)
	reqHeight := s.syncHeight

	// loop through all heights where we want merkleblocks.
	for reqHeight < headerTip {
		reqHeight++ // we're requesting the next header

		// load header from file
		s.headerMutex.Lock() // seek to the header we need
		_, err = s.headerFile.Seek(
			int64((reqHeight-s.headerStartHeight)*80), io.SeekStart)
		if err != nil {
			return err
		}
		err = hdr.Deserialize(s.headerFile) // read header, done w/ file for now
		s.headerMutex.Unlock()              // unlock after reading 1 header
		if err != nil {
			logging.Errorf("header deserialize error!\n")
			return err
		}

		bHash := hdr.BlockHash()
		// create the inventory we're asking for
		var iv1 *wire.InvVect
		// if hardmode, ask for legit blocks, none of this ralphy stuff
		// I don't think you can have a queue for SPV.  You miss stuff.
		// also ask if someone wants rawblocks, like the watchtower
		if s.HardMode || s.RawBlockActive {
			iv1 = wire.NewInvVect(wire.InvTypeWitnessBlock, &bHash)
		} else { // ah well
			iv1 = wire.NewInvVect(wire.InvTypeFilteredBlock, &bHash)
		}
		gdataMsg := wire.NewMsgGetData()
		// add inventory
		err = gdataMsg.AddInvVect(iv1)
		if err != nil {
			return err
		}

		hah := NewRootAndHeight(bHash, reqHeight)
		if reqHeight == headerTip { // if this is the last block, indicate finality
			hah.final = true
		}
		// waits here most of the time for the queue to empty out
		s.blockQueue <- hah // push height and mroot of requested block on queue
		s.outMsgQueue <- gdataMsg
	}
	return nil
}
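
// Offset math sketch for AskForBlocks above: with 80-byte headers, a
// header file of endPos bytes holds endPos/80 headers, and the first one
// sits at headerStartHeight.  For example (illustrative numbers),
// endPos = 160 and headerStartHeight = 500000 give:
//
//	headerTip := int32(160/80) + (500000 - 1) // = 500001
//
// i.e. the two headers on disk cover heights 500000 and 500001.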