github.com/ledgerwatch/erigon-lib@v1.0.0/txpool/fetch.go

/*
   Copyright 2021 Erigon contributors

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package txpool

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/holiman/uint256"
	"github.com/ledgerwatch/erigon-lib/common/dbg"
	"github.com/ledgerwatch/erigon-lib/direct"
	"github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil"
	"github.com/ledgerwatch/erigon-lib/gointerfaces/remote"
	"github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon-lib/rlp"
	types2 "github.com/ledgerwatch/erigon-lib/types"
	"github.com/ledgerwatch/log/v3"
	"google.golang.org/grpc"
	"google.golang.org/protobuf/types/known/emptypb"
)

// Fetch connects to sentry and implements the eth/66 protocol regarding transaction
// messages. It tries to "prime" the sentry with a StatusData message containing the given
// genesis hash and list of forks, but with zero max block and total difficulty.
// Sentry should have logic not to overwrite StatusData with messages from the tx pool.
type Fetch struct {
	ctx                      context.Context // Context used for cancellation and closing of the fetcher
	pool                     Pool            // Transaction pool implementation
	coreDB                   kv.RoDB
	db                       kv.RwDB
	stateChangesClient       StateChangesClient
	wg                       *sync.WaitGroup // used for synchronisation in the tests (nil when not in tests)
	stateChangesParseCtx     *types2.TxParseContext
	pooledTxsParseCtx        *types2.TxParseContext
	sentryClients            []direct.SentryClient // sentry clients that will be used for accessing the network
	stateChangesParseCtxLock sync.Mutex
	pooledTxsParseCtxLock    sync.Mutex
	logger                   log.Logger
}

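// StateChangesClient is the narrow subset of the remote KV client that Fetch needs:
// a subscription to state-change (new block and unwind) notifications. Keeping it as
// an interface makes it straightforward to substitute a mock in tests.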
type StateChangesClient interface {
	StateChanges(ctx context.Context, in *remote.StateChangeRequest, opts ...grpc.CallOption) (remote.KV_StateChangesClient, error)
}

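// Typical wiring, as a minimal sketch (illustrative only: sentryClients, pool,
// stateChangesClient, coreDB, txPoolDB, chainID and logger are assumed to be
// constructed elsewhere):
//
//	f := NewFetch(ctx, sentryClients, pool, stateChangesClient, coreDB, txPoolDB, chainID, logger)
//	f.ConnectSentries() // start per-sentry message and peer-event loops
//	f.ConnectCore()     // start the state-changes loop
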
// NewFetch creates a new fetch object that will work with the given sentry clients. Since the
// SentryClient here is an interface, it is suitable for mocking in tests (the mock will need
// to implement all the functions of the SentryClient interface).
func NewFetch(ctx context.Context, sentryClients []direct.SentryClient, pool Pool, stateChangesClient StateChangesClient, coreDB kv.RoDB, db kv.RwDB,
	chainID uint256.Int, logger log.Logger) *Fetch {
	f := &Fetch{
		ctx:                  ctx,
		sentryClients:        sentryClients,
		pool:                 pool,
		coreDB:               coreDB,
		db:                   db,
		stateChangesClient:   stateChangesClient,
		stateChangesParseCtx: types2.NewTxParseContext(chainID).ChainIDRequired(), //TODO: change ctx if rules changed
		pooledTxsParseCtx:    types2.NewTxParseContext(chainID).ChainIDRequired(),
		logger:               logger,
	}
	f.pooledTxsParseCtx.ValidateRLP(f.pool.ValidateSerializedTxn)
	f.stateChangesParseCtx.ValidateRLP(f.pool.ValidateSerializedTxn)

	return f
}

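// SetWaitGroup installs a wait group used only by tests: the receive loops call
// Done on it once per processed message, peer event or state-change batch, which
// lets tests block until an item has been fully handled.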
func (f *Fetch) SetWaitGroup(wg *sync.WaitGroup) {
	f.wg = wg
}

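// threadSafeParsePooledTxn serialises use of pooledTxsParseCtx: a TxParseContext
// carries mutable internal state and is not safe for concurrent use, so all parsing
// of pooled transactions runs under this lock.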
func (f *Fetch) threadSafeParsePooledTxn(cb func(*types2.TxParseContext) error) error {
	f.pooledTxsParseCtxLock.Lock()
	defer f.pooledTxsParseCtxLock.Unlock()
	return cb(f.pooledTxsParseCtx)
}

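// threadSafeParseStateChangeTxn does the same for stateChangesParseCtx, which is
// used when parsing transactions delivered in state-change notifications.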
func (f *Fetch) threadSafeParseStateChangeTxn(cb func(*types2.TxParseContext) error) error {
	f.stateChangesParseCtxLock.Lock()
	defer f.stateChangesParseCtxLock.Unlock()
	return cb(f.stateChangesParseCtx)
}

// ConnectSentries initialises connections to all sentries: for each sentry client it
// starts one goroutine receiving inbound messages and one receiving peer events.
func (f *Fetch) ConnectSentries() {
	for i := range f.sentryClients {
		go func(i int) {
			f.receiveMessageLoop(f.sentryClients[i])
		}(i)
		go func(i int) {
			f.receivePeerLoop(f.sentryClients[i])
		}(i)
	}
}

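// ConnectCore starts a goroutine that subscribes to state changes coming from the
// core (new blocks and unwinds) and feeds them to the pool, retrying after a short
// pause when the stream ends or the server asks to retry later.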
func (f *Fetch) ConnectCore() {
	go func() {
		for {
			select {
			case <-f.ctx.Done():
				return
			default:
			}
			if err := f.handleStateChanges(f.ctx, f.stateChangesClient); err != nil {
				if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
					time.Sleep(3 * time.Second)
					continue
				}
				f.logger.Warn("[txpool.handleStateChanges]", "err", err)
			}
		}
	}()
}

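// receiveMessageLoop keeps one sentry subscribed for transaction messages: it first
// handshakes (waiting until the sentry is ready), then receives and handles inbound
// messages until the Fetch context is cancelled, backing off on transient gRPC errors.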
func (f *Fetch) receiveMessageLoop(sentryClient sentry.SentryClient) {
	for {
		select {
		case <-f.ctx.Done():
			return
		default:
		}
		if _, err := sentryClient.HandShake(f.ctx, &emptypb.Empty{}, grpc.WaitForReady(true)); err != nil {
			if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
				time.Sleep(3 * time.Second)
				continue
			}
			// Report error and wait more
			f.logger.Warn("[txpool.recvMessage] sentry not ready yet", "err", err)
			continue
		}

		if err := f.receiveMessage(f.ctx, sentryClient); err != nil {
			if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
				time.Sleep(3 * time.Second)
				continue
			}
			f.logger.Warn("[txpool.recvMessage]", "err", err)
		}
	}
}

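// receiveMessage opens a stream of transaction-related messages (the eth/66 set plus
// the eth/68 announcement) from the given sentry and dispatches every received
// message to handleInboundMessage until the stream or the context ends.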
func (f *Fetch) receiveMessage(ctx context.Context, sentryClient sentry.SentryClient) error {
	streamCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	stream, err := sentryClient.Messages(streamCtx, &sentry.MessagesRequest{Ids: []sentry.MessageId{
		sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66,
		sentry.MessageId_GET_POOLED_TRANSACTIONS_66,
		sentry.MessageId_TRANSACTIONS_66,
		sentry.MessageId_POOLED_TRANSACTIONS_66,
		sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68,
	}}, grpc.WaitForReady(true))
	if err != nil {
		select {
		case <-f.ctx.Done():
			return ctx.Err()
		default:
		}
		return err
	}

	var req *sentry.InboundMessage
	for req, err = stream.Recv(); ; req, err = stream.Recv() {
		if err != nil {
			select {
			case <-f.ctx.Done():
				return ctx.Err()
			default:
			}
			return err
		}
		if req == nil {
			return nil
		}
		if err := f.handleInboundMessage(streamCtx, req, sentryClient); err != nil {
			if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
				time.Sleep(3 * time.Second)
				continue
			}
			f.logger.Debug("[txpool.fetch] Handling incoming message", "msg", req.Id.String(), "err", err)
		}
		if f.wg != nil {
			f.wg.Done()
		}
	}
}

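// handleInboundMessage processes a single p2p message: hash announcements trigger a
// GetPooledTransactions request for hashes unknown to the pool, GetPooledTransactions
// is answered from the pool's RLP store, and transaction payloads are parsed and added
// to the pool as remote transactions. The deferred recover converts panics during
// parsing into errors that include a stack trace and the offending RLP.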
func (f *Fetch) handleInboundMessage(ctx context.Context, req *sentry.InboundMessage, sentryClient sentry.SentryClient) (err error) {
	defer func() {
		if rec := recover(); rec != nil {
			err = fmt.Errorf("%+v, trace: %s, rlp: %x", rec, dbg.Stack(), req.Data)
		}
	}()

	if !f.pool.Started() {
		return nil
	}
	tx, err := f.db.BeginRo(ctx)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	switch req.Id {
	case sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66:
		hashCount, pos, err := types2.ParseHashesCount(req.Data, 0)
		if err != nil {
			return fmt.Errorf("parsing NewPooledTransactionHashes: %w", err)
		}
		hashes := make([]byte, 32*hashCount)
		for i := 0; i < len(hashes); i += 32 {
			if _, pos, err = types2.ParseHash(req.Data, pos, hashes[i:]); err != nil {
				return err
			}
		}
		unknownHashes, err := f.pool.FilterKnownIdHashes(tx, hashes)
		if err != nil {
			return err
		}
		if len(unknownHashes) > 0 {
			var encodedRequest []byte
			var messageID sentry.MessageId
			if encodedRequest, err = types2.EncodeGetPooledTransactions66(unknownHashes, uint64(1), nil); err != nil {
				return err
			}
			messageID = sentry.MessageId_GET_POOLED_TRANSACTIONS_66
			if _, err = sentryClient.SendMessageById(f.ctx, &sentry.SendMessageByIdRequest{
				Data:   &sentry.OutboundMessageData{Id: messageID, Data: encodedRequest},
				PeerId: req.PeerId,
			}, &grpc.EmptyCallOption{}); err != nil {
				return err
			}
		}
	case sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_68:
		_, _, hashes, _, err := rlp.ParseAnnouncements(req.Data, 0)
		if err != nil {
			return fmt.Errorf("parsing NewPooledTransactionHashes68: %w", err)
		}
		unknownHashes, err := f.pool.FilterKnownIdHashes(tx, hashes)
		if err != nil {
			return err
		}

		if len(unknownHashes) > 0 {
			var encodedRequest []byte
			var messageID sentry.MessageId
			if encodedRequest, err = types2.EncodeGetPooledTransactions66(unknownHashes, uint64(1), nil); err != nil {
				return err
			}
			messageID = sentry.MessageId_GET_POOLED_TRANSACTIONS_66
			if _, err = sentryClient.SendMessageById(f.ctx, &sentry.SendMessageByIdRequest{
				Data:   &sentry.OutboundMessageData{Id: messageID, Data: encodedRequest},
				PeerId: req.PeerId,
			}, &grpc.EmptyCallOption{}); err != nil {
				return err
			}
		}
	case sentry.MessageId_GET_POOLED_TRANSACTIONS_66:
		//TODO: handleInboundMessage is single-threaded - means it can accept as argument couple buffers (or analog of txParseContext). Protobuf encoding will copy data anyway, but DirectClient doesn't
		var encodedRequest []byte
		var messageID sentry.MessageId
		messageID = sentry.MessageId_POOLED_TRANSACTIONS_66
		requestID, hashes, _, err := types2.ParseGetPooledTransactions66(req.Data, 0, nil)
		if err != nil {
			return err
		}
		var txs [][]byte
		for i := 0; i < len(hashes); i += 32 {
			txn, err := f.pool.GetRlp(tx, hashes[i:i+32])
			if err != nil {
				return err
			}
			if txn == nil {
				continue
			}
			txs = append(txs, txn)
		}
		encodedRequest = types2.EncodePooledTransactions66(txs, requestID, nil)

		if _, err := sentryClient.SendMessageById(f.ctx, &sentry.SendMessageByIdRequest{
			Data:   &sentry.OutboundMessageData{Id: messageID, Data: encodedRequest},
			PeerId: req.PeerId,
		}, &grpc.EmptyCallOption{}); err != nil {
			return err
		}
	case sentry.MessageId_POOLED_TRANSACTIONS_66, sentry.MessageId_TRANSACTIONS_66:
		txs := types2.TxSlots{}

		switch req.Id {
		case sentry.MessageId_TRANSACTIONS_66:
			if err := f.threadSafeParsePooledTxn(func(parseContext *types2.TxParseContext) error {
				if _, err := types2.ParseTransactions(req.Data, 0, parseContext, &txs, func(hash []byte) error {
					known, err := f.pool.IdHashKnown(tx, hash)
					if err != nil {
						return err
					}
					if known {
						return types2.ErrRejected
					}
					return nil
				}); err != nil {
					return err
				}
				return nil
			}); err != nil {
				return err
			}
		case sentry.MessageId_POOLED_TRANSACTIONS_66:
			if err := f.threadSafeParsePooledTxn(func(parseContext *types2.TxParseContext) error {
				if _, _, err := types2.ParsePooledTransactions66(req.Data, 0, parseContext, &txs, func(hash []byte) error {
					known, err := f.pool.IdHashKnown(tx, hash)
					if err != nil {
						return err
					}
					if known {
						return types2.ErrRejected
					}
					return nil
				}); err != nil {
					return err
				}
				return nil
			}); err != nil {
				return err
			}
		default:
			return fmt.Errorf("unexpected message: %s", req.Id.String())
		}
		if len(txs.Txs) == 0 {
			return nil
		}
		f.pool.AddRemoteTxs(ctx, txs)
	default:
		f.logger.Trace("[txpool] dropped p2p message", "id", req.Id)
	}

	return nil
}

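// receivePeerLoop keeps one sentry subscribed for peer events, handshaking first and
// backing off on transient gRPC errors, until the Fetch context is cancelled.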
func (f *Fetch) receivePeerLoop(sentryClient sentry.SentryClient) {
	for {
		select {
		case <-f.ctx.Done():
			return
		default:
		}
		if _, err := sentryClient.HandShake(f.ctx, &emptypb.Empty{}, grpc.WaitForReady(true)); err != nil {
			if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
				time.Sleep(3 * time.Second)
				continue
			}
			// Report error and wait more
			f.logger.Warn("[txpool.recvPeers] sentry not ready yet", "err", err)
			time.Sleep(time.Second)
			continue
		}
		if err := f.receivePeer(sentryClient); err != nil {
			if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) {
				time.Sleep(3 * time.Second)
				continue
			}

			f.logger.Warn("[txpool.recvPeers]", "err", err)
		}
	}
}

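// receivePeer streams peer events from the sentry and forwards each one to
// handleNewPeer until the stream or the context ends.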
func (f *Fetch) receivePeer(sentryClient sentry.SentryClient) error {
	streamCtx, cancel := context.WithCancel(f.ctx)
	defer cancel()

	stream, err := sentryClient.PeerEvents(streamCtx, &sentry.PeerEventsRequest{})
	if err != nil {
		select {
		case <-f.ctx.Done():
			return f.ctx.Err()
		default:
		}
		return err
	}

	var req *sentry.PeerEvent
	for req, err = stream.Recv(); ; req, err = stream.Recv() {
		if err != nil {
			return err
		}
		if req == nil {
			return nil
		}
		if err = f.handleNewPeer(req); err != nil {
			return err
		}
		if f.wg != nil {
			f.wg.Done()
		}
	}
}

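// handleNewPeer registers every newly connected peer with the pool (AddNewGoodPeer)
// so the pool can consider it when propagating transactions; other peer events,
// such as disconnects, are ignored.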
func (f *Fetch) handleNewPeer(req *sentry.PeerEvent) error {
	if req == nil {
		return nil
	}
	switch req.EventId {
	case sentry.PeerEvent_Connect:
		f.pool.AddNewGoodPeer(req.PeerId)
	}

	return nil
}

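// handleStateChanges subscribes to batches of state changes from the core and
// re-parses the transactions they carry: mined (FORWARD) transactions so the pool can
// evict them, and unwound (UNWIND) transactions so they can be re-injected. Unwound
// blob transactions are recovered from the pool's cache and skipped when not cached
// (the parse uses wrappedWithBlobs=false, so the event payload does not include
// blobs). Each batch is handed to the pool via OnNewBlock.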
func (f *Fetch) handleStateChanges(ctx context.Context, client StateChangesClient) error {
	streamCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	stream, err := client.StateChanges(streamCtx, &remote.StateChangeRequest{WithStorage: false, WithTransactions: true}, grpc.WaitForReady(true))
	if err != nil {
		return err
	}
	tx, err := f.db.BeginRo(ctx)
	if err != nil {
		return err
	}
	defer tx.Rollback()
	for req, err := stream.Recv(); ; req, err = stream.Recv() {
		if err != nil {
			return err
		}
		if req == nil {
			return nil
		}

		var unwindTxs, minedTxs types2.TxSlots
		for _, change := range req.ChangeBatch {
			if change.Direction == remote.Direction_FORWARD {
				minedTxs.Resize(uint(len(change.Txs)))
				for i := range change.Txs {
					minedTxs.Txs[i] = &types2.TxSlot{}
					if err = f.threadSafeParseStateChangeTxn(func(parseContext *types2.TxParseContext) error {
						_, err := parseContext.ParseTransaction(change.Txs[i], 0, minedTxs.Txs[i], minedTxs.Senders.At(i), false /* hasEnvelope */, false /* wrappedWithBlobs */, nil)
						return err
					}); err != nil && !errors.Is(err, context.Canceled) {
						f.logger.Warn("[txpool.handleStateChanges] parse mined transaction", "err", err)
						continue
					}
				}
			}
			if change.Direction == remote.Direction_UNWIND {
				for i := range change.Txs {
					if err = f.threadSafeParseStateChangeTxn(func(parseContext *types2.TxParseContext) error {
						utx := &types2.TxSlot{}
						sender := make([]byte, 20)
						_, err2 := parseContext.ParseTransaction(change.Txs[i], 0, utx, sender, false /* hasEnvelope */, false /* wrappedWithBlobs */, nil)
						if err2 != nil {
							return err2
						}
						if utx.Type == types2.BlobTxType {
							knownBlobTxn, err2 := f.pool.GetKnownBlobTxn(tx, utx.IDHash[:])
							if err2 != nil {
								return err2
							}
							// Get the blob tx from cache; ignore altogether if it isn't there
							if knownBlobTxn != nil {
								unwindTxs.Append(knownBlobTxn.Tx, sender, false)
							}
						} else {
							unwindTxs.Append(utx, sender, false)
						}
						return nil // success; returning the captured outer err here could leak a stale error
					}); err != nil && !errors.Is(err, context.Canceled) {
						f.logger.Warn("[txpool.handleStateChanges] parse unwound transaction", "err", err)
						continue
					}
				}
			}
		}

		if err := f.db.View(ctx, func(tx kv.Tx) error {
			return f.pool.OnNewBlock(ctx, req, unwindTxs, minedTxs, tx)
		}); err != nil && !errors.Is(err, context.Canceled) {
			f.logger.Warn("onNewBlock", "err", err)
		}
		if f.wg != nil {
			f.wg.Done()
		}
	}
}