github.com/amazechain/amc@v0.1.3/internal/sync/rpc_send_request.go

package sync

import (
	"context"
	"io"

	"github.com/amazechain/amc/api/protocol/sync_pb"
	"github.com/amazechain/amc/api/protocol/types_pb"
	"github.com/amazechain/amc/common"
	"github.com/amazechain/amc/internal/p2p"
	"github.com/amazechain/amc/utils"
	"github.com/holiman/uint256"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/pkg/errors"
)

// ErrInvalidFetchedData is returned when the stream fails to provide the requested blocks.
var ErrInvalidFetchedData = errors.New("invalid data returned from peer")

// BlockProcessor defines a block processing function, which allows fetched
// blocks to be consumed as they arrive, before the full response has been read.
type BlockProcessor func(block *types_pb.Block) error

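// A caller-supplied BlockProcessor might look like the sketch below; the
// insertBlock helper is hypothetical and stands in for whatever the caller
// does with each streamed block:
//
//	processor := func(blk *types_pb.Block) error {
//		return insertBlock(blk)
//	}
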
// SendBodiesByRangeRequest sends a BodiesByRange request and returns the fetched blocks, if any.
// If a non-nil blockProcessor is supplied, it is invoked for each block as it is read from the stream.
func SendBodiesByRangeRequest(ctx context.Context, chain common.IBlockChain, p2pProvider p2p.SenderEncoder, pid peer.ID, req *sync_pb.BodiesByRangeRequest, blockProcessor BlockProcessor) ([]*types_pb.Block, error) {
	topic, err := p2p.TopicFromMessage(p2p.BodiesByRangeMessageName)
	if err != nil {
		return nil, err
	}
	//todo
	// The start block number is round-tripped through uint256 before being
	// re-encoded into the outgoing request.
	stream, err := p2pProvider.Send(ctx, &sync_pb.BodiesByRangeRequest{
		StartBlockNumber: utils.ConvertUint256IntToH256(utils.ConvertH256ToUint256Int(req.StartBlockNumber)),
		Count:            req.Count,
		Step:             req.Step,
	}, topic, pid)

	if err != nil {
		return nil, err
	}
	defer closeStream(stream)

	// Accumulate the fetched blocks and, if a non-nil block processor was
	// provided, invoke it for each block as it arrives.
	blocks := make([]*types_pb.Block, 0, req.Count)
	process := func(blk *types_pb.Block) error {
		blocks = append(blocks, blk)
		if blockProcessor != nil {
			return blockProcessor(blk)
		}
		return nil
	}
	// prevBlockNr tracks the previously received block number so ordering and
	// step constraints can be checked against the requested range.
	var prevBlockNr *uint256.Int
	blockStart := utils.ConvertH256ToUint256Int(req.StartBlockNumber)
	for i := uint64(0); ; i++ {
		isFirstChunk := i == 0
		blk, err := ReadChunkedBlock(stream, p2pProvider, isFirstChunk)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, err
		}
		// The response MUST contain no more than `count` blocks, and no more
		// than maxRequestBlocks blocks.
		if i >= req.Count || i >= maxRequestBlocks {
			return nil, ErrInvalidFetchedData
		}
		blockNr := utils.ConvertH256ToUint256Int(blk.Header.Number)
		// Returned blocks MUST be in the block number range [start, start + count * step).
		if blockNr.Cmp(blockStart) == -1 || blockNr.Cmp(new(uint256.Int).AddUint64(blockStart, req.Count*req.Step)) >= 0 {
			return nil, ErrInvalidFetchedData
		}
		// Returned blocks, where they exist, MUST be sent in consecutive order.
		// Consecutive blocks MUST have numbers in `step` increments (block numbers may be skipped in between).
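		// For example, with start=100, count=4 and step=2, the expected block
		// numbers are drawn from 100, 102, 104 and 106 (skips are allowed),
		// and they must arrive in increasing order.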
		isSlotOutOfOrder := false
		if prevBlockNr != nil && prevBlockNr.Cmp(blockNr) >= 0 {
			isSlotOutOfOrder = true
		} else if prevBlockNr != nil && req.Step != 0 && new(uint256.Int).Mod(new(uint256.Int).Sub(blockNr, prevBlockNr), uint256.NewInt(req.Step)).Uint64() != 0 {
			isSlotOutOfOrder = true
		}
		if !isFirstChunk && isSlotOutOfOrder {
			return nil, ErrInvalidFetchedData
		}
		prevBlockNr = blockNr.Clone()
		if err := process(blk); err != nil {
			return nil, err
		}
	}

	return blocks, nil
}
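
// A hedged usage sketch (ctx, chain, host and pid are placeholders supplied by
// the caller, and the request values are purely illustrative):
//
//	req := &sync_pb.BodiesByRangeRequest{
//		StartBlockNumber: utils.ConvertUint256IntToH256(uint256.NewInt(100)),
//		Count:            64,
//		Step:             1,
//	}
//	blocks, err := SendBodiesByRangeRequest(ctx, chain, host, pid, req, func(blk *types_pb.Block) error {
//		// handle each block as it streams in, e.g. queue it for verification
//		return nil
//	})
//	if err != nil {
//		// the peer sent invalid or out-of-range data, or the stream failed
//	}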