github.com/koko1123/flow-go-1@v0.29.6/network/stub/network.go

package stub

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/koko1123/flow-go-1/model/flow"
	"github.com/koko1123/flow-go-1/network"
	"github.com/koko1123/flow-go-1/network/channels"
	"github.com/koko1123/flow-go-1/network/mocknetwork"
	"github.com/koko1123/flow-go-1/network/p2p/conduit"
)

// Network is a mocked Network layer made for testing engine behavior.
// It represents the Network layer of a single node. A node can attach several of its
// engines to the Network, enabling them to send and receive messages.
// When an engine is attached to a Network instance, the mocked Network delivers
// all of the engine's events to the others using an in-memory delivery mechanism.
type Network struct {
	mocknetwork.Network
	ctx context.Context
	sync.Mutex
	myId           flow.Identifier                               // used to represent information of the attached node.
	hub            *Hub                                          // used to attach Network layers of nodes together.
	engines        map[channels.Channel]network.MessageProcessor // used to keep track of attached engines of the node.
	seenEventIDs   map[string]struct{}                           // used to keep track of event IDs seen by attached engines.
	qCD            chan struct{}                                 // used to stop continuous delivery mode of the Network.
	conduitFactory network.ConduitFactory
}

// WithConduitFactory overrides the default ConduitFactory that NewNetwork installs.
func WithConduitFactory(factory network.ConduitFactory) func(*Network) {
	return func(n *Network) {
		n.conduitFactory = factory
	}
}
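
// exampleCustomFactory is a usage sketch, not part of the original file: it
// shows the functional option above in use. Here the "custom" factory is just
// the default one again; a real test would pass its own network.ConduitFactory
// implementation. The node ID is illustrative.
func exampleCustomFactory(t *testing.T, hub *Hub) *Network {
	myId := flow.Identifier{0xaa}                 // illustrative node ID
	factory := conduit.NewDefaultConduitFactory() // stand-in for a custom factory
	return NewNetwork(t, myId, hub, WithConduitFactory(factory))
}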

// NewNetwork creates a mocked Network for the node with identifier `myId` and
// registers it with the given hub, so that the mocked Networks of different
// nodes can find each other.
func NewNetwork(t testing.TB, myId flow.Identifier, hub *Hub, opts ...func(*Network)) *Network {
	net := &Network{
		ctx:            context.Background(),
		myId:           myId,
		hub:            hub,
		engines:        make(map[channels.Channel]network.MessageProcessor),
		seenEventIDs:   make(map[string]struct{}),
		qCD:            make(chan struct{}),
		conduitFactory: conduit.NewDefaultConduitFactory(),
	}

	for _, opt := range opts {
		opt(net)
	}

	// mocks the Start, Ready, and Done behavior of the Network.
	net.On("Start", mock.Anything).Return()
	ready := make(chan struct{})
	close(ready)
	net.On("Ready", mock.Anything).Return(func() <-chan struct{} {
		return ready
	})

	done := make(chan struct{})
	close(done)
	net.On("Done", mock.Anything).Return(func() <-chan struct{} {
		return done
	})

	require.NoError(t, net.conduitFactory.RegisterAdapter(net))

	// adds the Network to the hub so that Networks can find each other.
	hub.AddNetwork(net)
	return net
}
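
// The declarations below are a usage sketch, not part of the original file:
// they wire two stub Networks through one hub, register an engine on each, and
// flush the hub with DeliverAll. NewNetworkHub (assumed from this package) and
// channels.TestNetworkChannel (assumed from the channels package) are the only
// names taken from outside this file; recordingEngine is purely illustrative.

type recordingEngine struct {
	sync.Mutex
	events []interface{} // events seen by this engine, in arrival order
}

func (e *recordingEngine) Process(channel channels.Channel, originID flow.Identifier, event interface{}) error {
	e.Lock()
	defer e.Unlock()
	e.events = append(e.events, event)
	return nil
}

func exampleTwoNodeDelivery(t *testing.T) {
	hub := NewNetworkHub()
	aliceID, bobID := flow.Identifier{0x01}, flow.Identifier{0x02} // illustrative node IDs
	aliceNet := NewNetwork(t, aliceID, hub)
	bobNet := NewNetwork(t, bobID, hub)

	// each node registers an engine on the shared test channel
	con, err := aliceNet.Register(channels.TestNetworkChannel, &recordingEngine{})
	require.NoError(t, err)
	bobEngine := &recordingEngine{}
	_, err = bobNet.Register(channels.TestNetworkChannel, bobEngine)
	require.NoError(t, err)

	// Publish buffers the message on the hub; DeliverAll flushes it to bob.
	require.NoError(t, con.Publish("hello", bobID))
	aliceNet.DeliverAll(true) // true: block until bob's engine has processed the event
	require.Len(t, bobEngine.events, 1)
}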

// GetID returns the identity of the attached node.
func (n *Network) GetID() flow.Identifier {
	return n.myId
}

// Register registers an Engine of the attached node to the channel via a Conduit, and returns the
// Conduit instance.
func (n *Network) Register(channel channels.Channel, engine network.MessageProcessor) (network.Conduit, error) {
	n.Lock()
	defer n.Unlock()
	_, ok := n.engines[channel]
	if ok {
		return nil, errors.Errorf("channel already taken (%s)", channel)
	}

	c, err := n.conduitFactory.NewConduit(n.ctx, channel)
	if err != nil {
		return nil, fmt.Errorf("could not create a conduit on the channel: %w", err)
	}

	n.engines[channel] = engine

	return c, nil
}

// UnRegisterChannel detaches the engine currently registered on the given channel, if any.
func (n *Network) UnRegisterChannel(channel channels.Channel) error {
	n.Lock()
	defer n.Unlock()
	delete(n.engines, channel)
	return nil
}

// submit is called when an Engine attached to the channel sends an event to
// Engines attached to the same channel on one or more other nodes.
func (n *Network) submit(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error {
	m := &PendingMessage{
		From:      n.GetID(),
		Channel:   channel,
		Event:     event,
		TargetIDs: targetIDs,
	}

	n.buffer(m)

	return nil
}

// UnicastOnChannel is called when an Engine attached to the channel sends an event to a single target
// Engine attached to the same channel on another node.
func (n *Network) UnicastOnChannel(channel channels.Channel, event interface{}, targetID flow.Identifier) error {
	m := &PendingMessage{
		From:      n.GetID(),
		Channel:   channel,
		Event:     event,
		TargetIDs: []flow.Identifier{targetID},
	}

	n.buffer(m)
	return nil
}

// PublishOnChannel is called when an attached Engine sends an event to a group of Engines attached
// to the same channel on other nodes.
// In this test helper implementation, it uses the submit method under the hood.
func (n *Network) PublishOnChannel(channel channels.Channel, event interface{}, targetIDs ...flow.Identifier) error {

	if len(targetIDs) == 0 {
		return fmt.Errorf("publish found empty target ID list for the message")
	}

	return n.submit(channel, event, targetIDs...)
}

// MulticastOnChannel is called when an Engine attached to the channel sends an event to `num`
// Engines attached to the same channel on other nodes, sampled uniformly from `targetIDs`.
// In this test helper implementation, it uses the submit method under the hood.
func (n *Network) MulticastOnChannel(channel channels.Channel, event interface{}, num uint, targetIDs ...flow.Identifier) error {
	targetIDs = flow.Sample(num, targetIDs...)
	return n.submit(channel, event, targetIDs...)
}
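
// exampleMulticast is a usage sketch, not part of the original file: with a
// conduit obtained from Register, Multicast reaches a random subset of the
// candidate targets by routing through MulticastOnChannel above. The Conduit
// Multicast signature is assumed from the network package.
func exampleMulticast(t *testing.T, con network.Conduit, a, b, c flow.Identifier) {
	// deliver "ping" to 2 of the 3 candidate targets, chosen at random
	require.NoError(t, con.Multicast("ping", 2, a, b, c))
}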

// buffer saves the message into the pending buffer of the Network hub.
// Buffering a message imitates its transmission over an unreliable Network.
// Specifically, it emulates the process of dispatching the message out of the sender.
func (n *Network) buffer(msg *PendingMessage) {
	n.hub.Buffer.Save(msg)
}

// DeliverAll sends all pending messages to the receivers. The receivers
// might be triggered to forward messages to their peers, so this function will
// block until all receivers have done their forwarding, and there are no more
// messages in the Network to deliver.
func (n *Network) DeliverAll(syncOnProcess bool) {
	n.hub.Buffer.DeliverRecursive(func(m *PendingMessage) {
		_ = n.sendToAllTargets(m, syncOnProcess)
	})
}

// DeliverAllExcept flushes all pending messages in the buffer except
// those that satisfy the shouldDrop predicate function. All messages that
// satisfy the shouldDrop predicate are permanently dropped.
// The message receivers might be triggered to forward some messages to their peers,
// so this function will block until all receivers have done their forwarding,
// and there are no more messages in the Network to deliver.
//
// If syncOnProcess is true, the sender and receiver are synchronized on processing the message.
// Otherwise they sync on delivery of the message.
func (n *Network) DeliverAllExcept(syncOnProcess bool, shouldDrop func(*PendingMessage) bool) {
	n.hub.Buffer.DeliverRecursive(func(m *PendingMessage) {
		if shouldDrop(m) {
			return
		}
		_ = n.sendToAllTargets(m, syncOnProcess)
	})
}

// DeliverSome delivers all messages in the buffer that satisfy the
// shouldDeliver predicate. Any messages that are not delivered remain in the
// buffer.
//
// If syncOnProcess is true, the sender and receiver are synchronized on processing the message.
// Otherwise they sync on delivery of the message.
func (n *Network) DeliverSome(syncOnProcess bool, shouldDeliver func(*PendingMessage) bool) {
	n.hub.Buffer.Deliver(func(m *PendingMessage) bool {
		if shouldDeliver(m) {
			return n.sendToAllTargets(m, syncOnProcess) != nil
		}
		return false
	})
}
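
// exampleDropChannel is a usage sketch, not part of the original file:
// predicates let a test model lossy links. Here every message on
// channels.TestNetworkChannel (assumed from the channels package) is dropped,
// while everything else is flushed; the fields come from PendingMessage.
func exampleDropChannel(net *Network) {
	net.DeliverAllExcept(true, func(m *PendingMessage) bool {
		return m.Channel == channels.TestNetworkChannel // drop this channel's traffic
	})
}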

// sendToAllTargets sends a message to all of its targeted nodes that have not
// yet seen it.
// The syncOnProcess parameter defines whether the sender and receiver are synced over processing
// or delivery of the message.
// If syncOnProcess is true, sender and receiver are synced over processing of the message, i.e.,
// the method call blocks until the message is processed at the destination.
// If syncOnProcess is false, sender and receiver are synced over delivery of the message, i.e.,
// the method call returns once the message is delivered at the destination (and not necessarily processed).
func (n *Network) sendToAllTargets(m *PendingMessage, syncOnProcess bool) error {
	key, err := eventKey(m.From, m.Channel, m.Event)
	if err != nil {
		return fmt.Errorf("could not generate event key for event: %w", err)
	}

	for _, nodeID := range m.TargetIDs {
		// finds the Network of the targeted node
		receiverNetwork, exist := n.hub.GetNetwork(nodeID)
		if !exist {
			continue
		}

		// delivers the message to the engine registered on the receiver's Network
		err := receiverNetwork.processWithEngine(syncOnProcess, key, m)
		if err != nil {
			return fmt.Errorf("could not process message for nodeID: %v, %w", nodeID, err)
		}
	}
	return nil
}

func (n *Network) processWithEngine(syncOnProcess bool, key string, m *PendingMessage) error {
	n.Lock()
	defer n.Unlock()

	// checks if the given engine already received the event.
	// this prevents a node from receiving the same event twice.
	if _, ok := n.seenEventIDs[key]; ok {
		return nil
	}
	n.seenEventIDs[key] = struct{}{}

	receiverEngine, ok := n.engines[m.Channel]
	if !ok {
		return fmt.Errorf("could not find engine for channel: %v", m.Channel)
	}

	if syncOnProcess {
		// sender and receiver are synced over processing the message
		if err := receiverEngine.Process(m.Channel, m.From, m.Event); err != nil {
			return fmt.Errorf("receiver engine failed to process event (%v): %w", m.Event, err)
		}
	} else {
		// sender and receiver are synced over delivery of the message
		go func() {
			_ = receiverEngine.Process(m.Channel, m.From, m.Event)
		}()
	}
	return nil
}
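
// exampleDedup is a deduplication sketch, not part of the original file:
// because processWithEngine records one eventKey per (sender, channel, event),
// flushing the same published event twice still reaches each engine only once.
// The Conduit Publish signature is assumed from the network package, and
// eventKey is assumed to live elsewhere in this package.
func exampleDedup(t *testing.T, con network.Conduit, net *Network, target flow.Identifier) {
	require.NoError(t, con.Publish("dup", target)) // first copy
	require.NoError(t, con.Publish("dup", target)) // identical copy
	net.DeliverAll(true)                           // the target's engine processes "dup" once
}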

// StartConDev starts the continuous delivery mode of the Network.
// In this mode, the Network checks the hub's buffer on every `updateInterval`
// tick and delivers all pending messages; the `recursive` flag is forwarded to
// DeliverAll as its syncOnProcess argument.
func (n *Network) StartConDev(updateInterval time.Duration, recursive bool) {
	timer := time.NewTicker(updateInterval)

	wg := sync.WaitGroup{}
	wg.Add(1)

	go func() {
		// releases the ticker once delivery stops
		defer timer.Stop()
		// signals that the delivery goroutine has started
		wg.Done()
		for {
			select {
			case <-timer.C:
				n.DeliverAll(recursive)
			case <-n.qCD:
				// stops continuous delivery mode
				return
			}
		}
	}()

	// waits till the internal goroutine starts
	wg.Wait()
}
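
// exampleContinuousDelivery is a usage sketch, not part of the original file:
// continuous delivery flushes the hub periodically in the background, for
// tests that cannot call DeliverAll at deterministic points. StopConDev closes
// qCD, so it must be called exactly once per Network.
func exampleContinuousDelivery(net *Network) {
	net.StartConDev(100*time.Millisecond, true) // deliver pending messages every 100ms
	defer net.StopConDev()                      // stop the background delivery goroutine
	// ... drive the test workload while messages flow in the background ...
}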

// StopConDev stops the continuous delivery mode of the Network.
func (n *Network) StopConDev() {
	close(n.qCD)
}