github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/network/test/cohort2/blob_service_test.go (about)

     1  package cohort2
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"testing"
     7  	"time"
     8  
     9  	"github.com/ipfs/boxo/blockstore"
    10  	"github.com/ipfs/go-cid"
    11  	"github.com/ipfs/go-datastore"
    12  	"github.com/ipfs/go-datastore/sync"
    13  	"github.com/stretchr/testify/require"
    14  	"github.com/stretchr/testify/suite"
    15  	"go.uber.org/atomic"
    16  
    17  	p2pbuilderconfig "github.com/onflow/flow-go/network/p2p/builder/config"
    18  	"github.com/onflow/flow-go/network/p2p/connection"
    19  	"github.com/onflow/flow-go/network/p2p/dht"
    20  	p2ptest "github.com/onflow/flow-go/network/p2p/test"
    21  	"github.com/onflow/flow-go/network/underlay"
    22  	"github.com/onflow/flow-go/utils/unittest"
    23  
    24  	"github.com/onflow/flow-go/model/flow"
    25  	"github.com/onflow/flow-go/module/blobs"
    26  	"github.com/onflow/flow-go/module/irrecoverable"
    27  	"github.com/onflow/flow-go/module/util"
    28  	"github.com/onflow/flow-go/network"
    29  	"github.com/onflow/flow-go/network/channels"
    30  	"github.com/onflow/flow-go/network/internal/testutils"
    31  )
    32  
// conditionalTopology is a topology that behaves like the underlying topology when the condition is true,
// otherwise returns an empty identity list. This lets a test keep nodes disconnected
// until some setup step has completed, then flip the condition to allow connections.
type conditionalTopology struct {
	top       network.Topology // delegate topology used while condition() is true
	condition func() bool      // gate evaluated on every Fanout call
}

var _ network.Topology = (*conditionalTopology)(nil)
    41  
    42  func (t *conditionalTopology) Fanout(ids flow.IdentityList) flow.IdentityList {
    43  	if t.condition() {
    44  		return t.top.Fanout(ids)
    45  	} else {
    46  		return flow.IdentityList{}
    47  	}
    48  }
    49  
// BlobServiceTestSuite exercises blob exchange between a small set of
// in-process networks, each backed by its own datastore and blob service.
type BlobServiceTestSuite struct {
	suite.Suite

	cancel       context.CancelFunc    // stops all nodes/networks started in SetupTest
	networks     []*underlay.Network   // one network per node
	blobServices []network.BlobService // blob service registered on each network
	datastores   []datastore.Batching  // per-node backing datastore
	blobCids     []cid.Cid             // CID of the blob seeded on each node (index-aligned)
	numNodes     int                   // number of nodes in the fixture
}
    60  
    61  func TestBlobService(t *testing.T) {
    62  	suite.Run(t, new(BlobServiceTestSuite))
    63  }
    64  
    65  func (suite *BlobServiceTestSuite) putBlob(ds datastore.Batching, blob blobs.Blob) {
    66  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    67  	defer cancel()
    68  	suite.Require().NoError(blockstore.NewBlockstore(ds).Put(ctx, blob))
    69  }
    70  
// SetupTest starts numNodes libp2p nodes and networks, registers a blob
// service on each, seeds every node's datastore with one unique blob, and
// finally waits until the nodes form a fully connected mesh.
func (suite *BlobServiceTestSuite) SetupTest() {
	suite.numNodes = 3

	// Bitswap listens to connect events but doesn't iterate over existing connections, and fixing this without
	// race conditions is tricky given the way the code is architected. As a result, libP2P hosts must first listen
	// on Bitswap before connecting to each other, otherwise their Bitswap requests may never reach each other.
	// See https://github.com/ipfs/go-bitswap/issues/525 for more details.
	topologyActive := atomic.NewBool(false)

	ctx, cancel := context.WithCancel(context.Background())
	suite.cancel = cancel

	// signaler context surfaces irrecoverable errors as test failures
	signalerCtx := irrecoverable.NewMockSignalerContext(suite.T(), ctx)

	sporkId := unittest.IdentifierFixture()
	// execution-role nodes with DHT server mode and an aggressive peer manager
	// (1s update interval) so the mesh forms quickly once allowed
	ids, nodes := testutils.LibP2PNodeForNetworkFixture(
		suite.T(),
		sporkId,
		suite.numNodes,
		p2ptest.WithRole(flow.RoleExecution),
		p2ptest.WithDHTOptions(dht.AsServer()),
		p2ptest.WithPeerManagerEnabled(
			&p2pbuilderconfig.PeerManagerConfig{
				UpdateInterval:    1 * time.Second,
				ConnectionPruning: true,
				ConnectorFactory:  connection.DefaultLibp2pBackoffConnectorFactory(),
			}, nil))

	suite.networks, _ = testutils.NetworksFixture(suite.T(), sporkId, ids, nodes)
	// starts the nodes and networks
	testutils.StartNodes(signalerCtx, suite.T(), nodes)
	for _, net := range suite.networks {
		testutils.StartNetworks(signalerCtx, suite.T(), []network.EngineRegistry{net})
		unittest.RequireComponentsReadyBefore(suite.T(), 1*time.Second, net)
	}

	blobExchangeChannel := channels.Channel("blob-exchange")

	// register a blob service per network and seed each node's datastore with
	// a unique blob ("foo<i>"); blobCids[i] is the CID held by node i
	for i, net := range suite.networks {
		ds := sync.MutexWrap(datastore.NewMapDatastore())
		suite.datastores = append(suite.datastores, ds)
		blob := blobs.NewBlob([]byte(fmt.Sprintf("foo%v", i)))
		suite.blobCids = append(suite.blobCids, blob.Cid())
		suite.putBlob(ds, blob)
		blobService, err := net.RegisterBlobService(blobExchangeChannel, ds)
		suite.Require().NoError(err)
		unittest.RequireCloseBefore(suite.T(), blobService.Ready(), 100*time.Millisecond, "blob service not ready")
		suite.blobServices = append(suite.blobServices, blobService)
	}

	// let nodes connect to each other only after they are all listening on Bitswap
	// NOTE(review): topologyActive is stored but never read in this file, and
	// conditionalTopology is never constructed here — the gating appears
	// vestigial; confirm against the fixture helpers before removing.
	topologyActive.Store(true)
	// wait until every pair of nodes is connected (full mesh)
	suite.Require().Eventually(
		func() bool {
			for i, libp2pNode := range nodes {
				for j := i + 1; j < suite.numNodes; j++ {
					connected, err := libp2pNode.IsConnected(nodes[j].ID())
					require.NoError(suite.T(), err)
					if !connected {
						return false
					}
				}
			}
			return true
		}, 3*time.Second, 100*time.Millisecond)
}
   137  
   138  func (suite *BlobServiceTestSuite) TearDownTest() {
   139  	suite.cancel()
   140  
   141  	netDoneChans := make([]<-chan struct{}, len(suite.networks))
   142  	for i, net := range suite.networks {
   143  		netDoneChans[i] = net.Done()
   144  	}
   145  	<-util.AllClosed(netDoneChans...)
   146  
   147  	suite.networks = nil
   148  	suite.cancel = nil
   149  	suite.blobServices = nil
   150  	suite.datastores = nil
   151  	suite.blobCids = nil
   152  }
   153  
   154  func (suite *BlobServiceTestSuite) TestGetBlobs() {
   155  	for i, bex := range suite.blobServices {
   156  		// check that we can get all other blobs
   157  		var blobsToGet []cid.Cid
   158  		unreceivedBlobs := make(map[cid.Cid]struct{})
   159  		for j, blobCid := range suite.blobCids {
   160  			if j != i {
   161  				blobsToGet = append(blobsToGet, blobCid)
   162  				unreceivedBlobs[blobCid] = struct{}{}
   163  			}
   164  		}
   165  
   166  		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
   167  		defer cancel()
   168  
   169  		blobs := bex.GetBlobs(ctx, blobsToGet)
   170  
   171  		for blob := range blobs {
   172  			delete(unreceivedBlobs, blob.Cid())
   173  		}
   174  
   175  		for c := range unreceivedBlobs {
   176  			suite.T().Errorf("Blob %v not received by node %v", c, i)
   177  		}
   178  	}
   179  }
   180  
   181  func (suite *BlobServiceTestSuite) TestGetBlobsWithSession() {
   182  	for i, bex := range suite.blobServices {
   183  		// check that we can get all other blobs in a single session
   184  		blobsToGet := make(map[cid.Cid]struct{})
   185  		for j, blobCid := range suite.blobCids {
   186  			if j != i {
   187  				blobsToGet[blobCid] = struct{}{}
   188  			}
   189  		}
   190  		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
   191  		defer cancel()
   192  		session := bex.GetSession(ctx)
   193  		for blobCid := range blobsToGet {
   194  			_, err := session.GetBlob(ctx, blobCid)
   195  			suite.Assert().NoError(err)
   196  		}
   197  	}
   198  }
   199  
// TestHas checks that pending GetBlobs requests are only satisfied after the
// serving side announces the blob via AddBlob: first it verifies nothing is
// delivered, then adds the blobs and verifies delivery.
func (suite *BlobServiceTestSuite) TestHas() {
	var blobChans []<-chan blobs.Blob
	unreceivedBlobs := make([]map[cid.Cid]struct{}, len(suite.blobServices))

	for i, bex := range suite.blobServices {
		unreceivedBlobs[i] = make(map[cid.Cid]struct{})
		// check that peers are notified when we have a new blob
		var blobsToGet []cid.Cid
		for j := 0; j < suite.numNodes; j++ {
			if j != i {
				// NOTE(review): the blob content is built from i, not j, so every
				// CID requested by node i is identical — and equals the blob node i
				// itself adds below. Suspected typo for "bar%v" of j; as written the
				// test may exercise local delivery rather than peer notification.
				// Confirm intent before changing.
				blob := blobs.NewBlob([]byte(fmt.Sprintf("bar%v", i)))
				blobsToGet = append(blobsToGet, blob.Cid())
				unreceivedBlobs[i][blob.Cid()] = struct{}{}
			}
		}

		// these contexts must stay alive past this loop: the returned channels
		// are consumed further below, so deferring cancel to function return is
		// intentional here (not the usual defer-in-loop mistake)
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		blobs := bex.GetBlobs(ctx, blobsToGet)
		blobChans = append(blobChans, blobs)
	}

	// check that blobs are not received until Has is called by the server
	suite.Require().Never(
		func() bool {
			for _, blobChan := range blobChans {
				select {
				case _, ok := <-blobChan:
					if ok {
						return true
					}
				default:
				}
			}
			return false
		}, time.Second, 100*time.Millisecond)

	// each node now adds (announces) its "bar<i>" blob
	for i, bex := range suite.blobServices {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		err := bex.AddBlob(ctx, blobs.NewBlob([]byte(fmt.Sprintf("bar%v", i))))
		suite.Require().NoError(err)
	}

	// drain each channel (closes on completion or context timeout) and report
	// any blob that never arrived
	for i, blobs := range blobChans {
		for blob := range blobs {
			delete(unreceivedBlobs[i], blob.Cid())
		}
		for c := range unreceivedBlobs[i] {
			suite.T().Errorf("blob %v not received by node %v", c, i)
		}
	}
}