github.com/koko1123/flow-go-1@v0.29.6/module/executiondatasync/provider/provider_test.go (about)

     1  package provider_test
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"crypto/rand"
     7  	"testing"
     8  	"time"
     9  
    10  	"github.com/ipfs/go-datastore"
    11  	dssync "github.com/ipfs/go-datastore/sync"
    12  	"github.com/rs/zerolog"
    13  	"github.com/stretchr/testify/assert"
    14  	"github.com/stretchr/testify/mock"
    15  	"github.com/stretchr/testify/require"
    16  	goassert "gotest.tools/assert"
    17  
    18  	"github.com/koko1123/flow-go-1/ledger"
    19  	"github.com/koko1123/flow-go-1/ledger/common/testutils"
    20  	"github.com/koko1123/flow-go-1/module/blobs"
    21  	"github.com/koko1123/flow-go-1/module/executiondatasync/execution_data"
    22  	"github.com/koko1123/flow-go-1/module/executiondatasync/provider"
    23  	mocktracker "github.com/koko1123/flow-go-1/module/executiondatasync/tracker/mock"
    24  	"github.com/koko1123/flow-go-1/module/metrics"
    25  	"github.com/koko1123/flow-go-1/network"
    26  	"github.com/koko1123/flow-go-1/network/mocknetwork"
    27  	"github.com/koko1123/flow-go-1/utils/unittest"
    28  )
    29  
// getDatastore returns a fresh in-memory datastore, wrapped with a mutex
// so it is safe for concurrent use by parallel tests.
func getDatastore() datastore.Batching {
	mapDS := datastore.NewMapDatastore()
	return dssync.MutexWrap(mapDS)
}
    33  
// getExecutionDataStore builds an ExecutionDataStore on top of the given
// datastore, using the package's default serializer.
func getExecutionDataStore(ds datastore.Batching) execution_data.ExecutionDataStore {
	blobstore := blobs.NewBlobstore(ds)
	return execution_data.NewExecutionDataStore(blobstore, execution_data.DefaultSerializer)
}
    37  
// getBlobservice returns a mock BlobService whose AddBlobs call is passed
// straight through to a real blobstore backed by ds.
func getBlobservice(ds datastore.Batching) network.BlobService {
	store := blobs.NewBlobstore(ds)

	svc := new(mocknetwork.BlobService)
	svc.On("AddBlobs", mock.Anything, mock.AnythingOfType("[]blocks.Block")).Return(store.PutMany)

	return svc
}
    44  
// getProvider constructs a Provider wired with no-op logging and metrics,
// the default serializer, the given blob service, and a mock tracker store.
func getProvider(blobService network.BlobService) *provider.Provider {
	storage := mocktracker.NewMockStorage()

	p := provider.NewProvider(
		zerolog.Nop(),
		metrics.NewNoopCollector(),
		execution_data.DefaultSerializer,
		blobService,
		storage,
	)
	return p
}
    56  
// generateChunkExecutionData produces a ChunkExecutionData whose serialized
// form is at least minSerializedSize bytes. It repeatedly serializes the
// chunk and, while still too small, replaces the first payload's value with
// a random byte slice that doubles in length on every attempt.
func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *execution_data.ChunkExecutionData {
	ced := &execution_data.ChunkExecutionData{
		TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8),
	}

	for payloadSize := 1; ; payloadSize *= 2 {
		var buf bytes.Buffer
		require.NoError(t, execution_data.DefaultSerializer.Serialize(&buf, ced))

		if uint64(buf.Len()) >= minSerializedSize {
			t.Logf("Chunk execution data size: %d", buf.Len())
			return ced
		}

		// Grow the first payload with random data and try again.
		value := make([]byte, payloadSize)
		_, _ = rand.Read(value)

		key, err := ced.TrieUpdate.Payloads[0].Key()
		require.NoError(t, err)
		ced.TrieUpdate.Payloads[0] = ledger.NewPayload(key, value)
	}
}
    83  
// generateBlockExecutionData builds a BlockExecutionData for a random block
// ID containing numChunks chunks, each serializing to at least
// minSerializedSizePerChunk bytes.
func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData {
	chunks := make([]*execution_data.ChunkExecutionData, numChunks)
	for i := range chunks {
		chunks[i] = generateChunkExecutionData(t, minSerializedSizePerChunk)
	}

	return &execution_data.BlockExecutionData{
		BlockID:             unittest.IdentifierFixture(),
		ChunkExecutionDatas: chunks,
	}
}
    96  
// deepEqual asserts that two BlockExecutionData values are semantically
// equal: same block ID, same number of chunks, and per-chunk equal
// collections, events, and trie updates.
func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) {
	assert.Equal(t, expected.BlockID, actual.BlockID)
	// require (not assert): a non-fatal length check would let the loop below
	// index past the end of the shorter slice and panic with an out-of-range
	// error instead of reporting a clean test failure.
	require.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas))

	for i, expectedChunk := range expected.ChunkExecutionDatas {
		actualChunk := actual.ChunkExecutionDatas[i]

		goassert.DeepEqual(t, expectedChunk.Collection, actualChunk.Collection)
		goassert.DeepEqual(t, expectedChunk.Events, actualChunk.Events)
		assert.True(t, expectedChunk.TrieUpdate.Equals(actualChunk.TrieUpdate))
	}
}
   109  
// TestHappyPath provides block execution data through the provider and
// verifies it can be read back, unchanged, from the execution data store
// sharing the same underlying datastore.
func TestHappyPath(t *testing.T) {
	t.Parallel()

	ds := getDatastore()
	edProvider := getProvider(getBlobservice(ds))
	store := getExecutionDataStore(ds)

	runCase := func(numChunks int, minSerializedSizePerChunk uint64) {
		expected := generateBlockExecutionData(t, numChunks, minSerializedSizePerChunk)

		executionDataID, err := edProvider.Provide(context.Background(), 0, expected)
		require.NoError(t, err)

		actual, err := store.GetExecutionData(context.Background(), executionDataID)
		require.NoError(t, err)

		deepEqual(t, expected, actual)
	}

	// Small execution data (single level blob tree).
	runCase(1, 0)
	// Large execution data (multi level blob tree).
	runCase(5, 5*execution_data.DefaultMaxBlobSize)
}
   129  
// TestProvideContextCanceled verifies that Provide surfaces the context's
// error when the underlying blob service blocks until the context is done.
func TestProvideContextCanceled(t *testing.T) {
	t.Parallel()

	bed := generateBlockExecutionData(t, 5, 5*execution_data.DefaultMaxBlobSize)

	// Sanity check: providing succeeds against a working blob service.
	workingProvider := getProvider(getBlobservice(getDatastore()))
	_, err := workingProvider.Provide(context.Background(), 0, bed)
	require.NoError(t, err)

	// Now use a blob service whose AddBlobs blocks until the context expires.
	blockingBlobService := new(mocknetwork.BlobService)
	blockingBlobService.On("AddBlobs", mock.Anything, mock.AnythingOfType("[]blocks.Block")).
		Return(func(ctx context.Context, _ []blobs.Blob) error {
			<-ctx.Done()
			return ctx.Err()
		})

	blockedProvider := getProvider(blockingBlobService)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	_, err = blockedProvider.Provide(ctx, 0, bed)
	assert.ErrorIs(t, err, ctx.Err())
}