github.com/onflow/flow-go@v0.35.7-crescendo-preview.23-atree-inlining/module/executiondatasync/execution_data/store_test.go

package execution_data_test

import (
	"bytes"
	"context"
	"crypto/rand"
	"fmt"
	"io"
	mrand "math/rand"
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	goassert "gotest.tools/assert"

	"github.com/onflow/flow-go/module/blobs"
	"github.com/onflow/flow-go/module/executiondatasync/execution_data"
	"github.com/onflow/flow-go/utils/unittest"
)

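// getBlobstore returns an in-memory blobstore for use in tests.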
func getBlobstore() blobs.Blobstore {
	return blobs.NewBlobstore(dssync.MutexWrap(datastore.NewMapDatastore()))
}

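// getExecutionDataStore constructs an ExecutionDataStore backed by the given blobstore and serializer.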
func getExecutionDataStore(blobstore blobs.Blobstore, serializer execution_data.Serializer) execution_data.ExecutionDataStore {
	return execution_data.NewExecutionDataStore(blobstore, serializer)
}

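// generateBlockExecutionData creates block execution data with the given number of chunks,
// each with at least minSerializedSizePerChunk bytes of serialized data.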
func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData {
	ceds := make([]*execution_data.ChunkExecutionData, numChunks)
	for i := 0; i < numChunks; i++ {
		ceds[i] = unittest.ChunkExecutionDataFixture(t, int(minSerializedSizePerChunk))
	}

	return unittest.BlockExecutionDataFixture(unittest.WithChunkExecutionDatas(ceds...))
}

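// getAllKeys returns the CIDs of all blobs currently stored in the blobstore.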
func getAllKeys(t *testing.T, bs blobs.Blobstore) []cid.Cid {
	cidChan, err := bs.AllKeysChan(context.Background())
	require.NoError(t, err)

	var cids []cid.Cid

	for cid := range cidChan {
		cids = append(cids, cid)
	}

	return cids
}

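// deepEqual asserts that two BlockExecutionData values are semantically equal,
// comparing each chunk's collection, events, and trie update.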
func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) {
	assert.Equal(t, expected.BlockID, actual.BlockID)
	assert.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas))

	for i, expectedChunk := range expected.ChunkExecutionDatas {
		actualChunk := actual.ChunkExecutionDatas[i]

		goassert.DeepEqual(t, expectedChunk.Collection, actualChunk.Collection)
		goassert.DeepEqual(t, expectedChunk.Events, actualChunk.Events)
		assert.True(t, expectedChunk.TrieUpdate.Equals(actualChunk.TrieUpdate))
	}
}

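// TestHappyPath verifies that execution data added to the store can be retrieved intact,
// for both single-level and multi-level blob trees.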
func TestHappyPath(t *testing.T) {
	t.Parallel()

	eds := getExecutionDataStore(getBlobstore(), execution_data.DefaultSerializer)

	test := func(numChunks int, minSerializedSizePerChunk uint64) {
		expected := generateBlockExecutionData(t, numChunks, minSerializedSizePerChunk)
		rootId, err := eds.Add(context.Background(), expected)
		require.NoError(t, err)
		actual, err := eds.Get(context.Background(), rootId)
		require.NoError(t, err)
		deepEqual(t, expected, actual)
	}

	test(1, 0)                                   // small execution data (single level blob tree)
	test(5, 5*execution_data.DefaultMaxBlobSize) // large execution data (multi level blob tree)
}

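// randomSerializer writes random bytes in place of the serialized value,
// producing blobs that cannot be deserialized.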
type randomSerializer struct{}

func (rs *randomSerializer) Serialize(w io.Writer, v interface{}) error {
	data := make([]byte, 1024)
	_, _ = rand.Read(data)
	_, err := w.Write(data)
	return err
}

func (rs *randomSerializer) Deserialize(r io.Reader) (interface{}, error) {
	return nil, fmt.Errorf("not implemented")
}

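// corruptedTailSerializer serializes values with the default serializer, but overwrites
// the last 1024 bytes of one randomly chosen chunk's serialized data with random bytes.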
type corruptedTailSerializer struct {
	corruptedChunk int
	i              int
}

func newCorruptedTailSerializer(numChunks int) *corruptedTailSerializer {
	return &corruptedTailSerializer{
		// corruptedChunk is 1-based: it is matched against i, which is incremented
		// before the comparison in Serialize.
		corruptedChunk: mrand.Intn(numChunks) + 1,
	}
}

func (cts *corruptedTailSerializer) Serialize(w io.Writer, v interface{}) error {
	if _, ok := v.(*execution_data.ChunkExecutionData); ok {
		cts.i++
		if cts.i == cts.corruptedChunk {
			buf := &bytes.Buffer{}

			err := execution_data.DefaultSerializer.Serialize(buf, v)
			if err != nil {
				return err
			}

			// corrupt the tail of the serialized chunk
			data := buf.Bytes()
			_, _ = rand.Read(data[len(data)-1024:])

			_, err = w.Write(data)
			return err
		}
	}

	return execution_data.DefaultSerializer.Serialize(w, v)
}

func (cts *corruptedTailSerializer) Deserialize(r io.Reader) (interface{}, error) {
	return nil, fmt.Errorf("not implemented")
}

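// TestMalformedData verifies that Get fails with a malformed data error when the stored
// execution data cannot be deserialized.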
func TestMalformedData(t *testing.T) {
	t.Parallel()

	test := func(bed *execution_data.BlockExecutionData, serializer execution_data.Serializer) {
		blobstore := getBlobstore()
		defaultEds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer)
		malformedEds := getExecutionDataStore(blobstore, serializer)
		rootID, err := malformedEds.Add(context.Background(), bed)
		require.NoError(t, err)
		_, err = defaultEds.Get(context.Background(), rootID)
		assert.True(t, execution_data.IsMalformedDataError(err))
	}

	numChunks := 5
	bed := generateBlockExecutionData(t, numChunks, 10*execution_data.DefaultMaxBlobSize)

	test(bed, &randomSerializer{})                   // random bytes
	test(bed, newCorruptedTailSerializer(numChunks)) // serialized execution data with random bytes replaced at the end of a random chunk
}

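// TestGetIncompleteData verifies that Get returns a BlobNotFoundError when a blob
// in the blob tree is missing from the blobstore.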
func TestGetIncompleteData(t *testing.T) {
	t.Parallel()

	blobstore := getBlobstore()
	eds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer)

	bed := generateBlockExecutionData(t, 5, 10*execution_data.DefaultMaxBlobSize)
	rootID, err := eds.Add(context.Background(), bed)
	require.NoError(t, err)

	cids := getAllKeys(t, blobstore)
	t.Logf("%d blobs in blob tree", len(cids))

	cidToDelete := cids[mrand.Intn(len(cids))]
	require.NoError(t, blobstore.DeleteBlob(context.Background(), cidToDelete))

	_, err = eds.Get(context.Background(), rootID)
	var blobNotFoundError *execution_data.BlobNotFoundError
	assert.ErrorAs(t, err, &blobNotFoundError)
}