github.com/koko1123/flow-go-1@v0.29.6/module/executiondatasync/execution_data/store_test.go

package execution_data_test

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"math/rand"
	"testing"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	goassert "gotest.tools/assert"

	"github.com/koko1123/flow-go-1/ledger"
	"github.com/koko1123/flow-go-1/ledger/common/testutils"
	"github.com/koko1123/flow-go-1/module/blobs"
	"github.com/koko1123/flow-go-1/module/executiondatasync/execution_data"
	"github.com/koko1123/flow-go-1/utils/unittest"
)

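// getBlobstore returns an in-memory Blobstore backed by a mutex-wrapped map datastore.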
func getBlobstore() blobs.Blobstore {
	return blobs.NewBlobstore(dssync.MutexWrap(datastore.NewMapDatastore()))
}

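// getExecutionDataStore constructs an ExecutionDataStore on top of the given blobstore and serializer.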
func getExecutionDataStore(blobstore blobs.Blobstore, serializer execution_data.Serializer) execution_data.ExecutionDataStore {
	return execution_data.NewExecutionDataStore(blobstore, serializer)
}

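// generateChunkExecutionData returns a ChunkExecutionData fixture whose serialized size is at
// least minSerializedSize, achieved by repeatedly doubling the size of a random payload in its
// trie update until the serialized form is large enough.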
func generateChunkExecutionData(t *testing.T, minSerializedSize uint64) *execution_data.ChunkExecutionData {
	ced := &execution_data.ChunkExecutionData{
		TrieUpdate: testutils.TrieUpdateFixture(1, 1, 8),
	}

	size := 1

	for {
		buf := &bytes.Buffer{}
		require.NoError(t, execution_data.DefaultSerializer.Serialize(buf, ced))

		if buf.Len() >= int(minSerializedSize) {
			t.Logf("Chunk execution data size: %d", buf.Len())
			return ced
		}

		v := make([]byte, size)
		_, _ = rand.Read(v)

		k, err := ced.TrieUpdate.Payloads[0].Key()
		require.NoError(t, err)

		ced.TrieUpdate.Payloads[0] = ledger.NewPayload(k, v)
		size *= 2
	}
}

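// generateBlockExecutionData returns a BlockExecutionData fixture containing numChunks chunks,
// each serializing to at least minSerializedSizePerChunk bytes.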
func generateBlockExecutionData(t *testing.T, numChunks int, minSerializedSizePerChunk uint64) *execution_data.BlockExecutionData {
	bed := &execution_data.BlockExecutionData{
		BlockID:             unittest.IdentifierFixture(),
		ChunkExecutionDatas: make([]*execution_data.ChunkExecutionData, numChunks),
	}

	for i := 0; i < numChunks; i++ {
		bed.ChunkExecutionDatas[i] = generateChunkExecutionData(t, minSerializedSizePerChunk)
	}

	return bed
}

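// getAllKeys collects all CIDs currently stored in the blobstore.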
func getAllKeys(t *testing.T, bs blobs.Blobstore) []cid.Cid {
	cidChan, err := bs.AllKeysChan(context.Background())
	require.NoError(t, err)

	var cids []cid.Cid

	for cid := range cidChan {
		cids = append(cids, cid)
	}

	return cids
}

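// deepEqual asserts that two BlockExecutionData values are semantically equal, comparing block
// IDs and, chunk by chunk, collections, events, and trie updates.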
func deepEqual(t *testing.T, expected, actual *execution_data.BlockExecutionData) {
	assert.Equal(t, expected.BlockID, actual.BlockID)
	assert.Equal(t, len(expected.ChunkExecutionDatas), len(actual.ChunkExecutionDatas))

	for i, expectedChunk := range expected.ChunkExecutionDatas {
		actualChunk := actual.ChunkExecutionDatas[i]

		goassert.DeepEqual(t, expectedChunk.Collection, actualChunk.Collection)
		goassert.DeepEqual(t, expectedChunk.Events, actualChunk.Events)
		assert.True(t, expectedChunk.TrieUpdate.Equals(actualChunk.TrieUpdate))
	}
}

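// TestHappyPath verifies that execution data added to the store can be retrieved by its root ID
// and round-trips unchanged, for both single-level and multi-level blob trees.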
func TestHappyPath(t *testing.T) {
	t.Parallel()

	eds := getExecutionDataStore(getBlobstore(), execution_data.DefaultSerializer)

	test := func(numChunks int, minSerializedSizePerChunk uint64) {
		expected := generateBlockExecutionData(t, numChunks, minSerializedSizePerChunk)
		rootId, err := eds.AddExecutionData(context.Background(), expected)
		require.NoError(t, err)
		actual, err := eds.GetExecutionData(context.Background(), rootId)
		require.NoError(t, err)
		deepEqual(t, expected, actual)
	}

	test(1, 0)                                   // small execution data (single level blob tree)
	test(5, 5*execution_data.DefaultMaxBlobSize) // large execution data (multi level blob tree)
}

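// randomSerializer writes random bytes regardless of the value passed in, producing blobs that
// cannot be deserialized by the default serializer.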
type randomSerializer struct{}

func (rs *randomSerializer) Serialize(w io.Writer, v interface{}) error {
	data := make([]byte, 1024)
	_, _ = rand.Read(data)
	_, err := w.Write(data)
	return err
}

func (rs *randomSerializer) Deserialize(r io.Reader) (interface{}, error) {
	return nil, fmt.Errorf("not implemented")
}

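// corruptedTailSerializer serializes values with the default serializer, but overwrites the last
// 1024 bytes of one randomly chosen chunk with random data.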
type corruptedTailSerializer struct {
	corruptedChunk int
	i              int
}

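// newCorruptedTailSerializer returns a corruptedTailSerializer that corrupts one of the numChunks
// chunks, chosen at random.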
func newCorruptedTailSerializer(numChunks int) *corruptedTailSerializer {
	return &corruptedTailSerializer{
		corruptedChunk: rand.Intn(numChunks) + 1,
	}
}

func (cts *corruptedTailSerializer) Serialize(w io.Writer, v interface{}) error {
	if _, ok := v.(*execution_data.ChunkExecutionData); ok {
		cts.i++
		if cts.i == cts.corruptedChunk {
			buf := &bytes.Buffer{}

			err := execution_data.DefaultSerializer.Serialize(buf, v)
			if err != nil {
				return err
			}

			data := buf.Bytes()
			_, _ = rand.Read(data[len(data)-1024:])

			_, err = w.Write(data)
			return err
		}
	}

	return execution_data.DefaultSerializer.Serialize(w, v)
}

func (cts *corruptedTailSerializer) Deserialize(r io.Reader) (interface{}, error) {
	return nil, fmt.Errorf("not implemented")
}

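// TestMalformedData verifies that retrieving execution data that was stored in a malformed
// encoding fails with a MalformedDataError.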
func TestMalformedData(t *testing.T) {
	t.Parallel()

	test := func(bed *execution_data.BlockExecutionData, serializer execution_data.Serializer) {
		blobstore := getBlobstore()
		defaultEds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer)
		malformedEds := getExecutionDataStore(blobstore, serializer)
		rootID, err := malformedEds.AddExecutionData(context.Background(), bed)
		require.NoError(t, err)
		_, err = defaultEds.GetExecutionData(context.Background(), rootID)
		assert.True(t, execution_data.IsMalformedDataError(err))
	}

	numChunks := 5
	bed := generateBlockExecutionData(t, numChunks, 10*execution_data.DefaultMaxBlobSize)

	test(bed, &randomSerializer{})                   // random bytes
	test(bed, newCorruptedTailSerializer(numChunks)) // serialized execution data with random bytes replaced at the end of a random chunk
}

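// TestGetIncompleteData verifies that retrieving execution data fails with a BlobNotFoundError
// when one of the blobs in the blob tree has been deleted from the blobstore.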
func TestGetIncompleteData(t *testing.T) {
	t.Parallel()

	blobstore := getBlobstore()
	eds := getExecutionDataStore(blobstore, execution_data.DefaultSerializer)

	bed := generateBlockExecutionData(t, 5, 10*execution_data.DefaultMaxBlobSize)
	rootID, err := eds.AddExecutionData(context.Background(), bed)
	require.NoError(t, err)

	cids := getAllKeys(t, blobstore)
	t.Logf("%d blobs in blob tree", len(cids))

	cidToDelete := cids[rand.Intn(len(cids))]
	require.NoError(t, blobstore.DeleteBlob(context.Background(), cidToDelete))

	_, err = eds.GetExecutionData(context.Background(), rootID)
	var blobNotFoundError *execution_data.BlobNotFoundError
	assert.ErrorAs(t, err, &blobNotFoundError)
}