storj.io/uplink@v1.13.0/private/storage/streams/uploader_test.go

// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package streams

import (
	"bytes"
	"context"
	"io"
	"math"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"

	"storj.io/common/encryption"
	"storj.io/common/paths"
	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/uplink/private/eestream/scheduler"
	"storj.io/uplink/private/metaclient"
	"storj.io/uplink/private/storage/streams/splitter"
	"storj.io/uplink/private/storage/streams/streamupload"
)

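// Shared fixtures used by the tests and fakes below.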
var (
	creationDate         = time.Date(2023, time.February, 23, 10, 0, 0, 0, time.UTC)
	segmentSize          = int64(4096)
	cipherSuite          = storj.EncAESGCM
	encryptionParameters = storj.EncryptionParameters{CipherSuite: cipherSuite, BlockSize: 32}
	inlineThreshold      = 1024
	longTailMargin       = 1
	storjKey             = storj.Key{0: 1}
	expiration           = creationDate.Add(time.Hour)
	uploadInfo           = streamupload.Info{CreationDate: creationDate, PlainSize: 123}
	streamID             = storj.StreamID("STREAMID")
	partNumber           = int32(1)
	eTagCh               = make(chan []byte)
)

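// TestNewUploader verifies that NewUploader rejects zero-valued
// configuration parameters with descriptive errors.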
func TestNewUploader(t *testing.T) {
	type config struct {
		segmentSize          int64
		encryptionParameters storj.EncryptionParameters
		inlineThreshold      int
		longTailMargin       int
	}

	for _, tc := range []struct {
		desc           string
		overrideConfig func(c *config)
		expectNewErr   string
	}{
		{
			desc: "segment size is zero",
			overrideConfig: func(c *config) {
				c.segmentSize = 0
			},
			expectNewErr: "segment size must be larger than 0",
		},
		{
			desc: "block size is zero",
			overrideConfig: func(c *config) {
				c.encryptionParameters.BlockSize = 0
			},
			expectNewErr: "encryption block size must be larger than 0",
		},
		{
			desc: "inline threshold is zero",
			overrideConfig: func(c *config) {
				c.inlineThreshold = 0
			},
			expectNewErr: "inline threshold must be larger than 0",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			// These parameters are not validated by NewUploader but stashed
			// and used later.
			var (
				metainfo *metaclient.Client
				encStore *encryption.Store
			)
			c := config{
				segmentSize:          segmentSize,
				encryptionParameters: encryptionParameters,
				inlineThreshold:      inlineThreshold,
			}
			tc.overrideConfig(&c)

			uploader, err := NewUploader(metainfo, piecePutter{}, c.segmentSize, encStore, c.encryptionParameters, c.inlineThreshold, c.longTailMargin)
			if uploader != nil {
				defer func() { assert.NoError(t, uploader.Close()) }()
			}
			if tc.expectNewErr != "" {
				require.EqualError(t, err, tc.expectNewErr)
				return
			}
			require.NoError(t, err)
			require.NotNil(t, uploader)
		})
	}
}

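// TestUpload drives object and part uploads through the Uploader, covering
// missing encryption-store entries, backend failures, and the state of the
// upload after Commit.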
func TestUpload(t *testing.T) {
	type config struct {
		bucket   string
		key      string
		metadata Metadata
		backend  uploaderBackend
	}

	testUpload := func(t *testing.T, uploadFn func(uploader *Uploader, c config) (*Upload, error)) {
		for _, tc := range []struct {
			desc            string
			overrideConfig  func(c *config)
			expectUploadErr string
			expectCommitErr string
		}{
			{
				desc: "no access to bucket",
				overrideConfig: func(c *config) {
					c.bucket = "OHNO"
				},
				expectUploadErr: `missing encryption base: "OHNO"/"KEY"`,
			},
			{
				desc: "no access to key",
				overrideConfig: func(c *config) {
					c.key = "OHNO"
				},
				expectUploadErr: `missing encryption base: "BUCKET"/"OHNO"`,
			},
			{
				desc: "upload fails",
				overrideConfig: func(c *config) {
					c.backend = fakeUploaderBackend{err: errs.New("upload failed")}
				},
				expectCommitErr: "upload failed",
			},
			{
				desc:           "upload success",
				overrideConfig: func(c *config) {},
			},
		} {
			t.Run(tc.desc, func(t *testing.T) {
				var (
					encStore = encryption.NewStore()
					unenc    = paths.NewUnencrypted("KEY")
					enc      = paths.NewEncrypted("ENCKEY")
				)

				err := encStore.Add("BUCKET", unenc, enc, storjKey)
				require.NoError(t, err)

				c := config{
					bucket:   "BUCKET",
					key:      "KEY",
					metadata: fixedMetadata{},
					backend:  fakeUploaderBackend{},
				}
				tc.overrideConfig(&c)

				uploader, err := NewUploader(metainfoUpload{}, piecePutter{}, segmentSize, encStore, encryptionParameters, inlineThreshold, longTailMargin)
				require.NoError(t, err)
				defer func() { assert.NoError(t, uploader.Close()) }()

				uploader.backend = c.backend

				upload, err := uploadFn(uploader, c)
				if tc.expectUploadErr != "" {
					require.EqualError(t, err, tc.expectUploadErr)
					return
				}
				require.NoError(t, err)
				require.NotNil(t, upload)

				meta := upload.Meta()
				require.Nil(t, meta, "upload has metadata before being committed")

				err = upload.Commit()

				// Whether the commit succeeds or fails, writing to the
				// upload should fail, since either path finishes the
				// underlying splitter.
				_, copyErr := io.Copy(upload, strings.NewReader("JUNK"))
				require.EqualError(t, copyErr, "upload already done")

				if tc.expectCommitErr != "" {
					require.EqualError(t, err, tc.expectCommitErr)
					return
				}
				require.NoError(t, err)

				meta = upload.Meta()
				require.Equal(t, &Meta{Modified: uploadInfo.CreationDate, Size: uploadInfo.PlainSize}, meta)
			})
		}
	}

	t.Run("Object", func(t *testing.T) {
		testUpload(t, func(uploader *Uploader, c config) (*Upload, error) {
			return uploader.UploadObject(context.Background(), c.bucket, c.key, c.metadata, expiration, noopScheduler{})
		})
	})

	t.Run("Part", func(t *testing.T) {
		testUpload(t, func(uploader *Uploader, c config) (*Upload, error) {
			return uploader.UploadPart(context.Background(), c.bucket, c.key, streamID, partNumber, eTagCh, noopScheduler{})
		})
	})
}

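// TestEncryptedMetadata encrypts stream metadata via encryptedMetadata and
// decrypts the result again to verify the round trip preserves the segment
// sizes and metadata bytes.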
func TestEncryptedMetadata(t *testing.T) {
	e := encryptedMetadata{
		metadata:    fixedMetadata{},
		segmentSize: segmentSize,
		derivedKey:  &storjKey,
		cipherSuite: cipherSuite,
	}
	streamMetaBytes, encMetaKey, encMetaKeyNonce, err := e.EncryptedMetadata(segmentSize - 1)
	require.NoError(t, err)
	require.NotNil(t, streamMetaBytes)
	require.NotNil(t, encMetaKey)
	require.NotNil(t, encMetaKeyNonce)

	streamMeta := new(pb.StreamMeta)
	err = pb.Unmarshal(streamMetaBytes, streamMeta)
	require.NoError(t, err)

	// Decrypt and assert contents
	metadataKey, err := encryption.DecryptKey(*encMetaKey, e.cipherSuite, e.derivedKey, encMetaKeyNonce)
	require.NoError(t, err)
	streamInfoBytes, err := encryption.Decrypt(streamMeta.EncryptedStreamInfo, e.cipherSuite, metadataKey, &storj.Nonce{})
	require.NoError(t, err)

	streamInfo := new(pb.StreamInfo)
	err = pb.Unmarshal(streamInfoBytes, streamInfo)
	require.NoError(t, err)

	require.Equal(t, &pb.StreamInfo{
		SegmentsSize:    segmentSize,
		LastSegmentSize: segmentSize - 1,
		Metadata:        []byte("METADATA"),
	}, streamInfo)
}

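// TestLimitsExchanger verifies that ExchangeLimits passes the segment ID and
// piece numbers through to RetryBeginSegmentPieces and returns the new
// segment ID and limits.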
func TestLimitsExchanger(t *testing.T) {
	e := limitsExchanger{metainfo: retryBeginSegments{}}

	t.Run("success", func(t *testing.T) {
		segmentID, limits, err := e.ExchangeLimits(context.Background(), storj.SegmentID("IN"), []int{1, 2, 3})
		require.NoError(t, err)
		require.Equal(t, storj.SegmentID("OUT"), segmentID)
		require.Equal(t, []*pb.AddressedOrderLimit{{Limit: &pb.OrderLimit{Limit: 123}}}, limits)
	})
}

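// retryBeginSegments stubs RetryBeginSegmentPieces so the test can observe
// which segment ID and piece numbers it was handed.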
type retryBeginSegments struct {
	MetainfoUpload
}

func (r retryBeginSegments) RetryBeginSegmentPieces(ctx context.Context, params metaclient.RetryBeginSegmentPiecesParams) (metaclient.RetryBeginSegmentPiecesResponse, error) {
	// Calculate a limit that helps us detect that we passed the piece numbers correctly.
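	// Each piece number becomes one decimal digit, so piece numbers
	// [1, 2, 3] yield the limit 123 asserted in TestLimitsExchanger.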
	var limit int64
	for i, num := range params.RetryPieceNumbers {
		limit += int64(math.Pow(10, float64(len(params.RetryPieceNumbers)-i-1))) * int64(num)
	}
	switch string(params.SegmentID) {
	case "IN":
		return metaclient.RetryBeginSegmentPiecesResponse{SegmentID: []byte("OUT"), Limits: []*pb.AddressedOrderLimit{{Limit: &pb.OrderLimit{Limit: limit}}}}, nil
	case "ERR":
		return metaclient.RetryBeginSegmentPiecesResponse{}, errs.New("expected error")
	default:
		return metaclient.RetryBeginSegmentPiecesResponse{}, errs.New("segment ID not passed correctly")
	}
}

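// fakeUploaderBackend stands in for the real upload backend: it verifies
// that the Uploader forwarded the expected collaborators and parameters,
// then returns a canned Info or the configured error.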
type fakeUploaderBackend struct {
	err error
}

func (b fakeUploaderBackend) UploadObject(ctx context.Context, segmentSource streamupload.SegmentSource, segmentUploader streamupload.SegmentUploader, miBatcher metaclient.Batcher, beginObject *metaclient.BeginObjectParams, encMeta streamupload.EncryptedMetadata) (streamupload.Info, error) {
	if err := b.checkCommonParams(segmentSource, segmentUploader, miBatcher); err != nil {
		return streamupload.Info{}, err
	}

	m, ok := encMeta.(*encryptedMetadata)
	if !ok {
		return streamupload.Info{}, errs.New("encrypted metadata is of type %T but expected %T", encMeta, m)
	}
	if um, ok := m.metadata.(fixedMetadata); !ok {
		return streamupload.Info{}, errs.New("encrypted metadata's inner metadata is of type %T but expected %T", m.metadata, um)
	}
	if m.segmentSize != segmentSize {
		return streamupload.Info{}, errs.New("encrypted metadata segment size should be %d but is %d", segmentSize, m.segmentSize)
	}
	if m.derivedKey == nil {
		return streamupload.Info{}, errs.New("encrypted metadata segment derived key is nil")
	}
	if m.cipherSuite != cipherSuite {
		return streamupload.Info{}, errs.New("encrypted metadata cipher suite should be %d but got %d", cipherSuite, m.cipherSuite)
	}

	return b.upload()
}

func (b fakeUploaderBackend) UploadPart(ctx context.Context, segmentSource streamupload.SegmentSource, segmentUploader streamupload.SegmentUploader, miBatcher metaclient.Batcher, streamIDIn storj.StreamID, eTagChIn <-chan []byte) (streamupload.Info, error) {
	if err := b.checkCommonParams(segmentSource, segmentUploader, miBatcher); err != nil {
		return streamupload.Info{}, err
	}
	if !bytes.Equal(streamIDIn, streamID) {
		return streamupload.Info{}, errs.New("expected stream ID %x but got %x", streamID, streamIDIn)
	}
	if eTagChIn != eTagCh {
		return streamupload.Info{}, errs.New("unexpected eTag channel")
	}
	return b.upload()
}

func (fakeUploaderBackend) checkCommonParams(source streamupload.SegmentSource, uploader streamupload.SegmentUploader, batcher metaclient.Batcher) error {
	if s, ok := source.(*splitter.Splitter); !ok {
		return errs.New("segment source is of type %T but expected %T", source, s)
	} else if s == nil {
		return errs.New("segment source is not a valid splitter: nil")
	}

	u, ok := uploader.(segmentUploader)
	if !ok {
		return errs.New("segment uploader is of type %T but expected %T", uploader, u)
	}

	if um, ok := u.metainfo.(metainfoUpload); !ok {
		return errs.New("segment uploader metainfo is of type %T but expected %T", u.metainfo, um)
	}
	if up, ok := u.piecePutter.(piecePutter); !ok {
		return errs.New("segment uploader piece putter is of type %T but expected %T", u.piecePutter, up)
	}
	if s, ok := u.sched.(noopScheduler); !ok {
		return errs.New("segment uploader scheduler is of type %T but expected %T", u.sched, s)
	}
	if u.longTailMargin != longTailMargin {
		return errs.New("segment uploader long tail margin is %d but expected %d", u.longTailMargin, longTailMargin)
	}

	if b, ok := batcher.(metainfoUpload); !ok {
		return errs.New("batcher is of type %T but expected %T", batcher, b)
	}

	return nil
}

func (b fakeUploaderBackend) upload() (streamupload.Info, error) {
	if b.err != nil {
		return streamupload.Info{}, b.err
	}
	return uploadInfo, nil
}

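// metainfoUpload satisfies MetainfoUpload for wiring purposes only; its
// RPC-style methods report an error if they are unexpectedly called.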
type metainfoUpload struct{}

func (metainfoUpload) Batch(ctx context.Context, batchItems ...metaclient.BatchItem) ([]metaclient.BatchResponse, error) {
	return nil, errs.New("should not be called")
}

func (metainfoUpload) RetryBeginSegmentPieces(ctx context.Context, params metaclient.RetryBeginSegmentPiecesParams) (metaclient.RetryBeginSegmentPiecesResponse, error) {
	return metaclient.RetryBeginSegmentPiecesResponse{}, errs.New("should not be called")
}

func (metainfoUpload) Close() error {
	return nil
}

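// piecePutter satisfies the piece-upload dependency; none of these tests
// upload real pieces, so PutPiece always errors.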
type piecePutter struct{}

func (piecePutter) PutPiece(longTailCtx, uploadCtx context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, deprecated *struct{}, err error) {
	return nil, nil, errs.New("should not be called")
}

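// fixedMetadata implements Metadata with a constant payload, which
// TestEncryptedMetadata expects back after decryption.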
type fixedMetadata struct{}

func (fixedMetadata) Metadata() ([]byte, error) {
	return []byte("METADATA"), nil
}

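// noopScheduler, noopHandle, and noopResource implement the scheduler
// interfaces without doing any real scheduling: Join always succeeds, while
// Get hands out no usable resources.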
type noopScheduler struct{}

func (noopScheduler) Join(ctx context.Context) (scheduler.Handle, bool) {
	return noopHandle{}, true
}

type noopHandle struct{}

func (noopHandle) Get(context.Context) (scheduler.Resource, bool) {
	return noopResource{}, false
}

func (noopHandle) Done() {}

type noopResource struct{}

func (noopResource) Done() {}