storj.io/uplink@v1.13.0/private/storage/streams/segmentupload/single_test.go

// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package segmentupload

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"sync"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/uplink/private/eestream"
	"storj.io/uplink/private/eestream/scheduler"
	"storj.io/uplink/private/metaclient"
	"storj.io/uplink/private/storage/streams/splitter"
)

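// optimalShares and totalShares mirror the redundancy scheme built by
// mustNewRedundancyStrategy: an upload needs optimalShares successful pieces
// out of totalShares.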
const (
	optimalShares = 3
	totalShares   = 4
)

var (
	fastKind = nodeKind{0: 1}
	slowKind = nodeKind{0: 2}
	badKind  = nodeKind{0: 3}

	rs             = mustNewRedundancyStrategy()
	fakeSegmentID  = storj.SegmentID{0xFF}
	fakePrivateKey = mustNewPiecePrivateKey()
	minimumLimits  = makeLimits(fastKind, fastKind, fastKind)

	fakeSegmentInfo = &splitter.SegmentInfo{
		Encryption:    metaclient.SegmentEncryption{EncryptedKeyNonce: storj.Nonce{0: 1}},
		PlainSize:     123,
		EncryptedSize: 456,
	}
)

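// TestBegin covers validation of the begin segment response, the number of
// concurrent piece uploads scheduled for various long tail margins, and the
// commit parameters and errors reported by Wait.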
func TestBegin(t *testing.T) {
	const longTailMargin = 1

	for _, tc := range []struct {
		desc                   string
		beginSegment           *metaclient.BeginSegmentResponse
		overrideContext        func(*testing.T, context.Context) context.Context
		overrideLongTailMargin func() int
		expectBeginErr         string
		expectWaitErr          string
		expectUploaderCount    int // expected number of concurrent piece uploads
	}{
		{
			desc:           "begin segment response missing private key",
			beginSegment:   &metaclient.BeginSegmentResponse{RedundancyStrategy: rs, Limits: minimumLimits},
			expectBeginErr: "begin segment response is missing piece private key",
		},
		{
			desc:           "begin segment response missing redundancy strategy",
			beginSegment:   &metaclient.BeginSegmentResponse{Limits: minimumLimits, PiecePrivateKey: fakePrivateKey},
			expectBeginErr: "begin segment response is missing redundancy strategy",
		},
		{
			desc:           "begin segment response does not have any limits",
			beginSegment:   &metaclient.BeginSegmentResponse{RedundancyStrategy: rs, PiecePrivateKey: fakePrivateKey},
			expectBeginErr: fmt.Sprintf("begin segment response needs at least %d limits to meet optimal threshold but has 0", optimalShares),
		},
		{
			desc:           "begin segment response does not have enough limits",
			beginSegment:   makeBeginSegment(fastKind, fastKind),
			expectBeginErr: fmt.Sprintf("begin segment response needs at least %d limits to meet optimal threshold but has %d", optimalShares, optimalShares-1),
		},
		{
			desc:                   "negative long tail margin",
			beginSegment:           makeBeginSegment(fastKind, fastKind, fastKind, slowKind),
			overrideLongTailMargin: func() int { return -1 },
			expectUploaderCount:    totalShares,
		},
		{
			desc:                   "zero long tail margin",
			beginSegment:           makeBeginSegment(fastKind, fastKind, fastKind, slowKind),
			overrideLongTailMargin: func() int { return 0 },
			expectUploaderCount:    optimalShares,
		},
		{
			desc:                "upload count is capped to limits",
			beginSegment:        makeBeginSegment(fastKind, fastKind, fastKind),
			expectUploaderCount: optimalShares,
		},
		{
			desc:                "upload count does not exceed optimal threshold + long tail margin",
			beginSegment:        makeBeginSegment(fastKind, fastKind, fastKind, slowKind, fastKind),
			expectUploaderCount: optimalShares + longTailMargin,
		},
		{
			desc:                "slow piece uploads are cancelled after optimal threshold hit",
			beginSegment:        makeBeginSegment(fastKind, fastKind, fastKind, slowKind),
			expectUploaderCount: optimalShares + longTailMargin,
		},
		{
			desc:         "aborts immediately when context already cancelled",
			beginSegment: makeBeginSegment(slowKind, fastKind, fastKind, fastKind),
			overrideContext: func(t *testing.T, ctx context.Context) context.Context {
				ctx, cancel := context.WithCancel(ctx)
				cancel()
				return ctx
			},
			expectBeginErr: "failed to obtain piece upload handle",
		},
		{
			desc:          "fails when not enough successful pieces were uploaded",
			beginSegment:  makeBeginSegment(badKind, fastKind, fastKind),
			expectWaitErr: "failed to upload enough pieces (needed at least 3 but got 2); piece limit exchange failed: exchanges disallowed for test",
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			var (
				segment         = new(fakeSegment)
				limitsExchanger = new(fakeLimitsExchanger)
				piecePutter     = new(fakePiecePutter)
				sched           = newWrappedScheduler()
				longTailMargin  = longTailMargin
			)
			ctx := context.Background()
			if tc.overrideContext != nil {
				ctx = tc.overrideContext(t, ctx)
			}
			if tc.overrideLongTailMargin != nil {
				longTailMargin = tc.overrideLongTailMargin()
			}
			upload, err := Begin(ctx, tc.beginSegment, segment, limitsExchanger, piecePutter, sched, longTailMargin)
			if tc.expectBeginErr != "" {
				require.EqualError(t, err, tc.expectBeginErr)
				require.NoError(t, sched.check(0))
				return
			}
			require.NoError(t, err)

			commitSegment, err := upload.Wait()

			// pass or fail, the segment should always be marked as done reading with the upload error
			require.Equal(t, err, segment.err)

			if tc.expectWaitErr != "" {
				require.EqualError(t, err, tc.expectWaitErr)
				return
			}

			require.NoError(t, err)

			require.Equal(t, &metaclient.CommitSegmentParams{
				SegmentID:         fakeSegmentID,
				Encryption:        fakeSegmentInfo.Encryption,
				SizeEncryptedData: fakeSegmentInfo.EncryptedSize,
				PlainSize:         fakeSegmentInfo.PlainSize,
				EncryptedTag:      nil,
				// The uploads with the first three limits/pieces always
				// succeed due to the way the tests are constructed above. If
				// that changes then this code needs to be updated to be more
				// flexible here.
				UploadResult: []*pb.SegmentPieceUploadResult{
					{PieceNum: 0, NodeId: fastNodeID(0), Hash: &pb.PieceHash{PieceId: pieceID(0)}},
					{PieceNum: 1, NodeId: fastNodeID(1), Hash: &pb.PieceHash{PieceId: pieceID(1)}},
					{PieceNum: 2, NodeId: fastNodeID(2), Hash: &pb.PieceHash{PieceId: pieceID(2)}},
				},
			}, commitSegment)
			require.NoError(t, sched.check(tc.expectUploaderCount))
		})
	}
}

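// nodeKind tags a node ID so the fake piece putter can decide whether a piece
// upload should succeed quickly, hang until cancelled, or fail. The kind is
// stored in the first byte of the node ID.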
type nodeKind storj.NodeID

func isNodeKind(nodeID storj.NodeID, kind nodeKind) bool {
	return nodeID[0] == kind[0]
}

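// makeBeginSegment builds a BeginSegmentResponse with one order limit per
// node kind plus the fake segment ID, redundancy strategy, and private key.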
func makeBeginSegment(kinds ...nodeKind) *metaclient.BeginSegmentResponse {
	return &metaclient.BeginSegmentResponse{
		SegmentID:          fakeSegmentID,
		Limits:             makeLimits(kinds...),
		RedundancyStrategy: rs,
		PiecePrivateKey:    fakePrivateKey,
	}
}

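// makeNodeID encodes the node kind in the first byte of the node ID and the
// piece index in the second byte.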
func makeNodeID(i int, kind nodeKind) storj.NodeID {
	nodeID := storj.NodeID(kind)
	nodeID[1] = byte(i)
	return nodeID
}

func fastNodeID(i int) storj.NodeID {
	return makeNodeID(i, fastKind)
}

func makeNodeIDs(kinds ...nodeKind) []storj.NodeID {
	var nodes []storj.NodeID
	for i, kind := range kinds {
		nodes = append(nodes, makeNodeID(i, kind))
	}
	return nodes
}

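// makeLimits returns one addressed order limit per node kind, pairing node i
// with pieceID(i).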
func makeLimits(kinds ...nodeKind) []*pb.AddressedOrderLimit {
	var limits []*pb.AddressedOrderLimit
	for i, nodeID := range makeNodeIDs(kinds...) {
		limits = append(limits, &pb.AddressedOrderLimit{
			Limit: &pb.OrderLimit{
				StorageNodeId: nodeID,
				PieceId:       pieceID(i),
			},
		})
	}
	return limits
}

func pieceID(num int) storj.PieceID {
	return storj.PieceID{0: byte(num)}
}

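// fakeSegment is a stub splitter.Segment that records the error passed to
// DoneReading and returns canned position, reader, and segment info values.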
type fakeSegment struct {
	splitter.Segment
	err error
}

func (fakeSegment) Position() metaclient.SegmentPosition {
	return metaclient.SegmentPosition{}
}

func (fakeSegment) Reader() io.Reader {
	return bytes.NewReader(nil)
}

func (s *fakeSegment) DoneReading(err error) {
	s.err = err
}

func (fakeSegment) Finalize() *splitter.SegmentInfo {
	return fakeSegmentInfo
}

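// fakeLimitsExchanger refuses to exchange limits so tests can assert the
// failure path when not enough pieces upload successfully.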
type fakeLimitsExchanger struct{}

func (fakeLimitsExchanger) ExchangeLimits(ctx context.Context, segmentID storj.SegmentID, pieceNumbers []int) (storj.SegmentID, []*pb.AddressedOrderLimit, error) {
	return nil, nil, errors.New("exchanges disallowed for test")
}

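// fakePiecePutter verifies the piece private key, drains the piece data, and
// then simulates the upload: bad nodes fail, slow nodes block until one of
// the contexts is cancelled, and fast nodes succeed immediately.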
type fakePiecePutter struct{}

func (fakePiecePutter) PutPiece(longTailCtx, uploadCtx context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, deprecated *struct{}, err error) {
	if !bytes.Equal(fakePrivateKey.Bytes(), privateKey.Bytes()) {
		return nil, nil, errs.New("private key was not passed correctly")
	}

	if _, err := io.ReadAll(data); err != nil {
		return nil, nil, err
	}

	switch {
	case isNodeKind(limit.Limit.StorageNodeId, badKind):
		return nil, nil, errs.New("piece upload failed")
	case isNodeKind(limit.Limit.StorageNodeId, slowKind):
		select {
		case <-longTailCtx.Done():
			return nil, nil, longTailCtx.Err()
		case <-uploadCtx.Done():
			return nil, nil, uploadCtx.Err()
		}
	}
	return &pb.PieceHash{PieceId: limit.Limit.PieceId}, nil, nil
}

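// wrappedScheduler wraps a real scheduler so the test can verify how many
// handles and piece upload resources were claimed and that each was released.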
type wrappedScheduler struct {
	wrapped *scheduler.Scheduler
	handles []*wrappedHandle
}

func newWrappedScheduler() *wrappedScheduler {
	return &wrappedScheduler{
		wrapped: scheduler.New(scheduler.Options{MaximumConcurrent: 200}),
	}
}

func (w *wrappedScheduler) Join(ctx context.Context) (scheduler.Handle, bool) {
	wrapped, ok := w.wrapped.Join(ctx)
	if !ok {
		return nil, false
	}
	handle := &wrappedHandle{wrapped: wrapped}
	w.handles = append(w.handles, handle)
	return handle, true
}

func (w *wrappedScheduler) check(expectedUploaderCount int) error {
	// we're allowed 0 handles if we expected no upload resources
	if expectedUploaderCount == 0 && len(w.handles) == 0 {
		return nil
	}
	if len(w.handles) != 1 {
		return errs.New("expected one handle but got %d", len(w.handles))
	}
	return w.handles[0].check(expectedUploaderCount)
}

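// wrappedHandle wraps a scheduler handle, tracking the resources it hands out
// and how many times Done is called.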
type wrappedHandle struct {
	wrapped scheduler.Handle

	mu        sync.Mutex
	done      int
	resources []*wrappedResource
}

func (w *wrappedHandle) Get(ctx context.Context) (scheduler.Resource, bool) {
	if err := ctx.Err(); err != nil {
		return nil, false
	}
	wrapped, ok := w.wrapped.Get(ctx)
	if !ok {
		return nil, false
	}
	resource := &wrappedResource{wrapped: wrapped}
	w.resources = append(w.resources, resource)
	return resource, true
}

func (w *wrappedHandle) Done() {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.done++
}

func (w *wrappedHandle) check(resources int) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	switch {
	case w.done == 0:
		return errs.New("done not called")
	case w.done > 1:
		return errs.New("done more than once (%d times)", w.done)
	case len(w.resources) != resources:
		return errs.New("expected %d resource(s) but got %d", resources, len(w.resources))
	}
	var eg errs.Group
	for i, resource := range w.resources {
		if err := resource.check(); err != nil {
			eg.Add(errs.New("resource(%d): %w", i, err))
		}
	}
	return eg.Err()
}

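// wrappedResource wraps a scheduler resource and counts Done calls so the
// test can assert each resource is released exactly once.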
type wrappedResource struct {
	wrapped scheduler.Resource

	mu   sync.Mutex
	done int
}

func (r *wrappedResource) Done() {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.done++
}

func (r *wrappedResource) check() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	switch {
	case r.done == 0:
		return errs.New("done not called")
	case r.done > 1:
		return errs.New("done more than once (%d times)", r.done)
	}
	return nil
}

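// mustNewPiecePrivateKey returns a deterministic piece private key, panicking
// on failure since the tests cannot proceed without one.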
func mustNewPiecePrivateKey() storj.PiecePrivateKey {
	pk, err := storj.PiecePrivateKeyFromBytes(bytes.Repeat([]byte{1}, 64))
	if err != nil {
		panic(err)
	}
	return pk
}

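// mustNewRedundancyStrategy returns the Reed-Solomon redundancy strategy used
// throughout the tests, panicking on failure.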
func mustNewRedundancyStrategy() eestream.RedundancyStrategy {
	rs, err := eestream.NewRedundancyStrategyFromStorj(storj.RedundancyScheme{
		Algorithm:      storj.ReedSolomon,
		ShareSize:      64,
		RequiredShares: 1,
		RepairShares:   2,
		OptimalShares:  int16(optimalShares),
		TotalShares:    int16(totalShares),
	})
	if err != nil {
		panic(err)
	}
	return rs
}