storj.io/uplink@v1.13.0/private/storage/streams/store.go

     1  // Copyright (C) 2019 Storj Labs, Inc.
     2  // See LICENSE for copying information.
     3  
     4  package streams
     5  
     6  import (
     7  	"context"
     8  	"crypto/rand"
     9  	"io"
    10  	"os"
    11  	"sort"
    12  	"strconv"
    13  	"time"
    14  
    15  	"github.com/spacemonkeygo/monkit/v3"
    16  	"github.com/zeebo/errs"
    17  	"golang.org/x/exp/slices"
    18  
    19  	"storj.io/common/context2"
    20  	"storj.io/common/encryption"
    21  	"storj.io/common/paths"
    22  	"storj.io/common/pb"
    23  	"storj.io/common/ranger"
    24  	"storj.io/common/storj"
    25  	"storj.io/uplink/private/ecclient"
    26  	"storj.io/uplink/private/eestream"
    27  	"storj.io/uplink/private/metaclient"
    28  	"storj.io/uplink/private/testuplink"
    29  )
    30  
    31  // DisableDeleteOnCancel is now a no-op.
    32  func DisableDeleteOnCancel(ctx context.Context) context.Context {
    33  	return ctx
    34  }
    35  
    36  var mon = monkit.Package()
    37  
    38  // Meta info about a stream.
    39  type Meta struct {
    40  	Modified   time.Time
    41  	Expiration time.Time
    42  	Size       int64
    43  	Data       []byte
    44  	Version    []byte
    45  }
    46  
    47  // Part info about a part.
    48  type Part struct {
    49  	PartNumber uint32
    50  	Size       int64
    51  	Modified   time.Time
    52  	ETag       []byte
    53  }
    54  
    55  // Metadata is an interface for fetching the latest metadata for an object.
    56  type Metadata interface {
    57  	Metadata() ([]byte, error)
    58  }
    59  
    60  // Store is a store for streams. It implements typedStore as part of an ongoing migration
    61  // to use typed paths. See the shim for the store that the rest of the world interacts with.
    62  type Store struct {
    63  	*Uploader
    64  
    65  	metainfo             *metaclient.Client
    66  	ec                   ecclient.Client
    67  	segmentSize          int64
    68  	encStore             *encryption.Store
    69  	encryptionParameters storj.EncryptionParameters
    70  	inlineThreshold      int
    71  }
    72  
    73  // NewStreamStore constructs a stream store.
    74  func NewStreamStore(metainfo *metaclient.Client, ec ecclient.Client, segmentSize int64, encStore *encryption.Store, encryptionParameters storj.EncryptionParameters, inlineThreshold, longTailMargin int) (*Store, error) {
    75  	if segmentSize <= 0 {
    76  		return nil, errs.New("segment size must be larger than 0")
    77  	}
    78  	if encryptionParameters.BlockSize <= 0 {
    79  		return nil, errs.New("encryption block size must be larger than 0")
    80  	}
    81  
    82  	// TODO: this is a hack for now. Once the new upload codepath is enabled
    83  	// by default, we can clean this up and stop embedding the uploader in
    84  	// the streams store.
    85  	uploader, err := NewUploader(metainfo, ec, segmentSize, encStore, encryptionParameters, inlineThreshold, longTailMargin)
    86  	if err != nil {
    87  		return nil, err
    88  	}
    89  
    90  	return &Store{
    91  		Uploader:             uploader,
    92  		metainfo:             metainfo,
    93  		ec:                   ec,
    94  		segmentSize:          segmentSize,
    95  		encStore:             encStore,
    96  		encryptionParameters: encryptionParameters,
    97  		inlineThreshold:      inlineThreshold,
    98  	}, nil
    99  }
   100  
   101  // Close closes the underlying metainfo client.
   102  func (s *Store) Close() error {
   103  	return s.metainfo.Close()
   104  }
   105  
   106  // Put breaks up data as it comes in into pieces of s.segmentSize length, then
   107  // stores the first piece at s0/<key>, the second piece at s1/<key>, and the
   108  // *last* piece at l/<key>. It stores the given metadata, along with the
   109  // number of segments, in a new protobuf, in the metadata of l/<key>.
   110  //
   111  // If there is an error, it cleans up any uploaded segment before returning.
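        //
        // A rough usage sketch (illustrative only; "store" is an already constructed
        // *Store, "meta" is any Metadata implementation, and the remaining values come
        // from the caller):
        //
        //	m, err := store.Put(ctx, "bucket", "path/to/object", bytes.NewReader(payload), meta, expiration)
        //	if err != nil {
        //		return err // any uploaded segments have already been cleaned up
        //	}
        //	_ = m.Size // total plain size of the uploaded stream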
   112  func (s *Store) Put(ctx context.Context, bucket, unencryptedKey string, data io.Reader, metadata Metadata, expiration time.Time) (_ Meta, err error) {
   113  	defer mon.Task()(&ctx)(&err)
   114  	derivedKey, err := encryption.DeriveContentKey(bucket, paths.NewUnencrypted(unencryptedKey), s.encStore)
   115  	if err != nil {
   116  		return Meta{}, errs.Wrap(err)
   117  	}
   118  	encPath, err := encryption.EncryptPathWithStoreCipher(bucket, paths.NewUnencrypted(unencryptedKey), s.encStore)
   119  	if err != nil {
   120  		return Meta{}, errs.Wrap(err)
   121  	}
   122  
   123  	beginObjectReq := &metaclient.BeginObjectParams{
   124  		Bucket:               []byte(bucket),
   125  		EncryptedObjectKey:   []byte(encPath.Raw()),
   126  		ExpiresAt:            expiration,
   127  		EncryptionParameters: s.encryptionParameters,
   128  	}
   129  
   130  	var streamID storj.StreamID
   131  	defer func() {
   132  		if err != nil && !streamID.IsZero() {
   133  			s.deleteCancelledObject(context2.WithoutCancellation(ctx), bucket, encPath.Raw(), streamID)
   134  			return
   135  		}
   136  	}()
   137  
   138  	var (
   139  		currentSegment    uint32
   140  		contentKey        storj.Key
   141  		streamSize        int64
   142  		lastSegmentSize   int64
   143  		encryptedKey      []byte
   144  		encryptedKeyNonce storj.Nonce
   145  		segmentRS         eestream.RedundancyStrategy
   146  
   147  		requestsToBatch = make([]metaclient.BatchItem, 0, 2)
   148  	)
   149  
   150  	maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(s.segmentSize, s.encryptionParameters)
   151  	if err != nil {
   152  		return Meta{}, errs.Wrap(err)
   153  	}
   154  
   155  	eofReader := NewEOFReader(data)
   156  	for !eofReader.IsEOF() && !eofReader.HasError() {
   157  		// generate random key for encrypting the segment's content
   158  		_, err := rand.Read(contentKey[:])
   159  		if err != nil {
   160  			return Meta{}, errs.Wrap(err)
   161  		}
   162  
   163  		// Initialize the content nonce to the 1-based index of the current
   164  		// segment (currentSegment incremented by 1, since this segment has not
   165  		// been uploaded yet).
   166  		// The increment by 1 is to avoid nonce reuse with the metadata encryption,
   167  		// which is encrypted with the zero nonce.
   168  		contentNonce := storj.Nonce{}
   169  		_, err = encryption.Increment(&contentNonce, int64(currentSegment)+1)
   170  		if err != nil {
   171  			return Meta{}, errs.Wrap(err)
   172  		}
   173  
   174  		// generate random nonce for encrypting the content key
   175  		_, err = rand.Read(encryptedKeyNonce[:])
   176  		if err != nil {
   177  			return Meta{}, errs.Wrap(err)
   178  		}
   179  
   180  		// note that we are *not* using the cipher suite from the encryption store, which
   181  		// might be EncNull. we must make sure this actually encrypts here, otherwise the
   182  		// satellite will receive the decryption keys for all uploaded data.
   183  		// we also care that the storage nodes don't receive unencrypted data, even if
   184  		// paths are unencrypted.
   185  		if s.encryptionParameters.CipherSuite == storj.EncNull ||
   186  			s.encryptionParameters.CipherSuite == storj.EncNullBase64URL {
   187  			return Meta{}, errs.New("programmer error")
   188  		}
   189  		encryptedKey, err = encryption.EncryptKey(&contentKey, s.encryptionParameters.CipherSuite, derivedKey, &encryptedKeyNonce)
   190  		if err != nil {
   191  			return Meta{}, errs.Wrap(err)
   192  		}
   193  
   194  		sizeReader := SizeReader(eofReader)
   195  		peekReader := NewPeekThresholdReader(io.LimitReader(sizeReader, s.segmentSize))
   196  		// If the data is larger than the inline threshold size, then it will be a remote segment
   197  		isRemote, err := peekReader.IsLargerThan(s.inlineThreshold)
   198  		if err != nil {
   199  			return Meta{}, errs.Wrap(err)
   200  		}
   201  
   202  		segmentEncryption := metaclient.SegmentEncryption{
   203  			EncryptedKey:      encryptedKey,
   204  			EncryptedKeyNonce: encryptedKeyNonce,
   205  		}
   206  
   207  		if isRemote {
   208  			encrypter, err := encryption.NewEncrypter(s.encryptionParameters.CipherSuite, &contentKey, &contentNonce, int(s.encryptionParameters.BlockSize))
   209  			if err != nil {
   210  				return Meta{}, errs.Wrap(err)
   211  			}
   212  
   213  			paddedReader := encryption.PadReader(io.NopCloser(peekReader), encrypter.InBlockSize())
   214  			transformedReader := encryption.TransformReader(paddedReader, encrypter, 0)
   215  
   216  			beginSegment := &metaclient.BeginSegmentParams{
   217  				MaxOrderLimit: maxEncryptedSegmentSize,
   218  				Position: metaclient.SegmentPosition{
   219  					Index: int32(currentSegment),
   220  				},
   221  			}
   222  
   223  			var responses []metaclient.BatchResponse
   224  			if currentSegment == 0 {
   225  				responses, err = s.metainfo.Batch(ctx, beginObjectReq, beginSegment)
   226  				if err != nil {
   227  					return Meta{}, errs.Wrap(err)
   228  				}
   229  				objResponse, err := responses[0].BeginObject()
   230  				if err != nil {
   231  					return Meta{}, errs.Wrap(err)
   232  				}
   233  				streamID = objResponse.StreamID
   234  			} else {
   235  				beginSegment.StreamID = streamID
   236  				responses, err = s.metainfo.Batch(ctx, append(requestsToBatch, beginSegment)...)
   237  				requestsToBatch = requestsToBatch[:0]
   238  				if err != nil {
   239  					return Meta{}, errs.Wrap(err)
   240  				}
   241  			}
   242  
   243  			segResponse, err := responses[1].BeginSegment()
   244  			if err != nil {
   245  				return Meta{}, errs.Wrap(err)
   246  			}
   247  			segmentID := segResponse.SegmentID
   248  			limits := segResponse.Limits
   249  			piecePrivateKey := segResponse.PiecePrivateKey
   250  			segmentRS = segResponse.RedundancyStrategy
   251  
   252  			encSizedReader := SizeReader(transformedReader)
   253  			uploadResults, err := s.ec.PutSingleResult(ctx, limits, piecePrivateKey, segmentRS, encSizedReader)
   254  			if err != nil {
   255  				return Meta{}, errs.Wrap(err)
   256  			}
   257  
   258  			plainSize := sizeReader.Size()
   259  			if testuplink.IsWithoutPlainSize(ctx) {
   260  				plainSize = 0
   261  			}
   262  
   263  			requestsToBatch = append(requestsToBatch, &metaclient.CommitSegmentParams{
   264  				SegmentID:         segmentID,
   265  				SizeEncryptedData: encSizedReader.Size(),
   266  				PlainSize:         plainSize,
   267  				Encryption:        segmentEncryption,
   268  				UploadResult:      uploadResults,
   269  			})
   270  		} else {
   271  			data, err := io.ReadAll(peekReader)
   272  			if err != nil {
   273  				return Meta{}, errs.Wrap(err)
   274  			}
   275  
   276  			cipherData, err := encryption.Encrypt(data, s.encryptionParameters.CipherSuite, &contentKey, &contentNonce)
   277  			if err != nil {
   278  				return Meta{}, errs.Wrap(err)
   279  			}
   280  
   281  			plainSize := int64(len(data))
   282  			if testuplink.IsWithoutPlainSize(ctx) {
   283  				plainSize = 0
   284  			}
   285  
   286  			makeInlineSegment := &metaclient.MakeInlineSegmentParams{
   287  				Position: metaclient.SegmentPosition{
   288  					Index: int32(currentSegment),
   289  				},
   290  				Encryption:          segmentEncryption,
   291  				EncryptedInlineData: cipherData,
   292  				PlainSize:           plainSize,
   293  			}
   294  			if currentSegment == 0 {
   295  				responses, err := s.metainfo.Batch(ctx, beginObjectReq, makeInlineSegment)
   296  				if err != nil {
   297  					return Meta{}, errs.Wrap(err)
   298  				}
   299  				objResponse, err := responses[0].BeginObject()
   300  				if err != nil {
   301  					return Meta{}, errs.Wrap(err)
   302  				}
   303  				streamID = objResponse.StreamID
   304  			} else {
   305  				makeInlineSegment.StreamID = streamID
   306  				requestsToBatch = append(requestsToBatch, makeInlineSegment)
   307  			}
   308  		}
   309  
   310  		lastSegmentSize = sizeReader.Size()
   311  		streamSize += lastSegmentSize
   312  		currentSegment++
   313  	}
   314  
   315  	if eofReader.HasError() {
   316  		return Meta{}, errs.Wrap(eofReader.err)
   317  	}
   318  
   319  	metadataBytes, err := metadata.Metadata()
   320  	if err != nil {
   321  		return Meta{}, errs.Wrap(err)
   322  	}
   323  
   324  	// We still need SegmentsSize and LastSegmentSize for backward
   325  	// compatibility with old uplinks.
   326  	streamInfo, err := pb.Marshal(&pb.StreamInfo{
   327  		SegmentsSize:    s.segmentSize,
   328  		LastSegmentSize: lastSegmentSize,
   329  		Metadata:        metadataBytes,
   330  	})
   331  	if err != nil {
   332  		return Meta{}, errs.Wrap(err)
   333  	}
   334  
   335  	// encrypt metadata with the content encryption key and zero nonce.
   336  	encryptedStreamInfo, err := encryption.Encrypt(streamInfo, s.encryptionParameters.CipherSuite, &contentKey, &storj.Nonce{})
   337  	if err != nil {
   338  		return Meta{}, errs.Wrap(err)
   339  	}
   340  
   341  	streamMeta := pb.StreamMeta{
   342  		EncryptedStreamInfo: encryptedStreamInfo,
   343  	}
   344  
   345  	objectMetadata, err := pb.Marshal(&streamMeta)
   346  	if err != nil {
   347  		return Meta{}, errs.Wrap(err)
   348  	}
   349  
   350  	commitObject := metaclient.CommitObjectParams{
   351  		StreamID:          streamID,
   352  		EncryptedMetadata: objectMetadata,
   353  	}
   354  	if s.encryptionParameters.CipherSuite != storj.EncNull {
   355  		commitObject.EncryptedMetadataEncryptedKey = encryptedKey
   356  		commitObject.EncryptedMetadataNonce = encryptedKeyNonce
   357  	}
   358  
   359  	var rawObject metaclient.RawObjectItem
   360  	if len(requestsToBatch) > 0 {
   361  		responses, err := s.metainfo.Batch(ctx, append(requestsToBatch, &commitObject)...)
   362  		if err != nil {
   363  			return Meta{}, errs.Wrap(err)
   364  		}
   365  		if len(responses) > 0 && responses[len(responses)-1].IsCommitObject() {
   366  			response, _ := responses[len(responses)-1].CommitObject()
   367  			rawObject = response.Object
   368  		}
   369  	} else {
   370  		response, err := s.metainfo.CommitObjectWithResponse(ctx, commitObject)
   371  		if err != nil {
   372  			return Meta{}, errs.Wrap(err)
   373  		}
   374  		rawObject = response.Object
   375  	}
   376  
   377  	satStreamID := &pb.SatStreamID{}
   378  	if err := pb.Unmarshal(streamID, satStreamID); err != nil {
   379  		return Meta{}, errs.Wrap(err)
   380  	}
   381  
   382  	resultMeta := Meta{
   383  		Modified:   rawObject.Created,
   384  		Expiration: expiration,
   385  		Size:       streamSize,
   386  		Data:       metadataBytes,
   387  		Version:    rawObject.Version,
   388  	}
   389  
   390  	return resultMeta, nil
   391  }
   392  
   393  // PutPart uploads a single part.
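        //
        // The part's ETag is received over eTagCh rather than passed directly, so a
        // caller can supply it after the data has been consumed. A rough sketch,
        // assuming the ETag is already known up front (names are illustrative):
        //
        //	etagCh := make(chan []byte, 1)
        //	etagCh <- precomputedETag
        //	part, err := store.PutPart(ctx, "bucket", "path/to/object", streamID, 1, etagCh, data)
        //	if err != nil {
        //		return err
        //	}
        //	_ = part.Size // total plain size of the uploaded part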
   394  func (s *Store) PutPart(ctx context.Context, bucket, unencryptedKey string, streamID storj.StreamID, partNumber uint32, eTagCh <-chan []byte, data io.Reader) (_ Part, err error) {
   395  	defer mon.Task()(&ctx)(&err)
   396  
   397  	var (
   398  		currentSegment        uint32
   399  		streamSize            int64
   400  		lastSegmentContentKey storj.Key
   401  
   402  		// requests to send in a single call; in this case it will always be CommitSegment or MakeInlineSegment
   403  		requestsToBatch []metaclient.BatchItem
   404  	)
   405  
   406  	maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(s.segmentSize, s.encryptionParameters)
   407  	if err != nil {
   408  		return Part{}, errs.Wrap(err)
   409  	}
   410  
   411  	derivedKey, err := encryption.DeriveContentKey(bucket, paths.NewUnencrypted(unencryptedKey), s.encStore)
   412  	if err != nil {
   413  		return Part{}, errs.Wrap(err)
   414  	}
   415  
   416  	eofReader := NewEOFReader(data)
   417  	for !eofReader.IsEOF() && !eofReader.HasError() {
   418  
   419  		// generate random key for encrypting the segment's content
   420  		var contentKey storj.Key
   421  		_, err := rand.Read(contentKey[:])
   422  		if err != nil {
   423  			return Part{}, errs.Wrap(err)
   424  		}
   425  
   426  		// Initialize the content nonce from the part number and the 1-based
   427  		// index of the current segment (currentSegment incremented by 1, since
   428  		// this segment has not been uploaded yet).
   429  		// The increment by 1 is to avoid nonce reuse with the metadata encryption,
   430  		// which is encrypted with the zero nonce.
   431  		contentNonce := storj.Nonce{}
   432  		_, err = encryption.Increment(&contentNonce, (int64(partNumber)<<32)|(int64(currentSegment)+1))
   433  		if err != nil {
   434  			return Part{}, errs.Wrap(err)
   435  		}
   436  
   437  		var encryptedKeyNonce storj.Nonce
   438  		// generate random nonce for encrypting the content key
   439  		_, err = rand.Read(encryptedKeyNonce[:])
   440  		if err != nil {
   441  			return Part{}, errs.Wrap(err)
   442  		}
   443  
   444  		encryptedKey, err := encryption.EncryptKey(&contentKey, s.encryptionParameters.CipherSuite, derivedKey, &encryptedKeyNonce)
   445  		if err != nil {
   446  			return Part{}, errs.Wrap(err)
   447  		}
   448  
   449  		sizeReader := SizeReader(eofReader)
   450  		segmentReader := io.LimitReader(sizeReader, s.segmentSize)
   451  		peekReader := NewPeekThresholdReader(segmentReader)
   452  		// If the data is larger than the inline threshold size, then it will be a remote segment
   453  		isRemote, err := peekReader.IsLargerThan(s.inlineThreshold)
   454  		if err != nil {
   455  			return Part{}, errs.Wrap(err)
   456  		}
   457  
   458  		segmentEncryption := metaclient.SegmentEncryption{}
   459  		if s.encryptionParameters.CipherSuite != storj.EncNull {
   460  			segmentEncryption = metaclient.SegmentEncryption{
   461  				EncryptedKey:      encryptedKey,
   462  				EncryptedKeyNonce: encryptedKeyNonce,
   463  			}
   464  		}
   465  
   466  		if isRemote {
   467  			encrypter, err := encryption.NewEncrypter(s.encryptionParameters.CipherSuite, &contentKey, &contentNonce, int(s.encryptionParameters.BlockSize))
   468  			if err != nil {
   469  				return Part{}, errs.Wrap(err)
   470  			}
   471  
   472  			paddedReader := encryption.PadReader(io.NopCloser(peekReader), encrypter.InBlockSize())
   473  			transformedReader := encryption.TransformReader(paddedReader, encrypter, 0)
   474  
   475  			beginSegment := metaclient.BeginSegmentParams{
   476  				StreamID:      streamID,
   477  				MaxOrderLimit: maxEncryptedSegmentSize,
   478  				Position: metaclient.SegmentPosition{
   479  					PartNumber: int32(partNumber),
   480  					Index:      int32(currentSegment),
   481  				},
   482  			}
   483  
   484  			var beginResponse metaclient.BeginSegmentResponse
   485  			if len(requestsToBatch) == 0 {
   486  				beginResponse, err = s.metainfo.BeginSegment(ctx, beginSegment)
   487  				if err != nil {
   488  					return Part{}, errs.Wrap(err)
   489  				}
   490  			} else {
   491  				responses, err := s.metainfo.Batch(ctx, append(requestsToBatch, &beginSegment)...)
   492  				if err != nil {
   493  					return Part{}, errs.Wrap(err)
   494  				}
   495  
   496  				requestsToBatch = requestsToBatch[:0]
   497  
   498  				beginResponse, err = responses[1].BeginSegment()
   499  				if err != nil {
   500  					return Part{}, errs.Wrap(err)
   501  				}
   502  			}
   503  
   504  			encSizedReader := SizeReader(transformedReader)
   505  			uploadResults, err := s.ec.PutSingleResult(ctx, beginResponse.Limits, beginResponse.PiecePrivateKey,
   506  				beginResponse.RedundancyStrategy, encSizedReader)
   507  			if err != nil {
   508  				return Part{}, errs.Wrap(err)
   509  			}
   510  
   511  			lastSegmentContentKey = contentKey
   512  			requestsToBatch = append(requestsToBatch, &metaclient.CommitSegmentParams{
   513  				SegmentID:         beginResponse.SegmentID,
   514  				SizeEncryptedData: encSizedReader.Size(),
   515  				PlainSize:         sizeReader.Size(),
   516  				Encryption:        segmentEncryption,
   517  				UploadResult:      uploadResults,
   518  			})
   519  		} else {
   520  			data, err := io.ReadAll(peekReader)
   521  			if err != nil {
   522  				return Part{}, errs.Wrap(err)
   523  			}
   524  
   525  			// if it's the first segment then we still need to create it even
   526  			// if it's zero size, because it can be the last part of the upload
   527  			// and we need to allow it for S3 compatibility
   528  			if len(data) > 0 || currentSegment == 0 {
   529  				lastSegmentContentKey = contentKey
   530  				cipherData, err := encryption.Encrypt(data, s.encryptionParameters.CipherSuite, &contentKey, &contentNonce)
   531  				if err != nil {
   532  					return Part{}, errs.Wrap(err)
   533  				}
   534  
   535  				requestsToBatch = append(requestsToBatch, &metaclient.MakeInlineSegmentParams{
   536  					StreamID: streamID,
   537  					Position: metaclient.SegmentPosition{
   538  						PartNumber: int32(partNumber),
   539  						Index:      int32(currentSegment),
   540  					},
   541  					Encryption:          segmentEncryption,
   542  					EncryptedInlineData: cipherData,
   543  					PlainSize:           int64(len(data)),
   544  				})
   545  			}
   546  		}
   547  		streamSize += sizeReader.Size()
   548  		currentSegment++
   549  	}
   550  
   551  	var eTag []byte
   552  	select {
   553  	case eTag = <-eTagCh:
   554  	case <-ctx.Done():
   555  		return Part{}, ctx.Err()
   556  	}
   557  
   558  	// store the ETag only on the last segment in a part
   559  	encryptedTag, err := encryptETag(eTag, s.encryptionParameters, &lastSegmentContentKey)
   560  	if err != nil {
   561  		return Part{}, errs.Wrap(err)
   562  	}
   563  	if len(requestsToBatch) > 0 {
   564  		// take the last segment in the part and set its ETag
   565  		switch singleRequest := requestsToBatch[len(requestsToBatch)-1].(type) {
   566  		case *metaclient.MakeInlineSegmentParams:
   567  			singleRequest.EncryptedTag = encryptedTag
   568  		case *metaclient.CommitSegmentParams:
   569  			singleRequest.EncryptedTag = encryptedTag
   570  		default:
   571  			return Part{}, errs.New("unsupported request type")
   572  		}
   573  		_, err = s.metainfo.Batch(ctx, requestsToBatch...)
   574  		if err != nil {
   575  			return Part{}, errs.Wrap(err)
   576  		}
   577  	}
   578  
   579  	return Part{
   580  		PartNumber: partNumber,
   581  		Size:       streamSize,
   582  		ETag:       eTag, // return plain ETag
   583  	}, nil
   584  }
   585  
   586  // TODO move it to separate package?
   587  func encryptETag(etag []byte, encryptionParameters storj.EncryptionParameters, contentKey *storj.Key) ([]byte, error) {
   588  	// Derive another key from the randomly generated content key to encrypt
   589  	// the segment's ETag.
   590  	etagKey, err := deriveETagKey(contentKey)
   591  	if err != nil {
   592  		return nil, err
   593  	}
   594  
   595  	encryptedETag, err := encryption.Encrypt(etag, encryptionParameters.CipherSuite, etagKey, &storj.Nonce{})
   596  	if err != nil {
   597  		return nil, err
   598  	}
   599  
   600  	return encryptedETag, nil
   601  }
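        // Decrypting an ETag is the mirror image of encryptETag. A sketch (no such
        // helper exists in this file; encryptionParameters and the segment's contentKey
        // come from the caller):
        //
        //	etagKey, err := deriveETagKey(contentKey)
        //	if err != nil {
        //		return nil, err
        //	}
        //	plainETag, err := encryption.Decrypt(encryptedETag, encryptionParameters.CipherSuite, etagKey, &storj.Nonce{})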
   602  
   603  // TODO move it to separate package?
   604  func deriveETagKey(key *storj.Key) (*storj.Key, error) {
   605  	return encryption.DeriveKey(key, "storj-etag-v1")
   606  }
   607  
   608  // Get returns a ranger that knows what the overall size is (from l/<key>)
   609  // and then returns the appropriate data from segments s0/<key>, s1/<key>,
   610  // ..., l/<key>.
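        //
        // A rough usage sketch (illustrative; "info" comes from a prior metaclient
        // download request for the object):
        //
        //	rr, err := store.Get(ctx, "bucket", "path/to/object", info, false)
        //	if err != nil {
        //		return err
        //	}
        //	// read the whole object back through the ranger
        //	r, err := rr.Range(ctx, 0, rr.Size())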
   611  func (s *Store) Get(ctx context.Context, bucket, unencryptedKey string, info metaclient.DownloadInfo, nextSegmentErrorDetection bool) (rr ranger.Ranger, err error) {
   612  	defer mon.Task()(&ctx)(&err)
   613  
   614  	object := info.Object
   615  	if object.Size == 0 {
   616  		return ranger.ByteRanger(nil), nil
   617  	}
   618  
   619  	derivedKey, err := encryption.DeriveContentKey(bucket, paths.NewUnencrypted(unencryptedKey), s.encStore)
   620  	if err != nil {
   621  		return nil, errs.Wrap(err)
   622  	}
   623  
   624  	// make copies of these slices so we aren't mutating data that was passed in
   625  	// to Get. even though info was passed by copy, the slices it contains weren't
   626  	// deep copied, so we'll copy them here and only use the copies below.
   627  	downloaded := slices.Clone(info.DownloadedSegments)
   628  	listed := slices.Clone(info.ListSegments.Items)
   629  
   630  	// calculate plain offset and plain size for migrated objects.
   631  	for i := 0; i < len(downloaded); i++ {
   632  		seg := &downloaded[i].Info
   633  		seg.PlainOffset, seg.PlainSize = calculatePlain(*seg.Position, seg.PlainOffset, seg.PlainSize, object)
   634  	}
   635  	for i := 0; i < len(listed); i++ {
   636  		seg := &listed[i]
   637  		seg.PlainOffset, seg.PlainSize = calculatePlain(seg.Position, seg.PlainOffset, seg.PlainSize, object)
   638  	}
   639  
   640  	// ensure that the items are correctly sorted
   641  	sort.Slice(downloaded, func(i, k int) bool {
   642  		return downloaded[i].Info.PlainOffset < downloaded[k].Info.PlainOffset
   643  	})
   644  	sort.Slice(listed, func(i, k int) bool {
   645  		return listed[i].PlainOffset < listed[k].PlainOffset
   646  	})
   647  
   648  	// calculate the offset for the range listed / downloaded
   649  	var offset int64
   650  	switch {
   651  	case len(downloaded) > 0 && len(listed) > 0:
   652  		if listed[0].PlainOffset < downloaded[0].Info.PlainOffset {
   653  			offset = listed[0].PlainOffset
   654  		} else {
   655  			offset = downloaded[0].Info.PlainOffset
   656  		}
   657  	case len(downloaded) > 0:
   658  		offset = downloaded[0].Info.PlainOffset
   659  	case len(listed) > 0:
   660  		offset = listed[0].PlainOffset
   661  	}
   662  
   663  	rangers := make([]ranger.Ranger, 0, len(downloaded)+len(listed)+2)
   664  
   665  	if offset > 0 {
   666  		rangers = append(rangers, &invalidRanger{size: offset})
   667  	}
   668  
   669  	for len(downloaded) > 0 || len(listed) > 0 {
   670  		switch {
   671  		case len(downloaded) > 0 && downloaded[0].Info.PlainOffset == offset:
   672  			segment := downloaded[0]
   673  			downloaded = downloaded[1:]
   674  
   675  			// drop any duplicate segment info in listing
   676  			for len(listed) > 0 && listed[0].PlainOffset == offset {
   677  				if listed[0].Position != *segment.Info.Position {
   678  					return nil, errs.New("segment info for download and list does not match: %v != %v", listed[0].Position, *segment.Info.Position)
   679  				}
   680  				listed = listed[1:]
   681  			}
   682  
   683  			encryptedRanger, err := s.Ranger(ctx, segment, nextSegmentErrorDetection)
   684  			if err != nil {
   685  				return nil, errs.Wrap(err)
   686  			}
   687  			nextSegmentErrorDetection = false
   688  
   689  			contentNonce, err := deriveContentNonce(*segment.Info.Position)
   690  			if err != nil {
   691  				return nil, errs.Wrap(err)
   692  			}
   693  
   694  			enc := segment.Info.SegmentEncryption
   695  			decrypted, err := decryptRanger(ctx, encryptedRanger, segment.Info.PlainSize, object.EncryptionParameters, derivedKey, enc.EncryptedKey, &enc.EncryptedKeyNonce, &contentNonce)
   696  			if err != nil {
   697  				return nil, errs.Wrap(err)
   698  			}
   699  
   700  			rangers = append(rangers, decrypted)
   701  			offset += segment.Info.PlainSize
   702  
   703  		case len(listed) > 0 && listed[0].PlainOffset == offset:
   704  			segment := listed[0]
   705  			listed = listed[1:]
   706  
   707  			contentNonce, err := deriveContentNonce(segment.Position)
   708  			if err != nil {
   709  				return nil, errs.Wrap(err)
   710  			}
   711  
   712  			rangers = append(rangers, &lazySegmentRanger{
   713  				metainfo:             s.metainfo,
   714  				streams:              s,
   715  				streamID:             object.ID,
   716  				position:             segment.Position,
   717  				plainSize:            segment.PlainSize,
   718  				derivedKey:           derivedKey,
   719  				startingNonce:        &contentNonce,
   720  				encryptionParameters: object.EncryptionParameters,
   721  				errorDetection:       nextSegmentErrorDetection,
   722  			})
   723  			offset += segment.PlainSize
   724  			nextSegmentErrorDetection = false
   725  
   726  		default:
   727  			return nil, errs.New("missing segment for offset %d", offset)
   728  		}
   729  	}
   730  
   731  	if offset < object.Size {
   732  		rangers = append(rangers, &invalidRanger{size: object.Size - offset})
   733  	}
   734  	if offset > object.Size {
   735  		return nil, errs.New("invalid final offset %d; expected %d", offset, object.Size)
   736  	}
   737  
   738  	return ranger.ConcatWithOpts(ranger.ConcatOpts{
   739  		Prefetch:                   true,
   740  		ForceReads:                 prefetchForceReads,
   741  		PrefetchWhenBytesRemaining: prefetchBytesRemaining,
   742  	}, rangers...), nil
   743  }
   744  
   745  var (
   746  	// EXPERIMENTAL VALUES
   747  	// TODO: once we understand the usefulness of these, we should expose useful
   748  	// values as real options.
   749  	prefetchForceReads, _     = strconv.ParseBool(os.Getenv("STORJ_EXP_UPLINK_DOWNLOAD_PREFETCH_FORCE_READS"))
   750  	prefetchBytesRemaining, _ = strconv.ParseInt(os.Getenv("STORJ_EXP_UPLINK_DOWNLOAD_PREFETCH_BYTES_REMAINING"), 0, 64)
   751  )
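        // For example, to experiment with download prefetching one might run an uplink
        // process with the following environment (values purely illustrative):
        //
        //	STORJ_EXP_UPLINK_DOWNLOAD_PREFETCH_FORCE_READS=true
        //	STORJ_EXP_UPLINK_DOWNLOAD_PREFETCH_BYTES_REMAINING=1048576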
   752  
   753  func deriveContentNonce(pos metaclient.SegmentPosition) (storj.Nonce, error) {
   754  	// The increment by 1 is to avoid nonce reuse with the metadata encryption,
   755  	// which is encrypted with the zero nonce.
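        	// For example: part 0, segment 0 yields counter 1, while part 2, segment 3
        	// yields (2<<32)|(3+1) = 8589934596.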
   756  	var n storj.Nonce
   757  	_, err := encryption.Increment(&n, int64(pos.PartNumber)<<32|(int64(pos.Index)+1))
   758  	return n, err
   759  }
   760  
   761  // calculatePlain calculates a segment's plain offset and size, taking migrated objects into account.
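        //
        // For example (illustrative numbers): for a migrated object with
        // FixedSegmentSize = 64 MiB, SegmentCount = 3 and LastSegment.Size = 10 MiB,
        // segment index 1 maps to plain offset 64 MiB and size 64 MiB, while the last
        // segment (index 2) maps to plain offset 128 MiB and size 10 MiB.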
   762  func calculatePlain(pos metaclient.SegmentPosition, rawOffset, rawSize int64, object metaclient.Object) (plainOffset, plainSize int64) {
   763  	switch {
   764  	case object.FixedSegmentSize <= 0:
   765  		// this is a multipart object and has correct offset and size.
   766  		return rawOffset, rawSize
   767  	case pos.PartNumber > 0:
   768  		// this case should be impossible, however let's return the initial values.
   769  		return rawOffset, rawSize
   770  	case pos.Index == int32(object.SegmentCount-1):
   771  		// this is the last segment
   772  		return int64(pos.Index) * object.FixedSegmentSize, object.LastSegment.Size
   773  	default:
   774  		// this is a fixed size segment
   775  		return int64(pos.Index) * object.FixedSegmentSize, object.FixedSegmentSize
   776  	}
   777  }
   778  
   779  type lazySegmentRanger struct {
   780  	ranger               ranger.Ranger
   781  	metainfo             *metaclient.Client
   782  	streams              *Store
   783  	streamID             storj.StreamID
   784  	position             metaclient.SegmentPosition
   785  	plainSize            int64
   786  	derivedKey           *storj.Key
   787  	startingNonce        *storj.Nonce
   788  	encryptionParameters storj.EncryptionParameters
   789  	errorDetection       bool
   790  }
   791  
   792  // Size implements Ranger.Size.
   793  func (lr *lazySegmentRanger) Size() int64 {
   794  	return lr.plainSize
   795  }
   796  
   797  // Range implements Ranger.Range; the segment is fetched and decrypted lazily on first call.
   798  func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
   799  	defer mon.Task()(&ctx)(&err)
   800  
   801  	if lr.ranger == nil {
   802  		downloadResponse, err := lr.metainfo.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
   803  			StreamID: lr.streamID,
   804  			Position: metaclient.SegmentPosition{
   805  				PartNumber: lr.position.PartNumber,
   806  				Index:      lr.position.Index,
   807  			},
   808  		})
   809  		if err != nil {
   810  			return nil, err
   811  		}
   812  
   813  		rr, err := lr.streams.Ranger(ctx, downloadResponse, lr.errorDetection)
   814  		if err != nil {
   815  			return nil, err
   816  		}
   817  
   818  		encryptedKey, keyNonce := downloadResponse.Info.SegmentEncryption.EncryptedKey, downloadResponse.Info.SegmentEncryption.EncryptedKeyNonce
   819  		lr.ranger, err = decryptRanger(ctx, rr, lr.plainSize, lr.encryptionParameters, lr.derivedKey, encryptedKey, &keyNonce, lr.startingNonce)
   820  		if err != nil {
   821  			return nil, err
   822  		}
   823  	}
   824  	return lr.ranger.Range(ctx, offset, length)
   825  }
   826  
   827  // decryptRanger returns a decrypted ranger of the given rr ranger.
   828  func decryptRanger(ctx context.Context, rr ranger.Ranger, plainSize int64, encryptionParameters storj.EncryptionParameters, derivedKey *storj.Key, encryptedKey storj.EncryptedPrivateKey, encryptedKeyNonce, startingNonce *storj.Nonce) (decrypted ranger.Ranger, err error) {
   829  	defer mon.Task()(&ctx)(&err)
   830  	contentKey, err := encryption.DecryptKey(encryptedKey, encryptionParameters.CipherSuite, derivedKey, encryptedKeyNonce)
   831  	if err != nil {
   832  		return nil, err
   833  	}
   834  
   835  	decrypter, err := encryption.NewDecrypter(encryptionParameters.CipherSuite, contentKey, startingNonce, int(encryptionParameters.BlockSize))
   836  	if err != nil {
   837  		return nil, err
   838  	}
   839  
   840  	var rd ranger.Ranger
   841  	if rr.Size()%int64(decrypter.InBlockSize()) != 0 {
   842  		reader, err := rr.Range(ctx, 0, rr.Size())
   843  		if err != nil {
   844  			return nil, err
   845  		}
   846  		defer func() { err = errs.Combine(err, reader.Close()) }()
   847  		cipherData, err := io.ReadAll(reader)
   848  		if err != nil {
   849  			return nil, err
   850  		}
   851  		data, err := encryption.Decrypt(cipherData, encryptionParameters.CipherSuite, contentKey, startingNonce)
   852  		if err != nil {
   853  			return nil, err
   854  		}
   855  		return ranger.ByteRanger(data), nil
   856  	}
   857  
   858  	rd, err = encryption.Transform(rr, decrypter)
   859  	if err != nil {
   860  		return nil, err
   861  	}
   862  	return encryption.Unpad(rd, int(rd.Size()-plainSize))
   863  }
   864  
   865  // deleteCancelledObject handles cleanup of segments on receiving CTRL+C or context cancellation.
   866  func (s *Store) deleteCancelledObject(ctx context.Context, bucketName, encryptedObjectKey string, streamID storj.StreamID) {
   867  	var err error
   868  	defer mon.Task()(&ctx)(&err)
   869  
   870  	_, err = s.metainfo.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
   871  		Bucket:             []byte(bucketName),
   872  		EncryptedObjectKey: []byte(encryptedObjectKey),
   873  		StreamID:           streamID,
   874  		Status:             int32(pb.Object_UPLOADING),
   875  	})
   876  	if err != nil {
   877  		mon.Event("failed to delete cancelled object")
   878  	}
   879  }
   880  
   881  // Ranger creates a ranger for downloading erasure-coded segment data from storage nodes.
   882  func (s *Store) Ranger(ctx context.Context, response metaclient.DownloadSegmentWithRSResponse, errorDetection bool) (rr ranger.Ranger, err error) {
   883  	info := response.Info
   884  	limits := response.Limits
   885  
   886  	defer mon.Task()(&ctx, info, limits, info.RedundancyScheme)(&err)
   887  
   888  	// no order limits also means it's an inline segment
   889  	if len(info.EncryptedInlineData) != 0 || len(limits) == 0 {
   890  		return ranger.ByteRanger(info.EncryptedInlineData), nil
   891  	}
   892  
   893  	redundancy, err := eestream.NewRedundancyStrategyFromStorj(info.RedundancyScheme)
   894  	if err != nil {
   895  		return nil, err
   896  	}
   897  
   898  	rr, err = s.ec.GetWithOptions(ctx, limits, info.PiecePrivateKey, redundancy, info.EncryptedSize, ecclient.GetOptions{ErrorDetection: errorDetection})
   899  	return rr, err
   900  }
   901  
   902  // invalidRanger is used to mark a range as invalid.
   903  type invalidRanger struct {
   904  	size int64
   905  }
   906  
   907  func (d *invalidRanger) Size() int64 { return d.size }
   908  
   909  func (d *invalidRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
   910  	if offset < 0 {
   911  		return nil, errs.New("negative offset")
   912  	}
   913  	if length < 0 {
   914  		return nil, errs.New("negative length")
   915  	}
   916  	// allow reading zero bytes from an invalid range.
   917  	if 0 <= offset && offset <= d.size && length == 0 {
   918  		return emptyReader{}, nil
   919  	}
   920  	return nil, errs.New("invalid range %d:%d (size:%d)", offset, length, d.size)
   921  }
   922  
   923  // emptyReader is used to read no data.
   924  type emptyReader struct{}
   925  
   926  func (emptyReader) Read(data []byte) (n int, err error) { return 0, io.EOF }
   927  
   928  func (emptyReader) Close() error { return nil }