storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/object-api-utils.go

/*
 * MinIO Cloud Storage, (C) 2015-2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"unicode/utf8"

	"github.com/google/uuid"
	"github.com/klauspost/compress/s2"
	"github.com/klauspost/readahead"
	"github.com/minio/minio-go/v7/pkg/s3utils"

	"storj.io/minio/cmd/config/compress"
	"storj.io/minio/cmd/config/storageclass"
	"storj.io/minio/cmd/crypto"
	xhttp "storj.io/minio/cmd/http"
	"storj.io/minio/cmd/logger"
	"storj.io/minio/pkg/bucket/lifecycle"
	"storj.io/minio/pkg/hash"
	"storj.io/minio/pkg/ioutil"
	"storj.io/minio/pkg/trie"
	"storj.io/minio/pkg/wildcard"
)

const (
	// MinIO meta bucket.
	minioMetaBucket = ".minio.sys"
	// Multipart meta prefix.
	mpartMetaPrefix = "multipart"
	// MinIO Multipart meta prefix.
	minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix
	// MinIO tmp meta prefix.
	minioMetaTmpBucket = minioMetaBucket + "/tmp"
	// MinIO tmp meta prefix for deleted objects.
	minioMetaTmpDeletedBucket = minioMetaTmpBucket + "/.trash"

	// DNS separator (period), used for bucket name validation.
	dnsDelimiter = "."
	// Use read-ahead on compressed files bigger than this.
	compReadAheadSize = 100 << 20
	// Read this many buffers ahead.
	compReadAheadBuffers = 5
	// Size of each buffer.
	compReadAheadBufSize = 1 << 20
)

// isMinioMetaBucketName returns true if the given bucket is a MinIO internal
// bucket and false otherwise.
func isMinioMetaBucketName(bucket string) bool {
	return bucket == minioMetaBucket ||
		bucket == minioMetaMultipartBucket ||
		bucket == minioMetaTmpBucket ||
		bucket == dataUsageBucket
}

// IsValidBucketName verifies that a bucket name is in accordance with
// Amazon's requirements (i.e. DNS naming conventions). It must be 3-63
// characters long, and it must be a sequence of one or more labels
// separated by periods. Each label can contain lowercase ASCII
// letters, decimal digits and hyphens, but must not begin or end with
// a hyphen. See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
func IsValidBucketName(bucket string) bool {
	// Special case when bucket is equal to one of the meta buckets.
	if isMinioMetaBucketName(bucket) {
		return true
	}
	if len(bucket) < 3 || len(bucket) > 63 {
		return false
	}

	// Split on dot and check each piece conforms to rules.
	allNumbers := true
	pieces := strings.Split(bucket, dnsDelimiter)
	for _, piece := range pieces {
		if len(piece) == 0 || piece[0] == '-' ||
			piece[len(piece)-1] == '-' {
			// Current piece has 0-length or starts or
			// ends with a hyphen.
			return false
		}
		// Now only need to check if each piece is a valid
		// 'label' in AWS terminology and if the bucket looks
		// like an IP address.
		isNotNumber := false
		for i := 0; i < len(piece); i++ {
			switch {
			case (piece[i] >= 'a' && piece[i] <= 'z' ||
				piece[i] == '-'):
				// Found a non-digit character, so
				// this piece is not a number.
				isNotNumber = true
			case piece[i] >= '0' && piece[i] <= '9':
				// Nothing to do.
			default:
				// Found invalid character.
				return false
			}
		}
		allNumbers = allNumbers && !isNotNumber
	}
	// Does the bucket name look like an IP address?
	return !(len(pieces) == 4 && allNumbers)
}
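
// Illustrative examples (not part of the original file) of how the rules
// above play out; uppercase letters, IP-like names and short names are all
// rejected:
//
//	IsValidBucketName("my-bucket")   // true
//	IsValidBucketName("my.bucket")   // true
//	IsValidBucketName("My-Bucket")   // false: uppercase letter
//	IsValidBucketName("-bucket")     // false: label starts with a hyphen
//	IsValidBucketName("ab")          // false: shorter than 3 characters
//	IsValidBucketName("192.168.1.1") // false: looks like an IP address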

// IsValidObjectName verifies an object name in accordance with Amazon's
// requirements. It cannot exceed 1024 characters and must be a valid UTF-8
// string.
//
// See:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
//
// You should avoid the following characters in a key name because of
// significant special handling for consistency across all
// applications.
//
// Rejects strings with the following characters.
//
// - Backslash ("\")
//
// Additionally, MinIO does not support object names with a trailing SlashSeparator.
func IsValidObjectName(object string) bool {
	if len(object) == 0 {
		return false
	}
	if HasSuffix(object, SlashSeparator) {
		return false
	}
	return IsValidObjectPrefix(object)
}

// IsValidObjectPrefix verifies whether the prefix is a valid object name.
// It's valid to have an empty prefix.
func IsValidObjectPrefix(object string) bool {
	if hasBadPathComponent(object) {
		return false
	}
	if !utf8.ValidString(object) {
		return false
	}
	if strings.Contains(object, `//`) {
		return false
	}
	return true
}
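
// Illustrative examples (not part of the original file):
//
//	IsValidObjectName("photos/2021/cat.jpg") // true
//	IsValidObjectName("photos/")             // false: trailing slash
//	IsValidObjectName("a//b")                // false: empty path segment
//	IsValidObjectName("")                    // false: empty name
//	IsValidObjectPrefix("")                  // true: empty prefix is allowed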

// checkObjectNameForLengthAndSlash - checks for the validity of the object
// name length and whether the name has a slash as its prefix.
func checkObjectNameForLengthAndSlash(bucket, object string) error {
	// Check for the length of object name
	if len(object) > 1024 {
		return ObjectNameTooLong{
			Bucket: bucket,
			Object: object,
		}
	}
	// Check for slash as prefix in object name
	if HasPrefix(object, SlashSeparator) {
		return ObjectNamePrefixAsSlash{
			Bucket: bucket,
			Object: object,
		}
	}
	if runtime.GOOS == globalWindowsOSName {
		// Explicitly disallowed characters on Windows.
		// Avoids most problematic names.
		if strings.ContainsAny(object, `:*?"|<>`) {
			return ObjectNameInvalid{
				Bucket: bucket,
				Object: object,
			}
		}
	}
	return nil
}

// SlashSeparator - slash separator.
const SlashSeparator = "/"

// retainSlash - appends a trailing SlashSeparator to a non-empty path.
func retainSlash(s string) string {
	if s == "" {
		return s
	}
	return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
}

// pathsJoinPrefix - like pathJoin retains the trailing SlashSeparator of each
// element, joining every element with the given 'prefix'.
func pathsJoinPrefix(prefix string, elem ...string) (paths []string) {
	paths = make([]string, len(elem))
	for i, e := range elem {
		paths[i] = pathJoin(prefix, e)
	}
	return paths
}

// pathJoin - like path.Join() but retains the trailing SlashSeparator of the last element
func pathJoin(elem ...string) string {
	trailingSlash := ""
	if len(elem) > 0 {
		if HasSuffix(elem[len(elem)-1], SlashSeparator) {
			trailingSlash = SlashSeparator
		}
	}
	return path.Join(elem...) + trailingSlash
}
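
// Illustrative examples (not part of the original file) contrasting pathJoin
// with the standard library's path.Join, which always drops the trailing
// slash:
//
//	pathJoin("a", "b/")  // "a/b/"
//	path.Join("a", "b/") // "a/b"
//	retainSlash("a/b")   // "a/b/"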

// mustGetUUID - get a random UUID.
func mustGetUUID() string {
	u, err := uuid.NewRandom()
	if err != nil {
		logger.CriticalIf(GlobalContext, err)
	}

	return u.String()
}

// Create an S3-compatible MD5sum for a complete multipart transaction.
func getCompleteMultipartMD5(parts []CompletePart) string {
	var finalMD5Bytes []byte
	for _, part := range parts {
		md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
		if err != nil {
			finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
		} else {
			finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
		}
	}
	s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
	return s3MD5
}
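
// Illustrative example (not part of the original file): the multipart ETag is
// the MD5 of the concatenated binary part MD5s, suffixed with the part count,
// matching the well-known S3 "<hex>-N" form:
//
//	parts := []CompletePart{
//		{PartNumber: 1, ETag: "5d41402abc4b2a76b9719d911017c592"},
//		{PartNumber: 2, ETag: "7d793037a0760186574b0282f2f435e7"},
//	}
//	etag := getCompleteMultipartMD5(parts) // "<md5-of-both-digests>-2"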

// Clean unwanted fields from metadata
func cleanMetadata(metadata map[string]string) map[string]string {
	// Remove STANDARD StorageClass
	metadata = removeStandardStorageClass(metadata)
	// Clean meta etag keys 'md5Sum', 'etag', "expires", "x-amz-tagging".
	return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires", xhttp.AmzObjectTagging, "last-modified")
}

// Filter X-Amz-Storage-Class field only if it is set to STANDARD.
// This is done since AWS S3 doesn't return STANDARD Storage class as response header.
func removeStandardStorageClass(metadata map[string]string) map[string]string {
	if metadata[xhttp.AmzStorageClass] == storageclass.STANDARD {
		delete(metadata, xhttp.AmzStorageClass)
	}
	return metadata
}

// cleanMetadataKeys takes keyNames to be filtered
// and returns a new map with all the entries with keyNames removed.
func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[string]string {
	var newMeta = make(map[string]string, len(metadata))
	for k, v := range metadata {
		if contains(keyNames, k) {
			continue
		}
		newMeta[k] = v
	}
	return newMeta
}

// Extracts etag value from the metadata.
func extractETag(metadata map[string]string) string {
	// md5Sum tag is kept for backward compatibility.
	etag, ok := metadata["md5Sum"]
	if !ok {
		etag = metadata["etag"]
	}
	// Success.
	return etag
}

// HasPrefix - returns true if the string has the given prefix, matched in a
// platform-specific way. For example, on Windows, where paths are
// case-insensitive, the check is case-insensitive as well.
func HasPrefix(s string, prefix string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix))
	}
	return strings.HasPrefix(s, prefix)
}

// HasSuffix - returns true if the string has the given suffix, matched in a
// platform-specific way. For example, on Windows, where paths are
// case-insensitive, the check is case-insensitive as well.
func HasSuffix(s string, suffix string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
	}
	return strings.HasSuffix(s, suffix)
}

// Returns true if two strings are equal, using a case-insensitive comparison
// on Windows.
func isStringEqual(s1 string, s2 string) bool {
	if runtime.GOOS == globalWindowsOSName {
		return strings.EqualFold(s1, s2)
	}
	return s1 == s2
}

// Returns true for all reserved bucket names and for invalid bucket names.
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
	if bucketEntry == "" {
		return true
	}

	bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator)
	if strict {
		if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
			return true
		}
	} else {
		if err := s3utils.CheckValidBucketName(bucketEntry); err != nil {
			return true
		}
	}
	return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry)
}

// Returns true if input bucket is a reserved minio meta bucket '.minio.sys'.
func isMinioMetaBucket(bucketName string) bool {
	return bucketName == minioMetaBucket
}

// Returns true if input bucket is a reserved minio bucket 'minio'.
func isMinioReservedBucket(bucketName string) bool {
	return bucketName == minioReservedBucket
}

// IsCompressed returns true if the object is marked as compressed.
func (o ObjectInfo) IsCompressed() bool {
	_, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
	return ok
}

// IsCompressedOK returns whether the object is compressed and can be decompressed.
func (o ObjectInfo) IsCompressedOK() (bool, error) {
	scheme, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
	if !ok {
		return false, nil
	}
	switch scheme {
	case compressionAlgorithmV1, compressionAlgorithmV2:
		return true, nil
	}
	return true, fmt.Errorf("unknown compression scheme: %s", scheme)
}

// GetActualETag - returns the actual etag of the stored object,
// decrypting it for SSE objects.
func (o ObjectInfo) GetActualETag(h http.Header) string {
	if _, ok := crypto.IsEncrypted(o.UserDefined); !ok {
		return o.ETag
	}
	return getDecryptedETag(h, o, false)
}

// GetActualSize - returns the actual size of the stored object
func (o ObjectInfo) GetActualSize() (int64, error) {
	if o.IsCompressed() {
		sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
		if !ok {
			return -1, errInvalidDecompressedSize
		}
		size, err := strconv.ParseInt(sizeStr, 10, 64)
		if err != nil {
			return -1, errInvalidDecompressedSize
		}
		return size, nil
	}
	if _, ok := crypto.IsEncrypted(o.UserDefined); ok {
		return o.DecryptedSize()
	}

	return o.Size, nil
}
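
// Illustrative sketch (not part of the original file): for a compressed
// object, ObjectInfo.Size holds the on-disk (compressed) size, while the
// pre-compression size lives in user-defined metadata:
//
//	oi := ObjectInfo{
//		Size: 1024, // compressed bytes on disk
//		UserDefined: map[string]string{
//			ReservedMetadataPrefix + "compression": compressionAlgorithmV2,
//			ReservedMetadataPrefix + "actual-size": "4096",
//		},
//	}
//	actual, _ := oi.GetActualSize() // 4096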

// isCompressible returns whether the object should be compressed.
// Compression is disabled for encrypted requests (unless explicitly allowed),
// since using compression and encryption together opens room for side-channel
// attacks. Non-compressible objects are eliminated by extension/content-type.
func isCompressible(header http.Header, object string) bool {
	globalCompressConfigMu.Lock()
	cfg := globalCompressConfig
	globalCompressConfigMu.Unlock()

	_, ok := crypto.IsRequested(header)
	if !cfg.Enabled || (ok && !cfg.AllowEncrypted) || excludeForCompression(header, object, cfg) {
		return false
	}
	return true
}

// Eliminate the non-compressible objects.
func excludeForCompression(header http.Header, object string, cfg compress.Config) bool {
	objStr := object
	contentType := header.Get(xhttp.ContentType)
	if !cfg.Enabled {
		return true
	}

	// We strictly disable compression for standard extensions/content-types
	// that are already compressed.
	if hasStringSuffixInSlice(objStr, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, contentType) {
		return true
	}

	// Filter compression includes.
	exclude := len(cfg.Extensions) > 0 || len(cfg.MimeTypes) > 0
	if len(cfg.Extensions) > 0 && hasStringSuffixInSlice(objStr, cfg.Extensions) {
		exclude = false
	}

	if len(cfg.MimeTypes) > 0 && hasPattern(cfg.MimeTypes, contentType) {
		exclude = false
	}
	return exclude
}
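
// Illustrative example (not part of the original file): once any include
// filter is configured, objects are excluded by default and must match one of
// the filters to be compressed. Assuming cfg.Enabled is true,
// cfg.Extensions = []string{".txt"}, no MIME filters, and neither name hits
// the standard exclusion lists:
//
//	excludeForCompression(h, "notes.txt", cfg) // false: matches ".txt", compress it
//	excludeForCompression(h, "data.bin", cfg)  // true: filters set, no match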

// Utility which returns true if the string ends with any suffix in the list.
// Comparison is case insensitive.
func hasStringSuffixInSlice(str string, list []string) bool {
	str = strings.ToLower(str)
	for _, v := range list {
		if strings.HasSuffix(str, strings.ToLower(v)) {
			return true
		}
	}
	return false
}

// Returns true if any of the given wildcard patterns match the matchStr.
func hasPattern(patterns []string, matchStr string) bool {
	for _, pattern := range patterns {
		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
			return true
		}
	}
	return false
}

// Returns the part file name which matches the partNumber and etag.
func getPartFile(entriesTrie *trie.Trie, partNumber int, etag string) (partFile string) {
	for _, match := range entriesTrie.PrefixMatch(fmt.Sprintf("%.5d.%s.", partNumber, etag)) {
		partFile = match
		break
	}
	return partFile
}

// partNumberToRangeSpec returns the byte range, in terms of the actual
// (uncompressed, decrypted) object size, covered by the given part number.
func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
	if oi.Size == 0 || len(oi.Parts) == 0 {
		return nil
	}

	var start int64
	var end = int64(-1)
	for i := 0; i < len(oi.Parts) && i < partNumber; i++ {
		start = end + 1
		end = start + oi.Parts[i].ActualSize - 1
	}

	return &HTTPRangeSpec{Start: start, End: end}
}
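
// Illustrative sketch (not part of the original file; ObjectPartInfo is
// assumed to be the element type of ObjectInfo.Parts): with parts of actual
// sizes 10, 10 and 5 bytes, the ranges accumulate per part:
//
//	oi := ObjectInfo{Size: 25, Parts: []ObjectPartInfo{
//		{ActualSize: 10}, {ActualSize: 10}, {ActualSize: 5},
//	}}
//	partNumberToRangeSpec(oi, 1) // &HTTPRangeSpec{Start: 0, End: 9}
//	partNumberToRangeSpec(oi, 2) // &HTTPRangeSpec{Start: 10, End: 19}
//	partNumberToRangeSpec(oi, 3) // &HTTPRangeSpec{Start: 20, End: 24}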

// Returns the compressed offset which should be skipped.
// If the object is encrypted, offsets are adjusted for encrypted block
// headers/trailers. Since decompression happens after decryption, the
// encryption overhead is added only to compressedOffset.
func getCompressedOffsets(objectInfo ObjectInfo, offset int64) (compressedOffset int64, partSkip int64, firstPart int) {
	var skipLength int64
	var cumulativeActualSize int64
	var firstPartIdx int
	if len(objectInfo.Parts) > 0 {
		for i, part := range objectInfo.Parts {
			cumulativeActualSize += part.ActualSize
			if cumulativeActualSize <= offset {
				compressedOffset += part.Size
			} else {
				firstPartIdx = i
				skipLength = cumulativeActualSize - part.ActualSize
				break
			}
		}
	}

	if isEncryptedMultipart(objectInfo) && firstPartIdx > 0 {
		off, _, _, _, _, err := objectInfo.GetDecryptedRange(partNumberToRangeSpec(objectInfo, firstPartIdx))
		logger.LogIf(context.Background(), err)
		compressedOffset += off
	}
	return compressedOffset, offset - skipLength, firstPartIdx
}
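
// Worked example (not part of the original file), assuming an unencrypted
// two-part object where each part stores 10 actual bytes in fewer compressed
// bytes:
//
//	oi := ObjectInfo{Parts: []ObjectPartInfo{
//		{ActualSize: 10, Size: 4},
//		{ActualSize: 10, Size: 5},
//	}}
//	// An uncompressed offset of 12 lands 2 bytes into the second part, so
//	// the whole first part (4 compressed bytes) is skipped on disk and the
//	// remaining 2 bytes are skipped after decompression.
//	getCompressedOffsets(oi, 12) // compressedOffset=4, partSkip=2, firstPart=1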

// GetObjectReader is a type that wraps a reader with a lock to
// provide a ReadCloser interface that unlocks on Close()
type GetObjectReader struct {
	ObjInfo ObjectInfo
	pReader io.Reader

	cleanUpFns []func()
	opts       ObjectOptions
	once       sync.Once
}

// NewGetObjectReaderFromReader sets up a GetObjectReader with a given
// reader. This ignores any object properties.
func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
	if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
		// Call the cleanup funcs
		for i := len(cleanupFns) - 1; i >= 0; i-- {
			cleanupFns[i]()
		}
		return nil, PreConditionFailed{}
	}
	return &GetObjectReader{
		ObjInfo:    oi,
		pReader:    r,
		cleanUpFns: cleanupFns,
		opts:       opts,
	}, nil
}

// ObjReaderFn is a function type that takes a reader and returns
// GetObjectReader and an error. Request headers are passed to provide
// encryption parameters. cleanupFns allow cleanup funcs to be
// registered for calling after usage of the reader.
type ObjReaderFn func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cleanupFns ...func()) (r *GetObjectReader, err error)

// NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
// are called on Close() in reverse order as passed here. NOTE: It is
// assumed that clean up functions do not panic (otherwise, they may
// not all run!).
func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, cleanUpFns ...func()) (
	fn ObjReaderFn, off, length int64, err error) {

	if rs == nil && opts.PartNumber > 0 {
		rs = partNumberToRangeSpec(oi, opts.PartNumber)
	}

	// Call the clean-up functions immediately in case of exit
	// with error
	defer func() {
		if err != nil {
			for i := len(cleanUpFns) - 1; i >= 0; i-- {
				cleanUpFns[i]()
			}
		}
	}()

	_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
	isCompressed, err := oi.IsCompressedOK()
	if err != nil {
		return nil, 0, 0, err
	}

	// If the object is being transitioned, pass the content through
	// without decrypting or decompressing it.
	if opts.TransitionStatus == lifecycle.TransitionPending && (isEncrypted || isCompressed) {
		isEncrypted = false
		isCompressed = false
	}

	// Calculate range to read (different for encrypted/compressed objects)
	switch {
	case isCompressed:
		var firstPart int
		if opts.PartNumber > 0 {
			// firstPart is an index into the Parts slice,
			// so convert the 1-based PartNumber into a
			// 0-based index.
			firstPart = opts.PartNumber - 1
		}

		// If compressed, we start from the beginning of the part.
		// Read the decompressed size from the meta.json.
		actualSize, err := oi.GetActualSize()
		if err != nil {
			return nil, 0, 0, err
		}
		off, length = int64(0), oi.Size
		decOff, decLength := int64(0), actualSize
		if rs != nil {
			off, length, err = rs.GetOffsetLength(actualSize)
			if err != nil {
				return nil, 0, 0, err
			}
			// In case of range based queries on multiparts, the offset and length are reduced.
			off, decOff, firstPart = getCompressedOffsets(oi, off)
			decLength = length
			length = oi.Size - off
			// For negative length we read everything.
			if decLength < 0 {
				decLength = actualSize - decOff
			}

			// Reply back invalid range if the input offset and length fall out of range.
			if decOff > actualSize || decOff+decLength > actualSize {
				return nil, 0, 0, errInvalidRange
			}
		}
		fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
			cFns = append(cleanUpFns, cFns...)
			if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, PreConditionFailed{}
			}
			if isEncrypted {
				copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
				// Attach decrypter on inputReader
				inputReader, err = DecryptBlocksRequestR(inputReader, h, 0, firstPart, oi, copySource)
				if err != nil {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, err
				}
				oi.Size = decLength
			}
			// Decompression reader.
			s2Reader := s2.NewReader(inputReader)
			// Apply the skipLen and limit on the decompressed stream.
			if decOff > 0 {
				if err = s2Reader.Skip(decOff); err != nil {
					// Call the cleanup funcs
					for i := len(cFns) - 1; i >= 0; i-- {
						cFns[i]()
					}
					return nil, err
				}
			}

			decReader := io.LimitReader(s2Reader, decLength)
			if decLength > compReadAheadSize {
				rah, err := readahead.NewReaderSize(decReader, compReadAheadBuffers, compReadAheadBufSize)
				if err == nil {
					decReader = rah
					cFns = append(cFns, func() {
						rah.Close()
					})
				}
			}
			oi.Size = decLength

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    decReader,
				cleanUpFns: cFns,
				opts:       opts,
			}
			return r, nil
		}

	case isEncrypted:
		var seqNumber uint32
		var partStart int
		var skipLen int64

		off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
		if err != nil {
			return nil, 0, 0, err
		}
		var decSize int64
		decSize, err = oi.DecryptedSize()
		if err != nil {
			return nil, 0, 0, err
		}
		var decRangeLength int64
		decRangeLength, err = rs.GetLength(decSize)
		if err != nil {
			return nil, 0, 0, err
		}

		// We define a closure that performs decryption given
		// a reader that returns the desired range of
		// encrypted bytes. The header parameter is used to
		// provide encryption parameters.
		fn = func(inputReader io.Reader, h http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
			copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""

			cFns = append(cleanUpFns, cFns...)
			// Attach decrypter on inputReader
			var decReader io.Reader
			decReader, err = DecryptBlocksRequestR(inputReader, h, seqNumber, partStart, oi, copySource)
			if err != nil {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, err
			}

			if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, PreConditionFailed{}
			}

			oi.ETag = getDecryptedETag(h, oi, false)

			// Apply the skipLen and limit on the
			// decrypted stream
			decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)

			// Assemble the GetObjectReader
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    decReader,
				cleanUpFns: cFns,
				opts:       opts,
			}
			return r, nil
		}

	default:
		off, length, err = rs.GetOffsetLength(oi.Size)
		if err != nil {
			return nil, 0, 0, err
		}
		fn = func(inputReader io.Reader, _ http.Header, pcfn CheckPreconditionFn, cFns ...func()) (r *GetObjectReader, err error) {
			cFns = append(cleanUpFns, cFns...)
			if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
				// Call the cleanup funcs
				for i := len(cFns) - 1; i >= 0; i-- {
					cFns[i]()
				}
				return nil, PreConditionFailed{}
			}
			r = &GetObjectReader{
				ObjInfo:    oi,
				pReader:    inputReader,
				cleanUpFns: cFns,
				opts:       opts,
			}
			return r, nil
		}
	}
	return fn, off, length, nil
}
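
// Illustrative usage sketch (not part of the original file): the caller first
// obtains the raw byte range [off, off+length) from storage, then invokes fn
// to layer decryption/decompression on top. readBackendRange below is a
// hypothetical helper standing in for the backend read.
//
//	fn, off, length, err := NewGetObjectReader(rs, oi, opts)
//	if err != nil {
//		return err
//	}
//	rawReader := readBackendRange(oi, off, length) // hypothetical
//	gr, err := fn(rawReader, req.Header, opts.CheckPrecondFn)
//	if err != nil {
//		return err
//	}
//	defer gr.Close()
//	_, err = io.Copy(w, gr)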

// Close - calls the cleanup actions in reverse order
func (g *GetObjectReader) Close() error {
	// sync.Once is used here to ensure that Close() is
	// idempotent.
	g.once.Do(func() {
		for i := len(g.cleanUpFns) - 1; i >= 0; i-- {
			g.cleanUpFns[i]()
		}
	})
	return nil
}

// Read - to implement Reader interface.
func (g *GetObjectReader) Read(p []byte) (n int, err error) {
	return g.pReader.Read(p)
}

// SealMD5CurrFn seals the md5sum with the object encryption key and returns
// the sealed md5sum
type SealMD5CurrFn func([]byte) []byte

// PutObjReader is a type that wraps sio.EncryptReader and
// underlying hash.Reader in a struct
type PutObjReader struct {
	*hash.Reader              // actual data stream
	rawReader    *hash.Reader // original data stream
	sealMD5Fn    SealMD5CurrFn
}

// Size returns the absolute number of bytes the Reader
// will return during reading. It returns -1 for unlimited
// data.
func (p *PutObjReader) Size() int64 {
	return p.Reader.Size()
}

// MD5CurrentHexString returns the current MD5Sum or encrypted MD5Sum
// as a hex encoded string
func (p *PutObjReader) MD5CurrentHexString() string {
	md5sumCurr := p.rawReader.MD5Current()
	var appendHyphen bool
	// md5sumCurr is non-empty in two scenarios:
	// - the server is running in strict compatibility mode
	// - the client set Content-Md5 during the PUT operation
	if len(md5sumCurr) == 0 {
		// md5sumCurr is only empty when we are running
		// in non-compatibility mode.
		md5sumCurr = make([]byte, 16)
		rand.Read(md5sumCurr)
		appendHyphen = true
	}
	if p.sealMD5Fn != nil {
		md5sumCurr = p.sealMD5Fn(md5sumCurr)
	}
	if appendHyphen {
		// Make sure to return an etag string of up to 32 characters: for SSE
		// requests the ETag might be longer, and the code decrypting the
		// ETag ignores ETags in multipart form, i.e. <hex>-N.
		return hex.EncodeToString(md5sumCurr)[:32] + "-1"
	}
	return hex.EncodeToString(md5sumCurr)
}
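
// Illustrative summary (not part of the original file) of the two shapes the
// returned ETag can take; the values below are made-up placeholders:
//
//	// Client supplied Content-Md5 (or strict compatibility mode):
//	"5d41402abc4b2a76b9719d911017c592"
//	// No MD5 available; a random value in multipart form is returned so
//	// that downstream ETag decryption skips it:
//	"3b7e5a1c9d0f4a6b8c2e1d0f9a8b7c6d-1"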

// WithEncryption sets up encrypted reader and the sealing for content md5sum
// using objEncKey. Unsealed md5sum is computed from the rawReader setup when
// NewPutObjReader was called. It returns an error if called on an uninitialized
// PutObjReader.
func (p *PutObjReader) WithEncryption(encReader *hash.Reader, objEncKey *crypto.ObjectKey) (*PutObjReader, error) {
	if p.Reader == nil {
		return nil, errors.New("put-object reader uninitialized")
	}
	p.Reader = encReader
	p.sealMD5Fn = sealETagFn(*objEncKey)
	return p, nil
}

// NewPutObjReader returns a new PutObjReader. It uses given hash.Reader's
// MD5Current method to construct md5sum when requested downstream.
func NewPutObjReader(rawReader *hash.Reader) *PutObjReader {
	return &PutObjReader{Reader: rawReader, rawReader: rawReader}
}

func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
	var emptyKey [32]byte
	if bytes.Equal(encKey[:], emptyKey[:]) {
		return md5CurrSum
	}
	return encKey.SealETag(md5CurrSum)
}

func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
	fn := func(md5sumcurr []byte) []byte {
		return sealETag(key, md5sumcurr)
	}
	return fn
}

// CleanMinioInternalMetadataKeys removes the X-Amz-Meta- prefix from minio
// internal encryption metadata that was sent by a minio gateway
func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string {
	var newMeta = make(map[string]string, len(metadata))
	for k, v := range metadata {
		if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") {
			newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v
		} else {
			newMeta[k] = v
		}
	}
	return newMeta
}
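
// Illustrative example (not part of the original file); the internal key
// below is a hypothetical one, used only to show the transformation:
//
//	in := map[string]string{
//		"X-Amz-Meta-X-Minio-Internal-Example": "v",
//		"X-Amz-Meta-Color":                    "blue",
//	}
//	out := CleanMinioInternalMetadataKeys(in)
//	// out["X-Minio-Internal-Example"] == "v"  (prefix stripped)
//	// out["X-Amz-Meta-Color"] == "blue"       (left untouched)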

// newS2CompressReader will read data from r, compress it and return the compressed data as a Reader.
// Use Close to ensure resources are released on incomplete streams.
//
// Passing the expected input size 'on' is always recommended so that this
// function works properly, because we do not wish to create an object if the
// client closed the stream prematurely.
func newS2CompressReader(r io.Reader, on int64) io.ReadCloser {
	pr, pw := io.Pipe()
	comp := s2.NewWriter(pw)
	// Copy input to compressor
	go func() {
		cn, err := io.Copy(comp, r)
		if err != nil {
			comp.Close()
			pw.CloseWithError(err)
			return
		}
		if on > 0 && on != cn {
			// The client did not send all the expected
			// data; fail the stream here.
			comp.Close()
			pw.CloseWithError(IncompleteBody{})
			return
		}
		// Close the stream.
		if err = comp.Close(); err != nil {
			pw.CloseWithError(err)
			return
		}
		// Everything ok, do regular close.
		pw.Close()
	}()
	return pr
}
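
// Illustrative usage sketch (not part of the original file): compress a small
// payload and read the S2 stream back.
//
//	payload := []byte("hello world")
//	rc := newS2CompressReader(bytes.NewReader(payload), int64(len(payload)))
//	defer rc.Close()
//	compressed, err := io.ReadAll(rc) // S2-compressed bytes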

// compressSelfTest performs a self-test to ensure that the compression
// algorithm completes a roundtrip. If any algorithm
// produces an incorrect checksum it fails with a hard error.
//
// compressSelfTest tries to catch any issue in the compression implementation
// early instead of silently corrupting data.
func compressSelfTest() {
	// 4 MB block.
	// Approx runtime ~30ms
	data := make([]byte, 4<<20)
	rng := rand.New(rand.NewSource(0))
	for i := range data {
		// Generate compressible stream...
		data[i] = byte(rng.Int63() & 3)
	}
	failOnErr := func(err error) {
		if err != nil {
			logger.Fatal(errSelfTestFailure, "compress: error on self-test: %v", err)
		}
	}
	const skip = 2<<20 + 511
	r := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)))
	b, err := io.ReadAll(r)
	failOnErr(err)
	failOnErr(r.Close())
	// Decompression reader.
	s2Reader := s2.NewReader(bytes.NewBuffer(b))
	// Apply the skipLen on the decompressed stream.
	failOnErr(s2Reader.Skip(skip))
	got, err := io.ReadAll(s2Reader)
	failOnErr(err)
	if !bytes.Equal(got, data[skip:]) {
		logger.Fatal(errSelfTestFailure, "compress: self-test roundtrip mismatch.")
	}
}