github.com/minio/minio@v0.0.0-20240328213742-3f72439b8a27/cmd/object-api-utils.go

     1  // Copyright (c) 2015-2021 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // This program is distributed in the hope that it will be useful
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"bytes"
    22  	"context"
    23  	"encoding/hex"
    24  	"errors"
    25  	"fmt"
    26  	"io"
    27  	"math/rand"
    28  	"net"
    29  	"net/http"
    30  	"path"
    31  	"runtime"
    32  	"strconv"
    33  	"strings"
    34  	"sync"
    35  	"time"
    36  	"unicode/utf8"
    37  
    38  	"github.com/google/uuid"
    39  	"github.com/klauspost/compress/s2"
    40  	"github.com/klauspost/readahead"
    41  	"github.com/minio/minio-go/v7/pkg/s3utils"
    42  	"github.com/minio/minio/internal/config/compress"
    43  	"github.com/minio/minio/internal/config/dns"
    44  	"github.com/minio/minio/internal/config/storageclass"
    45  	"github.com/minio/minio/internal/crypto"
    46  	"github.com/minio/minio/internal/hash"
    47  	xhttp "github.com/minio/minio/internal/http"
    48  	"github.com/minio/minio/internal/ioutil"
    49  	xioutil "github.com/minio/minio/internal/ioutil"
    50  	"github.com/minio/minio/internal/logger"
    51  	"github.com/minio/pkg/v2/trie"
    52  	"github.com/minio/pkg/v2/wildcard"
    53  	"github.com/valyala/bytebufferpool"
    54  	"golang.org/x/exp/slices"
    55  )
    56  
    57  const (
    58  	// MinIO meta bucket.
    59  	minioMetaBucket = ".minio.sys"
    60  	// Multipart meta prefix.
    61  	mpartMetaPrefix = "multipart"
    62  	// MinIO Multipart meta prefix.
    63  	minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix
    64  	// MinIO tmp meta prefix.
    65  	minioMetaTmpBucket = minioMetaBucket + "/tmp"
    66  	// MinIO tmp meta prefix for deleted objects.
    67  	minioMetaTmpDeletedBucket = minioMetaTmpBucket + "/.trash"
    68  
    69  	// DNS separator (period), used for bucket name validation.
    70  	dnsDelimiter = "."
     71  	// Use read-ahead buffering on compressed files bigger than this size.
    72  	compReadAheadSize = 100 << 20
    73  	// Read this many buffers ahead.
    74  	compReadAheadBuffers = 5
    75  	// Size of each buffer.
    76  	compReadAheadBufSize = 1 << 20
    77  	// Pad Encrypted+Compressed files to a multiple of this.
    78  	compPadEncrypted = 256
    79  	// Disable compressed file indices below this size
    80  	compMinIndexSize = 8 << 20
    81  )
    82  
     83  // isMinioMetaBucketName returns true if the given bucket is a MinIO internal
     84  // bucket (prefixed with ".minio.sys") and false otherwise.
    85  func isMinioMetaBucketName(bucket string) bool {
    86  	return strings.HasPrefix(bucket, minioMetaBucket)
    87  }
    88  
    89  // IsValidBucketName verifies that a bucket name is in accordance with
    90  // Amazon's requirements (i.e. DNS naming conventions). It must be 3-63
    91  // characters long, and it must be a sequence of one or more labels
     92  // separated by periods. Each label can contain lowercase ASCII
    93  // letters, decimal digits and hyphens, but must not begin or end with
    94  // a hyphen. See:
    95  // http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
    96  func IsValidBucketName(bucket string) bool {
    97  	// Special case when bucket is equal to one of the meta buckets.
    98  	if isMinioMetaBucketName(bucket) {
    99  		return true
   100  	}
   101  	if len(bucket) < 3 || len(bucket) > 63 {
   102  		return false
   103  	}
   104  
   105  	// Split on dot and check each piece conforms to rules.
   106  	allNumbers := true
   107  	pieces := strings.Split(bucket, dnsDelimiter)
   108  	for _, piece := range pieces {
   109  		if len(piece) == 0 || piece[0] == '-' ||
   110  			piece[len(piece)-1] == '-' {
   111  			// Current piece has 0-length or starts or
   112  			// ends with a hyphen.
   113  			return false
   114  		}
   115  		// Now only need to check if each piece is a valid
   116  		// 'label' in AWS terminology and if the bucket looks
   117  		// like an IP address.
   118  		isNotNumber := false
   119  		for i := 0; i < len(piece); i++ {
   120  			switch {
   121  			case (piece[i] >= 'a' && piece[i] <= 'z' ||
   122  				piece[i] == '-'):
   123  				// Found a non-digit character, so
   124  				// this piece is not a number.
   125  				isNotNumber = true
   126  			case piece[i] >= '0' && piece[i] <= '9':
   127  				// Nothing to do.
   128  			default:
   129  				// Found invalid character.
   130  				return false
   131  			}
   132  		}
   133  		allNumbers = allNumbers && !isNotNumber
   134  	}
   135  	// Does the bucket name look like an IP address?
   136  	return !(len(pieces) == 4 && allNumbers)
   137  }
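
// exampleIsValidBucketName is an illustrative sketch (not part of the original
// source) showing the expected results of IsValidBucketName for a few
// representative names.
func exampleIsValidBucketName() {
	fmt.Println(IsValidBucketName("my-bucket"))   // true
	fmt.Println(IsValidBucketName("My-Bucket"))   // false: uppercase letters are not allowed
	fmt.Println(IsValidBucketName("ab"))          // false: fewer than 3 characters
	fmt.Println(IsValidBucketName("-bucket"))     // false: label starts with a hyphen
	fmt.Println(IsValidBucketName("192.168.1.1")) // false: looks like an IP address
	fmt.Println(IsValidBucketName(".minio.sys"))  // true: internal meta bucket special case
}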
   138  
   139  // IsValidObjectName verifies an object name in accordance with Amazon's
    140  // requirements. It cannot exceed 1024 characters and must be a valid UTF-8
   141  // string.
   142  //
   143  // See:
   144  // http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
   145  //
   146  // You should avoid the following characters in a key name because of
   147  // significant special handling for consistency across all
   148  // applications.
   149  //
    150  // Rejects strings with the following characters.
   151  //
   152  // - Backslash ("\")
   153  //
    154  // Additionally, MinIO does not support object names with a trailing SlashSeparator.
   155  func IsValidObjectName(object string) bool {
   156  	if len(object) == 0 {
   157  		return false
   158  	}
   159  	if HasSuffix(object, SlashSeparator) {
   160  		return false
   161  	}
   162  	return IsValidObjectPrefix(object)
   163  }
   164  
   165  // IsValidObjectPrefix verifies whether the prefix is a valid object name.
    166  // It's valid to have an empty prefix.
   167  func IsValidObjectPrefix(object string) bool {
   168  	if hasBadPathComponent(object) {
   169  		return false
   170  	}
   171  	if !utf8.ValidString(object) {
   172  		return false
   173  	}
   174  	if strings.Contains(object, `//`) {
   175  		return false
   176  	}
   177  	// This is valid for AWS S3 but it will never
   178  	// work with file systems, we will reject here
   179  	// to return object name invalid rather than
   180  	// a cryptic error from the file system.
   181  	return !strings.ContainsRune(object, 0)
   182  }
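
// exampleObjectNameValidation is an illustrative sketch (not part of the
// original source) of names accepted and rejected by IsValidObjectName.
func exampleObjectNameValidation() {
	fmt.Println(IsValidObjectName("photos/2021/img.png")) // true
	fmt.Println(IsValidObjectName("photos/"))             // false: trailing SlashSeparator
	fmt.Println(IsValidObjectName("a//b"))                // false: empty path component
	fmt.Println(IsValidObjectName(""))                    // false: empty name
}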
   183  
    184  // checkObjectNameForLengthAndSlash - checks the validity of the object name length and whether it has a slash prefix.
   185  func checkObjectNameForLengthAndSlash(bucket, object string) error {
   186  	// Check for the length of object name
   187  	if len(object) > 1024 {
   188  		return ObjectNameTooLong{
   189  			Bucket: bucket,
   190  			Object: object,
   191  		}
   192  	}
   193  	// Check for slash as prefix in object name
   194  	if HasPrefix(object, SlashSeparator) {
   195  		return ObjectNamePrefixAsSlash{
   196  			Bucket: bucket,
   197  			Object: object,
   198  		}
   199  	}
   200  	if runtime.GOOS == globalWindowsOSName {
   201  		// Explicitly disallowed characters on windows.
   202  		// Avoids most problematic names.
   203  		if strings.ContainsAny(object, `\:*?"|<>`) {
   204  			return ObjectNameInvalid{
   205  				Bucket: bucket,
   206  				Object: object,
   207  			}
   208  		}
   209  	}
   210  	return nil
   211  }
   212  
   213  // SlashSeparator - slash separator.
   214  const SlashSeparator = "/"
   215  
   216  // SlashSeparatorChar - slash separator.
   217  const SlashSeparatorChar = '/'
   218  
    219  // retainSlash - ensures a non-empty path ends with exactly one trailing SlashSeparator.
   220  func retainSlash(s string) string {
   221  	if s == "" {
   222  		return s
   223  	}
   224  	return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
   225  }
   226  
    227  // pathsJoinPrefix - like pathJoin, retains the trailing SlashSeparator of
    228  // each element and prepends 'prefix' to each of them.
   229  func pathsJoinPrefix(prefix string, elem ...string) (paths []string) {
   230  	paths = make([]string, len(elem))
   231  	for i, e := range elem {
   232  		paths[i] = pathJoin(prefix, e)
   233  	}
   234  	return paths
   235  }
   236  
   237  // pathJoin - like path.Join() but retains trailing SlashSeparator of the last element
   238  func pathJoin(elem ...string) string {
   239  	sb := bytebufferpool.Get()
   240  	defer func() {
   241  		sb.Reset()
   242  		bytebufferpool.Put(sb)
   243  	}()
   244  
   245  	return pathJoinBuf(sb, elem...)
   246  }
   247  
   248  // pathJoinBuf - like path.Join() but retains trailing SlashSeparator of the last element.
    249  // Provide a reusable byte buffer to reduce allocations.
   250  func pathJoinBuf(dst *bytebufferpool.ByteBuffer, elem ...string) string {
   251  	trailingSlash := len(elem) > 0 && hasSuffixByte(elem[len(elem)-1], SlashSeparatorChar)
   252  	dst.Reset()
   253  	added := 0
   254  	for _, e := range elem {
   255  		if added > 0 || e != "" {
   256  			if added > 0 {
   257  				dst.WriteByte(SlashSeparatorChar)
   258  			}
   259  			dst.WriteString(e)
   260  			added += len(e)
   261  		}
   262  	}
   263  
   264  	if pathNeedsClean(dst.Bytes()) {
   265  		s := path.Clean(dst.String())
   266  		if trailingSlash {
   267  			return s + SlashSeparator
   268  		}
   269  		return s
   270  	}
   271  	if trailingSlash {
   272  		dst.WriteByte(SlashSeparatorChar)
   273  	}
   274  	return dst.String()
   275  }
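
// examplePathJoin is an illustrative sketch (not part of the original source)
// showing how pathJoin retains the trailing SlashSeparator of the last element
// while still cleaning the joined path.
func examplePathJoin() {
	fmt.Println(pathJoin("a", "b", "c"))  // "a/b/c"
	fmt.Println(pathJoin("a", "b", "c/")) // "a/b/c/" - trailing slash is retained
	fmt.Println(pathJoin("a//b", "./c"))  // "a/b/c"  - cleaned by path.Clean
	fmt.Println(pathJoin("bucket", ""))   // "bucket"
}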
   276  
   277  // hasSuffixByte returns true if the last byte of s is 'suffix'
   278  func hasSuffixByte(s string, suffix byte) bool {
   279  	return len(s) > 0 && s[len(s)-1] == suffix
   280  }
   281  
   282  // pathNeedsClean returns whether path.Clean may change the path.
   283  // Will detect all cases that will be cleaned,
   284  // but may produce false positives on non-trivial paths.
   285  func pathNeedsClean(path []byte) bool {
   286  	if len(path) == 0 {
   287  		return true
   288  	}
   289  
   290  	rooted := path[0] == '/'
   291  	n := len(path)
   292  
   293  	r, w := 0, 0
   294  	if rooted {
   295  		r, w = 1, 1
   296  	}
   297  
   298  	for r < n {
   299  		switch {
   300  		case path[r] > 127:
   301  			// Non ascii.
   302  			return true
   303  		case path[r] == '/':
   304  			// multiple / elements
   305  			return true
   306  		case path[r] == '.' && (r+1 == n || path[r+1] == '/'):
   307  			// . element - assume it has to be cleaned.
   308  			return true
   309  		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == '/'):
   310  			// .. element: remove to last / - assume it has to be cleaned.
   311  			return true
   312  		default:
   313  			// real path element.
   314  			// add slash if needed
   315  			if rooted && w != 1 || !rooted && w != 0 {
   316  				w++
   317  			}
   318  			// copy element
   319  			for ; r < n && path[r] != '/'; r++ {
   320  				w++
   321  			}
   322  			// allow one slash, not at end
   323  			if r < n-1 && path[r] == '/' {
   324  				r++
   325  			}
   326  		}
   327  	}
   328  
   329  	// Turn empty string into "."
   330  	if w == 0 {
   331  		return true
   332  	}
   333  
   334  	return false
   335  }
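
// examplePathNeedsClean is an illustrative sketch (not part of the original
// source) of paths that do and do not trigger the path.Clean call in
// pathJoinBuf.
func examplePathNeedsClean() {
	fmt.Println(pathNeedsClean([]byte("bucket/object")))   // false: already clean
	fmt.Println(pathNeedsClean([]byte("bucket//object")))  // true: duplicate slash
	fmt.Println(pathNeedsClean([]byte("bucket/./object"))) // true: "." element
	fmt.Println(pathNeedsClean([]byte("bücket/object")))   // true: non-ASCII is a conservative false positive
}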
   336  
   337  // mustGetUUID - get a random UUID.
   338  func mustGetUUID() string {
   339  	u, err := uuid.NewRandom()
   340  	if err != nil {
   341  		logger.CriticalIf(GlobalContext, err)
   342  	}
   343  
   344  	return u.String()
   345  }
   346  
   347  // mustGetUUIDBytes - get a random UUID as 16 bytes unencoded.
   348  func mustGetUUIDBytes() []byte {
   349  	u, err := uuid.NewRandom()
   350  	if err != nil {
   351  		logger.CriticalIf(GlobalContext, err)
   352  	}
   353  	return u[:]
   354  }
   355  
    356  // Create an S3-compatible MD5sum for the complete multipart transaction.
   357  func getCompleteMultipartMD5(parts []CompletePart) string {
   358  	var finalMD5Bytes []byte
   359  	for _, part := range parts {
   360  		md5Bytes, err := hex.DecodeString(canonicalizeETag(part.ETag))
   361  		if err != nil {
   362  			finalMD5Bytes = append(finalMD5Bytes, []byte(part.ETag)...)
   363  		} else {
   364  			finalMD5Bytes = append(finalMD5Bytes, md5Bytes...)
   365  		}
   366  	}
   367  	s3MD5 := fmt.Sprintf("%s-%d", getMD5Hash(finalMD5Bytes), len(parts))
   368  	return s3MD5
   369  }
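
// exampleCompleteMultipartMD5 is an illustrative sketch (not part of the
// original source) showing how the S3-style multipart ETag is derived: the
// binary MD5s of the individual parts are concatenated, hashed again, and
// suffixed with the part count.
func exampleCompleteMultipartMD5() {
	parts := []CompletePart{
		{PartNumber: 1, ETag: "5d41402abc4b2a76b9719d911017c592"}, // md5("hello")
		{PartNumber: 2, ETag: "7d793037a0760186574b0282f2f435e7"}, // md5("world")
	}
	fmt.Println(getCompleteMultipartMD5(parts)) // "<md5-of-concatenated-digests>-2"
}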
   370  
   371  // Clean unwanted fields from metadata
   372  func cleanMetadata(metadata map[string]string) map[string]string {
   373  	// Remove STANDARD StorageClass
   374  	metadata = removeStandardStorageClass(metadata)
   375  	// Clean meta etag keys 'md5Sum', 'etag', "expires", "x-amz-tagging".
   376  	return cleanMetadataKeys(metadata, "md5Sum", "etag", "expires", xhttp.AmzObjectTagging, "last-modified", VersionPurgeStatusKey)
   377  }
   378  
   379  // Filter X-Amz-Storage-Class field only if it is set to STANDARD.
    380  // This is done since AWS S3 doesn't return the STANDARD storage class as a response header.
   381  func removeStandardStorageClass(metadata map[string]string) map[string]string {
   382  	if metadata[xhttp.AmzStorageClass] == storageclass.STANDARD {
   383  		delete(metadata, xhttp.AmzStorageClass)
   384  	}
   385  	return metadata
   386  }
   387  
   388  // cleanMetadataKeys takes keyNames to be filtered
   389  // and returns a new map with all the entries with keyNames removed.
   390  func cleanMetadataKeys(metadata map[string]string, keyNames ...string) map[string]string {
   391  	newMeta := make(map[string]string, len(metadata))
   392  	for k, v := range metadata {
   393  		if slices.Contains(keyNames, k) {
   394  			continue
   395  		}
   396  		newMeta[k] = v
   397  	}
   398  	return newMeta
   399  }
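
// exampleCleanMetadata is an illustrative sketch (not part of the original
// source) showing that cleanMetadata drops internal keys such as "etag" and
// the STANDARD storage class while keeping user metadata.
func exampleCleanMetadata() {
	meta := cleanMetadata(map[string]string{
		"etag":                "d41d8cd98f00b204e9800998ecf8427e",
		xhttp.AmzStorageClass: "STANDARD",
		"X-Amz-Meta-Owner":    "alice",
	})
	fmt.Println(meta) // map[X-Amz-Meta-Owner:alice]
}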
   400  
   401  // Extracts etag value from the metadata.
   402  func extractETag(metadata map[string]string) string {
   403  	etag, ok := metadata["etag"]
   404  	if !ok {
   405  		// md5Sum tag is kept for backward compatibility.
   406  		etag = metadata["md5Sum"]
   407  	}
   408  	// Success.
   409  	return etag
   410  }
   411  
    412  // HasPrefix - reports whether s begins with prefix, in a platform-specific way.
    413  // For example, on Windows, which is case-insensitive, the check is
    414  // done case-insensitively.
   415  func HasPrefix(s string, prefix string) bool {
   416  	if runtime.GOOS == globalWindowsOSName {
   417  		return stringsHasPrefixFold(s, prefix)
   418  	}
   419  	return strings.HasPrefix(s, prefix)
   420  }
   421  
    422  // HasSuffix - reports whether s ends with suffix, in a platform-specific way.
    423  // For example, on Windows, which is case-insensitive, the check is
    424  // done case-insensitively.
   425  func HasSuffix(s string, suffix string) bool {
   426  	if runtime.GOOS == globalWindowsOSName {
   427  		return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
   428  	}
   429  	return strings.HasSuffix(s, suffix)
   430  }
   431  
    432  // Returns whether two strings are equal, using a case-insensitive comparison on Windows.
   433  func isStringEqual(s1 string, s2 string) bool {
   434  	if runtime.GOOS == globalWindowsOSName {
   435  		return strings.EqualFold(s1, s2)
   436  	}
   437  	return s1 == s2
   438  }
   439  
    440  // Returns true for all reserved bucket names and invalid bucket names.
   441  func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
   442  	if bucketEntry == "" {
   443  		return true
   444  	}
   445  
   446  	bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator)
   447  	if strict {
   448  		if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
   449  			return true
   450  		}
   451  	} else {
   452  		if err := s3utils.CheckValidBucketName(bucketEntry); err != nil {
   453  			return true
   454  		}
   455  	}
   456  	return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry)
   457  }
   458  
   459  // Returns true if input bucket is a reserved minio meta bucket '.minio.sys'.
   460  func isMinioMetaBucket(bucketName string) bool {
   461  	return bucketName == minioMetaBucket
   462  }
   463  
   464  // Returns true if input bucket is a reserved minio bucket 'minio'.
   465  func isMinioReservedBucket(bucketName string) bool {
   466  	return bucketName == minioReservedBucket
   467  }
   468  
   469  // returns a slice of hosts by reading a slice of DNS records
   470  func getHostsSlice(records []dns.SrvRecord) []string {
   471  	hosts := make([]string, len(records))
   472  	for i, r := range records {
   473  		hosts[i] = net.JoinHostPort(r.Host, string(r.Port))
   474  	}
   475  	return hosts
   476  }
   477  
   478  // returns an online host (and corresponding port) from a slice of DNS records
   479  func getHostFromSrv(records []dns.SrvRecord) (host string) {
   480  	hosts := getHostsSlice(records)
   481  	rng := rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
   482  	var d net.Dialer
   483  	var retry int
   484  	for retry < len(hosts) {
   485  		ctx, cancel := context.WithTimeout(GlobalContext, 300*time.Millisecond)
   486  
   487  		host = hosts[rng.Intn(len(hosts))]
   488  		conn, err := d.DialContext(ctx, "tcp", host)
   489  		cancel()
   490  		if err != nil {
   491  			retry++
   492  			continue
   493  		}
   494  		conn.Close()
   495  		break
   496  	}
   497  
   498  	return host
   499  }
   500  
   501  // IsCompressed returns true if the object is marked as compressed.
   502  func (o *ObjectInfo) IsCompressed() bool {
   503  	_, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
   504  	return ok
   505  }
   506  
   507  // IsCompressedOK returns whether the object is compressed and can be decompressed.
   508  func (o *ObjectInfo) IsCompressedOK() (bool, error) {
   509  	scheme, ok := o.UserDefined[ReservedMetadataPrefix+"compression"]
   510  	if !ok {
   511  		return false, nil
   512  	}
   513  	switch scheme {
   514  	case compressionAlgorithmV1, compressionAlgorithmV2:
   515  		return true, nil
   516  	}
   517  	return true, fmt.Errorf("unknown compression scheme: %s", scheme)
   518  }
   519  
   520  // GetActualSize - returns the actual size of the stored object
   521  func (o ObjectInfo) GetActualSize() (int64, error) {
   522  	if o.ActualSize != nil {
   523  		return *o.ActualSize, nil
   524  	}
   525  	if o.IsCompressed() {
   526  		sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
   527  		if !ok {
   528  			return -1, errInvalidDecompressedSize
   529  		}
   530  		size, err := strconv.ParseInt(sizeStr, 10, 64)
   531  		if err != nil {
   532  			return -1, errInvalidDecompressedSize
   533  		}
   534  		return size, nil
   535  	}
   536  	if _, ok := crypto.IsEncrypted(o.UserDefined); ok {
   537  		sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"]
   538  		if ok {
   539  			size, err := strconv.ParseInt(sizeStr, 10, 64)
   540  			if err != nil {
   541  				return -1, errObjectTampered
   542  			}
   543  			return size, nil
   544  		}
   545  		return o.DecryptedSize()
   546  	}
   547  
   548  	return o.Size, nil
   549  }
   550  
    551  // isCompressible returns whether the object should be compressed. Compression is
    552  // disabled for encrypted requests unless explicitly allowed, since using compression and
    553  // encryption together opens room for side-channel attacks; non-compressible extensions/content-types are also excluded.
   554  func isCompressible(header http.Header, object string) bool {
   555  	globalCompressConfigMu.Lock()
   556  	cfg := globalCompressConfig
   557  	globalCompressConfigMu.Unlock()
   558  
   559  	return !excludeForCompression(header, object, cfg)
   560  }
   561  
   562  // Eliminate the non-compressible objects.
   563  func excludeForCompression(header http.Header, object string, cfg compress.Config) bool {
   564  	objStr := object
   565  	contentType := header.Get(xhttp.ContentType)
   566  	if !cfg.Enabled {
   567  		return true
   568  	}
   569  
   570  	if crypto.Requested(header) && !cfg.AllowEncrypted {
   571  		return true
   572  	}
   573  
   574  	// We strictly disable compression for standard extensions/content-types (`compressed`).
    575  	// We strictly disable compression for standard extensions/content-types that are already compressed.
   576  		return true
   577  	}
   578  
   579  	// Filter compression includes.
   580  	if len(cfg.Extensions) == 0 && len(cfg.MimeTypes) == 0 {
   581  		// Nothing to filter, include everything.
   582  		return false
   583  	}
   584  
   585  	if len(cfg.Extensions) > 0 && hasStringSuffixInSlice(objStr, cfg.Extensions) {
   586  		// Matched an extension to compress, do not exclude.
   587  		return false
   588  	}
   589  
   590  	if len(cfg.MimeTypes) > 0 && hasPattern(cfg.MimeTypes, contentType) {
    591  		// Matched a MIME type to compress, do not exclude.
   592  		return false
   593  	}
   594  
   595  	// Did not match any inclusion filters, exclude from compression.
   596  	return true
   597  }
   598  
    599  // Utility which returns whether the string ends with any suffix in the list.
    600  // Comparison is case-insensitive. Explicit short-circuit if
   601  // the list contains the wildcard "*".
   602  func hasStringSuffixInSlice(str string, list []string) bool {
   603  	str = strings.ToLower(str)
   604  	for _, v := range list {
   605  		if v == "*" {
   606  			return true
   607  		}
   608  
   609  		if strings.HasSuffix(str, strings.ToLower(v)) {
   610  			return true
   611  		}
   612  	}
   613  	return false
   614  }
   615  
   616  // Returns true if any of the given wildcard patterns match the matchStr.
   617  func hasPattern(patterns []string, matchStr string) bool {
   618  	for _, pattern := range patterns {
   619  		if ok := wildcard.MatchSimple(pattern, matchStr); ok {
   620  			return true
   621  		}
   622  	}
   623  	return false
   624  }
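
// exampleCompressionFilters is an illustrative sketch (not part of the
// original source) showing how the suffix and wildcard helpers decide whether
// an object name or content type matches a compression filter.
func exampleCompressionFilters() {
	fmt.Println(hasStringSuffixInSlice("photo.JPG", []string{".jpg", ".gz"})) // true: case-insensitive suffix match
	fmt.Println(hasStringSuffixInSlice("notes.txt", []string{".jpg", ".gz"})) // false
	fmt.Println(hasPattern([]string{"video/*"}, "video/mp4"))                 // true: wildcard match
	fmt.Println(hasPattern([]string{"text/*"}, "application/json"))           // false
}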
   625  
   626  // Returns the part file name which matches the partNumber and etag.
   627  func getPartFile(entriesTrie *trie.Trie, partNumber int, etag string) (partFile string) {
   628  	for _, match := range entriesTrie.PrefixMatch(fmt.Sprintf("%.5d.%s.", partNumber, etag)) {
   629  		partFile = match
   630  		break
   631  	}
   632  	return partFile
   633  }
   634  
   635  func partNumberToRangeSpec(oi ObjectInfo, partNumber int) *HTTPRangeSpec {
   636  	if oi.Size == 0 || len(oi.Parts) == 0 {
   637  		return nil
   638  	}
   639  
   640  	var start int64
   641  	end := int64(-1)
   642  	for i := 0; i < len(oi.Parts) && i < partNumber; i++ {
   643  		start = end + 1
   644  		end = start + oi.Parts[i].ActualSize - 1
   645  	}
   646  
   647  	return &HTTPRangeSpec{Start: start, End: end}
   648  }
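
// examplePartNumberToRangeSpec is an illustrative sketch (not part of the
// original source), assuming the ObjectPartInfo fields Number and ActualSize:
// for an object with three 5 MiB parts, part 2 maps to bytes 5 MiB through
// 10 MiB - 1 of the actual (uncompressed) object.
func examplePartNumberToRangeSpec() {
	oi := ObjectInfo{
		Size: 15 << 20,
		Parts: []ObjectPartInfo{
			{Number: 1, ActualSize: 5 << 20},
			{Number: 2, ActualSize: 5 << 20},
			{Number: 3, ActualSize: 5 << 20},
		},
	}
	rs := partNumberToRangeSpec(oi, 2)
	fmt.Println(rs.Start, rs.End) // 5242880 10485759
}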
   649  
   650  // Returns the compressed offset which should be skipped.
    651  // If encrypted, offsets are adjusted for encrypted block headers/trailers.
    652  // Since decompression happens after decryption, the encryption overhead is only added to compressedOffset.
   653  func getCompressedOffsets(oi ObjectInfo, offset int64, decrypt func([]byte) ([]byte, error)) (compressedOffset int64, partSkip int64, firstPart int, decryptSkip int64, seqNum uint32) {
   654  	var skipLength int64
   655  	var cumulativeActualSize int64
   656  	var firstPartIdx int
   657  	for i, part := range oi.Parts {
   658  		cumulativeActualSize += part.ActualSize
   659  		if cumulativeActualSize <= offset {
   660  			compressedOffset += part.Size
   661  		} else {
   662  			firstPartIdx = i
   663  			skipLength = cumulativeActualSize - part.ActualSize
   664  			break
   665  		}
   666  	}
   667  	partSkip = offset - skipLength
   668  
   669  	// Load index and skip more if feasible.
   670  	if partSkip > 0 && len(oi.Parts) > firstPartIdx && len(oi.Parts[firstPartIdx].Index) > 0 {
   671  		_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
   672  		if isEncrypted {
   673  			dec, err := decrypt(oi.Parts[firstPartIdx].Index)
   674  			if err == nil {
   675  				// Load Index
   676  				var idx s2.Index
   677  				_, err := idx.Load(s2.RestoreIndexHeaders(dec))
   678  
   679  				// Find compressed/uncompressed offsets of our partskip
   680  				compOff, uCompOff, err2 := idx.Find(partSkip)
   681  
   682  				if err == nil && err2 == nil && compOff > 0 {
   683  					// Encrypted.
   684  					const sseDAREEncPackageBlockSize = SSEDAREPackageBlockSize + SSEDAREPackageMetaSize
   685  					// Number of full blocks in skipped area
   686  					seqNum = uint32(compOff / SSEDAREPackageBlockSize)
   687  					// Skip this many inside a decrypted block to get to compression block start
   688  					decryptSkip = compOff % SSEDAREPackageBlockSize
   689  					// Skip this number of full blocks.
   690  					skipEnc := compOff / SSEDAREPackageBlockSize
   691  					skipEnc *= sseDAREEncPackageBlockSize
   692  					compressedOffset += skipEnc
   693  					// Skip this number of uncompressed bytes.
   694  					partSkip -= uCompOff
   695  				}
   696  			}
   697  		} else {
   698  			// Not encrypted
   699  			var idx s2.Index
   700  			_, err := idx.Load(s2.RestoreIndexHeaders(oi.Parts[firstPartIdx].Index))
   701  
   702  			// Find compressed/uncompressed offsets of our partskip
   703  			compOff, uCompOff, err2 := idx.Find(partSkip)
   704  
   705  			if err == nil && err2 == nil && compOff > 0 {
   706  				compressedOffset += compOff
   707  				partSkip -= uCompOff
   708  			}
   709  		}
   710  	}
   711  
   712  	return compressedOffset, partSkip, firstPartIdx, decryptSkip, seqNum
   713  }
   714  
    715  // GetObjectReader is a type that wraps a reader with cleanup functions to
    716  // provide a ReadCloser interface whose Close() runs them.
   717  type GetObjectReader struct {
   718  	io.Reader
   719  	ObjInfo    ObjectInfo
   720  	cleanUpFns []func()
   721  	once       sync.Once
   722  }
   723  
   724  // WithCleanupFuncs sets additional cleanup functions to be called when closing
   725  // the GetObjectReader.
   726  func (g *GetObjectReader) WithCleanupFuncs(fns ...func()) *GetObjectReader {
   727  	g.cleanUpFns = append(g.cleanUpFns, fns...)
   728  	return g
   729  }
   730  
   731  // NewGetObjectReaderFromReader sets up a GetObjectReader with a given
   732  // reader. This ignores any object properties.
   733  func NewGetObjectReaderFromReader(r io.Reader, oi ObjectInfo, opts ObjectOptions, cleanupFns ...func()) (*GetObjectReader, error) {
   734  	if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
   735  		// Call the cleanup funcs
   736  		for i := len(cleanupFns) - 1; i >= 0; i-- {
   737  			cleanupFns[i]()
   738  		}
   739  		return nil, PreConditionFailed{}
   740  	}
   741  	return &GetObjectReader{
   742  		ObjInfo:    oi,
   743  		Reader:     r,
   744  		cleanUpFns: cleanupFns,
   745  	}, nil
   746  }
   747  
   748  // ObjReaderFn is a function type that takes a reader and returns
   749  // GetObjectReader and an error. Request headers are passed to provide
   750  // encryption parameters. cleanupFns allow cleanup funcs to be
   751  // registered for calling after usage of the reader.
   752  type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func()) (r *GetObjectReader, err error)
   753  
   754  // NewGetObjectReader creates a new GetObjectReader. The cleanUpFns
    755  // are called on Close() in reverse (LIFO) order of how they were passed to ObjReaderFn. NOTE: It is
    756  // assumed that cleanup functions do not panic (otherwise, they may
   757  // not all run!).
   758  func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) (
   759  	fn ObjReaderFn, off, length int64, err error,
   760  ) {
   761  	if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) {
   762  		return nil, 0, 0, PreConditionFailed{}
   763  	}
   764  
   765  	if rs == nil && opts.PartNumber > 0 {
   766  		rs = partNumberToRangeSpec(oi, opts.PartNumber)
   767  	}
   768  
   769  	_, isEncrypted := crypto.IsEncrypted(oi.UserDefined)
   770  	isCompressed, err := oi.IsCompressedOK()
   771  	if err != nil {
   772  		return nil, 0, 0, err
   773  	}
   774  
   775  	// if object is encrypted and it is a restore request or if NoDecryption
   776  	// was requested, fetch content without decrypting.
   777  	if opts.Transition.RestoreRequest != nil || opts.NoDecryption {
   778  		isEncrypted = false
   779  		isCompressed = false
   780  	}
   781  
   782  	// Calculate range to read (different for encrypted/compressed objects)
   783  	switch {
   784  	case isCompressed:
   785  		var firstPart int
   786  		if opts.PartNumber > 0 {
   787  			// firstPart is an index to Parts slice,
   788  			// make sure that PartNumber uses the
   789  			// index value properly.
   790  			firstPart = opts.PartNumber - 1
   791  		}
   792  
   793  		// If compressed, we start from the beginning of the part.
   794  		// Read the decompressed size from the meta.json.
   795  		actualSize, err := oi.GetActualSize()
   796  		if err != nil {
   797  			return nil, 0, 0, err
   798  		}
   799  		var decryptSkip int64
   800  		var seqNum uint32
   801  
   802  		off, length = int64(0), oi.Size
   803  		decOff, decLength := int64(0), actualSize
   804  		if rs != nil {
   805  			off, length, err = rs.GetOffsetLength(actualSize)
   806  			if err != nil {
   807  				return nil, 0, 0, err
   808  			}
   809  			decrypt := func(b []byte) ([]byte, error) {
   810  				return b, nil
   811  			}
   812  			if isEncrypted {
   813  				decrypt = oi.compressionIndexDecrypt
   814  			}
   815  			// In case of range based queries on multiparts, the offset and length are reduced.
   816  			off, decOff, firstPart, decryptSkip, seqNum = getCompressedOffsets(oi, off, decrypt)
   817  			decLength = length
   818  			length = oi.Size - off
   819  			// For negative length we read everything.
   820  			if decLength < 0 {
   821  				decLength = actualSize - decOff
   822  			}
   823  
   824  			// Reply back invalid range if the input offset and length fall out of range.
   825  			if decOff > actualSize || decOff+decLength > actualSize {
   826  				return nil, 0, 0, errInvalidRange
   827  			}
   828  		}
   829  		fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
   830  			if isEncrypted {
   831  				copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
   832  				// Attach decrypter on inputReader
   833  				inputReader, err = DecryptBlocksRequestR(inputReader, h, seqNum, firstPart, oi, copySource)
   834  				if err != nil {
   835  					// Call the cleanup funcs
   836  					for i := len(cFns) - 1; i >= 0; i-- {
   837  						cFns[i]()
   838  					}
   839  					return nil, err
   840  				}
   841  				if decryptSkip > 0 {
   842  					inputReader = ioutil.NewSkipReader(inputReader, decryptSkip)
   843  				}
   844  				oi.Size = decLength
   845  			}
   846  			// Decompression reader.
   847  			var dopts []s2.ReaderOption
   848  			if off > 0 || decOff > 0 {
   849  				// We are not starting at the beginning, so ignore stream identifiers.
   850  				dopts = append(dopts, s2.ReaderIgnoreStreamIdentifier())
   851  			}
   852  			s2Reader := s2.NewReader(inputReader, dopts...)
   853  			// Apply the skipLen and limit on the decompressed stream.
   854  			if decOff > 0 {
   855  				if err = s2Reader.Skip(decOff); err != nil {
   856  					// Call the cleanup funcs
   857  					for i := len(cFns) - 1; i >= 0; i-- {
   858  						cFns[i]()
   859  					}
   860  					return nil, err
   861  				}
   862  			}
   863  
   864  			decReader := io.LimitReader(s2Reader, decLength)
   865  			if decLength > compReadAheadSize {
   866  				rah, err := readahead.NewReaderSize(decReader, compReadAheadBuffers, compReadAheadBufSize)
   867  				if err == nil {
   868  					decReader = rah
   869  					cFns = append([]func(){func() {
   870  						rah.Close()
   871  					}}, cFns...)
   872  				}
   873  			}
   874  			oi.Size = decLength
   875  
   876  			// Assemble the GetObjectReader
   877  			r = &GetObjectReader{
   878  				ObjInfo:    oi,
   879  				Reader:     decReader,
   880  				cleanUpFns: cFns,
   881  			}
   882  			return r, nil
   883  		}
   884  
   885  	case isEncrypted:
   886  		var seqNumber uint32
   887  		var partStart int
   888  		var skipLen int64
   889  
   890  		off, length, skipLen, seqNumber, partStart, err = oi.GetDecryptedRange(rs)
   891  		if err != nil {
   892  			return nil, 0, 0, err
   893  		}
   894  		var decSize int64
   895  		decSize, err = oi.DecryptedSize()
   896  		if err != nil {
   897  			return nil, 0, 0, err
   898  		}
   899  		var decRangeLength int64
   900  		decRangeLength, err = rs.GetLength(decSize)
   901  		if err != nil {
   902  			return nil, 0, 0, err
   903  		}
   904  
   905  		// We define a closure that performs decryption given
   906  		// a reader that returns the desired range of
   907  		// encrypted bytes. The header parameter is used to
   908  		// provide encryption parameters.
   909  		fn = func(inputReader io.Reader, h http.Header, cFns ...func()) (r *GetObjectReader, err error) {
   910  			copySource := h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != ""
   911  
   912  			// Attach decrypter on inputReader
   913  			var decReader io.Reader
   914  			decReader, err = DecryptBlocksRequestR(inputReader, h, seqNumber, partStart, oi, copySource)
   915  			if err != nil {
   916  				// Call the cleanup funcs
   917  				for i := len(cFns) - 1; i >= 0; i-- {
   918  					cFns[i]()
   919  				}
   920  				return nil, err
   921  			}
   922  
   923  			oi.ETag = getDecryptedETag(h, oi, false)
   924  
   925  			// Apply the skipLen and limit on the
   926  			// decrypted stream
   927  			decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength)
   928  
   929  			// Assemble the GetObjectReader
   930  			r = &GetObjectReader{
   931  				ObjInfo:    oi,
   932  				Reader:     decReader,
   933  				cleanUpFns: cFns,
   934  			}
   935  			return r, nil
   936  		}
   937  
   938  	default:
   939  		off, length, err = rs.GetOffsetLength(oi.Size)
   940  		if err != nil {
   941  			return nil, 0, 0, err
   942  		}
   943  		fn = func(inputReader io.Reader, _ http.Header, cFns ...func()) (r *GetObjectReader, err error) {
   944  			r = &GetObjectReader{
   945  				ObjInfo:    oi,
   946  				Reader:     inputReader,
   947  				cleanUpFns: cFns,
   948  			}
   949  			return r, nil
   950  		}
   951  	}
   952  	return fn, off, length, nil
   953  }
   954  
   955  // Close - calls the cleanup actions in reverse order
   956  func (g *GetObjectReader) Close() error {
   957  	if g == nil {
   958  		return nil
   959  	}
   960  	// sync.Once is used here to ensure that Close() is
   961  	// idempotent.
   962  	g.once.Do(func() {
   963  		for i := len(g.cleanUpFns) - 1; i >= 0; i-- {
   964  			g.cleanUpFns[i]()
   965  		}
   966  	})
   967  	return nil
   968  }
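
// exampleGetObjectReaderCleanup is an illustrative sketch (not part of the
// original source) showing that cleanup functions registered on a
// GetObjectReader run in reverse order of registration when Close is called.
func exampleGetObjectReaderCleanup() {
	r, err := NewGetObjectReaderFromReader(strings.NewReader("hello"), ObjectInfo{}, ObjectOptions{})
	if err != nil {
		return
	}
	r.WithCleanupFuncs(
		func() { fmt.Println("registered first, runs last") },
		func() { fmt.Println("registered last, runs first") },
	)
	r.Close() // prints "registered last, runs first", then "registered first, runs last"
}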
   969  
   970  // compressionIndexEncrypter returns a function that will read data from input,
   971  // encrypt it using the provided key and return the result.
   972  func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func() []byte {
   973  	var data []byte
   974  	var fetched bool
   975  	return func() []byte {
   976  		if !fetched {
   977  			data = input()
   978  			fetched = true
   979  		}
   980  		return metadataEncrypter(key)("compression-index", data)
   981  	}
   982  }
   983  
   984  // compressionIndexDecrypt reverses compressionIndexEncrypter.
   985  func (o *ObjectInfo) compressionIndexDecrypt(input []byte) ([]byte, error) {
   986  	return o.metadataDecrypter()("compression-index", input)
   987  }
   988  
   989  // SealMD5CurrFn seals md5sum with object encryption key and returns sealed
   990  // md5sum
   991  type SealMD5CurrFn func([]byte) []byte
   992  
   993  // PutObjReader is a type that wraps sio.EncryptReader and
   994  // underlying hash.Reader in a struct
   995  type PutObjReader struct {
   996  	*hash.Reader              // actual data stream
   997  	rawReader    *hash.Reader // original data stream
   998  	sealMD5Fn    SealMD5CurrFn
   999  }
  1000  
  1001  // Size returns the absolute number of bytes the Reader
  1002  // will return during reading. It returns -1 for unlimited
  1003  // data.
  1004  func (p *PutObjReader) Size() int64 {
  1005  	return p.Reader.Size()
  1006  }
  1007  
  1008  // MD5CurrentHexString returns the current MD5Sum or encrypted MD5Sum
  1009  // as a hex encoded string
  1010  func (p *PutObjReader) MD5CurrentHexString() string {
  1011  	md5sumCurr := p.rawReader.MD5Current()
  1012  	var appendHyphen bool
  1013  	// md5sumcurr is not empty in two scenarios
  1014  	// - server is running in strict compatibility mode
  1015  	// - client set Content-Md5 during PUT operation
  1016  	if len(md5sumCurr) == 0 {
  1017  		// md5sumCurr is only empty when we are running
  1018  		// in non-compatibility mode.
  1019  		md5sumCurr = make([]byte, 16)
  1020  		rand.Read(md5sumCurr)
  1021  		appendHyphen = true
  1022  	}
  1023  	if p.sealMD5Fn != nil {
  1024  		md5sumCurr = p.sealMD5Fn(md5sumCurr)
  1025  	}
  1026  	if appendHyphen {
   1027  		// Make sure to return an etag string of up to 32 hex characters; for SSE
   1028  		// requests the ETag might be longer, and the code decrypting the
   1029  		// ETag ignores ETags in multipart ETag form, i.e. <hex>-N
  1030  		return hex.EncodeToString(md5sumCurr)[:32] + "-1"
  1031  	}
  1032  	return hex.EncodeToString(md5sumCurr)
  1033  }
  1034  
  1035  // WithEncryption sets up encrypted reader and the sealing for content md5sum
  1036  // using objEncKey. Unsealed md5sum is computed from the rawReader setup when
  1037  // NewPutObjReader was called. It returns an error if called on an uninitialized
  1038  // PutObjReader.
  1039  func (p *PutObjReader) WithEncryption(encReader *hash.Reader, objEncKey *crypto.ObjectKey) (*PutObjReader, error) {
  1040  	if p.Reader == nil {
  1041  		return nil, errors.New("put-object reader uninitialized")
  1042  	}
  1043  	p.Reader = encReader
  1044  	p.sealMD5Fn = sealETagFn(*objEncKey)
  1045  	return p, nil
  1046  }
  1047  
  1048  // NewPutObjReader returns a new PutObjReader. It uses given hash.Reader's
  1049  // MD5Current method to construct md5sum when requested downstream.
  1050  func NewPutObjReader(rawReader *hash.Reader) *PutObjReader {
  1051  	return &PutObjReader{Reader: rawReader, rawReader: rawReader}
  1052  }
  1053  
  1054  func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
  1055  	var emptyKey [32]byte
  1056  	if bytes.Equal(encKey[:], emptyKey[:]) {
  1057  		return md5CurrSum
  1058  	}
  1059  	return encKey.SealETag(md5CurrSum)
  1060  }
  1061  
  1062  func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
  1063  	fn := func(md5sumcurr []byte) []byte {
  1064  		return sealETag(key, md5sumcurr)
  1065  	}
  1066  	return fn
  1067  }
  1068  
  1069  // compressOpts are the options for writing compressed data.
  1070  var compressOpts []s2.WriterOption
  1071  
  1072  func init() {
  1073  	if runtime.GOARCH == "amd64" {
  1074  		// On amd64 we have assembly and can use stronger compression.
  1075  		compressOpts = append(compressOpts, s2.WriterBetterCompression())
  1076  	}
  1077  }
  1078  
  1079  // newS2CompressReader will read data from r, compress it and return the compressed data as a Reader.
  1080  // Use Close to ensure resources are released on incomplete streams.
  1081  //
   1082  // Providing the expected input size 'on' is always recommended so that this
   1083  // function works properly, because we do not wish to create an object if the
   1084  // client closed the stream prematurely.
  1085  func newS2CompressReader(r io.Reader, on int64, encrypted bool) (rc io.ReadCloser, idx func() []byte) {
  1086  	pr, pw := io.Pipe()
  1087  	// Copy input to compressor
  1088  	opts := compressOpts
  1089  	if encrypted {
  1090  		// The values used for padding are not a security concern,
  1091  		// but we choose pseudo-random numbers instead of just zeros.
  1092  		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
  1093  		opts = append([]s2.WriterOption{s2.WriterPadding(compPadEncrypted), s2.WriterPaddingSrc(rng)}, compressOpts...)
  1094  	}
  1095  	comp := s2.NewWriter(pw, opts...)
  1096  	indexCh := make(chan []byte, 1)
  1097  	go func() {
  1098  		defer xioutil.SafeClose(indexCh)
  1099  		cn, err := io.Copy(comp, r)
  1100  		if err != nil {
  1101  			comp.Close()
  1102  			pw.CloseWithError(err)
  1103  			return
  1104  		}
  1105  		if on > 0 && on != cn {
   1106  			// If the client didn't send all the
   1107  			// expected data, fail the upload here.
  1108  			comp.Close()
  1109  			pw.CloseWithError(IncompleteBody{})
  1110  			return
  1111  		}
  1112  		// Close the stream.
  1113  		// If more than compMinIndexSize was written, generate index.
  1114  		if cn > compMinIndexSize {
  1115  			idx, err := comp.CloseIndex()
  1116  			idx = s2.RemoveIndexHeaders(idx)
  1117  			indexCh <- idx
  1118  			pw.CloseWithError(err)
  1119  			return
  1120  		}
  1121  		pw.CloseWithError(comp.Close())
  1122  	}()
  1123  	var gotIdx []byte
  1124  	return pr, func() []byte {
  1125  		if gotIdx != nil {
  1126  			return gotIdx
  1127  		}
  1128  		// Will get index or nil if closed.
  1129  		gotIdx = <-indexCh
  1130  		return gotIdx
  1131  	}
  1132  }
  1133  
  1134  // compressSelfTest performs a self-test to ensure that compression
   1135  // algorithm completes a roundtrip. If the algorithm
   1136  // produces an incorrect checksum, it fails with a hard error.
  1137  //
  1138  // compressSelfTest tries to catch any issue in the compression implementation
  1139  // early instead of silently corrupting data.
  1140  func compressSelfTest() {
  1141  	// 4 MB block.
  1142  	// Approx runtime ~30ms
  1143  	data := make([]byte, 4<<20)
  1144  	rng := rand.New(rand.NewSource(0))
  1145  	for i := range data {
  1146  		// Generate compressible stream...
  1147  		data[i] = byte(rng.Int63() & 3)
  1148  	}
  1149  	failOnErr := func(err error) {
  1150  		if err != nil {
  1151  			logger.Fatal(errSelfTestFailure, "compress: error on self-test: %v", err)
  1152  		}
  1153  	}
  1154  	const skip = 2<<20 + 511
  1155  	r, _ := newS2CompressReader(bytes.NewBuffer(data), int64(len(data)), true)
  1156  	b, err := io.ReadAll(r)
  1157  	failOnErr(err)
  1158  	failOnErr(r.Close())
  1159  	// Decompression reader.
  1160  	s2Reader := s2.NewReader(bytes.NewBuffer(b))
  1161  	// Apply the skipLen on the decompressed stream.
  1162  	failOnErr(s2Reader.Skip(skip))
  1163  	got, err := io.ReadAll(s2Reader)
  1164  	failOnErr(err)
  1165  	if !bytes.Equal(got, data[skip:]) {
  1166  		logger.Fatal(errSelfTestFailure, "compress: self-test roundtrip mismatch.")
  1167  	}
  1168  }
  1169  
  1170  // getDiskInfos returns the disk information for the provided disks.
   1171  // If a disk is nil or returns an error, the corresponding result will be nil as well.
  1172  func getDiskInfos(ctx context.Context, disks ...StorageAPI) []*DiskInfo {
  1173  	res := make([]*DiskInfo, len(disks))
  1174  	opts := DiskInfoOptions{}
  1175  	for i, disk := range disks {
  1176  		if disk == nil {
  1177  			continue
  1178  		}
  1179  		if di, err := disk.DiskInfo(ctx, opts); err == nil {
  1180  			res[i] = &di
  1181  		}
  1182  	}
  1183  	return res
  1184  }
  1185  
   1186  // hasSpaceFor returns whether the disks in `di` have space for an object of a given size.
  1187  func hasSpaceFor(di []*DiskInfo, size int64) (bool, error) {
  1188  	// We multiply the size by 2 to account for erasure coding.
  1189  	size *= 2
  1190  	if size < 0 {
  1191  		// If no size, assume diskAssumeUnknownSize.
  1192  		size = diskAssumeUnknownSize
  1193  	}
  1194  
  1195  	var available uint64
  1196  	var total uint64
  1197  	var nDisks int
  1198  	for _, disk := range di {
  1199  		if disk == nil || disk.Total == 0 {
  1200  			// Disk offline, no inodes or something else is wrong.
  1201  			continue
  1202  		}
  1203  		nDisks++
  1204  		total += disk.Total
  1205  		available += disk.Total - disk.Used
  1206  	}
  1207  
  1208  	if nDisks < len(di)/2 || nDisks <= 0 {
  1209  		return false, fmt.Errorf("not enough online disks to calculate the available space, expected (%d)/(%d)", (len(di)/2)+1, nDisks)
  1210  	}
  1211  
  1212  	// Check we have enough on each disk, ignoring diskFillFraction.
  1213  	perDisk := size / int64(nDisks)
  1214  	for _, disk := range di {
  1215  		if disk == nil || disk.Total == 0 {
  1216  			continue
  1217  		}
  1218  		if !globalIsErasureSD && disk.FreeInodes < diskMinInodes && disk.UsedInodes > 0 {
  1219  			// We have an inode count, but not enough inodes.
  1220  			return false, nil
  1221  		}
  1222  		if int64(disk.Free) <= perDisk {
  1223  			return false, nil
  1224  		}
  1225  	}
  1226  
   1227  	// Make sure we can fit "size" onto the disks without getting above the diskFillFraction
  1228  	if available < uint64(size) {
  1229  		return false, nil
  1230  	}
  1231  
  1232  	// How much will be left after adding the file.
  1233  	available -= uint64(size)
  1234  
   1235  	// wantLeft is how much space must at least be left.
  1236  	wantLeft := uint64(float64(total) * (1.0 - diskFillFraction))
  1237  	return available > wantLeft, nil
  1238  }
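
// exampleHasSpaceFor is an illustrative sketch (not part of the original
// source): with four 1 TiB disks that are half full, a 100 GiB object
// (200 GiB after the 2x erasure-coding allowance) is reported as fitting,
// assuming the default diskFillFraction leaves enough headroom.
func exampleHasSpaceFor() {
	disks := make([]*DiskInfo, 4)
	for i := range disks {
		disks[i] = &DiskInfo{Total: 1 << 40, Used: 512 << 30, Free: 512 << 30}
	}
	ok, err := hasSpaceFor(disks, 100<<30)
	fmt.Println(ok, err) // true <nil> with the default configuration
}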