github.com/minio/mc@v0.0.0-20240503112107-b471de8d1882/cmd/client-s3.go

     1  // Copyright (c) 2015-2022 MinIO, Inc.
     2  //
     3  // This file is part of MinIO Object Storage stack
     4  //
     5  // This program is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Affero General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // This program is distributed in the hope that it will be useful
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    13  // GNU Affero General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Affero General Public License
    16  // along with this program.  If not, see <http://www.gnu.org/licenses/>.
    17  
    18  package cmd
    19  
    20  import (
    21  	"bytes"
    22  	"context"
    23  	"crypto/tls"
    24  	"encoding/json"
    25  	"errors"
    26  	"fmt"
    27  	"hash/fnv"
    28  	"io"
    29  	"math/rand"
    30  	"net"
    31  	"net/http"
    32  	"net/url"
    33  	"os"
    34  	"path"
    35  	"path/filepath"
    36  	"regexp"
    37  	"sort"
    38  	"strings"
    39  	"sync"
    40  	"time"
    41  
    42  	"github.com/klauspost/compress/gzhttp"
    43  	"github.com/minio/pkg/v2/env"
    44  
    45  	"github.com/minio/minio-go/v7"
    46  	"github.com/minio/minio-go/v7/pkg/credentials"
    47  	"github.com/minio/minio-go/v7/pkg/encrypt"
    48  	"github.com/minio/minio-go/v7/pkg/lifecycle"
    49  	"github.com/minio/minio-go/v7/pkg/notification"
    50  	"github.com/minio/minio-go/v7/pkg/policy"
    51  	"github.com/minio/minio-go/v7/pkg/replication"
    52  	"github.com/minio/minio-go/v7/pkg/s3utils"
    53  	"github.com/minio/minio-go/v7/pkg/sse"
    54  	"github.com/minio/minio-go/v7/pkg/tags"
    55  	"github.com/minio/pkg/v2/mimedb"
    56  
    57  	"github.com/minio/mc/pkg/deadlineconn"
    58  	"github.com/minio/mc/pkg/httptracer"
    59  	"github.com/minio/mc/pkg/limiter"
    60  	"github.com/minio/mc/pkg/probe"
    61  )
    62  
    63  // S3Client construct
    64  type S3Client struct {
    65  	sync.Mutex
    66  	targetURL    *ClientURL
    67  	api          *minio.Client
    68  	virtualStyle bool
    69  }
    70  
    71  const (
    72  	amazonHostNameAccelerated = "s3-accelerate.amazonaws.com"
    73  	googleHostName            = "storage.googleapis.com"
    74  	serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
    75  
    76  	defaultRecordDelimiter = "\n"
    77  	defaultFieldDelimiter  = ","
    78  )
    79  
    80  const (
    81  	recordDelimiterType      = "recorddelimiter"
    82  	fieldDelimiterType       = "fielddelimiter"
    83  	quoteCharacterType       = "quotechar"
    84  	quoteEscapeCharacterType = "quoteescchar"
    85  	quoteFieldsType          = "quotefields"
    86  	fileHeaderType           = "fileheader"
    87  	commentCharType          = "commentchar"
    88  	typeJSONType             = "type"
    89  	// AmzObjectLockMode sets object lock mode
    90  	AmzObjectLockMode = "X-Amz-Object-Lock-Mode"
    91  	// AmzObjectLockRetainUntilDate sets object lock retain until date
    92  	AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date"
    93  	// AmzObjectLockLegalHold sets object lock legal hold
    94  	AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold"
    95  	amzObjectSSEKMSKeyID   = "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"
    96  	amzObjectSSE           = "X-Amz-Server-Side-Encryption"
    97  )
    98  
    99  type dialContext func(ctx context.Context, network, addr string) (net.Conn, error)
   100  
   101  // newCustomDialContext sets up a custom dialer for any external communication and proxies.
   102  func newCustomDialContext(c *Config) dialContext {
   103  	return func(ctx context.Context, network, addr string) (net.Conn, error) {
   104  		dialer := &net.Dialer{
   105  			Timeout:   10 * time.Second,
   106  			KeepAlive: 15 * time.Second,
   107  		}
   108  
   109  		conn, err := dialer.DialContext(ctx, network, addr)
   110  		if err != nil {
   111  			return nil, err
   112  		}
   113  
   114  		dconn := deadlineconn.New(conn).
   115  			WithReadDeadline(c.ConnReadDeadline).
   116  			WithWriteDeadline(c.ConnWriteDeadline)
   117  
   118  		return dconn, nil
   119  	}
   120  }
   121  
   122  var timeSentinel = time.Unix(0, 0).UTC()
   123  
   124  // getConfigHash returns the hash for the *Config
   125  func getConfigHash(config *Config) uint32 {
   126  	// Creates a parsed URL.
   127  	targetURL := newClientURL(config.HostURL)
   128  
   129  	// Save if target supports virtual host style.
   130  	hostName := targetURL.Host
   131  
   132  	// Generate a hash out of s3Conf.
   133  	confHash := fnv.New32a()
   134  	confHash.Write([]byte(hostName + config.AccessKey + config.SecretKey + config.SessionToken))
   135  	confSum := confHash.Sum32()
   136  	return confSum
   137  }
   138  
   139  // isHostTLS returns true if the Host URL is https
   140  func isHostTLS(config *Config) bool {
   141  	// By default, enable HTTPS.
   142  	useTLS := true
   143  	targetURL := newClientURL(config.HostURL)
   144  	if targetURL.Scheme == "http" {
   145  		useTLS = false
   146  	}
   147  	return useTLS
   148  }
   149  
   150  // getTransportForConfig returns a corresponding http.RoundTripper for the *Config;
   151  // set withS3v2 to true to add the traceV2 tracer.
   152  func getTransportForConfig(config *Config, withS3v2 bool) http.RoundTripper {
   153  	var transport http.RoundTripper
   154  
   155  	useTLS := isHostTLS(config)
   156  
   157  	if config.Transport != nil {
   158  		transport = config.Transport
   159  	} else {
   160  		tr := &http.Transport{
   161  			Proxy:                 http.ProxyFromEnvironment,
   162  			DialContext:           newCustomDialContext(config),
   163  			MaxIdleConnsPerHost:   1024,
   164  			WriteBufferSize:       32 << 10, // 32KiB moving up from 4KiB default
   165  			ReadBufferSize:        32 << 10, // 32KiB moving up from 4KiB default
   166  			IdleConnTimeout:       90 * time.Second,
   167  			TLSHandshakeTimeout:   10 * time.Second,
   168  			ExpectContinueTimeout: 10 * time.Second,
   169  			// Set this value so that the underlying transport round-tripper
   170  			// doesn't try to auto decode the body of objects with
   171  			// content-encoding set to `gzip`.
   172  			//
   173  			// Refer:
   174  			//    https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
   175  			DisableCompression: true,
   176  		}
   177  		if useTLS {
   178  			// Keep TLS config.
   179  			tlsConfig := &tls.Config{
   180  				RootCAs: globalRootCAs,
   181  				// Can't use SSLv3 because of POODLE and BEAST
   182  				// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
   183  				// Can't use TLSv1.1 because of RC4 cipher usage
   184  				MinVersion: tls.VersionTLS12,
   185  			}
   186  			if config.Insecure {
   187  				tlsConfig.InsecureSkipVerify = true
   188  			}
   189  			tr.TLSClientConfig = tlsConfig
   190  
   191  			// Because we create a custom TLSClientConfig, we have to opt-in to HTTP/2.
   192  			// See https://github.com/golang/go/issues/14275
   193  			//
   194  			// TODO: Enable http2.0 when upstream issues related to HTTP/2 are fixed.
   195  			//
   196  			// if e = http2.ConfigureTransport(tr); e != nil {
   197  			// 	return nil, probe.NewError(e)
   198  			// }
   199  		}
   200  		transport = tr
   201  	}
   202  
   203  	transport = limiter.New(config.UploadLimit, config.DownloadLimit, transport)
   204  
   205  	if config.Debug {
   206  		if strings.EqualFold(config.Signature, "S3v4") {
   207  			transport = httptracer.GetNewTraceTransport(newTraceV4(), transport)
   208  		} else if strings.EqualFold(config.Signature, "S3v2") && withS3v2 {
   209  			transport = httptracer.GetNewTraceTransport(newTraceV2(), transport)
   210  		}
   211  	}
   212  	transport = gzhttp.Transport(transport)
   213  	return transport
   214  }
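
// Illustrative sketch (the Config literal and URL are placeholder assumptions,
// not part of the upstream code): the http.RoundTripper built above can back a
// plain http.Client; wrapping order is rate limiter, optional trace transport,
// then gzhttp.
//
//	cfg := &Config{HostURL: "https://play.min.io", Insecure: false}
//	httpClient := &http.Client{Transport: getTransportForConfig(cfg, false)}
//	_ = httpClient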
   215  
   216  // getCredentialsChainForConfig returns a []credentials.Provider slice for the config
   217  // and the STS configuration (if present).
   218  func getCredentialsChainForConfig(config *Config, transport http.RoundTripper) ([]credentials.Provider, *probe.Error) {
   219  	var credsChain []credentials.Provider
   220  	// if an STS endpoint is set, we will add that to the chain
   221  	if stsEndpoint := env.Get("MC_STS_ENDPOINT_"+config.Alias, ""); stsEndpoint != "" {
   222  		// set AWS_WEB_IDENTITY_TOKEN_FILE if MC_WEB_IDENTITY_TOKEN_FILE is set
   223  		if val := env.Get("MC_WEB_IDENTITY_TOKEN_FILE_"+config.Alias, ""); val != "" {
   224  			os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", val)
   225  			if val := env.Get("MC_ROLE_ARN_"+config.Alias, ""); val != "" {
   226  				os.Setenv("AWS_ROLE_ARN", val)
   227  			}
   228  			if val := env.Get("MC_ROLE_SESSION_NAME_"+config.Alias, randString(32, rand.NewSource(time.Now().UnixNano()), "mc-session-name-")); val != "" {
   229  				os.Setenv("AWS_ROLE_SESSION_NAME", val)
   230  			}
   231  		}
   232  
   233  		stsEndpointURL, err := url.Parse(stsEndpoint)
   234  		if err != nil {
   235  			return nil, probe.NewError(fmt.Errorf("Error parsing sts endpoint: %v", err))
   236  		}
   237  		credsSts := &credentials.IAM{
   238  			Client: &http.Client{
   239  				Transport: transport,
   240  			},
   241  			Endpoint: stsEndpointURL.String(),
   242  		}
   243  		credsChain = append(credsChain, credsSts)
   244  	}
   245  
   246  	signType := credentials.SignatureV4
   247  	if strings.EqualFold(config.Signature, "s3v2") {
   248  		signType = credentials.SignatureV2
   249  	}
   250  
   251  	// Credentials
   252  	creds := &credentials.Static{
   253  		Value: credentials.Value{
   254  			AccessKeyID:     config.AccessKey,
   255  			SecretAccessKey: config.SecretKey,
   256  			SessionToken:    config.SessionToken,
   257  			SignerType:      signType,
   258  		},
   259  	}
   260  	credsChain = append(credsChain, creds)
   261  	return credsChain, nil
   262  }
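
// Illustrative sketch (alias, endpoint and keys are placeholder assumptions):
// with an alias-scoped MC_STS_ENDPOINT_<alias> exported, the chain built above
// tries the STS/IAM provider first and then falls back to the static keys.
//
//	// export MC_STS_ENDPOINT_myalias=https://sts.example.com
//	// export MC_WEB_IDENTITY_TOKEN_FILE_myalias=/var/run/secrets/token
//	cfg := &Config{Alias: "myalias", AccessKey: "AK", SecretKey: "SK"}
//	chain, perr := getCredentialsChainForConfig(cfg, http.DefaultTransport)
//	if perr == nil {
//		creds := credentials.NewChainCredentials(chain)
//		_ = creds
//	}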
   263  
   264  // newFactory encloses New function with client cache.
   265  func newFactory() func(config *Config) (Client, *probe.Error) {
   266  	clientCache := make(map[uint32]*minio.Client)
   267  	var mutex sync.Mutex
   268  
   269  	// Return New function.
   270  	return func(config *Config) (Client, *probe.Error) {
   271  		// Creates a parsed URL.
   272  		targetURL := newClientURL(config.HostURL)
   273  
   274  		// Save if target supports virtual host style.
   275  		hostName := targetURL.Host
   276  
   277  		confSum := getConfigHash(config)
   278  
   279  		useTLS := isHostTLS(config)
   280  
   281  		// Instantiate s3
   282  		s3Clnt := &S3Client{}
   283  		// Save the target URL.
   284  		s3Clnt.targetURL = targetURL
   285  
   286  		s3Clnt.virtualStyle = isVirtualHostStyle(hostName, config.Lookup)
   287  		isS3AcceleratedEndpoint := isAmazonAccelerated(hostName)
   288  
   289  		if s3Clnt.virtualStyle {
   290  			// If Google URL replace it with 'storage.googleapis.com'
   291  			if isGoogle(hostName) {
   292  				hostName = googleHostName
   293  			}
   294  		}
   295  
   296  		// Lookup previous cache by hash.
   297  		mutex.Lock()
   298  		defer mutex.Unlock()
   299  		var api *minio.Client
   300  		var found bool
   301  		if api, found = clientCache[confSum]; !found {
   302  
   303  			transport := getTransportForConfig(config, true)
   304  
   305  			credsChain, err := getCredentialsChainForConfig(config, transport)
   306  			if err != nil {
   307  				return nil, err
   308  			}
   309  
   310  			var e error
   311  
   312  			options := minio.Options{
   313  				Creds:        credentials.NewChainCredentials(credsChain),
   314  				Secure:       useTLS,
   315  				Region:       env.Get("MC_REGION", env.Get("AWS_REGION", "")),
   316  				BucketLookup: config.Lookup,
   317  				Transport:    transport,
   318  			}
   319  
   320  			api, e = minio.New(hostName, &options)
   321  			if e != nil {
   322  				return nil, probe.NewError(e)
   323  			}
   324  
   325  			// If Amazon Accelerated URL is requested enable it.
   326  			if isS3AcceleratedEndpoint {
   327  				api.SetS3TransferAccelerate(amazonHostNameAccelerated)
   328  			}
   329  
   330  			// Set app info.
   331  			api.SetAppInfo(config.AppName, config.AppVersion)
   332  
   333  			// Cache the new MinIO Client with hash of config as key.
   334  			clientCache[confSum] = api
   335  		}
   336  
   337  		// Store the new api object.
   338  		s3Clnt.api = api
   339  
   340  		return s3Clnt, nil
   341  	}
   342  }
   343  
   344  // S3New returns an initialized S3Client structure. If debug is enabled,
   345  // it also enables an internal trace transport.
   346  var S3New = newFactory()
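
// Illustrative usage (field values are placeholder assumptions): configs that
// hash to the same host and credentials share one cached *minio.Client.
//
//	cfg := &Config{
//		HostURL:   "https://play.min.io/mybucket",
//		AccessKey: "ACCESS",
//		SecretKey: "SECRET",
//		Signature: "S3v4",
//	}
//	clnt, perr := S3New(cfg)
//	if perr != nil {
//		// handle *probe.Error
//	}
//	_ = clnt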
   347  
   348  // GetURL returns the target URL.
   349  func (c *S3Client) GetURL() ClientURL {
   350  	return c.targetURL.Clone()
   351  }
   352  
   353  // AddNotificationConfig - Add bucket notification
   354  func (c *S3Client) AddNotificationConfig(ctx context.Context, arn string, events []string, prefix, suffix string, ignoreExisting bool) *probe.Error {
   355  	bucket, _ := c.url2BucketAndObject()
   356  
   357  	accountArn, err := notification.NewArnFromString(arn)
   358  	if err != nil {
   359  		return probe.NewError(invalidArgumentErr(err)).Untrace()
   360  	}
   361  	nc := notification.NewConfig(accountArn)
   362  
   363  	// Get any enabled notification.
   364  	mb, e := c.api.GetBucketNotification(ctx, bucket)
   365  	if e != nil {
   366  		return probe.NewError(e)
   367  	}
   368  
   369  	// Configure events
   370  	for _, event := range events {
   371  		switch event {
   372  		case "put":
   373  			nc.AddEvents(notification.ObjectCreatedAll)
   374  		case "delete":
   375  			nc.AddEvents(notification.ObjectRemovedAll)
   376  		case "get":
   377  			nc.AddEvents(notification.ObjectAccessedAll)
   378  		case "replica":
   379  			nc.AddEvents(notification.EventType("s3:Replication:*"))
   380  		case "ilm":
   381  			nc.AddEvents(notification.EventType("s3:ObjectRestore:*"))
   382  			nc.AddEvents(notification.EventType("s3:ObjectTransition:*"))
   383  		case "scanner":
   384  			nc.AddEvents(notification.EventType("s3:Scanner:ManyVersions"))
   385  			nc.AddEvents(notification.EventType("s3:Scanner:BigPrefix"))
   386  		default:
   387  			return errInvalidArgument().Trace(events...)
   388  		}
   389  	}
   390  	if prefix != "" {
   391  		nc.AddFilterPrefix(prefix)
   392  	}
   393  	if suffix != "" {
   394  		nc.AddFilterSuffix(suffix)
   395  	}
   396  
   397  	switch accountArn.Service {
   398  	case "sns":
   399  		if !mb.AddTopic(nc) {
   400  			return errInvalidArgument().Trace("Overlapping Topic configs")
   401  		}
   402  	case "sqs":
   403  		if !mb.AddQueue(nc) {
   404  			return errInvalidArgument().Trace("Overlapping Queue configs")
   405  		}
   406  	case "lambda":
   407  		if !mb.AddLambda(nc) {
   408  			return errInvalidArgument().Trace("Overlapping lambda configs")
   409  		}
   410  	default:
   411  		return errInvalidArgument().Trace(accountArn.Service)
   412  	}
   413  
   414  	// Set the new bucket configuration
   415  	if err := c.api.SetBucketNotification(ctx, bucket, mb); err != nil {
   416  		if ignoreExisting && strings.Contains(err.Error(), "An object key name filtering rule defined with overlapping prefixes, overlapping suffixes, or overlapping combinations of prefixes and suffixes for the same event types") {
   417  			return nil
   418  		}
   419  		return probe.NewError(err)
   420  	}
   421  	return nil
   422  }
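
// Illustrative sketch (clnt (*S3Client), ctx and the ARN are placeholder
// assumptions): registering an SQS target for object-created events under a
// prefix/suffix filter, per the event mapping above.
//
//	arn := "arn:minio:sqs:us-east-1:1:webhook"
//	if perr := clnt.AddNotificationConfig(ctx, arn, []string{"put"}, "photos/", ".jpg", false); perr != nil {
//		// handle *probe.Error
//	}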
   423  
   424  // RemoveNotificationConfig - Remove bucket notification
   425  func (c *S3Client) RemoveNotificationConfig(ctx context.Context, arn, event, prefix, suffix string) *probe.Error {
   426  	bucket, _ := c.url2BucketAndObject()
   427  	// Remove all notification configs if arn is empty
   428  	if arn == "" {
   429  		if err := c.api.RemoveAllBucketNotification(ctx, bucket); err != nil {
   430  			return probe.NewError(err)
   431  		}
   432  		return nil
   433  	}
   434  
   435  	mb, e := c.api.GetBucketNotification(ctx, bucket)
   436  	if e != nil {
   437  		return probe.NewError(e)
   438  	}
   439  
   440  	accountArn, err := notification.NewArnFromString(arn)
   441  	if err != nil {
   442  		return probe.NewError(invalidArgumentErr(err)).Untrace()
   443  	}
   444  
   445  	// if we are passed filters for either events, suffix or prefix, then only delete the single event that matches
   446  	// the arguments
   447  	if event != "" || suffix != "" || prefix != "" {
   448  		// Translate events to type events for comparison
   449  		events := strings.Split(event, ",")
   450  		var eventsTyped []notification.EventType
   451  		for _, e := range events {
   452  			switch e {
   453  			case "put":
   454  				eventsTyped = append(eventsTyped, notification.ObjectCreatedAll)
   455  			case "delete":
   456  				eventsTyped = append(eventsTyped, notification.ObjectRemovedAll)
   457  			case "get":
   458  				eventsTyped = append(eventsTyped, notification.ObjectAccessedAll)
   459  			case "replica":
   460  				eventsTyped = append(eventsTyped, notification.EventType("s3:Replication:*"))
   461  			case "ilm":
   462  				eventsTyped = append(eventsTyped, notification.EventType("s3:ObjectRestore:*"))
   463  				eventsTyped = append(eventsTyped, notification.EventType("s3:ObjectTransition:*"))
   464  			case "scanner":
   465  				eventsTyped = append(eventsTyped, notification.EventType("s3:Scanner:ManyVersions"))
   466  				eventsTyped = append(eventsTyped, notification.EventType("s3:Scanner:BigPrefix"))
   467  			default:
   468  				return errInvalidArgument().Trace(events...)
   469  			}
   470  		}
   471  		var err error
   472  		// based on the arn type, we'll look for the event in the corresponding sublist and delete it if there's a match
   473  		switch accountArn.Service {
   474  		case "sns":
   475  			err = mb.RemoveTopicByArnEventsPrefixSuffix(accountArn, eventsTyped, prefix, suffix)
   476  		case "sqs":
   477  			err = mb.RemoveQueueByArnEventsPrefixSuffix(accountArn, eventsTyped, prefix, suffix)
   478  		case "lambda":
   479  			err = mb.RemoveLambdaByArnEventsPrefixSuffix(accountArn, eventsTyped, prefix, suffix)
   480  		default:
   481  			return errInvalidArgument().Trace(accountArn.Service)
   482  		}
   483  		if err != nil {
   484  			return probe.NewError(err)
   485  		}
   486  
   487  	} else {
   488  		// remove all events for matching arn
   489  		switch accountArn.Service {
   490  		case "sns":
   491  			mb.RemoveTopicByArn(accountArn)
   492  		case "sqs":
   493  			mb.RemoveQueueByArn(accountArn)
   494  		case "lambda":
   495  			mb.RemoveLambdaByArn(accountArn)
   496  		default:
   497  			return errInvalidArgument().Trace(accountArn.Service)
   498  		}
   499  	}
   500  
   501  	// Set the new bucket configuration
   502  	if e := c.api.SetBucketNotification(ctx, bucket, mb); e != nil {
   503  		return probe.NewError(e)
   504  	}
   505  	return nil
   506  }
   507  
   508  // NotificationConfig notification config
   509  type NotificationConfig struct {
   510  	ID     string   `json:"id"`
   511  	Arn    string   `json:"arn"`
   512  	Events []string `json:"events"`
   513  	Prefix string   `json:"prefix"`
   514  	Suffix string   `json:"suffix"`
   515  }
   516  
   517  // ListNotificationConfigs - List notification configs
   518  func (c *S3Client) ListNotificationConfigs(ctx context.Context, arn string) ([]NotificationConfig, *probe.Error) {
   519  	var configs []NotificationConfig
   520  	bucket, _ := c.url2BucketAndObject()
   521  	mb, e := c.api.GetBucketNotification(ctx, bucket)
   522  	if e != nil {
   523  		return nil, probe.NewError(e)
   524  	}
   525  
   526  	// Generate pretty event names from event types
   527  	prettyEventNames := func(eventsTypes []notification.EventType) []string {
   528  		var result []string
   529  		for _, eventType := range eventsTypes {
   530  			result = append(result, string(eventType))
   531  		}
   532  		return result
   533  	}
   534  
   535  	getFilters := func(config notification.Config) (prefix, suffix string) {
   536  		if config.Filter == nil {
   537  			return
   538  		}
   539  		for _, filter := range config.Filter.S3Key.FilterRules {
   540  			if strings.ToLower(filter.Name) == "prefix" {
   541  				prefix = filter.Value
   542  			}
   543  			if strings.ToLower(filter.Name) == "suffix" {
   544  				suffix = filter.Value
   545  			}
   546  
   547  		}
   548  		return prefix, suffix
   549  	}
   550  
   551  	for _, config := range mb.TopicConfigs {
   552  		if arn != "" && config.Topic != arn {
   553  			continue
   554  		}
   555  		prefix, suffix := getFilters(config.Config)
   556  		configs = append(configs, NotificationConfig{
   557  			ID:     config.ID,
   558  			Arn:    config.Topic,
   559  			Events: prettyEventNames(config.Events),
   560  			Prefix: prefix,
   561  			Suffix: suffix,
   562  		})
   563  	}
   564  
   565  	for _, config := range mb.QueueConfigs {
   566  		if arn != "" && config.Queue != arn {
   567  			continue
   568  		}
   569  		prefix, suffix := getFilters(config.Config)
   570  		configs = append(configs, NotificationConfig{
   571  			ID:     config.ID,
   572  			Arn:    config.Queue,
   573  			Events: prettyEventNames(config.Events),
   574  			Prefix: prefix,
   575  			Suffix: suffix,
   576  		})
   577  	}
   578  
   579  	for _, config := range mb.LambdaConfigs {
   580  		if arn != "" && config.Lambda != arn {
   581  			continue
   582  		}
   583  		prefix, suffix := getFilters(config.Config)
   584  		configs = append(configs, NotificationConfig{
   585  			ID:     config.ID,
   586  			Arn:    config.Lambda,
   587  			Events: prettyEventNames(config.Events),
   588  			Prefix: prefix,
   589  			Suffix: suffix,
   590  		})
   591  	}
   592  
   593  	return configs, nil
   594  }
   595  
   596  // Supported content types
   597  var supportedContentTypes = []string{
   598  	"csv",
   599  	"json",
   600  	"gzip",
   601  	"bzip2",
   602  }
   603  
   604  // set the SelectObjectOutputSerialization struct using options passed in by the client. If unspecified,
   605  // fall back to the S3 API defaults.
   606  func selectObjectOutputOpts(selOpts SelectObjectOpts, i minio.SelectObjectInputSerialization) minio.SelectObjectOutputSerialization {
   607  	var isOK bool
   608  	var recDelim, fldDelim, quoteChar, quoteEscChar, qf string
   609  
   610  	o := minio.SelectObjectOutputSerialization{}
   611  	if _, ok := selOpts.OutputSerOpts["json"]; ok {
   612  		jo := minio.JSONOutputOptions{}
   613  		if recDelim, isOK = selOpts.OutputSerOpts["json"][recordDelimiterType]; !isOK {
   614  			recDelim = "\n"
   615  		}
   616  		jo.SetRecordDelimiter(recDelim)
   617  		o.JSON = &jo
   618  	}
   619  	if _, ok := selOpts.OutputSerOpts["csv"]; ok {
   620  		ocsv := minio.CSVOutputOptions{}
   621  		if recDelim, isOK = selOpts.OutputSerOpts["csv"][recordDelimiterType]; !isOK {
   622  			recDelim = defaultRecordDelimiter
   623  		}
   624  		ocsv.SetRecordDelimiter(recDelim)
   625  		if fldDelim, isOK = selOpts.OutputSerOpts["csv"][fieldDelimiterType]; !isOK {
   626  			fldDelim = defaultFieldDelimiter
   627  		}
   628  		ocsv.SetFieldDelimiter(fldDelim)
   629  		if quoteChar, isOK = selOpts.OutputSerOpts["csv"][quoteCharacterType]; isOK {
   630  			ocsv.SetQuoteCharacter(quoteChar)
   631  		}
   632  		if quoteEscChar, isOK = selOpts.OutputSerOpts["csv"][quoteEscapeCharacterType]; isOK {
   633  			ocsv.SetQuoteEscapeCharacter(quoteEscChar)
   634  		}
   635  		if qf, isOK = selOpts.OutputSerOpts["csv"][quoteFieldsType]; isOK {
   636  			ocsv.SetQuoteFields(minio.CSVQuoteFields(qf))
   637  		}
   638  		o.CSV = &ocsv
   639  	}
   640  	// default to CSV output if options left unspecified
   641  	if o.CSV == nil && o.JSON == nil {
   642  		if i.JSON != nil {
   643  			j := minio.JSONOutputOptions{}
   644  			j.SetRecordDelimiter("\n")
   645  			o.JSON = &j
   646  		} else {
   647  			ocsv := minio.CSVOutputOptions{}
   648  			ocsv.SetRecordDelimiter(defaultRecordDelimiter)
   649  			ocsv.SetFieldDelimiter(defaultFieldDelimiter)
   650  			o.CSV = &ocsv
   651  		}
   652  	}
   653  	return o
   654  }
   655  
   656  func trimCompressionFileExts(name string) string {
   657  	return strings.TrimSuffix(strings.TrimSuffix(strings.TrimSuffix(name, ".gz"), ".bz"), ".bz2")
   658  }
   659  
   660  // set the SelectObjectInputSerialization struct using options passed in by the client. If unspecified,
   661  // fall back to the S3 API defaults.
   662  func selectObjectInputOpts(selOpts SelectObjectOpts, object string) minio.SelectObjectInputSerialization {
   663  	var isOK bool
   664  	var recDelim, fldDelim, quoteChar, quoteEscChar, fileHeader, commentChar, typ string
   665  
   666  	i := minio.SelectObjectInputSerialization{}
   667  	if _, ok := selOpts.InputSerOpts["parquet"]; ok {
   668  		iparquet := minio.ParquetInputOptions{}
   669  		i.Parquet = &iparquet
   670  	}
   671  	if _, ok := selOpts.InputSerOpts["json"]; ok {
   672  		j := minio.JSONInputOptions{}
   673  		if typ = selOpts.InputSerOpts["json"][typeJSONType]; typ != "" {
   674  			j.SetType(minio.JSONType(typ))
   675  		}
   676  		i.JSON = &j
   677  	}
   678  	if _, ok := selOpts.InputSerOpts["csv"]; ok {
   679  		icsv := minio.CSVInputOptions{}
   680  		icsv.SetRecordDelimiter(defaultRecordDelimiter)
   681  		if recDelim, isOK = selOpts.InputSerOpts["csv"][recordDelimiterType]; isOK {
   682  			icsv.SetRecordDelimiter(recDelim)
   683  		}
   684  		if fldDelim, isOK = selOpts.InputSerOpts["csv"][fieldDelimiterType]; isOK {
   685  			icsv.SetFieldDelimiter(fldDelim)
   686  		}
   687  		if quoteChar, isOK = selOpts.InputSerOpts["csv"][quoteCharacterType]; isOK {
   688  			icsv.SetQuoteCharacter(quoteChar)
   689  		}
   690  		if quoteEscChar, isOK = selOpts.InputSerOpts["csv"][quoteEscapeCharacterType]; isOK {
   691  			icsv.SetQuoteEscapeCharacter(quoteEscChar)
   692  		}
   693  		if fileHeader, isOK = selOpts.InputSerOpts["csv"][fileHeaderType]; isOK {
   694  			icsv.SetFileHeaderInfo(minio.CSVFileHeaderInfo(fileHeader))
   695  		}
   696  		if commentChar, isOK = selOpts.InputSerOpts["csv"][commentCharType]; isOK {
   697  			icsv.SetComments(commentChar)
   698  		}
   699  		i.CSV = &icsv
   700  	}
   701  	if i.CSV == nil && i.JSON == nil && i.Parquet == nil {
   702  		ext := filepath.Ext(trimCompressionFileExts(object))
   703  		if strings.Contains(ext, "csv") {
   704  			icsv := minio.CSVInputOptions{}
   705  			icsv.SetRecordDelimiter(defaultRecordDelimiter)
   706  			icsv.SetFieldDelimiter(defaultFieldDelimiter)
   707  			icsv.SetFileHeaderInfo(minio.CSVFileHeaderInfoUse)
   708  			i.CSV = &icsv
   709  		}
   710  		if strings.Contains(ext, "parquet") || strings.Contains(object, ".parquet") {
   711  			iparquet := minio.ParquetInputOptions{}
   712  			i.Parquet = &iparquet
   713  		}
   714  		if strings.Contains(ext, "json") {
   715  			ijson := minio.JSONInputOptions{}
   716  			ijson.SetType(minio.JSONLinesType)
   717  			i.JSON = &ijson
   718  		}
   719  	}
   720  	if i.CompressionType == "" {
   721  		i.CompressionType = selectCompressionType(selOpts, object)
   722  	}
   723  	return i
   724  }
   725  
   726  // get the client-specified compression type, or derive the default compression type from the file extension
   727  func selectCompressionType(selOpts SelectObjectOpts, object string) minio.SelectCompressionType {
   728  	ext := filepath.Ext(object)
   729  	contentType := mimedb.TypeByExtension(ext)
   730  
   731  	if selOpts.CompressionType != "" {
   732  		return selOpts.CompressionType
   733  	}
   734  	if strings.Contains(ext, "parquet") || strings.Contains(object, ".parquet") {
   735  		return minio.SelectCompressionNONE
   736  	}
   737  	if contentType != "" {
   738  		if strings.Contains(contentType, "gzip") {
   739  			return minio.SelectCompressionGZIP
   740  		} else if strings.Contains(contentType, "bzip") {
   741  			return minio.SelectCompressionBZIP
   742  		}
   743  	}
   744  	return minio.SelectCompressionNONE
   745  }
   746  
   747  // Select - select object content wrapper.
   748  func (c *S3Client) Select(ctx context.Context, expression string, sse encrypt.ServerSide, selOpts SelectObjectOpts) (io.ReadCloser, *probe.Error) {
   749  	opts := minio.SelectObjectOptions{
   750  		Expression:     expression,
   751  		ExpressionType: minio.QueryExpressionTypeSQL,
   752  		// Set any encryption headers
   753  		ServerSideEncryption: sse,
   754  	}
   755  
   756  	bucket, object := c.url2BucketAndObject()
   757  
   758  	opts.InputSerialization = selectObjectInputOpts(selOpts, object)
   759  	opts.OutputSerialization = selectObjectOutputOpts(selOpts, opts.InputSerialization)
   760  	reader, e := c.api.SelectObjectContent(ctx, bucket, object, opts)
   761  	if e != nil {
   762  		return nil, probe.NewError(e)
   763  	}
   764  	return reader, nil
   765  }
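
// Illustrative sketch (clnt (*S3Client) and ctx are assumed to be in scope):
// with a zero-value SelectObjectOpts, the input format is inferred from the
// object extension and the output defaults to CSV or JSON lines as selected above.
//
//	rc, perr := clnt.Select(ctx, "select * from s3object", nil, SelectObjectOpts{})
//	if perr == nil {
//		defer rc.Close()
//		io.Copy(os.Stdout, rc)
//	}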
   766  
   767  func (c *S3Client) notificationToEventsInfo(ninfo notification.Info) []EventInfo {
   768  	eventsInfo := make([]EventInfo, len(ninfo.Records))
   769  	for i, record := range ninfo.Records {
   770  		bucketName := record.S3.Bucket.Name
   771  		var key string
   772  		// Unescape only if needed, look for URL encoded content.
   773  		if strings.Contains(record.S3.Object.Key, "%2F") {
   774  			var e error
   775  			key, e = url.QueryUnescape(record.S3.Object.Key)
   776  			if e != nil {
   777  				key = record.S3.Object.Key
   778  			}
   779  		} else {
   780  			key = record.S3.Object.Key
   781  		}
   782  		u := c.targetURL.Clone()
   783  		u.Path = path.Join(string(u.Separator), bucketName, key)
   784  		if strings.HasPrefix(record.EventName, "s3:ObjectCreated:") {
   785  			if strings.HasPrefix(record.EventName, "s3:ObjectCreated:Copy") {
   786  				eventsInfo[i] = EventInfo{
   787  					Time:         record.EventTime,
   788  					Size:         record.S3.Object.Size,
   789  					UserMetadata: record.S3.Object.UserMetadata,
   790  					Path:         u.String(),
   791  					Type:         notification.ObjectCreatedCopy,
   792  					Host:         record.Source.Host,
   793  					Port:         record.Source.Port,
   794  					UserAgent:    record.Source.UserAgent,
   795  				}
   796  			} else if strings.HasPrefix(record.EventName, "s3:ObjectCreated:PutRetention") {
   797  				eventsInfo[i] = EventInfo{
   798  					Time:         record.EventTime,
   799  					Size:         record.S3.Object.Size,
   800  					UserMetadata: record.S3.Object.UserMetadata,
   801  					Path:         u.String(),
   802  					Type:         notification.EventType("s3:ObjectCreated:PutRetention"),
   803  					Host:         record.Source.Host,
   804  					Port:         record.Source.Port,
   805  					UserAgent:    record.Source.UserAgent,
   806  				}
   807  			} else if strings.HasPrefix(record.EventName, "s3:ObjectCreated:PutLegalHold") {
   808  				eventsInfo[i] = EventInfo{
   809  					Time:         record.EventTime,
   810  					Size:         record.S3.Object.Size,
   811  					UserMetadata: record.S3.Object.UserMetadata,
   812  					Path:         u.String(),
   813  					Type:         notification.EventType("s3:ObjectCreated:PutLegalHold"),
   814  					Host:         record.Source.Host,
   815  					Port:         record.Source.Port,
   816  					UserAgent:    record.Source.UserAgent,
   817  				}
   818  			} else {
   819  				eventsInfo[i] = EventInfo{
   820  					Time:         record.EventTime,
   821  					Size:         record.S3.Object.Size,
   822  					UserMetadata: record.S3.Object.UserMetadata,
   823  					Path:         u.String(),
   824  					Type:         notification.ObjectCreatedPut,
   825  					Host:         record.Source.Host,
   826  					Port:         record.Source.Port,
   827  					UserAgent:    record.Source.UserAgent,
   828  				}
   829  			}
   830  		} else {
   831  			eventsInfo[i] = EventInfo{
   832  				Time:         record.EventTime,
   833  				Size:         record.S3.Object.Size,
   834  				UserMetadata: record.S3.Object.UserMetadata,
   835  				Path:         u.String(),
   836  				Type:         notification.EventType(record.EventName),
   837  				Host:         record.Source.Host,
   838  				Port:         record.Source.Port,
   839  				UserAgent:    record.Source.UserAgent,
   840  			}
   841  		}
   842  	}
   843  	return eventsInfo
   844  }
   845  
   846  // Watch - Start watching all bucket events for a given account ID.
   847  func (c *S3Client) Watch(ctx context.Context, options WatchOptions) (*WatchObject, *probe.Error) {
   848  	// Extract bucket and object.
   849  	bucket, object := c.url2BucketAndObject()
   850  
   851  	// Validation
   852  	if bucket == "" && object != "" {
   853  		return nil, errInvalidArgument().Trace(bucket, object)
   854  	}
   855  	if object != "" && options.Prefix != "" {
   856  		return nil, errInvalidArgument().Trace(options.Prefix, object)
   857  	}
   858  
   859  	// Build the set of events to listen for.
   860  	var events []string
   861  	for _, event := range options.Events {
   862  		switch event {
   863  		case "put":
   864  			events = append(events, string(notification.ObjectCreatedAll))
   865  		case "delete":
   866  			events = append(events, string(notification.ObjectRemovedAll))
   867  		case "get":
   868  			events = append(events, string(notification.ObjectAccessedAll))
   869  		case "replica":
   870  			events = append(events, "s3:Replication:*") // TODO: add it to minio-go as constant
   871  		case "ilm":
   872  			events = append(events, "s3:ObjectRestore:*", "s3:ObjectTransition:*") // TODO: add it to minio-go as constant
   873  		case "bucket-creation":
   874  			events = append(events, string(notification.BucketCreatedAll))
   875  		case "bucket-removal":
   876  			events = append(events, string(notification.BucketRemovedAll))
   877  		case "scanner":
   878  			events = append(events, "s3:Scanner:ManyVersions", "s3:Scanner:BigPrefix")
   879  		default:
   880  			return nil, errInvalidArgument().Trace(event)
   881  		}
   882  	}
   883  
   884  	wo := &WatchObject{
   885  		EventInfoChan: make(chan []EventInfo),
   886  		ErrorChan:     make(chan *probe.Error),
   887  		DoneChan:      make(chan struct{}),
   888  	}
   889  
   890  	listenCtx, listenCancel := context.WithCancel(ctx)
   891  
   892  	var eventsCh <-chan notification.Info
   893  	if bucket != "" {
   894  		if object != "" && options.Prefix == "" {
   895  			options.Prefix = object
   896  		}
   897  		eventsCh = c.api.ListenBucketNotification(listenCtx, bucket, options.Prefix, options.Suffix, events)
   898  	} else {
   899  		eventsCh = c.api.ListenNotification(listenCtx, "", "", events)
   900  	}
   901  
   902  	go func() {
   903  		defer close(wo.EventInfoChan)
   904  		defer close(wo.ErrorChan)
   905  		defer listenCancel()
   906  
   907  		for {
   908  			// Start listening on all bucket events.
   909  			select {
   910  			case notificationInfo, ok := <-eventsCh:
   911  				if !ok {
   912  					return
   913  				}
   914  				if notificationInfo.Err != nil {
   915  					var perr *probe.Error
   916  					if minio.ToErrorResponse(notificationInfo.Err).Code == "NotImplemented" {
   917  						perr = probe.NewError(APINotImplemented{
   918  							API:     "Watch",
   919  							APIType: c.GetURL().String(),
   920  						})
   921  					} else {
   922  						perr = probe.NewError(notificationInfo.Err)
   923  					}
   924  					wo.Errors() <- perr
   925  				} else {
   926  					wo.Events() <- c.notificationToEventsInfo(notificationInfo)
   927  				}
   928  			case <-wo.DoneChan:
   929  				return
   930  			}
   931  		}
   932  	}()
   933  
   934  	return wo, nil
   935  }
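
// Illustrative sketch (clnt (*S3Client) and ctx are assumed to be in scope):
// consuming the watch channels returned above; the listener stops when ctx is
// canceled or DoneChan is signaled.
//
//	wo, perr := clnt.Watch(ctx, WatchOptions{Events: []string{"put", "delete"}})
//	if perr == nil {
//		go func() {
//			for events := range wo.Events() {
//				_ = events // handle []EventInfo batches
//			}
//		}()
//		for err := range wo.Errors() {
//			_ = err // handle *probe.Error
//		}
//	}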
   936  
   937  // Get - get object with GET options.
   938  func (c *S3Client) Get(ctx context.Context, opts GetOptions) (io.ReadCloser, *ClientContent, *probe.Error) {
   939  	bucket, object := c.url2BucketAndObject()
   940  	o := minio.GetObjectOptions{
   941  		ServerSideEncryption: opts.SSE,
   942  		VersionID:            opts.VersionID,
   943  	}
   944  	if opts.Zip {
   945  		o.Set("x-minio-extract", "true")
   946  	}
   947  	if opts.RangeStart != 0 {
   948  		err := o.SetRange(opts.RangeStart, 0)
   949  		if err != nil {
   950  			return nil, nil, probe.NewError(err)
   951  		}
   952  	}
   953  	// Disallow automatic decompression for some objects with content-encoding set.
   954  	o.Set("Accept-Encoding", "identity")
   955  
   956  	reader, e := c.api.GetObject(ctx, bucket, object, o)
   957  	if e != nil {
   958  		errResponse := minio.ToErrorResponse(e)
   959  		if errResponse.Code == "NoSuchBucket" {
   960  			return nil, nil, probe.NewError(BucketDoesNotExist{
   961  				Bucket: bucket,
   962  			})
   963  		}
   964  		if errResponse.Code == "InvalidBucketName" {
   965  			return nil, nil, probe.NewError(BucketInvalid{
   966  				Bucket: bucket,
   967  			})
   968  		}
   969  		if errResponse.Code == "NoSuchKey" {
   970  			return nil, nil, probe.NewError(ObjectMissing{})
   971  		}
   972  		return nil, nil, probe.NewError(e)
   973  	}
   974  	objStat, e := reader.Stat()
   975  	if e != nil {
   976  		errResponse := minio.ToErrorResponse(e)
   977  		if errResponse.Code == "NoSuchBucket" {
   978  			return nil, nil, probe.NewError(BucketDoesNotExist{
   979  				Bucket: bucket,
   980  			})
   981  		}
   982  		if errResponse.Code == "InvalidBucketName" {
   983  			return nil, nil, probe.NewError(BucketInvalid{
   984  				Bucket: bucket,
   985  			})
   986  		}
   987  		if errResponse.Code == "NoSuchKey" {
   988  			return nil, nil, probe.NewError(ObjectMissing{})
   989  		}
   990  		return nil, nil, probe.NewError(e)
   991  	}
   992  	return reader, c.objectInfo2ClientContent(bucket, objStat), nil
   993  }
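
// Illustrative sketch (clnt (*S3Client) and ctx are assumed to be in scope):
// fetching an object together with the metadata derived above.
//
//	rc, content, perr := clnt.Get(ctx, GetOptions{})
//	if perr == nil {
//		defer rc.Close()
//		_ = content // *ClientContent describing the object
//	}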
   994  
   995  // Copy - copy object, uses the server-side copy API. Also uses an abstracted API
   996  // such that large objects are copied in a multipart manner on the server
   997  // side.
   998  func (c *S3Client) Copy(ctx context.Context, source string, opts CopyOptions, progress io.Reader) *probe.Error {
   999  	dstBucket, dstObject := c.url2BucketAndObject()
  1000  	if dstBucket == "" {
  1001  		return probe.NewError(BucketNameEmpty{})
  1002  	}
  1003  
  1004  	metadata := make(map[string]string, len(opts.metadata))
  1005  	for k, v := range opts.metadata {
  1006  		metadata[k] = v
  1007  	}
  1008  
  1009  	delete(metadata, "X-Amz-Storage-Class")
  1010  	if opts.storageClass != "" {
  1011  		metadata["X-Amz-Storage-Class"] = opts.storageClass
  1012  	}
  1013  
  1014  	tokens := splitStr(source, string(c.targetURL.Separator), 3)
  1015  
  1016  	// Source object
  1017  	srcOpts := minio.CopySrcOptions{
  1018  		Bucket:     tokens[1],
  1019  		Object:     tokens[2],
  1020  		Encryption: opts.srcSSE,
  1021  		VersionID:  opts.versionID,
  1022  	}
  1023  
  1024  	destOpts := minio.CopyDestOptions{
  1025  		Bucket:     dstBucket,
  1026  		Object:     dstObject,
  1027  		Encryption: opts.tgtSSE,
  1028  		Progress:   progress,
  1029  		Size:       opts.size,
  1030  	}
  1031  
  1032  	if lockModeStr, ok := metadata[AmzObjectLockMode]; ok {
  1033  		destOpts.Mode = minio.RetentionMode(strings.ToUpper(lockModeStr))
  1034  		delete(metadata, AmzObjectLockMode)
  1035  	}
  1036  
  1037  	if retainUntilDateStr, ok := metadata[AmzObjectLockRetainUntilDate]; ok {
  1038  		delete(metadata, AmzObjectLockRetainUntilDate)
  1039  		if t, e := time.Parse(time.RFC3339, retainUntilDateStr); e == nil {
  1040  			destOpts.RetainUntilDate = t.UTC()
  1041  		}
  1042  	}
  1043  
  1044  	if lh, ok := metadata[AmzObjectLockLegalHold]; ok {
  1045  		destOpts.LegalHold = minio.LegalHoldStatus(lh)
  1046  		delete(metadata, AmzObjectLockLegalHold)
  1047  	}
  1048  
  1049  	// Assign metadata after irrelevant parts are deleted above
  1050  	destOpts.UserMetadata = metadata
  1051  	destOpts.ReplaceMetadata = len(metadata) > 0
  1052  
  1053  	var e error
  1054  	if opts.disableMultipart || opts.size < 64*1024*1024 {
  1055  		_, e = c.api.CopyObject(ctx, destOpts, srcOpts)
  1056  	} else {
  1057  		_, e = c.api.ComposeObject(ctx, destOpts, srcOpts)
  1058  	}
  1059  
  1060  	if e != nil {
  1061  		errResponse := minio.ToErrorResponse(e)
  1062  		if errResponse.Code == "AccessDenied" {
  1063  			return probe.NewError(PathInsufficientPermission{
  1064  				Path: c.targetURL.String(),
  1065  			})
  1066  		}
  1067  		if errResponse.Code == "NoSuchBucket" {
  1068  			return probe.NewError(BucketDoesNotExist{
  1069  				Bucket: dstBucket,
  1070  			})
  1071  		}
  1072  		if errResponse.Code == "InvalidBucketName" {
  1073  			return probe.NewError(BucketInvalid{
  1074  				Bucket: dstBucket,
  1075  			})
  1076  		}
  1077  		if errResponse.Code == "NoSuchKey" {
  1078  			return probe.NewError(ObjectMissing{})
  1079  		}
  1080  		return probe.NewError(e)
  1081  	}
  1082  	return nil
  1083  }
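
// Illustrative sketch (clnt (*S3Client) and ctx are assumed to be in scope;
// per the splitStr call above, source takes the form "/bucket/object" and
// clnt points at the destination bucket/object):
//
//	if perr := clnt.Copy(ctx, "/srcbucket/srcobject", CopyOptions{}, nil); perr != nil {
//		// handle *probe.Error
//	}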
  1084  
  1085  // Put - upload an object with custom metadata.
  1086  func (c *S3Client) Put(ctx context.Context, reader io.Reader, size int64, progress io.Reader, putOpts PutOptions) (int64, *probe.Error) {
  1087  	bucket, object := c.url2BucketAndObject()
  1088  	if bucket == "" {
  1089  		return 0, probe.NewError(BucketNameEmpty{})
  1090  	}
  1091  
  1092  	metadata := make(map[string]string, len(putOpts.metadata))
  1093  	for k, v := range putOpts.metadata {
  1094  		metadata[k] = v
  1095  	}
  1096  
  1097  	// Do not copy storage class, it needs to be specified in putOpts
  1098  	delete(metadata, "X-Amz-Storage-Class")
  1099  
  1100  	contentType, ok := metadata["Content-Type"]
  1101  	if ok {
  1102  		delete(metadata, "Content-Type")
  1103  	} else {
  1104  		// Set content-type if not specified.
  1105  		contentType = "application/octet-stream"
  1106  	}
  1107  
  1108  	cacheControl, ok := metadata["Cache-Control"]
  1109  	if ok {
  1110  		delete(metadata, "Cache-Control")
  1111  	}
  1112  
  1113  	contentEncoding, ok := metadata["Content-Encoding"]
  1114  	if ok {
  1115  		delete(metadata, "Content-Encoding")
  1116  	}
  1117  
  1118  	contentDisposition, ok := metadata["Content-Disposition"]
  1119  	if ok {
  1120  		delete(metadata, "Content-Disposition")
  1121  	}
  1122  
  1123  	contentLanguage, ok := metadata["Content-Language"]
  1124  	if ok {
  1125  		delete(metadata, "Content-Language")
  1126  	}
  1127  
  1128  	var tagsMap map[string]string
  1129  	tagsHdr, ok := metadata["X-Amz-Tagging"]
  1130  	if ok {
  1131  		tagsSet, e := tags.Parse(tagsHdr, true)
  1132  		if e != nil {
  1133  			return 0, probe.NewError(e)
  1134  		}
  1135  		tagsMap = tagsSet.ToMap()
  1136  		delete(metadata, "X-Amz-Tagging")
  1137  	}
  1138  
  1139  	lockModeStr, ok := metadata[AmzObjectLockMode]
  1140  	lockMode := minio.RetentionMode("")
  1141  	if ok {
  1142  		lockMode = minio.RetentionMode(strings.ToUpper(lockModeStr))
  1143  		delete(metadata, AmzObjectLockMode)
  1144  	}
  1145  
  1146  	retainUntilDate := timeSentinel
  1147  	retainUntilDateStr, ok := metadata[AmzObjectLockRetainUntilDate]
  1148  	if ok {
  1149  		delete(metadata, AmzObjectLockRetainUntilDate)
  1150  		if t, e := time.Parse(time.RFC3339, retainUntilDateStr); e == nil {
  1151  			retainUntilDate = t.UTC()
  1152  		}
  1153  	}
  1154  
  1155  	opts := minio.PutObjectOptions{
  1156  		UserMetadata:          metadata,
  1157  		UserTags:              tagsMap,
  1158  		Progress:              progress,
  1159  		ContentType:           contentType,
  1160  		CacheControl:          cacheControl,
  1161  		ContentDisposition:    contentDisposition,
  1162  		ContentEncoding:       contentEncoding,
  1163  		ContentLanguage:       contentLanguage,
  1164  		StorageClass:          strings.ToUpper(putOpts.storageClass),
  1165  		ServerSideEncryption:  putOpts.sse,
  1166  		SendContentMd5:        putOpts.md5,
  1167  		DisableMultipart:      putOpts.disableMultipart,
  1168  		PartSize:              putOpts.multipartSize,
  1169  		NumThreads:            putOpts.multipartThreads,
  1170  		ConcurrentStreamParts: putOpts.concurrentStream, // if enabled honors NumThreads for piped() uploads
  1171  	}
  1172  
  1173  	if !retainUntilDate.IsZero() && !retainUntilDate.Equal(timeSentinel) {
  1174  		opts.RetainUntilDate = retainUntilDate
  1175  	}
  1176  
  1177  	if lockModeStr != "" {
  1178  		opts.Mode = lockMode
  1179  		opts.SendContentMd5 = true
  1180  	}
  1181  
  1182  	if lh, ok := metadata[AmzObjectLockLegalHold]; ok {
  1183  		delete(metadata, AmzObjectLockLegalHold)
  1184  		opts.LegalHold = minio.LegalHoldStatus(strings.ToUpper(lh))
  1185  		opts.SendContentMd5 = true
  1186  	}
  1187  
  1188  	ui, e := c.api.PutObject(ctx, bucket, object, reader, size, opts)
  1189  	if e != nil {
  1190  		errResponse := minio.ToErrorResponse(e)
  1191  		if errResponse.Code == "UnexpectedEOF" || e == io.EOF {
  1192  			return ui.Size, probe.NewError(UnexpectedEOF{
  1193  				TotalSize:    size,
  1194  				TotalWritten: ui.Size,
  1195  			})
  1196  		}
  1197  		if errResponse.Code == "AccessDenied" {
  1198  			return ui.Size, probe.NewError(PathInsufficientPermission{
  1199  				Path: c.targetURL.String(),
  1200  			})
  1201  		}
  1202  		if errResponse.Code == "MethodNotAllowed" {
  1203  			return ui.Size, probe.NewError(ObjectAlreadyExists{
  1204  				Object: object,
  1205  			})
  1206  		}
  1207  		if errResponse.Code == "XMinioObjectExistsAsDirectory" {
  1208  			return ui.Size, probe.NewError(ObjectAlreadyExistsAsDirectory{
  1209  				Object: object,
  1210  			})
  1211  		}
  1212  		if errResponse.Code == "NoSuchBucket" {
  1213  			return ui.Size, probe.NewError(BucketDoesNotExist{
  1214  				Bucket: bucket,
  1215  			})
  1216  		}
  1217  		if errResponse.Code == "InvalidBucketName" {
  1218  			return ui.Size, probe.NewError(BucketInvalid{
  1219  				Bucket: bucket,
  1220  			})
  1221  		}
  1222  		if errResponse.Code == "NoSuchKey" {
  1223  			return ui.Size, probe.NewError(ObjectMissing{})
  1224  		}
  1225  		return ui.Size, probe.NewError(e)
  1226  	}
  1227  	return ui.Size, nil
  1228  }
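
// Illustrative sketch (clnt (*S3Client) and ctx are assumed to be in scope;
// PutOptions is shown with its zero value for brevity):
//
//	data := strings.NewReader("hello")
//	n, perr := clnt.Put(ctx, data, int64(data.Len()), nil, PutOptions{})
//	if perr != nil {
//		_ = n // bytes written before the failure
//	}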
  1229  
  1230  // PutPart - upload an object with custom metadata. (Same as Put)
  1231  func (c *S3Client) PutPart(ctx context.Context, reader io.Reader, size int64, progress io.Reader, putOpts PutOptions) (int64, *probe.Error) {
  1232  	return c.Put(ctx, reader, size, progress, putOpts)
  1233  }
  1234  
  1235  // Remove incomplete uploads.
  1236  func (c *S3Client) removeIncompleteObjects(ctx context.Context, bucket string, objectsCh <-chan minio.ObjectInfo) <-chan minio.RemoveObjectResult {
  1237  	removeObjectErrorCh := make(chan minio.RemoveObjectResult)
  1238  
  1239  	// Goroutine reads from objectsCh and sends error to removeObjectErrorCh if any.
  1240  	go func() {
  1241  		defer close(removeObjectErrorCh)
  1242  
  1243  		for info := range objectsCh {
  1244  			if err := c.api.RemoveIncompleteUpload(ctx, bucket, info.Key); err != nil {
  1245  				removeObjectErrorCh <- minio.RemoveObjectResult{ObjectName: info.Key, Err: err}
  1246  			}
  1247  		}
  1248  	}()
  1249  
  1250  	return removeObjectErrorCh
  1251  }
  1252  
  1253  // AddUserAgent - add custom user agent.
  1254  func (c *S3Client) AddUserAgent(app, version string) {
  1255  	c.api.SetAppInfo(app, version)
  1256  }
  1257  
  1258  // RemoveResult returns the error or result of the removed objects.
  1259  type RemoveResult struct {
  1260  	minio.RemoveObjectResult
  1261  	BucketName string
  1262  	Err        *probe.Error
  1263  }
  1264  
  1265  // Remove - remove object or bucket(s).
  1266  func (c *S3Client) Remove(ctx context.Context, isIncomplete, isRemoveBucket, isBypass, isForceDel bool, contentCh <-chan *ClientContent) <-chan RemoveResult {
  1267  	resultCh := make(chan RemoveResult)
  1268  
  1269  	prevBucket := ""
  1270  	// Maintain objectsCh, statusCh for each bucket
  1271  	var objectsCh chan minio.ObjectInfo
  1272  	var statusCh <-chan minio.RemoveObjectResult
  1273  	opts := minio.RemoveObjectsOptions{
  1274  		GovernanceBypass: isBypass,
  1275  	}
  1276  
  1277  	go func() {
  1278  		defer close(resultCh)
  1279  
  1280  		if isForceDel {
  1281  			bucket, object := c.url2BucketAndObject()
  1282  			if e := c.api.RemoveObject(ctx, bucket, object, minio.RemoveObjectOptions{
  1283  				ForceDelete: isForceDel,
  1284  			}); e != nil {
  1285  				resultCh <- RemoveResult{
  1286  					Err: probe.NewError(e),
  1287  				}
  1288  				return
  1289  			}
  1290  			resultCh <- RemoveResult{
  1291  				BucketName: bucket,
  1292  				RemoveObjectResult: minio.RemoveObjectResult{
  1293  					ObjectName: object,
  1294  				},
  1295  			}
  1296  			return
  1297  		}
  1298  
  1299  		_, object := c.url2BucketAndObject()
  1300  		if isRemoveBucket && object != "" {
  1301  			resultCh <- RemoveResult{
  1302  				Err: probe.NewError(errors.New(
  1303  					"use `mc rm` command to delete prefixes, or point to your" +
  1304  						" bucket directly, `mc rb <alias>/<bucket-name>/`"),
  1305  				),
  1306  			}
  1307  			return
  1308  		}
  1309  
  1310  		for {
  1311  			select {
  1312  			case <-ctx.Done():
  1313  				resultCh <- RemoveResult{
  1314  					Err: probe.NewError(ctx.Err()),
  1315  				}
  1316  				return
  1317  			case content, ok := <-contentCh:
  1318  				if !ok {
  1319  					goto breakout
  1320  				}
  1321  
  1322  				// Convert content.URL.Path to objectName for objectsCh.
  1323  				bucket, objectName := c.splitPath(content.URL.Path)
  1324  				objectVersionID := content.VersionID
  1325  
  1326  				// We don't process the path when the bucket is
  1327  				// empty, just skip it when that happens.
  1328  				if bucket == "" {
  1329  					continue
  1330  				}
  1331  
  1332  				// Init objectsCh the first time.
  1333  				if prevBucket == "" {
  1334  					objectsCh = make(chan minio.ObjectInfo)
  1335  					prevBucket = bucket
  1336  					if isIncomplete {
  1337  						statusCh = c.removeIncompleteObjects(ctx, bucket, objectsCh)
  1338  					} else {
  1339  						statusCh = c.api.RemoveObjectsWithResult(ctx, bucket, objectsCh, opts)
  1340  					}
  1341  				}
  1342  
  1343  				if prevBucket != bucket {
  1344  					if objectsCh != nil {
  1345  						close(objectsCh)
  1346  					}
  1347  
  1348  					for removeStatus := range statusCh {
  1349  						if removeStatus.Err != nil {
  1350  							resultCh <- RemoveResult{
  1351  								BucketName: bucket,
  1352  								Err:        probe.NewError(removeStatus.Err),
  1353  							}
  1354  						} else {
  1355  							resultCh <- RemoveResult{
  1356  								BucketName:         bucket,
  1357  								RemoveObjectResult: removeStatus,
  1358  							}
  1359  						}
  1360  					}
  1361  
  1362  					// Remove bucket if it qualifies.
  1363  					if isRemoveBucket && !isIncomplete {
  1364  						if e := c.api.RemoveBucket(ctx, prevBucket); e != nil {
  1365  							resultCh <- RemoveResult{
  1366  								BucketName: bucket,
  1367  								Err:        probe.NewError(e),
  1368  							}
  1369  							return
  1370  						}
  1371  					}
  1372  					// Re-init objectsCh for next bucket
  1373  					objectsCh = make(chan minio.ObjectInfo)
  1374  					if isIncomplete {
  1375  						statusCh = c.removeIncompleteObjects(ctx, bucket, objectsCh)
  1376  					} else {
  1377  						statusCh = c.api.RemoveObjectsWithResult(ctx, bucket, objectsCh, opts)
  1378  					}
  1379  					prevBucket = bucket
  1380  				}
  1381  
  1382  				if objectName != "" {
  1383  					// Send the object name once, but continuously check for pending
  1384  					// errors in parallel, because minio-go RemoveObjects
  1385  					// can block if any pending error has not been received yet.
  1386  					sent := false
  1387  					for !sent {
  1388  						select {
  1389  						case objectsCh <- minio.ObjectInfo{
  1390  							Key:       objectName,
  1391  							VersionID: objectVersionID,
  1392  						}:
  1393  							sent = true
  1394  						case removeStatus := <-statusCh:
  1395  							if removeStatus.Err != nil {
  1396  								resultCh <- RemoveResult{
  1397  									BucketName: bucket,
  1398  									Err:        probe.NewError(removeStatus.Err),
  1399  								}
  1400  							} else {
  1401  								resultCh <- RemoveResult{
  1402  									BucketName:         bucket,
  1403  									RemoveObjectResult: removeStatus,
  1404  								}
  1405  							}
  1406  						}
  1407  					}
  1408  				} else {
  1409  					// end of bucket - close the objectsCh
  1410  					if objectsCh != nil {
  1411  						close(objectsCh)
  1412  					}
  1413  					objectsCh = nil
  1414  				}
  1415  			}
  1416  		}
  1417  
  1418  	breakout:
  1419  		// Close objectsCh at end of contentCh
  1420  		if objectsCh != nil {
  1421  			close(objectsCh)
  1422  		}
  1423  		// Write remove objects status to resultCh
  1424  		if statusCh != nil {
  1425  			for removeStatus := range statusCh {
  1426  				if removeStatus.Err != nil {
  1427  					removeStatus.Err = errors.New(strings.Replace(
  1428  						removeStatus.Err.Error(), "Object is WORM protected",
  1429  						"Object, '"+removeStatus.ObjectName+" (Version ID="+
  1430  							removeStatus.ObjectVersionID+")' is WORM protected", 1))
  1431  
  1432  					// If the removeStatus error message is:
  1433  					// "Object is WORM protected and cannot be overwritten",
  1434  					// it is too generic. We have the object's name and vid.
  1435  					// Adding the object's name and version id into the error msg
  1436  					resultCh <- RemoveResult{
  1437  						Err: probe.NewError(removeStatus.Err),
  1438  					}
  1439  				} else {
  1440  					resultCh <- RemoveResult{
  1441  						BucketName:         prevBucket,
  1442  						RemoveObjectResult: removeStatus,
  1443  					}
  1444  				}
  1445  			}
  1446  		}
  1447  		// Remove last bucket if it qualifies.
  1448  		if isRemoveBucket && prevBucket != "" && !isIncomplete {
  1449  			if e := c.api.RemoveBucket(ctx, prevBucket); e != nil {
  1450  				resultCh <- RemoveResult{
  1451  					BucketName: prevBucket,
  1452  					Err:        probe.NewError(e),
  1453  				}
  1454  				return
  1455  			}
  1456  		}
  1457  	}()
  1458  	return resultCh
  1459  }
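
// Illustrative sketch (clnt (*S3Client) and ctx are assumed to be in scope):
// feeding contents to remove and draining the results, mirroring the
// per-bucket batching above.
//
//	contentCh := make(chan *ClientContent)
//	resultCh := clnt.Remove(ctx, false, false, false, false, contentCh)
//	go func() {
//		defer close(contentCh)
//		contentCh <- &ClientContent{URL: clnt.GetURL()}
//	}()
//	for res := range resultCh {
//		if res.Err != nil {
//			// handle removal error
//		}
//	}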
  1460  
  1461  // MakeBucket - make a new bucket.
  1462  func (c *S3Client) MakeBucket(ctx context.Context, region string, ignoreExisting, withLock bool) *probe.Error {
  1463  	bucket, object := c.url2BucketAndObject()
  1464  	if bucket == "" {
  1465  		return probe.NewError(BucketNameEmpty{})
  1466  	}
  1467  	if object != "" {
  1468  		if !strings.HasSuffix(object, string(c.targetURL.Separator)) {
  1469  			object += string(c.targetURL.Separator)
  1470  		}
  1471  		var retried bool
  1472  		for {
  1473  			_, e := c.api.PutObject(ctx, bucket, object, bytes.NewReader([]byte("")), 0,
  1474  				// Always send Content-MD5 to succeed with buckets that have
  1475  				// locking enabled. There is no performance hit since
  1476  				// this is always an empty object.
  1477  				minio.PutObjectOptions{SendContentMd5: true},
  1478  			)
  1479  			if e == nil {
  1480  				return nil
  1481  			}
  1482  			if retried {
  1483  				return probe.NewError(e)
  1484  			}
  1485  			switch minio.ToErrorResponse(e).Code {
  1486  			case "NoSuchBucket":
  1487  				opts := minio.MakeBucketOptions{Region: region, ObjectLocking: withLock}
  1488  				if e = c.api.MakeBucket(ctx, bucket, opts); e != nil {
  1489  					return probe.NewError(e)
  1490  				}
  1491  				retried = true
  1492  				continue
  1493  			}
  1494  			return probe.NewError(e)
  1495  		}
  1496  	}
  1497  
  1498  	var e error
  1499  	opts := minio.MakeBucketOptions{Region: region, ObjectLocking: withLock}
  1500  	if e = c.api.MakeBucket(ctx, bucket, opts); e != nil {
		// Ignore the "bucket already exists" error when the ignoreExisting flag is enabled.
  1502  		if ignoreExisting {
  1503  			switch minio.ToErrorResponse(e).Code {
  1504  			case "BucketAlreadyOwnedByYou":
  1505  				fallthrough
  1506  			case "BucketAlreadyExists":
  1507  				return nil
  1508  			}
  1509  		}
  1510  		return probe.NewError(e)
  1511  	}
  1512  	return nil
  1513  }
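
// Illustrative behaviour of MakeBucket above (hypothetical target, not part of
// the original code):
//
//	target "play/mybucket/dir1/" resolves to bucket "mybucket", object "dir1/"
//	-> PutObject("mybucket", "dir1/", <empty reader>)
//	-> on NoSuchBucket: MakeBucket("mybucket", opts), then retry the PutObject once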
  1514  
  1515  // RemoveBucket removes a bucket, forcibly if asked
  1516  func (c *S3Client) RemoveBucket(ctx context.Context, forceRemove bool) *probe.Error {
  1517  	bucket, object := c.url2BucketAndObject()
  1518  	if bucket == "" {
  1519  		return probe.NewError(BucketNameEmpty{})
  1520  	}
  1521  	if object != "" {
  1522  		return probe.NewError(BucketInvalid{c.joinPath(bucket, object)})
  1523  	}
  1524  
  1525  	opts := minio.RemoveBucketOptions{ForceDelete: forceRemove}
  1526  	if e := c.api.RemoveBucketWithOptions(ctx, bucket, opts); e != nil {
  1527  		return probe.NewError(e)
  1528  	}
  1529  	return nil
  1530  }
  1531  
  1532  // GetAccessRules - get configured policies from the server
  1533  func (c *S3Client) GetAccessRules(ctx context.Context) (map[string]string, *probe.Error) {
  1534  	bucket, object := c.url2BucketAndObject()
  1535  	if bucket == "" {
  1536  		return map[string]string{}, probe.NewError(BucketNameEmpty{})
  1537  	}
  1538  	policies := map[string]string{}
  1539  	policyStr, e := c.api.GetBucketPolicy(ctx, bucket)
  1540  	if e != nil {
  1541  		return nil, probe.NewError(e)
  1542  	}
  1543  	if policyStr == "" {
  1544  		return policies, nil
  1545  	}
  1546  	var p policy.BucketAccessPolicy
  1547  	if e = json.Unmarshal([]byte(policyStr), &p); e != nil {
  1548  		return nil, probe.NewError(e)
  1549  	}
  1550  	policyRules := policy.GetPolicies(p.Statements, bucket, object)
  1551  	// Hide policy data structure at this level
  1552  	for k, v := range policyRules {
  1553  		policies[k] = string(v)
  1554  	}
  1555  	return policies, nil
  1556  }
  1557  
  1558  // GetAccess get access policy permissions.
  1559  func (c *S3Client) GetAccess(ctx context.Context) (string, string, *probe.Error) {
  1560  	bucket, object := c.url2BucketAndObject()
  1561  	if bucket == "" {
  1562  		return "", "", probe.NewError(BucketNameEmpty{})
  1563  	}
  1564  	policyStr, e := c.api.GetBucketPolicy(ctx, bucket)
  1565  	if e != nil {
  1566  		return "", "", probe.NewError(e)
  1567  	}
  1568  	if policyStr == "" {
  1569  		return string(policy.BucketPolicyNone), policyStr, nil
  1570  	}
  1571  	var p policy.BucketAccessPolicy
  1572  	if e = json.Unmarshal([]byte(policyStr), &p); e != nil {
  1573  		return "", "", probe.NewError(e)
  1574  	}
  1575  	pType := string(policy.GetPolicy(p.Statements, bucket, object))
  1576  	if pType == string(policy.BucketPolicyNone) && policyStr != "" {
  1577  		pType = "custom"
  1578  	}
  1579  	return pType, policyStr, nil
  1580  }
  1581  
  1582  // SetAccess set access policy permissions.
  1583  func (c *S3Client) SetAccess(ctx context.Context, bucketPolicy string, isJSON bool) *probe.Error {
  1584  	bucket, object := c.url2BucketAndObject()
  1585  	if bucket == "" {
  1586  		return probe.NewError(BucketNameEmpty{})
  1587  	}
  1588  	if isJSON {
  1589  		if e := c.api.SetBucketPolicy(ctx, bucket, bucketPolicy); e != nil {
  1590  			return probe.NewError(e)
  1591  		}
  1592  		return nil
  1593  	}
  1594  	policyStr, e := c.api.GetBucketPolicy(ctx, bucket)
  1595  	if e != nil {
  1596  		return probe.NewError(e)
  1597  	}
  1598  	p := policy.BucketAccessPolicy{Version: "2012-10-17"}
  1599  	if policyStr != "" {
  1600  		if e = json.Unmarshal([]byte(policyStr), &p); e != nil {
  1601  			return probe.NewError(e)
  1602  		}
  1603  	}
  1604  	p.Statements = policy.SetPolicy(p.Statements, policy.BucketPolicy(bucketPolicy), bucket, object)
  1605  	if len(p.Statements) == 0 {
  1606  		if e = c.api.SetBucketPolicy(ctx, bucket, ""); e != nil {
  1607  			return probe.NewError(e)
  1608  		}
  1609  		return nil
  1610  	}
  1611  	policyB, e := json.Marshal(p)
  1612  	if e != nil {
  1613  		return probe.NewError(e)
  1614  	}
  1615  	if e = c.api.SetBucketPolicy(ctx, bucket, string(policyB)); e != nil {
  1616  		return probe.NewError(e)
  1617  	}
  1618  	return nil
  1619  }
  1620  
  1621  // listObjectWrapper - select ObjectList mode depending on arguments
  1622  func (c *S3Client) listObjectWrapper(ctx context.Context, bucket, object string, isRecursive bool, timeRef time.Time, withVersions, withDeleteMarkers, metadata bool, maxKeys int, zip bool) <-chan minio.ObjectInfo {
  1623  	if !timeRef.IsZero() || withVersions {
  1624  		return c.listVersions(ctx, bucket, object, ListOptions{Recursive: isRecursive, TimeRef: timeRef, WithOlderVersions: withVersions, WithDeleteMarkers: withDeleteMarkers})
  1625  	}
  1626  
  1627  	if isGoogle(c.targetURL.Host) {
		// The Google Cloud Storage S3 layer doesn't implement ListObjectsV2.
  1629  		// https://github.com/minio/mc/issues/3073
  1630  		return c.api.ListObjects(ctx, bucket, minio.ListObjectsOptions{Prefix: object, Recursive: isRecursive, UseV1: true, MaxKeys: maxKeys})
  1631  	}
  1632  	opts := minio.ListObjectsOptions{Prefix: object, Recursive: isRecursive, WithMetadata: metadata, MaxKeys: maxKeys}
  1633  	if zip {
  1634  		// If prefix ends with .zip, add a slash.
  1635  		if strings.HasSuffix(object, ".zip") {
  1636  			opts.Prefix = object + "/"
  1637  		}
  1638  		opts.Set("x-minio-extract", "true")
  1639  	}
  1640  	return c.api.ListObjects(ctx, bucket, opts)
  1641  }
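
// Illustrative mode selection in listObjectWrapper above (informal summary,
// hypothetical arguments):
//
//	timeRef set or withVersions   -> versioned listing via listVersions()
//	Google Cloud Storage endpoint -> ListObjects with UseV1: true (no ListObjectsV2 there)
//	zip == true                   -> "x-minio-extract" header set; "/" is appended when the prefix ends in ".zip"
//	otherwise                     -> regular ListObjects (V2) with the given prefix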
  1642  
  1643  func (c *S3Client) statIncompleteUpload(ctx context.Context, bucket, object string) (*ClientContent, *probe.Error) {
  1644  	nonRecursive := false
  1645  	objectMetadata := &ClientContent{}
  1646  	// Prefix to pass to minio-go listing in order to fetch a given object/directory
  1647  	prefix := strings.TrimRight(object, string(c.targetURL.Separator))
  1648  
  1649  	for objectMultipartInfo := range c.api.ListIncompleteUploads(ctx, bucket, prefix, nonRecursive) {
  1650  		if objectMultipartInfo.Err != nil {
  1651  			return nil, probe.NewError(objectMultipartInfo.Err)
  1652  		}
  1653  
  1654  		if objectMultipartInfo.Key == object {
  1655  			objectMetadata.BucketName = bucket
  1656  			objectMetadata.URL = c.targetURL.Clone()
  1657  			objectMetadata.Time = objectMultipartInfo.Initiated
  1658  			objectMetadata.Size = objectMultipartInfo.Size
  1659  			objectMetadata.Type = os.FileMode(0o664)
  1660  			objectMetadata.Metadata = map[string]string{}
  1661  			return objectMetadata, nil
  1662  		}
  1663  
  1664  		if strings.HasSuffix(objectMultipartInfo.Key, string(c.targetURL.Separator)) {
  1665  			objectMetadata.BucketName = bucket
  1666  			objectMetadata.URL = c.targetURL.Clone()
  1667  			objectMetadata.Type = os.ModeDir
  1668  			objectMetadata.Metadata = map[string]string{}
  1669  			return objectMetadata, nil
  1670  		}
  1671  	}
  1672  	return nil, probe.NewError(ObjectMissing{})
  1673  }
  1674  
// Stat - send a 'HEAD' on a bucket or object to fetch its metadata. It also returns
// a DIR type content if the prefix exists on the server.
  1677  func (c *S3Client) Stat(ctx context.Context, opts StatOptions) (*ClientContent, *probe.Error) {
  1678  	c.Lock()
  1679  	defer c.Unlock()
  1680  	bucket, path := c.url2BucketAndObject()
  1681  
  1682  	// Bucket name cannot be empty, stat on URL has no meaning.
  1683  	if bucket == "" {
  1684  		url := c.targetURL.Clone()
  1685  		url.Path = string(c.targetURL.Separator)
  1686  		return &ClientContent{
  1687  			URL:        url,
  1688  			Size:       0,
  1689  			Type:       os.ModeDir,
  1690  			BucketName: bucket,
  1691  		}, nil
  1692  	}
  1693  
  1694  	if path == "" {
  1695  		content, err := c.bucketStat(ctx, BucketStatOptions{bucket: bucket, ignoreBucketExists: opts.ignoreBucketExists})
  1696  		if err != nil {
  1697  			return nil, err.Trace(bucket)
  1698  		}
  1699  		return content, nil
  1700  	}
  1701  
  1702  	// If the request is for incomplete upload stat, handle it here.
  1703  	if opts.incomplete {
  1704  		return c.statIncompleteUpload(ctx, bucket, path)
  1705  	}
  1706  
	// The following code tries to determine whether a given prefix/object really exists
	// using the minio-go listing API. The following inputs are supported:
  1709  	//     - /path/to/existing/object
  1710  	//     - /path/to/existing_directory
  1711  	//     - /path/to/existing_directory/
  1712  	//     - /path/to/directory_marker
  1713  	//     - /path/to/directory_marker/
  1714  
	// Start with a HEAD request to fetch the object metadata.
	// If the object is not found, continue to look for a directory marker or a prefix.
  1717  	if !strings.HasSuffix(path, string(c.targetURL.Separator)) && opts.timeRef.IsZero() {
  1718  		o := minio.StatObjectOptions{ServerSideEncryption: opts.sse, VersionID: opts.versionID}
  1719  		if opts.isZip {
  1720  			o.Set("x-minio-extract", "true")
  1721  		}
  1722  		ctnt, err := c.getObjectStat(ctx, bucket, path, o)
  1723  		if err == nil {
  1724  			return ctnt, nil
  1725  		}
  1726  		// Ignore object missing error but return for other errors
  1727  		if !errors.As(err.ToGoError(), &ObjectMissing{}) && !errors.As(err.ToGoError(), &ObjectIsDeleteMarker{}) {
  1728  			return nil, err
  1729  		}
  1730  		// The object is not found, look for a directory marker or a prefix
  1731  		path += string(c.targetURL.Separator)
  1732  	}
  1733  
  1734  	nonRecursive := false
  1735  	maxKeys := 1
  1736  	for objectStat := range c.listObjectWrapper(ctx, bucket, path, nonRecursive, opts.timeRef, false, false, false, maxKeys, opts.isZip) {
  1737  		if objectStat.Err != nil {
  1738  			return nil, probe.NewError(objectStat.Err)
  1739  		}
  1740  		// In case of a directory marker
  1741  		if path == objectStat.Key {
  1742  			return c.objectInfo2ClientContent(bucket, objectStat), nil
  1743  		}
  1744  		if strings.HasPrefix(objectStat.Key, path) {
			// An object inside the prefix was found, therefore the prefix exists.
  1746  			return c.prefixInfo2ClientContent(bucket, path), nil
  1747  		}
  1748  	}
  1749  
  1750  	return nil, probe.NewError(ObjectMissing{opts.timeRef})
  1751  }
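
// Illustrative outcomes of Stat above (hypothetical bucket layout with a single
// object "dir/file" and a directory marker "marker/" inside bucket "b"):
//
//	stat on "b/dir/file" -> object metadata from the HEAD request
//	stat on "b/dir"      -> prefix content, since an object under "dir/" exists
//	stat on "b/marker/"  -> directory marker content from the listing
//	stat on "b/missing"  -> ObjectMissing error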
  1752  
  1753  // getObjectStat returns the metadata of an object from a HEAD call.
  1754  func (c *S3Client) getObjectStat(ctx context.Context, bucket, object string, opts minio.StatObjectOptions) (*ClientContent, *probe.Error) {
  1755  	objectStat, e := c.api.StatObject(ctx, bucket, object, opts)
  1756  	objectMetadata := c.objectInfo2ClientContent(bucket, objectStat)
  1757  	if e != nil {
  1758  		errResponse := minio.ToErrorResponse(e)
  1759  		if errResponse.Code == "AccessDenied" {
  1760  			return nil, probe.NewError(PathInsufficientPermission{Path: c.targetURL.String()})
  1761  		}
  1762  		if errResponse.Code == "NoSuchBucket" {
  1763  			return nil, probe.NewError(BucketDoesNotExist{
  1764  				Bucket: bucket,
  1765  			})
  1766  		}
  1767  		if errResponse.Code == "InvalidBucketName" {
  1768  			return nil, probe.NewError(BucketInvalid{
  1769  				Bucket: bucket,
  1770  			})
  1771  		}
  1772  		if errResponse.Code == "NoSuchKey" {
  1773  			if objectMetadata.IsDeleteMarker {
  1774  				return nil, probe.NewError(ObjectIsDeleteMarker{})
  1775  			}
  1776  			return nil, probe.NewError(ObjectMissing{})
  1777  		}
  1778  		return nil, probe.NewError(e)
  1779  	}
	// HEAD with a version ID will not return the version ID in the response headers.
  1781  	if objectMetadata.VersionID == "" {
  1782  		objectMetadata.VersionID = opts.VersionID
  1783  	}
  1784  	return objectMetadata, nil
  1785  }
  1786  
  1787  func isAmazon(host string) bool {
  1788  	return s3utils.IsAmazonEndpoint(url.URL{Host: host})
  1789  }
  1790  
  1791  func isAmazonChina(host string) bool {
  1792  	amazonS3ChinaHost := regexp.MustCompile(`^s3\.(cn.*?)\.amazonaws\.com\.cn$`)
  1793  	parts := amazonS3ChinaHost.FindStringSubmatch(host)
  1794  	return len(parts) > 1
  1795  }
  1796  
  1797  func isAmazonAccelerated(host string) bool {
  1798  	return host == "s3-accelerate.amazonaws.com"
  1799  }
  1800  
  1801  func isGoogle(host string) bool {
  1802  	return s3utils.IsGoogleEndpoint(url.URL{Host: host})
  1803  }
  1804  
// Figure out if the URL is of 'virtual host' style.
// Use the lookup from the config to decide whether DNS (virtual host) or
// path style should be used. If it is set to "auto", use virtual host
// style for supported hosts such as Amazon S3 and Google Cloud Storage.
// Otherwise, default to path style.
  1810  func isVirtualHostStyle(host string, lookup minio.BucketLookupType) bool {
  1811  	if lookup == minio.BucketLookupDNS {
  1812  		return true
  1813  	}
  1814  	if lookup == minio.BucketLookupPath {
  1815  		return false
  1816  	}
  1817  	return isAmazon(host) && !isAmazonChina(host) || isGoogle(host) || isAmazonAccelerated(host)
  1818  }
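
// For illustration (hypothetical hosts; the results assume the usual s3utils
// endpoint matching):
//
//	isVirtualHostStyle("s3.amazonaws.com", minio.BucketLookupAuto)               // true, Amazon S3
//	isVirtualHostStyle("storage.googleapis.com", minio.BucketLookupAuto)         // true, Google Cloud Storage
//	isVirtualHostStyle("s3.cn-north-1.amazonaws.com.cn", minio.BucketLookupAuto) // false, Amazon China
//	isVirtualHostStyle("play.min.io", minio.BucketLookupAuto)                    // false, defaults to path style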
  1819  
  1820  func url2BucketAndObject(u *ClientURL) (bucketName, objectName string) {
  1821  	tokens := splitStr(u.Path, string(u.Separator), 3)
  1822  	return tokens[1], tokens[2]
  1823  }
  1824  
  1825  // url2BucketAndObject gives bucketName and objectName from URL path.
  1826  func (c *S3Client) url2BucketAndObject() (bucketName, objectName string) {
  1827  	return url2BucketAndObject(c.targetURL)
  1828  }
  1829  
// splitPath splits a path into bucket and object.
  1831  func (c *S3Client) splitPath(path string) (bucketName, objectName string) {
  1832  	path = strings.TrimPrefix(path, string(c.targetURL.Separator))
  1833  
	// Handle the path if it's virtual-host style.
  1835  	if c.virtualStyle {
  1836  		hostIndex := strings.Index(c.targetURL.Host, "s3")
  1837  		if hostIndex == -1 {
  1838  			hostIndex = strings.Index(c.targetURL.Host, "s3-accelerate")
  1839  		}
  1840  		if hostIndex == -1 {
  1841  			hostIndex = strings.Index(c.targetURL.Host, "storage.googleapis")
  1842  		}
  1843  		if hostIndex > 0 {
  1844  			bucketName = c.targetURL.Host[:hostIndex-1]
  1845  			objectName = path
  1846  			return bucketName, objectName
  1847  		}
  1848  	}
  1849  
  1850  	tokens := splitStr(path, string(c.targetURL.Separator), 2)
  1851  	return tokens[0], tokens[1]
  1852  }
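
// For illustration (hypothetical virtual-host style target): with host
// "mybucket.s3.amazonaws.com" and path "/dir/object", splitPath returns
// ("mybucket", "dir/object"); the bucket name is cut from the host at the
// first "s3" / "storage.googleapis" match.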
  1853  
  1854  /// Bucket API operations.
  1855  
  1856  func (c *S3Client) listVersions(ctx context.Context, b, o string, opts ListOptions) chan minio.ObjectInfo {
  1857  	objectInfoCh := make(chan minio.ObjectInfo)
  1858  	go func() {
  1859  		defer close(objectInfoCh)
  1860  		c.listVersionsRoutine(ctx, b, o, opts, objectInfoCh)
  1861  	}()
  1862  	return objectInfoCh
  1863  }
  1864  
  1865  func (c *S3Client) listVersionsRoutine(ctx context.Context, b, o string, opts ListOptions, objectInfoCh chan minio.ObjectInfo) {
  1866  	var buckets []string
  1867  	if b == "" {
  1868  		bucketsInfo, err := c.api.ListBuckets(ctx)
  1869  		if err != nil {
  1870  			select {
  1871  			case <-ctx.Done():
  1872  			case objectInfoCh <- minio.ObjectInfo{
  1873  				Err: err,
  1874  			}:
  1875  			}
  1876  			return
  1877  		}
  1878  		for _, b := range bucketsInfo {
  1879  			buckets = append(buckets, b.Name)
  1880  		}
  1881  	} else {
  1882  		buckets = append(buckets, b)
  1883  	}
  1884  
  1885  	for _, b := range buckets {
  1886  		var skipKey string
  1887  		for objectVersion := range c.api.ListObjects(ctx, b, minio.ListObjectsOptions{
  1888  			Prefix:       o,
  1889  			Recursive:    opts.Recursive,
  1890  			WithVersions: true,
  1891  			WithMetadata: opts.WithMetadata,
  1892  		}) {
  1893  			if objectVersion.Err != nil {
  1894  				select {
  1895  				case <-ctx.Done():
  1896  					return
  1897  				case objectInfoCh <- objectVersion:
  1898  				}
  1899  				continue
  1900  			}
  1901  
  1902  			if !opts.WithOlderVersions && skipKey == objectVersion.Key {
				// Skip this version if we are not asked to list all versions
				// and we have already emitted a version of this object key.
  1905  				continue
  1906  			}
  1907  
  1908  			if opts.TimeRef.IsZero() || objectVersion.LastModified.Before(opts.TimeRef) {
  1909  				skipKey = objectVersion.Key
  1910  
  1911  				// Skip if this is a delete marker and we are not asked to list it
  1912  				if !opts.WithDeleteMarkers && objectVersion.IsDeleteMarker {
  1913  					continue
  1914  				}
  1915  
  1916  				select {
  1917  				case <-ctx.Done():
  1918  					return
  1919  				case objectInfoCh <- objectVersion:
  1920  				}
  1921  			}
  1922  		}
  1923  	}
  1924  }
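
// Illustrative filtering in listVersionsRoutine above (hypothetical key "a"
// with versions v3, v2, v1 listed newest first and WithOlderVersions == false):
//
//	TimeRef zero              -> only v3 is sent; v2 and v1 are skipped via skipKey
//	TimeRef between v2 and v3 -> v3 is ignored (too new) and only v2 is sent
//	WithDeleteMarkers false   -> a delete marker that would otherwise be selected is skipped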
  1925  
  1926  // ListBuckets - list buckets
  1927  func (c *S3Client) ListBuckets(ctx context.Context) ([]*ClientContent, *probe.Error) {
  1928  	buckets, err := c.api.ListBuckets(ctx)
  1929  	if err != nil {
  1930  		return nil, probe.NewError(err)
  1931  	}
  1932  	bucketsList := make([]*ClientContent, 0, len(buckets))
  1933  	for _, b := range buckets {
  1934  		bucketsList = append(bucketsList, c.bucketInfo2ClientContent(b))
  1935  	}
  1936  	return bucketsList, nil
  1937  }
  1938  
  1939  // List - list at delimited path, if not recursive.
  1940  func (c *S3Client) List(ctx context.Context, opts ListOptions) <-chan *ClientContent {
  1941  	c.Lock()
  1942  	defer c.Unlock()
  1943  
  1944  	contentCh := make(chan *ClientContent)
  1945  	go func() {
  1946  		defer close(contentCh)
  1947  		if !opts.TimeRef.IsZero() || opts.WithOlderVersions {
  1948  			c.versionedList(ctx, contentCh, opts)
  1949  		} else {
  1950  			c.unversionedList(ctx, contentCh, opts)
  1951  		}
  1952  	}()
  1953  
  1954  	return contentCh
  1955  }
  1956  
// versionedList returns object versions if the S3 backend supports versioning;
// it falls back to the regular listing if not.
  1959  func (c *S3Client) versionedList(ctx context.Context, contentCh chan *ClientContent, opts ListOptions) {
  1960  	b, o := c.url2BucketAndObject()
  1961  	switch {
  1962  	case b == "" && o == "":
  1963  		buckets, err := c.api.ListBuckets(ctx)
  1964  		if err != nil {
  1965  			select {
  1966  			case <-ctx.Done():
  1967  			case contentCh <- &ClientContent{
  1968  				Err: probe.NewError(err),
  1969  			}:
  1970  			}
  1971  			return
  1972  		}
  1973  		if opts.Recursive {
  1974  			sortBucketsNameWithSlash(buckets)
  1975  		}
  1976  		for _, bucket := range buckets {
  1977  			if opts.ShowDir != DirLast {
  1978  				select {
  1979  				case <-ctx.Done():
  1980  					return
  1981  				case contentCh <- c.bucketInfo2ClientContent(bucket):
  1982  				}
  1983  			}
  1984  			for objectVersion := range c.listVersions(ctx, bucket.Name, "", opts) {
  1985  				if objectVersion.Err != nil {
  1986  					if minio.ToErrorResponse(objectVersion.Err).Code == "NotImplemented" {
  1987  						goto noVersioning
  1988  					}
  1989  					select {
  1990  					case <-ctx.Done():
  1991  						return
  1992  					case contentCh <- &ClientContent{
  1993  						Err: probe.NewError(objectVersion.Err),
  1994  					}:
  1995  					}
  1996  					continue
  1997  				}
  1998  				select {
  1999  				case <-ctx.Done():
  2000  					return
  2001  				case contentCh <- c.objectInfo2ClientContent(bucket.Name, objectVersion):
  2002  				}
  2003  			}
  2004  
  2005  			if opts.ShowDir == DirLast {
  2006  				select {
  2007  				case <-ctx.Done():
  2008  					return
  2009  				case contentCh <- c.bucketInfo2ClientContent(bucket):
  2010  				}
  2011  			}
  2012  		}
  2013  		return
  2014  	default:
  2015  		for objectVersion := range c.listVersions(ctx, b, o, opts) {
  2016  			if objectVersion.Err != nil {
  2017  				if minio.ToErrorResponse(objectVersion.Err).Code == "NotImplemented" {
  2018  					goto noVersioning
  2019  				}
  2020  				select {
  2021  				case <-ctx.Done():
  2022  					return
  2023  				case contentCh <- &ClientContent{
  2024  					Err: probe.NewError(objectVersion.Err),
  2025  				}:
  2026  				}
  2027  				continue
  2028  			}
  2029  			select {
  2030  			case <-ctx.Done():
  2031  				return
  2032  			case contentCh <- c.objectInfo2ClientContent(b, objectVersion):
  2033  			}
  2034  		}
  2035  		return
  2036  	}
  2037  
  2038  noVersioning:
  2039  	c.unversionedList(ctx, contentCh, opts)
  2040  }
  2041  
// unversionedList is the non-versioned S3 listing.
  2043  func (c *S3Client) unversionedList(ctx context.Context, contentCh chan *ClientContent, opts ListOptions) {
  2044  	if opts.Incomplete {
  2045  		if opts.Recursive {
  2046  			c.listIncompleteRecursiveInRoutine(ctx, contentCh, opts)
  2047  		} else {
  2048  			c.listIncompleteInRoutine(ctx, contentCh)
  2049  		}
  2050  	} else {
  2051  		if opts.Recursive {
  2052  			c.listRecursiveInRoutine(ctx, contentCh, opts)
  2053  		} else {
  2054  			c.listInRoutine(ctx, contentCh, opts)
  2055  		}
  2056  	}
  2057  }
  2058  
  2059  func (c *S3Client) listIncompleteInRoutine(ctx context.Context, contentCh chan *ClientContent) {
  2060  	// get bucket and object from URL.
  2061  	b, o := c.url2BucketAndObject()
  2062  	switch {
  2063  	case b == "" && o == "":
  2064  		buckets, err := c.api.ListBuckets(ctx)
  2065  		if err != nil {
  2066  			select {
  2067  			case <-ctx.Done():
  2068  			case contentCh <- &ClientContent{
  2069  				Err: probe.NewError(err),
  2070  			}:
  2071  			}
  2072  			return
  2073  		}
  2074  		isRecursive := false
  2075  		for _, bucket := range buckets {
  2076  			for object := range c.api.ListIncompleteUploads(ctx, bucket.Name, o, isRecursive) {
  2077  				if object.Err != nil {
  2078  					select {
  2079  					case <-ctx.Done():
  2080  						return
  2081  					case contentCh <- &ClientContent{
  2082  						Err: probe.NewError(object.Err),
  2083  					}:
  2084  					}
  2085  					return
  2086  				}
  2087  				content := &ClientContent{}
  2088  				url := c.targetURL.Clone()
				// Join bucket with the incoming object key.
  2090  				url.Path = c.buildAbsPath(bucket.Name, object.Key)
  2091  				switch {
  2092  				case strings.HasSuffix(object.Key, string(c.targetURL.Separator)):
  2093  					// We need to keep the trailing Separator, do not use filepath.Join().
  2094  					content.URL = url
  2095  					content.Time = time.Now()
  2096  					content.Type = os.ModeDir
  2097  				default:
  2098  					content.URL = url
  2099  					content.Size = object.Size
  2100  					content.Time = object.Initiated
  2101  					content.Type = os.ModeTemporary
  2102  				}
  2103  				select {
  2104  				case <-ctx.Done():
  2105  					return
  2106  				case contentCh <- content:
  2107  				}
  2108  			}
  2109  		}
  2110  	default:
  2111  		isRecursive := false
  2112  		for object := range c.api.ListIncompleteUploads(ctx, b, o, isRecursive) {
  2113  			if object.Err != nil {
  2114  				select {
  2115  				case <-ctx.Done():
  2116  				case contentCh <- &ClientContent{
  2117  					Err: probe.NewError(object.Err),
  2118  				}:
  2119  				}
  2120  				return
  2121  			}
  2122  			content := &ClientContent{}
  2123  			url := c.targetURL.Clone()
			// Join bucket with the incoming object key.
  2125  			url.Path = c.buildAbsPath(b, object.Key)
  2126  			switch {
  2127  			case strings.HasSuffix(object.Key, string(c.targetURL.Separator)):
  2128  				// We need to keep the trailing Separator, do not use filepath.Join().
  2129  				content.URL = url
  2130  				content.Time = time.Now()
  2131  				content.Type = os.ModeDir
  2132  			default:
  2133  				content.URL = url
  2134  				content.Size = object.Size
  2135  				content.Time = object.Initiated
  2136  				content.Type = os.ModeTemporary
  2137  			}
  2138  			select {
  2139  			case <-ctx.Done():
  2140  				return
  2141  			case contentCh <- content:
  2142  			}
  2143  		}
  2144  	}
  2145  }
  2146  
  2147  func (c *S3Client) listIncompleteRecursiveInRoutine(ctx context.Context, contentCh chan *ClientContent, opts ListOptions) {
  2148  	// get bucket and object from URL.
  2149  	b, o := c.url2BucketAndObject()
  2150  	switch {
  2151  	case b == "" && o == "":
  2152  		buckets, err := c.api.ListBuckets(ctx)
  2153  		if err != nil {
  2154  			select {
  2155  			case <-ctx.Done():
  2156  			case contentCh <- &ClientContent{
  2157  				Err: probe.NewError(err),
  2158  			}:
  2159  			}
  2160  			return
  2161  		}
  2162  		sortBucketsNameWithSlash(buckets)
  2163  		isRecursive := true
  2164  		for _, bucket := range buckets {
  2165  			if opts.ShowDir != DirLast {
  2166  				select {
  2167  				case <-ctx.Done():
  2168  					return
  2169  				case contentCh <- c.bucketInfo2ClientContent(bucket):
  2170  				}
  2171  			}
  2172  
  2173  			for object := range c.api.ListIncompleteUploads(ctx, bucket.Name, o, isRecursive) {
  2174  				if object.Err != nil {
  2175  					select {
  2176  					case <-ctx.Done():
  2177  						return
  2178  					case contentCh <- &ClientContent{
  2179  						Err: probe.NewError(object.Err),
  2180  					}:
  2181  					}
  2182  					return
  2183  				}
  2184  				url := c.targetURL.Clone()
  2185  				url.Path = c.buildAbsPath(bucket.Name, object.Key)
  2186  				content := &ClientContent{}
  2187  				content.URL = url
  2188  				content.Size = object.Size
  2189  				content.Time = object.Initiated
  2190  				content.Type = os.ModeTemporary
  2191  				select {
  2192  				case <-ctx.Done():
  2193  					return
  2194  				case contentCh <- content:
  2195  				}
  2196  			}
  2197  
  2198  			if opts.ShowDir == DirLast {
  2199  				select {
  2200  				case <-ctx.Done():
  2201  					return
  2202  				case contentCh <- c.bucketInfo2ClientContent(bucket):
  2203  				}
  2204  			}
  2205  		}
  2206  	default:
  2207  		isRecursive := true
  2208  		for object := range c.api.ListIncompleteUploads(ctx, b, o, isRecursive) {
  2209  			if object.Err != nil {
  2210  				select {
  2211  				case <-ctx.Done():
  2212  					return
  2213  				case contentCh <- &ClientContent{
  2214  					Err: probe.NewError(object.Err),
  2215  				}:
  2216  				}
  2217  				return
  2218  			}
  2219  			url := c.targetURL.Clone()
  2220  			// Join bucket and incoming object key.
  2221  			url.Path = c.buildAbsPath(b, object.Key)
  2222  			content := &ClientContent{}
  2223  			content.URL = url
  2224  			content.Size = object.Size
  2225  			content.Time = object.Initiated
  2226  			content.Type = os.ModeTemporary
  2227  			select {
  2228  			case <-ctx.Done():
  2229  				return
  2230  			case contentCh <- content:
  2231  			}
  2232  		}
  2233  	}
  2234  }
  2235  
// Join bucket and object names without cleaning the path, so slashes that mark directories are preserved.
  2237  func (c *S3Client) joinPath(bucket string, objects ...string) string {
  2238  	p := bucket
  2239  	for _, o := range objects {
  2240  		p += string(c.targetURL.Separator) + o
  2241  	}
  2242  	return p
  2243  }
  2244  
  2245  // Build new absolute URL path by joining path segments with URL path separator.
  2246  func (c *S3Client) buildAbsPath(bucket string, objects ...string) string {
  2247  	return string(c.targetURL.Separator) + c.joinPath(bucket, objects...)
  2248  }
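
// For illustration (hypothetical names): joinPath("bucket", "dir/") yields
// "bucket/dir/" and buildAbsPath("bucket", "dir/") yields "/bucket/dir/",
// preserving the trailing separator that marks a directory.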
  2249  
// Convert bucketInfo to ClientContent.
  2251  func (c *S3Client) bucketInfo2ClientContent(bucket minio.BucketInfo) *ClientContent {
  2252  	content := &ClientContent{}
  2253  	url := c.targetURL.Clone()
  2254  	url.Path = c.buildAbsPath(bucket.Name)
  2255  	content.URL = url
  2256  	content.BucketName = bucket.Name
  2257  	content.Size = 0
  2258  	content.Time = bucket.CreationDate
  2259  	content.Type = os.ModeDir
  2260  	return content
  2261  }
  2262  
// Convert a prefix to ClientContent.
  2264  func (c *S3Client) prefixInfo2ClientContent(bucket, prefix string) *ClientContent {
  2265  	// Join bucket and incoming object key.
  2266  	if bucket == "" {
  2267  		panic("should never happen, bucket cannot be empty")
  2268  	}
  2269  	content := &ClientContent{}
  2270  	url := c.targetURL.Clone()
  2271  	url.Path = c.buildAbsPath(bucket, prefix)
  2272  	content.URL = url
  2273  	content.BucketName = bucket
  2274  	content.Type = os.ModeDir
  2275  	content.Time = time.Now()
  2276  	return content
  2277  }
  2278  
  2279  // Convert objectInfo to ClientContent
  2280  func (c *S3Client) objectInfo2ClientContent(bucket string, entry minio.ObjectInfo) *ClientContent {
  2281  	content := &ClientContent{}
  2282  	url := c.targetURL.Clone()
  2283  	// Join bucket and incoming object key.
  2284  	if bucket == "" {
  2285  		panic("should never happen, bucket cannot be empty")
  2286  	}
  2287  	url.Path = c.buildAbsPath(bucket, entry.Key)
  2288  	content.URL = url
  2289  	content.BucketName = bucket
  2290  	content.Size = entry.Size
  2291  	content.ETag = entry.ETag
  2292  	content.Time = entry.LastModified
  2293  	content.Expires = entry.Expires
  2294  	content.Expiration = entry.Expiration
  2295  	content.ExpirationRuleID = entry.ExpirationRuleID
  2296  	content.VersionID = entry.VersionID
  2297  	content.StorageClass = entry.StorageClass
  2298  	content.IsDeleteMarker = entry.IsDeleteMarker
  2299  	content.IsLatest = entry.IsLatest
  2300  	content.Restore = entry.Restore
  2301  	content.Metadata = map[string]string{}
  2302  	content.UserMetadata = map[string]string{}
  2303  	content.Tags = entry.UserTags
  2304  
  2305  	content.ReplicationStatus = entry.ReplicationStatus
  2306  	for k, v := range entry.UserMetadata {
  2307  		content.UserMetadata[k] = v
  2308  	}
  2309  	for k := range entry.Metadata {
  2310  		content.Metadata[k] = entry.Metadata.Get(k)
  2311  	}
  2312  	attr, _ := parseAttribute(content.UserMetadata)
  2313  	if len(attr) > 0 {
  2314  		_, mtime, _ := parseAtimeMtime(attr)
  2315  		if !mtime.IsZero() {
  2316  			content.Time = mtime
  2317  		}
  2318  	}
  2319  	attr, _ = parseAttribute(content.Metadata)
  2320  	if len(attr) > 0 {
  2321  		_, mtime, _ := parseAtimeMtime(attr)
  2322  		if !mtime.IsZero() {
  2323  			content.Time = mtime
  2324  		}
  2325  	}
  2326  
  2327  	if strings.HasSuffix(entry.Key, string(c.targetURL.Separator)) {
  2328  		content.Type = os.ModeDir
  2329  		if content.Time.IsZero() {
  2330  			content.Time = time.Now()
  2331  		}
  2332  	} else {
  2333  		content.Type = os.FileMode(0o664)
  2334  	}
  2335  
  2336  	return content
  2337  }
  2338  
  2339  // Returns bucket stat info of current bucket.
  2340  func (c *S3Client) bucketStat(ctx context.Context, opts BucketStatOptions) (*ClientContent, *probe.Error) {
  2341  	if !opts.ignoreBucketExists {
  2342  		exists, e := c.api.BucketExists(ctx, opts.bucket)
  2343  		if e != nil {
  2344  			return nil, probe.NewError(e)
  2345  		}
  2346  		if !exists {
  2347  			return nil, probe.NewError(BucketDoesNotExist{Bucket: opts.bucket})
  2348  		}
  2349  	}
  2350  	return &ClientContent{
  2351  		URL: c.targetURL.Clone(), BucketName: opts.bucket, Time: time.Unix(0, 0), Type: os.ModeDir,
  2352  	}, nil
  2353  }
  2354  
  2355  func (c *S3Client) listInRoutine(ctx context.Context, contentCh chan *ClientContent, opts ListOptions) {
  2356  	// get bucket and object from URL.
  2357  	b, o := c.url2BucketAndObject()
  2358  	if opts.ListZip && (b == "" || o == "") {
  2359  		contentCh <- &ClientContent{
  2360  			Err: probe.NewError(errors.New("listing zip files must provide bucket and object")),
  2361  		}
  2362  		return
  2363  	}
  2364  	switch {
  2365  	case b == "" && o == "":
  2366  		buckets, e := c.api.ListBuckets(ctx)
  2367  		if e != nil {
  2368  			contentCh <- &ClientContent{
  2369  				Err: probe.NewError(e),
  2370  			}
  2371  			return
  2372  		}
  2373  		for _, bucket := range buckets {
  2374  			contentCh <- c.bucketInfo2ClientContent(bucket)
  2375  		}
  2376  	case b != "" && !strings.HasSuffix(c.targetURL.Path, string(c.targetURL.Separator)) && o == "":
  2377  		content, err := c.bucketStat(ctx, BucketStatOptions{bucket: b})
  2378  		if err != nil {
  2379  			contentCh <- &ClientContent{Err: err.Trace(b)}
  2380  			return
  2381  		}
  2382  		contentCh <- content
  2383  	default:
  2384  		isRecursive := false
  2385  		for object := range c.listObjectWrapper(ctx, b, o, isRecursive, time.Time{}, false, false, opts.WithMetadata, -1, opts.ListZip) {
  2386  			if object.Err != nil {
  2387  				contentCh <- &ClientContent{
  2388  					Err: probe.NewError(object.Err),
  2389  				}
  2390  				return
  2391  			}
  2392  			contentCh <- c.objectInfo2ClientContent(b, object)
  2393  		}
  2394  	}
  2395  }
  2396  
// S3 offers a range of storage classes designed for
// different use cases; the following list captures these.
  2399  const (
  2400  	// General purpose.
  2401  	// s3StorageClassStandard = "STANDARD"
  2402  	// Infrequent access.
  2403  	// s3StorageClassInfrequent = "STANDARD_IA"
  2404  	// Reduced redundancy access.
  2405  	// s3StorageClassRedundancy = "REDUCED_REDUNDANCY"
  2406  	// Archive access.
  2407  	s3StorageClassGlacier = "GLACIER"
  2408  )
  2409  
// Sort bucket names with an additional '/' appended to make sure that a
// site-wide listing returns sorted output. This is crucial for
// correct diff/mirror calculation.
  2413  func sortBucketsNameWithSlash(bucketsInfo []minio.BucketInfo) {
  2414  	sort.Slice(bucketsInfo, func(i, j int) bool {
  2415  		return bucketsInfo[i].Name+"/" < bucketsInfo[j].Name+"/"
  2416  	})
  2417  }
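
// For illustration (hypothetical bucket names): a plain sort puts "test" before
// "test-bucket", but recursive listings compare full paths, where
// "test-bucket/obj" < "test/obj" because '-' sorts before '/'. Comparing the
// bucket names with a trailing '/' matches that order:
//
//	plain sort:        ["test", "test-bucket"]
//	with '/' appended: ["test-bucket/", "test/"]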
  2418  
  2419  func (c *S3Client) listRecursiveInRoutine(ctx context.Context, contentCh chan *ClientContent, opts ListOptions) {
  2420  	// get bucket and object from URL.
  2421  	b, o := c.url2BucketAndObject()
  2422  	switch {
  2423  	case b == "" && o == "":
  2424  		buckets, err := c.api.ListBuckets(ctx)
  2425  		if err != nil {
  2426  			contentCh <- &ClientContent{
  2427  				Err: probe.NewError(err),
  2428  			}
  2429  			return
  2430  		}
  2431  		sortBucketsNameWithSlash(buckets)
  2432  		for _, bucket := range buckets {
  2433  			if opts.ShowDir == DirFirst {
  2434  				contentCh <- c.bucketInfo2ClientContent(bucket)
  2435  			}
  2436  
  2437  			isRecursive := true
  2438  			for object := range c.listObjectWrapper(ctx, bucket.Name, o, isRecursive, time.Time{}, false, false, opts.WithMetadata, -1, opts.ListZip) {
  2439  				if object.Err != nil {
  2440  					contentCh <- &ClientContent{
  2441  						Err: probe.NewError(object.Err),
  2442  					}
  2443  					return
  2444  				}
  2445  				contentCh <- c.objectInfo2ClientContent(bucket.Name, object)
  2446  			}
  2447  
  2448  			if opts.ShowDir == DirLast {
  2449  				contentCh <- c.bucketInfo2ClientContent(bucket)
  2450  			}
  2451  		}
  2452  	default:
  2453  		isRecursive := true
  2454  		for object := range c.listObjectWrapper(ctx, b, o, isRecursive, time.Time{}, false, false, opts.WithMetadata, -1, opts.ListZip) {
  2455  			if object.Err != nil {
  2456  				contentCh <- &ClientContent{
  2457  					Err: probe.NewError(object.Err),
  2458  				}
  2459  				return
  2460  			}
  2461  			contentCh <- c.objectInfo2ClientContent(b, object)
  2462  		}
  2463  	}
  2464  }
  2465  
  2466  // ShareDownload - get a usable presigned object url to share.
  2467  func (c *S3Client) ShareDownload(ctx context.Context, versionID string, expires time.Duration) (string, *probe.Error) {
  2468  	bucket, object := c.url2BucketAndObject()
  2469  	// No additional request parameters are set for the time being.
  2470  	reqParams := make(url.Values)
  2471  	if versionID != "" {
  2472  		reqParams.Set("versionId", versionID)
  2473  	}
  2474  	presignedURL, e := c.api.PresignedGetObject(ctx, bucket, object, expires, reqParams)
  2475  	if e != nil {
  2476  		return "", probe.NewError(e)
  2477  	}
  2478  	return presignedURL.String(), nil
  2479  }
  2480  
  2481  // ShareUpload - get data for presigned post http form upload.
  2482  func (c *S3Client) ShareUpload(ctx context.Context, isRecursive bool, expires time.Duration, contentType string) (string, map[string]string, *probe.Error) {
  2483  	bucket, object := c.url2BucketAndObject()
  2484  	p := minio.NewPostPolicy()
  2485  	if e := p.SetExpires(UTCNow().Add(expires)); e != nil {
  2486  		return "", nil, probe.NewError(e)
  2487  	}
	if strings.TrimSpace(contentType) != "" {
		// No need to check the error here since the content type is not blank.
  2490  		p.SetContentType(contentType)
  2491  	}
  2492  	if e := p.SetBucket(bucket); e != nil {
  2493  		return "", nil, probe.NewError(e)
  2494  	}
  2495  	if isRecursive {
  2496  		if e := p.SetKeyStartsWith(object); e != nil {
  2497  			return "", nil, probe.NewError(e)
  2498  		}
  2499  	} else {
  2500  		if e := p.SetKey(object); e != nil {
  2501  			return "", nil, probe.NewError(e)
  2502  		}
  2503  	}
  2504  	u, m, e := c.api.PresignedPostPolicy(ctx, p)
  2505  	if e != nil {
  2506  		return "", nil, probe.NewError(e)
  2507  	}
  2508  	return u.String(), m, nil
  2509  }
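
// Illustrative use of ShareUpload above (hypothetical endpoint, object and
// fields): the returned URL and form-field map are typically turned into an
// HTTP multipart POST, e.g.
//
//	curl https://play.min.io/mybucket -F policy=<...> -F x-amz-signature=<...> -F key=docs/upload.txt -F file=@upload.txt
//
// where every key/value pair from the returned map becomes one -F form field.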
  2510  
// SetObjectLockConfig - Set object lock configuration of bucket.
  2512  func (c *S3Client) SetObjectLockConfig(ctx context.Context, mode minio.RetentionMode, validity uint64, unit minio.ValidityUnit) *probe.Error {
  2513  	bucket, object := c.url2BucketAndObject()
  2514  
  2515  	if bucket == "" || object != "" {
  2516  		return errInvalidArgument().Trace(bucket, object)
  2517  	}
  2518  
  2519  	// FIXME: This is too ugly, fix minio-go
  2520  	vuint := (uint)(validity)
  2521  	if mode != "" && vuint > 0 && unit != "" {
  2522  		e := c.api.SetBucketObjectLockConfig(ctx, bucket, &mode, &vuint, &unit)
  2523  		if e != nil {
  2524  			return probe.NewError(e).Trace(c.GetURL().String())
  2525  		}
  2526  		return nil
  2527  	}
  2528  	if mode == "" && vuint == 0 && unit == "" {
  2529  		e := c.api.SetBucketObjectLockConfig(ctx, bucket, nil, nil, nil)
  2530  		if e != nil {
  2531  			return probe.NewError(e).Trace(c.GetURL().String())
  2532  		}
  2533  		return nil
  2534  	}
  2535  	return errInvalidArgument().Trace(c.GetURL().String())
  2536  }
  2537  
  2538  // PutObjectRetention - Set object retention for a given object.
  2539  func (c *S3Client) PutObjectRetention(ctx context.Context, versionID string, mode minio.RetentionMode, retainUntilDate time.Time, bypassGovernance bool) *probe.Error {
  2540  	bucket, object := c.url2BucketAndObject()
  2541  
  2542  	var (
  2543  		modePtr            *minio.RetentionMode
  2544  		retainUntilDatePtr *time.Time
  2545  	)
  2546  
  2547  	if mode != "" && retainUntilDate.IsZero() {
  2548  		return errInvalidArgument().Trace(c.GetURL().String())
  2549  	}
  2550  
  2551  	if mode != "" {
  2552  		modePtr = &mode
  2553  		retainUntilDatePtr = &retainUntilDate
  2554  	}
  2555  
  2556  	opts := minio.PutObjectRetentionOptions{
  2557  		VersionID:        versionID,
  2558  		RetainUntilDate:  retainUntilDatePtr,
  2559  		Mode:             modePtr,
  2560  		GovernanceBypass: bypassGovernance,
  2561  	}
  2562  	e := c.api.PutObjectRetention(ctx, bucket, object, opts)
  2563  	if e != nil {
  2564  		return probe.NewError(e).Trace(c.GetURL().String())
  2565  	}
  2566  	return nil
  2567  }
  2568  
  2569  // GetObjectRetention - Get object retention for a given object.
  2570  func (c *S3Client) GetObjectRetention(ctx context.Context, versionID string) (minio.RetentionMode, time.Time, *probe.Error) {
  2571  	bucket, object := c.url2BucketAndObject()
  2572  	if object == "" {
  2573  		return "", time.Time{}, probe.NewError(ObjectNameEmpty{}).Trace(c.GetURL().String())
  2574  	}
  2575  	modePtr, untilPtr, e := c.api.GetObjectRetention(ctx, bucket, object, versionID)
  2576  	if e != nil {
  2577  		return "", time.Time{}, probe.NewError(e).Trace(c.GetURL().String())
  2578  	}
  2579  	var (
  2580  		mode  minio.RetentionMode
  2581  		until time.Time
  2582  	)
  2583  	if modePtr != nil {
  2584  		mode = *modePtr
  2585  	}
  2586  	if untilPtr != nil {
  2587  		until = *untilPtr
  2588  	}
  2589  	return mode, until, nil
  2590  }
  2591  
  2592  // PutObjectLegalHold - Set object legal hold for a given object.
  2593  func (c *S3Client) PutObjectLegalHold(ctx context.Context, versionID string, lhold minio.LegalHoldStatus) *probe.Error {
  2594  	bucket, object := c.url2BucketAndObject()
  2595  	if lhold.IsValid() {
  2596  		opts := minio.PutObjectLegalHoldOptions{
  2597  			Status:    &lhold,
  2598  			VersionID: versionID,
  2599  		}
  2600  		e := c.api.PutObjectLegalHold(ctx, bucket, object, opts)
  2601  		if e != nil {
  2602  			return probe.NewError(e).Trace(c.GetURL().String())
  2603  		}
  2604  		return nil
  2605  	}
  2606  	return errInvalidArgument().Trace(c.GetURL().String())
  2607  }
  2608  
  2609  // GetObjectLegalHold - Get object legal hold for a given object.
  2610  func (c *S3Client) GetObjectLegalHold(ctx context.Context, versionID string) (minio.LegalHoldStatus, *probe.Error) {
  2611  	var lhold minio.LegalHoldStatus
  2612  	bucket, object := c.url2BucketAndObject()
  2613  	opts := minio.GetObjectLegalHoldOptions{
  2614  		VersionID: versionID,
  2615  	}
  2616  	lhPtr, e := c.api.GetObjectLegalHold(ctx, bucket, object, opts)
  2617  	if e != nil {
  2618  		errResp := minio.ToErrorResponse(e)
  2619  		if errResp.Code != "NoSuchObjectLockConfiguration" {
  2620  			return "", probe.NewError(e).Trace(c.GetURL().String())
  2621  		}
  2622  		return "", nil
  2623  	}
	// lhPtr can be nil if there is no legal hold status set.
  2625  	if lhPtr != nil {
  2626  		lhold = *lhPtr
  2627  	}
  2628  	return lhold, nil
  2629  }
  2630  
  2631  // GetObjectLockConfig - Get object lock configuration of bucket.
  2632  func (c *S3Client) GetObjectLockConfig(ctx context.Context) (string, minio.RetentionMode, uint64, minio.ValidityUnit, *probe.Error) {
  2633  	bucket, object := c.url2BucketAndObject()
  2634  
  2635  	if bucket == "" || object != "" {
  2636  		return "", "", 0, "", errInvalidArgument().Trace(bucket, object)
  2637  	}
  2638  
  2639  	status, mode, validity, unit, e := c.api.GetObjectLockConfig(ctx, bucket)
  2640  	if e != nil {
  2641  		return "", "", 0, "", probe.NewError(e).Trace(c.GetURL().String())
  2642  	}
  2643  
  2644  	if mode != nil && validity != nil && unit != nil {
  2645  		// FIXME: this is too ugly, fix minio-go
  2646  		vuint64 := uint64(*validity)
  2647  		return status, *mode, vuint64, *unit, nil
  2648  	}
  2649  
  2650  	return status, "", 0, "", nil
  2651  }
  2652  
  2653  // GetTags - Get tags of bucket or object.
  2654  func (c *S3Client) GetTags(ctx context.Context, versionID string) (map[string]string, *probe.Error) {
  2655  	bucketName, objectName := c.url2BucketAndObject()
  2656  	if bucketName == "" {
  2657  		return nil, probe.NewError(BucketNameEmpty{})
  2658  	}
  2659  
  2660  	if objectName == "" {
  2661  		if versionID != "" {
  2662  			return nil, probe.NewError(errors.New("getting bucket tags does not support versioning parameters"))
  2663  		}
  2664  
  2665  		tags, err := c.api.GetBucketTagging(ctx, bucketName)
  2666  		if err != nil {
  2667  			return nil, probe.NewError(err)
  2668  		}
  2669  
  2670  		return tags.ToMap(), nil
  2671  	}
  2672  
  2673  	tags, err := c.api.GetObjectTagging(ctx, bucketName, objectName, minio.GetObjectTaggingOptions{VersionID: versionID})
  2674  	if err != nil {
  2675  		return nil, probe.NewError(err)
  2676  	}
  2677  
  2678  	return tags.ToMap(), nil
  2679  }
  2680  
  2681  // SetTags - Set tags of bucket or object.
  2682  func (c *S3Client) SetTags(ctx context.Context, versionID, tagString string) *probe.Error {
  2683  	bucketName, objectName := c.url2BucketAndObject()
  2684  	if bucketName == "" {
  2685  		return probe.NewError(BucketNameEmpty{})
  2686  	}
  2687  
  2688  	tags, err := tags.Parse(tagString, objectName != "")
  2689  	if err != nil {
  2690  		return probe.NewError(err)
  2691  	}
  2692  
  2693  	if objectName == "" {
  2694  		if versionID != "" {
  2695  			return probe.NewError(errors.New("setting bucket tags does not support versioning parameters"))
  2696  		}
  2697  		err = c.api.SetBucketTagging(ctx, bucketName, tags)
  2698  	} else {
  2699  		err = c.api.PutObjectTagging(ctx, bucketName, objectName, tags, minio.PutObjectTaggingOptions{VersionID: versionID})
  2700  	}
  2701  
  2702  	if err != nil {
  2703  		return probe.NewError(err)
  2704  	}
  2705  
  2706  	return nil
  2707  }
  2708  
  2709  // DeleteTags - Delete tags of bucket or object
  2710  func (c *S3Client) DeleteTags(ctx context.Context, versionID string) *probe.Error {
  2711  	bucketName, objectName := c.url2BucketAndObject()
  2712  	if bucketName == "" {
  2713  		return probe.NewError(BucketNameEmpty{})
  2714  	}
  2715  
  2716  	var err error
  2717  	if objectName == "" {
  2718  		if versionID != "" {
			return probe.NewError(errors.New("deleting bucket tags does not support versioning parameters"))
  2720  		}
  2721  		err = c.api.RemoveBucketTagging(ctx, bucketName)
  2722  	} else {
  2723  		err = c.api.RemoveObjectTagging(ctx, bucketName, objectName, minio.RemoveObjectTaggingOptions{VersionID: versionID})
  2724  	}
  2725  
  2726  	if err != nil {
  2727  		return probe.NewError(err)
  2728  	}
  2729  
  2730  	return nil
  2731  }
  2732  
  2733  // GetLifecycle - Get current lifecycle configuration.
  2734  func (c *S3Client) GetLifecycle(ctx context.Context) (*lifecycle.Configuration, time.Time, *probe.Error) {
  2735  	bucket, _ := c.url2BucketAndObject()
  2736  	if bucket == "" {
  2737  		return nil, time.Time{}, probe.NewError(BucketNameEmpty{})
  2738  	}
  2739  
  2740  	config, updatedAt, e := c.api.GetBucketLifecycleWithInfo(ctx, bucket)
  2741  	if e != nil {
  2742  		return nil, time.Time{}, probe.NewError(e)
  2743  	}
  2744  
  2745  	return config, updatedAt, nil
  2746  }
  2747  
  2748  // SetLifecycle - Set lifecycle configuration on a bucket
  2749  func (c *S3Client) SetLifecycle(ctx context.Context, config *lifecycle.Configuration) *probe.Error {
  2750  	bucket, _ := c.url2BucketAndObject()
  2751  	if bucket == "" {
  2752  		return probe.NewError(BucketNameEmpty{})
  2753  	}
  2754  
  2755  	if e := c.api.SetBucketLifecycle(ctx, bucket, config); e != nil {
  2756  		return probe.NewError(e)
  2757  	}
  2758  
  2759  	return nil
  2760  }
  2761  
  2762  // GetVersion - gets bucket version info.
  2763  func (c *S3Client) GetVersion(ctx context.Context) (config minio.BucketVersioningConfiguration, err *probe.Error) {
  2764  	bucket, _ := c.url2BucketAndObject()
  2765  	if bucket == "" {
  2766  		return config, probe.NewError(BucketNameEmpty{})
  2767  	}
  2768  	var e error
  2769  	config, e = c.api.GetBucketVersioning(ctx, bucket)
  2770  	if e != nil {
  2771  		return config, probe.NewError(e)
  2772  	}
  2773  
  2774  	return config, nil
  2775  }
  2776  
  2777  // SetVersion - Set version configuration on a bucket
  2778  func (c *S3Client) SetVersion(ctx context.Context, status string, prefixes []string, excludeFolders bool) *probe.Error {
  2779  	bucket, _ := c.url2BucketAndObject()
  2780  	if bucket == "" {
  2781  		return probe.NewError(BucketNameEmpty{})
  2782  	}
  2783  	var err error
  2784  	switch status {
	case "enable":
  2787  		if len(prefixes) > 0 || excludeFolders {
  2788  			vc := minio.BucketVersioningConfiguration{
  2789  				Status:         minio.Enabled,
  2790  				ExcludeFolders: excludeFolders,
  2791  			}
  2792  			if len(prefixes) > 0 {
  2793  				eprefixes := make([]minio.ExcludedPrefix, 0, len(prefixes))
  2794  				for _, prefix := range prefixes {
  2795  					eprefixes = append(eprefixes, minio.ExcludedPrefix{Prefix: prefix})
  2796  				}
  2797  				vc.ExcludedPrefixes = eprefixes
  2798  			}
  2799  			err = c.api.SetBucketVersioning(ctx, bucket, vc)
  2800  		} else {
  2801  			err = c.api.EnableVersioning(ctx, bucket)
  2802  		}
  2803  	case "suspend":
  2804  		err = c.api.SuspendVersioning(ctx, bucket)
  2805  	default:
  2806  		return probe.NewError(fmt.Errorf("Invalid versioning status"))
  2807  	}
  2808  	return probe.NewError(err)
  2809  }
  2810  
  2811  // GetReplication - gets replication configuration for a given bucket.
  2812  func (c *S3Client) GetReplication(ctx context.Context) (replication.Config, *probe.Error) {
  2813  	bucket, _ := c.url2BucketAndObject()
  2814  	if bucket == "" {
  2815  		return replication.Config{}, probe.NewError(BucketNameEmpty{})
  2816  	}
  2817  
  2818  	replicationCfg, e := c.api.GetBucketReplication(ctx, bucket)
  2819  	if e != nil {
  2820  		return replication.Config{}, probe.NewError(e)
  2821  	}
  2822  	return replicationCfg, nil
  2823  }
  2824  
  2825  // RemoveReplication - removes replication configuration for a given bucket.
  2826  func (c *S3Client) RemoveReplication(ctx context.Context) *probe.Error {
  2827  	bucket, _ := c.url2BucketAndObject()
  2828  	if bucket == "" {
  2829  		return probe.NewError(BucketNameEmpty{})
  2830  	}
  2831  
  2832  	e := c.api.RemoveBucketReplication(ctx, bucket)
  2833  	return probe.NewError(e)
  2834  }
  2835  
  2836  // SetReplication sets replication configuration for a given bucket.
  2837  func (c *S3Client) SetReplication(ctx context.Context, cfg *replication.Config, opts replication.Options) *probe.Error {
  2838  	bucket, objectPrefix := c.url2BucketAndObject()
  2839  	if bucket == "" {
  2840  		return probe.NewError(BucketNameEmpty{})
  2841  	}
  2842  	opts.Prefix = objectPrefix
  2843  	switch opts.Op {
  2844  	case replication.AddOption:
  2845  		if e := cfg.AddRule(opts); e != nil {
  2846  			return probe.NewError(e)
  2847  		}
  2848  	case replication.SetOption:
  2849  		if e := cfg.EditRule(opts); e != nil {
  2850  			return probe.NewError(e)
  2851  		}
  2852  	case replication.RemoveOption:
  2853  		if e := cfg.RemoveRule(opts); e != nil {
  2854  			return probe.NewError(e)
  2855  		}
  2856  	case replication.ImportOption:
  2857  	default:
  2858  		return probe.NewError(fmt.Errorf("Invalid replication option"))
  2859  	}
  2860  	if e := c.api.SetBucketReplication(ctx, bucket, *cfg); e != nil {
  2861  		return probe.NewError(e)
  2862  	}
  2863  	return nil
  2864  }
  2865  
  2866  // GetReplicationMetrics - Get replication metrics for a given bucket.
  2867  func (c *S3Client) GetReplicationMetrics(ctx context.Context) (replication.MetricsV2, *probe.Error) {
  2868  	bucket, _ := c.url2BucketAndObject()
  2869  	if bucket == "" {
  2870  		return replication.MetricsV2{}, probe.NewError(BucketNameEmpty{})
  2871  	}
  2872  
  2873  	metrics, e := c.api.GetBucketReplicationMetricsV2(ctx, bucket)
  2874  	if e != nil {
  2875  		return replication.MetricsV2{}, probe.NewError(e)
  2876  	}
  2877  	return metrics, nil
  2878  }
  2879  
// ResetReplication - kicks off replication again on previously replicated objects if existing object
// replication is enabled in the replication config. Providing a duration ("before") is optional.
  2882  func (c *S3Client) ResetReplication(ctx context.Context, before time.Duration, tgtArn string) (rinfo replication.ResyncTargetsInfo, err *probe.Error) {
  2883  	bucket, _ := c.url2BucketAndObject()
  2884  	if bucket == "" {
  2885  		return rinfo, probe.NewError(BucketNameEmpty{})
  2886  	}
  2887  
  2888  	rinfo, e := c.api.ResetBucketReplicationOnTarget(ctx, bucket, before, tgtArn)
  2889  	if e != nil {
  2890  		return rinfo, probe.NewError(e)
  2891  	}
  2892  	return rinfo, nil
  2893  }
  2894  
  2895  // ReplicationResyncStatus - gets status of replication resync for this target arn
  2896  func (c *S3Client) ReplicationResyncStatus(ctx context.Context, arn string) (rinfo replication.ResyncTargetsInfo, err *probe.Error) {
  2897  	bucket, _ := c.url2BucketAndObject()
  2898  	if bucket == "" {
  2899  		return rinfo, probe.NewError(BucketNameEmpty{})
  2900  	}
  2901  
  2902  	rinfo, e := c.api.GetBucketReplicationResyncStatus(ctx, bucket, arn)
  2903  	if e != nil {
  2904  		return rinfo, probe.NewError(e)
  2905  	}
  2906  	return rinfo, nil
  2907  }
  2908  
  2909  // GetEncryption - gets bucket encryption info.
  2910  func (c *S3Client) GetEncryption(ctx context.Context) (algorithm, keyID string, err *probe.Error) {
  2911  	bucket, _ := c.url2BucketAndObject()
  2912  	if bucket == "" {
  2913  		return "", "", probe.NewError(BucketNameEmpty{})
  2914  	}
  2915  
  2916  	config, e := c.api.GetBucketEncryption(ctx, bucket)
  2917  	if e != nil {
  2918  		return "", "", probe.NewError(e)
  2919  	}
  2920  	for _, rule := range config.Rules {
  2921  		algorithm = rule.Apply.SSEAlgorithm
  2922  		if rule.Apply.KmsMasterKeyID != "" {
  2923  			keyID = rule.Apply.KmsMasterKeyID
  2924  			break
  2925  		}
  2926  	}
  2927  	return algorithm, keyID, nil
  2928  }
  2929  
  2930  // SetEncryption - Set encryption configuration on a bucket
  2931  func (c *S3Client) SetEncryption(ctx context.Context, encType, kmsKeyID string) *probe.Error {
  2932  	bucket, _ := c.url2BucketAndObject()
  2933  	if bucket == "" {
  2934  		return probe.NewError(BucketNameEmpty{})
  2935  	}
  2936  	var config *sse.Configuration
  2937  	switch strings.ToLower(encType) {
  2938  	case "sse-kms":
  2939  		config = sse.NewConfigurationSSEKMS(kmsKeyID)
  2940  	case "sse-s3":
  2941  		config = sse.NewConfigurationSSES3()
  2942  	default:
  2943  		return probe.NewError(fmt.Errorf("Invalid encryption algorithm %s", encType))
  2944  	}
  2945  	if err := c.api.SetBucketEncryption(ctx, bucket, config); err != nil {
  2946  		return probe.NewError(err)
  2947  	}
  2948  	return nil
  2949  }
  2950  
  2951  // DeleteEncryption - removes encryption configuration on a bucket
  2952  func (c *S3Client) DeleteEncryption(ctx context.Context) *probe.Error {
  2953  	bucket, _ := c.url2BucketAndObject()
  2954  	if bucket == "" {
  2955  		return probe.NewError(BucketNameEmpty{})
  2956  	}
  2957  	if err := c.api.RemoveBucketEncryption(ctx, bucket); err != nil {
  2958  		return probe.NewError(err)
  2959  	}
  2960  	return nil
  2961  }
  2962  
  2963  // GetBucketInfo gets info about a bucket
  2964  func (c *S3Client) GetBucketInfo(ctx context.Context) (BucketInfo, *probe.Error) {
  2965  	var b BucketInfo
  2966  	bucket, object := c.url2BucketAndObject()
  2967  	if bucket == "" {
  2968  		return b, probe.NewError(BucketNameEmpty{})
  2969  	}
  2970  	if object != "" {
  2971  		return b, probe.NewError(InvalidArgument{})
  2972  	}
  2973  	content, err := c.bucketStat(ctx, BucketStatOptions{bucket: bucket})
  2974  	if err != nil {
  2975  		return b, err.Trace(bucket)
  2976  	}
  2977  	b.Key = bucket
  2978  	b.URL = content.URL
  2979  	b.Size = content.Size
  2980  	b.Type = content.Type
  2981  	b.Date = content.Time
  2982  	if vcfg, err := c.GetVersion(ctx); err == nil {
  2983  		b.Versioning.Status = vcfg.Status
  2984  		b.Versioning.MFADelete = vcfg.MFADelete
  2985  	}
  2986  	if enabled, mode, validity, unit, err := c.api.GetObjectLockConfig(ctx, bucket); err == nil {
  2987  		if mode != nil {
  2988  			b.Locking.Mode = *mode
  2989  		}
  2990  		b.Locking.Enabled = enabled
  2991  		if validity != nil && unit != nil {
  2992  			vuint64 := uint64(*validity)
  2993  			b.Locking.Validity = fmt.Sprintf("%d%s", vuint64, unit)
  2994  		}
  2995  	}
  2996  
  2997  	if rcfg, err := c.GetReplication(ctx); err == nil {
  2998  		if !rcfg.Empty() {
  2999  			b.Replication.Enabled = true
  3000  		}
  3001  	}
  3002  	if algo, keyID, err := c.GetEncryption(ctx); err == nil {
  3003  		b.Encryption.Algorithm = algo
  3004  		b.Encryption.KeyID = keyID
  3005  	}
  3006  
  3007  	if pType, policyStr, err := c.GetAccess(ctx); err == nil {
  3008  		b.Policy.Type = pType
  3009  		b.Policy.Text = policyStr
  3010  	}
  3011  	location, e := c.api.GetBucketLocation(ctx, bucket)
  3012  	if e != nil {
  3013  		return b, probe.NewError(e)
  3014  	}
  3015  	b.Location = location
  3016  	if tags, err := c.GetTags(ctx, ""); err == nil {
  3017  		b.Tagging = tags
  3018  	}
  3019  	if lfc, _, err := c.GetLifecycle(ctx); err == nil {
  3020  		b.ILM.Config = lfc
  3021  	}
  3022  	if nfc, err := c.api.GetBucketNotification(ctx, bucket); err == nil {
  3023  		b.Notification.Config = nfc
  3024  	}
  3025  	return b, nil
  3026  }
  3027  
  3028  // Restore gets a copy of an archived object
  3029  func (c *S3Client) Restore(ctx context.Context, versionID string, days int) *probe.Error {
  3030  	bucket, object := c.url2BucketAndObject()
  3031  	if bucket == "" {
  3032  		return probe.NewError(BucketNameEmpty{})
  3033  	}
  3034  	if object == "" {
  3035  		return probe.NewError(ObjectNameEmpty{})
  3036  	}
  3037  
  3038  	req := minio.RestoreRequest{}
  3039  	req.SetDays(days)
  3040  	req.SetGlacierJobParameters(minio.GlacierJobParameters{Tier: minio.TierExpedited})
  3041  	if err := c.api.RestoreObject(ctx, bucket, object, versionID, req); err != nil {
  3042  		return probe.NewError(err)
  3043  	}
  3044  	return nil
  3045  }
  3046  
// GetPart gets a reader for the given part number of an object (or the whole object if part is 0).
  3048  func (c *S3Client) GetPart(ctx context.Context, part int) (io.ReadCloser, *probe.Error) {
  3049  	bucket, object := c.url2BucketAndObject()
  3050  	if bucket == "" {
  3051  		return nil, probe.NewError(BucketNameEmpty{})
  3052  	}
  3053  	if object == "" {
  3054  		return nil, probe.NewError(ObjectNameEmpty{})
  3055  	}
  3056  	getOO := minio.GetObjectOptions{}
  3057  	if part > 0 {
  3058  		getOO.PartNumber = part
  3059  	}
  3060  	reader, e := c.api.GetObject(ctx, bucket, object, getOO)
  3061  	if e != nil {
  3062  		return nil, probe.NewError(e)
  3063  	}
  3064  	return reader, nil
  3065  }