github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/s3/s3.go

     1  // Package s3 provides an interface to Amazon S3 object storage
     2  package s3
     3  
     4  //go:generate go run gen_setfrom.go -o setfrom.go
     5  
     6  import (
     7  	"bytes"
     8  	"context"
     9  	"crypto/md5"
    10  	"crypto/tls"
    11  	"encoding/base64"
    12  	"encoding/hex"
    13  	"encoding/json"
    14  	"encoding/xml"
    15  	"errors"
    16  	"fmt"
    17  	"io"
    18  	"math"
    19  	"net/http"
    20  	"net/url"
    21  	"path"
    22  	"regexp"
    23  	"sort"
    24  	"strconv"
    25  	"strings"
    26  	"sync"
    27  	"time"
    28  
    29  	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    30  
    31  	"github.com/aws/aws-sdk-go/aws"
    32  	"github.com/aws/aws-sdk-go/aws/awserr"
    33  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    34  	"github.com/aws/aws-sdk-go/aws/credentials"
    35  	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    36  	"github.com/aws/aws-sdk-go/aws/defaults"
    37  	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    38  	"github.com/aws/aws-sdk-go/aws/endpoints"
    39  	"github.com/aws/aws-sdk-go/aws/request"
    40  	"github.com/aws/aws-sdk-go/aws/session"
    41  	"github.com/aws/aws-sdk-go/service/s3"
    42  	"github.com/ncw/swift/v2"
    43  	"github.com/rclone/rclone/fs"
    44  	"github.com/rclone/rclone/fs/accounting"
    45  	"github.com/rclone/rclone/fs/chunksize"
    46  	"github.com/rclone/rclone/fs/config"
    47  	"github.com/rclone/rclone/fs/config/configmap"
    48  	"github.com/rclone/rclone/fs/config/configstruct"
    49  	"github.com/rclone/rclone/fs/fserrors"
    50  	"github.com/rclone/rclone/fs/fshttp"
    51  	"github.com/rclone/rclone/fs/hash"
    52  	"github.com/rclone/rclone/fs/operations"
    53  	"github.com/rclone/rclone/fs/walk"
    54  	"github.com/rclone/rclone/lib/atexit"
    55  	"github.com/rclone/rclone/lib/bucket"
    56  	"github.com/rclone/rclone/lib/encoder"
    57  	"github.com/rclone/rclone/lib/multipart"
    58  	"github.com/rclone/rclone/lib/pacer"
    59  	"github.com/rclone/rclone/lib/pool"
    60  	"github.com/rclone/rclone/lib/readers"
    61  	"github.com/rclone/rclone/lib/rest"
    62  	"github.com/rclone/rclone/lib/version"
    63  	"golang.org/x/net/http/httpguts"
    64  	"golang.org/x/sync/errgroup"
    65  )
    66  
    67  // The S3 providers
    68  //
    69  // Please keep these in alphabetical order, but with AWS first and
    70  // Other last.
    71  //
    72  // NB if you add a new provider here, then add it in the setQuirks
    73  // function and set the correct quirks. Test the quirks are correct by
    74  // running the integration tests "go test -v -remote NewS3Provider:".
    75  //
    76  // See https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#adding-a-new-s3-provider
    77  // for full information about how to add a new s3 provider.
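        //
        // As a purely illustrative sketch, a hypothetical provider "NewProvider"
        // would get an entry in the Examples list below:
        //
        //	{
        //		Value: "NewProvider",
        //		Help:  "NewProvider Object Storage",
        //	}
        //
        // together with a matching case in setQuirks that sets whatever quirks
        // that provider needs.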
    78  var providerOption = fs.Option{
    79  	Name: fs.ConfigProvider,
    80  	Help: "Choose your S3 provider.",
    81  	Examples: []fs.OptionExample{{
    82  		Value: "AWS",
    83  		Help:  "Amazon Web Services (AWS) S3",
    84  	}, {
    85  		Value: "Alibaba",
    86  		Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
    87  	}, {
    88  		Value: "ArvanCloud",
    89  		Help:  "Arvan Cloud Object Storage (AOS)",
    90  	}, {
    91  		Value: "Ceph",
    92  		Help:  "Ceph Object Storage",
    93  	}, {
    94  		Value: "ChinaMobile",
    95  		Help:  "China Mobile Ecloud Elastic Object Storage (EOS)",
    96  	}, {
    97  		Value: "Cloudflare",
    98  		Help:  "Cloudflare R2 Storage",
    99  	}, {
   100  		Value: "DigitalOcean",
   101  		Help:  "DigitalOcean Spaces",
   102  	}, {
   103  		Value: "Dreamhost",
   104  		Help:  "Dreamhost DreamObjects",
   105  	}, {
   106  		Value: "GCS",
   107  		Help:  "Google Cloud Storage",
   108  	}, {
   109  		Value: "HuaweiOBS",
   110  		Help:  "Huawei Object Storage Service",
   111  	}, {
   112  		Value: "IBMCOS",
   113  		Help:  "IBM COS S3",
   114  	}, {
   115  		Value: "IDrive",
   116  		Help:  "IDrive e2",
   117  	}, {
   118  		Value: "IONOS",
   119  		Help:  "IONOS Cloud",
   120  	}, {
   121  		Value: "LyveCloud",
   122  		Help:  "Seagate Lyve Cloud",
   123  	}, {
   124  		Value: "Leviia",
   125  		Help:  "Leviia Object Storage",
   126  	}, {
   127  		Value: "Liara",
   128  		Help:  "Liara Object Storage",
   129  	}, {
   130  		Value: "Linode",
   131  		Help:  "Linode Object Storage",
   132  	}, {
   133  		Value: "Minio",
   134  		Help:  "Minio Object Storage",
   135  	}, {
   136  		Value: "Netease",
   137  		Help:  "Netease Object Storage (NOS)",
   138  	}, {
   139  		Value: "Petabox",
   140  		Help:  "Petabox Object Storage",
   141  	}, {
   142  		Value: "RackCorp",
   143  		Help:  "RackCorp Object Storage",
   144  	}, {
   145  		Value: "Rclone",
   146  		Help:  "Rclone S3 Server",
   147  	}, {
   148  		Value: "Scaleway",
   149  		Help:  "Scaleway Object Storage",
   150  	}, {
   151  		Value: "SeaweedFS",
   152  		Help:  "SeaweedFS S3",
   153  	}, {
   154  		Value: "StackPath",
   155  		Help:  "StackPath Object Storage",
   156  	}, {
   157  		Value: "Storj",
   158  		Help:  "Storj (S3 Compatible Gateway)",
   159  	}, {
   160  		Value: "Synology",
   161  		Help:  "Synology C2 Object Storage",
   162  	}, {
   163  		Value: "TencentCOS",
   164  		Help:  "Tencent Cloud Object Storage (COS)",
   165  	}, {
   166  		Value: "Wasabi",
   167  		Help:  "Wasabi Object Storage",
   168  	}, {
   169  		Value: "Qiniu",
   170  		Help:  "Qiniu Object Storage (Kodo)",
   171  	}, {
   172  		Value: "Other",
   173  		Help:  "Any other S3 compatible provider",
   174  	}},
   175  }
   176  
   177  var providersList string
   178  
   179  // Register with Fs
   180  func init() {
   181  	var s strings.Builder
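        	// Build a human readable, comma separated list of the providers above,
        	// ending in " and others", for use in the registration Description below.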
   182  	for i, provider := range providerOption.Examples {
   183  		if provider.Value == "Other" {
   184  			_, _ = s.WriteString(" and others")
   185  		} else {
   186  			if i != 0 {
   187  				_, _ = s.WriteString(", ")
   188  			}
   189  			_, _ = s.WriteString(provider.Value)
   190  		}
   191  	}
   192  	providersList = s.String()
   193  	fs.Register(&fs.RegInfo{
   194  		Name:        "s3",
   195  		Description: "Amazon S3 Compliant Storage Providers including " + providersList,
   196  		NewFs:       NewFs,
   197  		CommandHelp: commandHelp,
   198  		Config: func(ctx context.Context, name string, m configmap.Mapper, config fs.ConfigIn) (*fs.ConfigOut, error) {
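        			// The only state handled is the initial "" state, which fixes up the
        			// endpoint when the IDrive e2 provider was chosen (see
        			// setEndpointValueForIDriveE2); any other state is an error.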
   199  			switch config.State {
   200  			case "":
   201  				return nil, setEndpointValueForIDriveE2(m)
   202  			}
   203  			return nil, fmt.Errorf("unknown state %q", config.State)
   204  		},
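        		// For example, a user metadata key set as "Potato" is stored as the
        		// x-amz-meta-potato header and will be read back as "potato" (lower case).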
   205  		MetadataInfo: &fs.MetadataInfo{
   206  			System: systemMetadataInfo,
   207  			Help:   `User metadata is stored as x-amz-meta- keys. S3 metadata keys are case insensitive and are always returned in lower case.`,
   208  		},
   209  		Options: []fs.Option{providerOption, {
   210  			Name:    "env_auth",
   211  			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS metadata if no env vars).\n\nOnly applies if access_key_id and secret_access_key are blank.",
   212  			Default: false,
   213  			Examples: []fs.OptionExample{{
   214  				Value: "false",
   215  				Help:  "Enter AWS credentials in the next step.",
   216  			}, {
   217  				Value: "true",
   218  				Help:  "Get AWS credentials from the environment (env vars or IAM).",
   219  			}},
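        			// A minimal sketch of an rclone.conf remote that relies on runtime
        			// credentials (the remote name "s3-env" and the region are illustrative):
        			//
        			//	[s3-env]
        			//	type = s3
        			//	provider = AWS
        			//	env_auth = true
        			//	region = us-east-1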
   220  		}, {
   221  			Name:      "access_key_id",
   222  			Help:      "AWS Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
   223  			Sensitive: true,
   224  		}, {
   225  			Name:      "secret_access_key",
   226  			Help:      "AWS Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
   227  			Sensitive: true,
   228  		}, {
   229  			// References:
   230  			// 1. https://docs.aws.amazon.com/general/latest/gr/rande.html
   231  			// 2. https://docs.aws.amazon.com/general/latest/gr/s3.html
   232  			Name:     "region",
   233  			Help:     "Region to connect to.",
   234  			Provider: "AWS",
   235  			Examples: []fs.OptionExample{{
   236  				Value: "us-east-1",
   237  				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.",
   238  			}, {
   239  				Value: "us-east-2",
   240  				Help:  "US East (Ohio) Region.\nNeeds location constraint us-east-2.",
   241  			}, {
   242  				Value: "us-west-1",
   243  				Help:  "US West (Northern California) Region.\nNeeds location constraint us-west-1.",
   244  			}, {
   245  				Value: "us-west-2",
   246  				Help:  "US West (Oregon) Region.\nNeeds location constraint us-west-2.",
   247  			}, {
   248  				Value: "ca-central-1",
   249  				Help:  "Canada (Central) Region.\nNeeds location constraint ca-central-1.",
   250  			}, {
   251  				Value: "eu-west-1",
   252  				Help:  "EU (Ireland) Region.\nNeeds location constraint EU or eu-west-1.",
   253  			}, {
   254  				Value: "eu-west-2",
   255  				Help:  "EU (London) Region.\nNeeds location constraint eu-west-2.",
   256  			}, {
   257  				Value: "eu-west-3",
   258  				Help:  "EU (Paris) Region.\nNeeds location constraint eu-west-3.",
   259  			}, {
   260  				Value: "eu-north-1",
   261  				Help:  "EU (Stockholm) Region.\nNeeds location constraint eu-north-1.",
   262  			}, {
   263  				Value: "eu-south-1",
   264  				Help:  "EU (Milan) Region.\nNeeds location constraint eu-south-1.",
   265  			}, {
   266  				Value: "eu-central-1",
   267  				Help:  "EU (Frankfurt) Region.\nNeeds location constraint eu-central-1.",
   268  			}, {
   269  				Value: "ap-southeast-1",
   270  				Help:  "Asia Pacific (Singapore) Region.\nNeeds location constraint ap-southeast-1.",
   271  			}, {
   272  				Value: "ap-southeast-2",
   273  				Help:  "Asia Pacific (Sydney) Region.\nNeeds location constraint ap-southeast-2.",
   274  			}, {
   275  				Value: "ap-northeast-1",
   276  				Help:  "Asia Pacific (Tokyo) Region.\nNeeds location constraint ap-northeast-1.",
   277  			}, {
   278  				Value: "ap-northeast-2",
   279  				Help:  "Asia Pacific (Seoul).\nNeeds location constraint ap-northeast-2.",
   280  			}, {
   281  				Value: "ap-northeast-3",
   282  				Help:  "Asia Pacific (Osaka-Local).\nNeeds location constraint ap-northeast-3.",
   283  			}, {
   284  				Value: "ap-south-1",
   285  				Help:  "Asia Pacific (Mumbai).\nNeeds location constraint ap-south-1.",
   286  			}, {
   287  				Value: "ap-east-1",
   288  				Help:  "Asia Pacific (Hong Kong) Region.\nNeeds location constraint ap-east-1.",
   289  			}, {
   290  				Value: "sa-east-1",
   291  				Help:  "South America (Sao Paulo) Region.\nNeeds location constraint sa-east-1.",
   292  			}, {
   293  				Value: "il-central-1",
   294  				Help:  "Israel (Tel Aviv) Region.\nNeeds location constraint il-central-1.",
   295  			}, {
   296  				Value: "me-south-1",
   297  				Help:  "Middle East (Bahrain) Region.\nNeeds location constraint me-south-1.",
   298  			}, {
   299  				Value: "af-south-1",
   300  				Help:  "Africa (Cape Town) Region.\nNeeds location constraint af-south-1.",
   301  			}, {
   302  				Value: "cn-north-1",
   303  				Help:  "China (Beijing) Region.\nNeeds location constraint cn-north-1.",
   304  			}, {
   305  				Value: "cn-northwest-1",
   306  				Help:  "China (Ningxia) Region.\nNeeds location constraint cn-northwest-1.",
   307  			}, {
   308  				Value: "us-gov-east-1",
   309  				Help:  "AWS GovCloud (US-East) Region.\nNeeds location constraint us-gov-east-1.",
   310  			}, {
   311  				Value: "us-gov-west-1",
   312  				Help:  "AWS GovCloud (US) Region.\nNeeds location constraint us-gov-west-1.",
   313  			}},
   314  		}, {
   315  			Name:     "region",
   316  			Help:     "Region - the location where your bucket will be created and your data stored.\n",
   317  			Provider: "RackCorp",
   318  			Examples: []fs.OptionExample{{
   319  				Value: "global",
   320  				Help:  "Global CDN (All locations) Region",
   321  			}, {
   322  				Value: "au",
   323  				Help:  "Australia (All states)",
   324  			}, {
   325  				Value: "au-nsw",
   326  				Help:  "NSW (Australia) Region",
   327  			}, {
   328  				Value: "au-qld",
   329  				Help:  "QLD (Australia) Region",
   330  			}, {
   331  				Value: "au-vic",
   332  				Help:  "VIC (Australia) Region",
   333  			}, {
   334  				Value: "au-wa",
   335  				Help:  "Perth (Australia) Region",
   336  			}, {
   337  				Value: "ph",
   338  				Help:  "Manila (Philippines) Region",
   339  			}, {
   340  				Value: "th",
   341  				Help:  "Bangkok (Thailand) Region",
   342  			}, {
   343  				Value: "hk",
   344  				Help:  "HK (Hong Kong) Region",
   345  			}, {
   346  				Value: "mn",
   347  				Help:  "Ulaanbaatar (Mongolia) Region",
   348  			}, {
   349  				Value: "kg",
   350  				Help:  "Bishkek (Kyrgyzstan) Region",
   351  			}, {
   352  				Value: "id",
   353  				Help:  "Jakarta (Indonesia) Region",
   354  			}, {
   355  				Value: "jp",
   356  				Help:  "Tokyo (Japan) Region",
   357  			}, {
   358  				Value: "sg",
   359  				Help:  "SG (Singapore) Region",
   360  			}, {
   361  				Value: "de",
   362  				Help:  "Frankfurt (Germany) Region",
   363  			}, {
   364  				Value: "us",
   365  				Help:  "USA (AnyCast) Region",
   366  			}, {
   367  				Value: "us-east-1",
   368  				Help:  "New York (USA) Region",
   369  			}, {
   370  				Value: "us-west-1",
   371  				Help:  "Fremont (USA) Region",
   372  			}, {
   373  				Value: "nz",
   374  				Help:  "Auckland (New Zealand) Region",
   375  			}},
   376  		}, {
   377  			Name:     "region",
   378  			Help:     "Region to connect to.",
   379  			Provider: "Scaleway",
   380  			Examples: []fs.OptionExample{{
   381  				Value: "nl-ams",
   382  				Help:  "Amsterdam, The Netherlands",
   383  			}, {
   384  				Value: "fr-par",
   385  				Help:  "Paris, France",
   386  			}, {
   387  				Value: "pl-waw",
   388  				Help:  "Warsaw, Poland",
   389  			}},
   390  		}, {
   391  			Name:     "region",
   392  			Help:     "Region to connect to - the location where your bucket will be created and your data stored. Needs to be the same as your endpoint.\n",
   393  			Provider: "HuaweiOBS",
   394  			Examples: []fs.OptionExample{{
   395  				Value: "af-south-1",
   396  				Help:  "AF-Johannesburg",
   397  			}, {
   398  				Value: "ap-southeast-2",
   399  				Help:  "AP-Bangkok",
   400  			}, {
   401  				Value: "ap-southeast-3",
   402  				Help:  "AP-Singapore",
   403  			}, {
   404  				Value: "cn-east-3",
   405  				Help:  "CN East-Shanghai1",
   406  			}, {
   407  				Value: "cn-east-2",
   408  				Help:  "CN East-Shanghai2",
   409  			}, {
   410  				Value: "cn-north-1",
   411  				Help:  "CN North-Beijing1",
   412  			}, {
   413  				Value: "cn-north-4",
   414  				Help:  "CN North-Beijing4",
   415  			}, {
   416  				Value: "cn-south-1",
   417  				Help:  "CN South-Guangzhou",
   418  			}, {
   419  				Value: "ap-southeast-1",
   420  				Help:  "CN-Hong Kong",
   421  			}, {
   422  				Value: "sa-argentina-1",
   423  				Help:  "LA-Buenos Aires1",
   424  			}, {
   425  				Value: "sa-peru-1",
   426  				Help:  "LA-Lima1",
   427  			}, {
   428  				Value: "na-mexico-1",
   429  				Help:  "LA-Mexico City1",
   430  			}, {
   431  				Value: "sa-chile-1",
   432  				Help:  "LA-Santiago2",
   433  			}, {
   434  				Value: "sa-brazil-1",
   435  				Help:  "LA-Sao Paulo1",
   436  			}, {
   437  				Value: "ru-northwest-2",
   438  				Help:  "RU-Moscow2",
   439  			}},
   440  		}, {
   441  			Name:     "region",
   442  			Help:     "Region to connect to.",
   443  			Provider: "Cloudflare",
   444  			Examples: []fs.OptionExample{{
   445  				Value: "auto",
   446  				Help:  "R2 buckets are automatically distributed across Cloudflare's data centers for low latency.",
   447  			}},
   448  		}, {
   449  			// References:
   450  			// https://developer.qiniu.com/kodo/4088/s3-access-domainname
   451  			Name:     "region",
   452  			Help:     "Region to connect to.",
   453  			Provider: "Qiniu",
   454  			Examples: []fs.OptionExample{{
   455  				Value: "cn-east-1",
   456  				Help:  "The default endpoint - a good choice if you are unsure.\nEast China Region 1.\nNeeds location constraint cn-east-1.",
   457  			}, {
   458  				Value: "cn-east-2",
   459  				Help:  "East China Region 2.\nNeeds location constraint cn-east-2.",
   460  			}, {
   461  				Value: "cn-north-1",
   462  				Help:  "North China Region 1.\nNeeds location constraint cn-north-1.",
   463  			}, {
   464  				Value: "cn-south-1",
   465  				Help:  "South China Region 1.\nNeeds location constraint cn-south-1.",
   466  			}, {
   467  				Value: "us-north-1",
   468  				Help:  "North America Region.\nNeeds location constraint us-north-1.",
   469  			}, {
   470  				Value: "ap-southeast-1",
   471  				Help:  "Southeast Asia Region 1.\nNeeds location constraint ap-southeast-1.",
   472  			}, {
   473  				Value: "ap-northeast-1",
   474  				Help:  "Northeast Asia Region 1.\nNeeds location constraint ap-northeast-1.",
   475  			}},
   476  		}, {
   477  			Name:     "region",
   478  			Help:     "Region where your bucket will be created and your data stored.\n",
   479  			Provider: "IONOS",
   480  			Examples: []fs.OptionExample{{
   481  				Value: "de",
   482  				Help:  "Frankfurt, Germany",
   483  			}, {
   484  				Value: "eu-central-2",
   485  				Help:  "Berlin, Germany",
   486  			}, {
   487  				Value: "eu-south-2",
   488  				Help:  "Logrono, Spain",
   489  			}},
   490  		}, {
   491  			Name:     "region",
   492  			Help:     "Region where your bucket will be created and your data stored.\n",
   493  			Provider: "Petabox",
   494  			Examples: []fs.OptionExample{{
   495  				Value: "us-east-1",
   496  				Help:  "US East (N. Virginia)",
   497  			}, {
   498  				Value: "eu-central-1",
   499  				Help:  "Europe (Frankfurt)",
   500  			}, {
   501  				Value: "ap-southeast-1",
   502  				Help:  "Asia Pacific (Singapore)",
   503  			}, {
   504  				Value: "me-south-1",
   505  				Help:  "Middle East (Bahrain)",
   506  			}, {
   507  				Value: "sa-east-1",
   508  				Help:  "South America (São Paulo)",
   509  			}},
   510  		}, {
   511  			Name:     "region",
   512  			Help:     "Region where your data is stored.\n",
   513  			Provider: "Synology",
   514  			Examples: []fs.OptionExample{{
   515  				Value: "eu-001",
   516  				Help:  "Europe Region 1",
   517  			}, {
   518  				Value: "eu-002",
   519  				Help:  "Europe Region 2",
   520  			}, {
   521  				Value: "us-001",
   522  				Help:  "US Region 1",
   523  			}, {
   524  				Value: "us-002",
   525  				Help:  "US Region 2",
   526  			}, {
   527  				Value: "tw-001",
   528  				Help:  "Asia (Taiwan)",
   529  			}},
   530  		}, {
   531  			Name:     "region",
   532  			Help:     "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
   533  			Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,IONOS,Petabox,Liara,Linode,Qiniu,RackCorp,Scaleway,Storj,Synology,TencentCOS,HuaweiOBS,IDrive",
   534  			Examples: []fs.OptionExample{{
   535  				Value: "",
   536  				Help:  "Use this if unsure.\nWill use v4 signatures and an empty region.",
   537  			}, {
   538  				Value: "other-v2-signature",
   539  				Help:  "Use this only if v4 signatures don't work.\nE.g. pre Jewel/v10 CEPH.",
   540  			}},
   541  		}, {
   542  			Name:     "endpoint",
   543  			Help:     "Endpoint for S3 API.\n\nLeave blank if using AWS to use the default endpoint for the region.",
   544  			Provider: "AWS",
   545  		}, {
   546  			// ChinaMobile endpoints: https://ecloud.10086.cn/op-help-center/doc/article/24534
   547  			Name:     "endpoint",
   548  			Help:     "Endpoint for China Mobile Ecloud Elastic Object Storage (EOS) API.",
   549  			Provider: "ChinaMobile",
   550  			Examples: []fs.OptionExample{{
   551  				Value: "eos-wuxi-1.cmecloud.cn",
   552  				Help:  "The default endpoint - a good choice if you are unsure.\nEast China (Suzhou)",
   553  			}, {
   554  				Value: "eos-jinan-1.cmecloud.cn",
   555  				Help:  "East China (Jinan)",
   556  			}, {
   557  				Value: "eos-ningbo-1.cmecloud.cn",
   558  				Help:  "East China (Hangzhou)",
   559  			}, {
   560  				Value: "eos-shanghai-1.cmecloud.cn",
   561  				Help:  "East China (Shanghai-1)",
   562  			}, {
   563  				Value: "eos-zhengzhou-1.cmecloud.cn",
   564  				Help:  "Central China (Zhengzhou)",
   565  			}, {
   566  				Value: "eos-hunan-1.cmecloud.cn",
   567  				Help:  "Central China (Changsha-1)",
   568  			}, {
   569  				Value: "eos-zhuzhou-1.cmecloud.cn",
   570  				Help:  "Central China (Changsha-2)",
   571  			}, {
   572  				Value: "eos-guangzhou-1.cmecloud.cn",
   573  				Help:  "South China (Guangzhou-2)",
   574  			}, {
   575  				Value: "eos-dongguan-1.cmecloud.cn",
   576  				Help:  "South China (Guangzhou-3)",
   577  			}, {
   578  				Value: "eos-beijing-1.cmecloud.cn",
   579  				Help:  "North China (Beijing-1)",
   580  			}, {
   581  				Value: "eos-beijing-2.cmecloud.cn",
   582  				Help:  "North China (Beijing-2)",
   583  			}, {
   584  				Value: "eos-beijing-4.cmecloud.cn",
   585  				Help:  "North China (Beijing-3)",
   586  			}, {
   587  				Value: "eos-huhehaote-1.cmecloud.cn",
   588  				Help:  "North China (Huhehaote)",
   589  			}, {
   590  				Value: "eos-chengdu-1.cmecloud.cn",
   591  				Help:  "Southwest China (Chengdu)",
   592  			}, {
   593  				Value: "eos-chongqing-1.cmecloud.cn",
   594  				Help:  "Southwest China (Chongqing)",
   595  			}, {
   596  				Value: "eos-guiyang-1.cmecloud.cn",
   597  				Help:  "Southwest China (Guiyang)",
   598  			}, {
   599  				Value: "eos-xian-1.cmecloud.cn",
   600  				Help:  "Northwest China (Xian)",
   601  			}, {
   602  				Value: "eos-yunnan.cmecloud.cn",
   603  				Help:  "Yunnan China (Kunming)",
   604  			}, {
   605  				Value: "eos-yunnan-2.cmecloud.cn",
   606  				Help:  "Yunnan China (Kunming-2)",
   607  			}, {
   608  				Value: "eos-tianjin-1.cmecloud.cn",
   609  				Help:  "Tianjin China (Tianjin)",
   610  			}, {
   611  				Value: "eos-jilin-1.cmecloud.cn",
   612  				Help:  "Jilin China (Changchun)",
   613  			}, {
   614  				Value: "eos-hubei-1.cmecloud.cn",
   615  				Help:  "Hubei China (Xiangyan)",
   616  			}, {
   617  				Value: "eos-jiangxi-1.cmecloud.cn",
   618  				Help:  "Jiangxi China (Nanchang)",
   619  			}, {
   620  				Value: "eos-gansu-1.cmecloud.cn",
   621  				Help:  "Gansu China (Lanzhou)",
   622  			}, {
   623  				Value: "eos-shanxi-1.cmecloud.cn",
   624  				Help:  "Shanxi China (Taiyuan)",
   625  			}, {
   626  				Value: "eos-liaoning-1.cmecloud.cn",
   627  				Help:  "Liaoning China (Shenyang)",
   628  			}, {
   629  				Value: "eos-hebei-1.cmecloud.cn",
   630  				Help:  "Hebei China (Shijiazhuang)",
   631  			}, {
   632  				Value: "eos-fujian-1.cmecloud.cn",
   633  				Help:  "Fujian China (Xiamen)",
   634  			}, {
   635  				Value: "eos-guangxi-1.cmecloud.cn",
   636  				Help:  "Guangxi China (Nanning)",
   637  			}, {
   638  				Value: "eos-anhui-1.cmecloud.cn",
   639  				Help:  "Anhui China (Huainan)",
   640  			}},
   641  		}, {
   642  			// ArvanCloud endpoints: https://www.arvancloud.ir/en/products/cloud-storage
   643  			Name:     "endpoint",
   644  			Help:     "Endpoint for Arvan Cloud Object Storage (AOS) API.",
   645  			Provider: "ArvanCloud",
   646  			Examples: []fs.OptionExample{{
   647  				Value: "s3.ir-thr-at1.arvanstorage.ir",
   648  				Help:  "The default endpoint - a good choice if you are unsure.\nTehran Iran (Simin)",
   649  			}, {
   650  				Value: "s3.ir-tbz-sh1.arvanstorage.ir",
   651  				Help:  "Tabriz Iran (Shahriar)",
   652  			}},
   653  		}, {
   654  			Name:     "endpoint",
   655  			Help:     "Endpoint for IBM COS S3 API.\n\nSpecify if using IBM COS On Premise.",
   656  			Provider: "IBMCOS",
   657  			Examples: []fs.OptionExample{{
   658  				Value: "s3.us.cloud-object-storage.appdomain.cloud",
   659  				Help:  "US Cross Region Endpoint",
   660  			}, {
   661  				Value: "s3.dal.us.cloud-object-storage.appdomain.cloud",
   662  				Help:  "US Cross Region Dallas Endpoint",
   663  			}, {
   664  				Value: "s3.wdc.us.cloud-object-storage.appdomain.cloud",
   665  				Help:  "US Cross Region Washington DC Endpoint",
   666  			}, {
   667  				Value: "s3.sjc.us.cloud-object-storage.appdomain.cloud",
   668  				Help:  "US Cross Region San Jose Endpoint",
   669  			}, {
   670  				Value: "s3.private.us.cloud-object-storage.appdomain.cloud",
   671  				Help:  "US Cross Region Private Endpoint",
   672  			}, {
   673  				Value: "s3.private.dal.us.cloud-object-storage.appdomain.cloud",
   674  				Help:  "US Cross Region Dallas Private Endpoint",
   675  			}, {
   676  				Value: "s3.private.wdc.us.cloud-object-storage.appdomain.cloud",
   677  				Help:  "US Cross Region Washington DC Private Endpoint",
   678  			}, {
   679  				Value: "s3.private.sjc.us.cloud-object-storage.appdomain.cloud",
   680  				Help:  "US Cross Region San Jose Private Endpoint",
   681  			}, {
   682  				Value: "s3.us-east.cloud-object-storage.appdomain.cloud",
   683  				Help:  "US Region East Endpoint",
   684  			}, {
   685  				Value: "s3.private.us-east.cloud-object-storage.appdomain.cloud",
   686  				Help:  "US Region East Private Endpoint",
   687  			}, {
   688  				Value: "s3.us-south.cloud-object-storage.appdomain.cloud",
   689  				Help:  "US Region South Endpoint",
   690  			}, {
   691  				Value: "s3.private.us-south.cloud-object-storage.appdomain.cloud",
   692  				Help:  "US Region South Private Endpoint",
   693  			}, {
   694  				Value: "s3.eu.cloud-object-storage.appdomain.cloud",
   695  				Help:  "EU Cross Region Endpoint",
   696  			}, {
   697  				Value: "s3.fra.eu.cloud-object-storage.appdomain.cloud",
   698  				Help:  "EU Cross Region Frankfurt Endpoint",
   699  			}, {
   700  				Value: "s3.mil.eu.cloud-object-storage.appdomain.cloud",
   701  				Help:  "EU Cross Region Milan Endpoint",
   702  			}, {
   703  				Value: "s3.ams.eu.cloud-object-storage.appdomain.cloud",
   704  				Help:  "EU Cross Region Amsterdam Endpoint",
   705  			}, {
   706  				Value: "s3.private.eu.cloud-object-storage.appdomain.cloud",
   707  				Help:  "EU Cross Region Private Endpoint",
   708  			}, {
   709  				Value: "s3.private.fra.eu.cloud-object-storage.appdomain.cloud",
   710  				Help:  "EU Cross Region Frankfurt Private Endpoint",
   711  			}, {
   712  				Value: "s3.private.mil.eu.cloud-object-storage.appdomain.cloud",
   713  				Help:  "EU Cross Region Milan Private Endpoint",
   714  			}, {
   715  				Value: "s3.private.ams.eu.cloud-object-storage.appdomain.cloud",
   716  				Help:  "EU Cross Region Amsterdam Private Endpoint",
   717  			}, {
   718  				Value: "s3.eu-gb.cloud-object-storage.appdomain.cloud",
   719  				Help:  "Great Britain Endpoint",
   720  			}, {
   721  				Value: "s3.private.eu-gb.cloud-object-storage.appdomain.cloud",
   722  				Help:  "Great Britain Private Endpoint",
   723  			}, {
   724  				Value: "s3.eu-de.cloud-object-storage.appdomain.cloud",
   725  				Help:  "EU Region DE Endpoint",
   726  			}, {
   727  				Value: "s3.private.eu-de.cloud-object-storage.appdomain.cloud",
   728  				Help:  "EU Region DE Private Endpoint",
   729  			}, {
   730  				Value: "s3.ap.cloud-object-storage.appdomain.cloud",
   731  				Help:  "APAC Cross Regional Endpoint",
   732  			}, {
   733  				Value: "s3.tok.ap.cloud-object-storage.appdomain.cloud",
   734  				Help:  "APAC Cross Regional Tokyo Endpoint",
   735  			}, {
   736  				Value: "s3.hkg.ap.cloud-object-storage.appdomain.cloud",
   737  				Help:  "APAC Cross Regional Hong Kong Endpoint",
   738  			}, {
   739  				Value: "s3.seo.ap.cloud-object-storage.appdomain.cloud",
   740  				Help:  "APAC Cross Regional Seoul Endpoint",
   741  			}, {
   742  				Value: "s3.private.ap.cloud-object-storage.appdomain.cloud",
   743  				Help:  "APAC Cross Regional Private Endpoint",
   744  			}, {
   745  				Value: "s3.private.tok.ap.cloud-object-storage.appdomain.cloud",
   746  				Help:  "APAC Cross Regional Tokyo Private Endpoint",
   747  			}, {
   748  				Value: "s3.private.hkg.ap.cloud-object-storage.appdomain.cloud",
   749  				Help:  "APAC Cross Regional Hong Kong Private Endpoint",
   750  			}, {
   751  				Value: "s3.private.seo.ap.cloud-object-storage.appdomain.cloud",
   752  				Help:  "APAC Cross Regional Seoul Private Endpoint",
   753  			}, {
   754  				Value: "s3.jp-tok.cloud-object-storage.appdomain.cloud",
   755  				Help:  "APAC Region Japan Endpoint",
   756  			}, {
   757  				Value: "s3.private.jp-tok.cloud-object-storage.appdomain.cloud",
   758  				Help:  "APAC Region Japan Private Endpoint",
   759  			}, {
   760  				Value: "s3.au-syd.cloud-object-storage.appdomain.cloud",
   761  				Help:  "APAC Region Australia Endpoint",
   762  			}, {
   763  				Value: "s3.private.au-syd.cloud-object-storage.appdomain.cloud",
   764  				Help:  "APAC Region Australia Private Endpoint",
   765  			}, {
   766  				Value: "s3.ams03.cloud-object-storage.appdomain.cloud",
   767  				Help:  "Amsterdam Single Site Endpoint",
   768  			}, {
   769  				Value: "s3.private.ams03.cloud-object-storage.appdomain.cloud",
   770  				Help:  "Amsterdam Single Site Private Endpoint",
   771  			}, {
   772  				Value: "s3.che01.cloud-object-storage.appdomain.cloud",
   773  				Help:  "Chennai Single Site Endpoint",
   774  			}, {
   775  				Value: "s3.private.che01.cloud-object-storage.appdomain.cloud",
   776  				Help:  "Chennai Single Site Private Endpoint",
   777  			}, {
   778  				Value: "s3.mel01.cloud-object-storage.appdomain.cloud",
   779  				Help:  "Melbourne Single Site Endpoint",
   780  			}, {
   781  				Value: "s3.private.mel01.cloud-object-storage.appdomain.cloud",
   782  				Help:  "Melbourne Single Site Private Endpoint",
   783  			}, {
   784  				Value: "s3.osl01.cloud-object-storage.appdomain.cloud",
   785  				Help:  "Oslo Single Site Endpoint",
   786  			}, {
   787  				Value: "s3.private.osl01.cloud-object-storage.appdomain.cloud",
   788  				Help:  "Oslo Single Site Private Endpoint",
   789  			}, {
   790  				Value: "s3.tor01.cloud-object-storage.appdomain.cloud",
   791  				Help:  "Toronto Single Site Endpoint",
   792  			}, {
   793  				Value: "s3.private.tor01.cloud-object-storage.appdomain.cloud",
   794  				Help:  "Toronto Single Site Private Endpoint",
   795  			}, {
   796  				Value: "s3.seo01.cloud-object-storage.appdomain.cloud",
   797  				Help:  "Seoul Single Site Endpoint",
   798  			}, {
   799  				Value: "s3.private.seo01.cloud-object-storage.appdomain.cloud",
   800  				Help:  "Seoul Single Site Private Endpoint",
   801  			}, {
   802  				Value: "s3.mon01.cloud-object-storage.appdomain.cloud",
   803  				Help:  "Montreal Single Site Endpoint",
   804  			}, {
   805  				Value: "s3.private.mon01.cloud-object-storage.appdomain.cloud",
   806  				Help:  "Montreal Single Site Private Endpoint",
   807  			}, {
   808  				Value: "s3.mex01.cloud-object-storage.appdomain.cloud",
   809  				Help:  "Mexico Single Site Endpoint",
   810  			}, {
   811  				Value: "s3.private.mex01.cloud-object-storage.appdomain.cloud",
   812  				Help:  "Mexico Single Site Private Endpoint",
   813  			}, {
   814  				Value: "s3.sjc04.cloud-object-storage.appdomain.cloud",
   815  				Help:  "San Jose Single Site Endpoint",
   816  			}, {
   817  				Value: "s3.private.sjc04.cloud-object-storage.appdomain.cloud",
   818  				Help:  "San Jose Single Site Private Endpoint",
   819  			}, {
   820  				Value: "s3.mil01.cloud-object-storage.appdomain.cloud",
   821  				Help:  "Milan Single Site Endpoint",
   822  			}, {
   823  				Value: "s3.private.mil01.cloud-object-storage.appdomain.cloud",
   824  				Help:  "Milan Single Site Private Endpoint",
   825  			}, {
   826  				Value: "s3.hkg02.cloud-object-storage.appdomain.cloud",
   827  				Help:  "Hong Kong Single Site Endpoint",
   828  			}, {
   829  				Value: "s3.private.hkg02.cloud-object-storage.appdomain.cloud",
   830  				Help:  "Hong Kong Single Site Private Endpoint",
   831  			}, {
   832  				Value: "s3.par01.cloud-object-storage.appdomain.cloud",
   833  				Help:  "Paris Single Site Endpoint",
   834  			}, {
   835  				Value: "s3.private.par01.cloud-object-storage.appdomain.cloud",
   836  				Help:  "Paris Single Site Private Endpoint",
   837  			}, {
   838  				Value: "s3.sng01.cloud-object-storage.appdomain.cloud",
   839  				Help:  "Singapore Single Site Endpoint",
   840  			}, {
   841  				Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
   842  				Help:  "Singapore Single Site Private Endpoint",
   843  			}},
   844  		}, {
   845  			Name:     "endpoint",
   846  			Help:     "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
   847  			Provider: "IONOS",
   848  			Examples: []fs.OptionExample{{
   849  				Value: "s3-eu-central-1.ionoscloud.com",
   850  				Help:  "Frankfurt, Germany",
   851  			}, {
   852  				Value: "s3-eu-central-2.ionoscloud.com",
   853  				Help:  "Berlin, Germany",
   854  			}, {
   855  				Value: "s3-eu-south-2.ionoscloud.com",
   856  				Help:  "Logrono, Spain",
   857  			}},
   858  		}, {
   859  			Name:     "endpoint",
   860  			Help:     "Endpoint for Petabox S3 Object Storage.\n\nSpecify the endpoint from the same region.",
   861  			Provider: "Petabox",
   862  			Required: true,
   863  			Examples: []fs.OptionExample{{
   864  				Value: "s3.petabox.io",
   865  				Help:  "US East (N. Virginia)",
   866  			}, {
   867  				Value: "s3.us-east-1.petabox.io",
   868  				Help:  "US East (N. Virginia)",
   869  			}, {
   870  				Value: "s3.eu-central-1.petabox.io",
   871  				Help:  "Europe (Frankfurt)",
   872  			}, {
   873  				Value: "s3.ap-southeast-1.petabox.io",
   874  				Help:  "Asia Pacific (Singapore)",
   875  			}, {
   876  				Value: "s3.me-south-1.petabox.io",
   877  				Help:  "Middle East (Bahrain)",
   878  			}, {
   879  				Value: "s3.sa-east-1.petabox.io",
   880  				Help:  "South America (São Paulo)",
   881  			}},
   882  		}, {
   883  			// Leviia endpoints: https://www.leviia.com/object-storage/
   884  			Name:     "endpoint",
   885  			Help:     "Endpoint for Leviia Object Storage API.",
   886  			Provider: "Leviia",
   887  			Examples: []fs.OptionExample{{
   888  				Value: "s3.leviia.com",
   889  				Help:  "The default endpoint\nLeviia",
   890  			}},
   891  		}, {
   892  			// Liara endpoints: https://liara.ir/landing/object-storage
   893  			Name:     "endpoint",
   894  			Help:     "Endpoint for Liara Object Storage API.",
   895  			Provider: "Liara",
   896  			Examples: []fs.OptionExample{{
   897  				Value: "storage.iran.liara.space",
   898  				Help:  "The default endpoint\nIran",
   899  			}},
   900  		}, {
   901  			// Linode endpoints: https://www.linode.com/docs/products/storage/object-storage/guides/urls/#cluster-url-s3-endpoint
   902  			Name:     "endpoint",
   903  			Help:     "Endpoint for Linode Object Storage API.",
   904  			Provider: "Linode",
   905  			Examples: []fs.OptionExample{{
   906  				Value: "us-southeast-1.linodeobjects.com",
   907  				Help:  "Atlanta, GA (USA), us-southeast-1",
   908  			}, {
   909  				Value: "us-ord-1.linodeobjects.com",
   910  				Help:  "Chicago, IL (USA), us-ord-1",
   911  			}, {
   912  				Value: "eu-central-1.linodeobjects.com",
   913  				Help:  "Frankfurt (Germany), eu-central-1",
   914  			}, {
   915  				Value: "it-mil-1.linodeobjects.com",
   916  				Help:  "Milan (Italy), it-mil-1",
   917  			}, {
   918  				Value: "us-east-1.linodeobjects.com",
   919  				Help:  "Newark, NJ (USA), us-east-1",
   920  			}, {
   921  				Value: "fr-par-1.linodeobjects.com",
   922  				Help:  "Paris (France), fr-par-1",
   923  			}, {
   924  				Value: "us-sea-1.linodeobjects.com",
   925  				Help:  "Seattle, WA (USA), us-sea-1",
   926  			}, {
   927  				Value: "ap-south-1.linodeobjects.com",
   928  				Help:  "Singapore ap-south-1",
   929  			}, {
   930  				Value: "se-sto-1.linodeobjects.com",
   931  				Help:  "Stockholm (Sweden), se-sto-1",
   932  			}, {
   933  				Value: "us-iad-1.linodeobjects.com",
   934  				Help:  "Washington, DC (USA), us-iad-1",
   935  			}},
   936  		}, {
   937  			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
   938  			Name:     "endpoint",
   939  			Help:     "Endpoint for OSS API.",
   940  			Provider: "Alibaba",
   941  			Examples: []fs.OptionExample{{
   942  				Value: "oss-accelerate.aliyuncs.com",
   943  				Help:  "Global Accelerate",
   944  			}, {
   945  				Value: "oss-accelerate-overseas.aliyuncs.com",
   946  				Help:  "Global Accelerate (outside mainland China)",
   947  			}, {
   948  				Value: "oss-cn-hangzhou.aliyuncs.com",
   949  				Help:  "East China 1 (Hangzhou)",
   950  			}, {
   951  				Value: "oss-cn-shanghai.aliyuncs.com",
   952  				Help:  "East China 2 (Shanghai)",
   953  			}, {
   954  				Value: "oss-cn-qingdao.aliyuncs.com",
   955  				Help:  "North China 1 (Qingdao)",
   956  			}, {
   957  				Value: "oss-cn-beijing.aliyuncs.com",
   958  				Help:  "North China 2 (Beijing)",
   959  			}, {
   960  				Value: "oss-cn-zhangjiakou.aliyuncs.com",
   961  				Help:  "North China 3 (Zhangjiakou)",
   962  			}, {
   963  				Value: "oss-cn-huhehaote.aliyuncs.com",
   964  				Help:  "North China 5 (Hohhot)",
   965  			}, {
   966  				Value: "oss-cn-wulanchabu.aliyuncs.com",
   967  				Help:  "North China 6 (Ulanqab)",
   968  			}, {
   969  				Value: "oss-cn-shenzhen.aliyuncs.com",
   970  				Help:  "South China 1 (Shenzhen)",
   971  			}, {
   972  				Value: "oss-cn-heyuan.aliyuncs.com",
   973  				Help:  "South China 2 (Heyuan)",
   974  			}, {
   975  				Value: "oss-cn-guangzhou.aliyuncs.com",
   976  				Help:  "South China 3 (Guangzhou)",
   977  			}, {
   978  				Value: "oss-cn-chengdu.aliyuncs.com",
   979  				Help:  "West China 1 (Chengdu)",
   980  			}, {
   981  				Value: "oss-cn-hongkong.aliyuncs.com",
   982  				Help:  "Hong Kong (Hong Kong)",
   983  			}, {
   984  				Value: "oss-us-west-1.aliyuncs.com",
   985  				Help:  "US West 1 (Silicon Valley)",
   986  			}, {
   987  				Value: "oss-us-east-1.aliyuncs.com",
   988  				Help:  "US East 1 (Virginia)",
   989  			}, {
   990  				Value: "oss-ap-southeast-1.aliyuncs.com",
   991  				Help:  "Southeast Asia Southeast 1 (Singapore)",
   992  			}, {
   993  				Value: "oss-ap-southeast-2.aliyuncs.com",
   994  				Help:  "Asia Pacific Southeast 2 (Sydney)",
   995  			}, {
   996  				Value: "oss-ap-southeast-3.aliyuncs.com",
   997  				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
   998  			}, {
   999  				Value: "oss-ap-southeast-5.aliyuncs.com",
  1000  				Help:  "Asia Pacific Southeast 5 (Jakarta)",
  1001  			}, {
  1002  				Value: "oss-ap-northeast-1.aliyuncs.com",
  1003  				Help:  "Asia Pacific Northeast 1 (Japan)",
  1004  			}, {
  1005  				Value: "oss-ap-south-1.aliyuncs.com",
  1006  				Help:  "Asia Pacific South 1 (Mumbai)",
  1007  			}, {
  1008  				Value: "oss-eu-central-1.aliyuncs.com",
  1009  				Help:  "Central Europe 1 (Frankfurt)",
  1010  			}, {
  1011  				Value: "oss-eu-west-1.aliyuncs.com",
  1012  				Help:  "West Europe (London)",
  1013  			}, {
  1014  				Value: "oss-me-east-1.aliyuncs.com",
  1015  				Help:  "Middle East 1 (Dubai)",
  1016  			}},
  1017  		}, {
  1018  			// obs endpoints: https://developer.huaweicloud.com/intl/en-us/endpoint?OBS
  1019  			Name:     "endpoint",
  1020  			Help:     "Endpoint for OBS API.",
  1021  			Provider: "HuaweiOBS",
  1022  			Examples: []fs.OptionExample{{
  1023  				Value: "obs.af-south-1.myhuaweicloud.com",
  1024  				Help:  "AF-Johannesburg",
  1025  			}, {
  1026  				Value: "obs.ap-southeast-2.myhuaweicloud.com",
  1027  				Help:  "AP-Bangkok",
  1028  			}, {
  1029  				Value: "obs.ap-southeast-3.myhuaweicloud.com",
  1030  				Help:  "AP-Singapore",
  1031  			}, {
  1032  				Value: "obs.cn-east-3.myhuaweicloud.com",
  1033  				Help:  "CN East-Shanghai1",
  1034  			}, {
  1035  				Value: "obs.cn-east-2.myhuaweicloud.com",
  1036  				Help:  "CN East-Shanghai2",
  1037  			}, {
  1038  				Value: "obs.cn-north-1.myhuaweicloud.com",
  1039  				Help:  "CN North-Beijing1",
  1040  			}, {
  1041  				Value: "obs.cn-north-4.myhuaweicloud.com",
  1042  				Help:  "CN North-Beijing4",
  1043  			}, {
  1044  				Value: "obs.cn-south-1.myhuaweicloud.com",
  1045  				Help:  "CN South-Guangzhou",
  1046  			}, {
  1047  				Value: "obs.ap-southeast-1.myhuaweicloud.com",
  1048  				Help:  "CN-Hong Kong",
  1049  			}, {
  1050  				Value: "obs.sa-argentina-1.myhuaweicloud.com",
  1051  				Help:  "LA-Buenos Aires1",
  1052  			}, {
  1053  				Value: "obs.sa-peru-1.myhuaweicloud.com",
  1054  				Help:  "LA-Lima1",
  1055  			}, {
  1056  				Value: "obs.na-mexico-1.myhuaweicloud.com",
  1057  				Help:  "LA-Mexico City1",
  1058  			}, {
  1059  				Value: "obs.sa-chile-1.myhuaweicloud.com",
  1060  				Help:  "LA-Santiago2",
  1061  			}, {
  1062  				Value: "obs.sa-brazil-1.myhuaweicloud.com",
  1063  				Help:  "LA-Sao Paulo1",
  1064  			}, {
  1065  				Value: "obs.ru-northwest-2.myhuaweicloud.com",
  1066  				Help:  "RU-Moscow2",
  1067  			}},
  1068  		}, {
  1069  			Name:     "endpoint",
  1070  			Help:     "Endpoint for Scaleway Object Storage.",
  1071  			Provider: "Scaleway",
  1072  			Examples: []fs.OptionExample{{
  1073  				Value: "s3.nl-ams.scw.cloud",
  1074  				Help:  "Amsterdam Endpoint",
  1075  			}, {
  1076  				Value: "s3.fr-par.scw.cloud",
  1077  				Help:  "Paris Endpoint",
  1078  			}, {
  1079  				Value: "s3.pl-waw.scw.cloud",
  1080  				Help:  "Warsaw Endpoint",
  1081  			}},
  1082  		}, {
  1083  			Name:     "endpoint",
  1084  			Help:     "Endpoint for StackPath Object Storage.",
  1085  			Provider: "StackPath",
  1086  			Examples: []fs.OptionExample{{
  1087  				Value: "s3.us-east-2.stackpathstorage.com",
  1088  				Help:  "US East Endpoint",
  1089  			}, {
  1090  				Value: "s3.us-west-1.stackpathstorage.com",
  1091  				Help:  "US West Endpoint",
  1092  			}, {
  1093  				Value: "s3.eu-central-1.stackpathstorage.com",
  1094  				Help:  "EU Endpoint",
  1095  			}},
  1096  		}, {
  1097  			Name:     "endpoint",
  1098  			Help:     "Endpoint for Google Cloud Storage.",
  1099  			Provider: "GCS",
  1100  			Examples: []fs.OptionExample{{
  1101  				Value: "https://storage.googleapis.com",
  1102  				Help:  "Google Cloud Storage endpoint",
  1103  			}},
  1104  		}, {
  1105  			Name:     "endpoint",
  1106  			Help:     "Endpoint for Storj Gateway.",
  1107  			Provider: "Storj",
  1108  			Examples: []fs.OptionExample{{
  1109  				Value: "gateway.storjshare.io",
  1110  				Help:  "Global Hosted Gateway",
  1111  			}},
  1112  		}, {
  1113  			Name:     "endpoint",
  1114  			Help:     "Endpoint for Synology C2 Object Storage API.",
  1115  			Provider: "Synology",
  1116  			Examples: []fs.OptionExample{{
  1117  				Value: "eu-001.s3.synologyc2.net",
  1118  				Help:  "EU Endpoint 1",
  1119  			}, {
  1120  				Value: "eu-002.s3.synologyc2.net",
  1121  				Help:  "EU Endpoint 2",
  1122  			}, {
  1123  				Value: "us-001.s3.synologyc2.net",
  1124  				Help:  "US Endpoint 1",
  1125  			}, {
  1126  				Value: "us-002.s3.synologyc2.net",
  1127  				Help:  "US Endpoint 2",
  1128  			}, {
  1129  				Value: "tw-001.s3.synologyc2.net",
  1130  				Help:  "TW Endpoint 1",
  1131  			}},
  1132  		}, {
  1133  			// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
  1134  			Name:     "endpoint",
  1135  			Help:     "Endpoint for Tencent COS API.",
  1136  			Provider: "TencentCOS",
  1137  			Examples: []fs.OptionExample{{
  1138  				Value: "cos.ap-beijing.myqcloud.com",
  1139  				Help:  "Beijing Region",
  1140  			}, {
  1141  				Value: "cos.ap-nanjing.myqcloud.com",
  1142  				Help:  "Nanjing Region",
  1143  			}, {
  1144  				Value: "cos.ap-shanghai.myqcloud.com",
  1145  				Help:  "Shanghai Region",
  1146  			}, {
  1147  				Value: "cos.ap-guangzhou.myqcloud.com",
  1148  				Help:  "Guangzhou Region",
  1149  			}, {
  1153  				Value: "cos.ap-chengdu.myqcloud.com",
  1154  				Help:  "Chengdu Region",
  1155  			}, {
  1156  				Value: "cos.ap-chongqing.myqcloud.com",
  1157  				Help:  "Chongqing Region",
  1158  			}, {
  1159  				Value: "cos.ap-hongkong.myqcloud.com",
  1160  				Help:  "Hong Kong (China) Region",
  1161  			}, {
  1162  				Value: "cos.ap-singapore.myqcloud.com",
  1163  				Help:  "Singapore Region",
  1164  			}, {
  1165  				Value: "cos.ap-mumbai.myqcloud.com",
  1166  				Help:  "Mumbai Region",
  1167  			}, {
  1168  				Value: "cos.ap-seoul.myqcloud.com",
  1169  				Help:  "Seoul Region",
  1170  			}, {
  1171  				Value: "cos.ap-bangkok.myqcloud.com",
  1172  				Help:  "Bangkok Region",
  1173  			}, {
  1174  				Value: "cos.ap-tokyo.myqcloud.com",
  1175  				Help:  "Tokyo Region",
  1176  			}, {
  1177  				Value: "cos.na-siliconvalley.myqcloud.com",
  1178  				Help:  "Silicon Valley Region",
  1179  			}, {
  1180  				Value: "cos.na-ashburn.myqcloud.com",
  1181  				Help:  "Virginia Region",
  1182  			}, {
  1183  				Value: "cos.na-toronto.myqcloud.com",
  1184  				Help:  "Toronto Region",
  1185  			}, {
  1186  				Value: "cos.eu-frankfurt.myqcloud.com",
  1187  				Help:  "Frankfurt Region",
  1188  			}, {
  1189  				Value: "cos.eu-moscow.myqcloud.com",
  1190  				Help:  "Moscow Region",
  1191  			}, {
  1192  				Value: "cos.accelerate.myqcloud.com",
  1193  				Help:  "Use Tencent COS Accelerate Endpoint",
  1194  			}},
  1195  		}, {
  1196  			// RackCorp endpoints: https://www.rackcorp.com/storage/s3storage
  1197  			Name:     "endpoint",
  1198  			Help:     "Endpoint for RackCorp Object Storage.",
  1199  			Provider: "RackCorp",
  1200  			Examples: []fs.OptionExample{{
  1201  				Value: "s3.rackcorp.com",
  1202  				Help:  "Global (AnyCast) Endpoint",
  1203  			}, {
  1204  				Value: "au.s3.rackcorp.com",
  1205  				Help:  "Australia (Anycast) Endpoint",
  1206  			}, {
  1207  				Value: "au-nsw.s3.rackcorp.com",
  1208  				Help:  "Sydney (Australia) Endpoint",
  1209  			}, {
  1210  				Value: "au-qld.s3.rackcorp.com",
  1211  				Help:  "Brisbane (Australia) Endpoint",
  1212  			}, {
  1213  				Value: "au-vic.s3.rackcorp.com",
  1214  				Help:  "Melbourne (Australia) Endpoint",
  1215  			}, {
  1216  				Value: "au-wa.s3.rackcorp.com",
  1217  				Help:  "Perth (Australia) Endpoint",
  1218  			}, {
  1219  				Value: "ph.s3.rackcorp.com",
  1220  				Help:  "Manila (Philippines) Endpoint",
  1221  			}, {
  1222  				Value: "th.s3.rackcorp.com",
  1223  				Help:  "Bangkok (Thailand) Endpoint",
  1224  			}, {
  1225  				Value: "hk.s3.rackcorp.com",
  1226  				Help:  "HK (Hong Kong) Endpoint",
  1227  			}, {
  1228  				Value: "mn.s3.rackcorp.com",
  1229  				Help:  "Ulaanbaatar (Mongolia) Endpoint",
  1230  			}, {
  1231  				Value: "kg.s3.rackcorp.com",
  1232  				Help:  "Bishkek (Kyrgyzstan) Endpoint",
  1233  			}, {
  1234  				Value: "id.s3.rackcorp.com",
  1235  				Help:  "Jakarta (Indonesia) Endpoint",
  1236  			}, {
  1237  				Value: "jp.s3.rackcorp.com",
  1238  				Help:  "Tokyo (Japan) Endpoint",
  1239  			}, {
  1240  				Value: "sg.s3.rackcorp.com",
  1241  				Help:  "SG (Singapore) Endpoint",
  1242  			}, {
  1243  				Value: "de.s3.rackcorp.com",
  1244  				Help:  "Frankfurt (Germany) Endpoint",
  1245  			}, {
  1246  				Value: "us.s3.rackcorp.com",
  1247  				Help:  "USA (AnyCast) Endpoint",
  1248  			}, {
  1249  				Value: "us-east-1.s3.rackcorp.com",
  1250  				Help:  "New York (USA) Endpoint",
  1251  			}, {
  1252  				Value: "us-west-1.s3.rackcorp.com",
  1253  				Help:  "Fremont (USA) Endpoint",
  1254  			}, {
  1255  				Value: "nz.s3.rackcorp.com",
  1256  				Help:  "Auckland (New Zealand) Endpoint",
  1257  			}},
  1258  		}, {
  1259  			// Qiniu endpoints: https://developer.qiniu.com/kodo/4088/s3-access-domainname
  1260  			Name:     "endpoint",
  1261  			Help:     "Endpoint for Qiniu Object Storage.",
  1262  			Provider: "Qiniu",
  1263  			Examples: []fs.OptionExample{{
  1264  				Value: "s3-cn-east-1.qiniucs.com",
  1265  				Help:  "East China Endpoint 1",
  1266  			}, {
  1267  				Value: "s3-cn-east-2.qiniucs.com",
  1268  				Help:  "East China Endpoint 2",
  1269  			}, {
  1270  				Value: "s3-cn-north-1.qiniucs.com",
  1271  				Help:  "North China Endpoint 1",
  1272  			}, {
  1273  				Value: "s3-cn-south-1.qiniucs.com",
  1274  				Help:  "South China Endpoint 1",
  1275  			}, {
  1276  				Value: "s3-us-north-1.qiniucs.com",
  1277  				Help:  "North America Endpoint 1",
  1278  			}, {
  1279  				Value: "s3-ap-southeast-1.qiniucs.com",
  1280  				Help:  "Southeast Asia Endpoint 1",
  1281  			}, {
  1282  				Value: "s3-ap-northeast-1.qiniucs.com",
  1283  				Help:  "Northeast Asia Endpoint 1",
  1284  			}},
  1285  		}, {
  1286  			Name:     "endpoint",
  1287  			Help:     "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
  1288  			Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,Scaleway,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox",
  1289  			Examples: []fs.OptionExample{{
  1290  				Value:    "objects-us-east-1.dream.io",
  1291  				Help:     "Dream Objects endpoint",
  1292  				Provider: "Dreamhost",
  1293  			}, {
  1294  				Value:    "syd1.digitaloceanspaces.com",
  1295  				Help:     "DigitalOcean Spaces Sydney 1",
  1296  				Provider: "DigitalOcean",
  1297  			}, {
  1298  				Value:    "sfo3.digitaloceanspaces.com",
  1299  				Help:     "DigitalOcean Spaces San Francisco 3",
  1300  				Provider: "DigitalOcean",
  1301  			}, {
  1302  				Value:    "fra1.digitaloceanspaces.com",
  1303  				Help:     "DigitalOcean Spaces Frankfurt 1",
  1304  				Provider: "DigitalOcean",
  1305  			}, {
  1306  				Value:    "nyc3.digitaloceanspaces.com",
  1307  				Help:     "DigitalOcean Spaces New York 3",
  1308  				Provider: "DigitalOcean",
  1309  			}, {
  1310  				Value:    "ams3.digitaloceanspaces.com",
  1311  				Help:     "DigitalOcean Spaces Amsterdam 3",
  1312  				Provider: "DigitalOcean",
  1313  			}, {
  1314  				Value:    "sgp1.digitaloceanspaces.com",
  1315  				Help:     "DigitalOcean Spaces Singapore 1",
  1316  				Provider: "DigitalOcean",
  1317  			}, {
  1318  				Value:    "localhost:8333",
  1319  				Help:     "SeaweedFS S3 localhost",
  1320  				Provider: "SeaweedFS",
  1321  			}, {
  1322  				Value:    "s3.us-east-1.lyvecloud.seagate.com",
  1323  				Help:     "Seagate Lyve Cloud US East 1 (Virginia)",
  1324  				Provider: "LyveCloud",
  1325  			}, {
  1326  				Value:    "s3.us-west-1.lyvecloud.seagate.com",
  1327  				Help:     "Seagate Lyve Cloud US West 1 (California)",
  1328  				Provider: "LyveCloud",
  1329  			}, {
  1330  				Value:    "s3.ap-southeast-1.lyvecloud.seagate.com",
  1331  				Help:     "Seagate Lyve Cloud AP Southeast 1 (Singapore)",
  1332  				Provider: "LyveCloud",
  1333  			}, {
  1334  				Value:    "s3.wasabisys.com",
  1335  				Help:     "Wasabi US East 1 (N. Virginia)",
  1336  				Provider: "Wasabi",
  1337  			}, {
  1338  				Value:    "s3.us-east-2.wasabisys.com",
  1339  				Help:     "Wasabi US East 2 (N. Virginia)",
  1340  				Provider: "Wasabi",
  1341  			}, {
  1342  				Value:    "s3.us-central-1.wasabisys.com",
  1343  				Help:     "Wasabi US Central 1 (Texas)",
  1344  				Provider: "Wasabi",
  1345  			}, {
  1346  				Value:    "s3.us-west-1.wasabisys.com",
  1347  				Help:     "Wasabi US West 1 (Oregon)",
  1348  				Provider: "Wasabi",
  1349  			}, {
  1350  				Value:    "s3.ca-central-1.wasabisys.com",
  1351  				Help:     "Wasabi CA Central 1 (Toronto)",
  1352  				Provider: "Wasabi",
  1353  			}, {
  1354  				Value:    "s3.eu-central-1.wasabisys.com",
  1355  				Help:     "Wasabi EU Central 1 (Amsterdam)",
  1356  				Provider: "Wasabi",
  1357  			}, {
  1358  				Value:    "s3.eu-central-2.wasabisys.com",
  1359  				Help:     "Wasabi EU Central 2 (Frankfurt)",
  1360  				Provider: "Wasabi",
  1361  			}, {
  1362  				Value:    "s3.eu-west-1.wasabisys.com",
  1363  				Help:     "Wasabi EU West 1 (London)",
  1364  				Provider: "Wasabi",
  1365  			}, {
  1366  				Value:    "s3.eu-west-2.wasabisys.com",
  1367  				Help:     "Wasabi EU West 2 (Paris)",
  1368  				Provider: "Wasabi",
  1369  			}, {
  1370  				Value:    "s3.ap-northeast-1.wasabisys.com",
  1371  				Help:     "Wasabi AP Northeast 1 (Tokyo) endpoint",
  1372  				Provider: "Wasabi",
  1373  			}, {
  1374  				Value:    "s3.ap-northeast-2.wasabisys.com",
  1375  				Help:     "Wasabi AP Northeast 2 (Osaka) endpoint",
  1376  				Provider: "Wasabi",
  1377  			}, {
  1378  				Value:    "s3.ap-southeast-1.wasabisys.com",
  1379  				Help:     "Wasabi AP Southeast 1 (Singapore)",
  1380  				Provider: "Wasabi",
  1381  			}, {
  1382  				Value:    "s3.ap-southeast-2.wasabisys.com",
  1383  				Help:     "Wasabi AP Southeast 2 (Sydney)",
  1384  				Provider: "Wasabi",
  1385  			}, {
  1386  				Value:    "storage.iran.liara.space",
  1387  				Help:     "Liara Iran endpoint",
  1388  				Provider: "Liara",
  1389  			}, {
  1390  				Value:    "s3.ir-thr-at1.arvanstorage.ir",
  1391  				Help:     "ArvanCloud Tehran Iran (Simin) endpoint",
  1392  				Provider: "ArvanCloud",
  1393  			}, {
  1394  				Value:    "s3.ir-tbz-sh1.arvanstorage.ir",
  1395  				Help:     "ArvanCloud Tabriz Iran (Shahriar) endpoint",
  1396  				Provider: "ArvanCloud",
  1397  			}},
  1398  		}, {
  1399  			Name:     "location_constraint",
  1400  			Help:     "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
  1401  			Provider: "AWS",
  1402  			Examples: []fs.OptionExample{{
  1403  				Value: "",
  1404  				Help:  "Empty for US Region, Northern Virginia, or Pacific Northwest",
  1405  			}, {
  1406  				Value: "us-east-2",
  1407  				Help:  "US East (Ohio) Region",
  1408  			}, {
  1409  				Value: "us-west-1",
  1410  				Help:  "US West (Northern California) Region",
  1411  			}, {
  1412  				Value: "us-west-2",
  1413  				Help:  "US West (Oregon) Region",
  1414  			}, {
  1415  				Value: "ca-central-1",
  1416  				Help:  "Canada (Central) Region",
  1417  			}, {
  1418  				Value: "eu-west-1",
  1419  				Help:  "EU (Ireland) Region",
  1420  			}, {
  1421  				Value: "eu-west-2",
  1422  				Help:  "EU (London) Region",
  1423  			}, {
  1424  				Value: "eu-west-3",
  1425  				Help:  "EU (Paris) Region",
  1426  			}, {
  1427  				Value: "eu-north-1",
  1428  				Help:  "EU (Stockholm) Region",
  1429  			}, {
  1430  				Value: "eu-south-1",
  1431  				Help:  "EU (Milan) Region",
  1432  			}, {
  1433  				Value: "EU",
  1434  				Help:  "EU Region",
  1435  			}, {
  1436  				Value: "ap-southeast-1",
  1437  				Help:  "Asia Pacific (Singapore) Region",
  1438  			}, {
  1439  				Value: "ap-southeast-2",
  1440  				Help:  "Asia Pacific (Sydney) Region",
  1441  			}, {
  1442  				Value: "ap-northeast-1",
  1443  				Help:  "Asia Pacific (Tokyo) Region",
  1444  			}, {
  1445  				Value: "ap-northeast-2",
  1446  				Help:  "Asia Pacific (Seoul) Region",
  1447  			}, {
  1448  				Value: "ap-northeast-3",
  1449  				Help:  "Asia Pacific (Osaka-Local) Region",
  1450  			}, {
  1451  				Value: "ap-south-1",
  1452  				Help:  "Asia Pacific (Mumbai) Region",
  1453  			}, {
  1454  				Value: "ap-east-1",
  1455  				Help:  "Asia Pacific (Hong Kong) Region",
  1456  			}, {
  1457  				Value: "sa-east-1",
  1458  				Help:  "South America (Sao Paulo) Region",
  1459  			}, {
  1460  				Value: "il-central-1",
  1461  				Help:  "Israel (Tel Aviv) Region",
  1462  			}, {
  1463  				Value: "me-south-1",
  1464  				Help:  "Middle East (Bahrain) Region",
  1465  			}, {
  1466  				Value: "af-south-1",
  1467  				Help:  "Africa (Cape Town) Region",
  1468  			}, {
  1469  				Value: "cn-north-1",
  1470  				Help:  "China (Beijing) Region",
  1471  			}, {
  1472  				Value: "cn-northwest-1",
  1473  				Help:  "China (Ningxia) Region",
  1474  			}, {
  1475  				Value: "us-gov-east-1",
  1476  				Help:  "AWS GovCloud (US-East) Region",
  1477  			}, {
  1478  				Value: "us-gov-west-1",
  1479  				Help:  "AWS GovCloud (US) Region",
  1480  			}},
  1481  		}, {
  1482  			Name:     "location_constraint",
  1483  			Help:     "Location constraint - must match endpoint.\n\nUsed when creating buckets only.",
  1484  			Provider: "ChinaMobile",
  1485  			Examples: []fs.OptionExample{{
  1486  				Value: "wuxi1",
  1487  				Help:  "East China (Suzhou)",
  1488  			}, {
  1489  				Value: "jinan1",
  1490  				Help:  "East China (Jinan)",
  1491  			}, {
  1492  				Value: "ningbo1",
  1493  				Help:  "East China (Hangzhou)",
  1494  			}, {
  1495  				Value: "shanghai1",
  1496  				Help:  "East China (Shanghai-1)",
  1497  			}, {
  1498  				Value: "zhengzhou1",
  1499  				Help:  "Central China (Zhengzhou)",
  1500  			}, {
  1501  				Value: "hunan1",
  1502  				Help:  "Central China (Changsha-1)",
  1503  			}, {
  1504  				Value: "zhuzhou1",
  1505  				Help:  "Central China (Changsha-2)",
  1506  			}, {
  1507  				Value: "guangzhou1",
  1508  				Help:  "South China (Guangzhou-2)",
  1509  			}, {
  1510  				Value: "dongguan1",
  1511  				Help:  "South China (Guangzhou-3)",
  1512  			}, {
  1513  				Value: "beijing1",
  1514  				Help:  "North China (Beijing-1)",
  1515  			}, {
  1516  				Value: "beijing2",
  1517  				Help:  "North China (Beijing-2)",
  1518  			}, {
  1519  				Value: "beijing4",
  1520  				Help:  "North China (Beijing-3)",
  1521  			}, {
  1522  				Value: "huhehaote1",
  1523  				Help:  "North China (Huhehaote)",
  1524  			}, {
  1525  				Value: "chengdu1",
  1526  				Help:  "Southwest China (Chengdu)",
  1527  			}, {
  1528  				Value: "chongqing1",
  1529  				Help:  "Southwest China (Chongqing)",
  1530  			}, {
  1531  				Value: "guiyang1",
  1532  				Help:  "Southwest China (Guiyang)",
  1533  			}, {
  1534  				Value: "xian1",
  1535  				Help:  "Nouthwest China (Xian)",
  1536  			}, {
  1537  				Value: "yunnan",
  1538  				Help:  "Yunnan China (Kunming)",
  1539  			}, {
  1540  				Value: "yunnan2",
  1541  				Help:  "Yunnan China (Kunming-2)",
  1542  			}, {
  1543  				Value: "tianjin1",
  1544  				Help:  "Tianjin China (Tianjin)",
  1545  			}, {
  1546  				Value: "jilin1",
  1547  				Help:  "Jilin China (Changchun)",
  1548  			}, {
  1549  				Value: "hubei1",
  1550  				Help:  "Hubei China (Xiangyan)",
  1551  			}, {
  1552  				Value: "jiangxi1",
  1553  				Help:  "Jiangxi China (Nanchang)",
  1554  			}, {
  1555  				Value: "gansu1",
  1556  				Help:  "Gansu China (Lanzhou)",
  1557  			}, {
  1558  				Value: "shanxi1",
  1559  				Help:  "Shanxi China (Taiyuan)",
  1560  			}, {
  1561  				Value: "liaoning1",
  1562  				Help:  "Liaoning China (Shenyang)",
  1563  			}, {
  1564  				Value: "hebei1",
  1565  				Help:  "Hebei China (Shijiazhuang)",
  1566  			}, {
  1567  				Value: "fujian1",
  1568  				Help:  "Fujian China (Xiamen)",
  1569  			}, {
  1570  				Value: "guangxi1",
  1571  				Help:  "Guangxi China (Nanning)",
  1572  			}, {
  1573  				Value: "anhui1",
  1574  				Help:  "Anhui China (Huainan)",
  1575  			}},
  1576  		}, {
  1577  			Name:     "location_constraint",
  1578  			Help:     "Location constraint - must match endpoint.\n\nUsed when creating buckets only.",
  1579  			Provider: "ArvanCloud",
  1580  			Examples: []fs.OptionExample{{
  1581  				Value: "ir-thr-at1",
  1582  				Help:  "Tehran Iran (Simin)",
  1583  			}, {
  1584  				Value: "ir-tbz-sh1",
  1585  				Help:  "Tabriz Iran (Shahriar)",
  1586  			}},
  1587  		}, {
  1588  			Name:     "location_constraint",
  1589  			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\n\nFor on-prem COS, do not make a selection from this list, hit enter.",
  1590  			Provider: "IBMCOS",
  1591  			Examples: []fs.OptionExample{{
  1592  				Value: "us-standard",
  1593  				Help:  "US Cross Region Standard",
  1594  			}, {
  1595  				Value: "us-vault",
  1596  				Help:  "US Cross Region Vault",
  1597  			}, {
  1598  				Value: "us-cold",
  1599  				Help:  "US Cross Region Cold",
  1600  			}, {
  1601  				Value: "us-flex",
  1602  				Help:  "US Cross Region Flex",
  1603  			}, {
  1604  				Value: "us-east-standard",
  1605  				Help:  "US East Region Standard",
  1606  			}, {
  1607  				Value: "us-east-vault",
  1608  				Help:  "US East Region Vault",
  1609  			}, {
  1610  				Value: "us-east-cold",
  1611  				Help:  "US East Region Cold",
  1612  			}, {
  1613  				Value: "us-east-flex",
  1614  				Help:  "US East Region Flex",
  1615  			}, {
  1616  				Value: "us-south-standard",
  1617  				Help:  "US South Region Standard",
  1618  			}, {
  1619  				Value: "us-south-vault",
  1620  				Help:  "US South Region Vault",
  1621  			}, {
  1622  				Value: "us-south-cold",
  1623  				Help:  "US South Region Cold",
  1624  			}, {
  1625  				Value: "us-south-flex",
  1626  				Help:  "US South Region Flex",
  1627  			}, {
  1628  				Value: "eu-standard",
  1629  				Help:  "EU Cross Region Standard",
  1630  			}, {
  1631  				Value: "eu-vault",
  1632  				Help:  "EU Cross Region Vault",
  1633  			}, {
  1634  				Value: "eu-cold",
  1635  				Help:  "EU Cross Region Cold",
  1636  			}, {
  1637  				Value: "eu-flex",
  1638  				Help:  "EU Cross Region Flex",
  1639  			}, {
  1640  				Value: "eu-gb-standard",
  1641  				Help:  "Great Britain Standard",
  1642  			}, {
  1643  				Value: "eu-gb-vault",
  1644  				Help:  "Great Britain Vault",
  1645  			}, {
  1646  				Value: "eu-gb-cold",
  1647  				Help:  "Great Britain Cold",
  1648  			}, {
  1649  				Value: "eu-gb-flex",
  1650  				Help:  "Great Britain Flex",
  1651  			}, {
  1652  				Value: "ap-standard",
  1653  				Help:  "APAC Standard",
  1654  			}, {
  1655  				Value: "ap-vault",
  1656  				Help:  "APAC Vault",
  1657  			}, {
  1658  				Value: "ap-cold",
  1659  				Help:  "APAC Cold",
  1660  			}, {
  1661  				Value: "ap-flex",
  1662  				Help:  "APAC Flex",
  1663  			}, {
  1664  				Value: "mel01-standard",
  1665  				Help:  "Melbourne Standard",
  1666  			}, {
  1667  				Value: "mel01-vault",
  1668  				Help:  "Melbourne Vault",
  1669  			}, {
  1670  				Value: "mel01-cold",
  1671  				Help:  "Melbourne Cold",
  1672  			}, {
  1673  				Value: "mel01-flex",
  1674  				Help:  "Melbourne Flex",
  1675  			}, {
  1676  				Value: "tor01-standard",
  1677  				Help:  "Toronto Standard",
  1678  			}, {
  1679  				Value: "tor01-vault",
  1680  				Help:  "Toronto Vault",
  1681  			}, {
  1682  				Value: "tor01-cold",
  1683  				Help:  "Toronto Cold",
  1684  			}, {
  1685  				Value: "tor01-flex",
  1686  				Help:  "Toronto Flex",
  1687  			}},
  1688  		}, {
  1689  			Name:     "location_constraint",
  1690  			Help:     "Location constraint - the location where your bucket will be located and your data stored.\n",
  1691  			Provider: "RackCorp",
  1692  			Examples: []fs.OptionExample{{
  1693  				Value: "global",
  1694  				Help:  "Global CDN Region",
  1695  			}, {
  1696  				Value: "au",
  1697  				Help:  "Australia (All locations)",
  1698  			}, {
  1699  				Value: "au-nsw",
  1700  				Help:  "NSW (Australia) Region",
  1701  			}, {
  1702  				Value: "au-qld",
  1703  				Help:  "QLD (Australia) Region",
  1704  			}, {
  1705  				Value: "au-vic",
  1706  				Help:  "VIC (Australia) Region",
  1707  			}, {
  1708  				Value: "au-wa",
  1709  				Help:  "Perth (Australia) Region",
  1710  			}, {
  1711  				Value: "ph",
  1712  				Help:  "Manila (Philippines) Region",
  1713  			}, {
  1714  				Value: "th",
  1715  				Help:  "Bangkok (Thailand) Region",
  1716  			}, {
  1717  				Value: "hk",
  1718  				Help:  "HK (Hong Kong) Region",
  1719  			}, {
  1720  				Value: "mn",
  1721  				Help:  "Ulaanbaatar (Mongolia) Region",
  1722  			}, {
  1723  				Value: "kg",
  1724  				Help:  "Bishkek (Kyrgyzstan) Region",
  1725  			}, {
  1726  				Value: "id",
  1727  				Help:  "Jakarta (Indonesia) Region",
  1728  			}, {
  1729  				Value: "jp",
  1730  				Help:  "Tokyo (Japan) Region",
  1731  			}, {
  1732  				Value: "sg",
  1733  				Help:  "SG (Singapore) Region",
  1734  			}, {
  1735  				Value: "de",
  1736  				Help:  "Frankfurt (Germany) Region",
  1737  			}, {
  1738  				Value: "us",
  1739  				Help:  "USA (AnyCast) Region",
  1740  			}, {
  1741  				Value: "us-east-1",
  1742  				Help:  "New York (USA) Region",
  1743  			}, {
  1744  				Value: "us-west-1",
  1745  				Help:  "Freemont (USA) Region",
  1746  			}, {
  1747  				Value: "nz",
  1748  				Help:  "Auckland (New Zealand) Region",
  1749  			}},
  1750  		}, {
  1751  			Name:     "location_constraint",
  1752  			Help:     "Location constraint - must be set to match the Region.\n\nUsed when creating buckets only.",
  1753  			Provider: "Qiniu",
  1754  			Examples: []fs.OptionExample{{
  1755  				Value: "cn-east-1",
  1756  				Help:  "East China Region 1",
  1757  			}, {
  1758  				Value: "cn-east-2",
  1759  				Help:  "East China Region 2",
  1760  			}, {
  1761  				Value: "cn-north-1",
  1762  				Help:  "North China Region 1",
  1763  			}, {
  1764  				Value: "cn-south-1",
  1765  				Help:  "South China Region 1",
  1766  			}, {
  1767  				Value: "us-north-1",
  1768  				Help:  "North America Region 1",
  1769  			}, {
  1770  				Value: "ap-southeast-1",
  1771  				Help:  "Southeast Asia Region 1",
  1772  			}, {
  1773  				Value: "ap-northeast-1",
  1774  				Help:  "Northeast Asia Region 1",
  1775  			}},
  1776  		}, {
  1777  			Name:     "location_constraint",
  1778  			Help:     "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
  1779  			Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Qiniu,RackCorp,Scaleway,StackPath,Storj,TencentCOS,Petabox",
  1780  		}, {
  1781  			Name: "acl",
  1782  			Help: `Canned ACL used when creating buckets and storing or copying objects.
  1783  
  1784  This ACL is used for creating objects and, if bucket_acl isn't set, for creating buckets too.
  1785  
  1786  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
  1787  
  1788  Note that this ACL is applied when server-side copying objects as S3
  1789  doesn't copy the ACL from the source but rather writes a fresh one.
  1790  
  1791  If the acl is an empty string then no X-Amz-Acl: header is added and
  1792  the default (private) will be used.
  1793  `,
  1794  			Provider: "!Storj,Synology,Cloudflare",
  1795  			Examples: []fs.OptionExample{{
  1796  				Value:    "default",
  1797  				Help:     "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
  1798  				Provider: "TencentCOS",
  1799  			}, {
  1800  				Value:    "private",
  1801  				Help:     "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
  1802  				Provider: "!IBMCOS,TencentCOS",
  1803  			}, {
  1804  				Value:    "public-read",
  1805  				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
  1806  				Provider: "!IBMCOS",
  1807  			}, {
  1808  				Value:    "public-read-write",
  1809  				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
  1810  				Provider: "!IBMCOS",
  1811  			}, {
  1812  				Value:    "authenticated-read",
  1813  				Help:     "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
  1814  				Provider: "!IBMCOS",
  1815  			}, {
  1816  				Value:    "bucket-owner-read",
  1817  				Help:     "Object owner gets FULL_CONTROL.\nBucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
  1818  				Provider: "!IBMCOS,ChinaMobile",
  1819  			}, {
  1820  				Value:    "bucket-owner-full-control",
  1821  				Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
  1822  				Provider: "!IBMCOS,ChinaMobile",
  1823  			}, {
  1824  				Value:    "private",
  1825  				Help:     "Owner gets FULL_CONTROL.\nNo one else has access rights (default).\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS.",
  1826  				Provider: "IBMCOS",
  1827  			}, {
  1828  				Value:    "public-read",
  1829  				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.\nThis acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS.",
  1830  				Provider: "IBMCOS",
  1831  			}, {
  1832  				Value:    "public-read-write",
  1833  				Help:     "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nThis acl is available on IBM Cloud (Infra), On-Premise IBM COS.",
  1834  				Provider: "IBMCOS",
  1835  			}, {
  1836  				Value:    "authenticated-read",
  1837  				Help:     "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.\nNot supported on Buckets.\nThis acl is available on IBM Cloud (Infra) and On-Premise IBM COS.",
  1838  				Provider: "IBMCOS",
  1839  			}},
  1840  		}, {
  1841  			Name: "bucket_acl",
  1842  			Help: `Canned ACL used when creating buckets.
  1843  
  1844  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
  1845  
  1846  Note that this ACL is applied only when creating buckets.  If it
  1847  isn't set then "acl" is used instead.
  1848  
  1849  If the "acl" and "bucket_acl" are empty strings then no X-Amz-Acl:
  1850  header is added and the default (private) will be used.
  1851  `,
  1852  			Advanced: true,
  1853  			Examples: []fs.OptionExample{{
  1854  				Value: "private",
  1855  				Help:  "Owner gets FULL_CONTROL.\nNo one else has access rights (default).",
  1856  			}, {
  1857  				Value: "public-read",
  1858  				Help:  "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ access.",
  1859  			}, {
  1860  				Value: "public-read-write",
  1861  				Help:  "Owner gets FULL_CONTROL.\nThe AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
  1862  			}, {
  1863  				Value: "authenticated-read",
  1864  				Help:  "Owner gets FULL_CONTROL.\nThe AuthenticatedUsers group gets READ access.",
  1865  			}},
  1866  		}, {
  1867  			Name:     "requester_pays",
  1868  			Help:     "Enables requester pays option when interacting with S3 bucket.",
  1869  			Provider: "AWS",
  1870  			Default:  false,
  1871  			Advanced: true,
  1872  		}, {
  1873  			Name:     "server_side_encryption",
  1874  			Help:     "The server-side encryption algorithm used when storing this object in S3.",
  1875  			Provider: "AWS,Ceph,ChinaMobile,Minio",
  1876  			Examples: []fs.OptionExample{{
  1877  				Value: "",
  1878  				Help:  "None",
  1879  			}, {
  1880  				Value: "AES256",
  1881  				Help:  "AES256",
  1882  			}, {
  1883  				Value:    "aws:kms",
  1884  				Help:     "aws:kms",
  1885  				Provider: "!ChinaMobile",
  1886  			}},
  1887  		}, {
  1888  			Name:     "sse_customer_algorithm",
  1889  			Help:     "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
  1890  			Provider: "AWS,Ceph,ChinaMobile,Minio",
  1891  			Advanced: true,
  1892  			Examples: []fs.OptionExample{{
  1893  				Value: "",
  1894  				Help:  "None",
  1895  			}, {
  1896  				Value: "AES256",
  1897  				Help:  "AES256",
  1898  			}},
  1899  		}, {
  1900  			Name:     "sse_kms_key_id",
  1901  			Help:     "If using KMS ID you must provide the ARN of Key.",
  1902  			Provider: "AWS,Ceph,Minio",
  1903  			Examples: []fs.OptionExample{{
  1904  				Value: "",
  1905  				Help:  "None",
  1906  			}, {
  1907  				Value: "arn:aws:kms:us-east-1:*",
  1908  				Help:  "arn:aws:kms:*",
  1909  			}},
  1910  			Sensitive: true,
  1911  		}, {
  1912  			Name: "sse_customer_key",
  1913  			Help: `To use SSE-C you may provide the secret encryption key used to encrypt/decrypt your data.
  1914  
  1915  Alternatively you can provide --sse-customer-key-base64.`,
  1916  			Provider: "AWS,Ceph,ChinaMobile,Minio",
  1917  			Advanced: true,
  1918  			Examples: []fs.OptionExample{{
  1919  				Value: "",
  1920  				Help:  "None",
  1921  			}},
  1922  			Sensitive: true,
  1923  		}, {
  1924  			Name: "sse_customer_key_base64",
  1925  			Help: `If using SSE-C you must provide the secret encryption key encoded in base64 format to encrypt/decrypt your data.
  1926  
  1927  Alternatively you can provide --sse-customer-key.`,
  1928  			Provider: "AWS,Ceph,ChinaMobile,Minio",
  1929  			Advanced: true,
  1930  			Examples: []fs.OptionExample{{
  1931  				Value: "",
  1932  				Help:  "None",
  1933  			}},
  1934  			Sensitive: true,
  1935  		}, {
  1936  			Name: "sse_customer_key_md5",
  1937  			Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional).
  1938  
  1939  If you leave it blank, this is calculated automatically from the sse_customer_key provided.
  1940  `,
  1941  			Provider: "AWS,Ceph,ChinaMobile,Minio",
  1942  			Advanced: true,
  1943  			Examples: []fs.OptionExample{{
  1944  				Value: "",
  1945  				Help:  "None",
  1946  			}},
  1947  			Sensitive: true,
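        			// When sse_customer_key_md5 is left blank the checksum is derived
        			// from the key automatically. A minimal sketch of that derivation
        			// (illustrative only - the variable names below are not from this
        			// file), following the usual SSE-C convention of a base64-encoded
        			// MD5 digest of the raw key bytes:
        			//
        			//	sum := md5.Sum([]byte(sseCustomerKey))
        			//	sseCustomerKeyMD5 := base64.StdEncoding.EncodeToString(sum[:])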
  1948  		}, {
  1949  			Name:     "storage_class",
  1950  			Help:     "The storage class to use when storing new objects in S3.",
  1951  			Provider: "AWS",
  1952  			Examples: []fs.OptionExample{{
  1953  				Value: "",
  1954  				Help:  "Default",
  1955  			}, {
  1956  				Value: "STANDARD",
  1957  				Help:  "Standard storage class",
  1958  			}, {
  1959  				Value: "REDUCED_REDUNDANCY",
  1960  				Help:  "Reduced redundancy storage class",
  1961  			}, {
  1962  				Value: "STANDARD_IA",
  1963  				Help:  "Standard Infrequent Access storage class",
  1964  			}, {
  1965  				Value: "ONEZONE_IA",
  1966  				Help:  "One Zone Infrequent Access storage class",
  1967  			}, {
  1968  				Value: "GLACIER",
  1969  				Help:  "Glacier storage class",
  1970  			}, {
  1971  				Value: "DEEP_ARCHIVE",
  1972  				Help:  "Glacier Deep Archive storage class",
  1973  			}, {
  1974  				Value: "INTELLIGENT_TIERING",
  1975  				Help:  "Intelligent-Tiering storage class",
  1976  			}, {
  1977  				Value: "GLACIER_IR",
  1978  				Help:  "Glacier Instant Retrieval storage class",
  1979  			}},
  1980  		}, {
  1981  			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
  1982  			Name:     "storage_class",
  1983  			Help:     "The storage class to use when storing new objects in OSS.",
  1984  			Provider: "Alibaba",
  1985  			Examples: []fs.OptionExample{{
  1986  				Value: "",
  1987  				Help:  "Default",
  1988  			}, {
  1989  				Value: "STANDARD",
  1990  				Help:  "Standard storage class",
  1991  			}, {
  1992  				Value: "GLACIER",
  1993  				Help:  "Archive storage mode",
  1994  			}, {
  1995  				Value: "STANDARD_IA",
  1996  				Help:  "Infrequent access storage mode",
  1997  			}},
  1998  		}, {
  1999  			// Mapping from here: https://ecloud.10086.cn/op-help-center/doc/article/24495
  2000  			Name:     "storage_class",
  2001  			Help:     "The storage class to use when storing new objects in ChinaMobile.",
  2002  			Provider: "ChinaMobile",
  2003  			Examples: []fs.OptionExample{{
  2004  				Value: "",
  2005  				Help:  "Default",
  2006  			}, {
  2007  				Value: "STANDARD",
  2008  				Help:  "Standard storage class",
  2009  			}, {
  2010  				Value: "GLACIER",
  2011  				Help:  "Archive storage mode",
  2012  			}, {
  2013  				Value: "STANDARD_IA",
  2014  				Help:  "Infrequent access storage mode",
  2015  			}},
  2016  		}, {
  2017  			// Mapping from here: https://liara.ir/landing/object-storage
  2018  			Name:     "storage_class",
  2019  			Help:     "The storage class to use when storing new objects in Liara",
  2020  			Provider: "Liara",
  2021  			Examples: []fs.OptionExample{{
  2022  				Value: "STANDARD",
  2023  				Help:  "Standard storage class",
  2024  			}},
  2025  		}, {
  2026  			// Mapping from here: https://www.arvancloud.ir/en/products/cloud-storage
  2027  			Name:     "storage_class",
  2028  			Help:     "The storage class to use when storing new objects in ArvanCloud.",
  2029  			Provider: "ArvanCloud",
  2030  			Examples: []fs.OptionExample{{
  2031  				Value: "STANDARD",
  2032  				Help:  "Standard storage class",
  2033  			}},
  2034  		}, {
  2035  			// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
  2036  			Name:     "storage_class",
  2037  			Help:     "The storage class to use when storing new objects in Tencent COS.",
  2038  			Provider: "TencentCOS",
  2039  			Examples: []fs.OptionExample{{
  2040  				Value: "",
  2041  				Help:  "Default",
  2042  			}, {
  2043  				Value: "STANDARD",
  2044  				Help:  "Standard storage class",
  2045  			}, {
  2046  				Value: "ARCHIVE",
  2047  				Help:  "Archive storage mode",
  2048  			}, {
  2049  				Value: "STANDARD_IA",
  2050  				Help:  "Infrequent access storage mode",
  2051  			}},
  2052  		}, {
  2053  			// Mapping from here: https://www.scaleway.com/en/docs/storage/object/quickstart/
  2054  			Name:     "storage_class",
  2055  			Help:     "The storage class to use when storing new objects in S3.",
  2056  			Provider: "Scaleway",
  2057  			Examples: []fs.OptionExample{{
  2058  				Value: "",
  2059  				Help:  "Default.",
  2060  			}, {
  2061  				Value: "STANDARD",
  2062  				Help:  "The Standard class for any upload.\nSuitable for on-demand content like streaming or CDN.\nAvailable in all regions.",
  2063  			}, {
  2064  				Value: "GLACIER",
  2065  				Help:  "Archived storage.\nPrices are lower, but it needs to be restored first to be accessed.\nAvailable in FR-PAR and NL-AMS regions.",
  2066  			}, {
  2067  				Value: "ONEZONE_IA",
  2068  				Help:  "One Zone - Infrequent Access.\nA good choice for storing secondary backup copies or easily re-creatable data.\nAvailable in the FR-PAR region only.",
  2069  			}},
  2070  		}, {
  2071  			// Mapping from here: https://developer.qiniu.com/kodo/5906/storage-type
  2072  			Name:     "storage_class",
  2073  			Help:     "The storage class to use when storing new objects in Qiniu.",
  2074  			Provider: "Qiniu",
  2075  			Examples: []fs.OptionExample{{
  2076  				Value: "STANDARD",
  2077  				Help:  "Standard storage class",
  2078  			}, {
  2079  				Value: "LINE",
  2080  				Help:  "Infrequent access storage mode",
  2081  			}, {
  2082  				Value: "GLACIER",
  2083  				Help:  "Archive storage mode",
  2084  			}, {
  2085  				Value: "DEEP_ARCHIVE",
  2086  				Help:  "Deep archive storage mode",
  2087  			}},
  2088  		}, {
  2089  			Name: "upload_cutoff",
  2090  			Help: `Cutoff for switching to chunked upload.
  2091  
  2092  Any files larger than this will be uploaded in chunks of chunk_size.
  2093  The minimum is 0 and the maximum is 5 GiB.`,
  2094  			Default:  defaultUploadCutoff,
  2095  			Advanced: true,
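        			// A rough sketch of how this cutoff is applied, assuming the size
        			// is known in advance (illustrative pseudocode, not the actual
        			// upload path in this file):
        			//
        			//	if size >= int64(uploadCutoff) {
        			//		// multipart upload in chunks of chunk_size
        			//	} else {
        			//		// single part PUT
        			//	}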
  2096  		}, {
  2097  			Name: "chunk_size",
  2098  			Help: `Chunk size to use for uploading.
  2099  
  2100  When uploading files larger than upload_cutoff or files with unknown
  2101  size (e.g. from "rclone rcat" or uploaded with "rclone mount" or Google
  2102  Photos or Google Docs) they will be uploaded as multipart uploads
  2103  using this chunk size.
  2104  
  2105  Note that "--s3-upload-concurrency" chunks of this size are buffered
  2106  in memory per transfer.
  2107  
  2108  If you are transferring large files over high-speed links and you have
  2109  enough memory, then increasing this will speed up the transfers.
  2110  
  2111  Rclone will automatically increase the chunk size when uploading a
  2112  large file of known size to stay below the 10,000 chunks limit.
  2113  
  2114  Files of unknown size are uploaded with the configured
  2115  chunk_size. Since the default chunk size is 5 MiB and there can be at
  2116  most 10,000 chunks, this means that by default the maximum size of
  2117  a file you can stream upload is 48 GiB.  If you wish to stream upload
  2118  larger files then you will need to increase chunk_size.
  2119  
  2120  Increasing the chunk size decreases the accuracy of the progress
  2121  statistics displayed with the "-P" flag. Rclone treats a chunk as sent
  2122  when it has been buffered by the AWS SDK, when in fact it may still be
  2123  uploading. A bigger chunk size means a bigger AWS SDK buffer and
  2124  progress reporting that deviates further from the truth.
  2125  `,
  2126  			Default:  minChunkSize,
  2127  			Advanced: true,
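        			// Worked example for the streaming limit described above: with the
        			// default 5 MiB chunk_size and the 10,000 part maximum,
        			//
        			//	5 MiB * 10000 = 50000 MiB ≈ 48.8 GiB
        			//
        			// which is the "48 GiB" figure quoted in the help. Doubling
        			// chunk_size roughly doubles this ceiling.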
  2128  		}, {
  2129  			Name: "max_upload_parts",
  2130  			Help: `Maximum number of parts in a multipart upload.
  2131  
  2132  This option defines the maximum number of multipart chunks to use
  2133  when doing a multipart upload.
  2134  
  2135  This can be useful if a service does not support the AWS S3
  2136  specification of 10,000 chunks.
  2137  
  2138  Rclone will automatically increase the chunk size when uploading a
  2139  large file of a known size to stay below this limit on the number of chunks.
  2140  `,
  2141  			Default:  maxUploadParts,
  2142  			Advanced: true,
  2143  		}, {
  2144  			Name: "copy_cutoff",
  2145  			Help: `Cutoff for switching to multipart copy.
  2146  
  2147  Any files larger than this that need to be server-side copied will be
  2148  copied in chunks of this size.
  2149  
  2150  The minimum is 0 and the maximum is 5 GiB.`,
  2151  			Default:  fs.SizeSuffix(maxSizeForCopy),
  2152  			Advanced: true,
  2153  		}, {
  2154  			Name: "disable_checksum",
  2155  			Help: `Don't store MD5 checksum with object metadata.
  2156  
  2157  Normally rclone will calculate the MD5 checksum of the input before
  2158  uploading it so it can add it to metadata on the object. This is great
  2159  for data integrity checking but can cause long delays for large files
  2160  to start uploading.`,
  2161  			Default:  false,
  2162  			Advanced: true,
  2163  		}, {
  2164  			Name: "shared_credentials_file",
  2165  			Help: `Path to the shared credentials file.
  2166  
  2167  If env_auth = true then rclone can use a shared credentials file.
  2168  
  2169  If this variable is empty rclone will look for the
  2170  "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty
  2171  it will default to the current user's home directory.
  2172  
  2173      Linux/OSX: "$HOME/.aws/credentials"
  2174      Windows:   "%USERPROFILE%\.aws\credentials"
  2175  `,
  2176  			Advanced: true,
  2177  		}, {
  2178  			Name: "profile",
  2179  			Help: `Profile to use in the shared credentials file.
  2180  
  2181  If env_auth = true then rclone can use a shared credentials file. This
  2182  variable controls which profile is used in that file.
  2183  
  2184  If empty it will default to the environment variable "AWS_PROFILE" or
  2185  "default" if that environment variable is also not set.
  2186  `,
  2187  			Advanced: true,
  2188  		}, {
  2189  			Name:      "session_token",
  2190  			Help:      "An AWS session token.",
  2191  			Advanced:  true,
  2192  			Sensitive: true,
  2193  		}, {
  2194  			Name: "upload_concurrency",
  2195  			Help: `Concurrency for multipart uploads and copies.
  2196  
  2197  This is the number of chunks of the same file that are uploaded
  2198  concurrently for multipart uploads and copies.
  2199  
  2200  If you are uploading small numbers of large files over high-speed links
  2201  and these uploads do not fully utilize your bandwidth, then increasing
  2202  this may help to speed up the transfers.`,
  2203  			Default:  4,
  2204  			Advanced: true,
  2205  		}, {
  2206  			Name: "force_path_style",
  2207  			Help: `If true use path style access, if false use virtual hosted style.
  2208  
  2209  If this is true (the default) then rclone will use path style access,
  2210  if false then rclone will use virtual hosted style. See [the AWS S3
  2211  docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
  2212  for more info.
  2213  
  2214  Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this to be set to
  2215  false - rclone will do this automatically based on the provider
  2216  setting.`,
  2217  			Default:  true,
  2218  			Advanced: true,
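        			// For reference, the two addressing styles on AWS look like this
        			// (standard URL shapes, shown for illustration only):
        			//
        			//	path style:           https://s3.us-east-1.amazonaws.com/bucket/key
        			//	virtual hosted style: https://bucket.s3.us-east-1.amazonaws.com/key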
  2219  		}, {
  2220  			Name: "v2_auth",
  2221  			Help: `If true use v2 authentication.
  2222  
  2223  If this is false (the default) then rclone will use v4 authentication.
  2224  If it is set then rclone will use v2 authentication.
  2225  
  2226  Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`,
  2227  			Default:  false,
  2228  			Advanced: true,
  2229  		}, {
  2230  			Name: "use_dual_stack",
  2231  			Help: `If true use AWS S3 dual-stack endpoint (IPv6 support).
  2232  
  2233  See [AWS Docs on Dualstack Endpoints](https://docs.aws.amazon.com/AmazonS3/latest/userguide/dual-stack-endpoints.html)`,
  2234  			Default:  false,
  2235  			Advanced: true,
  2236  		}, {
  2237  			Name:     "use_accelerate_endpoint",
  2238  			Provider: "AWS",
  2239  			Help: `If true use the AWS S3 accelerated endpoint.
  2240  
  2241  See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
  2242  			Default:  false,
  2243  			Advanced: true,
  2244  		}, {
  2245  			Name:     "leave_parts_on_error",
  2246  			Provider: "AWS",
  2247  			Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
  2248  
  2249  It should be set to true for resuming uploads across different sessions.
  2250  
  2251  WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
  2252  `,
  2253  			Default:  false,
  2254  			Advanced: true,
  2255  		}, {
  2256  			Name: "list_chunk",
  2257  			Help: `Size of listing chunk (response list for each ListObject S3 request).
  2258  
  2259  This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
  2260  Most services truncate the response list to 1000 objects even if more than that is requested.
  2261  In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
  2262  In Ceph, this can be increased with the "rgw list buckets max chunk" option.
  2263  `,
  2264  			Default:  1000,
  2265  			Advanced: true,
  2266  		}, {
  2267  			Name: "list_version",
  2268  			Help: `Version of ListObjects to use: 1, 2 or 0 for auto.
  2269  
  2270  When S3 originally launched it only provided the ListObjects call to
  2271  enumerate objects in a bucket.
  2272  
  2273  However in May 2016 the ListObjectsV2 call was introduced. This has
  2274  much higher performance and should be used if at all possible.
  2275  
  2276  If set to the default, 0, rclone will guess according to the provider
  2277  set which list objects method to call. If it guesses wrong, then it
  2278  may be set manually here.
  2279  `,
  2280  			Default:  0,
  2281  			Advanced: true,
  2282  		}, {
  2283  			Name: "list_url_encode",
  2284  			Help: `Whether to url encode listings: true/false/unset
  2285  
  2286  Some providers support URL encoding listings and where this is
  2287  available this is more reliable when using control characters in file
  2288  names. If this is set to unset (the default) then rclone will choose
  2289  according to the provider setting what to apply, but you can override
  2290  rclone's choice here.
  2291  `,
  2292  			Default:  fs.Tristate{},
  2293  			Advanced: true,
  2294  		}, {
  2295  			Name: "no_check_bucket",
  2296  			Help: `If set, don't attempt to check the bucket exists or create it.
  2297  
  2298  This can be useful when trying to minimise the number of transactions
  2299  rclone does if you know the bucket exists already.
  2300  
  2301  It can also be needed if the user you are using does not have bucket
  2302  creation permissions. Before v1.52.0 this would have passed silently
  2303  due to a bug.
  2304  `,
  2305  			Default:  false,
  2306  			Advanced: true,
  2307  		}, {
  2308  			Name: "no_head",
  2309  			Help: `If set, don't HEAD uploaded objects to check integrity.
  2310  
  2311  This can be useful when trying to minimise the number of transactions
  2312  rclone does.
  2313  
  2314  Setting it means that if rclone receives a 200 OK message after
  2315  uploading an object with PUT then it will assume that it got uploaded
  2316  properly.
  2317  
  2318  In particular it will assume:
  2319  
  2320  - the metadata, including modtime, storage class and content type was as uploaded
  2321  - the size was as uploaded
  2322  
  2323  It reads the following items from the response for a single part PUT:
  2324  
  2325  - the MD5SUM
  2326  - The uploaded date
  2327  
  2328  For multipart uploads these items aren't read.
  2329  
  2330  If a source object of unknown length is uploaded then rclone **will** do a
  2331  HEAD request.
  2332  
  2333  Setting this flag increases the chance for undetected upload failures,
  2334  in particular an incorrect size, so it isn't recommended for normal
  2335  operation. In practice the chance of an undetected upload failure is
  2336  very small even with this flag.
  2337  `,
  2338  			Default:  false,
  2339  			Advanced: true,
  2340  		}, {
  2341  			Name:     "no_head_object",
  2342  			Help:     `If set, do not do HEAD before GET when getting objects.`,
  2343  			Default:  false,
  2344  			Advanced: true,
  2345  		}, {
  2346  			Name:     config.ConfigEncoding,
  2347  			Help:     config.ConfigEncodingHelp,
  2348  			Advanced: true,
  2349  			// Any UTF-8 character is valid in a key, however it can't handle
  2350  			// invalid UTF-8 and "/" has a special meaning.
  2351  			//
  2352  			// The SDK can't seem to handle uploading files called '.'
  2353  			//
  2354  			// FIXME would be nice to add
  2355  			// - initial / encoding
  2356  			// - doubled / encoding
  2357  			// - trailing / encoding
  2358  			// so that AWS keys are always valid file names
  2359  			Default: encoder.EncodeInvalidUtf8 |
  2360  				encoder.EncodeSlash |
  2361  				encoder.EncodeDot,
  2362  		}, {
  2363  			Name:     "memory_pool_flush_time",
  2364  			Default:  fs.Duration(time.Minute),
  2365  			Advanced: true,
  2366  			Hide:     fs.OptionHideBoth,
  2367  			Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
  2368  		}, {
  2369  			Name:     "memory_pool_use_mmap",
  2370  			Default:  false,
  2371  			Advanced: true,
  2372  			Hide:     fs.OptionHideBoth,
  2373  			Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
  2374  		}, {
  2375  			Name:     "disable_http2",
  2376  			Default:  false,
  2377  			Advanced: true,
  2378  			Help: `Disable usage of http2 for S3 backends.
  2379  
  2380  There is currently an unsolved issue with the s3 (specifically minio) backend
  2381  and HTTP/2.  HTTP/2 is enabled by default for the s3 backend but can be
  2382  disabled here.  When the issue is solved this flag will be removed.
  2383  
  2384  See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631
  2385  
  2386  `,
  2387  		}, {
  2388  			Name: "download_url",
  2389  			Help: `Custom endpoint for downloads.
  2390  This is usually set to a CloudFront CDN URL as AWS S3 offers
  2391  cheaper egress for data downloaded through the CloudFront network.`,
  2392  			Advanced: true,
  2393  		}, {
  2394  			Name:     "directory_markers",
  2395  			Default:  false,
  2396  			Advanced: true,
  2397  			Help: `Upload an empty object with a trailing slash when a new directory is created
  2398  
  2399  Empty folders are unsupported for bucket based remotes, so this option creates an empty
  2400  object ending with "/" to persist the folder.
  2401  `,
  2402  		}, {
  2403  			Name: "use_multipart_etag",
  2404  			Help: `Whether to use ETag in multipart uploads for verification
  2405  
  2406  This should be true, false or left unset to use the default for the provider.
  2407  `,
  2408  			Default:  fs.Tristate{},
  2409  			Advanced: true,
  2410  		}, {
  2411  			Name: "use_presigned_request",
  2412  			Help: `Whether to use a presigned request or PutObject for single part uploads
  2413  
  2414  If this is false rclone will use PutObject from the AWS SDK to upload
  2415  an object.
  2416  
  2417  Versions of rclone < 1.59 use presigned requests to upload a single
  2418  part object and setting this flag to true will re-enable that
  2419  functionality. This shouldn't be necessary except in exceptional
  2420  circumstances or for testing.
  2421  `,
  2422  			Default:  false,
  2423  			Advanced: true,
  2424  		}, {
  2425  			Name:     "versions",
  2426  			Help:     "Include old versions in directory listings.",
  2427  			Default:  false,
  2428  			Advanced: true,
  2429  		}, {
  2430  			Name: "version_at",
  2431  			Help: `Show file versions as they were at the specified time.
  2432  
  2433  The parameter should be a date, "2006-01-02", a datetime "2006-01-02
  2434  15:04:05" or a duration for that long ago, e.g. "100d" or "1h".
  2435  
  2436  Note that when using this no file write operations are permitted,
  2437  so you can't upload files or delete them.
  2438  
  2439  See [the time option docs](/docs/#time-option) for valid formats.
  2440  `,
  2441  			Default:  fs.Time{},
  2442  			Advanced: true,
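        			// Example invocations (illustrative - the remote and bucket names
        			// are hypothetical):
        			//
        			//	rclone ls --s3-version-at "2006-01-02" remote:bucket
        			//	rclone ls --s3-version-at 100d remote:bucket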
  2443  		}, {
  2444  			Name: "version_deleted",
  2445  			Help: `Show deleted file markers when using versions.
  2446  
  2447  This shows deleted file markers in the listing when using versions. These will appear
  2448  as 0 size files. The only operation which can be performed on them is deletion.
  2449  
  2450  Deleting a delete marker will reveal the previous version.
  2451  
  2452  Deleted files will always show with a timestamp.
  2453  `,
  2454  			Default:  false,
  2455  			Advanced: true,
  2456  		}, {
  2457  			Name: "decompress",
  2458  			Help: `If set this will decompress gzip encoded objects.
  2459  
  2460  It is possible to upload objects to S3 with "Content-Encoding: gzip"
  2461  set. Normally rclone will download these files as compressed objects.
  2462  
  2463  If this flag is set then rclone will decompress these files with
  2464  "Content-Encoding: gzip" as they are received. This means that rclone
  2465  can't check the size and hash but the file contents will be decompressed.
  2466  `,
  2467  			Advanced: true,
  2468  			Default:  false,
  2469  		}, {
  2470  			Name: "might_gzip",
  2471  			Help: strings.ReplaceAll(`Set this if the backend might gzip objects.
  2472  
  2473  Normally providers will not alter objects when they are downloaded. If
  2474  an object was not uploaded with |Content-Encoding: gzip| then it won't
  2475  be set on download.
  2476  
  2477  However some providers may gzip objects even if they weren't uploaded
  2478  with |Content-Encoding: gzip| (eg Cloudflare).
  2479  
  2480  A symptom of this would be receiving errors like
  2481  
  2482      ERROR corrupted on transfer: sizes differ NNN vs MMM
  2483  
  2484  If you set this flag and rclone downloads an object with
  2485  Content-Encoding: gzip set and chunked transfer encoding, then rclone
  2486  will decompress the object on the fly.
  2487  
  2488  If this is left unset (the default) then rclone will choose
  2489  what to apply according to the provider setting, but you can override
  2490  rclone's choice here.
  2491  `, "|", "`"),
  2492  			Default:  fs.Tristate{},
  2493  			Advanced: true,
  2494  		}, {
  2495  			Name: "use_accept_encoding_gzip",
  2496  			Help: strings.ReplaceAll(`Whether to send |Accept-Encoding: gzip| header.
  2497  
  2498  By default, rclone will append |Accept-Encoding: gzip| to the request to download
  2499  compressed objects whenever possible.
  2500  
  2501  However some providers such as Google Cloud Storage may alter the HTTP headers, breaking
  2502  the signature of the request.
  2503  
  2504  A symptom of this would be receiving errors like
  2505  
  2506  	SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided.
  2507  
  2508  In this case, you might want to try disabling this option.
  2509  `, "|", "`"),
  2510  			Default:  fs.Tristate{},
  2511  			Advanced: true,
  2512  		}, {
  2513  			Name:     "no_system_metadata",
  2514  			Help:     `Suppress setting and reading of system metadata`,
  2515  			Advanced: true,
  2516  			Default:  false,
  2517  		}, {
  2518  			Name:     "sts_endpoint",
  2519  			Help:     "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
  2520  			Provider: "AWS",
  2521  			Advanced: true,
  2522  		}, {
  2523  			Name: "use_already_exists",
  2524  			Help: strings.ReplaceAll(`Set if rclone should report BucketAlreadyExists errors on bucket creation.
  2525  
  2526  At some point during the evolution of the s3 protocol, AWS started
  2527  returning an |AlreadyOwnedByYou| error when attempting to create a
  2528  bucket that the user already owned, rather than a
  2529  |BucketAlreadyExists| error.
  2530  
  2531  Unfortunately exactly what has been implemented by s3 clones is a
  2532  little inconsistent, some return |AlreadyOwnedByYou|, some return
  2533  |BucketAlreadyExists| and some return no error at all.
  2534  
  2535  This is important to rclone because it ensures the bucket exists by
  2536  creating it on quite a lot of operations (unless
  2537  |--s3-no-check-bucket| is used).
  2538  
  2539  If rclone knows the provider can return |AlreadyOwnedByYou| or returns
  2540  no error then it can report |BucketAlreadyExists| errors when the user
  2541  attempts to create a bucket not owned by them. Otherwise rclone
  2542  ignores the |BucketAlreadyExists| error which can lead to confusion.
  2543  
  2544  This should be automatically set correctly for all providers rclone
  2545  knows about - please make a bug report if not.
  2546  `, "|", "`"),
  2547  			Default:  fs.Tristate{},
  2548  			Advanced: true,
  2549  		}, {
  2550  			Name: "use_multipart_uploads",
  2551  			Help: `Set if rclone should use multipart uploads.
  2552  
  2553  You can change this if you want to disable the use of multipart uploads.
  2554  This shouldn't be necessary in normal operation.
  2555  
  2556  This should be automatically set correctly for all providers rclone
  2557  knows about - please make a bug report if not.
  2558  `,
  2559  			Default:  fs.Tristate{},
  2560  			Advanced: true,
  2561  		},
  2562  		}})
  2563  }
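        // For orientation, a remote exercising a few of the options defined above
        // might be configured like this (illustrative values only, not taken from
        // this file):
        //
        //	[s3remote]
        //	type = s3
        //	provider = AWS
        //	region = us-east-1
        //	storage_class = STANDARD_IA
        //	upload_concurrency = 8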
  2564  
  2565  // Constants
  2566  const (
  2567  	metaMtime   = "mtime"     // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime
  2568  	metaMD5Hash = "md5chksum" // the meta key to store md5hash in
  2569  	// The maximum size of object we can COPY - this should be 5 GiB but is < 5 GB for b2 compatibility
  2570  	// See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76
  2571  	maxSizeForCopy      = 4768 * 1024 * 1024
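        	// i.e. 4768 MiB ≈ 4.66 GiB, deliberately a little under the 5 GiB
        	// single-operation COPY limit mentioned above.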
  2572  	maxUploadParts      = 10000 // maximum allowed number of parts in a multi-part upload
  2573  	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)
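        	// 5 MiB is the smallest part size S3 accepts for all but the last
        	// part of a multipart upload.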
  2574  	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
  2575  	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
  2576  	minSleep            = 10 * time.Millisecond           // In case of error, start at 10ms sleep.
  2577  	maxExpireDuration   = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week
  2578  )
  2579  
  2580  // globals
  2581  var (
  2582  	errNotWithVersionAt = errors.New("can't modify or delete files in --s3-version-at mode")
  2583  )
  2584  
  2585  // system metadata keys which this backend owns
  2586  var systemMetadataInfo = map[string]fs.MetadataHelp{
  2587  	"cache-control": {
  2588  		Help:    "Cache-Control header",
  2589  		Type:    "string",
  2590  		Example: "no-cache",
  2591  	},
  2592  	"content-disposition": {
  2593  		Help:    "Content-Disposition header",
  2594  		Type:    "string",
  2595  		Example: "inline",
  2596  	},
  2597  	"content-encoding": {
  2598  		Help:    "Content-Encoding header",
  2599  		Type:    "string",
  2600  		Example: "gzip",
  2601  	},
  2602  	"content-language": {
  2603  		Help:    "Content-Language header",
  2604  		Type:    "string",
  2605  		Example: "en-US",
  2606  	},
  2607  	"content-type": {
  2608  		Help:    "Content-Type header",
  2609  		Type:    "string",
  2610  		Example: "text/plain",
  2611  	},
  2612  	// "tagging": {
  2613  	// 	Help:    "x-amz-tagging header",
  2614  	// 	Type:    "string",
  2615  	// 	Example: "tag1=value1&tag2=value2",
  2616  	// },
  2617  	"tier": {
  2618  		Help:     "Tier of the object",
  2619  		Type:     "string",
  2620  		Example:  "GLACIER",
  2621  		ReadOnly: true,
  2622  	},
  2623  	"mtime": {
  2624  		Help:    "Time of last modification, read from rclone metadata",
  2625  		Type:    "RFC 3339",
  2626  		Example: "2006-01-02T15:04:05.999999999Z07:00",
  2627  	},
  2628  	"btime": {
  2629  		Help:     "Time of file birth (creation) read from Last-Modified header",
  2630  		Type:     "RFC 3339",
  2631  		Example:  "2006-01-02T15:04:05.999999999Z07:00",
  2632  		ReadOnly: true,
  2633  	},
  2634  }
  2635  
  2636  // Options defines the configuration for this backend
  2637  type Options struct {
  2638  	Provider              string               `config:"provider"`
  2639  	EnvAuth               bool                 `config:"env_auth"`
  2640  	AccessKeyID           string               `config:"access_key_id"`
  2641  	SecretAccessKey       string               `config:"secret_access_key"`
  2642  	Region                string               `config:"region"`
  2643  	Endpoint              string               `config:"endpoint"`
  2644  	STSEndpoint           string               `config:"sts_endpoint"`
  2645  	UseDualStack          bool                 `config:"use_dual_stack"`
  2646  	LocationConstraint    string               `config:"location_constraint"`
  2647  	ACL                   string               `config:"acl"`
  2648  	BucketACL             string               `config:"bucket_acl"`
  2649  	RequesterPays         bool                 `config:"requester_pays"`
  2650  	ServerSideEncryption  string               `config:"server_side_encryption"`
  2651  	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
  2652  	SSECustomerAlgorithm  string               `config:"sse_customer_algorithm"`
  2653  	SSECustomerKey        string               `config:"sse_customer_key"`
  2654  	SSECustomerKeyBase64  string               `config:"sse_customer_key_base64"`
  2655  	SSECustomerKeyMD5     string               `config:"sse_customer_key_md5"`
  2656  	StorageClass          string               `config:"storage_class"`
  2657  	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
  2658  	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
  2659  	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
  2660  	MaxUploadParts        int                  `config:"max_upload_parts"`
  2661  	DisableChecksum       bool                 `config:"disable_checksum"`
  2662  	SharedCredentialsFile string               `config:"shared_credentials_file"`
  2663  	Profile               string               `config:"profile"`
  2664  	SessionToken          string               `config:"session_token"`
  2665  	UploadConcurrency     int                  `config:"upload_concurrency"`
  2666  	ForcePathStyle        bool                 `config:"force_path_style"`
  2667  	V2Auth                bool                 `config:"v2_auth"`
  2668  	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
  2669  	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
  2670  	ListChunk             int64                `config:"list_chunk"`
  2671  	ListVersion           int                  `config:"list_version"`
  2672  	ListURLEncode         fs.Tristate          `config:"list_url_encode"`
  2673  	NoCheckBucket         bool                 `config:"no_check_bucket"`
  2674  	NoHead                bool                 `config:"no_head"`
  2675  	NoHeadObject          bool                 `config:"no_head_object"`
  2676  	Enc                   encoder.MultiEncoder `config:"encoding"`
  2677  	DisableHTTP2          bool                 `config:"disable_http2"`
  2678  	DownloadURL           string               `config:"download_url"`
  2679  	DirectoryMarkers      bool                 `config:"directory_markers"`
  2680  	UseMultipartEtag      fs.Tristate          `config:"use_multipart_etag"`
  2681  	UsePresignedRequest   bool                 `config:"use_presigned_request"`
  2682  	Versions              bool                 `config:"versions"`
  2683  	VersionAt             fs.Time              `config:"version_at"`
  2684  	VersionDeleted        bool                 `config:"version_deleted"`
  2685  	Decompress            bool                 `config:"decompress"`
  2686  	MightGzip             fs.Tristate          `config:"might_gzip"`
  2687  	UseAcceptEncodingGzip fs.Tristate          `config:"use_accept_encoding_gzip"`
  2688  	NoSystemMetadata      bool                 `config:"no_system_metadata"`
  2689  	UseAlreadyExists      fs.Tristate          `config:"use_already_exists"`
  2690  	UseMultipartUploads   fs.Tristate          `config:"use_multipart_uploads"`
  2691  }
  2692  
  2693  // Fs represents a remote s3 server
  2694  type Fs struct {
  2695  	name           string           // the name of the remote
  2696  	root           string           // root of the bucket - ignore all objects above this
  2697  	opt            Options          // parsed options
  2698  	ci             *fs.ConfigInfo   // global config
  2699  	ctx            context.Context  // global context for reading config
  2700  	features       *fs.Features     // optional features
  2701  	c              *s3.S3           // the connection to the s3 server
  2702  	ses            *session.Session // the s3 session
  2703  	rootBucket     string           // bucket part of root (if any)
  2704  	rootDirectory  string           // directory part of root (if any)
  2705  	cache          *bucket.Cache    // cache for bucket creation status
  2706  	pacer          *fs.Pacer        // To pace the API calls
  2707  	srv            *http.Client     // a plain http client
  2708  	srvRest        *rest.Client     // the rest connection to the server
  2709  	etagIsNotMD5   bool             // if set ETags are not MD5s
  2710  	versioningMu   sync.Mutex
  2711  	versioning     fs.Tristate // if set bucket is using versions
  2712  	warnCompressed sync.Once   // warn once about compressed files
  2713  }
  2714  
  2715  // Object describes an s3 object
  2716  type Object struct {
  2717  	// Will definitely have everything but meta which may be nil
  2718  	//
  2719  	// List will read everything but meta & mimeType - to fill
  2720  	// that in you need to call readMetaData
  2721  	fs           *Fs               // what this object is part of
  2722  	remote       string            // The remote path
  2723  	md5          string            // md5sum of the object
  2724  	bytes        int64             // size of the object
  2725  	lastModified time.Time         // Last modified
  2726  	meta         map[string]string // The object metadata if known - may be nil - with lower case keys
  2727  	mimeType     string            // MimeType of object - may be ""
  2728  	versionID    *string           // If present this points to an object version
  2729  
  2730  	// Metadata as pointers to strings as they often won't be present
  2731  	storageClass       *string // e.g. GLACIER
  2732  	cacheControl       *string // Cache-Control: header
  2733  	contentDisposition *string // Content-Disposition: header
  2734  	contentEncoding    *string // Content-Encoding: header
  2735  	contentLanguage    *string // Content-Language: header
  2736  }
  2737  
  2738  // ------------------------------------------------------------
  2739  
  2740  // Name of the remote (as passed into NewFs)
  2741  func (f *Fs) Name() string {
  2742  	return f.name
  2743  }
  2744  
  2745  // Root of the remote (as passed into NewFs)
  2746  func (f *Fs) Root() string {
  2747  	return f.root
  2748  }
  2749  
  2750  // String converts this Fs to a string
  2751  func (f *Fs) String() string {
  2752  	if f.rootBucket == "" {
  2753  		return "S3 root"
  2754  	}
  2755  	if f.rootDirectory == "" {
  2756  		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
  2757  	}
  2758  	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
  2759  }
  2760  
  2761  // Features returns the optional features of this Fs
  2762  func (f *Fs) Features() *fs.Features {
  2763  	return f.features
  2764  }
  2765  
  2766  // retryErrorCodes is a slice of error codes that we will retry
  2767  // See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
  2768  var retryErrorCodes = []int{
  2769  	429, // Too Many Requests
  2770  	500, // Internal Server Error - "We encountered an internal error. Please try again."
  2771  	503, // Service Unavailable/Slow Down - "Reduce your request rate"
  2772  }
  2773  
  2774  // S3 is pretty resilient, and the built-in retry handling is probably sufficient
  2775  // as it should notice closed connections and timeouts which are the most likely
  2776  // sort of failure modes
  2777  func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
  2778  	if fserrors.ContextError(ctx, &err) {
  2779  		return false, err
  2780  	}
  2781  	// If this is an awserr object, try and extract more useful information to determine if we should retry
  2782  	if awsError, ok := err.(awserr.Error); ok {
  2783  		// Simple case, check the original embedded error in case it's generically retryable
  2784  		if fserrors.ShouldRetry(awsError.OrigErr()) {
  2785  			return true, err
  2786  		}
  2787  		// If it is a timeout then we want to retry that
  2788  		if awsError.Code() == "RequestTimeout" {
  2789  			return true, err
  2790  		}
  2791  		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
  2792  		if reqErr, ok := err.(awserr.RequestFailure); ok {
  2793  			// 301 if wrong region for bucket - can only update if running from a bucket
  2794  			if f.rootBucket != "" {
  2795  				if reqErr.StatusCode() == http.StatusMovedPermanently {
  2796  					urfbErr := f.updateRegionForBucket(ctx, f.rootBucket)
  2797  					if urfbErr != nil {
  2798  						fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
  2799  						return false, err
  2800  					}
  2801  					return true, err
  2802  				}
  2803  			}
  2804  			for _, e := range retryErrorCodes {
  2805  				if reqErr.StatusCode() == e {
  2806  					return true, err
  2807  				}
  2808  			}
  2809  		}
  2810  	}
  2811  	// Ok, not an awserr, check for generic failure conditions
  2812  	return fserrors.ShouldRetry(err), err
  2813  }
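        // shouldRetry is normally used together with the pacer, along these lines
        // (an illustrative sketch - the HeadBucket call and bucket variable are
        // placeholders, not code from this file):
        //
        //	err := f.pacer.Call(func() (bool, error) {
        //		_, err := f.c.HeadBucketWithContext(ctx, &s3.HeadBucketInput{
        //			Bucket: &bucket,
        //		})
        //		return f.shouldRetry(ctx, err)
        //	})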
  2814  
  2815  // parsePath parses a remote 'url'
  2816  func parsePath(path string) (root string) {
  2817  	root = strings.Trim(path, "/")
  2818  	return
  2819  }
  2820  
  2821  // split returns bucket and bucketPath from the rootRelativePath
  2822  // relative to f.root
  2823  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
  2824  	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
  2825  	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
  2826  }
  2827  
  2828  // split returns bucket and bucketPath from the object
  2829  func (o *Object) split() (bucket, bucketPath string) {
  2830  	bucket, bucketPath = o.fs.split(o.remote)
  2831  	// If there is an object version, then the path may have a
  2832  	// version suffix, if so remove it.
  2833  	//
  2834  	// If we are unlucky enough to have a file name with a valid
  2835  	// version path where this wasn't required (eg using
  2836  	// --s3-version-at) then this will go wrong.
  2837  	if o.versionID != nil {
  2838  		_, bucketPath = version.Remove(bucketPath)
  2839  	}
  2840  	return bucket, bucketPath
  2841  }
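// By way of example (assuming the version name format used by lib/version):
// with --s3-versions enabled, a remote whose leaf carries a version timestamp
// segment of the form -vYYYY-MM-DD-HHMMSS-NNN has that segment stripped here,
// and the object is then addressed via its versionID rather than by the
// decorated key.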
  2842  
  2843  // getClient makes an http client according to the options
  2844  func getClient(ctx context.Context, opt *Options) *http.Client {
  2845  	// TODO: Do we need cookies too?
  2846  	t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
  2847  		if opt.DisableHTTP2 {
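			// Setting TLSNextProto to an empty, non-nil map is the
			// net/http way of disabling HTTP/2 on this transport.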
  2848  			t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
  2849  		}
  2850  	})
  2851  	return &http.Client{
  2852  		Transport: t,
  2853  	}
  2854  }
  2855  
  2856  // Default endpoint resolver
  2857  var defaultResolver = endpoints.DefaultResolver()
  2858  
  2859  // resolve (service, region) to endpoint
  2860  //
  2861  // Used to set the endpoint for the s3 service but not for other services
  2862  type resolver map[string]string
  2863  
  2864  // Add a service to the resolver, ignoring empty urls
  2865  func (r resolver) addService(service, url string) {
  2866  	if url == "" {
  2867  		return
  2868  	}
  2869  	if !strings.HasPrefix(url, "http") {
  2870  		url = "https://" + url
  2871  	}
  2872  	r[service] = url
  2873  }
  2874  
  2875  // EndpointFor returns the endpoint for s3 if set or the default if not
  2876  func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
  2877  	fs.Debugf(nil, "Resolving service %q region %q", service, region)
  2878  	url, ok := r[service]
  2879  	if ok {
  2880  		return endpoints.ResolvedEndpoint{
  2881  			URL:           url,
  2882  			SigningRegion: region,
  2883  		}, nil
  2884  	}
  2885  	return defaultResolver.EndpointFor(service, region, opts...)
  2886  }
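// An illustrative sketch (the host below is a placeholder, not a real
// endpoint): a resolver built with
//
//	r := make(resolver)
//	r.addService("s3", "minio.example.com:9000")
//
// resolves the "s3" service to https://minio.example.com:9000 (addService adds
// the https:// prefix when it is missing) and delegates any other service,
// such as "sts", to the AWS default resolver.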
  2887  
  2888  // s3Connection makes a connection to s3
  2889  func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
  2890  	ci := fs.GetConfig(ctx)
  2891  	// Make the auth
  2892  	v := credentials.Value{
  2893  		AccessKeyID:     opt.AccessKeyID,
  2894  		SecretAccessKey: opt.SecretAccessKey,
  2895  		SessionToken:    opt.SessionToken,
  2896  	}
  2897  
  2898  	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
  2899  
  2900  	def := defaults.Get()
  2901  	def.Config.HTTPClient = lowTimeoutClient
  2902  
  2903  	// start a new AWS session
  2904  	awsSession, err := session.NewSession()
  2905  	if err != nil {
  2906  		return nil, nil, fmt.Errorf("NewSession: %w", err)
  2907  	}
  2908  
  2909  	// first provider to supply a credential set "wins"
  2910  	providers := []credentials.Provider{
  2911  		// use static credentials if they're present (checked by provider)
  2912  		&credentials.StaticProvider{Value: v},
  2913  
  2914  		// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
  2915  		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
  2916  		&credentials.EnvProvider{},
  2917  
  2918  		// A SharedCredentialsProvider retrieves credentials
  2919  		// from the current user's home directory.  It checks
  2920  		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
  2921  		&credentials.SharedCredentialsProvider{
  2922  			Filename: opt.SharedCredentialsFile, // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable.
  2923  			Profile:  opt.Profile,               // If empty will look for "AWS_PROFILE" env var or "default" if not set.
  2924  		},
  2925  
  2926  		// Pick up IAM role if we're in an ECS task
  2927  		defaults.RemoteCredProvider(*def.Config, def.Handlers),
  2928  
  2929  		// Pick up IAM role in case we're on EC2
  2930  		&ec2rolecreds.EC2RoleProvider{
  2931  			Client: ec2metadata.New(awsSession, &aws.Config{
  2932  				HTTPClient: lowTimeoutClient,
  2933  			}),
  2934  			ExpiryWindow: 3 * time.Minute,
  2935  		},
  2936  	}
  2937  	cred := credentials.NewChainCredentials(providers)
  2938  
  2939  	switch {
  2940  	case opt.EnvAuth:
  2941  		// No need for empty checks if "env_auth" is true
  2942  	case v.AccessKeyID == "" && v.SecretAccessKey == "":
  2943  		// if no access key/secret is supplied and env_auth is false then fall back to anonymous access
  2944  		cred = credentials.AnonymousCredentials
  2945  		fs.Debugf(nil, "Using anonymous credentials - did you mean to set env_auth=true?")
  2946  	case v.AccessKeyID == "":
  2947  		return nil, nil, errors.New("access_key_id not found")
  2948  	case v.SecretAccessKey == "":
  2949  		return nil, nil, errors.New("secret_access_key not found")
  2950  	}
  2951  
  2952  	if opt.Region == "" {
  2953  		opt.Region = "us-east-1"
  2954  	}
  2955  	setQuirks(opt)
  2956  	awsConfig := aws.NewConfig().
  2957  		WithMaxRetries(ci.LowLevelRetries).
  2958  		WithCredentials(cred).
  2959  		WithHTTPClient(client).
  2960  		WithS3ForcePathStyle(opt.ForcePathStyle).
  2961  		WithS3UseAccelerate(opt.UseAccelerateEndpoint).
  2962  		WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint)
  2963  
  2964  	if opt.Region != "" {
  2965  		awsConfig.WithRegion(opt.Region)
  2966  	}
  2967  	if opt.Endpoint != "" || opt.STSEndpoint != "" {
  2968  		// If endpoints are set, override the relevant services only
  2969  		r := make(resolver)
  2970  		r.addService("s3", opt.Endpoint)
  2971  		r.addService("sts", opt.STSEndpoint)
  2972  		awsConfig.WithEndpointResolver(r)
  2973  	}
  2974  	if opt.UseDualStack {
  2975  		awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
  2976  	}
  2977  
  2978  	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
  2979  	awsSessionOpts := session.Options{
  2980  		Config: *awsConfig,
  2981  	}
  2982  	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
  2983  		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
  2984  		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
  2985  		// Set the name of the profile if supplied
  2986  		awsSessionOpts.Profile = opt.Profile
  2987  		// Set the shared config file if supplied
  2988  		if opt.SharedCredentialsFile != "" {
  2989  			awsSessionOpts.SharedConfigFiles = []string{opt.SharedCredentialsFile}
  2990  		}
  2991  		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
  2992  		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
  2993  		awsSessionOpts.Config.Credentials = nil
  2994  	}
  2995  	ses, err := session.NewSessionWithOptions(awsSessionOpts)
  2996  	if err != nil {
  2997  		return nil, nil, err
  2998  	}
  2999  	c := s3.New(ses)
  3000  	if opt.V2Auth || opt.Region == "other-v2-signature" {
  3001  		fs.Debugf(nil, "Using v2 auth")
  3002  		signer := func(req *request.Request) {
  3003  			// Ignore AnonymousCredentials object
  3004  			if req.Config.Credentials == credentials.AnonymousCredentials {
  3005  				return
  3006  			}
  3007  			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
  3008  		}
  3009  		c.Handlers.Sign.Clear()
  3010  		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  3011  		c.Handlers.Sign.PushBack(signer)
  3012  	}
  3013  	return c, ses, nil
  3014  }
  3015  
  3016  func checkUploadChunkSize(cs fs.SizeSuffix) error {
  3017  	if cs < minChunkSize {
  3018  		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
  3019  	}
  3020  	return nil
  3021  }
  3022  
  3023  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  3024  	err = checkUploadChunkSize(cs)
  3025  	if err == nil {
  3026  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
  3027  	}
  3028  	return
  3029  }
  3030  
  3031  func checkCopyCutoff(cs fs.SizeSuffix) error {
  3032  	minCopySize := fs.SizeSuffixBase
  3033  	if cs < minCopySize {
  3034  		return fmt.Errorf("value is too small (%v is less than %v)", cs, minCopySize)
  3035  	}
  3036  	return nil
  3037  }
  3038  
  3039  func checkUploadCutoff(cs fs.SizeSuffix) error {
  3040  	if cs > maxUploadCutoff {
  3041  		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
  3042  	}
  3043  	return nil
  3044  }
  3045  
  3046  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  3047  	if f.opt.Provider != "Rclone" {
  3048  		err = checkUploadCutoff(cs)
  3049  	}
  3050  	if err == nil {
  3051  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
  3052  	}
  3053  	return
  3054  }
  3055  
  3056  func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  3057  	err = checkUploadChunkSize(cs)
  3058  	if err == nil {
  3059  		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
  3060  	}
  3061  	return
  3062  }
  3063  
  3064  // setEndpointValueForIDriveE2 looks up the user's region endpoint for the given Access Key by calling the IDrive e2 API
  3065  func setEndpointValueForIDriveE2(m configmap.Mapper) (err error) {
  3066  	value, ok := m.Get(fs.ConfigProvider)
  3067  	if !ok || value != "IDrive" {
  3068  		return
  3069  	}
  3070  	value, ok = m.Get("access_key_id")
  3071  	if !ok || value == "" {
  3072  		return
  3073  	}
  3074  	client := &http.Client{Timeout: time.Second * 3}
  3075  	// API to get user region endpoint against the Access Key details: https://www.idrive.com/e2/guides/get_region_endpoint
  3076  	resp, err := client.Post("https://api.idrivee2.com/api/service/get_region_end_point",
  3077  		"application/json",
  3078  		strings.NewReader(`{"access_key": "`+value+`"}`))
  3079  	if err != nil {
  3080  		return
  3081  	}
  3082  	defer fs.CheckClose(resp.Body, &err)
  3083  	decoder := json.NewDecoder(resp.Body)
  3084  	var data = &struct {
  3085  		RespCode   int    `json:"resp_code"`
  3086  		RespMsg    string `json:"resp_msg"`
  3087  		DomainName string `json:"domain_name"`
  3088  	}{}
  3089  	if err = decoder.Decode(data); err == nil && data.RespCode == 0 {
  3090  		m.Set("endpoint", data.DomainName)
  3091  	}
  3092  	return
  3093  }
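// For illustration, a successful response from the endpoint API above is
// expected to decode into the anonymous struct roughly as (the values here are
// placeholders, not real data):
//
//	{"resp_code": 0, "resp_msg": "Success", "domain_name": "<region endpoint>"}
//
// and only a resp_code of 0 causes the "endpoint" config value to be set.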
  3094  
  3095  // Set the provider quirks
  3096  //
  3097  // There should be no testing against opt.Provider anywhere in the
  3098  // code except in here to localise the setting of the quirks.
  3099  //
  3100  // Run the integration tests to check you have the quirks correct.
  3101  //
  3102  //	go test -v -remote NewS3Provider:
  3103  func setQuirks(opt *Options) {
  3104  	var (
  3105  		listObjectsV2         = true // Always use ListObjectsV2 instead of ListObjects
  3106  		virtualHostStyle      = true // Use bucket.provider.com instead of putting the bucket in the URL
  3107  		urlEncodeListings     = true // URL encode the listings to help with control characters
  3108  		useMultipartEtag      = true // Set if Etags for multipart uploads are compatible with AWS
  3109  		useAcceptEncodingGzip = true // Set Accept-Encoding: gzip
  3110  		mightGzip             = true // assume all providers might use content encoding gzip until proven otherwise
  3111  		useAlreadyExists      = true // Set if provider returns AlreadyOwnedByYou or no error if you try to remake your own bucket
  3112  		useMultipartUploads   = true // Set if provider supports multipart uploads
  3113  	)
  3114  	switch opt.Provider {
  3115  	case "AWS":
  3116  		// No quirks
  3117  		mightGzip = false // Never auto gzips objects
  3118  	case "Alibaba":
  3119  		useMultipartEtag = false // Alibaba seems to calculate multipart Etags differently from AWS
  3120  		useAlreadyExists = true  // returns 200 OK
  3121  	case "HuaweiOBS":
  3122  		// Huawei OBS PFS does not support ListObjectsV2, and if urlEncodeListings is turned on the marker will not work and the listing will keep returning the same page forever.
  3123  		urlEncodeListings = false
  3124  		listObjectsV2 = false
  3125  		useAlreadyExists = false // untested
  3126  	case "Ceph":
  3127  		listObjectsV2 = false
  3128  		virtualHostStyle = false
  3129  		urlEncodeListings = false
  3130  		useAlreadyExists = false // untested
  3131  	case "ChinaMobile":
  3132  		listObjectsV2 = false
  3133  		virtualHostStyle = false
  3134  		urlEncodeListings = false
  3135  		useAlreadyExists = false // untested
  3136  	case "Cloudflare":
  3137  		virtualHostStyle = false
  3138  		useMultipartEtag = false // currently multipart Etags are random
  3139  	case "ArvanCloud":
  3140  		listObjectsV2 = false
  3141  		virtualHostStyle = false
  3142  		urlEncodeListings = false
  3143  		useAlreadyExists = false // untested
  3144  	case "DigitalOcean":
  3145  		urlEncodeListings = false
  3146  		useAlreadyExists = false // untested
  3147  	case "Dreamhost":
  3148  		urlEncodeListings = false
  3149  		useAlreadyExists = false // untested
  3150  	case "IBMCOS":
  3151  		listObjectsV2 = false // untested
  3152  		virtualHostStyle = false
  3153  		urlEncodeListings = false
  3154  		useMultipartEtag = false // untested
  3155  		useAlreadyExists = false // returns BucketAlreadyExists
  3156  	case "IDrive":
  3157  		virtualHostStyle = false
  3158  		useAlreadyExists = false // untested
  3159  	case "IONOS":
  3160  		// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
  3161  		virtualHostStyle = false
  3162  		urlEncodeListings = false
  3163  		useAlreadyExists = false // untested
  3164  	case "Petabox":
  3165  		useAlreadyExists = false // untested
  3166  	case "Liara":
  3167  		virtualHostStyle = false
  3168  		urlEncodeListings = false
  3169  		useMultipartEtag = false
  3170  		useAlreadyExists = false // untested
  3171  	case "Linode":
  3172  		useAlreadyExists = true // returns 200 OK
  3173  	case "LyveCloud":
  3174  		useMultipartEtag = false // LyveCloud seems to calculate multipart Etags differently from AWS
  3175  		useAlreadyExists = false // untested
  3176  	case "Minio":
  3177  		virtualHostStyle = false
  3178  	case "Netease":
  3179  		listObjectsV2 = false // untested
  3180  		urlEncodeListings = false
  3181  		useMultipartEtag = false // untested
  3182  		useAlreadyExists = false // untested
  3183  	case "RackCorp":
  3184  		// No quirks
  3185  		useMultipartEtag = false // untested
  3186  		useAlreadyExists = false // untested
  3187  	case "Rclone":
  3188  		listObjectsV2 = true
  3189  		urlEncodeListings = true
  3190  		virtualHostStyle = false
  3191  		useMultipartEtag = false
  3192  		useAlreadyExists = false
  3193  		// useMultipartUploads = false - set this manually
  3194  	case "Scaleway":
  3195  		// Scaleway can only have 1000 parts in an upload
  3196  		if opt.MaxUploadParts > 1000 {
  3197  			opt.MaxUploadParts = 1000
  3198  		}
  3199  		urlEncodeListings = true
  3200  		useAlreadyExists = true
  3201  	case "SeaweedFS":
  3202  		listObjectsV2 = false // untested
  3203  		virtualHostStyle = false
  3204  		urlEncodeListings = false
  3205  		useMultipartEtag = false // untested
  3206  		useAlreadyExists = false // untested
  3207  	case "StackPath":
  3208  		listObjectsV2 = false // untested
  3209  		virtualHostStyle = false
  3210  		urlEncodeListings = false
  3211  		useAlreadyExists = false // untested
  3212  	case "Storj":
  3213  		// Force chunk size to >= 64 MiB
  3214  		if opt.ChunkSize < 64*fs.Mebi {
  3215  			opt.ChunkSize = 64 * fs.Mebi
  3216  		}
  3217  		useAlreadyExists = false // returns BucketAlreadyExists
  3218  	case "Synology":
  3219  		useMultipartEtag = false
  3220  		useAlreadyExists = false // untested
  3221  	case "TencentCOS":
  3222  		listObjectsV2 = false    // untested
  3223  		useMultipartEtag = false // untested
  3224  		useAlreadyExists = false // untested
  3225  	case "Wasabi":
  3226  		useAlreadyExists = true // returns 200 OK
  3227  	case "Leviia":
  3228  		useAlreadyExists = false // untested
  3229  	case "Qiniu":
  3230  		useMultipartEtag = false
  3231  		urlEncodeListings = false
  3232  		virtualHostStyle = false
  3233  		useAlreadyExists = false // untested
  3234  	case "GCS":
  3235  		// Google breaks the request signature by mutating the Accept-Encoding HTTP header
  3236  		// https://github.com/rclone/rclone/issues/6670
  3237  		useAcceptEncodingGzip = false
  3238  		useAlreadyExists = true // returns BucketNameUnavailable instead of BucketAlreadyExists but good enough!
  3239  		// GCS S3 doesn't support multi-part server side copy:
  3240  		// See: https://issuetracker.google.com/issues/323465186
  3241  		// So make the cutoff very large, forcing single part copies which GCS does seem to support at any size
  3242  		opt.CopyCutoff = math.MaxInt64
  3243  	default:
  3244  		fs.Logf("s3", "s3 provider %q not known - please set correctly", opt.Provider)
  3245  		fallthrough
  3246  	case "Other":
  3247  		listObjectsV2 = false
  3248  		virtualHostStyle = false
  3249  		urlEncodeListings = false
  3250  		useMultipartEtag = false
  3251  		useAlreadyExists = false
  3252  	}
  3253  
  3254  	// Path Style vs Virtual Host style
  3255  	if virtualHostStyle || opt.UseAccelerateEndpoint {
  3256  		opt.ForcePathStyle = false
  3257  	}
  3258  
  3259  	// Set to see if we need to URL encode listings
  3260  	if !opt.ListURLEncode.Valid {
  3261  		opt.ListURLEncode.Valid = true
  3262  		opt.ListURLEncode.Value = urlEncodeListings
  3263  	}
  3264  
  3265  	// Set the correct list version if not manually set
  3266  	if opt.ListVersion == 0 {
  3267  		if listObjectsV2 {
  3268  			opt.ListVersion = 2
  3269  		} else {
  3270  			opt.ListVersion = 1
  3271  		}
  3272  	}
  3273  
  3274  	// Set the correct use multipart Etag for error checking if not manually set
  3275  	if !opt.UseMultipartEtag.Valid {
  3276  		opt.UseMultipartEtag.Valid = true
  3277  		opt.UseMultipartEtag.Value = useMultipartEtag
  3278  	}
  3279  
  3280  	// set MightGzip if not manually set
  3281  	if !opt.MightGzip.Valid {
  3282  		opt.MightGzip.Valid = true
  3283  		opt.MightGzip.Value = mightGzip
  3284  	}
  3285  
  3286  	// set UseAcceptEncodingGzip if not manually set
  3287  	if !opt.UseAcceptEncodingGzip.Valid {
  3288  		opt.UseAcceptEncodingGzip.Valid = true
  3289  		opt.UseAcceptEncodingGzip.Value = useAcceptEncodingGzip
  3290  	}
  3291  
  3292  	// Has the provider got AlreadyOwnedByYou error?
  3293  	if !opt.UseAlreadyExists.Valid {
  3294  		opt.UseAlreadyExists.Valid = true
  3295  		opt.UseAlreadyExists.Value = useAlreadyExists
  3296  	}
  3297  
  3298  	// Set the correct use multipart uploads if not manually set
  3299  	if !opt.UseMultipartUploads.Valid {
  3300  		opt.UseMultipartUploads.Valid = true
  3301  		opt.UseMultipartUploads.Value = useMultipartUploads
  3302  	}
  3303  	if !opt.UseMultipartUploads.Value {
  3304  		opt.UploadCutoff = math.MaxInt64
  3305  	}
  3306  
  3307  }
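// A worked example for illustration: with Provider "Minio" and no overrides in
// the config, the defaults above leave ListObjectsV2 listings with URL
// encoding enabled and multipart Etag checking on, and only switch off
// virtual host style addressing, so ForcePathStyle is left as configured
// rather than being forced off.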
  3308  
  3309  // setRoot changes the root of the Fs
  3310  func (f *Fs) setRoot(root string) {
  3311  	f.root = parsePath(root)
  3312  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
  3313  }
  3314  
  3315  // return a pointer to the string if non-empty or nil if it is empty
  3316  func stringPointerOrNil(s string) *string {
  3317  	if s == "" {
  3318  		return nil
  3319  	}
  3320  	return &s
  3321  }
  3322  
  3323  // NewFs constructs an Fs from the path, bucket:path
  3324  func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
  3325  	// Parse config into Options struct
  3326  	opt := new(Options)
  3327  	err := configstruct.Set(m, opt)
  3328  	if err != nil {
  3329  		return nil, err
  3330  	}
  3331  	err = checkUploadChunkSize(opt.ChunkSize)
  3332  	if err != nil {
  3333  		return nil, fmt.Errorf("s3: chunk size: %w", err)
  3334  	}
  3335  	err = checkUploadCutoff(opt.UploadCutoff)
  3336  	if err != nil {
  3337  		return nil, fmt.Errorf("s3: upload cutoff: %w", err)
  3338  	}
  3339  	err = checkCopyCutoff(opt.CopyCutoff)
  3340  	if err != nil {
  3341  		return nil, fmt.Errorf("s3: --s3-copy-cutoff: %w", err)
  3342  	}
  3343  	if opt.Versions && opt.VersionAt.IsSet() {
  3344  		return nil, errors.New("s3: can't use --s3-versions and --s3-version-at at the same time")
  3345  	}
  3346  	if opt.BucketACL == "" {
  3347  		opt.BucketACL = opt.ACL
  3348  	}
  3349  	if opt.SSECustomerKeyBase64 != "" && opt.SSECustomerKey != "" {
  3350  		return nil, errors.New("s3: can't use sse_customer_key and sse_customer_key_base64 at the same time")
  3351  	} else if opt.SSECustomerKeyBase64 != "" {
  3352  		// Decode the base64-encoded key and store it in the SSECustomerKey field
  3353  		decoded, err := base64.StdEncoding.DecodeString(opt.SSECustomerKeyBase64)
  3354  		if err != nil {
  3355  			return nil, fmt.Errorf("s3: Could not decode sse_customer_key_base64: %w", err)
  3356  		}
  3357  		opt.SSECustomerKey = string(decoded)
  3358  	}
  3359  	if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" {
  3360  		// calculate CustomerKeyMD5 if not supplied
  3361  		md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey))
  3362  		opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:])
  3363  	}
  3364  	srv := getClient(ctx, opt)
  3365  	c, ses, err := s3Connection(ctx, opt, srv)
  3366  	if err != nil {
  3367  		return nil, err
  3368  	}
  3369  
  3370  	ci := fs.GetConfig(ctx)
  3371  	pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
  3372  	// Set pacer retries to 2 (1 try and 1 retry) because we are
  3373  	// relying on SDK retry mechanism, but we allow 2 attempts to
  3374  	// retry directory listings after XMLSyntaxError
  3375  	pc.SetRetries(2)
  3376  
  3377  	f := &Fs{
  3378  		name:    name,
  3379  		opt:     *opt,
  3380  		ci:      ci,
  3381  		ctx:     ctx,
  3382  		c:       c,
  3383  		ses:     ses,
  3384  		pacer:   pc,
  3385  		cache:   bucket.NewCache(),
  3386  		srv:     srv,
  3387  		srvRest: rest.NewClient(fshttp.NewClient(ctx)),
  3388  	}
  3389  	if opt.ServerSideEncryption == "aws:kms" || opt.SSECustomerAlgorithm != "" {
  3390  		// From: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
  3391  		//
  3392  		// Objects encrypted by SSE-S3 or plaintext have ETags that are an MD5
  3393  		// digest of their data.
  3394  		//
  3395  		// Objects encrypted by SSE-C or SSE-KMS have ETags that are not an
  3396  		// MD5 digest of their object data.
  3397  		f.etagIsNotMD5 = true
  3398  	}
  3399  	f.setRoot(root)
  3400  	f.features = (&fs.Features{
  3401  		ReadMimeType:      true,
  3402  		WriteMimeType:     true,
  3403  		ReadMetadata:      true,
  3404  		WriteMetadata:     true,
  3405  		UserMetadata:      true,
  3406  		BucketBased:       true,
  3407  		BucketBasedRootOK: true,
  3408  		SetTier:           true,
  3409  		GetTier:           true,
  3410  		SlowModTime:       true,
  3411  	}).Fill(ctx, f)
  3412  	if opt.Provider == "Storj" {
  3413  		f.features.SetTier = false
  3414  		f.features.GetTier = false
  3415  	}
  3416  	if opt.Provider == "IDrive" {
  3417  		f.features.SetTier = false
  3418  	}
  3419  	if opt.DirectoryMarkers {
  3420  		f.features.CanHaveEmptyDirectories = true
  3421  	}
  3422  	// f.listMultipartUploads()
  3423  	if !opt.UseMultipartUploads.Value {
  3424  		fs.Debugf(f, "Disabling multipart uploads")
  3425  		f.features.OpenChunkWriter = nil
  3426  	}
  3427  
  3428  	if f.rootBucket != "" && f.rootDirectory != "" && !opt.NoHeadObject && !strings.HasSuffix(root, "/") {
  3429  		// Check to see if the (bucket,directory) is actually an existing file
  3430  		oldRoot := f.root
  3431  		newRoot, leaf := path.Split(oldRoot)
  3432  		f.setRoot(newRoot)
  3433  		_, err := f.NewObject(ctx, leaf)
  3434  		if err != nil {
  3435  			// File doesn't exist or is a directory so return old f
  3436  			f.setRoot(oldRoot)
  3437  			return f, nil
  3438  		}
  3439  		// return an error with an fs which points to the parent
  3440  		return f, fs.ErrorIsFile
  3441  	}
  3442  	return f, nil
  3443  }
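// For illustration: constructing the Fs on "remote:bucket/path/to/file.txt"
// where that key exists as a file returns an Fs rooted at "bucket/path/to"
// together with fs.ErrorIsFile, which is how callers detect that the path
// addressed a single object rather than a directory.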
  3444  
  3445  // getMetaDataListing unconditionally gets the metadata for the object from the bucket listing
  3446  //
  3447  // This is needed to find versioned objects from their paths.
  3448  //
  3449  // It may return info == nil and err == nil if a HEAD would be more appropriate
  3450  func (f *Fs) getMetaDataListing(ctx context.Context, wantRemote string) (info *s3.Object, versionID *string, err error) {
  3451  	bucket, bucketPath := f.split(wantRemote)
  3452  
  3453  	// Strip the version string off if using versions
  3454  	if f.opt.Versions {
  3455  		var timestamp time.Time
  3456  		timestamp, bucketPath = version.Remove(bucketPath)
  3457  		// If the path had no version string return no info, to force caller to look it up
  3458  		if timestamp.IsZero() {
  3459  			return nil, nil, nil
  3460  		}
  3461  	}
  3462  
  3463  	err = f.list(ctx, listOpt{
  3464  		bucket:       bucket,
  3465  		directory:    bucketPath,
  3466  		prefix:       f.rootDirectory,
  3467  		recurse:      true,
  3468  		withVersions: f.opt.Versions,
  3469  		findFile:     true,
  3470  		versionAt:    f.opt.VersionAt,
  3471  		hidden:       f.opt.VersionDeleted,
  3472  	}, func(gotRemote string, object *s3.Object, objectVersionID *string, isDirectory bool) error {
  3473  		if isDirectory {
  3474  			return nil
  3475  		}
  3476  		if wantRemote != gotRemote {
  3477  			return nil
  3478  		}
  3479  		info = object
  3480  		versionID = objectVersionID
  3481  		return errEndList // read only 1 item
  3482  	})
  3483  	if err != nil {
  3484  		if err == fs.ErrorDirNotFound {
  3485  			return nil, nil, fs.ErrorObjectNotFound
  3486  		}
  3487  		return nil, nil, err
  3488  	}
  3489  	if info == nil {
  3490  		return nil, nil, fs.ErrorObjectNotFound
  3491  	}
  3492  	return info, versionID, nil
  3493  }
  3494  
  3495  // stringClonePointer clones the string pointed to by sp into new
  3496  // memory. This is useful to stop us keeping references to small
  3497  // strings carved out of large XML responses.
  3498  func stringClonePointer(sp *string) *string {
  3499  	if sp == nil {
  3500  		return nil
  3501  	}
  3502  	var s = *sp
  3503  	return &s
  3504  }
  3505  
  3506  // Return an Object from a path
  3507  //
  3508  // If it can't be found it returns the error ErrorObjectNotFound.
  3509  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object, versionID *string) (obj fs.Object, err error) {
  3510  	o := &Object{
  3511  		fs:     f,
  3512  		remote: remote,
  3513  	}
  3514  	if info == nil && ((f.opt.Versions && version.Match(remote)) || f.opt.VersionAt.IsSet()) {
  3515  		// If versions, have to read the listing to find the correct version ID
  3516  		info, versionID, err = f.getMetaDataListing(ctx, remote)
  3517  		if err != nil {
  3518  			return nil, err
  3519  		}
  3520  	}
  3521  	if info != nil {
  3522  		// Set info but not meta
  3523  		if info.LastModified == nil {
  3524  			fs.Logf(o, "Failed to read last modified")
  3525  			o.lastModified = time.Now()
  3526  		} else {
  3527  			o.lastModified = *info.LastModified
  3528  		}
  3529  		o.setMD5FromEtag(aws.StringValue(info.ETag))
  3530  		o.bytes = aws.Int64Value(info.Size)
  3531  		o.storageClass = stringClonePointer(info.StorageClass)
  3532  		o.versionID = stringClonePointer(versionID)
  3533  		// If it is a delete marker, mark the metadata as read since there is none to read
  3534  		if info.Size == isDeleteMarker {
  3535  			o.meta = map[string]string{}
  3536  		}
  3537  	} else if !o.fs.opt.NoHeadObject {
  3538  		err := o.readMetaData(ctx) // reads info and meta, returning an error
  3539  		if err != nil {
  3540  			return nil, err
  3541  		}
  3542  	}
  3543  	return o, nil
  3544  }
  3545  
  3546  // NewObject finds the Object at remote.  If it can't be found
  3547  // it returns the error fs.ErrorObjectNotFound.
  3548  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  3549  	return f.newObjectWithInfo(ctx, remote, nil, nil)
  3550  }
  3551  
  3552  // Gets the bucket location
  3553  func (f *Fs) getBucketLocation(ctx context.Context, bucket string) (string, error) {
  3554  	region, err := s3manager.GetBucketRegion(ctx, f.ses, bucket, "", func(r *request.Request) {
  3555  		r.Config.S3ForcePathStyle = aws.Bool(f.opt.ForcePathStyle)
  3556  	})
  3557  	if err != nil {
  3558  		return "", err
  3559  	}
  3560  	return region, nil
  3561  }
  3562  
  3563  // Updates the region for the bucket by reading the region from the
  3564  // bucket then updating the session.
  3565  func (f *Fs) updateRegionForBucket(ctx context.Context, bucket string) error {
  3566  	region, err := f.getBucketLocation(ctx, bucket)
  3567  	if err != nil {
  3568  		return fmt.Errorf("reading bucket location failed: %w", err)
  3569  	}
  3570  	if aws.StringValue(f.c.Config.Endpoint) != "" {
  3571  		return fmt.Errorf("can't set region to %q as endpoint is set", region)
  3572  	}
  3573  	if aws.StringValue(f.c.Config.Region) == region {
  3574  		return fmt.Errorf("region is already %q - not updating", region)
  3575  	}
  3576  
  3577  	// Make a new session with the new region
  3578  	oldRegion := f.opt.Region
  3579  	f.opt.Region = region
  3580  	c, ses, err := s3Connection(f.ctx, &f.opt, f.srv)
  3581  	if err != nil {
  3582  		return fmt.Errorf("creating new session failed: %w", err)
  3583  	}
  3584  	f.c = c
  3585  	f.ses = ses
  3586  
  3587  	fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
  3588  	return nil
  3589  }
  3590  
  3591  // Common interface for bucket listers
  3592  type bucketLister interface {
  3593  	List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error)
  3594  	URLEncodeListings(bool)
  3595  }
  3596  
  3597  // V1 bucket lister
  3598  type v1List struct {
  3599  	f   *Fs
  3600  	req s3.ListObjectsInput
  3601  }
  3602  
  3603  // Create a new V1 bucket lister
  3604  func (f *Fs) newV1List(req *s3.ListObjectsV2Input) bucketLister {
  3605  	l := &v1List{
  3606  		f: f,
  3607  	}
  3608  	// Convert v2 req into v1 req
  3609  	//structs.SetFrom(&l.req, req)
  3610  	setFrom_s3ListObjectsInput_s3ListObjectsV2Input(&l.req, req)
  3611  	return l
  3612  }
  3613  
  3614  // List a bucket with V1 listing
  3615  func (ls *v1List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
  3616  	respv1, err := ls.f.c.ListObjectsWithContext(ctx, &ls.req)
  3617  	if err != nil {
  3618  		return nil, nil, err
  3619  	}
  3620  
  3621  	// Set up the request for next time
  3622  	ls.req.Marker = respv1.NextMarker
  3623  	if aws.BoolValue(respv1.IsTruncated) && ls.req.Marker == nil {
  3624  		if len(respv1.Contents) == 0 {
  3625  			return nil, nil, errors.New("s3 protocol error: received listing v1 with IsTruncated set, no NextMarker and no Contents")
  3626  		}
  3627  		// Use the last Key received if no NextMarker and isTruncated
  3628  		ls.req.Marker = respv1.Contents[len(respv1.Contents)-1].Key
  3629  
  3630  	}
  3631  
  3632  	// If we are URL encoding then we must decode the marker
  3633  	if ls.req.Marker != nil && ls.req.EncodingType != nil {
  3634  		*ls.req.Marker, err = url.QueryUnescape(*ls.req.Marker)
  3635  		if err != nil {
  3636  			return nil, nil, fmt.Errorf("failed to URL decode Marker %q: %w", *ls.req.Marker, err)
  3637  		}
  3638  	}
  3639  
  3640  	// convert v1 resp into v2 resp
  3641  	resp = new(s3.ListObjectsV2Output)
  3642  	//structs.SetFrom(resp, respv1)
  3643  	setFrom_s3ListObjectsV2Output_s3ListObjectsOutput(resp, respv1)
  3644  
  3645  	return resp, nil, nil
  3646  }
  3647  
  3648  // URL Encode the listings
  3649  func (ls *v1List) URLEncodeListings(encode bool) {
  3650  	if encode {
  3651  		ls.req.EncodingType = aws.String(s3.EncodingTypeUrl)
  3652  	} else {
  3653  		ls.req.EncodingType = nil
  3654  	}
  3655  }
  3656  
  3657  // V2 bucket lister
  3658  type v2List struct {
  3659  	f   *Fs
  3660  	req s3.ListObjectsV2Input
  3661  }
  3662  
  3663  // Create a new V2 bucket lister
  3664  func (f *Fs) newV2List(req *s3.ListObjectsV2Input) bucketLister {
  3665  	return &v2List{
  3666  		f:   f,
  3667  		req: *req,
  3668  	}
  3669  }
  3670  
  3671  // Do a V2 listing
  3672  func (ls *v2List) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
  3673  	resp, err = ls.f.c.ListObjectsV2WithContext(ctx, &ls.req)
  3674  	if err != nil {
  3675  		return nil, nil, err
  3676  	}
  3677  	if aws.BoolValue(resp.IsTruncated) && (resp.NextContinuationToken == nil || *resp.NextContinuationToken == "") {
  3678  		return nil, nil, errors.New("s3 protocol error: received listing v2 with IsTruncated set and no NextContinuationToken. Should you be using `--s3-list-version 1`?")
  3679  	}
  3680  	ls.req.ContinuationToken = resp.NextContinuationToken
  3681  	return resp, nil, nil
  3682  }
  3683  
  3684  // URL Encode the listings
  3685  func (ls *v2List) URLEncodeListings(encode bool) {
  3686  	if encode {
  3687  		ls.req.EncodingType = aws.String(s3.EncodingTypeUrl)
  3688  	} else {
  3689  		ls.req.EncodingType = nil
  3690  	}
  3691  }
  3692  
  3693  // Versions bucket lister
  3694  type versionsList struct {
  3695  	f              *Fs
  3696  	req            s3.ListObjectVersionsInput
  3697  	versionAt      time.Time // set if we want only versions before this
  3698  	usingVersionAt bool      // set if we need to use versionAt
  3699  	hidden         bool      // set to see hidden versions
  3700  	lastKeySent    string    // last Key sent to the receiving function
  3701  }
  3702  
  3703  // Create a new Versions bucket lister
  3704  func (f *Fs) newVersionsList(req *s3.ListObjectsV2Input, hidden bool, versionAt time.Time) bucketLister {
  3705  	l := &versionsList{
  3706  		f:              f,
  3707  		versionAt:      versionAt,
  3708  		usingVersionAt: !versionAt.IsZero(),
  3709  		hidden:         hidden,
  3710  	}
  3711  	// Convert v2 req into withVersions req
  3712  	//structs.SetFrom(&l.req, req)
  3713  	setFrom_s3ListObjectVersionsInput_s3ListObjectsV2Input(&l.req, req)
  3714  	return l
  3715  }
  3716  
  3717  // Any s3.Object or s3.ObjectVersion with this as its Size is a delete marker
  3718  var isDeleteMarker = new(int64)
  3719  
  3720  // versionLess reports whether a sorts before b: alphabetically by key,
  3721  // then newest LastModified first when the Keys match, and the one with
  3722  // IsLatest set first if everything else matches.
  3723  func versionLess(a, b *s3.ObjectVersion) bool {
  3724  	if a == nil || a.Key == nil || a.LastModified == nil {
  3725  		return true
  3726  	}
  3727  	if b == nil || b.Key == nil || b.LastModified == nil {
  3728  		return false
  3729  	}
  3730  	if *a.Key < *b.Key {
  3731  		return true
  3732  	}
  3733  	if *a.Key > *b.Key {
  3734  		return false
  3735  	}
  3736  	dt := (*a.LastModified).Sub(*b.LastModified)
  3737  	if dt > 0 {
  3738  		return true
  3739  	}
  3740  	if dt < 0 {
  3741  		return false
  3742  	}
  3743  	if aws.BoolValue(a.IsLatest) {
  3744  		return true
  3745  	}
  3746  	return false
  3747  }
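// For illustration: for two versions of the same key the one with the later
// LastModified sorts first; if the timestamps are equal too, the one with
// IsLatest set sorts first. Versions of different keys sort alphabetically by
// key regardless of their timestamps.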
  3748  
  3749  // Merge the DeleteMarkers into the Versions.
  3750  //
  3751  // These are delivered by S3 sorted by key then by LastModified
  3752  // newest first, but annoyingly the SDK splits them into two lists
  3753  // so we need to merge them back again
  3754  //
  3755  // We do this by converting the s3.DeleteEntry into
  3756  // s3.ObjectVersion with Size = isDeleteMarker to tell them apart
  3757  //
  3758  // We then merge them back into the Versions in the correct order
  3759  func mergeDeleteMarkers(oldVersions []*s3.ObjectVersion, deleteMarkers []*s3.DeleteMarkerEntry) (newVersions []*s3.ObjectVersion) {
  3760  	newVersions = make([]*s3.ObjectVersion, 0, len(oldVersions)+len(deleteMarkers))
  3761  	for _, deleteMarker := range deleteMarkers {
  3762  		var obj = new(s3.ObjectVersion)
  3763  		//structs.SetFrom(obj, deleteMarker)
  3764  		setFrom_s3ObjectVersion_s3DeleteMarkerEntry(obj, deleteMarker)
  3765  		obj.Size = isDeleteMarker
  3766  		for len(oldVersions) > 0 && versionLess(oldVersions[0], obj) {
  3767  			newVersions = append(newVersions, oldVersions[0])
  3768  			oldVersions = oldVersions[1:]
  3769  		}
  3770  		newVersions = append(newVersions, obj)
  3771  	}
  3772  	// Merge any remaining versions
  3773  	newVersions = append(newVersions, oldVersions...)
  3774  	return newVersions
  3775  }
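// A small example for illustration (keys and times are made up): merging
// versions [a@10:00, a@09:00, b@09:00] with delete markers [a@11:00] produces
// [a@11:00 (marker), a@10:00, a@09:00, b@09:00] - the marker is slotted in
// where the key/newest-first ordering puts it and is tagged with
// Size == isDeleteMarker so later code can tell it apart.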
  3776  
  3777  // List a bucket with versions
  3778  func (ls *versionsList) List(ctx context.Context) (resp *s3.ListObjectsV2Output, versionIDs []*string, err error) {
  3779  	respVersions, err := ls.f.c.ListObjectVersionsWithContext(ctx, &ls.req)
  3780  	if err != nil {
  3781  		return nil, nil, err
  3782  	}
  3783  
  3784  	// Set up the request for next time
  3785  	ls.req.KeyMarker = respVersions.NextKeyMarker
  3786  	ls.req.VersionIdMarker = respVersions.NextVersionIdMarker
  3787  	if aws.BoolValue(respVersions.IsTruncated) && ls.req.KeyMarker == nil {
  3788  		return nil, nil, errors.New("s3 protocol error: received versions listing with IsTruncated set with no NextKeyMarker")
  3789  	}
  3790  
  3791  	// If we are URL encoding then we must decode the marker
  3792  	if ls.req.KeyMarker != nil && ls.req.EncodingType != nil {
  3793  		*ls.req.KeyMarker, err = url.QueryUnescape(*ls.req.KeyMarker)
  3794  		if err != nil {
  3795  			return nil, nil, fmt.Errorf("failed to URL decode KeyMarker %q: %w", *ls.req.KeyMarker, err)
  3796  		}
  3797  	}
  3798  
  3799  	// convert Versions resp into v2 resp
  3800  	resp = new(s3.ListObjectsV2Output)
  3801  	//structs.SetFrom(resp, respVersions)
  3802  	setFrom_s3ListObjectsV2Output_s3ListObjectVersionsOutput(resp, respVersions)
  3803  
  3804  	// Merge in delete Markers as s3.ObjectVersion if we need them
  3805  	if ls.hidden || ls.usingVersionAt {
  3806  		respVersions.Versions = mergeDeleteMarkers(respVersions.Versions, respVersions.DeleteMarkers)
  3807  	}
  3808  
  3809  	// Convert the Versions and the DeleteMarkers into an array of s3.Object
  3810  	//
  3811  	// These are returned in the order that they are stored with the most recent first.
  3812  	// With the annoyance that the Versions and DeleteMarkers are split into two
  3813  	objs := make([]*s3.Object, 0, len(respVersions.Versions))
  3814  	for _, objVersion := range respVersions.Versions {
  3815  		if ls.usingVersionAt {
  3816  			if objVersion.LastModified.After(ls.versionAt) {
  3817  				// Ignore versions that were created after the specified time
  3818  				continue
  3819  			}
  3820  			if *objVersion.Key == ls.lastKeySent {
  3821  				// Ignore versions before the already returned version
  3822  				continue
  3823  			}
  3824  		}
  3825  		ls.lastKeySent = *objVersion.Key
  3826  		// Don't send delete markers if we don't want hidden things
  3827  		if !ls.hidden && objVersion.Size == isDeleteMarker {
  3828  			continue
  3829  		}
  3830  		var obj = new(s3.Object)
  3831  		//structs.SetFrom(obj, objVersion)
  3832  		setFrom_s3Object_s3ObjectVersion(obj, objVersion)
  3833  		// Adjust the file names
  3834  		if !ls.usingVersionAt && (!aws.BoolValue(objVersion.IsLatest) || objVersion.Size == isDeleteMarker) {
  3835  			if obj.Key != nil && objVersion.LastModified != nil {
  3836  				*obj.Key = version.Add(*obj.Key, *objVersion.LastModified)
  3837  			}
  3838  		}
  3839  		objs = append(objs, obj)
  3840  		versionIDs = append(versionIDs, objVersion.VersionId)
  3841  	}
  3842  
  3843  	resp.Contents = objs
  3844  	return resp, versionIDs, nil
  3845  }
  3846  
  3847  // URL Encode the listings
  3848  func (ls *versionsList) URLEncodeListings(encode bool) {
  3849  	if encode {
  3850  		ls.req.EncodingType = aws.String(s3.EncodingTypeUrl)
  3851  	} else {
  3852  		ls.req.EncodingType = nil
  3853  	}
  3854  }
  3855  
  3856  // listFn is called from list to handle an object.
  3857  type listFn func(remote string, object *s3.Object, versionID *string, isDirectory bool) error
  3858  
  3859  // errEndList is a sentinel used to end the list iteration now.
  3860  // listFn should return it to end the iteration with no errors.
  3861  var errEndList = errors.New("end list")
  3862  
  3863  // list options
  3864  type listOpt struct {
  3865  	bucket        string  // bucket to list
  3866  	directory     string  // directory within the bucket
  3867  	prefix        string  // prefix to remove from listing
  3868  	addBucket     bool    // if set, the bucket is added to the start of the remote
  3869  	recurse       bool    // if set, recurse to read sub directories
  3870  	withVersions  bool    // if set, versions are produced
  3871  	hidden        bool    // if set, return delete markers as objects with size == isDeleteMarker
  3872  	findFile      bool    // if set, it will look for files called (bucket, directory)
  3873  	versionAt     fs.Time // if set only show versions <= this time
  3874  	noSkipMarkers bool    // if set return dir marker objects
  3875  	restoreStatus bool    // if set return restore status in listing too
  3876  }
  3877  
  3878  // list lists the objects into the function supplied with the opt
  3879  // supplied.
  3880  func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
  3881  	if opt.prefix != "" {
  3882  		opt.prefix += "/"
  3883  	}
  3884  	if !opt.findFile {
  3885  		if opt.directory != "" {
  3886  			opt.directory += "/"
  3887  		}
  3888  	}
  3889  	delimiter := ""
  3890  	if !opt.recurse {
  3891  		delimiter = "/"
  3892  	}
  3893  	// URL encode the listings so we can use control characters in object names
  3894  	// See: https://github.com/aws/aws-sdk-go/issues/1914
  3895  	//
  3896  	// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
  3897  	// it doesn't encode CommonPrefixes.
  3898  	// See: https://tracker.ceph.com/issues/41870
  3899  	//
  3900  	// This also does not work under IBM COS: See https://github.com/rclone/rclone/issues/3345
  3901  	// though maybe it does on some versions.
  3902  	//
  3903  	// This does work with minio but was only added relatively recently
  3904  	// https://github.com/minio/minio/pull/7265
  3905  	//
  3906  	// So we enable it only on providers we know support it properly; all others can retry when an
  3907  	// XML syntax error is detected.
  3908  	urlEncodeListings := f.opt.ListURLEncode.Value
  3909  	req := s3.ListObjectsV2Input{
  3910  		Bucket:    &opt.bucket,
  3911  		Delimiter: &delimiter,
  3912  		Prefix:    &opt.directory,
  3913  		MaxKeys:   &f.opt.ListChunk,
  3914  	}
  3915  	if opt.restoreStatus {
  3916  		restoreStatus := "RestoreStatus"
  3917  		req.OptionalObjectAttributes = []*string{&restoreStatus}
  3918  	}
  3919  	if f.opt.RequesterPays {
  3920  		req.RequestPayer = aws.String(s3.RequestPayerRequester)
  3921  	}
  3922  	var listBucket bucketLister
  3923  	switch {
  3924  	case opt.withVersions || opt.versionAt.IsSet():
  3925  		listBucket = f.newVersionsList(&req, opt.hidden, time.Time(opt.versionAt))
  3926  	case f.opt.ListVersion == 1:
  3927  		listBucket = f.newV1List(&req)
  3928  	default:
  3929  		listBucket = f.newV2List(&req)
  3930  	}
  3931  	foundItems := 0
  3932  	for {
  3933  		var resp *s3.ListObjectsV2Output
  3934  		var err error
  3935  		var versionIDs []*string
  3936  		err = f.pacer.Call(func() (bool, error) {
  3937  			listBucket.URLEncodeListings(urlEncodeListings)
  3938  			resp, versionIDs, err = listBucket.List(ctx)
  3939  			if err != nil && !urlEncodeListings {
  3940  				if awsErr, ok := err.(awserr.RequestFailure); ok {
  3941  					if origErr := awsErr.OrigErr(); origErr != nil {
  3942  						if _, ok := origErr.(*xml.SyntaxError); ok {
  3943  							// Retry the listing with URL encoding as there were characters that XML can't encode
  3944  							urlEncodeListings = true
  3945  							fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
  3946  							return true, err
  3947  						}
  3948  					}
  3949  				}
  3950  			}
  3951  			return f.shouldRetry(ctx, err)
  3952  		})
  3953  		if err != nil {
  3954  			if awsErr, ok := err.(awserr.RequestFailure); ok {
  3955  				if awsErr.StatusCode() == http.StatusNotFound {
  3956  					err = fs.ErrorDirNotFound
  3957  				}
  3958  			}
  3959  			if f.rootBucket == "" {
  3960  				// if listing from the root ignore wrong region requests returning
  3961  				// empty directory
  3962  				if reqErr, ok := err.(awserr.RequestFailure); ok {
  3963  					// 301 if wrong region for bucket
  3964  					if reqErr.StatusCode() == http.StatusMovedPermanently {
  3965  						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", opt.bucket)
  3966  						return nil
  3967  					}
  3968  				}
  3969  			}
  3970  			return err
  3971  		}
  3972  		if !opt.recurse {
  3973  			foundItems += len(resp.CommonPrefixes)
  3974  			for _, commonPrefix := range resp.CommonPrefixes {
  3975  				if commonPrefix.Prefix == nil {
  3976  					fs.Logf(f, "Nil common prefix received")
  3977  					continue
  3978  				}
  3979  				remote := *commonPrefix.Prefix
  3980  				if urlEncodeListings {
  3981  					remote, err = url.QueryUnescape(remote)
  3982  					if err != nil {
  3983  						fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
  3984  						continue
  3985  					}
  3986  				}
  3987  				remote = f.opt.Enc.ToStandardPath(remote)
  3988  				if !strings.HasPrefix(remote, opt.prefix) {
  3989  					fs.Logf(f, "Odd name received %q", remote)
  3990  					continue
  3991  				}
  3992  				remote = remote[len(opt.prefix):]
  3993  				if opt.addBucket {
  3994  					remote = bucket.Join(opt.bucket, remote)
  3995  				}
  3996  				remote = strings.TrimSuffix(remote, "/")
  3997  				err = fn(remote, &s3.Object{Key: &remote}, nil, true)
  3998  				if err != nil {
  3999  					if err == errEndList {
  4000  						return nil
  4001  					}
  4002  					return err
  4003  				}
  4004  			}
  4005  		}
  4006  		foundItems += len(resp.Contents)
  4007  		for i, object := range resp.Contents {
  4008  			remote := aws.StringValue(object.Key)
  4009  			if urlEncodeListings {
  4010  				remote, err = url.QueryUnescape(remote)
  4011  				if err != nil {
  4012  					fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
  4013  					continue
  4014  				}
  4015  			}
  4016  			remote = f.opt.Enc.ToStandardPath(remote)
  4017  			if !strings.HasPrefix(remote, opt.prefix) {
  4018  				fs.Logf(f, "Odd name received %q", remote)
  4019  				continue
  4020  			}
  4021  			isDirectory := (remote == "" || strings.HasSuffix(remote, "/")) && object.Size != nil && *object.Size == 0
  4022  			// is this a directory marker?
  4023  			if isDirectory {
  4024  				if opt.noSkipMarkers {
  4025  					// process directory markers as files
  4026  					isDirectory = false
  4027  				} else {
  4028  					// Don't insert the root directory
  4029  					if remote == opt.directory {
  4030  						continue
  4031  					}
  4032  				}
  4033  			}
  4034  			remote = remote[len(opt.prefix):]
  4035  			if isDirectory {
  4036  				// process directory markers as directories
  4037  				remote = strings.TrimRight(remote, "/")
  4038  			}
  4039  			if opt.addBucket {
  4040  				remote = bucket.Join(opt.bucket, remote)
  4041  			}
  4042  			if versionIDs != nil {
  4043  				err = fn(remote, object, versionIDs[i], isDirectory)
  4044  			} else {
  4045  				err = fn(remote, object, nil, isDirectory)
  4046  			}
  4047  			if err != nil {
  4048  				if err == errEndList {
  4049  					return nil
  4050  				}
  4051  				return err
  4052  			}
  4053  		}
  4054  		if !aws.BoolValue(resp.IsTruncated) {
  4055  			break
  4056  		}
  4057  	}
  4058  	if f.opt.DirectoryMarkers && foundItems == 0 && opt.directory != "" {
  4059  		// Determine whether the directory exists or not by whether it has a marker
  4060  		req := s3.HeadObjectInput{
  4061  			Bucket: &opt.bucket,
  4062  			Key:    &opt.directory,
  4063  		}
  4064  		_, err := f.headObject(ctx, &req)
  4065  		if err != nil {
  4066  			if err == fs.ErrorObjectNotFound {
  4067  				return fs.ErrorDirNotFound
  4068  			}
  4069  			return err
  4070  		}
  4071  	}
  4072  	return nil
  4073  }
  4074  
  4075  // Convert a list item into a DirEntry
  4076  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, versionID *string, isDirectory bool) (fs.DirEntry, error) {
  4077  	if isDirectory {
  4078  		size := int64(0)
  4079  		if object.Size != nil {
  4080  			size = *object.Size
  4081  		}
  4082  		d := fs.NewDir(remote, time.Time{}).SetSize(size)
  4083  		return d, nil
  4084  	}
  4085  	o, err := f.newObjectWithInfo(ctx, remote, object, versionID)
  4086  	if err != nil {
  4087  		return nil, err
  4088  	}
  4089  	return o, nil
  4090  }
  4091  
  4092  // listDir lists files and directories to out
  4093  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
  4094  	// List the objects and directories
  4095  	err = f.list(ctx, listOpt{
  4096  		bucket:       bucket,
  4097  		directory:    directory,
  4098  		prefix:       prefix,
  4099  		addBucket:    addBucket,
  4100  		withVersions: f.opt.Versions,
  4101  		versionAt:    f.opt.VersionAt,
  4102  		hidden:       f.opt.VersionDeleted,
  4103  	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
  4104  		entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
  4105  		if err != nil {
  4106  			return err
  4107  		}
  4108  		if entry != nil {
  4109  			entries = append(entries, entry)
  4110  		}
  4111  		return nil
  4112  	})
  4113  	if err != nil {
  4114  		return nil, err
  4115  	}
  4116  	// bucket must be present if listing succeeded
  4117  	f.cache.MarkOK(bucket)
  4118  	return entries, nil
  4119  }
  4120  
  4121  // listBuckets lists the buckets to out
  4122  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
  4123  	req := s3.ListBucketsInput{}
  4124  	var resp *s3.ListBucketsOutput
  4125  	err = f.pacer.Call(func() (bool, error) {
  4126  		resp, err = f.c.ListBucketsWithContext(ctx, &req)
  4127  		return f.shouldRetry(ctx, err)
  4128  	})
  4129  	if err != nil {
  4130  		return nil, err
  4131  	}
  4132  	for _, bucket := range resp.Buckets {
  4133  		bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name))
  4134  		f.cache.MarkOK(bucketName)
  4135  		d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
  4136  		entries = append(entries, d)
  4137  	}
  4138  	return entries, nil
  4139  }
  4140  
  4141  // List the objects and directories in dir into entries.  The
  4142  // entries can be returned in any order but should be for a
  4143  // complete directory.
  4144  //
  4145  // dir should be "" to list the root, and should not have
  4146  // trailing slashes.
  4147  //
  4148  // This should return ErrDirNotFound if the directory isn't
  4149  // found.
  4150  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  4151  	bucket, directory := f.split(dir)
  4152  	if bucket == "" {
  4153  		if directory != "" {
  4154  			return nil, fs.ErrorListBucketRequired
  4155  		}
  4156  		return f.listBuckets(ctx)
  4157  	}
  4158  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
  4159  }
  4160  
  4161  // ListR lists the objects and directories of the Fs starting
  4162  // from dir recursively into out.
  4163  //
  4164  // dir should be "" to start from the root, and should not
  4165  // have trailing slashes.
  4166  //
  4167  // This should return ErrDirNotFound if the directory isn't
  4168  // found.
  4169  //
  4170  // It should call callback for each tranche of entries read.
  4171  // These need not be returned in any particular order.  If
  4172  // callback returns an error then the listing will stop
  4173  // immediately.
  4174  //
  4175  // Don't implement this unless you have a more efficient way
  4176  // of listing recursively than doing a directory traversal.
  4177  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
  4178  	bucket, directory := f.split(dir)
  4179  	list := walk.NewListRHelper(callback)
  4180  	listR := func(bucket, directory, prefix string, addBucket bool) error {
  4181  		return f.list(ctx, listOpt{
  4182  			bucket:       bucket,
  4183  			directory:    directory,
  4184  			prefix:       prefix,
  4185  			addBucket:    addBucket,
  4186  			recurse:      true,
  4187  			withVersions: f.opt.Versions,
  4188  			versionAt:    f.opt.VersionAt,
  4189  			hidden:       f.opt.VersionDeleted,
  4190  		}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
  4191  			entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
  4192  			if err != nil {
  4193  				return err
  4194  			}
  4195  			return list.Add(entry)
  4196  		})
  4197  	}
  4198  	if bucket == "" {
  4199  		entries, err := f.listBuckets(ctx)
  4200  		if err != nil {
  4201  			return err
  4202  		}
  4203  		for _, entry := range entries {
  4204  			err = list.Add(entry)
  4205  			if err != nil {
  4206  				return err
  4207  			}
  4208  			bucket := entry.Remote()
  4209  			err = listR(bucket, "", f.rootDirectory, true)
  4210  			if err != nil {
  4211  				return err
  4212  			}
  4213  			// bucket must be present if listing succeeded
  4214  			f.cache.MarkOK(bucket)
  4215  		}
  4216  	} else {
  4217  		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
  4218  		if err != nil {
  4219  			return err
  4220  		}
  4221  		// bucket must be present if listing succeeded
  4222  		f.cache.MarkOK(bucket)
  4223  	}
  4224  	return list.Flush()
  4225  }
  4226  
  4227  // Put the Object into the bucket
  4228  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  4229  	// Temporary Object under construction
  4230  	fs := &Object{
  4231  		fs:     f,
  4232  		remote: src.Remote(),
  4233  	}
  4234  	return fs, fs.Update(ctx, in, src, options...)
  4235  }
  4236  
  4237  // PutStream uploads to the remote path with the modTime given of indeterminate size
  4238  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  4239  	return f.Put(ctx, in, src, options...)
  4240  }
  4241  
  4242  // Check if the bucket exists
  4243  //
  4244  // NB this can return incorrect results if called immediately after bucket deletion
  4245  func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
  4246  	req := s3.HeadBucketInput{
  4247  		Bucket: &bucket,
  4248  	}
  4249  	err := f.pacer.Call(func() (bool, error) {
  4250  		_, err := f.c.HeadBucketWithContext(ctx, &req)
  4251  		return f.shouldRetry(ctx, err)
  4252  	})
  4253  	if err == nil {
  4254  		return true, nil
  4255  	}
  4256  	if err, ok := err.(awserr.RequestFailure); ok {
  4257  		if err.StatusCode() == http.StatusNotFound {
  4258  			return false, nil
  4259  		}
  4260  	}
  4261  	return false, err
  4262  }
  4263  
  4264  // Create directory marker file and parents
  4265  func (f *Fs) createDirectoryMarker(ctx context.Context, bucket, dir string) error {
  4266  	if !f.opt.DirectoryMarkers || bucket == "" {
  4267  		return nil
  4268  	}
  4269  
  4270  	// Object to be uploaded
  4271  	o := &Object{
  4272  		fs: f,
  4273  		meta: map[string]string{
  4274  			metaMtime: swift.TimeToFloatString(time.Now()),
  4275  		},
  4276  	}
  4277  
  4278  	for {
  4279  		_, bucketPath := f.split(dir)
  4280  		// Don't create the directory marker if it is the bucket or at the very root
  4281  		if bucketPath == "" {
  4282  			break
  4283  		}
  4284  		o.remote = dir + "/"
  4285  
  4286  		// Check to see if object already exists
  4287  		_, err := o.headObject(ctx)
  4288  		if err == nil {
  4289  			return nil
  4290  		}
  4291  
  4292  		// Upload it if not
  4293  		fs.Debugf(o, "Creating directory marker")
  4294  		content := io.Reader(strings.NewReader(""))
  4295  		err = o.Update(ctx, content, o)
  4296  		if err != nil {
  4297  			return fmt.Errorf("creating directory marker failed: %w", err)
  4298  		}
  4299  
  4300  		// Now check parent directory exists
  4301  		dir = path.Dir(dir)
  4302  		if dir == "/" || dir == "." {
  4303  			break
  4304  		}
  4305  	}
  4306  
  4307  	return nil
  4308  }
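
        // Illustrative note (not part of the original source, and assuming the remote
        // is rooted at a bucket): with directory markers enabled, Mkdir("a/b/c")
        // uploads zero-length marker objects "a/b/c/", then "a/b/" and "a/", stopping
        // as soon as it finds one that already exists, so that otherwise empty
        // directories stay visible in listings.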
  4309  
  4310  // Mkdir creates the bucket if it doesn't exist
  4311  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  4312  	bucket, _ := f.split(dir)
  4313  	e := f.makeBucket(ctx, bucket)
  4314  	if e != nil {
  4315  		return e
  4316  	}
  4317  	return f.createDirectoryMarker(ctx, bucket, dir)
  4318  }
  4319  
  4320  // mkdirParent creates the parent bucket/directory if it doesn't exist
  4321  func (f *Fs) mkdirParent(ctx context.Context, remote string) error {
  4322  	remote = strings.TrimRight(remote, "/")
  4323  	dir := path.Dir(remote)
  4324  	if dir == "/" || dir == "." {
  4325  		dir = ""
  4326  	}
  4327  	return f.Mkdir(ctx, dir)
  4328  }
  4329  
  4330  // makeBucket creates the bucket if it doesn't exist
  4331  func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
  4332  	if f.opt.NoCheckBucket {
  4333  		return nil
  4334  	}
  4335  	return f.cache.Create(bucket, func() error {
  4336  		req := s3.CreateBucketInput{
  4337  			Bucket: &bucket,
  4338  			ACL:    stringPointerOrNil(f.opt.BucketACL),
  4339  		}
  4340  		if f.opt.LocationConstraint != "" {
  4341  			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
  4342  				LocationConstraint: &f.opt.LocationConstraint,
  4343  			}
  4344  		}
  4345  		err := f.pacer.Call(func() (bool, error) {
  4346  			_, err := f.c.CreateBucketWithContext(ctx, &req)
  4347  			return f.shouldRetry(ctx, err)
  4348  		})
  4349  		if err == nil {
  4350  			fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
  4351  		}
  4352  		if awsErr, ok := err.(awserr.Error); ok {
  4353  			switch awsErr.Code() {
  4354  			case "BucketAlreadyOwnedByYou":
  4355  				err = nil
  4356  			case "BucketAlreadyExists", "BucketNameUnavailable":
  4357  				if f.opt.UseAlreadyExists.Value {
  4358  					// We can trust BucketAlreadyExists to mean not owned by us, so make it non-retriable
  4359  					err = fserrors.NoRetryError(err)
  4360  				} else {
  4361  					// We can't trust BucketAlreadyExists to mean not owned by us, so ignore it
  4362  					err = nil
  4363  				}
  4364  			}
  4365  		}
  4366  		return err
  4367  	}, func() (bool, error) {
  4368  		return f.bucketExists(ctx, bucket)
  4369  	})
  4370  }
  4371  
  4372  // Rmdir deletes the bucket if the fs is at the root
  4373  //
  4374  // Returns an error if it isn't empty
  4375  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  4376  	bucket, directory := f.split(dir)
  4377  	// Remove directory marker file
  4378  	if f.opt.DirectoryMarkers && bucket != "" && dir != "" {
  4379  		o := &Object{
  4380  			fs:     f,
  4381  			remote: dir + "/",
  4382  		}
  4383  		fs.Debugf(o, "Removing directory marker")
  4384  		err := o.Remove(ctx)
  4385  		if err != nil {
  4386  			return fmt.Errorf("removing directory marker failed: %w", err)
  4387  		}
  4388  	}
  4389  	if bucket == "" || directory != "" {
  4390  		return nil
  4391  	}
  4392  	return f.cache.Remove(bucket, func() error {
  4393  		req := s3.DeleteBucketInput{
  4394  			Bucket: &bucket,
  4395  		}
  4396  		err := f.pacer.Call(func() (bool, error) {
  4397  			_, err := f.c.DeleteBucketWithContext(ctx, &req)
  4398  			return f.shouldRetry(ctx, err)
  4399  		})
  4400  		if err == nil {
  4401  			fs.Infof(f, "Bucket %q deleted", bucket)
  4402  		}
  4403  		return err
  4404  	})
  4405  }
  4406  
  4407  // Precision of the remote
  4408  func (f *Fs) Precision() time.Duration {
  4409  	return time.Nanosecond
  4410  }
  4411  
  4412  // pathEscape escapes s as for a URL path.  It uses rest.URLPathEscape
  4413  // but also escapes '+' for S3 and Digital Ocean spaces compatibility
  4414  func pathEscape(s string) string {
  4415  	return strings.ReplaceAll(rest.URLPathEscape(s), "+", "%2B")
  4416  }
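
        // Illustrative example (not part of the original source):
        //
        //	pathEscape("dir/a b+c.txt") == "dir/a%20b%2Bc.txt"
        //
        // i.e. the space is percent-encoded as usual, and "+" is forced to "%2B" so
        // that providers which decode "+" as a space in keys are not confused.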
  4417  
  4418  // copy does a server-side copy
  4419  //
  4420  // It adds the boilerplate to the req passed in and calls the s3
  4421  // method
  4422  func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
  4423  	req.Bucket = &dstBucket
  4424  	req.ACL = stringPointerOrNil(f.opt.ACL)
  4425  	req.Key = &dstPath
  4426  	source := pathEscape(bucket.Join(srcBucket, srcPath))
  4427  	if src.versionID != nil {
  4428  		source += fmt.Sprintf("?versionId=%s", *src.versionID)
  4429  	}
  4430  	req.CopySource = &source
  4431  	if f.opt.RequesterPays {
  4432  		req.RequestPayer = aws.String(s3.RequestPayerRequester)
  4433  	}
  4434  	if f.opt.ServerSideEncryption != "" {
  4435  		req.ServerSideEncryption = &f.opt.ServerSideEncryption
  4436  	}
  4437  	if f.opt.SSECustomerAlgorithm != "" {
  4438  		req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
  4439  		req.CopySourceSSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
  4440  	}
  4441  	if f.opt.SSECustomerKey != "" {
  4442  		req.SSECustomerKey = &f.opt.SSECustomerKey
  4443  		req.CopySourceSSECustomerKey = &f.opt.SSECustomerKey
  4444  	}
  4445  	if f.opt.SSECustomerKeyMD5 != "" {
  4446  		req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
  4447  		req.CopySourceSSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
  4448  	}
  4449  	if f.opt.SSEKMSKeyID != "" {
  4450  		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
  4451  	}
  4452  	if req.StorageClass == nil && f.opt.StorageClass != "" {
  4453  		req.StorageClass = &f.opt.StorageClass
  4454  	}
  4455  
  4456  	if src.bytes >= int64(f.opt.CopyCutoff) {
  4457  		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, src)
  4458  	}
  4459  	return f.pacer.Call(func() (bool, error) {
  4460  		_, err := f.c.CopyObjectWithContext(ctx, req)
  4461  		return f.shouldRetry(ctx, err)
  4462  	})
  4463  }
  4464  
  4465  func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
  4466  	start := partIndex * partSize
  4467  	var ends string
  4468  	if partIndex == numParts-1 {
  4469  		if totalSize >= 1 {
  4470  			ends = strconv.FormatInt(totalSize-1, 10)
  4471  		}
  4472  	} else {
  4473  		ends = strconv.FormatInt(start+partSize-1, 10)
  4474  	}
  4475  	return fmt.Sprintf("bytes=%v-%v", start, ends)
  4476  }
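
        // Worked example (illustrative, not part of the original source): copying a
        // 12 byte object in 5 byte parts gives numParts == 3 and
        //
        //	calculateRange(5, 0, 3, 12) == "bytes=0-4"
        //	calculateRange(5, 1, 3, 12) == "bytes=5-9"
        //	calculateRange(5, 2, 3, 12) == "bytes=10-11" // final, short part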
  4477  
  4478  func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) (err error) {
  4479  	info, err := src.headObject(ctx)
  4480  	if err != nil {
  4481  		return err
  4482  	}
  4483  
  4484  	req := &s3.CreateMultipartUploadInput{}
  4485  
  4486  	// Fill in the request from the head info
  4487  	//structs.SetFrom(req, info)
  4488  	setFrom_s3CreateMultipartUploadInput_s3HeadObjectOutput(req, info)
  4489  
  4490  	// If copy metadata was set then set the Metadata to that read
  4491  	// from the head request
  4492  	if aws.StringValue(copyReq.MetadataDirective) == s3.MetadataDirectiveCopy {
  4493  		copyReq.Metadata = info.Metadata
  4494  	}
  4495  
  4496  	// Overwrite any from the copyReq
  4497  	//structs.SetFrom(req, copyReq)
  4498  	setFrom_s3CreateMultipartUploadInput_s3CopyObjectInput(req, copyReq)
  4499  
  4500  	req.Bucket = &dstBucket
  4501  	req.Key = &dstPath
  4502  
  4503  	var cout *s3.CreateMultipartUploadOutput
  4504  	if err := f.pacer.Call(func() (bool, error) {
  4505  		var err error
  4506  		cout, err = f.c.CreateMultipartUploadWithContext(ctx, req)
  4507  		return f.shouldRetry(ctx, err)
  4508  	}); err != nil {
  4509  		return err
  4510  	}
  4511  	uid := cout.UploadId
  4512  
  4513  	defer atexit.OnError(&err, func() {
  4514  		// Try to abort the upload, but ignore the error.
  4515  		fs.Debugf(src, "Cancelling multipart copy")
  4516  		_ = f.pacer.Call(func() (bool, error) {
  4517  			_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
  4518  				Bucket:       &dstBucket,
  4519  				Key:          &dstPath,
  4520  				UploadId:     uid,
  4521  				RequestPayer: req.RequestPayer,
  4522  			})
  4523  			return f.shouldRetry(ctx, err)
  4524  		})
  4525  	})()
  4526  
  4527  	srcSize := src.bytes
  4528  	partSize := int64(f.opt.CopyCutoff)
  4529  	numParts := (srcSize-1)/partSize + 1
  4530  
  4531  	fs.Debugf(src, "Starting  multipart copy with %d parts", numParts)
  4532  
  4533  	var (
  4534  		parts   = make([]*s3.CompletedPart, numParts)
  4535  		g, gCtx = errgroup.WithContext(ctx)
  4536  	)
  4537  	g.SetLimit(f.opt.UploadConcurrency)
  4538  	for partNum := int64(1); partNum <= numParts; partNum++ {
  4539  		// Fail fast: if an errgroup managed function returns an error,
  4540  		// gCtx is cancelled and there is no point in uploading the remaining parts.
  4541  		if gCtx.Err() != nil {
  4542  			break
  4543  		}
  4544  		partNum := partNum // for closure
  4545  		g.Go(func() error {
  4546  			var uout *s3.UploadPartCopyOutput
  4547  			uploadPartReq := &s3.UploadPartCopyInput{}
  4548  			//structs.SetFrom(uploadPartReq, copyReq)
  4549  			setFrom_s3UploadPartCopyInput_s3CopyObjectInput(uploadPartReq, copyReq)
  4550  			uploadPartReq.Bucket = &dstBucket
  4551  			uploadPartReq.Key = &dstPath
  4552  			uploadPartReq.PartNumber = &partNum
  4553  			uploadPartReq.UploadId = uid
  4554  			uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize))
  4555  			err := f.pacer.Call(func() (bool, error) {
  4556  				uout, err = f.c.UploadPartCopyWithContext(gCtx, uploadPartReq)
  4557  				return f.shouldRetry(gCtx, err)
  4558  			})
  4559  			if err != nil {
  4560  				return err
  4561  			}
  4562  			parts[partNum-1] = &s3.CompletedPart{
  4563  				PartNumber: &partNum,
  4564  				ETag:       uout.CopyPartResult.ETag,
  4565  			}
  4566  			return nil
  4567  		})
  4568  	}
  4569  
  4570  	err = g.Wait()
  4571  	if err != nil {
  4572  		return err
  4573  	}
  4574  
  4575  	return f.pacer.Call(func() (bool, error) {
  4576  		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
  4577  			Bucket: &dstBucket,
  4578  			Key:    &dstPath,
  4579  			MultipartUpload: &s3.CompletedMultipartUpload{
  4580  				Parts: parts,
  4581  			},
  4582  			RequestPayer: req.RequestPayer,
  4583  			UploadId:     uid,
  4584  		})
  4585  		return f.shouldRetry(ctx, err)
  4586  	})
  4587  }
  4588  
  4589  // Copy src to this remote using server-side copy operations.
  4590  //
  4591  // This is stored with the remote path given.
  4592  //
  4593  // It returns the destination Object and a possible error.
  4594  //
  4595  // Will only be called if src.Fs().Name() == f.Name()
  4596  //
  4597  // If it isn't possible then return fs.ErrorCantCopy
  4598  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  4599  	if f.opt.VersionAt.IsSet() {
  4600  		return nil, errNotWithVersionAt
  4601  	}
  4602  	dstBucket, dstPath := f.split(remote)
  4603  	err := f.mkdirParent(ctx, remote)
  4604  	if err != nil {
  4605  		return nil, err
  4606  	}
  4607  	srcObj, ok := src.(*Object)
  4608  	if !ok {
  4609  		fs.Debugf(src, "Can't copy - not same remote type")
  4610  		return nil, fs.ErrorCantCopy
  4611  	}
  4612  
  4613  	srcBucket, srcPath := srcObj.split()
  4614  	req := s3.CopyObjectInput{
  4615  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  4616  	}
  4617  
  4618  	// Update the metadata if it is in use
  4619  	if ci := fs.GetConfig(ctx); ci.Metadata {
  4620  		ui, err := srcObj.prepareUpload(ctx, src, fs.MetadataAsOpenOptions(ctx), true)
  4621  		if err != nil {
  4622  			return nil, fmt.Errorf("failed to prepare upload: %w", err)
  4623  		}
  4624  		setFrom_s3CopyObjectInput_s3PutObjectInput(&req, ui.req)
  4625  		req.MetadataDirective = aws.String(s3.MetadataDirectiveReplace)
  4626  	}
  4627  
  4628  	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj)
  4629  	if err != nil {
  4630  		return nil, err
  4631  	}
  4632  	return f.NewObject(ctx, remote)
  4633  }
  4634  
  4635  // Hashes returns the supported hash sets.
  4636  func (f *Fs) Hashes() hash.Set {
  4637  	return hash.Set(hash.MD5)
  4638  }
  4639  
  4640  // PublicLink generates a public link to the remote path (usually readable by anyone)
  4641  func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
  4642  	if strings.HasSuffix(remote, "/") {
  4643  		return "", fs.ErrorCantShareDirectories
  4644  	}
  4645  	obj, err := f.NewObject(ctx, remote)
  4646  	if err != nil {
  4647  		return "", err
  4648  	}
  4649  	o := obj.(*Object)
  4650  	if expire > maxExpireDuration {
  4651  		fs.Logf(f, "Public Link: Reducing expiry to %v as %v is greater than the max time allowed", maxExpireDuration, expire)
  4652  		expire = maxExpireDuration
  4653  	}
  4654  	bucket, bucketPath := o.split()
  4655  	httpReq, _ := f.c.GetObjectRequest(&s3.GetObjectInput{
  4656  		Bucket:    &bucket,
  4657  		Key:       &bucketPath,
  4658  		VersionId: o.versionID,
  4659  	})
  4660  
  4661  	return httpReq.Presign(time.Duration(expire))
  4662  }
  4663  
  4664  var commandHelp = []fs.CommandHelp{{
  4665  	Name:  "restore",
  4666  	Short: "Restore objects from GLACIER to normal storage",
  4667  	Long: `This command can be used to restore one or more objects from GLACIER
  4668  to normal storage.
  4669  
  4670  Usage Examples:
  4671  
  4672      rclone backend restore s3:bucket/path/to/object -o priority=PRIORITY -o lifetime=DAYS
  4673      rclone backend restore s3:bucket/path/to/directory -o priority=PRIORITY -o lifetime=DAYS
  4674      rclone backend restore s3:bucket -o priority=PRIORITY -o lifetime=DAYS
  4675  
  4676  This command also obeys the filters. Test first with the --interactive/-i or --dry-run flags
  4677  
  4678      rclone --interactive backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
  4679  
  4680  All the objects shown will be marked for restore, then
  4681  
  4682      rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard -o lifetime=1
  4683  
  4684  It returns a list of status dictionaries with Remote and Status
  4685  keys. The Status will be OK if it was successful or an error message
  4686  if not.
  4687  
  4688      [
  4689          {
  4690              "Status": "OK",
  4691              "Remote": "test.txt"
  4692          },
  4693          {
  4694              "Status": "OK",
  4695              "Remote": "test/file4.txt"
  4696          }
  4697      ]
  4698  
  4699  `,
  4700  	Opts: map[string]string{
  4701  		"priority":    "Priority of restore: Standard|Expedited|Bulk",
  4702  		"lifetime":    "Lifetime of the active copy in days",
  4703  		"description": "The optional description for the job.",
  4704  	},
  4705  }, {
  4706  	Name:  "restore-status",
  4707  	Short: "Show the restore status for objects being restored from GLACIER to normal storage",
  4708  	Long: `This command can be used to show the status for objects being restored from GLACIER
  4709  to normal storage.
  4710  
  4711  Usage Examples:
  4712  
  4713      rclone backend restore-status s3:bucket/path/to/object
  4714      rclone backend restore-status s3:bucket/path/to/directory
  4715      rclone backend restore-status -o all s3:bucket/path/to/directory
  4716  
  4717  This command does not obey the filters.
  4718  
  4719  It returns a list of status dictionaries.
  4720  
  4721      [
  4722          {
  4723              "Remote": "file.txt",
  4724              "VersionID": null,
  4725              "RestoreStatus": {
  4726                  "IsRestoreInProgress": true,
  4727                  "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
  4728              },
  4729              "StorageClass": "GLACIER"
  4730          },
  4731          {
  4732              "Remote": "test.pdf",
  4733              "VersionID": null,
  4734              "RestoreStatus": {
  4735                  "IsRestoreInProgress": false,
  4736                  "RestoreExpiryDate": "2023-09-06T12:29:19+01:00"
  4737              },
  4738              "StorageClass": "DEEP_ARCHIVE"
  4739          }
  4740      ]
  4741  `,
  4742  	Opts: map[string]string{
  4743  		"all": "if set then show all objects, not just ones with restore status",
  4744  	},
  4745  }, {
  4746  	Name:  "list-multipart-uploads",
  4747  	Short: "List the unfinished multipart uploads",
  4748  	Long: `This command lists the unfinished multipart uploads in JSON format.
  4749  
  4750      rclone backend list-multipart s3:bucket/path/to/object
  4751  
  4752  It returns a dictionary of buckets with values as lists of unfinished
  4753  multipart uploads.
  4754  
  4755  You can call it with no bucket in which case it lists all buckets, with
  4756  a bucket or with a bucket and path.
  4757  
  4758      {
  4759        "rclone": [
  4760          {
  4761            "Initiated": "2020-06-26T14:20:36Z",
  4762            "Initiator": {
  4763              "DisplayName": "XXX",
  4764              "ID": "arn:aws:iam::XXX:user/XXX"
  4765            },
  4766            "Key": "KEY",
  4767            "Owner": {
  4768              "DisplayName": null,
  4769              "ID": "XXX"
  4770            },
  4771            "StorageClass": "STANDARD",
  4772            "UploadId": "XXX"
  4773          }
  4774        ],
  4775        "rclone-1000files": [],
  4776        "rclone-dst": []
  4777      }
  4778  
  4779  `,
  4780  }, {
  4781  	Name:  "cleanup",
  4782  	Short: "Remove unfinished multipart uploads.",
  4783  	Long: `This command removes unfinished multipart uploads of age greater than
  4784  max-age which defaults to 24 hours.
  4785  
  4786  Note that you can use --interactive/-i or --dry-run with this command to see what
  4787  it would do.
  4788  
  4789      rclone backend cleanup s3:bucket/path/to/object
  4790      rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
  4791  
  4792  Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
  4793  `,
  4794  	Opts: map[string]string{
  4795  		"max-age": "Max age of upload to delete",
  4796  	},
  4797  }, {
  4798  	Name:  "cleanup-hidden",
  4799  	Short: "Remove old versions of files.",
  4800  	Long: `This command removes any old hidden versions of files
  4801  on a versions enabled bucket.
  4802  
  4803  Note that you can use --interactive/-i or --dry-run with this command to see what
  4804  it would do.
  4805  
  4806      rclone backend cleanup-hidden s3:bucket/path/to/dir
  4807  `,
  4808  }, {
  4809  	Name:  "versioning",
  4810  	Short: "Set/get versioning support for a bucket.",
  4811  	Long: `This command sets versioning support if a parameter is
  4812  passed and then returns the current versioning status for the bucket
  4813  supplied.
  4814  
  4815      rclone backend versioning s3:bucket # read status only
  4816      rclone backend versioning s3:bucket Enabled
  4817      rclone backend versioning s3:bucket Suspended
  4818  
  4819  It may return "Enabled", "Suspended" or "Unversioned". Note that once versioning
  4820  has been enabled the status can't be set back to "Unversioned".
  4821  `,
  4822  }, {
  4823  	Name:  "set",
  4824  	Short: "Set command for updating the config parameters.",
  4825  	Long: `This set command can be used to update the config parameters
  4826  for a running s3 backend.
  4827  
  4828  Usage Examples:
  4829  
  4830      rclone backend set s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
  4831      rclone rc backend/command command=set fs=s3: [-o opt_name=opt_value] [-o opt_name2=opt_value2]
  4832      rclone rc backend/command command=set fs=s3: -o session_token=X -o access_key_id=X -o secret_access_key=X
  4833  
  4834  The option keys are named as they are in the config file.
  4835  
  4836  This rebuilds the connection to the s3 backend when it is called with
  4837  the new parameters. Only new parameters need be passed as the values
  4838  will default to those currently in use.
  4839  
  4840  It doesn't return anything.
  4841  `,
  4842  }}
  4843  
  4844  // Command the backend to run a named command
  4845  //
  4846  // The command to run is given by name.
  4847  // Positional arguments may be read from arg and
  4848  // optional arguments may be read from opt.
  4849  //
  4850  // The result should be capable of being JSON encoded
  4851  // If it is a string or a []string it will be shown to the user
  4852  // otherwise it will be JSON encoded and shown to the user like that
  4853  func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  4854  	switch name {
  4855  	case "restore":
  4856  		req := s3.RestoreObjectInput{
  4857  			//Bucket:         &f.rootBucket,
  4858  			//Key:            &encodedDirectory,
  4859  			RestoreRequest: &s3.RestoreRequest{},
  4860  		}
  4861  		if lifetime := opt["lifetime"]; lifetime != "" {
  4862  			ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
  4863  			if err != nil {
  4864  				return nil, fmt.Errorf("bad lifetime: %w", err)
  4865  			}
  4866  			req.RestoreRequest.Days = &ilifetime
  4867  		}
  4868  		if priority := opt["priority"]; priority != "" {
  4869  			req.RestoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{
  4870  				Tier: &priority,
  4871  			}
  4872  		}
  4873  		if description := opt["description"]; description != "" {
  4874  			req.RestoreRequest.Description = &description
  4875  		}
  4876  		type status struct {
  4877  			Status string
  4878  			Remote string
  4879  		}
  4880  		var (
  4881  			outMu sync.Mutex
  4882  			out   = []status{}
  4883  		)
  4884  		err = operations.ListFn(ctx, f, func(obj fs.Object) {
  4885  			// Remember this is run --checkers times concurrently
  4886  			o, ok := obj.(*Object)
  4887  			st := status{Status: "OK", Remote: obj.Remote()}
  4888  			defer func() {
  4889  				outMu.Lock()
  4890  				out = append(out, st)
  4891  				outMu.Unlock()
  4892  			}()
  4893  			if operations.SkipDestructive(ctx, obj, "restore") {
  4894  				return
  4895  			}
  4896  			if !ok {
  4897  				st.Status = "Not an S3 object"
  4898  				return
  4899  			}
  4900  			if o.storageClass == nil || (*o.storageClass != "GLACIER" && *o.storageClass != "DEEP_ARCHIVE") {
  4901  				st.Status = "Not GLACIER or DEEP_ARCHIVE storage class"
  4902  				return
  4903  			}
  4904  			bucket, bucketPath := o.split()
  4905  			reqCopy := req
  4906  			reqCopy.Bucket = &bucket
  4907  			reqCopy.Key = &bucketPath
  4908  			reqCopy.VersionId = o.versionID
  4909  			err = f.pacer.Call(func() (bool, error) {
  4910  				_, err = f.c.RestoreObject(&reqCopy)
  4911  				return f.shouldRetry(ctx, err)
  4912  			})
  4913  			if err != nil {
  4914  				st.Status = err.Error()
  4915  			}
  4916  		})
  4917  		if err != nil {
  4918  			return out, err
  4919  		}
  4920  		return out, nil
  4921  	case "restore-status":
  4922  		_, all := opt["all"]
  4923  		return f.restoreStatus(ctx, all)
  4924  	case "list-multipart-uploads":
  4925  		return f.listMultipartUploadsAll(ctx)
  4926  	case "cleanup":
  4927  		maxAge := 24 * time.Hour
  4928  		if opt["max-age"] != "" {
  4929  			maxAge, err = fs.ParseDuration(opt["max-age"])
  4930  			if err != nil {
  4931  				return nil, fmt.Errorf("bad max-age: %w", err)
  4932  			}
  4933  		}
  4934  		return nil, f.cleanUp(ctx, maxAge)
  4935  	case "cleanup-hidden":
  4936  		return nil, f.CleanUpHidden(ctx)
  4937  	case "versioning":
  4938  		return f.setGetVersioning(ctx, arg...)
  4939  	case "set":
  4940  		newOpt := f.opt
  4941  		err := configstruct.Set(configmap.Simple(opt), &newOpt)
  4942  		if err != nil {
  4943  			return nil, fmt.Errorf("reading config: %w", err)
  4944  		}
  4945  		c, ses, err := s3Connection(f.ctx, &newOpt, f.srv)
  4946  		if err != nil {
  4947  			return nil, fmt.Errorf("updating session: %w", err)
  4948  		}
  4949  		f.c = c
  4950  		f.ses = ses
  4951  		f.opt = newOpt
  4952  		keys := []string{}
  4953  		for k := range opt {
  4954  			keys = append(keys, k)
  4955  		}
  4956  		fs.Logf(f, "Updated config values: %s", strings.Join(keys, ", "))
  4957  		return nil, nil
  4958  	default:
  4959  		return nil, fs.ErrorCommandNotFound
  4960  	}
  4961  }
  4962  
  4963  // Returned from "restore-status"
  4964  type restoreStatusOut struct {
  4965  	Remote        string
  4966  	VersionID     *string
  4967  	RestoreStatus *s3.RestoreStatus
  4968  	StorageClass  *string
  4969  }
  4970  
  4971  // Recursively enumerate the current fs to find objects with a restore status
  4972  func (f *Fs) restoreStatus(ctx context.Context, all bool) (out []restoreStatusOut, err error) {
  4973  	fs.Debugf(f, "all = %v", all)
  4974  	bucket, directory := f.split("")
  4975  	out = []restoreStatusOut{}
  4976  	err = f.list(ctx, listOpt{
  4977  		bucket:        bucket,
  4978  		directory:     directory,
  4979  		prefix:        f.rootDirectory,
  4980  		addBucket:     f.rootBucket == "",
  4981  		recurse:       true,
  4982  		withVersions:  f.opt.Versions,
  4983  		versionAt:     f.opt.VersionAt,
  4984  		hidden:        f.opt.VersionDeleted,
  4985  		restoreStatus: true,
  4986  	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
  4987  		entry, err := f.itemToDirEntry(ctx, remote, object, versionID, isDirectory)
  4988  		if err != nil {
  4989  			return err
  4990  		}
  4991  		if entry != nil {
  4992  			if o, ok := entry.(*Object); ok && (all || object.RestoreStatus != nil) {
  4993  				out = append(out, restoreStatusOut{
  4994  					Remote:        o.remote,
  4995  					VersionID:     o.versionID,
  4996  					RestoreStatus: object.RestoreStatus,
  4997  					StorageClass:  object.StorageClass,
  4998  				})
  4999  			}
  5000  		}
  5001  		return nil
  5002  	})
  5003  	if err != nil {
  5004  		return nil, err
  5005  	}
  5006  	// bucket must be present if listing succeeded
  5007  	f.cache.MarkOK(bucket)
  5008  	return out, nil
  5009  }
  5010  
  5011  // listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
  5012  //
  5013  // Note that rather lazily we treat key as a prefix so it matches
  5014  // directories and objects. This could surprise the user if they ask
  5015  // for "dir" and it returns "dirKey"
  5016  func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
  5017  	var (
  5018  		keyMarker      *string
  5019  		uploadIDMarker *string
  5020  	)
  5021  	uploads = []*s3.MultipartUpload{}
  5022  	for {
  5023  		req := s3.ListMultipartUploadsInput{
  5024  			Bucket:         &bucket,
  5025  			MaxUploads:     &f.opt.ListChunk,
  5026  			KeyMarker:      keyMarker,
  5027  			UploadIdMarker: uploadIDMarker,
  5028  			Prefix:         &key,
  5029  		}
  5030  		var resp *s3.ListMultipartUploadsOutput
  5031  		err = f.pacer.Call(func() (bool, error) {
  5032  			resp, err = f.c.ListMultipartUploads(&req)
  5033  			return f.shouldRetry(ctx, err)
  5034  		})
  5035  		if err != nil {
  5036  			return nil, fmt.Errorf("list multipart uploads bucket %q key %q: %w", bucket, key, err)
  5037  		}
  5038  		uploads = append(uploads, resp.Uploads...)
  5039  		if !aws.BoolValue(resp.IsTruncated) {
  5040  			break
  5041  		}
  5042  		keyMarker = resp.NextKeyMarker
  5043  		uploadIDMarker = resp.NextUploadIdMarker
  5044  	}
  5045  	return uploads, nil
  5046  }
  5047  
  5048  func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*s3.MultipartUpload, err error) {
  5049  	uploadsMap = make(map[string][]*s3.MultipartUpload)
  5050  	bucket, directory := f.split("")
  5051  	if bucket != "" {
  5052  		uploads, err := f.listMultipartUploads(ctx, bucket, directory)
  5053  		if err != nil {
  5054  			return uploadsMap, err
  5055  		}
  5056  		uploadsMap[bucket] = uploads
  5057  		return uploadsMap, nil
  5058  	}
  5059  	entries, err := f.listBuckets(ctx)
  5060  	if err != nil {
  5061  		return uploadsMap, err
  5062  	}
  5063  	for _, entry := range entries {
  5064  		bucket := entry.Remote()
  5065  		uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
  5066  		if listErr != nil {
  5067  			err = listErr
  5068  			fs.Errorf(f, "%v", err)
  5069  		}
  5070  		uploadsMap[bucket] = uploads
  5071  	}
  5072  	return uploadsMap, err
  5073  }
  5074  
  5075  // cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
  5076  func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration, uploads []*s3.MultipartUpload) (err error) {
  5077  	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
  5078  	for _, upload := range uploads {
  5079  		if upload.Initiated != nil && upload.Key != nil && upload.UploadId != nil {
  5080  			age := time.Since(*upload.Initiated)
  5081  			what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Key, upload.Initiated, age)
  5082  			if age > maxAge {
  5083  				fs.Infof(f, "removing %s", what)
  5084  				if operations.SkipDestructive(ctx, what, "remove pending upload") {
  5085  					continue
  5086  				}
  5087  				req := s3.AbortMultipartUploadInput{
  5088  					Bucket:   &bucket,
  5089  					UploadId: upload.UploadId,
  5090  					Key:      upload.Key,
  5091  				}
  5092  				_, abortErr := f.c.AbortMultipartUpload(&req)
  5093  				if abortErr != nil {
  5094  					err = fmt.Errorf("failed to remove %s: %w", what, abortErr)
  5095  					fs.Errorf(f, "%v", err)
  5096  				}
  5097  			} else {
  5098  				fs.Debugf(f, "ignoring %s", what)
  5099  			}
  5100  		}
  5101  	}
  5102  	return err
  5103  }
  5104  
  5105  // cleanUp removes all pending multipart uploads older than maxAge
  5106  func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
  5107  	uploadsMap, err := f.listMultipartUploadsAll(ctx)
  5108  	if err != nil {
  5109  		return err
  5110  	}
  5111  	for bucket, uploads := range uploadsMap {
  5112  		cleanErr := f.cleanUpBucket(ctx, bucket, maxAge, uploads)
  5113  		if cleanErr != nil {
  5114  			fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucket, cleanErr)
  5115  			err = cleanErr
  5116  		}
  5117  	}
  5118  	return err
  5119  }
  5120  
  5121  // Read whether the bucket is versioned or not
  5122  func (f *Fs) isVersioned(ctx context.Context) bool {
  5123  	f.versioningMu.Lock()
  5124  	defer f.versioningMu.Unlock()
  5125  	if !f.versioning.Valid {
  5126  		_, _ = f.setGetVersioning(ctx)
  5127  		fs.Debugf(f, "bucket is versioned: %v", f.versioning.Value)
  5128  	}
  5129  	return f.versioning.Value
  5130  }
  5131  
  5132  // Set or get bucket versioning.
  5133  //
  5134  // Pass no arguments to get, or pass "Enabled" or "Suspended"
  5135  //
  5136  // Updates f.versioning
  5137  func (f *Fs) setGetVersioning(ctx context.Context, arg ...string) (status string, err error) {
  5138  	if len(arg) > 1 {
  5139  		return "", errors.New("too many arguments")
  5140  	}
  5141  	if f.rootBucket == "" {
  5142  		return "", errors.New("need a bucket")
  5143  	}
  5144  	if len(arg) == 1 {
  5145  		var versioning = s3.VersioningConfiguration{
  5146  			Status: aws.String(arg[0]),
  5147  		}
  5148  		// Disabled is indicated by the parameter missing
  5149  		if *versioning.Status == "Disabled" {
  5150  			versioning.Status = aws.String("")
  5151  		}
  5152  		req := s3.PutBucketVersioningInput{
  5153  			Bucket:                  &f.rootBucket,
  5154  			VersioningConfiguration: &versioning,
  5155  		}
  5156  		err := f.pacer.Call(func() (bool, error) {
  5157  			_, err = f.c.PutBucketVersioningWithContext(ctx, &req)
  5158  			return f.shouldRetry(ctx, err)
  5159  		})
  5160  		if err != nil {
  5161  			return "", err
  5162  		}
  5163  	}
  5164  	req := s3.GetBucketVersioningInput{
  5165  		Bucket: &f.rootBucket,
  5166  	}
  5167  	var resp *s3.GetBucketVersioningOutput
  5168  	err = f.pacer.Call(func() (bool, error) {
  5169  		resp, err = f.c.GetBucketVersioningWithContext(ctx, &req)
  5170  		return f.shouldRetry(ctx, err)
  5171  	})
  5172  	f.versioning.Valid = true
  5173  	f.versioning.Value = false
  5174  	if err != nil {
  5175  		fs.Errorf(f, "Failed to read versioning status, assuming unversioned: %v", err)
  5176  		return "", err
  5177  	}
  5178  	if resp.Status == nil {
  5179  		return "Unversioned", err
  5180  	}
  5181  	f.versioning.Value = true
  5182  	return *resp.Status, err
  5183  }
  5184  
  5185  // CleanUp removes all pending multipart uploads older than 24 hours
  5186  func (f *Fs) CleanUp(ctx context.Context) (err error) {
  5187  	return f.cleanUp(ctx, 24*time.Hour)
  5188  }
  5189  
  5190  // purge deletes all the files and directories
  5191  //
  5192  // if oldOnly is true then it deletes only non current files.
  5193  //
  5194  // Implemented here so we can make sure we delete old versions.
  5195  func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
  5196  	if f.opt.VersionAt.IsSet() {
  5197  		return errNotWithVersionAt
  5198  	}
  5199  	bucket, directory := f.split(dir)
  5200  	if bucket == "" {
  5201  		return errors.New("can't purge from root")
  5202  	}
  5203  	versioned := f.isVersioned(ctx)
  5204  	if !versioned && oldOnly {
  5205  		fs.Infof(f, "bucket is not versioned so not removing old versions")
  5206  		return nil
  5207  	}
  5208  	var errReturn error
  5209  	var checkErrMutex sync.Mutex
  5210  	var checkErr = func(err error) {
  5211  		if err == nil {
  5212  			return
  5213  		}
  5214  		checkErrMutex.Lock()
  5215  		defer checkErrMutex.Unlock()
  5216  		if errReturn == nil {
  5217  			errReturn = err
  5218  		}
  5219  	}
  5220  
  5221  	// Delete files in parallel, with up to Config.Transfers deletions at once
  5222  	delChan := make(fs.ObjectsChan, f.ci.Transfers)
  5223  	delErr := make(chan error, 1)
  5224  	go func() {
  5225  		delErr <- operations.DeleteFiles(ctx, delChan)
  5226  	}()
  5227  	checkErr(f.list(ctx, listOpt{
  5228  		bucket:        bucket,
  5229  		directory:     directory,
  5230  		prefix:        f.rootDirectory,
  5231  		addBucket:     f.rootBucket == "",
  5232  		recurse:       true,
  5233  		withVersions:  versioned,
  5234  		hidden:        true,
  5235  		noSkipMarkers: true,
  5236  	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
  5237  		if isDirectory {
  5238  			return nil
  5239  		}
  5240  		// If the root is a dirmarker it will have lost its trailing /
  5241  		if remote == "" {
  5242  			remote = "/"
  5243  		}
  5244  		oi, err := f.newObjectWithInfo(ctx, remote, object, versionID)
  5245  		if err != nil {
  5246  			fs.Errorf(object, "Can't create object %+v", err)
  5247  			return nil
  5248  		}
  5249  		tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
  5250  		// Work out whether the file is the current version or not
  5251  		isCurrentVersion := !versioned || !version.Match(remote)
  5252  		fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
  5253  		if oldOnly && isCurrentVersion {
  5254  			// Check current version of the file
  5255  			if object.Size == isDeleteMarker {
  5256  				fs.Debugf(remote, "Deleting current version (id %q) as it is a delete marker", aws.StringValue(versionID))
  5257  				delChan <- oi
  5258  			} else {
  5259  				fs.Debugf(remote, "Not deleting current version %q", aws.StringValue(versionID))
  5260  			}
  5261  		} else {
  5262  			if object.Size == isDeleteMarker {
  5263  				fs.Debugf(remote, "Deleting delete marker (id %q)", aws.StringValue(versionID))
  5264  			} else {
  5265  				fs.Debugf(remote, "Deleting (id %q)", aws.StringValue(versionID))
  5266  			}
  5267  			delChan <- oi
  5268  		}
  5269  		tr.Done(ctx, nil)
  5270  		return nil
  5271  	}))
  5272  	close(delChan)
  5273  	checkErr(<-delErr)
  5274  
  5275  	if !oldOnly {
  5276  		checkErr(f.Rmdir(ctx, dir))
  5277  	}
  5278  	return errReturn
  5279  }
  5280  
  5281  // Purge deletes all the files and directories including the old versions.
  5282  func (f *Fs) Purge(ctx context.Context, dir string) error {
  5283  	return f.purge(ctx, dir, false)
  5284  }
  5285  
  5286  // CleanUpHidden deletes all the hidden files.
  5287  func (f *Fs) CleanUpHidden(ctx context.Context) error {
  5288  	return f.purge(ctx, "", true)
  5289  }
  5290  
  5291  // ------------------------------------------------------------
  5292  
  5293  // Fs returns the parent Fs
  5294  func (o *Object) Fs() fs.Info {
  5295  	return o.fs
  5296  }
  5297  
  5298  // Return a string version
  5299  func (o *Object) String() string {
  5300  	if o == nil {
  5301  		return "<nil>"
  5302  	}
  5303  	return o.remote
  5304  }
  5305  
  5306  // Remote returns the remote path
  5307  func (o *Object) Remote() string {
  5308  	return o.remote
  5309  }
  5310  
  5311  var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
  5312  
  5313  // Set the MD5 from the etag
  5314  func (o *Object) setMD5FromEtag(etag string) {
  5315  	if o.fs.etagIsNotMD5 {
  5316  		o.md5 = ""
  5317  		return
  5318  	}
  5319  	if etag == "" {
  5320  		o.md5 = ""
  5321  		return
  5322  	}
  5323  	hash := strings.Trim(strings.ToLower(etag), `"`)
  5324  	// Check the etag is a valid md5sum
  5325  	if !matchMd5.MatchString(hash) {
  5326  		o.md5 = ""
  5327  		return
  5328  	}
  5329  	o.md5 = hash
  5330  }
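
        // Illustrative note (not part of the original source): a plain ETag such as
        // `"d41d8cd98f00b204e9800998ecf8427e"` (the MD5 of the empty string) passes the
        // check above, whereas multipart style ETags like `"9b2cf535f27731c974343645a3985328-3"`
        // do not match and so leave o.md5 empty.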
  5331  
  5332  // Hash returns the Md5sum of an object returning a lowercase hex string
  5333  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  5334  	if t != hash.MD5 {
  5335  		return "", hash.ErrUnsupported
  5336  	}
  5337  	// If decompressing, erase the hash
  5338  	if o.bytes < 0 {
  5339  		return "", nil
  5340  	}
  5341  	// If we haven't got an MD5, then check the metadata
  5342  	if o.md5 == "" {
  5343  		err := o.readMetaData(ctx)
  5344  		if err != nil {
  5345  			return "", err
  5346  		}
  5347  	}
  5348  	return o.md5, nil
  5349  }
  5350  
  5351  // Size returns the size of an object in bytes
  5352  func (o *Object) Size() int64 {
  5353  	return o.bytes
  5354  }
  5355  
  5356  func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err error) {
  5357  	bucket, bucketPath := o.split()
  5358  	req := s3.HeadObjectInput{
  5359  		Bucket:    &bucket,
  5360  		Key:       &bucketPath,
  5361  		VersionId: o.versionID,
  5362  	}
  5363  	return o.fs.headObject(ctx, &req)
  5364  }
  5365  
  5366  func (f *Fs) headObject(ctx context.Context, req *s3.HeadObjectInput) (resp *s3.HeadObjectOutput, err error) {
  5367  	if f.opt.RequesterPays {
  5368  		req.RequestPayer = aws.String(s3.RequestPayerRequester)
  5369  	}
  5370  	if f.opt.SSECustomerAlgorithm != "" {
  5371  		req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm
  5372  	}
  5373  	if f.opt.SSECustomerKey != "" {
  5374  		req.SSECustomerKey = &f.opt.SSECustomerKey
  5375  	}
  5376  	if f.opt.SSECustomerKeyMD5 != "" {
  5377  		req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5
  5378  	}
  5379  	err = f.pacer.Call(func() (bool, error) {
  5380  		var err error
  5381  		resp, err = f.c.HeadObjectWithContext(ctx, req)
  5382  		return f.shouldRetry(ctx, err)
  5383  	})
  5384  	if err != nil {
  5385  		if awsErr, ok := err.(awserr.RequestFailure); ok {
  5386  			if awsErr.StatusCode() == http.StatusNotFound {
  5387  				return nil, fs.ErrorObjectNotFound
  5388  			}
  5389  		}
  5390  		return nil, err
  5391  	}
  5392  	if req.Bucket != nil {
  5393  		f.cache.MarkOK(*req.Bucket)
  5394  	}
  5395  	return resp, nil
  5396  }
  5397  
  5398  // readMetaData gets the metadata if it hasn't already been fetched
  5399  //
  5400  // it also sets the info
  5401  func (o *Object) readMetaData(ctx context.Context) (err error) {
  5402  	if o.meta != nil {
  5403  		return nil
  5404  	}
  5405  	resp, err := o.headObject(ctx)
  5406  	if err != nil {
  5407  		return err
  5408  	}
  5409  	o.setMetaData(resp)
  5410  	// resp.ETag, resp.ContentLength, resp.LastModified, resp.Metadata, resp.ContentType, resp.StorageClass
  5411  	return nil
  5412  }
  5413  
  5414  // Convert S3 metadata with pointers into a map[string]string
  5415  // while lowercasing the keys
  5416  func s3MetadataToMap(s3Meta map[string]*string) map[string]string {
  5417  	meta := make(map[string]string, len(s3Meta))
  5418  	for k, v := range s3Meta {
  5419  		if v != nil {
  5420  			meta[strings.ToLower(k)] = *v
  5421  		}
  5422  	}
  5423  	return meta
  5424  }
  5425  
  5426  // Convert our metadata back into S3 metadata
  5427  func mapToS3Metadata(meta map[string]string) map[string]*string {
  5428  	s3Meta := make(map[string]*string, len(meta))
  5429  	for k, v := range meta {
  5430  		s3Meta[k] = aws.String(v)
  5431  	}
  5432  	return s3Meta
  5433  }
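
        // Illustrative example (not part of the original source): a HEAD response with
        // Metadata map[string]*string{"Mtime": aws.String("1618405745.123")} becomes
        // map[string]string{"mtime": "1618405745.123"}, and mapToS3Metadata converts it
        // back to pointer form for upload. Keys are lowercased on the way in, so lookups
        // such as o.meta[metaMtime] must use lowercase names.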
  5434  
  5435  func (o *Object) setMetaData(resp *s3.HeadObjectOutput) {
  5436  	// Ignore missing Content-Length assuming it is 0
  5437  	// Some versions of ceph do this due to their apache proxies
  5438  	if resp.ContentLength != nil {
  5439  		o.bytes = *resp.ContentLength
  5440  	}
  5441  	o.setMD5FromEtag(aws.StringValue(resp.ETag))
  5442  	o.meta = s3MetadataToMap(resp.Metadata)
  5443  	// Read MD5 from metadata if present
  5444  	if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
  5445  		md5sumBytes, err := base64.StdEncoding.DecodeString(md5sumBase64)
  5446  		if err != nil {
  5447  			fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", md5sumBase64, err)
  5448  		} else if len(md5sumBytes) != 16 {
  5449  			fs.Debugf(o, "Failed to read md5sum from metadata %q: wrong length", md5sumBase64)
  5450  		} else {
  5451  			o.md5 = hex.EncodeToString(md5sumBytes)
  5452  		}
  5453  	}
  5454  	if resp.LastModified == nil {
  5455  		o.lastModified = time.Now()
  5456  		fs.Logf(o, "Failed to read last modified")
  5457  	} else {
  5458  		// Try to keep the maximum precision in lastModified. If we read
  5459  		// it from listings then it may have millisecond precision, but
  5460  		// if we read it from a HEAD/GET request then it will have
  5461  		// second precision.
  5462  		equalToWithinOneSecond := o.lastModified.Truncate(time.Second).Equal((*resp.LastModified).Truncate(time.Second))
  5463  		newHasNs := (*resp.LastModified).Nanosecond() != 0
  5464  		if !equalToWithinOneSecond || newHasNs {
  5465  			o.lastModified = *resp.LastModified
  5466  		}
  5467  	}
  5468  	o.mimeType = aws.StringValue(resp.ContentType)
  5469  
  5470  	// Set system metadata
  5471  	o.storageClass = resp.StorageClass
  5472  	o.cacheControl = resp.CacheControl
  5473  	o.contentDisposition = resp.ContentDisposition
  5474  	o.contentEncoding = resp.ContentEncoding
  5475  	o.contentLanguage = resp.ContentLanguage
  5476  
  5477  	// If decompressing then size and md5sum are unknown
  5478  	if o.fs.opt.Decompress && aws.StringValue(o.contentEncoding) == "gzip" {
  5479  		o.bytes = -1
  5480  		o.md5 = ""
  5481  	}
  5482  }
  5483  
  5484  // ModTime returns the modification time of the object
  5485  //
  5486  // It attempts to read the object's mtime and if that isn't present the
  5487  // LastModified returned in the http headers
  5488  func (o *Object) ModTime(ctx context.Context) time.Time {
  5489  	if o.fs.ci.UseServerModTime {
  5490  		return o.lastModified
  5491  	}
  5492  	err := o.readMetaData(ctx)
  5493  	if err != nil {
  5494  		fs.Logf(o, "Failed to read metadata: %v", err)
  5495  		return time.Now()
  5496  	}
  5497  	// read mtime out of metadata if available
  5498  	d, ok := o.meta[metaMtime]
  5499  	if !ok {
  5500  		// fs.Debugf(o, "No metadata")
  5501  		return o.lastModified
  5502  	}
  5503  	modTime, err := swift.FloatStringToTime(d)
  5504  	if err != nil {
  5505  		fs.Logf(o, "Failed to read mtime from object: %v", err)
  5506  		return o.lastModified
  5507  	}
  5508  	return modTime
  5509  }
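
        // Illustrative note (not part of the original source): the metaMtime value is
        // stored as a Swift style floating point Unix time, e.g. "1618405745.123456789",
        // which is why swift.FloatStringToTime / swift.TimeToFloatString are used to
        // convert it rather than time.Parse.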
  5510  
  5511  // SetModTime sets the modification time of the local fs object
  5512  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  5513  	err := o.readMetaData(ctx)
  5514  	if err != nil {
  5515  		return err
  5516  	}
  5517  	o.meta[metaMtime] = swift.TimeToFloatString(modTime)
  5518  
  5519  	// Can't update metadata here, so return this error to force a recopy
  5520  	if o.storageClass != nil && (*o.storageClass == "GLACIER" || *o.storageClass == "DEEP_ARCHIVE") {
  5521  		return fs.ErrorCantSetModTime
  5522  	}
  5523  
  5524  	// Copy the object to itself to update the metadata
  5525  	bucket, bucketPath := o.split()
  5526  	req := s3.CopyObjectInput{
  5527  		ContentType:       aws.String(fs.MimeType(ctx, o)), // Guess the content type
  5528  		Metadata:          mapToS3Metadata(o.meta),
  5529  		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
  5530  	}
  5531  	if o.fs.opt.RequesterPays {
  5532  		req.RequestPayer = aws.String(s3.RequestPayerRequester)
  5533  	}
  5534  	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
  5535  }
  5536  
  5537  // Storable returns a boolean indicating if this object is storable
  5538  func (o *Object) Storable() bool {
  5539  	return true
  5540  }
  5541  
  5542  func (o *Object) downloadFromURL(ctx context.Context, bucketPath string, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  5543  	url := o.fs.opt.DownloadURL + bucketPath
  5544  	var resp *http.Response
  5545  	opts := rest.Opts{
  5546  		Method:  "GET",
  5547  		RootURL: url,
  5548  		Options: options,
  5549  	}
  5550  	err = o.fs.pacer.Call(func() (bool, error) {
  5551  		resp, err = o.fs.srvRest.Call(ctx, &opts)
  5552  		return o.fs.shouldRetry(ctx, err)
  5553  	})
  5554  	if err != nil {
  5555  		return nil, err
  5556  	}
  5557  
  5558  	contentLength := rest.ParseSizeFromHeaders(resp.Header)
  5559  	if contentLength < 0 {
  5560  		fs.Debugf(o, "Failed to parse file size from headers")
  5561  	}
  5562  
  5563  	lastModified, err := http.ParseTime(resp.Header.Get("Last-Modified"))
  5564  	if err != nil {
  5565  		fs.Debugf(o, "Failed to parse last modified from string %s, %v", resp.Header.Get("Last-Modified"), err)
  5566  	}
  5567  
  5568  	metaData := make(map[string]*string)
  5569  	for key, value := range resp.Header {
  5570  		key = strings.ToLower(key)
  5571  		if strings.HasPrefix(key, "x-amz-meta-") {
  5572  			metaKey := strings.TrimPrefix(key, "x-amz-meta-")
  5573  			metaData[metaKey] = &value[0]
  5574  		}
  5575  	}
  5576  
  5577  	header := func(k string) *string {
  5578  		v := resp.Header.Get(k)
  5579  		if v == "" {
  5580  			return nil
  5581  		}
  5582  		return &v
  5583  	}
  5584  
  5585  	var head = s3.HeadObjectOutput{
  5586  		ETag:               header("Etag"),
  5587  		ContentLength:      &contentLength,
  5588  		LastModified:       &lastModified,
  5589  		Metadata:           metaData,
  5590  		CacheControl:       header("Cache-Control"),
  5591  		ContentDisposition: header("Content-Disposition"),
  5592  		ContentEncoding:    header("Content-Encoding"),
  5593  		ContentLanguage:    header("Content-Language"),
  5594  		ContentType:        header("Content-Type"),
  5595  		StorageClass:       header("X-Amz-Storage-Class"),
  5596  	}
  5597  	o.setMetaData(&head)
  5598  	return resp.Body, err
  5599  }
  5600  
  5601  // Open an object for read
  5602  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  5603  	bucket, bucketPath := o.split()
  5604  
  5605  	if o.fs.opt.DownloadURL != "" {
  5606  		return o.downloadFromURL(ctx, bucketPath, options...)
  5607  	}
  5608  
  5609  	req := s3.GetObjectInput{
  5610  		Bucket:    &bucket,
  5611  		Key:       &bucketPath,
  5612  		VersionId: o.versionID,
  5613  	}
  5614  	if o.fs.opt.RequesterPays {
  5615  		req.RequestPayer = aws.String(s3.RequestPayerRequester)
  5616  	}
  5617  	if o.fs.opt.SSECustomerAlgorithm != "" {
  5618  		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
  5619  	}
  5620  	if o.fs.opt.SSECustomerKey != "" {
  5621  		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
  5622  	}
  5623  	if o.fs.opt.SSECustomerKeyMD5 != "" {
  5624  		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
  5625  	}
  5626  	httpReq, resp := o.fs.c.GetObjectRequest(&req)
  5627  	fs.FixRangeOption(options, o.bytes)
  5628  
  5629  	// Override the automatic decompression in the transport to
  5630  	// download compressed files as-is
  5631  	if o.fs.opt.UseAcceptEncodingGzip.Value {
  5632  		httpReq.HTTPRequest.Header.Set("Accept-Encoding", "gzip")
  5633  	}
  5634  
  5635  	for _, option := range options {
  5636  		switch option.(type) {
  5637  		case *fs.RangeOption, *fs.SeekOption:
  5638  			_, value := option.Header()
  5639  			req.Range = &value
  5640  		case *fs.HTTPOption:
  5641  			key, value := option.Header()
  5642  			httpReq.HTTPRequest.Header.Add(key, value)
  5643  		default:
  5644  			if option.Mandatory() {
  5645  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  5646  			}
  5647  		}
  5648  	}
  5649  	err = o.fs.pacer.Call(func() (bool, error) {
  5650  		var err error
  5651  		httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
  5652  		err = httpReq.Send()
  5653  		return o.fs.shouldRetry(ctx, err)
  5654  	})
  5655  	if err, ok := err.(awserr.RequestFailure); ok {
  5656  		if err.Code() == "InvalidObjectState" {
  5657  			return nil, fmt.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
  5658  		}
  5659  	}
  5660  	if err != nil {
  5661  		return nil, err
  5662  	}
  5663  
  5664  	// read size from ContentLength or ContentRange
  5665  	size := resp.ContentLength
  5666  	if resp.ContentRange != nil {
  5667  		var contentRange = *resp.ContentRange
  5668  		slash := strings.IndexRune(contentRange, '/')
  5669  		if slash >= 0 {
  5670  			i, err := strconv.ParseInt(contentRange[slash+1:], 10, 64)
  5671  			if err == nil {
  5672  				size = &i
  5673  			} else {
  5674  				fs.Debugf(o, "Failed to find parse integer from in %q: %v", contentRange, err)
  5675  			}
  5676  		} else {
  5677  			fs.Debugf(o, "Failed to find length in %q", contentRange)
  5678  		}
  5679  	}
  5680  	var head s3.HeadObjectOutput
  5681  	//structs.SetFrom(&head, resp)
  5682  	setFrom_s3HeadObjectOutput_s3GetObjectOutput(&head, resp)
  5683  	head.ContentLength = size
  5684  	o.setMetaData(&head)
  5685  
  5686  	// Decompress body if necessary
  5687  	if aws.StringValue(resp.ContentEncoding) == "gzip" {
  5688  		if o.fs.opt.Decompress || (resp.ContentLength == nil && o.fs.opt.MightGzip.Value) {
  5689  			return readers.NewGzipReader(resp.Body)
  5690  		}
  5691  		o.fs.warnCompressed.Do(func() {
  5692  			fs.Logf(o, "Not decompressing 'Content-Encoding: gzip' compressed file. Use --s3-decompress to override")
  5693  		})
  5694  	}
  5695  
  5696  	return resp.Body, nil
  5697  }
  5698  
  5699  var warnStreamUpload sync.Once
  5700  
  5701  // state of ChunkWriter
  5702  type s3ChunkWriter struct {
  5703  	chunkSize            int64
  5704  	size                 int64
  5705  	f                    *Fs
  5706  	bucket               *string
  5707  	key                  *string
  5708  	uploadID             *string
  5709  	multiPartUploadInput *s3.CreateMultipartUploadInput
  5710  	completedPartsMu     sync.Mutex
  5711  	completedParts       []*s3.CompletedPart
  5712  	eTag                 string
  5713  	versionID            string
  5714  	md5sMu               sync.Mutex
  5715  	md5s                 []byte
  5716  	ui                   uploadInfo
  5717  	o                    *Object
  5718  }
  5719  
  5720  // OpenChunkWriter returns the chunk size and a ChunkWriter
  5721  //
  5722  // Pass in the remote and the src object
  5723  // You can also use options to hint at the desired chunk size
  5724  func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
  5725  	// Temporary Object under construction
  5726  	o := &Object{
  5727  		fs:     f,
  5728  		remote: remote,
  5729  	}
  5730  	ui, err := o.prepareUpload(ctx, src, options, false)
  5731  	if err != nil {
  5732  		return info, nil, fmt.Errorf("failed to prepare upload: %w", err)
  5733  	}
  5734  
  5735  	//structs.SetFrom(&mReq, req)
  5736  	var mReq s3.CreateMultipartUploadInput
  5737  	setFrom_s3CreateMultipartUploadInput_s3PutObjectInput(&mReq, ui.req)
  5738  
  5739  	uploadParts := f.opt.MaxUploadParts
  5740  	if uploadParts < 1 {
  5741  		uploadParts = 1
  5742  	} else if uploadParts > maxUploadParts {
  5743  		uploadParts = maxUploadParts
  5744  	}
  5745  	size := src.Size()
  5746  
  5747  	// calculate size of parts
  5748  	chunkSize := f.opt.ChunkSize
  5749  
  5750  	// size can be -1 here, meaning we don't know the size of the incoming file. We use ChunkSize
  5751  	// buffers (default 5 MiB), so with the maximum number of parts (10,000) the largest streamed
  5752  	// file is about 48 GiB, which seems a reasonable limit.
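        	// Illustrative arithmetic (not part of the original source):
        	// 5 MiB * 10,000 parts = 50,000 MiB ~= 48.8 GiB, hence the figure above.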
  5753  	if size == -1 {
  5754  		warnStreamUpload.Do(func() {
  5755  			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
  5756  				f.opt.ChunkSize, fs.SizeSuffix(int64(chunkSize)*int64(uploadParts)))
  5757  		})
  5758  	} else {
  5759  		chunkSize = chunksize.Calculator(src, size, uploadParts, chunkSize)
  5760  	}
  5761  
  5762  	var mOut *s3.CreateMultipartUploadOutput
  5763  	err = f.pacer.Call(func() (bool, error) {
  5764  		mOut, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
  5765  		if err == nil {
  5766  			if mOut == nil {
  5767  				err = fserrors.RetryErrorf("internal error: no info from multipart upload")
  5768  			} else if mOut.UploadId == nil {
  5769  				err = fserrors.RetryErrorf("internal error: no UploadId in multpart upload: %#v", *mOut)
  5770  			}
  5771  		}
  5772  		return f.shouldRetry(ctx, err)
  5773  	})
  5774  	if err != nil {
  5775  		return info, nil, fmt.Errorf("create multipart upload failed: %w", err)
  5776  	}
  5777  
  5778  	chunkWriter := &s3ChunkWriter{
  5779  		chunkSize:            int64(chunkSize),
  5780  		size:                 size,
  5781  		f:                    f,
  5782  		bucket:               mOut.Bucket,
  5783  		key:                  mOut.Key,
  5784  		uploadID:             mOut.UploadId,
  5785  		multiPartUploadInput: &mReq,
  5786  		completedParts:       make([]*s3.CompletedPart, 0),
  5787  		ui:                   ui,
  5788  		o:                    o,
  5789  	}
  5790  	info = fs.ChunkWriterInfo{
  5791  		ChunkSize:         int64(chunkSize),
  5792  		Concurrency:       o.fs.opt.UploadConcurrency,
  5793  		LeavePartsOnError: o.fs.opt.LeavePartsOnError,
  5794  	}
  5795  	fs.Debugf(o, "open chunk writer: started multipart upload: %v", *mOut.UploadId)
  5796  	return info, chunkWriter, err
  5797  }
  5798  
  5799  // add a part number and etag to the completed parts
  5800  func (w *s3ChunkWriter) addCompletedPart(partNum *int64, eTag *string) {
  5801  	w.completedPartsMu.Lock()
  5802  	defer w.completedPartsMu.Unlock()
  5803  	w.completedParts = append(w.completedParts, &s3.CompletedPart{
  5804  		PartNumber: partNum,
  5805  		ETag:       eTag,
  5806  	})
  5807  }
  5808  
  5809  // addMd5 adds a binary md5 to the md5 calculated so far
  5810  func (w *s3ChunkWriter) addMd5(md5binary []byte, chunkNumber int64) {
  5811  	w.md5sMu.Lock()
  5812  	defer w.md5sMu.Unlock()
  5813  	start := chunkNumber * md5.Size
  5814  	end := start + md5.Size
  5815  	if extend := end - int64(len(w.md5s)); extend > 0 {
  5816  		w.md5s = append(w.md5s, make([]byte, extend)...)
  5817  	}
  5818  	copy(w.md5s[start:end], md5binary)
  5819  }
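
        // exampleMd5Layout is an illustrative sketch (hypothetical, not called
        // anywhere) of the layout addMd5 maintains: md5s is a flat buffer holding the
        // 16-byte MD5 of every part at offset partNumber*md5.Size, so parts that
        // finish out of order still land in the right slot.
        func exampleMd5Layout(partMd5s map[int][md5.Size]byte) []byte {
        	var buf []byte
        	for part, sum := range partMd5s {
        		start := part * md5.Size
        		end := start + md5.Size
        		// grow the buffer if this part is beyond the current end
        		if end > len(buf) {
        			buf = append(buf, make([]byte, end-len(buf))...)
        		}
        		copy(buf[start:end], sum[:])
        	}
        	return buf
        }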
  5820  
  5821  // WriteChunk writes the given chunk number from reader, where chunkNumber >= 0
  5822  func (w *s3ChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
  5823  	if chunkNumber < 0 {
  5824  		err := fmt.Errorf("invalid chunk number provided: %v", chunkNumber)
  5825  		return -1, err
  5826  	}
  5827  	// Only account after the checksum reads have been done
  5828  	if do, ok := reader.(pool.DelayAccountinger); ok {
  5829  		// To figure out this number, do a transfer and if the accounted size is 0 or a
  5830  		// multiple of what it should be, increase or decrease this number.
  5831  		do.DelayAccounting(3)
  5832  	}
  5833  
  5834  	// create checksum of buffer for integrity checking
  5835  	// currently there is no way to calculate the md5 without reading the chunk a 2nd time (1st read is in uploadMultipart)
  5836  	// possible in AWS SDK v2 with trailers?
  5837  	m := md5.New()
  5838  	currentChunkSize, err := io.Copy(m, reader)
  5839  	if err != nil {
  5840  		return -1, err
  5841  	}
  5842  	// If no data read and not the first chunk, don't write the chunk
  5843  	if currentChunkSize == 0 && chunkNumber != 0 {
  5844  		return 0, nil
  5845  	}
  5846  	md5sumBinary := m.Sum(nil)
  5847  	w.addMd5(md5sumBinary, int64(chunkNumber))
  5848  	md5sum := base64.StdEncoding.EncodeToString(md5sumBinary)
  5849  
  5850  	// S3 requires 1 <= PartNumber <= 10000
  5851  	s3PartNumber := aws.Int64(int64(chunkNumber + 1))
  5852  	uploadPartReq := &s3.UploadPartInput{
  5853  		Body:                 reader,
  5854  		Bucket:               w.bucket,
  5855  		Key:                  w.key,
  5856  		PartNumber:           s3PartNumber,
  5857  		UploadId:             w.uploadID,
  5858  		ContentMD5:           &md5sum,
  5859  		ContentLength:        aws.Int64(currentChunkSize),
  5860  		RequestPayer:         w.multiPartUploadInput.RequestPayer,
  5861  		SSECustomerAlgorithm: w.multiPartUploadInput.SSECustomerAlgorithm,
  5862  		SSECustomerKey:       w.multiPartUploadInput.SSECustomerKey,
  5863  		SSECustomerKeyMD5:    w.multiPartUploadInput.SSECustomerKeyMD5,
  5864  	}
  5865  	var uout *s3.UploadPartOutput
  5866  	err = w.f.pacer.Call(func() (bool, error) {
  5867  		// rewind the reader on retry and after reading md5
  5868  		_, err = reader.Seek(0, io.SeekStart)
  5869  		if err != nil {
  5870  			return false, err
  5871  		}
  5872  		uout, err = w.f.c.UploadPartWithContext(ctx, uploadPartReq)
  5873  		if err != nil {
  5874  			if chunkNumber <= 8 {
  5875  				return w.f.shouldRetry(ctx, err)
  5876  			}
  5877  			// retry all chunks once we have done the first few
  5878  			return true, err
  5879  		}
  5880  		return false, nil
  5881  	})
  5882  	if err != nil {
  5883  		return -1, fmt.Errorf("failed to upload chunk %d with %v bytes: %w", chunkNumber+1, currentChunkSize, err)
  5884  	}
  5885  
  5886  	w.addCompletedPart(s3PartNumber, uout.ETag)
  5887  
  5888  	fs.Debugf(w.o, "multipart upload wrote chunk %d with %v bytes and etag %v", chunkNumber+1, currentChunkSize, *uout.ETag)
  5889  	return currentChunkSize, err
  5890  }
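
        // exampleContentMD5 is an illustrative sketch (hypothetical, not called
        // anywhere) of the Content-MD5 handling in WriteChunk: the chunk is read once
        // to hash it, the digest is base64 encoded for the ContentMD5 field, and the
        // reader is rewound so the same bytes can then be sent to S3.
        func exampleContentMD5(reader io.ReadSeeker) (md5B64 string, n int64, err error) {
        	h := md5.New()
        	n, err = io.Copy(h, reader)
        	if err != nil {
        		return "", n, err
        	}
        	md5B64 = base64.StdEncoding.EncodeToString(h.Sum(nil))
        	// rewind so the upload (and any retry) reads the chunk from the start
        	_, err = reader.Seek(0, io.SeekStart)
        	return md5B64, n, err
        }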
  5891  
  5892  // Abort the multipart upload
  5893  func (w *s3ChunkWriter) Abort(ctx context.Context) error {
  5894  	err := w.f.pacer.Call(func() (bool, error) {
  5895  		_, err := w.f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
  5896  			Bucket:       w.bucket,
  5897  			Key:          w.key,
  5898  			UploadId:     w.uploadID,
  5899  			RequestPayer: w.multiPartUploadInput.RequestPayer,
  5900  		})
  5901  		return w.f.shouldRetry(ctx, err)
  5902  	})
  5903  	if err != nil {
  5904  		return fmt.Errorf("failed to abort multipart upload %q: %w", *w.uploadID, err)
  5905  	}
  5906  	fs.Debugf(w.o, "multipart upload %q aborted", *w.uploadID)
  5907  	return err
  5908  }
  5909  
  5910  // Close and finalise the multipart upload
  5911  func (w *s3ChunkWriter) Close(ctx context.Context) (err error) {
  5912  	// sort the completed parts by part number
  5913  	sort.Slice(w.completedParts, func(i, j int) bool {
  5914  		return *w.completedParts[i].PartNumber < *w.completedParts[j].PartNumber
  5915  	})
  5916  	var resp *s3.CompleteMultipartUploadOutput
  5917  	err = w.f.pacer.Call(func() (bool, error) {
  5918  		resp, err = w.f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
  5919  			Bucket: w.bucket,
  5920  			Key:    w.key,
  5921  			MultipartUpload: &s3.CompletedMultipartUpload{
  5922  				Parts: w.completedParts,
  5923  			},
  5924  			RequestPayer: w.multiPartUploadInput.RequestPayer,
  5925  			UploadId:     w.uploadID,
  5926  		})
  5927  		return w.f.shouldRetry(ctx, err)
  5928  	})
  5929  	if err != nil {
  5930  		return fmt.Errorf("failed to complete multipart upload %q: %w", *w.uploadID, err)
  5931  	}
  5932  	if resp != nil {
  5933  		if resp.ETag != nil {
  5934  			w.eTag = *resp.ETag
  5935  		}
  5936  		if resp.VersionId != nil {
  5937  			w.versionID = *resp.VersionId
  5938  		}
  5939  	}
  5940  	fs.Debugf(w.o, "multipart upload %q finished", *w.uploadID)
  5941  	return err
  5942  }
  5943  
  5944  func (o *Object) uploadMultipart(ctx context.Context, src fs.ObjectInfo, in io.Reader, options ...fs.OpenOption) (wantETag, gotETag string, versionID *string, ui uploadInfo, err error) {
  5945  	chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
  5946  		Open:        o.fs,
  5947  		OpenOptions: options,
  5948  	})
  5949  	if err != nil {
  5950  		return wantETag, gotETag, versionID, ui, err
  5951  	}
  5952  
  5953  	s3cw := chunkWriter.(*s3ChunkWriter)
  5954  	gotETag = s3cw.eTag
  5955  	versionID = aws.String(s3cw.versionID)
  5956  
  5957  	hashOfHashes := md5.Sum(s3cw.md5s)
  5958  	wantETag = fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(s3cw.completedParts))
  5959  
  5960  	return wantETag, gotETag, versionID, s3cw.ui, nil
  5961  }
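
        // exampleMultipartETag is an illustrative sketch (hypothetical, not called
        // anywhere) of the "hash of hashes" computed above: S3 typically forms the
        // multipart ETag as the MD5 of the concatenated binary part MD5s followed by
        // a hyphen and the number of parts, e.g. "9b2cf535f27731c974343645a3985328-2".
        func exampleMultipartETag(partMd5s [][]byte) string {
        	var all []byte
        	for _, sum := range partMd5s {
        		all = append(all, sum...)
        	}
        	hashOfHashes := md5.Sum(all)
        	return fmt.Sprintf("%s-%d", hex.EncodeToString(hashOfHashes[:]), len(partMd5s))
        }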
  5962  
  5963  // unWrapAwsError unwraps AWS errors, looking for a non-AWS error
  5964  //
  5965  // It returns true if one was found and the error, or false and the
  5966  // error passed in.
  5967  func unWrapAwsError(err error) (found bool, outErr error) {
  5968  	if awsErr, ok := err.(awserr.Error); ok {
  5969  		var origErrs []error
  5970  		if batchErr, ok := awsErr.(awserr.BatchedErrors); ok {
  5971  			origErrs = batchErr.OrigErrs()
  5972  		} else {
  5973  			origErrs = []error{awsErr.OrigErr()}
  5974  		}
  5975  		for _, origErr := range origErrs {
  5976  			found, newErr := unWrapAwsError(origErr)
  5977  			if found {
  5978  				return found, newErr
  5979  			}
  5980  		}
  5981  		return false, err
  5982  	}
  5983  	return true, err
  5984  }
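
        // exampleUnwrapAwsError is an illustrative sketch (hypothetical, not called
        // anywhere) of what unWrapAwsError does: given an awserr.Error wrapping a
        // plain Go error, it digs out the underlying error so the caller sees the
        // real cause rather than the SDK's synthesized wrapper.
        func exampleUnwrapAwsError() error {
        	underlying := errors.New("connection reset by peer")
        	wrapped := awserr.New(request.ErrCodeRequestError, "send request failed", underlying)
        	if found, err := unWrapAwsError(wrapped); found {
        		return err // the "connection reset by peer" error
        	}
        	return wrapped
        }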
  5985  
  5986  // Upload a single part using PutObject
  5987  func (o *Object) uploadSinglepartPutObject(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, lastModified time.Time, versionID *string, err error) {
  5988  	r, resp := o.fs.c.PutObjectRequest(req)
  5989  	if req.ContentLength != nil && *req.ContentLength == 0 {
  5990  		// Can't upload zero length files like this for some reason
  5991  		r.Body = bytes.NewReader([]byte{})
  5992  	} else {
  5993  		r.SetStreamingBody(io.NopCloser(in))
  5994  	}
  5995  	r.SetContext(ctx)
  5996  	r.HTTPRequest.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
  5997  
  5998  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  5999  		err := r.Send()
  6000  		return o.fs.shouldRetry(ctx, err)
  6001  	})
  6002  	if err != nil {
  6003  		// Return the underlying error if we have a
  6004  		// Serialization or RequestError, if possible
  6005  		//
  6006  		// These errors are synthesized locally in the SDK
  6007  		// (not returned from the server) and we'd rather have
  6008  		// the underlying error if there is one.
  6009  		if do, ok := err.(awserr.Error); ok && (do.Code() == request.ErrCodeSerialization || do.Code() == request.ErrCodeRequestError) {
  6010  			if found, newErr := unWrapAwsError(err); found {
  6011  				err = newErr
  6012  			}
  6013  		}
  6014  		return etag, lastModified, nil, err
  6015  	}
  6016  	lastModified = time.Now()
  6017  	if resp != nil {
  6018  		etag = aws.StringValue(resp.ETag)
  6019  		versionID = resp.VersionId
  6020  	}
  6021  	return etag, lastModified, versionID, nil
  6022  }
  6023  
  6024  // Upload a single part using a presigned request
  6025  func (o *Object) uploadSinglepartPresignedRequest(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (etag string, lastModified time.Time, versionID *string, err error) {
  6026  	// Create the request
  6027  	putObj, _ := o.fs.c.PutObjectRequest(req)
  6028  
  6029  	// Sign it so we can upload using a presigned request.
  6030  	//
  6031  	// Note that the SDK did not previously support streaming to
  6032  	// PutObject, so we use this workaround.
  6033  	url, headers, err := putObj.PresignRequest(15 * time.Minute)
  6034  	if err != nil {
  6035  		return etag, lastModified, nil, fmt.Errorf("s3 upload: sign request: %w", err)
  6036  	}
  6037  
  6038  	if o.fs.opt.V2Auth && headers == nil {
  6039  		headers = putObj.HTTPRequest.Header
  6040  	}
  6041  
  6042  	// Set the body to nil if empty so we don't use chunked encoding
  6043  	if size == 0 {
  6044  		in = nil
  6045  	}
  6046  
  6047  	// create the vanilla http request
  6048  	httpReq, err := http.NewRequestWithContext(ctx, "PUT", url, in)
  6049  	if err != nil {
  6050  		return etag, lastModified, nil, fmt.Errorf("s3 upload: new request: %w", err)
  6051  	}
  6052  
  6053  	// set the headers we signed and the length
  6054  	httpReq.Header = headers
  6055  	httpReq.ContentLength = size
  6056  
  6057  	var resp *http.Response
  6058  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  6059  		var err error
  6060  		resp, err = o.fs.srv.Do(httpReq)
  6061  		if err != nil {
  6062  			return o.fs.shouldRetry(ctx, err)
  6063  		}
  6064  		body, err := rest.ReadBody(resp)
  6065  		if err != nil {
  6066  			return o.fs.shouldRetry(ctx, err)
  6067  		}
  6068  		if resp.StatusCode >= 200 && resp.StatusCode < 300 {
  6069  			return false, nil
  6070  		}
  6071  		err = fmt.Errorf("s3 upload: %s: %s", resp.Status, body)
  6072  		return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
  6073  	})
  6074  	if err != nil {
  6075  		return etag, lastModified, nil, err
  6076  	}
  6077  	if resp != nil {
  6078  		if date, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
  6079  			lastModified = date
  6080  		}
  6081  		etag = resp.Header.Get("Etag")
  6082  		vID := resp.Header.Get("x-amz-version-id")
  6083  		if vID != "" {
  6084  			versionID = &vID
  6085  		}
  6086  	}
  6087  	return etag, lastModified, versionID, nil
  6088  }
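
        // examplePresignedPut is an illustrative sketch (hypothetical, not called
        // anywhere) of the presigned upload pattern used above: once a URL and
        // headers have been signed, the body can be sent with a plain HTTP PUT and no
        // further involvement from the AWS SDK.
        func examplePresignedPut(ctx context.Context, client *http.Client, url string, headers http.Header, body io.Reader, size int64) error {
        	req, err := http.NewRequestWithContext(ctx, "PUT", url, body)
        	if err != nil {
        		return err
        	}
        	// use the signed headers and the length that was signed
        	req.Header = headers
        	req.ContentLength = size
        	resp, err := client.Do(req)
        	if err != nil {
        		return err
        	}
        	defer func() { _ = resp.Body.Close() }()
        	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
        		return fmt.Errorf("presigned PUT failed: %s", resp.Status)
        	}
        	return nil
        }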
  6089  
  6090  // Info needed for an upload
  6091  type uploadInfo struct {
  6092  	req       *s3.PutObjectInput
  6093  	md5sumHex string
  6094  }
  6095  
  6096  // Prepare object for being uploaded
  6097  //
  6098  // If noHash is true the md5sum will not be calculated
  6099  func (o *Object) prepareUpload(ctx context.Context, src fs.ObjectInfo, options []fs.OpenOption, noHash bool) (ui uploadInfo, err error) {
  6100  	bucket, bucketPath := o.split()
  6101  	// Create parent dir/bucket if not saving directory marker
  6102  	if !strings.HasSuffix(o.remote, "/") {
  6103  		err := o.fs.mkdirParent(ctx, o.remote)
  6104  		if err != nil {
  6105  			return ui, err
  6106  		}
  6107  	}
  6108  	modTime := src.ModTime(ctx)
  6109  
  6110  	ui.req = &s3.PutObjectInput{
  6111  		Bucket: &bucket,
  6112  		ACL:    stringPointerOrNil(o.fs.opt.ACL),
  6113  		Key:    &bucketPath,
  6114  	}
  6115  
  6116  	// Fetch metadata if --metadata is in use
  6117  	meta, err := fs.GetMetadataOptions(ctx, o.fs, src, options)
  6118  	if err != nil {
  6119  		return ui, fmt.Errorf("failed to read metadata from source object: %w", err)
  6120  	}
  6121  	ui.req.Metadata = make(map[string]*string, len(meta)+2)
  6122  	// merge metadata into request and user metadata
  6123  	for k, v := range meta {
  6124  		pv := aws.String(v)
  6125  		k = strings.ToLower(k)
  6126  		if o.fs.opt.NoSystemMetadata {
  6127  			ui.req.Metadata[k] = pv
  6128  			continue
  6129  		}
  6130  		switch k {
  6131  		case "cache-control":
  6132  			ui.req.CacheControl = pv
  6133  		case "content-disposition":
  6134  			ui.req.ContentDisposition = pv
  6135  		case "content-encoding":
  6136  			ui.req.ContentEncoding = pv
  6137  		case "content-language":
  6138  			ui.req.ContentLanguage = pv
  6139  		case "content-type":
  6140  			ui.req.ContentType = pv
  6141  		case "x-amz-tagging":
  6142  			ui.req.Tagging = pv
  6143  		case "tier":
  6144  			// ignore
  6145  		case "mtime":
  6146  			// mtime in meta overrides source ModTime
  6147  			metaModTime, err := time.Parse(time.RFC3339Nano, v)
  6148  			if err != nil {
  6149  				fs.Debugf(o, "failed to parse metadata %s: %q: %v", k, v, err)
  6150  			} else {
  6151  				modTime = metaModTime
  6152  			}
  6153  		case "btime":
  6154  			// write as metadata since we can't set it
  6155  			ui.req.Metadata[k] = pv
  6156  		default:
  6157  			ui.req.Metadata[k] = pv
  6158  		}
  6159  	}
  6160  
  6161  	// Set the mtime in the meta data
  6162  	ui.req.Metadata[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
  6163  
  6164  	// read the md5sum if available
  6165  	// - for non multipart
  6166  	//    - so we can add a ContentMD5
  6167  	//    - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
  6168  	// - for multipart provided checksums aren't disabled
  6169  	//    - so we can add the md5sum in the metadata as metaMD5Hash
  6170  	var md5sumBase64 string
  6171  	size := src.Size()
  6172  	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
  6173  	if !noHash && (!multipart || !o.fs.opt.DisableChecksum) {
  6174  		ui.md5sumHex, err = src.Hash(ctx, hash.MD5)
  6175  		if err == nil && matchMd5.MatchString(ui.md5sumHex) {
  6176  			hashBytes, err := hex.DecodeString(ui.md5sumHex)
  6177  			if err == nil {
  6178  				md5sumBase64 = base64.StdEncoding.EncodeToString(hashBytes)
  6179  				if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
  6180  					// Set the md5sum as metadata on the object if
  6181  					// - this is a multipart upload, or
  6182  					// - the ETag is not an MD5, e.g. when using SSE/SSE-C
  6183  					// provided that checksums aren't disabled
  6184  					ui.req.Metadata[metaMD5Hash] = &md5sumBase64
  6185  				}
  6186  			}
  6187  		}
  6188  	}
  6189  
  6190  	// Set the content type if it isn't set already
  6191  	if ui.req.ContentType == nil {
  6192  		ui.req.ContentType = aws.String(fs.MimeType(ctx, src))
  6193  	}
  6194  	if size >= 0 {
  6195  		ui.req.ContentLength = &size
  6196  	}
  6197  	if md5sumBase64 != "" {
  6198  		ui.req.ContentMD5 = &md5sumBase64
  6199  	}
  6200  	if o.fs.opt.RequesterPays {
  6201  		ui.req.RequestPayer = aws.String(s3.RequestPayerRequester)
  6202  	}
  6203  	if o.fs.opt.ServerSideEncryption != "" {
  6204  		ui.req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
  6205  	}
  6206  	if o.fs.opt.SSECustomerAlgorithm != "" {
  6207  		ui.req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
  6208  	}
  6209  	if o.fs.opt.SSECustomerKey != "" {
  6210  		ui.req.SSECustomerKey = &o.fs.opt.SSECustomerKey
  6211  	}
  6212  	if o.fs.opt.SSECustomerKeyMD5 != "" {
  6213  		ui.req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
  6214  	}
  6215  	if o.fs.opt.SSEKMSKeyID != "" {
  6216  		ui.req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
  6217  	}
  6218  	if o.fs.opt.StorageClass != "" {
  6219  		ui.req.StorageClass = &o.fs.opt.StorageClass
  6220  	}
  6221  	// Apply upload options
  6222  	for _, option := range options {
  6223  		key, value := option.Header()
  6224  		lowerKey := strings.ToLower(key)
  6225  		switch lowerKey {
  6226  		case "":
  6227  			// ignore
  6228  		case "cache-control":
  6229  			ui.req.CacheControl = aws.String(value)
  6230  		case "content-disposition":
  6231  			ui.req.ContentDisposition = aws.String(value)
  6232  		case "content-encoding":
  6233  			ui.req.ContentEncoding = aws.String(value)
  6234  		case "content-language":
  6235  			ui.req.ContentLanguage = aws.String(value)
  6236  		case "content-type":
  6237  			ui.req.ContentType = aws.String(value)
  6238  		case "x-amz-tagging":
  6239  			ui.req.Tagging = aws.String(value)
  6240  		default:
  6241  			const amzMetaPrefix = "x-amz-meta-"
  6242  			if strings.HasPrefix(lowerKey, amzMetaPrefix) {
  6243  				metaKey := lowerKey[len(amzMetaPrefix):]
  6244  				ui.req.Metadata[metaKey] = aws.String(value)
  6245  			} else {
  6246  				fs.Errorf(o, "Don't know how to set key %q on upload", key)
  6247  			}
  6248  		}
  6249  	}
  6250  
  6251  	// Check metadata keys and values are valid
  6252  	for key, value := range ui.req.Metadata {
  6253  		if !httpguts.ValidHeaderFieldName(key) {
  6254  			fs.Errorf(o, "Dropping invalid metadata key %q", key)
  6255  			delete(ui.req.Metadata, key)
  6256  		} else if value == nil {
  6257  			fs.Errorf(o, "Dropping nil metadata value for key %q", key)
  6258  			delete(ui.req.Metadata, key)
  6259  		} else if !httpguts.ValidHeaderFieldValue(*value) {
  6260  			fs.Errorf(o, "Dropping invalid metadata value %q for key %q", *value, key)
  6261  			delete(ui.req.Metadata, key)
  6262  		}
  6263  	}
  6264  
  6265  	return ui, nil
  6266  }
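
        // exampleValidMetadata is an illustrative sketch (hypothetical, not called
        // anywhere) of the validation at the end of prepareUpload: metadata keys and
        // values which would not survive as HTTP header fields are dropped rather
        // than failing the whole upload.
        func exampleValidMetadata(meta map[string]*string) map[string]*string {
        	valid := make(map[string]*string, len(meta))
        	for k, v := range meta {
        		if v == nil || !httpguts.ValidHeaderFieldName(k) || !httpguts.ValidHeaderFieldValue(*v) {
        			continue // dropped - see the fs.Errorf calls above
        		}
        		valid[k] = v
        	}
        	return valid
        }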
  6267  
  6268  // Update the Object from in with modTime and size
  6269  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  6270  	if o.fs.opt.VersionAt.IsSet() {
  6271  		return errNotWithVersionAt
  6272  	}
  6273  	size := src.Size()
  6274  	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
  6275  
  6276  	var wantETag string        // Multipart upload Etag to check
  6277  	var gotETag string         // Etag we got from the upload
  6278  	var lastModified time.Time // Time we got from the upload
  6279  	var versionID *string      // versionID we got from the upload
  6280  	var err error
  6281  	var ui uploadInfo
  6282  	if multipart {
  6283  		wantETag, gotETag, versionID, ui, err = o.uploadMultipart(ctx, src, in, options...)
  6284  	} else {
  6285  		ui, err = o.prepareUpload(ctx, src, options, false)
  6286  		if err != nil {
  6287  			return fmt.Errorf("failed to prepare upload: %w", err)
  6288  		}
  6289  
  6290  		if o.fs.opt.UsePresignedRequest {
  6291  			gotETag, lastModified, versionID, err = o.uploadSinglepartPresignedRequest(ctx, ui.req, size, in)
  6292  		} else {
  6293  			gotETag, lastModified, versionID, err = o.uploadSinglepartPutObject(ctx, ui.req, size, in)
  6294  		}
  6295  	}
  6296  	if err != nil {
  6297  		return err
  6298  	}
  6299  	// Only record versionID if we are using --s3-versions or --s3-version-at
  6300  	if o.fs.opt.Versions || o.fs.opt.VersionAt.IsSet() {
  6301  		o.versionID = versionID
  6302  	} else {
  6303  		o.versionID = nil
  6304  	}
  6305  
  6306  	// User requested we don't HEAD the object after uploading it
  6307  	// so make up the object as best we can assuming it got
  6308  	// uploaded properly. If size < 0 then we need to do the HEAD.
  6309  	var head *s3.HeadObjectOutput
  6310  	if o.fs.opt.NoHead && size >= 0 {
  6311  		head = new(s3.HeadObjectOutput)
  6312  		//structs.SetFrom(head, &req)
  6313  		setFrom_s3HeadObjectOutput_s3PutObjectInput(head, ui.req)
  6314  		head.ETag = &ui.md5sumHex // it doesn't matter that the quotes are missing
  6315  		head.ContentLength = &size
  6316  		// We get etag back from single and multipart upload so fill it in here
  6317  		if gotETag != "" {
  6318  			head.ETag = &gotETag
  6319  		}
  6320  		if lastModified.IsZero() {
  6321  			lastModified = time.Now()
  6322  		}
  6323  		head.LastModified = &lastModified
  6324  		head.VersionId = versionID
  6325  	} else {
  6326  		// Read the metadata from the newly created object
  6327  		o.meta = nil // wipe old metadata
  6328  		head, err = o.headObject(ctx)
  6329  		if err != nil {
  6330  			return err
  6331  		}
  6332  	}
  6333  	o.setMetaData(head)
  6334  
  6335  	// Check multipart upload ETag if required
  6336  	if o.fs.opt.UseMultipartEtag.Value && !o.fs.etagIsNotMD5 && wantETag != "" && head.ETag != nil && *head.ETag != "" {
  6337  		gotETag := strings.Trim(strings.ToLower(*head.ETag), `"`)
  6338  		if wantETag != gotETag {
  6339  			return fmt.Errorf("multipart upload corrupted: ETags differ: expecting %s but got %s", wantETag, gotETag)
  6340  		}
  6341  		fs.Debugf(o, "Multipart upload Etag: %s OK", wantETag)
  6342  	}
  6343  	return err
  6344  }
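
        // exampleCheckMultipartETag is an illustrative sketch (hypothetical, not
        // called anywhere) of the integrity check at the end of Update: the ETag
        // returned by S3 is quoted and may differ in case, so it is normalised before
        // being compared with the locally computed "md5-of-md5s-N" value.
        func exampleCheckMultipartETag(wantETag, headETag string) error {
        	gotETag := strings.Trim(strings.ToLower(headETag), `"`)
        	if wantETag != "" && gotETag != wantETag {
        		return fmt.Errorf("multipart upload corrupted: ETags differ: expecting %s but got %s", wantETag, gotETag)
        	}
        	return nil
        }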
  6345  
  6346  // Remove an object
  6347  func (o *Object) Remove(ctx context.Context) error {
  6348  	if o.fs.opt.VersionAt.IsSet() {
  6349  		return errNotWithVersionAt
  6350  	}
  6351  	bucket, bucketPath := o.split()
  6352  	req := s3.DeleteObjectInput{
  6353  		Bucket:    &bucket,
  6354  		Key:       &bucketPath,
  6355  		VersionId: o.versionID,
  6356  	}
  6357  	if o.fs.opt.RequesterPays {
  6358  		req.RequestPayer = aws.String(s3.RequestPayerRequester)
  6359  	}
  6360  	err := o.fs.pacer.Call(func() (bool, error) {
  6361  		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
  6362  		return o.fs.shouldRetry(ctx, err)
  6363  	})
  6364  	return err
  6365  }
  6366  
  6367  // MimeType of an Object if known, "" otherwise
  6368  func (o *Object) MimeType(ctx context.Context) string {
  6369  	err := o.readMetaData(ctx)
  6370  	if err != nil {
  6371  		fs.Logf(o, "Failed to read metadata: %v", err)
  6372  		return ""
  6373  	}
  6374  	return o.mimeType
  6375  }
  6376  
  6377  // SetTier changes the storage class of the object
  6378  func (o *Object) SetTier(tier string) (err error) {
  6379  	ctx := context.TODO()
  6380  	tier = strings.ToUpper(tier)
  6381  	bucket, bucketPath := o.split()
  6382  	req := s3.CopyObjectInput{
  6383  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  6384  		StorageClass:      aws.String(tier),
  6385  	}
  6386  	err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
  6387  	if err != nil {
  6388  		return err
  6389  	}
  6390  	o.storageClass = &tier
  6391  	return err
  6392  }
  6393  
  6394  // GetTier returns storage class as string
  6395  func (o *Object) GetTier() string {
  6396  	if o.storageClass == nil || *o.storageClass == "" {
  6397  		return "STANDARD"
  6398  	}
  6399  	return *o.storageClass
  6400  }
  6401  
  6402  // Metadata returns metadata for an object
  6403  //
  6404  // It should return nil if there is no Metadata
  6405  func (o *Object) Metadata(ctx context.Context) (metadata fs.Metadata, err error) {
  6406  	err = o.readMetaData(ctx)
  6407  	if err != nil {
  6408  		return nil, err
  6409  	}
  6410  	metadata = make(fs.Metadata, len(o.meta)+7)
  6411  	for k, v := range o.meta {
  6412  		switch k {
  6413  		case metaMtime:
  6414  			if modTime, err := swift.FloatStringToTime(v); err == nil {
  6415  				metadata["mtime"] = modTime.Format(time.RFC3339Nano)
  6416  			}
  6417  		case metaMD5Hash:
  6418  			// don't write hash metadata
  6419  		default:
  6420  			metadata[k] = v
  6421  		}
  6422  	}
  6423  	if o.mimeType != "" {
  6424  		metadata["content-type"] = o.mimeType
  6425  	}
  6426  	// metadata["x-amz-tagging"] = ""
  6427  	if !o.lastModified.IsZero() {
  6428  		metadata["btime"] = o.lastModified.Format(time.RFC3339Nano)
  6429  	}
  6430  
  6431  	// Set system metadata
  6432  	setMetadata := func(k string, v *string) {
  6433  		if o.fs.opt.NoSystemMetadata {
  6434  			return
  6435  		}
  6436  		if v == nil || *v == "" {
  6437  			return
  6438  		}
  6439  		metadata[k] = *v
  6440  	}
  6441  	setMetadata("cache-control", o.cacheControl)
  6442  	setMetadata("content-disposition", o.contentDisposition)
  6443  	setMetadata("content-encoding", o.contentEncoding)
  6444  	setMetadata("content-language", o.contentLanguage)
  6445  	metadata["tier"] = o.GetTier()
  6446  
  6447  	return metadata, nil
  6448  }
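
        // exampleMtimeRoundTrip is an illustrative sketch (hypothetical, not called
        // anywhere) of how the modification time survives in user metadata: it is
        // stored as a Unix-seconds float string (the Swift convention) at upload time
        // and parsed back into a time.Time when the metadata is read.
        func exampleMtimeRoundTrip(modTime time.Time) (time.Time, error) {
        	stored := swift.TimeToFloatString(modTime) // e.g. "1618242671.05460358"
        	return swift.FloatStringToTime(stored)
        }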
  6449  
  6450  // Check the interfaces are satisfied
  6451  var (
  6452  	_ fs.Fs              = &Fs{}
  6453  	_ fs.Purger          = &Fs{}
  6454  	_ fs.Copier          = &Fs{}
  6455  	_ fs.PutStreamer     = &Fs{}
  6456  	_ fs.ListRer         = &Fs{}
  6457  	_ fs.Commander       = &Fs{}
  6458  	_ fs.CleanUpper      = &Fs{}
  6459  	_ fs.OpenChunkWriter = &Fs{}
  6460  	_ fs.Object          = &Object{}
  6461  	_ fs.MimeTyper       = &Object{}
  6462  	_ fs.GetTierer       = &Object{}
  6463  	_ fs.SetTierer       = &Object{}
  6464  	_ fs.Metadataer      = &Object{}
  6465  )