github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/backend/s3/s3.go

     1  // Package s3 provides an interface to Amazon S3 object storage
     2  package s3
     3  
     4  // FIXME need to prevent anything but ListDir working for s3://
     5  
     6  /*
     7  Progress of port to aws-sdk
     8  
     9   * Don't really need o.meta at all?
    10  
    11  What happens if you CTRL-C a multipart upload
    12    * get an incomplete upload
    13    * disappears when you delete the bucket
    14  */
    15  
    16  import (
    17  	"bytes"
    18  	"context"
    19  	"crypto/md5"
    20  	"encoding/base64"
    21  	"encoding/hex"
    22  	"encoding/xml"
    23  	"fmt"
    24  	"io"
    25  	"net/http"
    26  	"net/url"
    27  	"path"
    28  	"regexp"
    29  	"sort"
    30  	"strconv"
    31  	"strings"
    32  	"sync"
    33  	"time"
    34  
    35  	"github.com/aws/aws-sdk-go/aws"
    36  	"github.com/aws/aws-sdk-go/aws/awserr"
    37  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    38  	"github.com/aws/aws-sdk-go/aws/credentials"
    39  	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    40  	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    41  	"github.com/aws/aws-sdk-go/aws/defaults"
    42  	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    43  	"github.com/aws/aws-sdk-go/aws/request"
    44  	"github.com/aws/aws-sdk-go/aws/session"
    45  	"github.com/aws/aws-sdk-go/service/s3"
    46  	"github.com/ncw/swift"
    47  	"github.com/pkg/errors"
    48  	"github.com/rclone/rclone/fs"
    49  	"github.com/rclone/rclone/fs/config"
    50  	"github.com/rclone/rclone/fs/config/configmap"
    51  	"github.com/rclone/rclone/fs/config/configstruct"
    52  	"github.com/rclone/rclone/fs/fserrors"
    53  	"github.com/rclone/rclone/fs/fshttp"
    54  	"github.com/rclone/rclone/fs/hash"
    55  	"github.com/rclone/rclone/fs/walk"
    56  	"github.com/rclone/rclone/lib/bucket"
    57  	"github.com/rclone/rclone/lib/encoder"
    58  	"github.com/rclone/rclone/lib/pacer"
    59  	"github.com/rclone/rclone/lib/readers"
    60  	"github.com/rclone/rclone/lib/rest"
    61  	"golang.org/x/sync/errgroup"
    62  )
    63  
    64  // Register with Fs
    65  func init() {
    66  	fs.Register(&fs.RegInfo{
    67  		Name:        "s3",
    68  		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
    69  		NewFs:       NewFs,
    70  		Options: []fs.Option{{
    71  			Name: fs.ConfigProvider,
    72  			Help: "Choose your S3 provider.",
    73  			Examples: []fs.OptionExample{{
    74  				Value: "AWS",
    75  				Help:  "Amazon Web Services (AWS) S3",
    76  			}, {
    77  				Value: "Alibaba",
    78  				Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
    79  			}, {
    80  				Value: "Ceph",
    81  				Help:  "Ceph Object Storage",
    82  			}, {
    83  				Value: "DigitalOcean",
    84  				Help:  "Digital Ocean Spaces",
    85  			}, {
    86  				Value: "Dreamhost",
    87  				Help:  "Dreamhost DreamObjects",
    88  			}, {
    89  				Value: "IBMCOS",
    90  				Help:  "IBM COS S3",
    91  			}, {
    92  				Value: "Minio",
    93  				Help:  "Minio Object Storage",
    94  			}, {
    95  				Value: "Netease",
    96  				Help:  "Netease Object Storage (NOS)",
    97  			}, {
    98  				Value: "StackPath",
    99  				Help:  "StackPath Object Storage",
   100  			}, {
   101  				Value: "Wasabi",
   102  				Help:  "Wasabi Object Storage",
   103  			}, {
   104  				Value: "Other",
   105  				Help:  "Any other S3 compatible provider",
   106  			}},
   107  		}, {
   108  			Name:    "env_auth",
   109  			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS metadata if no env vars).\nOnly applies if access_key_id and secret_access_key are blank.",
   110  			Default: false,
   111  			Examples: []fs.OptionExample{{
   112  				Value: "false",
   113  				Help:  "Enter AWS credentials in the next step",
   114  			}, {
   115  				Value: "true",
   116  				Help:  "Get AWS credentials from the environment (env vars or IAM)",
   117  			}},
   118  		}, {
   119  			Name: "access_key_id",
   120  			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
   121  		}, {
   122  			Name: "secret_access_key",
   123  			Help: "AWS Secret Access Key (password).\nLeave blank for anonymous access or runtime credentials.",
   124  		}, {
   125  			Name:     "region",
   126  			Help:     "Region to connect to.",
   127  			Provider: "AWS",
   128  			Examples: []fs.OptionExample{{
   129  				Value: "us-east-1",
   130  				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
   131  			}, {
   132  				Value: "us-east-2",
   133  				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
   134  			}, {
   135  				Value: "us-west-2",
   136  				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
   137  			}, {
   138  				Value: "us-west-1",
   139  				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
   140  			}, {
   141  				Value: "ca-central-1",
   142  				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
   143  			}, {
   144  				Value: "eu-west-1",
   145  				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
   146  			}, {
   147  				Value: "eu-west-2",
   148  				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
   149  			}, {
   150  				Value: "eu-north-1",
   151  				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
   152  			}, {
   153  				Value: "eu-central-1",
   154  				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
   155  			}, {
   156  				Value: "ap-southeast-1",
   157  				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
   158  			}, {
   159  				Value: "ap-southeast-2",
   160  				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
   161  			}, {
   162  				Value: "ap-northeast-1",
   163  				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
   164  			}, {
   165  				Value: "ap-northeast-2",
   166  				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
   167  			}, {
   168  				Value: "ap-south-1",
   169  				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
   170  			}, {
   171  				Value: "ap-east-1",
   172  				Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
   173  			}, {
   174  				Value: "sa-east-1",
   175  				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
   176  			}},
   177  		}, {
   178  			Name:     "region",
   179  			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
   180  			Provider: "!AWS,Alibaba",
   181  			Examples: []fs.OptionExample{{
   182  				Value: "",
   183  				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
   184  			}, {
   185  				Value: "other-v2-signature",
   186  				Help:  "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
   187  			}},
   188  		}, {
   189  			Name:     "endpoint",
   190  			Help:     "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
   191  			Provider: "AWS",
   192  		}, {
   193  			Name:     "endpoint",
   194  			Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an on-premise IBM COS.",
   195  			Provider: "IBMCOS",
   196  			Examples: []fs.OptionExample{{
   197  				Value: "s3-api.us-geo.objectstorage.softlayer.net",
   198  				Help:  "US Cross Region Endpoint",
   199  			}, {
   200  				Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
   201  				Help:  "US Cross Region Dallas Endpoint",
   202  			}, {
   203  				Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
   204  				Help:  "US Cross Region Washington DC Endpoint",
   205  			}, {
   206  				Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
   207  				Help:  "US Cross Region San Jose Endpoint",
   208  			}, {
   209  				Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
   210  				Help:  "US Cross Region Private Endpoint",
   211  			}, {
   212  				Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
   213  				Help:  "US Cross Region Dallas Private Endpoint",
   214  			}, {
   215  				Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
   216  				Help:  "US Cross Region Washington DC Private Endpoint",
   217  			}, {
   218  				Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
   219  				Help:  "US Cross Region San Jose Private Endpoint",
   220  			}, {
   221  				Value: "s3.us-east.objectstorage.softlayer.net",
   222  				Help:  "US Region East Endpoint",
   223  			}, {
   224  				Value: "s3.us-east.objectstorage.service.networklayer.com",
   225  				Help:  "US Region East Private Endpoint",
   226  			}, {
   227  				Value: "s3.us-south.objectstorage.softlayer.net",
   228  				Help:  "US Region South Endpoint",
   229  			}, {
   230  				Value: "s3.us-south.objectstorage.service.networklayer.com",
   231  				Help:  "US Region South Private Endpoint",
   232  			}, {
   233  				Value: "s3.eu-geo.objectstorage.softlayer.net",
   234  				Help:  "EU Cross Region Endpoint",
   235  			}, {
   236  				Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
   237  				Help:  "EU Cross Region Frankfurt Endpoint",
   238  			}, {
   239  				Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
   240  				Help:  "EU Cross Region Milan Endpoint",
   241  			}, {
   242  				Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
   243  				Help:  "EU Cross Region Amsterdam Endpoint",
   244  			}, {
   245  				Value: "s3.eu-geo.objectstorage.service.networklayer.com",
   246  				Help:  "EU Cross Region Private Endpoint",
   247  			}, {
   248  				Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
   249  				Help:  "EU Cross Region Frankfurt Private Endpoint",
   250  			}, {
   251  				Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
   252  				Help:  "EU Cross Region Milan Private Endpoint",
   253  			}, {
   254  				Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
   255  				Help:  "EU Cross Region Amsterdam Private Endpoint",
   256  			}, {
   257  				Value: "s3.eu-gb.objectstorage.softlayer.net",
   258  				Help:  "Great Britain Endpoint",
   259  			}, {
   260  				Value: "s3.eu-gb.objectstorage.service.networklayer.com",
   261  				Help:  "Great Britain Private Endpoint",
   262  			}, {
   263  				Value: "s3.ap-geo.objectstorage.softlayer.net",
   264  				Help:  "APAC Cross Regional Endpoint",
   265  			}, {
   266  				Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
   267  				Help:  "APAC Cross Regional Tokyo Endpoint",
   268  			}, {
   269  				Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
   270  				Help:  "APAC Cross Regional Hong Kong Endpoint",
   271  			}, {
   272  				Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
   273  				Help:  "APAC Cross Regional Seoul Endpoint",
   274  			}, {
   275  				Value: "s3.ap-geo.objectstorage.service.networklayer.com",
   276  				Help:  "APAC Cross Regional Private Endpoint",
   277  			}, {
   278  				Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
   279  				Help:  "APAC Cross Regional Tokyo Private Endpoint",
   280  			}, {
   281  				Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
   282  				Help:  "APAC Cross Regional Hong Kong Private Endpoint",
   283  			}, {
   284  				Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
   285  				Help:  "APAC Cross Regional Seoul Private Endpoint",
   286  			}, {
   287  				Value: "s3.mel01.objectstorage.softlayer.net",
   288  				Help:  "Melbourne Single Site Endpoint",
   289  			}, {
   290  				Value: "s3.mel01.objectstorage.service.networklayer.com",
   291  				Help:  "Melbourne Single Site Private Endpoint",
   292  			}, {
   293  				Value: "s3.tor01.objectstorage.softlayer.net",
   294  				Help:  "Toronto Single Site Endpoint",
   295  			}, {
   296  				Value: "s3.tor01.objectstorage.service.networklayer.com",
   297  				Help:  "Toronto Single Site Private Endpoint",
   298  			}},
   299  		}, {
   300  			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
   301  			Name:     "endpoint",
   302  			Help:     "Endpoint for OSS API.",
   303  			Provider: "Alibaba",
   304  			Examples: []fs.OptionExample{{
   305  				Value: "oss-cn-hangzhou.aliyuncs.com",
   306  				Help:  "East China 1 (Hangzhou)",
   307  			}, {
   308  				Value: "oss-cn-shanghai.aliyuncs.com",
   309  				Help:  "East China 2 (Shanghai)",
   310  			}, {
   311  				Value: "oss-cn-qingdao.aliyuncs.com",
   312  				Help:  "North China 1 (Qingdao)",
   313  			}, {
   314  				Value: "oss-cn-beijing.aliyuncs.com",
   315  				Help:  "North China 2 (Beijing)",
   316  			}, {
   317  				Value: "oss-cn-zhangjiakou.aliyuncs.com",
   318  				Help:  "North China 3 (Zhangjiakou)",
   319  			}, {
   320  				Value: "oss-cn-huhehaote.aliyuncs.com",
   321  				Help:  "North China 5 (Huhehaote)",
   322  			}, {
   323  				Value: "oss-cn-shenzhen.aliyuncs.com",
   324  				Help:  "South China 1 (Shenzhen)",
   325  			}, {
   326  				Value: "oss-cn-hongkong.aliyuncs.com",
   327  				Help:  "Hong Kong (Hong Kong)",
   328  			}, {
   329  				Value: "oss-us-west-1.aliyuncs.com",
   330  				Help:  "US West 1 (Silicon Valley)",
   331  			}, {
   332  				Value: "oss-us-east-1.aliyuncs.com",
   333  				Help:  "US East 1 (Virginia)",
   334  			}, {
   335  				Value: "oss-ap-southeast-1.aliyuncs.com",
   336  				Help:  "Southeast Asia Southeast 1 (Singapore)",
   337  			}, {
   338  				Value: "oss-ap-southeast-2.aliyuncs.com",
   339  				Help:  "Asia Pacific Southeast 2 (Sydney)",
   340  			}, {
   341  				Value: "oss-ap-southeast-3.aliyuncs.com",
   342  				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
   343  			}, {
   344  				Value: "oss-ap-southeast-5.aliyuncs.com",
   345  				Help:  "Asia Pacific Southeast 5 (Jakarta)",
   346  			}, {
   347  				Value: "oss-ap-northeast-1.aliyuncs.com",
   348  				Help:  "Asia Pacific Northeast 1 (Japan)",
   349  			}, {
   350  				Value: "oss-ap-south-1.aliyuncs.com",
   351  				Help:  "Asia Pacific South 1 (Mumbai)",
   352  			}, {
   353  				Value: "oss-eu-central-1.aliyuncs.com",
   354  				Help:  "Central Europe 1 (Frankfurt)",
   355  			}, {
   356  				Value: "oss-eu-west-1.aliyuncs.com",
   357  				Help:  "West Europe (London)",
   358  			}, {
   359  				Value: "oss-me-east-1.aliyuncs.com",
   360  				Help:  "Middle East 1 (Dubai)",
   361  			}},
   362  		}, {
   363  			Name:     "endpoint",
   364  			Help:     "Endpoint for StackPath Object Storage.",
   365  			Provider: "StackPath",
   366  			Examples: []fs.OptionExample{{
   367  				Value: "s3.us-east-2.stackpathstorage.com",
   368  				Help:  "US East Endpoint",
   369  			}, {
   370  				Value: "s3.us-west-1.stackpathstorage.com",
   371  				Help:  "US West Endpoint",
   372  			}, {
   373  				Value: "s3.eu-central-1.stackpathstorage.com",
   374  				Help:  "EU Endpoint",
   375  			}},
   376  		}, {
   377  			Name:     "endpoint",
   378  			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
   379  			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
   380  			Examples: []fs.OptionExample{{
   381  				Value:    "objects-us-east-1.dream.io",
   382  				Help:     "Dream Objects endpoint",
   383  				Provider: "Dreamhost",
   384  			}, {
   385  				Value:    "nyc3.digitaloceanspaces.com",
   386  				Help:     "Digital Ocean Spaces New York 3",
   387  				Provider: "DigitalOcean",
   388  			}, {
   389  				Value:    "ams3.digitaloceanspaces.com",
   390  				Help:     "Digital Ocean Spaces Amsterdam 3",
   391  				Provider: "DigitalOcean",
   392  			}, {
   393  				Value:    "sgp1.digitaloceanspaces.com",
   394  				Help:     "Digital Ocean Spaces Singapore 1",
   395  				Provider: "DigitalOcean",
   396  			}, {
   397  				Value:    "s3.wasabisys.com",
   398  				Help:     "Wasabi US East endpoint",
   399  				Provider: "Wasabi",
   400  			}, {
   401  				Value:    "s3.us-west-1.wasabisys.com",
   402  				Help:     "Wasabi US West endpoint",
   403  				Provider: "Wasabi",
   404  			}, {
   405  				Value:    "s3.eu-central-1.wasabisys.com",
   406  				Help:     "Wasabi EU Central endpoint",
   407  				Provider: "Wasabi",
   408  			}},
   409  		}, {
   410  			Name:     "location_constraint",
   411  			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
   412  			Provider: "AWS",
   413  			Examples: []fs.OptionExample{{
   414  				Value: "",
   415  				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
   416  			}, {
   417  				Value: "us-east-2",
   418  				Help:  "US East (Ohio) Region.",
   419  			}, {
   420  				Value: "us-west-2",
   421  				Help:  "US West (Oregon) Region.",
   422  			}, {
   423  				Value: "us-west-1",
   424  				Help:  "US West (Northern California) Region.",
   425  			}, {
   426  				Value: "ca-central-1",
   427  				Help:  "Canada (Central) Region.",
   428  			}, {
   429  				Value: "eu-west-1",
   430  				Help:  "EU (Ireland) Region.",
   431  			}, {
   432  				Value: "eu-west-2",
   433  				Help:  "EU (London) Region.",
   434  			}, {
   435  				Value: "eu-north-1",
   436  				Help:  "EU (Stockholm) Region.",
   437  			}, {
   438  				Value: "EU",
   439  				Help:  "EU Region.",
   440  			}, {
   441  				Value: "ap-southeast-1",
   442  				Help:  "Asia Pacific (Singapore) Region.",
   443  			}, {
   444  				Value: "ap-southeast-2",
   445  				Help:  "Asia Pacific (Sydney) Region.",
   446  			}, {
   447  				Value: "ap-northeast-1",
   448  				Help:  "Asia Pacific (Tokyo) Region.",
   449  			}, {
   450  				Value: "ap-northeast-2",
   451  				Help:  "Asia Pacific (Seoul)",
   452  			}, {
   453  				Value: "ap-south-1",
   454  				Help:  "Asia Pacific (Mumbai)",
   455  			}, {
   456  				Value: "ap-east-1",
   457  				Help:  "Asia Pacific (Hong Kong)",
   458  			}, {
   459  				Value: "sa-east-1",
   460  				Help:  "South America (Sao Paulo) Region.",
   461  			}},
   462  		}, {
   463  			Name:     "location_constraint",
   464  			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
   465  			Provider: "IBMCOS",
   466  			Examples: []fs.OptionExample{{
   467  				Value: "us-standard",
   468  				Help:  "US Cross Region Standard",
   469  			}, {
   470  				Value: "us-vault",
   471  				Help:  "US Cross Region Vault",
   472  			}, {
   473  				Value: "us-cold",
   474  				Help:  "US Cross Region Cold",
   475  			}, {
   476  				Value: "us-flex",
   477  				Help:  "US Cross Region Flex",
   478  			}, {
   479  				Value: "us-east-standard",
   480  				Help:  "US East Region Standard",
   481  			}, {
   482  				Value: "us-east-vault",
   483  				Help:  "US East Region Vault",
   484  			}, {
   485  				Value: "us-east-cold",
   486  				Help:  "US East Region Cold",
   487  			}, {
   488  				Value: "us-east-flex",
   489  				Help:  "US East Region Flex",
   490  			}, {
   491  				Value: "us-south-standard",
   492  				Help:  "US South Region Standard",
   493  			}, {
   494  				Value: "us-south-vault",
   495  				Help:  "US South Region Vault",
   496  			}, {
   497  				Value: "us-south-cold",
   498  				Help:  "US South Region Cold",
   499  			}, {
   500  				Value: "us-south-flex",
   501  				Help:  "US South Region Flex",
   502  			}, {
   503  				Value: "eu-standard",
   504  				Help:  "EU Cross Region Standard",
   505  			}, {
   506  				Value: "eu-vault",
   507  				Help:  "EU Cross Region Vault",
   508  			}, {
   509  				Value: "eu-cold",
   510  				Help:  "EU Cross Region Cold",
   511  			}, {
   512  				Value: "eu-flex",
   513  				Help:  "EU Cross Region Flex",
   514  			}, {
   515  				Value: "eu-gb-standard",
   516  				Help:  "Great Britain Standard",
   517  			}, {
   518  				Value: "eu-gb-vault",
   519  				Help:  "Great Britain Vault",
   520  			}, {
   521  				Value: "eu-gb-cold",
   522  				Help:  "Great Britain Cold",
   523  			}, {
   524  				Value: "eu-gb-flex",
   525  				Help:  "Great Britain Flex",
   526  			}, {
   527  				Value: "ap-standard",
   528  				Help:  "APAC Standard",
   529  			}, {
   530  				Value: "ap-vault",
   531  				Help:  "APAC Vault",
   532  			}, {
   533  				Value: "ap-cold",
   534  				Help:  "APAC Cold",
   535  			}, {
   536  				Value: "ap-flex",
   537  				Help:  "APAC Flex",
   538  			}, {
   539  				Value: "mel01-standard",
   540  				Help:  "Melbourne Standard",
   541  			}, {
   542  				Value: "mel01-vault",
   543  				Help:  "Melbourne Vault",
   544  			}, {
   545  				Value: "mel01-cold",
   546  				Help:  "Melbourne Cold",
   547  			}, {
   548  				Value: "mel01-flex",
   549  				Help:  "Melbourne Flex",
   550  			}, {
   551  				Value: "tor01-standard",
   552  				Help:  "Toronto Standard",
   553  			}, {
   554  				Value: "tor01-vault",
   555  				Help:  "Toronto Vault",
   556  			}, {
   557  				Value: "tor01-cold",
   558  				Help:  "Toronto Cold",
   559  			}, {
   560  				Value: "tor01-flex",
   561  				Help:  "Toronto Flex",
   562  			}},
   563  		}, {
   564  			Name:     "location_constraint",
   565  			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
   566  			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
   567  		}, {
   568  			Name: "acl",
   569  			Help: `Canned ACL used when creating buckets and storing or copying objects.
   570  
   571  This ACL is used for creating objects and, if bucket_acl isn't set, for creating buckets too.
   572  
   573  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
   574  
   575  Note that this ACL is applied when server side copying objects as S3
   576  doesn't copy the ACL from the source but rather writes a fresh one.`,
   577  			Examples: []fs.OptionExample{{
   578  				Value:    "private",
   579  				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
   580  				Provider: "!IBMCOS",
   581  			}, {
   582  				Value:    "public-read",
   583  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
   584  				Provider: "!IBMCOS",
   585  			}, {
   586  				Value:    "public-read-write",
   587  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
   588  				Provider: "!IBMCOS",
   589  			}, {
   590  				Value:    "authenticated-read",
   591  				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
   592  				Provider: "!IBMCOS",
   593  			}, {
   594  				Value:    "bucket-owner-read",
   595  				Help:     "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
   596  				Provider: "!IBMCOS",
   597  			}, {
   598  				Value:    "bucket-owner-full-control",
   599  				Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
   600  				Provider: "!IBMCOS",
   601  			}, {
   602  				Value:    "private",
   603  				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
   604  				Provider: "IBMCOS",
   605  			}, {
   606  				Value:    "public-read",
   607  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
   608  				Provider: "IBMCOS",
   609  			}, {
   610  				Value:    "public-read-write",
   611  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
   612  				Provider: "IBMCOS",
   613  			}, {
   614  				Value:    "authenticated-read",
   615  				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
   616  				Provider: "IBMCOS",
   617  			}},
   618  		}, {
   619  			Name: "bucket_acl",
   620  			Help: `Canned ACL used when creating buckets.
   621  
   622  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
   623  
   624  Note that this ACL is applied only when creating buckets.  If it
   625  isn't set then "acl" is used instead.`,
   626  			Advanced: true,
   627  			Examples: []fs.OptionExample{{
   628  				Value: "private",
   629  				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
   630  			}, {
   631  				Value: "public-read",
   632  				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
   633  			}, {
   634  				Value: "public-read-write",
   635  				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
   636  			}, {
   637  				Value: "authenticated-read",
   638  				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
   639  			}},
   640  		}, {
   641  			Name:     "server_side_encryption",
   642  			Help:     "The server-side encryption algorithm used when storing this object in S3.",
   643  			Provider: "AWS",
   644  			Examples: []fs.OptionExample{{
   645  				Value: "",
   646  				Help:  "None",
   647  			}, {
   648  				Value: "AES256",
   649  				Help:  "AES256",
   650  			}, {
   651  				Value: "aws:kms",
   652  				Help:  "aws:kms",
   653  			}},
   654  		}, {
   655  			Name:     "sse_kms_key_id",
   656  			Help:     "If using KMS ID you must provide the ARN of the Key.",
   657  			Provider: "AWS",
   658  			Examples: []fs.OptionExample{{
   659  				Value: "",
   660  				Help:  "None",
   661  			}, {
   662  				Value: "arn:aws:kms:us-east-1:*",
   663  				Help:  "arn:aws:kms:*",
   664  			}},
   665  		}, {
   666  			Name:     "storage_class",
   667  			Help:     "The storage class to use when storing new objects in S3.",
   668  			Provider: "AWS",
   669  			Examples: []fs.OptionExample{{
   670  				Value: "",
   671  				Help:  "Default",
   672  			}, {
   673  				Value: "STANDARD",
   674  				Help:  "Standard storage class",
   675  			}, {
   676  				Value: "REDUCED_REDUNDANCY",
   677  				Help:  "Reduced redundancy storage class",
   678  			}, {
   679  				Value: "STANDARD_IA",
   680  				Help:  "Standard Infrequent Access storage class",
   681  			}, {
   682  				Value: "ONEZONE_IA",
   683  				Help:  "One Zone Infrequent Access storage class",
   684  			}, {
   685  				Value: "GLACIER",
   686  				Help:  "Glacier storage class",
   687  			}, {
   688  				Value: "DEEP_ARCHIVE",
   689  				Help:  "Glacier Deep Archive storage class",
   690  			}, {
   691  				Value: "INTELLIGENT_TIERING",
   692  				Help:  "Intelligent-Tiering storage class",
   693  			}},
   694  		}, {
   695  			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
   696  			Name:     "storage_class",
   697  			Help:     "The storage class to use when storing new objects in OSS.",
   698  			Provider: "Alibaba",
   699  			Examples: []fs.OptionExample{{
   700  				Value: "",
   701  				Help:  "Default",
   702  			}, {
   703  				Value: "STANDARD",
   704  				Help:  "Standard storage class",
   705  			}, {
   706  				Value: "GLACIER",
   707  				Help:  "Archive storage mode.",
   708  			}, {
   709  				Value: "STANDARD_IA",
   710  				Help:  "Infrequent access storage mode.",
   711  			}},
   712  		}, {
   713  			Name: "upload_cutoff",
   714  			Help: `Cutoff for switching to chunked upload
   715  
   716  Any files larger than this will be uploaded in chunks of chunk_size.
   717  The minimum is 0 and the maximum is 5GB.`,
   718  			Default:  defaultUploadCutoff,
   719  			Advanced: true,
   720  		}, {
   721  			Name: "chunk_size",
   722  			Help: `Chunk size to use for uploading.
   723  
   724  When uploading files larger than upload_cutoff or files with unknown
   725  size (eg from "rclone rcat" or uploaded with "rclone mount" or google
   726  photos or google docs) they will be uploaded as multipart uploads
   727  using this chunk size.
   728  
   729  Note that "--s3-upload-concurrency" chunks of this size are buffered
   730  in memory per transfer.
   731  
   732  If you are transferring large files over high speed links and you have
   733  enough memory, then increasing this will speed up the transfers.
   734  
   735  Rclone will automatically increase the chunk size when uploading a
   736  large file of known size to stay below the 10,000 chunks limit.
   737  
   738  Files of unknown size are uploaded with the configured
   739  chunk_size. Since the default chunk size is 5MB and there can be at
   740  most 10,000 chunks, this means that by default the maximum size of
   741  file you can stream upload is 48GB.  If you wish to stream upload
   742  larger files then you will need to increase chunk_size.`,
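			// (a hedged sketch of this 10,000 x chunk_size arithmetic follows the constants block below)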
   743  			Default:  minChunkSize,
   744  			Advanced: true,
   745  		}, {
   746  			Name: "copy_cutoff",
   747  			Help: `Cutoff for switching to multipart copy
   748  
   749  Any files larger than this that need to be server side copied will be
   750  copied in chunks of this size.
   751  
   752  The minimum is 0 and the maximum is 5GB.`,
   753  			Default:  fs.SizeSuffix(maxSizeForCopy),
   754  			Advanced: true,
   755  		}, {
   756  			Name:     "disable_checksum",
   757  			Help:     "Don't store MD5 checksum with object metadata",
   758  			Default:  false,
   759  			Advanced: true,
   760  		}, {
   761  			Name:     "session_token",
   762  			Help:     "An AWS session token",
   763  			Advanced: true,
   764  		}, {
   765  			Name: "upload_concurrency",
   766  			Help: `Concurrency for multipart uploads.
   767  
   768  This is the number of chunks of the same file that are uploaded
   769  concurrently.
   770  
   771  If you are uploading small numbers of large files over high speed links
   772  and these uploads do not fully utilize your bandwidth, then increasing
   773  this may help to speed up the transfers.`,
   774  			Default:  4,
   775  			Advanced: true,
   776  		}, {
   777  			Name: "force_path_style",
   778  			Help: `If true use path style access, if false use virtual hosted style.
   779  
   780  If this is true (the default) then rclone will use path style access,
   781  if false then rclone will use virtual hosted style. See [the AWS S3
   782  docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
   783  for more info.
   784  
   785  Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
   786  false - rclone will do this automatically based on the provider
   787  setting.`,
   788  			Default:  true,
   789  			Advanced: true,
   790  		}, {
   791  			Name: "v2_auth",
   792  			Help: `If true use v2 authentication.
   793  
   794  If this is false (the default) then rclone will use v4 authentication.
   795  If it is set then rclone will use v2 authentication.
   796  
   797  Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
   798  			Default:  false,
   799  			Advanced: true,
   800  		}, {
   801  			Name:     "use_accelerate_endpoint",
   802  			Provider: "AWS",
   803  			Help: `If true use the AWS S3 accelerated endpoint.
   804  
   805  See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
   806  			Default:  false,
   807  			Advanced: true,
   808  		}, {
   809  			Name:     "leave_parts_on_error",
   810  			Provider: "AWS",
   811  			Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
   812  
   813  It should be set to true for resuming uploads across different sessions.
   814  
   815  WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
   816  `,
   817  			Default:  false,
   818  			Advanced: true,
   819  		}, {
   820  			Name: "list_chunk",
   821  			Help: `Size of listing chunk (response list for each ListObject S3 request).
   822  
   823  This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification.
   824  Most services truncate the response list to 1000 objects even if more than that is requested.
   825  In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
   826  In Ceph, this can be increased with the "rgw list buckets max chunk" option.
   827  `,
   828  			Default:  1000,
   829  			Advanced: true,
   830  		}, {
   831  			Name:     config.ConfigEncoding,
   832  			Help:     config.ConfigEncodingHelp,
   833  			Advanced: true,
   834  			// Any UTF-8 character is valid in a key, however the API can't handle
   835  			// invalid UTF-8, and / has a special meaning.
   836  			//
   837  			// The SDK can't seem to handle uploading files called '.'
   838  			//
   839  			// FIXME would be nice to add
   840  			// - initial / encoding
   841  			// - doubled / encoding
   842  			// - trailing / encoding
   843  			// so that AWS keys are always valid file names
   844  			Default: (encoder.EncodeInvalidUtf8 |
   845  				encoder.EncodeSlash |
   846  				encoder.EncodeDot),
   847  		}},
   848  	})
   849  }
   850  
   851  // Constants
   852  const (
   853  	metaMtime           = "Mtime"                // the meta key to store mtime in - eg X-Amz-Meta-Mtime
   854  	metaMD5Hash         = "Md5chksum"            // the meta key to store md5hash in
   855  	maxRetries          = 10                     // number of retries to make of operations
   856  	maxSizeForCopy      = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
   857  	maxUploadParts      = 10000                  // maximum allowed number of parts in a multi-part upload
   858  	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)
   859  	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
   860  	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
   861  	minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
   862  )
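
// The chunk_size help above notes that files of unknown size are streamed
// in at most maxUploadParts chunks. This is a hedged, illustrative sketch
// of that arithmetic (not part of the original file): with the default 5MB
// chunk the limit works out to roughly 48GB.
func exampleMaxStreamSize() fs.SizeSuffix {
	// minChunkSize * maxUploadParts = 5MB * 10,000 ≈ 48.8GB
	return minChunkSize * maxUploadParts
}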
   863  
   864  // Options defines the configuration for this backend
   865  type Options struct {
   866  	Provider              string               `config:"provider"`
   867  	EnvAuth               bool                 `config:"env_auth"`
   868  	AccessKeyID           string               `config:"access_key_id"`
   869  	SecretAccessKey       string               `config:"secret_access_key"`
   870  	Region                string               `config:"region"`
   871  	Endpoint              string               `config:"endpoint"`
   872  	LocationConstraint    string               `config:"location_constraint"`
   873  	ACL                   string               `config:"acl"`
   874  	BucketACL             string               `config:"bucket_acl"`
   875  	ServerSideEncryption  string               `config:"server_side_encryption"`
   876  	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
   877  	StorageClass          string               `config:"storage_class"`
   878  	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
   879  	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
   880  	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
   881  	DisableChecksum       bool                 `config:"disable_checksum"`
   882  	SessionToken          string               `config:"session_token"`
   883  	UploadConcurrency     int                  `config:"upload_concurrency"`
   884  	ForcePathStyle        bool                 `config:"force_path_style"`
   885  	V2Auth                bool                 `config:"v2_auth"`
   886  	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
   887  	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
   888  	ListChunk             int64                `config:"list_chunk"`
   889  	Enc                   encoder.MultiEncoder `config:"encoding"`
   890  }
   891  
   892  // Fs represents a remote s3 server
   893  type Fs struct {
   894  	name          string           // the name of the remote
   895  	root          string           // root of the bucket - ignore all objects above this
   896  	opt           Options          // parsed options
   897  	features      *fs.Features     // optional features
   898  	c             *s3.S3           // the connection to the s3 server
   899  	ses           *session.Session // the s3 session
   900  	rootBucket    string           // bucket part of root (if any)
   901  	rootDirectory string           // directory part of root (if any)
   902  	cache         *bucket.Cache    // cache for bucket creation status
   903  	pacer         *fs.Pacer        // To pace the API calls
   904  	srv           *http.Client     // a plain http client
   905  }
   906  
   907  // Object describes an s3 object
   908  type Object struct {
   909  	// Will definitely have everything but meta which may be nil
   910  	//
   911  	// List will read everything but meta & mimeType - to fill
   912  	// that in you need to call readMetaData
   913  	fs           *Fs                // what this object is part of
   914  	remote       string             // The remote path
   915  	etag         string             // md5sum of the object
   916  	bytes        int64              // size of the object
   917  	lastModified time.Time          // Last modified
   918  	meta         map[string]*string // The object metadata if known - may be nil
   919  	mimeType     string             // MimeType of object - may be ""
   920  	storageClass string             // eg GLACIER
   921  }
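
// Hedged illustration (not in the original file) of the lazy metadata
// behaviour described above: listings fill in size, etag and modtime,
// while meta and mimeType stay unset until readMetaData is called.
func exampleEnsureMeta(ctx context.Context, o *Object) error {
	if o.meta == nil {
		return o.readMetaData(ctx) // fetches meta and mimeType on demand
	}
	return nil
}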
   922  
   923  // ------------------------------------------------------------
   924  
   925  // Name of the remote (as passed into NewFs)
   926  func (f *Fs) Name() string {
   927  	return f.name
   928  }
   929  
   930  // Root of the remote (as passed into NewFs)
   931  func (f *Fs) Root() string {
   932  	return f.root
   933  }
   934  
   935  // String converts this Fs to a string
   936  func (f *Fs) String() string {
   937  	if f.rootBucket == "" {
   938  		return "S3 root"
   939  	}
   940  	if f.rootDirectory == "" {
   941  		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
   942  	}
   943  	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
   944  }
   945  
   946  // Features returns the optional features of this Fs
   947  func (f *Fs) Features() *fs.Features {
   948  	return f.features
   949  }
   950  
   951  // retryErrorCodes is a slice of error codes that we will retry
   952  // See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
   953  var retryErrorCodes = []int{
   954  	// 409, // Conflict - various states that could be resolved on a retry
   955  	503, // Service Unavailable/Slow Down - "Reduce your request rate"
   956  }
   957  
   958  // S3 is pretty resilient, and the built-in retry handling is probably sufficient
   959  // as it should notice closed connections and timeouts, which are the most likely
   960  // sort of failure modes
   961  func (f *Fs) shouldRetry(err error) (bool, error) {
   962  	// If this is an awserr object, try and extract more useful information to determine if we should retry
   963  	if awsError, ok := err.(awserr.Error); ok {
   964  		// Simple case, check the original embedded error in case it's generically retryable
   965  		if fserrors.ShouldRetry(awsError.OrigErr()) {
   966  			return true, err
   967  		}
   968  		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
   969  		if reqErr, ok := err.(awserr.RequestFailure); ok {
   970  			// 301 if wrong region for bucket - can only update if running from a bucket
   971  			if f.rootBucket != "" {
   972  				if reqErr.StatusCode() == http.StatusMovedPermanently {
   973  					urfbErr := f.updateRegionForBucket(f.rootBucket)
   974  					if urfbErr != nil {
   975  						fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
   976  						return false, err
   977  					}
   978  					return true, err
   979  				}
   980  			}
   981  			for _, e := range retryErrorCodes {
   982  				if reqErr.StatusCode() == e {
   983  					return true, err
   984  				}
   985  			}
   986  		}
   987  	}
   988  	// Ok, not an awserr, check for generic failure conditions
   989  	return fserrors.ShouldRetry(err), err
   990  }
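
// Hedged usage sketch of the retry convention above (not part of the
// original file): operations run inside pacer.Call, which backs off and
// retries while shouldRetry returns true, as the real methods below do.
func exampleRetriedCall(f *Fs, bucketName string) error {
	req := s3.HeadBucketInput{Bucket: &bucketName}
	return f.pacer.Call(func() (bool, error) {
		_, err := f.c.HeadBucket(&req)
		return f.shouldRetry(err)
	})
}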
   991  
   992  // parsePath parses a remote 'url'
   993  func parsePath(path string) (root string) {
   994  	root = strings.Trim(path, "/")
   995  	return
   996  }
   997  
   998  // split returns bucket and bucketPath from the rootRelativePath
   999  // relative to f.root
  1000  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
  1001  	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
  1002  	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
  1003  }
  1004  
  1005  // split returns bucket and bucketPath from the object
  1006  func (o *Object) split() (bucket, bucketPath string) {
  1007  	return o.fs.split(o.remote)
  1008  }
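
// Hedged illustration of the helpers above: parsePath trims slashes and
// bucket.Split separates the bucket from the path, so an Fs rooted at
// "mybucket/dir" splits "file.txt" into ("mybucket", "dir/file.txt")
// before encoding. The literals here are illustrative only.
func exampleSplit() (bucketName, bucketPath string) {
	return bucket.Split(path.Join(parsePath("/mybucket/dir/"), "file.txt"))
}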
  1009  
  1010  // s3Connection makes a connection to s3
  1011  func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
  1012  	// Make the auth
  1013  	v := credentials.Value{
  1014  		AccessKeyID:     opt.AccessKeyID,
  1015  		SecretAccessKey: opt.SecretAccessKey,
  1016  		SessionToken:    opt.SessionToken,
  1017  	}
  1018  
  1019  	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
  1020  	def := defaults.Get()
  1021  	def.Config.HTTPClient = lowTimeoutClient
  1022  
  1023  	// first provider to supply a credential set "wins"
  1024  	providers := []credentials.Provider{
  1025  		// use static credentials if they're present (checked by provider)
  1026  		&credentials.StaticProvider{Value: v},
  1027  
  1028  		// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
  1029  		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
  1030  		&credentials.EnvProvider{},
  1031  
  1032  		// A SharedCredentialsProvider retrieves credentials
  1033  		// from the current user's home directory.  It checks
  1034  		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
  1035  		&credentials.SharedCredentialsProvider{},
  1036  
  1037  		// Pick up IAM role if we're in an ECS task
  1038  		defaults.RemoteCredProvider(*def.Config, def.Handlers),
  1039  
  1040  		// Pick up IAM role in case we're on EC2
  1041  		&ec2rolecreds.EC2RoleProvider{
  1042  			Client: ec2metadata.New(session.New(), &aws.Config{
  1043  				HTTPClient: lowTimeoutClient,
  1044  			}),
  1045  			ExpiryWindow: 3 * time.Minute,
  1046  		},
  1047  
  1048  		// Pick up IAM role if we are in EKS
  1049  		&stscreds.WebIdentityRoleProvider{
  1050  			ExpiryWindow: 3 * time.Minute,
  1051  		},
  1052  	}
  1053  	cred := credentials.NewChainCredentials(providers)
  1054  
  1055  	switch {
  1056  	case opt.EnvAuth:
  1057  		// No need for empty checks if "env_auth" is true
  1058  	case v.AccessKeyID == "" && v.SecretAccessKey == "":
  1059  	// if no access key/secret and env_auth is disabled then fall back to anonymous access
  1060  		cred = credentials.AnonymousCredentials
  1061  	case v.AccessKeyID == "":
  1062  		return nil, nil, errors.New("access_key_id not found")
  1063  	case v.SecretAccessKey == "":
  1064  		return nil, nil, errors.New("secret_access_key not found")
  1065  	}
  1066  
  1067  	if opt.Region == "" && opt.Endpoint == "" {
  1068  		opt.Endpoint = "https://s3.amazonaws.com/"
  1069  	}
  1070  	if opt.Region == "" {
  1071  		opt.Region = "us-east-1"
  1072  	}
  1073  	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
  1074  		opt.ForcePathStyle = false
  1075  	}
  1076  	awsConfig := aws.NewConfig().
  1077  		WithMaxRetries(maxRetries).
  1078  		WithCredentials(cred).
  1079  		WithHTTPClient(fshttp.NewClient(fs.Config)).
  1080  		WithS3ForcePathStyle(opt.ForcePathStyle).
  1081  		WithS3UseAccelerate(opt.UseAccelerateEndpoint)
  1082  	if opt.Region != "" {
  1083  		awsConfig.WithRegion(opt.Region)
  1084  	}
  1085  	if opt.Endpoint != "" {
  1086  		awsConfig.WithEndpoint(opt.Endpoint)
  1087  	}
  1088  
  1089  	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
  1090  	awsSessionOpts := session.Options{
  1091  		Config: *awsConfig,
  1092  	}
  1093  	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
  1094  		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
  1095  		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
  1096  		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
  1097  		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
  1098  		awsSessionOpts.Config.Credentials = nil
  1099  	}
  1100  	ses, err := session.NewSessionWithOptions(awsSessionOpts)
  1101  	if err != nil {
  1102  		return nil, nil, err
  1103  	}
  1104  	c := s3.New(ses)
  1105  	if opt.V2Auth || opt.Region == "other-v2-signature" {
  1106  		fs.Debugf(nil, "Using v2 auth")
  1107  		signer := func(req *request.Request) {
  1108  			// Ignore AnonymousCredentials object
  1109  			if req.Config.Credentials == credentials.AnonymousCredentials {
  1110  				return
  1111  			}
  1112  			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
  1113  		}
  1114  		c.Handlers.Sign.Clear()
  1115  		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  1116  		c.Handlers.Sign.PushBack(signer)
  1117  	}
  1118  	return c, ses, nil
  1119  }
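
// Hedged sketch of driving s3Connection directly (NewFs below is the real
// caller). The Options literal is illustrative only; in practice it is
// populated from the config by configstruct.Set.
func exampleConnect() (*s3.S3, *session.Session, error) {
	opt := &Options{
		Provider: "AWS",
		Region:   "us-east-1",
		EnvAuth:  true, // pick up credentials from env vars or IAM
	}
	return s3Connection(opt)
}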
  1120  
  1121  func checkUploadChunkSize(cs fs.SizeSuffix) error {
  1122  	if cs < minChunkSize {
  1123  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
  1124  	}
  1125  	return nil
  1126  }
  1127  
  1128  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1129  	err = checkUploadChunkSize(cs)
  1130  	if err == nil {
  1131  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
  1132  	}
  1133  	return
  1134  }
  1135  
  1136  func checkUploadCutoff(cs fs.SizeSuffix) error {
  1137  	if cs > maxUploadCutoff {
  1138  		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
  1139  	}
  1140  	return nil
  1141  }
  1142  
  1143  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1144  	err = checkUploadCutoff(cs)
  1145  	if err == nil {
  1146  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
  1147  	}
  1148  	return
  1149  }
  1150  
  1151  // setRoot changes the root of the Fs
  1152  func (f *Fs) setRoot(root string) {
  1153  	f.root = parsePath(root)
  1154  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
  1155  }
  1156  
  1157  // NewFs constructs an Fs from the path, bucket:path
  1158  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
  1159  	// Parse config into Options struct
  1160  	opt := new(Options)
  1161  	err := configstruct.Set(m, opt)
  1162  	if err != nil {
  1163  		return nil, err
  1164  	}
  1165  	err = checkUploadChunkSize(opt.ChunkSize)
  1166  	if err != nil {
  1167  		return nil, errors.Wrap(err, "s3: chunk size")
  1168  	}
  1169  	err = checkUploadCutoff(opt.UploadCutoff)
  1170  	if err != nil {
  1171  		return nil, errors.Wrap(err, "s3: upload cutoff")
  1172  	}
  1173  	if opt.ACL == "" {
  1174  		opt.ACL = "private"
  1175  	}
  1176  	if opt.BucketACL == "" {
  1177  		opt.BucketACL = opt.ACL
  1178  	}
  1179  	c, ses, err := s3Connection(opt)
  1180  	if err != nil {
  1181  		return nil, err
  1182  	}
  1183  	f := &Fs{
  1184  		name:  name,
  1185  		opt:   *opt,
  1186  		c:     c,
  1187  		ses:   ses,
  1188  		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
  1189  		cache: bucket.NewCache(),
  1190  		srv:   fshttp.NewClient(fs.Config),
  1191  	}
  1192  	f.setRoot(root)
  1193  	f.features = (&fs.Features{
  1194  		ReadMimeType:      true,
  1195  		WriteMimeType:     true,
  1196  		BucketBased:       true,
  1197  		BucketBasedRootOK: true,
  1198  		SetTier:           true,
  1199  		GetTier:           true,
  1200  	}).Fill(f)
  1201  	if f.rootBucket != "" && f.rootDirectory != "" {
  1202  		// Check to see if the object exists
  1203  		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
  1204  		req := s3.HeadObjectInput{
  1205  			Bucket: &f.rootBucket,
  1206  			Key:    &encodedDirectory,
  1207  		}
  1208  		err = f.pacer.Call(func() (bool, error) {
  1209  			_, err = f.c.HeadObject(&req)
  1210  			return f.shouldRetry(err)
  1211  		})
  1212  		if err == nil {
  1213  			newRoot := path.Dir(f.root)
  1214  			if newRoot == "." {
  1215  				newRoot = ""
  1216  			}
  1217  			f.setRoot(newRoot)
  1218  			// return an error with an fs which points to the parent
  1219  			return f, fs.ErrorIsFile
  1220  		}
  1221  	}
  1222  	// f.listMultipartUploads()
  1223  	return f, nil
  1224  }
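
// Hedged sketch (not part of the original file) of the fs.ErrorIsFile
// convention NewFs uses above: when the root turns out to be a file, the
// returned Fs points at its parent directory and is still usable.
func exampleNewFs(m configmap.Mapper) (fs.Fs, error) {
	f, err := NewFs("remote", "mybucket/dir/file.txt", m)
	if err == fs.ErrorIsFile {
		// f is valid and rooted at "mybucket/dir"
		return f, nil
	}
	return f, err
}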
  1225  
  1226  // Return an Object from a path
  1227  //
  1228  // If it can't be found it returns the error fs.ErrorObjectNotFound.
  1229  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
  1230  	o := &Object{
  1231  		fs:     f,
  1232  		remote: remote,
  1233  	}
  1234  	if info != nil {
  1235  		// Set info but not meta
  1236  		if info.LastModified == nil {
  1237  			fs.Logf(o, "Failed to read last modified")
  1238  			o.lastModified = time.Now()
  1239  		} else {
  1240  			o.lastModified = *info.LastModified
  1241  		}
  1242  		o.etag = aws.StringValue(info.ETag)
  1243  		o.bytes = aws.Int64Value(info.Size)
  1244  		o.storageClass = aws.StringValue(info.StorageClass)
  1245  	} else {
  1246  		err := o.readMetaData(ctx) // reads info and meta, returning an error
  1247  		if err != nil {
  1248  			return nil, err
  1249  		}
  1250  	}
  1251  	return o, nil
  1252  }
  1253  
  1254  // NewObject finds the Object at remote.  If it can't be found
  1255  // it returns the error fs.ErrorObjectNotFound.
  1256  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1257  	return f.newObjectWithInfo(ctx, remote, nil)
  1258  }
  1259  
  1260  // Gets the bucket location
  1261  func (f *Fs) getBucketLocation(bucket string) (string, error) {
  1262  	req := s3.GetBucketLocationInput{
  1263  		Bucket: &bucket,
  1264  	}
  1265  	var resp *s3.GetBucketLocationOutput
  1266  	var err error
  1267  	err = f.pacer.Call(func() (bool, error) {
  1268  		resp, err = f.c.GetBucketLocation(&req)
  1269  		return f.shouldRetry(err)
  1270  	})
  1271  	if err != nil {
  1272  		return "", err
  1273  	}
  1274  	return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
  1275  }
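
// Hedged note on getBucketLocation above: s3.NormalizeBucketLocation maps
// the legacy LocationConstraint values onto usable region names, eg the
// empty string becomes "us-east-1" and "EU" becomes "eu-west-1".
func exampleNormalizeLocation() string {
	return s3.NormalizeBucketLocation("") // "us-east-1"
}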
  1276  
  1277  // Updates the region for the bucket by reading the region from the
  1278  // bucket then updating the session.
  1279  func (f *Fs) updateRegionForBucket(bucket string) error {
  1280  	region, err := f.getBucketLocation(bucket)
  1281  	if err != nil {
  1282  		return errors.Wrap(err, "reading bucket location failed")
  1283  	}
  1284  	if aws.StringValue(f.c.Config.Endpoint) != "" {
  1285  		return errors.Errorf("can't set region to %q as endpoint is set", region)
  1286  	}
  1287  	if aws.StringValue(f.c.Config.Region) == region {
  1288  		return errors.Errorf("region is already %q - not updating", region)
  1289  	}
  1290  
  1291  	// Make a new session with the new region
  1292  	oldRegion := f.opt.Region
  1293  	f.opt.Region = region
  1294  	c, ses, err := s3Connection(&f.opt)
  1295  	if err != nil {
  1296  		return errors.Wrap(err, "creating new session failed")
  1297  	}
  1298  	f.c = c
  1299  	f.ses = ses
  1300  
  1301  	fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
  1302  	return nil
  1303  }
  1304  
  1305  // listFn is called from list to handle an object.
  1306  type listFn func(remote string, object *s3.Object, isDirectory bool) error
  1307  
  1308  // list lists the objects into the function supplied from
  1309  // the bucket and directory supplied.  The remote has prefix
  1310  // removed from it and if addBucket is set then it adds the
  1311  // bucket to the start.
  1312  //
  1313  // Set recurse to read sub directories
  1314  func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
  1315  	if prefix != "" {
  1316  		prefix += "/"
  1317  	}
  1318  	if directory != "" {
  1319  		directory += "/"
  1320  	}
  1321  	delimiter := ""
  1322  	if !recurse {
  1323  		delimiter = "/"
  1324  	}
  1325  	var marker *string
  1326  	// URL encode the listings so we can use control characters in object names
  1327  	// See: https://github.com/aws/aws-sdk-go/issues/1914
  1328  	//
  1329  	// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
  1330  	// it doesn't encode CommonPrefixes.
  1331  	// See: https://tracker.ceph.com/issues/41870
  1332  	//
  1333  	// This does not work under IBM COS either: see https://github.com/rclone/rclone/issues/3345
  1334  	// though maybe it does on some versions.
  1335  	//
  1336  	// This does work with minio but was only added relatively recently
  1337  	// https://github.com/minio/minio/pull/7265
  1338  	//
  1339  	// So we enable this only on providers we know support it properly; all others can retry when a
  1340  	// XML Syntax error is detected.
  1341  	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba")
  1342  	for {
  1343  		// FIXME need to implement ALL loop
  1344  		req := s3.ListObjectsInput{
  1345  			Bucket:    &bucket,
  1346  			Delimiter: &delimiter,
  1347  			Prefix:    &directory,
  1348  			MaxKeys:   &f.opt.ListChunk,
  1349  			Marker:    marker,
  1350  		}
  1351  		if urlEncodeListings {
  1352  			req.EncodingType = aws.String(s3.EncodingTypeUrl)
  1353  		}
  1354  		var resp *s3.ListObjectsOutput
  1355  		var err error
  1356  		err = f.pacer.Call(func() (bool, error) {
  1357  			resp, err = f.c.ListObjectsWithContext(ctx, &req)
  1358  			if err != nil && !urlEncodeListings {
  1359  				if awsErr, ok := err.(awserr.RequestFailure); ok {
  1360  					if origErr := awsErr.OrigErr(); origErr != nil {
  1361  						if _, ok := origErr.(*xml.SyntaxError); ok {
  1362  							// Retry the listing with URL encoding as there were characters that XML can't encode
  1363  							urlEncodeListings = true
  1364  							req.EncodingType = aws.String(s3.EncodingTypeUrl)
  1365  							fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
  1366  							return true, err
  1367  						}
  1368  					}
  1369  				}
  1370  			}
  1371  			return f.shouldRetry(err)
  1372  		})
  1373  		if err != nil {
  1374  			if awsErr, ok := err.(awserr.RequestFailure); ok {
  1375  				if awsErr.StatusCode() == http.StatusNotFound {
  1376  					err = fs.ErrorDirNotFound
  1377  				}
  1378  			}
  1379  			if f.rootBucket == "" {
				// if listing from the root, ignore wrong region requests by
				// returning an empty directory
  1382  				if reqErr, ok := err.(awserr.RequestFailure); ok {
  1383  					// 301 if wrong region for bucket
  1384  					if reqErr.StatusCode() == http.StatusMovedPermanently {
  1385  						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
  1386  						return nil
  1387  					}
  1388  				}
  1389  			}
  1390  			return err
  1391  		}
  1392  		if !recurse {
  1393  			for _, commonPrefix := range resp.CommonPrefixes {
  1394  				if commonPrefix.Prefix == nil {
  1395  					fs.Logf(f, "Nil common prefix received")
  1396  					continue
  1397  				}
  1398  				remote := *commonPrefix.Prefix
  1399  				if urlEncodeListings {
  1400  					remote, err = url.QueryUnescape(remote)
  1401  					if err != nil {
  1402  						fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
  1403  						continue
  1404  					}
  1405  				}
  1406  				remote = f.opt.Enc.ToStandardPath(remote)
  1407  				if !strings.HasPrefix(remote, prefix) {
  1408  					fs.Logf(f, "Odd name received %q", remote)
  1409  					continue
  1410  				}
  1411  				remote = remote[len(prefix):]
  1412  				if addBucket {
  1413  					remote = path.Join(bucket, remote)
  1414  				}
  1415  				if strings.HasSuffix(remote, "/") {
  1416  					remote = remote[:len(remote)-1]
  1417  				}
  1418  				err = fn(remote, &s3.Object{Key: &remote}, true)
  1419  				if err != nil {
  1420  					return err
  1421  				}
  1422  			}
  1423  		}
  1424  		for _, object := range resp.Contents {
  1425  			remote := aws.StringValue(object.Key)
  1426  			if urlEncodeListings {
  1427  				remote, err = url.QueryUnescape(remote)
  1428  				if err != nil {
  1429  					fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
  1430  					continue
  1431  				}
  1432  			}
  1433  			remote = f.opt.Enc.ToStandardPath(remote)
  1434  			if !strings.HasPrefix(remote, prefix) {
  1435  				fs.Logf(f, "Odd name received %q", remote)
  1436  				continue
  1437  			}
  1438  			remote = remote[len(prefix):]
  1439  			isDirectory := strings.HasSuffix(remote, "/")
  1440  			if addBucket {
  1441  				remote = path.Join(bucket, remote)
  1442  			}
  1443  			// is this a directory marker?
  1444  			if isDirectory && object.Size != nil && *object.Size == 0 {
  1445  				continue // skip directory marker
  1446  			}
  1447  			err = fn(remote, object, false)
  1448  			if err != nil {
  1449  				return err
  1450  			}
  1451  		}
  1452  		if !aws.BoolValue(resp.IsTruncated) {
  1453  			break
  1454  		}
  1455  		// Use NextMarker if set, otherwise use last Key
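		// (S3 only returns NextMarker when a delimiter is supplied, so
		// recursive listings have to fall back to the last Key instead)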
  1456  		if resp.NextMarker == nil || *resp.NextMarker == "" {
  1457  			if len(resp.Contents) == 0 {
  1458  				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
  1459  			}
  1460  			marker = resp.Contents[len(resp.Contents)-1].Key
  1461  		} else {
  1462  			marker = resp.NextMarker
  1463  		}
  1464  		if urlEncodeListings {
  1465  			*marker, err = url.QueryUnescape(*marker)
  1466  			if err != nil {
  1467  				return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
  1468  			}
  1469  		}
  1470  	}
  1471  	return nil
  1472  }
  1473  
  1474  // Convert a list item into a DirEntry
  1475  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
  1476  	if isDirectory {
  1477  		size := int64(0)
  1478  		if object.Size != nil {
  1479  			size = *object.Size
  1480  		}
  1481  		d := fs.NewDir(remote, time.Time{}).SetSize(size)
  1482  		return d, nil
  1483  	}
  1484  	o, err := f.newObjectWithInfo(ctx, remote, object)
  1485  	if err != nil {
  1486  		return nil, err
  1487  	}
  1488  	return o, nil
  1489  }
  1490  
  1491  // listDir lists files and directories to out
  1492  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
  1493  	// List the objects and directories
  1494  	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *s3.Object, isDirectory bool) error {
  1495  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
  1496  		if err != nil {
  1497  			return err
  1498  		}
  1499  		if entry != nil {
  1500  			entries = append(entries, entry)
  1501  		}
  1502  		return nil
  1503  	})
  1504  	if err != nil {
  1505  		return nil, err
  1506  	}
  1507  	// bucket must be present if listing succeeded
  1508  	f.cache.MarkOK(bucket)
  1509  	return entries, nil
  1510  }
  1511  
  1512  // listBuckets lists the buckets to out
  1513  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
  1514  	req := s3.ListBucketsInput{}
  1515  	var resp *s3.ListBucketsOutput
  1516  	err = f.pacer.Call(func() (bool, error) {
  1517  		resp, err = f.c.ListBucketsWithContext(ctx, &req)
  1518  		return f.shouldRetry(err)
  1519  	})
  1520  	if err != nil {
  1521  		return nil, err
  1522  	}
  1523  	for _, bucket := range resp.Buckets {
  1524  		bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name))
  1525  		f.cache.MarkOK(bucketName)
  1526  		d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
  1527  		entries = append(entries, d)
  1528  	}
  1529  	return entries, nil
  1530  }
  1531  
  1532  // List the objects and directories in dir into entries.  The
  1533  // entries can be returned in any order but should be for a
  1534  // complete directory.
  1535  //
  1536  // dir should be "" to list the root, and should not have
  1537  // trailing slashes.
  1538  //
  1539  // This should return ErrDirNotFound if the directory isn't
  1540  // found.
  1541  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1542  	bucket, directory := f.split(dir)
  1543  	if bucket == "" {
  1544  		if directory != "" {
  1545  			return nil, fs.ErrorListBucketRequired
  1546  		}
  1547  		return f.listBuckets(ctx)
  1548  	}
  1549  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
  1550  }
  1551  
  1552  // ListR lists the objects and directories of the Fs starting
  1553  // from dir recursively into out.
  1554  //
  1555  // dir should be "" to start from the root, and should not
  1556  // have trailing slashes.
  1557  //
  1558  // This should return ErrDirNotFound if the directory isn't
  1559  // found.
  1560  //
  1561  // It should call callback for each tranche of entries read.
  1562  // These need not be returned in any particular order.  If
  1563  // callback returns an error then the listing will stop
  1564  // immediately.
  1565  //
  1566  // Don't implement this unless you have a more efficient way
  1567  // of listing recursively than doing a directory traversal.
  1568  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
  1569  	bucket, directory := f.split(dir)
  1570  	list := walk.NewListRHelper(callback)
  1571  	listR := func(bucket, directory, prefix string, addBucket bool) error {
  1572  		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error {
  1573  			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
  1574  			if err != nil {
  1575  				return err
  1576  			}
  1577  			return list.Add(entry)
  1578  		})
  1579  	}
  1580  	if bucket == "" {
  1581  		entries, err := f.listBuckets(ctx)
  1582  		if err != nil {
  1583  			return err
  1584  		}
  1585  		for _, entry := range entries {
  1586  			err = list.Add(entry)
  1587  			if err != nil {
  1588  				return err
  1589  			}
  1590  			bucket := entry.Remote()
  1591  			err = listR(bucket, "", f.rootDirectory, true)
  1592  			if err != nil {
  1593  				return err
  1594  			}
  1595  			// bucket must be present if listing succeeded
  1596  			f.cache.MarkOK(bucket)
  1597  		}
  1598  	} else {
  1599  		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
  1600  		if err != nil {
  1601  			return err
  1602  		}
  1603  		// bucket must be present if listing succeeded
  1604  		f.cache.MarkOK(bucket)
  1605  	}
  1606  	return list.Flush()
  1607  }
  1608  
  1609  // Put the Object into the bucket
  1610  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction (named o rather than fs so
	// as not to shadow the fs package)
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
  1617  }
  1618  
// PutStream uploads to the remote path with the given modTime and of indeterminate size
  1620  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1621  	return f.Put(ctx, in, src, options...)
  1622  }
  1623  
  1624  // Check if the bucket exists
  1625  //
  1626  // NB this can return incorrect results if called immediately after bucket deletion
  1627  func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
  1628  	req := s3.HeadBucketInput{
  1629  		Bucket: &bucket,
  1630  	}
  1631  	err := f.pacer.Call(func() (bool, error) {
  1632  		_, err := f.c.HeadBucketWithContext(ctx, &req)
  1633  		return f.shouldRetry(err)
  1634  	})
  1635  	if err == nil {
  1636  		return true, nil
  1637  	}
  1638  	if err, ok := err.(awserr.RequestFailure); ok {
  1639  		if err.StatusCode() == http.StatusNotFound {
  1640  			return false, nil
  1641  		}
  1642  	}
  1643  	return false, err
  1644  }
  1645  
  1646  // Mkdir creates the bucket if it doesn't exist
  1647  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1648  	bucket, _ := f.split(dir)
  1649  	return f.makeBucket(ctx, bucket)
  1650  }
  1651  
  1652  // makeBucket creates the bucket if it doesn't exist
  1653  func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
  1654  	return f.cache.Create(bucket, func() error {
  1655  		req := s3.CreateBucketInput{
  1656  			Bucket: &bucket,
  1657  			ACL:    &f.opt.BucketACL,
  1658  		}
  1659  		if f.opt.LocationConstraint != "" {
  1660  			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
  1661  				LocationConstraint: &f.opt.LocationConstraint,
  1662  			}
  1663  		}
  1664  		err := f.pacer.Call(func() (bool, error) {
  1665  			_, err := f.c.CreateBucketWithContext(ctx, &req)
  1666  			return f.shouldRetry(err)
  1667  		})
  1668  		if err == nil {
  1669  			fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
  1670  		}
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "BucketAlreadyOwnedByYou" {
				// not an error if we already own the bucket
				err = nil
			}
		}
		return err
  1677  	}, func() (bool, error) {
  1678  		return f.bucketExists(ctx, bucket)
  1679  	})
  1680  }
  1681  
  1682  // Rmdir deletes the bucket if the fs is at the root
  1683  //
  1684  // Returns an error if it isn't empty
  1685  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1686  	bucket, directory := f.split(dir)
  1687  	if bucket == "" || directory != "" {
  1688  		return nil
  1689  	}
  1690  	return f.cache.Remove(bucket, func() error {
  1691  		req := s3.DeleteBucketInput{
  1692  			Bucket: &bucket,
  1693  		}
  1694  		err := f.pacer.Call(func() (bool, error) {
  1695  			_, err := f.c.DeleteBucketWithContext(ctx, &req)
  1696  			return f.shouldRetry(err)
  1697  		})
  1698  		if err == nil {
  1699  			fs.Infof(f, "Bucket %q deleted", bucket)
  1700  		}
  1701  		return err
  1702  	})
  1703  }
  1704  
  1705  // Precision of the remote
  1706  func (f *Fs) Precision() time.Duration {
  1707  	return time.Nanosecond
  1708  }
  1709  
  1710  // pathEscape escapes s as for a URL path.  It uses rest.URLPathEscape
  1711  // but also escapes '+' for S3 and Digital Ocean spaces compatibility
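// For example "a+b c" would become "a%2Bb%20c" (assuming
// rest.URLPathEscape behaves like url.PathEscape and leaves '+' alone).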
  1712  func pathEscape(s string) string {
  1713  	return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
  1714  }
  1715  
// copy does a server-side copy
//
// It adds the boilerplate to the req passed in and calls the s3
// method
  1720  func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
  1721  	req.Bucket = &dstBucket
  1722  	req.ACL = &f.opt.ACL
  1723  	req.Key = &dstPath
  1724  	source := pathEscape(path.Join(srcBucket, srcPath))
  1725  	req.CopySource = &source
  1726  	if f.opt.ServerSideEncryption != "" {
  1727  		req.ServerSideEncryption = &f.opt.ServerSideEncryption
  1728  	}
  1729  	if f.opt.SSEKMSKeyID != "" {
  1730  		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
  1731  	}
  1732  	if req.StorageClass == nil && f.opt.StorageClass != "" {
  1733  		req.StorageClass = &f.opt.StorageClass
  1734  	}
  1735  
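	// Copies of CopyCutoff bytes or more are done in chunks server-side
	// using UploadPartCopy, smaller ones with a single CopyObject call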
  1736  	if srcSize >= int64(f.opt.CopyCutoff) {
  1737  		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
  1738  	}
  1739  	return f.pacer.Call(func() (bool, error) {
  1740  		_, err := f.c.CopyObjectWithContext(ctx, req)
  1741  		return f.shouldRetry(err)
  1742  	})
  1743  }
  1744  
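// calculateRange works out the content range for one part of a multipart
// copy, with partIndex counting from 0. For example with partSize=5,
// numParts=3 and totalSize=12 it gives "bytes=5-9" for partIndex=1 and
// "bytes=10-11" for the final partIndex=2. A zero length source leaves
// the end empty, giving "bytes=0-".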
  1745  func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
  1746  	start := partIndex * partSize
  1747  	var ends string
  1748  	if partIndex == numParts-1 {
  1749  		if totalSize >= 1 {
  1750  			ends = strconv.FormatInt(totalSize-1, 10)
  1751  		}
  1752  	} else {
  1753  		ends = strconv.FormatInt(start+partSize-1, 10)
  1754  	}
  1755  	return fmt.Sprintf("bytes=%v-%v", start, ends)
  1756  }
  1757  
  1758  func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
  1759  	var cout *s3.CreateMultipartUploadOutput
  1760  	if err := f.pacer.Call(func() (bool, error) {
  1761  		var err error
  1762  		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
  1763  			Bucket: &dstBucket,
  1764  			Key:    &dstPath,
  1765  		})
  1766  		return f.shouldRetry(err)
  1767  	}); err != nil {
  1768  		return err
  1769  	}
  1770  	uid := cout.UploadId
  1771  
  1772  	defer func() {
  1773  		if err != nil {
  1774  			// We can try to abort the upload, but ignore the error.
  1775  			_ = f.pacer.Call(func() (bool, error) {
  1776  				_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
  1777  					Bucket:       &dstBucket,
  1778  					Key:          &dstPath,
  1779  					UploadId:     uid,
  1780  					RequestPayer: req.RequestPayer,
  1781  				})
  1782  				return f.shouldRetry(err)
  1783  			})
  1784  		}
  1785  	}()
  1786  
  1787  	partSize := int64(f.opt.CopyCutoff)
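	// work out the number of parts, rounding up - e.g. srcSize=10 with
	// partSize=4 needs 3 parts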
  1788  	numParts := (srcSize-1)/partSize + 1
  1789  
  1790  	var parts []*s3.CompletedPart
  1791  	for partNum := int64(1); partNum <= numParts; partNum++ {
  1792  		if err := f.pacer.Call(func() (bool, error) {
			partNum := partNum // local copy so the pointers stored in the requests below stay stable
  1794  			uploadPartReq := &s3.UploadPartCopyInput{
  1795  				Bucket:          &dstBucket,
  1796  				Key:             &dstPath,
  1797  				PartNumber:      &partNum,
  1798  				UploadId:        uid,
  1799  				CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
  1800  				// Args copy from req
  1801  				CopySource:                     req.CopySource,
  1802  				CopySourceIfMatch:              req.CopySourceIfMatch,
  1803  				CopySourceIfModifiedSince:      req.CopySourceIfModifiedSince,
  1804  				CopySourceIfNoneMatch:          req.CopySourceIfNoneMatch,
  1805  				CopySourceIfUnmodifiedSince:    req.CopySourceIfUnmodifiedSince,
  1806  				CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
  1807  				CopySourceSSECustomerKey:       req.CopySourceSSECustomerKey,
  1808  				CopySourceSSECustomerKeyMD5:    req.CopySourceSSECustomerKeyMD5,
  1809  				RequestPayer:                   req.RequestPayer,
  1810  				SSECustomerAlgorithm:           req.SSECustomerAlgorithm,
  1811  				SSECustomerKey:                 req.SSECustomerKey,
  1812  				SSECustomerKeyMD5:              req.SSECustomerKeyMD5,
  1813  			}
  1814  			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
  1815  			if err != nil {
  1816  				return f.shouldRetry(err)
  1817  			}
  1818  			parts = append(parts, &s3.CompletedPart{
  1819  				PartNumber: &partNum,
  1820  				ETag:       uout.CopyPartResult.ETag,
  1821  			})
  1822  			return false, nil
  1823  		}); err != nil {
  1824  			return err
  1825  		}
  1826  	}
  1827  
  1828  	return f.pacer.Call(func() (bool, error) {
  1829  		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
  1830  			Bucket: &dstBucket,
  1831  			Key:    &dstPath,
  1832  			MultipartUpload: &s3.CompletedMultipartUpload{
  1833  				Parts: parts,
  1834  			},
  1835  			RequestPayer: req.RequestPayer,
  1836  			UploadId:     uid,
  1837  		})
  1838  		return f.shouldRetry(err)
  1839  	})
  1840  }
  1841  
// Copy src to this remote using server-side copy operations.
  1843  //
  1844  // This is stored with the remote path given
  1845  //
  1846  // It returns the destination Object and a possible error
  1847  //
  1848  // Will only be called if src.Fs().Name() == f.Name()
  1849  //
  1850  // If it isn't possible then return fs.ErrorCantCopy
  1851  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1852  	dstBucket, dstPath := f.split(remote)
  1853  	err := f.makeBucket(ctx, dstBucket)
  1854  	if err != nil {
  1855  		return nil, err
  1856  	}
  1857  	srcObj, ok := src.(*Object)
  1858  	if !ok {
  1859  		fs.Debugf(src, "Can't copy - not same remote type")
  1860  		return nil, fs.ErrorCantCopy
  1861  	}
  1862  	srcBucket, srcPath := srcObj.split()
  1863  	req := s3.CopyObjectInput{
  1864  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  1865  	}
  1866  	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size())
  1867  	if err != nil {
  1868  		return nil, err
  1869  	}
  1870  	return f.NewObject(ctx, remote)
  1871  }
  1872  
  1873  // Hashes returns the supported hash sets.
  1874  func (f *Fs) Hashes() hash.Set {
  1875  	return hash.Set(hash.MD5)
  1876  }
  1877  
  1878  // ------------------------------------------------------------
  1879  
  1880  // Fs returns the parent Fs
  1881  func (o *Object) Fs() fs.Info {
  1882  	return o.fs
  1883  }
  1884  
  1885  // Return a string version
  1886  func (o *Object) String() string {
  1887  	if o == nil {
  1888  		return "<nil>"
  1889  	}
  1890  	return o.remote
  1891  }
  1892  
  1893  // Remote returns the remote path
  1894  func (o *Object) Remote() string {
  1895  	return o.remote
  1896  }
  1897  
  1898  var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
  1899  
  1900  // Hash returns the Md5sum of an object returning a lowercase hex string
  1901  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1902  	if t != hash.MD5 {
  1903  		return "", hash.ErrUnsupported
  1904  	}
  1905  	hash := strings.Trim(strings.ToLower(o.etag), `"`)
  1906  	// Check the etag is a valid md5sum
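	// (multipart uploads have an ETag of the form "hash-N" which is not
	// the MD5 of the object, so fall back to the MD5 we stored in the
	// metadata at upload time - see metaMD5Hash in Update)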
  1907  	if !matchMd5.MatchString(hash) {
  1908  		err := o.readMetaData(ctx)
  1909  		if err != nil {
  1910  			return "", err
  1911  		}
  1912  
  1913  		if md5sum, ok := o.meta[metaMD5Hash]; ok {
  1914  			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
  1915  			if err != nil {
  1916  				return "", err
  1917  			}
  1918  			hash = hex.EncodeToString(md5sumBytes)
  1919  		} else {
  1920  			hash = ""
  1921  		}
  1922  	}
  1923  	return hash, nil
  1924  }
  1925  
  1926  // Size returns the size of an object in bytes
  1927  func (o *Object) Size() int64 {
  1928  	return o.bytes
  1929  }
  1930  
  1931  // readMetaData gets the metadata if it hasn't already been fetched
  1932  //
  1933  // it also sets the info
  1934  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1935  	if o.meta != nil {
  1936  		return nil
  1937  	}
  1938  	bucket, bucketPath := o.split()
  1939  	req := s3.HeadObjectInput{
  1940  		Bucket: &bucket,
  1941  		Key:    &bucketPath,
  1942  	}
  1943  	var resp *s3.HeadObjectOutput
  1944  	err = o.fs.pacer.Call(func() (bool, error) {
  1945  		var err error
  1946  		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
  1947  		return o.fs.shouldRetry(err)
  1948  	})
  1949  	if err != nil {
  1950  		if awsErr, ok := err.(awserr.RequestFailure); ok {
  1951  			if awsErr.StatusCode() == http.StatusNotFound {
  1952  				return fs.ErrorObjectNotFound
  1953  			}
  1954  		}
  1955  		return err
  1956  	}
  1957  	var size int64
	// Ignore a missing Content-Length, assuming it is 0
	// Some versions of ceph do this due to their Apache proxies
  1960  	if resp.ContentLength != nil {
  1961  		size = *resp.ContentLength
  1962  	}
  1963  	o.etag = aws.StringValue(resp.ETag)
  1964  	o.bytes = size
  1965  	o.meta = resp.Metadata
  1966  	if o.meta == nil {
  1967  		o.meta = map[string]*string{}
  1968  	}
  1969  	o.storageClass = aws.StringValue(resp.StorageClass)
  1970  	if resp.LastModified == nil {
		// NB err is nil at this point so there is nothing useful to log
		fs.Logf(o, "Failed to read last modified from HEAD")
  1972  		o.lastModified = time.Now()
  1973  	} else {
  1974  		o.lastModified = *resp.LastModified
  1975  	}
  1976  	o.mimeType = aws.StringValue(resp.ContentType)
  1977  	return nil
  1978  }
  1979  
  1980  // ModTime returns the modification time of the object
  1981  //
// It attempts to read the object's mtime from the metadata, and if that
// isn't present falls back to the LastModified returned in the HTTP headers
  1984  func (o *Object) ModTime(ctx context.Context) time.Time {
  1985  	if fs.Config.UseServerModTime {
  1986  		return o.lastModified
  1987  	}
  1988  	err := o.readMetaData(ctx)
  1989  	if err != nil {
  1990  		fs.Logf(o, "Failed to read metadata: %v", err)
  1991  		return time.Now()
  1992  	}
  1993  	// read mtime out of metadata if available
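	// (stored as a floating point number of seconds since the Unix
	// epoch - the same format as the swift backend uses)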
  1994  	d, ok := o.meta[metaMtime]
  1995  	if !ok || d == nil {
  1996  		// fs.Debugf(o, "No metadata")
  1997  		return o.lastModified
  1998  	}
  1999  	modTime, err := swift.FloatStringToTime(*d)
  2000  	if err != nil {
  2001  		fs.Logf(o, "Failed to read mtime from object: %v", err)
  2002  		return o.lastModified
  2003  	}
  2004  	return modTime
  2005  }
  2006  
  2007  // SetModTime sets the modification time of the local fs object
  2008  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  2009  	err := o.readMetaData(ctx)
  2010  	if err != nil {
  2011  		return err
  2012  	}
  2013  	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
  2014  
  2015  	// Can't update metadata here, so return this error to force a recopy
  2016  	if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
  2017  		return fs.ErrorCantSetModTime
  2018  	}
  2019  
  2020  	// Copy the object to itself to update the metadata
  2021  	bucket, bucketPath := o.split()
  2022  	req := s3.CopyObjectInput{
  2023  		ContentType:       aws.String(fs.MimeType(ctx, o)), // Guess the content type
  2024  		Metadata:          o.meta,
  2025  		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
  2026  	}
  2027  	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
  2028  }
  2029  
// Storable returns a boolean indicating whether this object is storable
  2031  func (o *Object) Storable() bool {
  2032  	return true
  2033  }
  2034  
  2035  // Open an object for read
  2036  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  2037  	bucket, bucketPath := o.split()
  2038  	req := s3.GetObjectInput{
  2039  		Bucket: &bucket,
  2040  		Key:    &bucketPath,
  2041  	}
  2042  	fs.FixRangeOption(options, o.bytes)
  2043  	for _, option := range options {
  2044  		switch option.(type) {
  2045  		case *fs.RangeOption, *fs.SeekOption:
  2046  			_, value := option.Header()
  2047  			req.Range = &value
  2048  		default:
  2049  			if option.Mandatory() {
  2050  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  2051  			}
  2052  		}
  2053  	}
  2054  	var resp *s3.GetObjectOutput
  2055  	err = o.fs.pacer.Call(func() (bool, error) {
  2056  		var err error
  2057  		resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
  2058  		return o.fs.shouldRetry(err)
  2059  	})
  2060  	if err, ok := err.(awserr.RequestFailure); ok {
  2061  		if err.Code() == "InvalidObjectState" {
  2062  			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
  2063  		}
  2064  	}
  2065  	if err != nil {
  2066  		return nil, err
  2067  	}
  2068  	return resp.Body, nil
  2069  }
  2070  
  2071  var warnStreamUpload sync.Once
  2072  
  2073  func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
  2074  	f := o.fs
  2075  
  2076  	// make concurrency machinery
  2077  	concurrency := f.opt.UploadConcurrency
  2078  	if concurrency < 1 {
  2079  		concurrency = 1
  2080  	}
  2081  	bufs := make(chan []byte, concurrency)
  2082  	defer func() {
  2083  		// empty the channel on exit
  2084  		close(bufs)
  2085  		for range bufs {
  2086  		}
  2087  	}()
  2088  	for i := 0; i < concurrency; i++ {
  2089  		bufs <- nil
  2090  	}
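	// The bufs channel doubles as a buffer pool and a concurrency
	// limiter: an uploader must receive a buffer (nil before first
	// use) before reading a chunk and sends it back when done, so at
	// most concurrency parts are in flight at once.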
  2091  
  2092  	// calculate size of parts
  2093  	partSize := int(f.opt.ChunkSize)
  2094  
	// size can be -1 here meaning we don't know the size of the incoming file.  We use ChunkSize
	// buffers here (default 5MB). With a maximum number of parts (10,000) this gives a maximum
	// file size of about 48GB, which seems a reasonable limit.
  2098  	if size == -1 {
  2099  		warnStreamUpload.Do(func() {
  2100  			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
  2101  				f.opt.ChunkSize, fs.SizeSuffix(partSize*maxUploadParts))
  2102  		})
  2103  	} else {
  2104  		// Adjust partSize until the number of parts is small enough.
  2105  		if size/int64(partSize) >= maxUploadParts {
  2106  			// Calculate partition size rounded up to the nearest MB
  2107  			partSize = int((((size / maxUploadParts) >> 20) + 1) << 20)
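			// e.g. for a 100 GiB upload size/maxUploadParts is just
			// over 10 MiB, so this rounds the part size up to 11 MiB,
			// giving about 9,310 parts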
  2108  		}
  2109  	}
  2110  
  2111  	var cout *s3.CreateMultipartUploadOutput
  2112  	err = f.pacer.Call(func() (bool, error) {
  2113  		var err error
  2114  		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
  2115  			Bucket:               req.Bucket,
  2116  			ACL:                  req.ACL,
  2117  			Key:                  req.Key,
  2118  			ContentType:          req.ContentType,
  2119  			Metadata:             req.Metadata,
  2120  			ServerSideEncryption: req.ServerSideEncryption,
  2121  			SSEKMSKeyId:          req.SSEKMSKeyId,
  2122  			StorageClass:         req.StorageClass,
  2123  		})
  2124  		return f.shouldRetry(err)
  2125  	})
  2126  	if err != nil {
  2127  		return errors.Wrap(err, "multipart upload failed to initialise")
  2128  	}
  2129  	uid := cout.UploadId
  2130  
  2131  	defer func() {
  2132  		if o.fs.opt.LeavePartsOnError {
  2133  			return
  2134  		}
  2135  		if err != nil {
  2136  			// We can try to abort the upload, but ignore the error.
  2137  			fs.Debugf(o, "Cancelling multipart upload")
  2138  			errCancel := f.pacer.Call(func() (bool, error) {
  2139  				_, err := f.c.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
  2140  					Bucket:       req.Bucket,
  2141  					Key:          req.Key,
  2142  					UploadId:     uid,
  2143  					RequestPayer: req.RequestPayer,
  2144  				})
  2145  				return f.shouldRetry(err)
  2146  			})
  2147  			if errCancel != nil {
  2148  				fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
  2149  			}
  2150  		}
  2151  	}()
  2152  
  2153  	var (
  2154  		g, gCtx  = errgroup.WithContext(ctx)
  2155  		finished = false
  2156  		partsMu  sync.Mutex // to protect parts
  2157  		parts    []*s3.CompletedPart
  2158  		off      int64
  2159  	)
  2160  
  2161  	for partNum := int64(1); !finished; partNum++ {
  2162  		// Get a block of memory from the channel (which limits concurrency)
  2163  		buf := <-bufs
  2164  		if buf == nil {
  2165  			buf = make([]byte, partSize)
  2166  		}
  2167  
  2168  		// Read the chunk
  2169  		var n int
  2170  		n, err = readers.ReadFill(in, buf) // this can never return 0, nil
  2171  		if err == io.EOF {
  2172  			if n == 0 && partNum != 1 { // end if no data and if not first chunk
  2173  				break
  2174  			}
  2175  			finished = true
  2176  		} else if err != nil {
  2177  			return errors.Wrap(err, "multipart upload failed to read source")
  2178  		}
  2179  		buf = buf[:n]
  2180  
  2181  		partNum := partNum
  2182  		fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
  2183  		off += int64(n)
  2184  		g.Go(func() (err error) {
  2185  			partLength := int64(len(buf))
  2186  
  2187  			// create checksum of buffer for integrity checking
  2188  			md5sumBinary := md5.Sum(buf)
  2189  			md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
  2190  
  2191  			err = f.pacer.Call(func() (bool, error) {
  2192  				uploadPartReq := &s3.UploadPartInput{
  2193  					Body:                 bytes.NewReader(buf),
  2194  					Bucket:               req.Bucket,
  2195  					Key:                  req.Key,
  2196  					PartNumber:           &partNum,
  2197  					UploadId:             uid,
  2198  					ContentMD5:           &md5sum,
  2199  					ContentLength:        &partLength,
  2200  					RequestPayer:         req.RequestPayer,
  2201  					SSECustomerAlgorithm: req.SSECustomerAlgorithm,
  2202  					SSECustomerKey:       req.SSECustomerKey,
  2203  					SSECustomerKeyMD5:    req.SSECustomerKeyMD5,
  2204  				}
  2205  				uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
  2206  				if err != nil {
  2207  					if partNum <= int64(concurrency) {
  2208  						return f.shouldRetry(err)
  2209  					}
					// retry all chunks once we have done the first batch
  2211  					return true, err
  2212  				}
  2213  				partsMu.Lock()
  2214  				parts = append(parts, &s3.CompletedPart{
  2215  					PartNumber: &partNum,
  2216  					ETag:       uout.ETag,
  2217  				})
  2218  				partsMu.Unlock()
  2219  
  2220  				return false, nil
  2221  			})
  2222  
			// return the memory to the pool, re-sliced back to its full
			// size so it can be reused for the next part
  2224  			bufs <- buf[:partSize]
  2225  
  2226  			if err != nil {
  2227  				return errors.Wrap(err, "multipart upload failed to upload part")
  2228  			}
  2229  			return nil
  2230  		})
  2231  	}
  2232  	err = g.Wait()
  2233  	if err != nil {
  2234  		return err
  2235  	}
  2236  
  2237  	// sort the completed parts by part number
  2238  	sort.Slice(parts, func(i, j int) bool {
  2239  		return *parts[i].PartNumber < *parts[j].PartNumber
  2240  	})
  2241  
  2242  	err = f.pacer.Call(func() (bool, error) {
  2243  		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
  2244  			Bucket: req.Bucket,
  2245  			Key:    req.Key,
  2246  			MultipartUpload: &s3.CompletedMultipartUpload{
  2247  				Parts: parts,
  2248  			},
  2249  			RequestPayer: req.RequestPayer,
  2250  			UploadId:     uid,
  2251  		})
  2252  		return f.shouldRetry(err)
  2253  	})
  2254  	if err != nil {
  2255  		return errors.Wrap(err, "multipart upload failed to finalise")
  2256  	}
  2257  	return nil
  2258  }
  2259  
  2260  // Update the Object from in with modTime and size
  2261  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  2262  	bucket, bucketPath := o.split()
  2263  	err := o.fs.makeBucket(ctx, bucket)
  2264  	if err != nil {
  2265  		return err
  2266  	}
  2267  	modTime := src.ModTime(ctx)
  2268  	size := src.Size()
  2269  
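	// Uploads of unknown size, or of UploadCutoff bytes or more, use
	// the multipart API; smaller uploads use a single presigned PUT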
  2270  	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
  2271  
  2272  	// Set the mtime in the meta data
  2273  	metadata := map[string]*string{
  2274  		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
  2275  	}
  2276  
  2277  	// read the md5sum if available
	// - for non-multipart
  2279  	//    - so we can add a ContentMD5
  2280  	// - for multipart provided checksums aren't disabled
  2281  	//    - so we can add the md5sum in the metadata as metaMD5Hash
  2282  	var md5sum string
  2283  	if !multipart || !o.fs.opt.DisableChecksum {
  2284  		hash, err := src.Hash(ctx, hash.MD5)
  2285  		if err == nil && matchMd5.MatchString(hash) {
  2286  			hashBytes, err := hex.DecodeString(hash)
  2287  			if err == nil {
  2288  				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
  2289  				if multipart {
  2290  					metadata[metaMD5Hash] = &md5sum
  2291  				}
  2292  			}
  2293  		}
  2294  	}
  2295  
  2296  	// Guess the content type
  2297  	mimeType := fs.MimeType(ctx, src)
  2298  	req := s3.PutObjectInput{
  2299  		Bucket:      &bucket,
  2300  		ACL:         &o.fs.opt.ACL,
  2301  		Key:         &bucketPath,
  2302  		ContentType: &mimeType,
  2303  		Metadata:    metadata,
  2304  	}
  2305  	if md5sum != "" {
  2306  		req.ContentMD5 = &md5sum
  2307  	}
  2308  	if o.fs.opt.ServerSideEncryption != "" {
  2309  		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
  2310  	}
  2311  	if o.fs.opt.SSEKMSKeyID != "" {
  2312  		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
  2313  	}
  2314  	if o.fs.opt.StorageClass != "" {
  2315  		req.StorageClass = &o.fs.opt.StorageClass
  2316  	}
  2317  
  2318  	if multipart {
  2319  		err = o.uploadMultipart(ctx, &req, size, in)
  2320  		if err != nil {
  2321  			return err
  2322  		}
  2323  	} else {
  2324  
  2325  		// Create the request
  2326  		putObj, _ := o.fs.c.PutObjectRequest(&req)
  2327  
  2328  		// Sign it so we can upload using a presigned request.
  2329  		//
  2330  		// Note the SDK doesn't currently support streaming to
  2331  		// PutObject so we'll use this work-around.
  2332  		url, headers, err := putObj.PresignRequest(15 * time.Minute)
  2333  		if err != nil {
  2334  			return errors.Wrap(err, "s3 upload: sign request")
  2335  		}
  2336  
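		// PresignRequest doesn't return signed headers for v2 auth, so
		// take the headers from the SDK's underlying HTTP request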
  2337  		if o.fs.opt.V2Auth && headers == nil {
  2338  			headers = putObj.HTTPRequest.Header
  2339  		}
  2340  
		// Set the body to nil if empty so we don't send chunked transfer encoding
  2342  		if size == 0 {
  2343  			in = nil
  2344  		}
  2345  
  2346  		// create the vanilla http request
  2347  		httpReq, err := http.NewRequest("PUT", url, in)
  2348  		if err != nil {
  2349  			return errors.Wrap(err, "s3 upload: new request")
  2350  		}
  2351  		httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
  2352  
  2353  		// set the headers we signed and the length
  2354  		httpReq.Header = headers
  2355  		httpReq.ContentLength = size
  2356  
  2357  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  2358  			resp, err := o.fs.srv.Do(httpReq)
  2359  			if err != nil {
  2360  				return o.fs.shouldRetry(err)
  2361  			}
  2362  			body, err := rest.ReadBody(resp)
  2363  			if err != nil {
  2364  				return o.fs.shouldRetry(err)
  2365  			}
			if resp.StatusCode >= 200 && resp.StatusCode < 300 { // any 2xx status is success
  2367  				return false, nil
  2368  			}
  2369  			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
  2370  			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
  2371  		})
  2372  		if err != nil {
  2373  			return err
  2374  		}
  2375  	}
  2376  
  2377  	// Read the metadata from the newly created object
  2378  	o.meta = nil // wipe old metadata
  2379  	err = o.readMetaData(ctx)
  2380  	return err
  2381  }
  2382  
  2383  // Remove an object
  2384  func (o *Object) Remove(ctx context.Context) error {
  2385  	bucket, bucketPath := o.split()
  2386  	req := s3.DeleteObjectInput{
  2387  		Bucket: &bucket,
  2388  		Key:    &bucketPath,
  2389  	}
  2390  	err := o.fs.pacer.Call(func() (bool, error) {
  2391  		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
  2392  		return o.fs.shouldRetry(err)
  2393  	})
  2394  	return err
  2395  }
  2396  
  2397  // MimeType of an Object if known, "" otherwise
  2398  func (o *Object) MimeType(ctx context.Context) string {
  2399  	err := o.readMetaData(ctx)
  2400  	if err != nil {
  2401  		fs.Logf(o, "Failed to read metadata: %v", err)
  2402  		return ""
  2403  	}
  2404  	return o.mimeType
  2405  }
  2406  
// SetTier changes the storage class of the object
  2408  func (o *Object) SetTier(tier string) (err error) {
  2409  	ctx := context.TODO()
  2410  	tier = strings.ToUpper(tier)
  2411  	bucket, bucketPath := o.split()
  2412  	req := s3.CopyObjectInput{
  2413  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  2414  		StorageClass:      aws.String(tier),
  2415  	}
  2416  	err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
  2417  	if err != nil {
  2418  		return err
  2419  	}
  2420  	o.storageClass = tier
  2421  	return err
  2422  }
  2423  
  2424  // GetTier returns storage class as string
  2425  func (o *Object) GetTier() string {
  2426  	if o.storageClass == "" {
  2427  		return "STANDARD"
  2428  	}
  2429  	return o.storageClass
  2430  }
  2431  
  2432  // Check the interfaces are satisfied
  2433  var (
  2434  	_ fs.Fs          = &Fs{}
  2435  	_ fs.Copier      = &Fs{}
  2436  	_ fs.PutStreamer = &Fs{}
  2437  	_ fs.ListRer     = &Fs{}
  2438  	_ fs.Object      = &Object{}
  2439  	_ fs.MimeTyper   = &Object{}
  2440  	_ fs.GetTierer   = &Object{}
  2441  	_ fs.SetTierer   = &Object{}
  2442  )