github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/s3/s3.go

     1  // Package s3 provides an interface to Amazon S3 object storage
     2  package s3
     3  
     4  // FIXME need to prevent anything but ListDir working for s3://
     5  
     6  /*
     7  Progress of port to aws-sdk
     8  
     9   * Don't really need o.meta at all?
    10  
    11  What happens if you CTRL-C a multipart upload
    12    * get an incomplete upload
    13    * disappears when you delete the bucket
    14  */
    15  
    16  import (
    17  	"bytes"
    18  	"context"
    19  	"crypto/md5"
    20  	"encoding/base64"
    21  	"encoding/hex"
    22  	"encoding/xml"
    23  	"fmt"
    24  	"io"
    25  	"net/http"
    26  	"net/url"
    27  	"path"
    28  	"regexp"
    29  	"sort"
    30  	"strconv"
    31  	"strings"
    32  	"sync"
    33  	"time"
    34  
    35  	"github.com/aws/aws-sdk-go/aws"
    36  	"github.com/aws/aws-sdk-go/aws/awserr"
    37  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    38  	"github.com/aws/aws-sdk-go/aws/credentials"
    39  	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    40  	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    41  	"github.com/aws/aws-sdk-go/aws/defaults"
    42  	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    43  	"github.com/aws/aws-sdk-go/aws/request"
    44  	"github.com/aws/aws-sdk-go/aws/session"
    45  	"github.com/aws/aws-sdk-go/service/s3"
    46  	"github.com/ncw/swift"
    47  	"github.com/pkg/errors"
    48  	"github.com/rclone/rclone/fs"
    49  	"github.com/rclone/rclone/fs/config"
    50  	"github.com/rclone/rclone/fs/config/configmap"
    51  	"github.com/rclone/rclone/fs/config/configstruct"
    52  	"github.com/rclone/rclone/fs/fserrors"
    53  	"github.com/rclone/rclone/fs/fshttp"
    54  	"github.com/rclone/rclone/fs/hash"
    55  	"github.com/rclone/rclone/fs/walk"
    56  	"github.com/rclone/rclone/lib/bucket"
    57  	"github.com/rclone/rclone/lib/encoder"
    58  	"github.com/rclone/rclone/lib/pacer"
    59  	"github.com/rclone/rclone/lib/pool"
    60  	"github.com/rclone/rclone/lib/readers"
    61  	"github.com/rclone/rclone/lib/rest"
    62  	"github.com/rclone/rclone/lib/structs"
    63  	"golang.org/x/sync/errgroup"
    64  )
    65  
    66  // Register with Fs
    67  func init() {
    68  	fs.Register(&fs.RegInfo{
    69  		Name:        "s3",
    70  		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
    71  		NewFs:       NewFs,
    72  		Options: []fs.Option{{
    73  			Name: fs.ConfigProvider,
    74  			Help: "Choose your S3 provider.",
    75  			Examples: []fs.OptionExample{{
    76  				Value: "AWS",
    77  				Help:  "Amazon Web Services (AWS) S3",
    78  			}, {
    79  				Value: "Alibaba",
    80  				Help:  "Alibaba Cloud Object Storage System (OSS), formerly Aliyun",
    81  			}, {
    82  				Value: "Ceph",
    83  				Help:  "Ceph Object Storage",
    84  			}, {
    85  				Value: "DigitalOcean",
    86  				Help:  "Digital Ocean Spaces",
    87  			}, {
    88  				Value: "Dreamhost",
    89  				Help:  "Dreamhost DreamObjects",
    90  			}, {
    91  				Value: "IBMCOS",
    92  				Help:  "IBM COS S3",
    93  			}, {
    94  				Value: "Minio",
    95  				Help:  "Minio Object Storage",
    96  			}, {
    97  				Value: "Netease",
    98  				Help:  "Netease Object Storage (NOS)",
    99  			}, {
   100  				Value: "StackPath",
   101  				Help:  "StackPath Object Storage",
   102  			}, {
   103  				Value: "Wasabi",
   104  				Help:  "Wasabi Object Storage",
   105  			}, {
   106  				Value: "Other",
   107  				Help:  "Any other S3 compatible provider",
   108  			}},
   109  		}, {
   110  			Name:    "env_auth",
   111  			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS metadata if no env vars).\nOnly applies if access_key_id and secret_access_key are blank.",
   112  			Default: false,
   113  			Examples: []fs.OptionExample{{
   114  				Value: "false",
   115  				Help:  "Enter AWS credentials in the next step",
   116  			}, {
   117  				Value: "true",
   118  				Help:  "Get AWS credentials from the environment (env vars or IAM)",
   119  			}},
   120  		}, {
   121  			Name: "access_key_id",
   122  			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
   123  		}, {
   124  			Name: "secret_access_key",
   125  			Help: "AWS Secret Access Key (password).\nLeave blank for anonymous access or runtime credentials.",
   126  		}, {
   127  			Name:     "region",
   128  			Help:     "Region to connect to.",
   129  			Provider: "AWS",
   130  			Examples: []fs.OptionExample{{
   131  				Value: "us-east-1",
   132  				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
   133  			}, {
   134  				Value: "us-east-2",
   135  				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
   136  			}, {
   137  				Value: "us-west-2",
   138  				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
   139  			}, {
   140  				Value: "us-west-1",
   141  				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
   142  			}, {
   143  				Value: "ca-central-1",
   144  				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
   145  			}, {
   146  				Value: "eu-west-1",
   147  				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
   148  			}, {
   149  				Value: "eu-west-2",
   150  				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
   151  			}, {
   152  				Value: "eu-north-1",
   153  				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
   154  			}, {
   155  				Value: "eu-central-1",
   156  				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
   157  			}, {
   158  				Value: "ap-southeast-1",
   159  				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
   160  			}, {
   161  				Value: "ap-southeast-2",
   162  				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
   163  			}, {
   164  				Value: "ap-northeast-1",
   165  				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
   166  			}, {
   167  				Value: "ap-northeast-2",
   168  				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
   169  			}, {
   170  				Value: "ap-south-1",
   171  				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
   172  			}, {
   173  				Value: "ap-east-1",
   174  				Help:  "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.",
   175  			}, {
   176  				Value: "sa-east-1",
   177  				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
   178  			}},
   179  		}, {
   180  			Name:     "region",
   181  			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
   182  			Provider: "!AWS,Alibaba",
   183  			Examples: []fs.OptionExample{{
   184  				Value: "",
   185  				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
   186  			}, {
   187  				Value: "other-v2-signature",
   188  				Help:  "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
   189  			}},
   190  		}, {
   191  			Name:     "endpoint",
   192  			Help:     "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
   193  			Provider: "AWS",
   194  		}, {
   195  			Name:     "endpoint",
   196  			Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
   197  			Provider: "IBMCOS",
   198  			Examples: []fs.OptionExample{{
   199  				Value: "s3-api.us-geo.objectstorage.softlayer.net",
   200  				Help:  "US Cross Region Endpoint",
   201  			}, {
   202  				Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
   203  				Help:  "US Cross Region Dallas Endpoint",
   204  			}, {
   205  				Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
   206  				Help:  "US Cross Region Washington DC Endpoint",
   207  			}, {
   208  				Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
   209  				Help:  "US Cross Region San Jose Endpoint",
   210  			}, {
   211  				Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
   212  				Help:  "US Cross Region Private Endpoint",
   213  			}, {
   214  				Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
   215  				Help:  "US Cross Region Dallas Private Endpoint",
   216  			}, {
   217  				Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
   218  				Help:  "US Cross Region Washington DC Private Endpoint",
   219  			}, {
   220  				Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
   221  				Help:  "US Cross Region San Jose Private Endpoint",
   222  			}, {
   223  				Value: "s3.us-east.objectstorage.softlayer.net",
   224  				Help:  "US Region East Endpoint",
   225  			}, {
   226  				Value: "s3.us-east.objectstorage.service.networklayer.com",
   227  				Help:  "US Region East Private Endpoint",
   228  			}, {
   229  				Value: "s3.us-south.objectstorage.softlayer.net",
   230  				Help:  "US Region South Endpoint",
   231  			}, {
   232  				Value: "s3.us-south.objectstorage.service.networklayer.com",
   233  				Help:  "US Region South Private Endpoint",
   234  			}, {
   235  				Value: "s3.eu-geo.objectstorage.softlayer.net",
   236  				Help:  "EU Cross Region Endpoint",
   237  			}, {
   238  				Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
   239  				Help:  "EU Cross Region Frankfurt Endpoint",
   240  			}, {
   241  				Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
   242  				Help:  "EU Cross Region Milan Endpoint",
   243  			}, {
   244  				Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
   245  				Help:  "EU Cross Region Amsterdam Endpoint",
   246  			}, {
   247  				Value: "s3.eu-geo.objectstorage.service.networklayer.com",
   248  				Help:  "EU Cross Region Private Endpoint",
   249  			}, {
   250  				Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
   251  				Help:  "EU Cross Region Frankfurt Private Endpoint",
   252  			}, {
   253  				Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
   254  				Help:  "EU Cross Region Milan Private Endpoint",
   255  			}, {
   256  				Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
   257  				Help:  "EU Cross Region Amsterdam Private Endpoint",
   258  			}, {
   259  				Value: "s3.eu-gb.objectstorage.softlayer.net",
   260  				Help:  "Great Britain Endpoint",
   261  			}, {
   262  				Value: "s3.eu-gb.objectstorage.service.networklayer.com",
   263  				Help:  "Great Britain Private Endpoint",
   264  			}, {
   265  				Value: "s3.ap-geo.objectstorage.softlayer.net",
   266  				Help:  "APAC Cross Regional Endpoint",
   267  			}, {
   268  				Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
   269  				Help:  "APAC Cross Regional Tokyo Endpoint",
   270  			}, {
   271  				Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
   272  				Help:  "APAC Cross Regional HongKong Endpoint",
   273  			}, {
   274  				Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
   275  				Help:  "APAC Cross Regional Seoul Endpoint",
   276  			}, {
   277  				Value: "s3.ap-geo.objectstorage.service.networklayer.com",
   278  				Help:  "APAC Cross Regional Private Endpoint",
   279  			}, {
   280  				Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
   281  				Help:  "APAC Cross Regional Tokyo Private Endpoint",
   282  			}, {
   283  				Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
   284  				Help:  "APAC Cross Regional HongKong Private Endpoint",
   285  			}, {
   286  				Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
   287  				Help:  "APAC Cross Regional Seoul Private Endpoint",
   288  			}, {
   289  				Value: "s3.mel01.objectstorage.softlayer.net",
   290  				Help:  "Melbourne Single Site Endpoint",
   291  			}, {
   292  				Value: "s3.mel01.objectstorage.service.networklayer.com",
   293  				Help:  "Melbourne Single Site Private Endpoint",
   294  			}, {
   295  				Value: "s3.tor01.objectstorage.softlayer.net",
   296  				Help:  "Toronto Single Site Endpoint",
   297  			}, {
   298  				Value: "s3.tor01.objectstorage.service.networklayer.com",
   299  				Help:  "Toronto Single Site Private Endpoint",
   300  			}},
   301  		}, {
   302  			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
   303  			Name:     "endpoint",
   304  			Help:     "Endpoint for OSS API.",
   305  			Provider: "Alibaba",
   306  			Examples: []fs.OptionExample{{
   307  				Value: "oss-cn-hangzhou.aliyuncs.com",
   308  				Help:  "East China 1 (Hangzhou)",
   309  			}, {
   310  				Value: "oss-cn-shanghai.aliyuncs.com",
   311  				Help:  "East China 2 (Shanghai)",
   312  			}, {
   313  				Value: "oss-cn-qingdao.aliyuncs.com",
   314  				Help:  "North China 1 (Qingdao)",
   315  			}, {
   316  				Value: "oss-cn-beijing.aliyuncs.com",
   317  				Help:  "North China 2 (Beijing)",
   318  			}, {
   319  				Value: "oss-cn-zhangjiakou.aliyuncs.com",
   320  				Help:  "North China 3 (Zhangjiakou)",
   321  			}, {
   322  				Value: "oss-cn-huhehaote.aliyuncs.com",
   323  				Help:  "North China 5 (Huhehaote)",
   324  			}, {
   325  				Value: "oss-cn-shenzhen.aliyuncs.com",
   326  				Help:  "South China 1 (Shenzhen)",
   327  			}, {
   328  				Value: "oss-cn-hongkong.aliyuncs.com",
   329  				Help:  "Hong Kong (Hong Kong)",
   330  			}, {
   331  				Value: "oss-us-west-1.aliyuncs.com",
   332  				Help:  "US West 1 (Silicon Valley)",
   333  			}, {
   334  				Value: "oss-us-east-1.aliyuncs.com",
   335  				Help:  "US East 1 (Virginia)",
   336  			}, {
   337  				Value: "oss-ap-southeast-1.aliyuncs.com",
   338  				Help:  "Southeast Asia Southeast 1 (Singapore)",
   339  			}, {
   340  				Value: "oss-ap-southeast-2.aliyuncs.com",
   341  				Help:  "Asia Pacific Southeast 2 (Sydney)",
   342  			}, {
   343  				Value: "oss-ap-southeast-3.aliyuncs.com",
   344  				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
   345  			}, {
   346  				Value: "oss-ap-southeast-5.aliyuncs.com",
   347  				Help:  "Asia Pacific Southeast 5 (Jakarta)",
   348  			}, {
   349  				Value: "oss-ap-northeast-1.aliyuncs.com",
   350  				Help:  "Asia Pacific Northeast 1 (Japan)",
   351  			}, {
   352  				Value: "oss-ap-south-1.aliyuncs.com",
   353  				Help:  "Asia Pacific South 1 (Mumbai)",
   354  			}, {
   355  				Value: "oss-eu-central-1.aliyuncs.com",
   356  				Help:  "Central Europe 1 (Frankfurt)",
   357  			}, {
   358  				Value: "oss-eu-west-1.aliyuncs.com",
   359  				Help:  "West Europe (London)",
   360  			}, {
   361  				Value: "oss-me-east-1.aliyuncs.com",
   362  				Help:  "Middle East 1 (Dubai)",
   363  			}},
   364  		}, {
   365  			Name:     "endpoint",
   366  			Help:     "Endpoint for StackPath Object Storage.",
   367  			Provider: "StackPath",
   368  			Examples: []fs.OptionExample{{
   369  				Value: "s3.us-east-2.stackpathstorage.com",
   370  				Help:  "US East Endpoint",
   371  			}, {
   372  				Value: "s3.us-west-1.stackpathstorage.com",
   373  				Help:  "US West Endpoint",
   374  			}, {
   375  				Value: "s3.eu-central-1.stackpathstorage.com",
   376  				Help:  "EU Endpoint",
   377  			}},
   378  		}, {
   379  			Name:     "endpoint",
   380  			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
   381  			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
   382  			Examples: []fs.OptionExample{{
   383  				Value:    "objects-us-east-1.dream.io",
   384  				Help:     "Dream Objects endpoint",
   385  				Provider: "Dreamhost",
   386  			}, {
   387  				Value:    "nyc3.digitaloceanspaces.com",
   388  				Help:     "Digital Ocean Spaces New York 3",
   389  				Provider: "DigitalOcean",
   390  			}, {
   391  				Value:    "ams3.digitaloceanspaces.com",
   392  				Help:     "Digital Ocean Spaces Amsterdam 3",
   393  				Provider: "DigitalOcean",
   394  			}, {
   395  				Value:    "sgp1.digitaloceanspaces.com",
   396  				Help:     "Digital Ocean Spaces Singapore 1",
   397  				Provider: "DigitalOcean",
   398  			}, {
   399  				Value:    "s3.wasabisys.com",
   400  				Help:     "Wasabi US East endpoint",
   401  				Provider: "Wasabi",
   402  			}, {
   403  				Value:    "s3.us-west-1.wasabisys.com",
   404  				Help:     "Wasabi US West endpoint",
   405  				Provider: "Wasabi",
   406  			}, {
   407  				Value:    "s3.eu-central-1.wasabisys.com",
   408  				Help:     "Wasabi EU Central endpoint",
   409  				Provider: "Wasabi",
   410  			}},
   411  		}, {
   412  			Name:     "location_constraint",
   413  			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
   414  			Provider: "AWS",
   415  			Examples: []fs.OptionExample{{
   416  				Value: "",
   417  				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
   418  			}, {
   419  				Value: "us-east-2",
   420  				Help:  "US East (Ohio) Region.",
   421  			}, {
   422  				Value: "us-west-2",
   423  				Help:  "US West (Oregon) Region.",
   424  			}, {
   425  				Value: "us-west-1",
   426  				Help:  "US West (Northern California) Region.",
   427  			}, {
   428  				Value: "ca-central-1",
   429  				Help:  "Canada (Central) Region.",
   430  			}, {
   431  				Value: "eu-west-1",
   432  				Help:  "EU (Ireland) Region.",
   433  			}, {
   434  				Value: "eu-west-2",
   435  				Help:  "EU (London) Region.",
   436  			}, {
   437  				Value: "eu-north-1",
   438  				Help:  "EU (Stockholm) Region.",
   439  			}, {
   440  				Value: "EU",
   441  				Help:  "EU Region.",
   442  			}, {
   443  				Value: "ap-southeast-1",
   444  				Help:  "Asia Pacific (Singapore) Region.",
   445  			}, {
   446  				Value: "ap-southeast-2",
   447  				Help:  "Asia Pacific (Sydney) Region.",
   448  			}, {
   449  				Value: "ap-northeast-1",
   450  				Help:  "Asia Pacific (Tokyo) Region.",
   451  			}, {
   452  				Value: "ap-northeast-2",
   453  				Help:  "Asia Pacific (Seoul)",
   454  			}, {
   455  				Value: "ap-south-1",
   456  				Help:  "Asia Pacific (Mumbai)",
   457  			}, {
   458  				Value: "ap-east-1",
   459  				Help:  "Asia Pacific (Hong Kong)",
   460  			}, {
   461  				Value: "sa-east-1",
   462  				Help:  "South America (Sao Paulo) Region.",
   463  			}},
   464  		}, {
   465  			Name:     "location_constraint",
   466  			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list; hit enter.",
   467  			Provider: "IBMCOS",
   468  			Examples: []fs.OptionExample{{
   469  				Value: "us-standard",
   470  				Help:  "US Cross Region Standard",
   471  			}, {
   472  				Value: "us-vault",
   473  				Help:  "US Cross Region Vault",
   474  			}, {
   475  				Value: "us-cold",
   476  				Help:  "US Cross Region Cold",
   477  			}, {
   478  				Value: "us-flex",
   479  				Help:  "US Cross Region Flex",
   480  			}, {
   481  				Value: "us-east-standard",
   482  				Help:  "US East Region Standard",
   483  			}, {
   484  				Value: "us-east-vault",
   485  				Help:  "US East Region Vault",
   486  			}, {
   487  				Value: "us-east-cold",
   488  				Help:  "US East Region Cold",
   489  			}, {
   490  				Value: "us-east-flex",
   491  				Help:  "US East Region Flex",
   492  			}, {
   493  				Value: "us-south-standard",
   494  				Help:  "US South Region Standard",
   495  			}, {
   496  				Value: "us-south-vault",
   497  				Help:  "US South Region Vault",
   498  			}, {
   499  				Value: "us-south-cold",
   500  				Help:  "US South Region Cold",
   501  			}, {
   502  				Value: "us-south-flex",
   503  				Help:  "US South Region Flex",
   504  			}, {
   505  				Value: "eu-standard",
   506  				Help:  "EU Cross Region Standard",
   507  			}, {
   508  				Value: "eu-vault",
   509  				Help:  "EU Cross Region Vault",
   510  			}, {
   511  				Value: "eu-cold",
   512  				Help:  "EU Cross Region Cold",
   513  			}, {
   514  				Value: "eu-flex",
   515  				Help:  "EU Cross Region Flex",
   516  			}, {
   517  				Value: "eu-gb-standard",
   518  				Help:  "Great Britain Standard",
   519  			}, {
   520  				Value: "eu-gb-vault",
   521  				Help:  "Great Britain Vault",
   522  			}, {
   523  				Value: "eu-gb-cold",
   524  				Help:  "Great Britain Cold",
   525  			}, {
   526  				Value: "eu-gb-flex",
   527  				Help:  "Great Britain Flex",
   528  			}, {
   529  				Value: "ap-standard",
   530  				Help:  "APAC Standard",
   531  			}, {
   532  				Value: "ap-vault",
   533  				Help:  "APAC Vault",
   534  			}, {
   535  				Value: "ap-cold",
   536  				Help:  "APAC Cold",
   537  			}, {
   538  				Value: "ap-flex",
   539  				Help:  "APAC Flex",
   540  			}, {
   541  				Value: "mel01-standard",
   542  				Help:  "Melbourne Standard",
   543  			}, {
   544  				Value: "mel01-vault",
   545  				Help:  "Melbourne Vault",
   546  			}, {
   547  				Value: "mel01-cold",
   548  				Help:  "Melbourne Cold",
   549  			}, {
   550  				Value: "mel01-flex",
   551  				Help:  "Melbourne Flex",
   552  			}, {
   553  				Value: "tor01-standard",
   554  				Help:  "Toronto Standard",
   555  			}, {
   556  				Value: "tor01-vault",
   557  				Help:  "Toronto Vault",
   558  			}, {
   559  				Value: "tor01-cold",
   560  				Help:  "Toronto Cold",
   561  			}, {
   562  				Value: "tor01-flex",
   563  				Help:  "Toronto Flex",
   564  			}},
   565  		}, {
   566  			Name:     "location_constraint",
   567  			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
   568  			Provider: "!AWS,IBMCOS,Alibaba,StackPath",
   569  		}, {
   570  			Name: "acl",
   571  			Help: `Canned ACL used when creating buckets and storing or copying objects.
   572  
   573  This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
   574  
   575  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
   576  
   577  Note that this ACL is applied when server side copying objects as S3
   578  doesn't copy the ACL from the source but rather writes a fresh one.`,
   579  			Examples: []fs.OptionExample{{
   580  				Value:    "private",
   581  				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
   582  				Provider: "!IBMCOS",
   583  			}, {
   584  				Value:    "public-read",
   585  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
   586  				Provider: "!IBMCOS",
   587  			}, {
   588  				Value:    "public-read-write",
   589  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
   590  				Provider: "!IBMCOS",
   591  			}, {
   592  				Value:    "authenticated-read",
   593  				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
   594  				Provider: "!IBMCOS",
   595  			}, {
   596  				Value:    "bucket-owner-read",
   597  				Help:     "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
   598  				Provider: "!IBMCOS",
   599  			}, {
   600  				Value:    "bucket-owner-full-control",
   601  				Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
   602  				Provider: "!IBMCOS",
   603  			}, {
   604  				Value:    "private",
   605  				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
   606  				Provider: "IBMCOS",
   607  			}, {
   608  				Value:    "public-read",
   609  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
   610  				Provider: "IBMCOS",
   611  			}, {
   612  				Value:    "public-read-write",
   613  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
   614  				Provider: "IBMCOS",
   615  			}, {
   616  				Value:    "authenticated-read",
   617  				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
   618  				Provider: "IBMCOS",
   619  			}},
   620  		}, {
   621  			Name: "bucket_acl",
   622  			Help: `Canned ACL used when creating buckets.
   623  
   624  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
   625  
   626  Note that this ACL is applied only when creating buckets.  If it
   627  isn't set then "acl" is used instead.`,
   628  			Advanced: true,
   629  			Examples: []fs.OptionExample{{
   630  				Value: "private",
   631  				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
   632  			}, {
   633  				Value: "public-read",
   634  				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
   635  			}, {
   636  				Value: "public-read-write",
   637  				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
   638  			}, {
   639  				Value: "authenticated-read",
   640  				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
   641  			}},
   642  		}, {
   643  			Name:     "server_side_encryption",
   644  			Help:     "The server-side encryption algorithm used when storing this object in S3.",
   645  			Provider: "AWS,Ceph,Minio",
   646  			Examples: []fs.OptionExample{{
   647  				Value: "",
   648  				Help:  "None",
   649  			}, {
   650  				Value: "AES256",
   651  				Help:  "AES256",
   652  			}, {
   653  				Value: "aws:kms",
   654  				Help:  "aws:kms",
   655  			}},
   656  		}, {
   657  			Name:     "sse_customer_algorithm",
   658  			Help:     "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.",
   659  			Provider: "AWS,Ceph,Minio",
   660  			Advanced: true,
   661  			Examples: []fs.OptionExample{{
   662  				Value: "",
   663  				Help:  "None",
   664  			}, {
   665  				Value: "AES256",
   666  				Help:  "AES256",
   667  			}},
   668  		}, {
   669  			Name:     "sse_kms_key_id",
   670  			Help:     "If using KMS ID you must provide the ARN of the Key.",
   671  			Provider: "AWS,Ceph,Minio",
   672  			Examples: []fs.OptionExample{{
   673  				Value: "",
   674  				Help:  "None",
   675  			}, {
   676  				Value: "arn:aws:kms:us-east-1:*",
   677  				Help:  "arn:aws:kms:*",
   678  			}},
   679  		}, {
   680  			Name:     "sse_customer_key",
   681  			Help:     "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.",
   682  			Provider: "AWS,Ceph,Minio",
   683  			Advanced: true,
   684  			Examples: []fs.OptionExample{{
   685  				Value: "",
   686  				Help:  "None",
   687  			}},
   688  		}, {
   689  			Name:     "sse_customer_key_md5",
   690  			Help:     "If using SSE-C you must provide the secret encryption key MD5 checksum.",
   691  			Provider: "AWS,Ceph,Minio",
   692  			Advanced: true,
   693  			Examples: []fs.OptionExample{{
   694  				Value: "",
   695  				Help:  "None",
   696  			}},
   697  		}, {
   698  			Name:     "storage_class",
   699  			Help:     "The storage class to use when storing new objects in S3.",
   700  			Provider: "AWS",
   701  			Examples: []fs.OptionExample{{
   702  				Value: "",
   703  				Help:  "Default",
   704  			}, {
   705  				Value: "STANDARD",
   706  				Help:  "Standard storage class",
   707  			}, {
   708  				Value: "REDUCED_REDUNDANCY",
   709  				Help:  "Reduced redundancy storage class",
   710  			}, {
   711  				Value: "STANDARD_IA",
   712  				Help:  "Standard Infrequent Access storage class",
   713  			}, {
   714  				Value: "ONEZONE_IA",
   715  				Help:  "One Zone Infrequent Access storage class",
   716  			}, {
   717  				Value: "GLACIER",
   718  				Help:  "Glacier storage class",
   719  			}, {
   720  				Value: "DEEP_ARCHIVE",
   721  				Help:  "Glacier Deep Archive storage class",
   722  			}, {
   723  				Value: "INTELLIGENT_TIERING",
   724  				Help:  "Intelligent-Tiering storage class",
   725  			}},
   726  		}, {
   727  			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
   728  			Name:     "storage_class",
   729  			Help:     "The storage class to use when storing new objects in OSS.",
   730  			Provider: "Alibaba",
   731  			Examples: []fs.OptionExample{{
   732  				Value: "",
   733  				Help:  "Default",
   734  			}, {
   735  				Value: "STANDARD",
   736  				Help:  "Standard storage class",
   737  			}, {
   738  				Value: "GLACIER",
   739  				Help:  "Archive storage mode.",
   740  			}, {
   741  				Value: "STANDARD_IA",
   742  				Help:  "Infrequent access storage mode.",
   743  			}},
   744  		}, {
   745  			Name: "upload_cutoff",
   746  			Help: `Cutoff for switching to chunked upload
   747  
   748  Any files larger than this will be uploaded in chunks of chunk_size.
   749  The minimum is 0 and the maximum is 5GB.`,
   750  			Default:  defaultUploadCutoff,
   751  			Advanced: true,
   752  		}, {
   753  			Name: "chunk_size",
   754  			Help: `Chunk size to use for uploading.
   755  
   756  When uploading files larger than upload_cutoff or files with unknown
   757  size (eg from "rclone rcat" or uploaded with "rclone mount" or google
   758  photos or google docs) they will be uploaded as multipart uploads
   759  using this chunk size.
   760  
   761  Note that "--s3-upload-concurrency" chunks of this size are buffered
   762  in memory per transfer.
   763  
   764  If you are transferring large files over high speed links and you have
   765  enough memory, then increasing this will speed up the transfers.
   766  
   767  Rclone will automatically increase the chunk size when uploading a
   768  large file of known size to stay below the 10,000 chunks limit.
   769  
   770  Files of unknown size are uploaded with the configured
   771  chunk_size. Since the default chunk size is 5MB and there can be at
   772  most 10,000 chunks, this means that by default the maximum size of
   773  a file you can stream upload is 48GB.  If you wish to stream upload
   774  larger files then you will need to increase chunk_size.`,
   775  			Default:  minChunkSize,
   776  			Advanced: true,
   777  		}, {
   778  			Name: "copy_cutoff",
   779  			Help: `Cutoff for switching to multipart copy
   780  
   781  Any files larger than this that need to be server side copied will be
   782  copied in chunks of this size.
   783  
   784  The minimum is 0 and the maximum is 5GB.`,
   785  			Default:  fs.SizeSuffix(maxSizeForCopy),
   786  			Advanced: true,
   787  		}, {
   788  			Name: "disable_checksum",
   789  			Help: `Don't store MD5 checksum with object metadata
   790  
   791  Normally rclone will calculate the MD5 checksum of the input before
   792  uploading it so it can add it to metadata on the object. This is great
   793  for data integrity checking but can cause long delays for large files
   794  to start uploading.`,
   795  			Default:  false,
   796  			Advanced: true,
   797  		}, {
   798  			Name:     "session_token",
   799  			Help:     "An AWS session token",
   800  			Advanced: true,
   801  		}, {
   802  			Name: "upload_concurrency",
   803  			Help: `Concurrency for multipart uploads.
   804  
   805  This is the number of chunks of the same file that are uploaded
   806  concurrently.
   807  
   808  If you are uploading small numbers of large files over a high speed link
   809  and these uploads do not fully utilize your bandwidth, then increasing
   810  this may help to speed up the transfers.`,
   811  			Default:  4,
   812  			Advanced: true,
   813  		}, {
   814  			Name: "force_path_style",
   815  			Help: `If true use path style access, if false use virtual hosted style.
   816  
   817  If this is true (the default) then rclone will use path style access,
   818  if false then rclone will use virtual hosted style. See [the AWS S3
   819  docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
   820  for more info.
   821  
   822  Some providers (eg AWS, Aliyun OSS or Netease COS) require this to be set to
   823  false - rclone will do this automatically based on the provider
   824  setting.`,
   825  			Default:  true,
   826  			Advanced: true,
   827  		}, {
   828  			Name: "v2_auth",
   829  			Help: `If true use v2 authentication.
   830  
   831  If this is false (the default) then rclone will use v4 authentication.
   832  If it is set then rclone will use v2 authentication.
   833  
   834  Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
   835  			Default:  false,
   836  			Advanced: true,
   837  		}, {
   838  			Name:     "use_accelerate_endpoint",
   839  			Provider: "AWS",
   840  			Help: `If true use the AWS S3 accelerated endpoint.
   841  
   842  See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
   843  			Default:  false,
   844  			Advanced: true,
   845  		}, {
   846  			Name:     "leave_parts_on_error",
   847  			Provider: "AWS",
   848  			Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
   849  
   850  It should be set to true for resuming uploads across different sessions.
   851  
   852  WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
   853  `,
   854  			Default:  false,
   855  			Advanced: true,
   856  		}, {
   857  			Name: "list_chunk",
   858  			Help: `Size of listing chunk (response list for each ListObject S3 request).
   859  
   860  This option is also known as "MaxKeys", "max-items", or "page-size" in the AWS S3 specification.
   861  Most services truncate the response list to 1000 objects even if more than that is requested.
   862  In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html).
   863  In Ceph, this can be increased with the "rgw list buckets max chunk" option.
   864  `,
   865  			Default:  1000,
   866  			Advanced: true,
   867  		}, {
   868  			Name:     config.ConfigEncoding,
   869  			Help:     config.ConfigEncodingHelp,
   870  			Advanced: true,
   871  			// Any UTF-8 character is valid in a key, however it can't handle
   872  			// invalid UTF-8, and / has a special meaning.
   873  			//
   874  			// The SDK can't seem to handle uploading files called '.'
   875  			//
   876  			// FIXME would be nice to add
   877  			// - initial / encoding
   878  			// - doubled / encoding
   879  			// - trailing / encoding
   880  			// so that AWS keys are always valid file names
   881  			Default: encoder.EncodeInvalidUtf8 |
   882  				encoder.EncodeSlash |
   883  				encoder.EncodeDot,
   884  		}, {
   885  			Name:     "memory_pool_flush_time",
   886  			Default:  memoryPoolFlushTime,
   887  			Advanced: true,
   888  			Help: `How often internal memory buffer pools will be flushed.
   889  Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
   890  This option controls how often unused buffers will be removed from the pool.`,
   891  		}, {
   892  			Name:     "memory_pool_use_mmap",
   893  			Default:  memoryPoolUseMmap,
   894  			Advanced: true,
   895  			Help:     `Whether to use mmap buffers in internal memory pool.`,
   896  		},
   897  		}})
   898  }
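
         // Each Option Name registered above is bound to a field of the
         // Options struct below via its `config:"..."` tag - NewFs does the
         // binding with configstruct.Set. A minimal sketch of that mechanism
         // (m is any configmap.Mapper, eg a configmap.Simple):
         //
         //	opt := new(Options)
         //	err := configstruct.Set(m, opt)
         //	// opt.Provider now holds the "provider" value from the config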
   899  
   900  // Constants
   901  const (
   902  	metaMtime           = "Mtime"                // the meta key to store mtime in - eg X-Amz-Meta-Mtime
   903  	metaMD5Hash         = "Md5chksum"            // the meta key to store md5hash in
   904  	maxSizeForCopy      = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
   905  	maxUploadParts      = 10000                  // maximum allowed number of parts in a multi-part upload
   906  	minChunkSize        = fs.SizeSuffix(1024 * 1024 * 5)
   907  	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
   908  	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
   909  	minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
   910  
   911  	memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
   912  	memoryPoolUseMmap   = false
   913  )
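
         // These constants bound streamed uploads of unknown size: at most
         // maxUploadParts chunks of chunk_size each. A worked illustration of
         // the arithmetic in the chunk_size help above (maxStreamSize is a
         // hypothetical name, not used elsewhere in this file):
         //
         //	const maxStreamSize = minChunkSize * maxUploadParts // 5MB * 10,000 ≈ 48GB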
   914  
   915  // Options defines the configuration for this backend
   916  type Options struct {
   917  	Provider              string               `config:"provider"`
   918  	EnvAuth               bool                 `config:"env_auth"`
   919  	AccessKeyID           string               `config:"access_key_id"`
   920  	SecretAccessKey       string               `config:"secret_access_key"`
   921  	Region                string               `config:"region"`
   922  	Endpoint              string               `config:"endpoint"`
   923  	LocationConstraint    string               `config:"location_constraint"`
   924  	ACL                   string               `config:"acl"`
   925  	BucketACL             string               `config:"bucket_acl"`
   926  	ServerSideEncryption  string               `config:"server_side_encryption"`
   927  	SSEKMSKeyID           string               `config:"sse_kms_key_id"`
   928  	SSECustomerAlgorithm  string               `config:"sse_customer_algorithm"`
   929  	SSECustomerKey        string               `config:"sse_customer_key"`
   930  	SSECustomerKeyMD5     string               `config:"sse_customer_key_md5"`
   931  	StorageClass          string               `config:"storage_class"`
   932  	UploadCutoff          fs.SizeSuffix        `config:"upload_cutoff"`
   933  	CopyCutoff            fs.SizeSuffix        `config:"copy_cutoff"`
   934  	ChunkSize             fs.SizeSuffix        `config:"chunk_size"`
   935  	DisableChecksum       bool                 `config:"disable_checksum"`
   936  	SessionToken          string               `config:"session_token"`
   937  	UploadConcurrency     int                  `config:"upload_concurrency"`
   938  	ForcePathStyle        bool                 `config:"force_path_style"`
   939  	V2Auth                bool                 `config:"v2_auth"`
   940  	UseAccelerateEndpoint bool                 `config:"use_accelerate_endpoint"`
   941  	LeavePartsOnError     bool                 `config:"leave_parts_on_error"`
   942  	ListChunk             int64                `config:"list_chunk"`
   943  	Enc                   encoder.MultiEncoder `config:"encoding"`
   944  	MemoryPoolFlushTime   fs.Duration          `config:"memory_pool_flush_time"`
   945  	MemoryPoolUseMmap     bool                 `config:"memory_pool_use_mmap"`
   946  }
   947  
   948  // Fs represents a remote s3 server
   949  type Fs struct {
   950  	name          string           // the name of the remote
   951  	root          string           // root of the bucket - ignore all objects above this
   952  	opt           Options          // parsed options
   953  	features      *fs.Features     // optional features
   954  	c             *s3.S3           // the connection to the s3 server
   955  	ses           *session.Session // the s3 session
   956  	rootBucket    string           // bucket part of root (if any)
   957  	rootDirectory string           // directory part of root (if any)
   958  	cache         *bucket.Cache    // cache for bucket creation status
   959  	pacer         *fs.Pacer        // To pace the API calls
   960  	srv           *http.Client     // a plain http client
   961  	pool          *pool.Pool       // memory pool
   962  }
   963  
   964  // Object describes a s3 object
   965  type Object struct {
   966  	// Will definitely have everything but meta which may be nil
   967  	//
   968  	// List will read everything but meta & mimeType - to fill
   969  	// that in you need to call readMetaData
   970  	fs           *Fs                // what this object is part of
   971  	remote       string             // The remote path
   972  	etag         string             // md5sum of the object
   973  	bytes        int64              // size of the object
   974  	lastModified time.Time          // Last modified
   975  	meta         map[string]*string // The object metadata if known - may be nil
   976  	mimeType     string             // MimeType of object - may be ""
   977  	storageClass string             // eg GLACIER
   978  }
   979  
   980  // ------------------------------------------------------------
   981  
   982  // Name of the remote (as passed into NewFs)
   983  func (f *Fs) Name() string {
   984  	return f.name
   985  }
   986  
   987  // Root of the remote (as passed into NewFs)
   988  func (f *Fs) Root() string {
   989  	return f.root
   990  }
   991  
   992  // String converts this Fs to a string
   993  func (f *Fs) String() string {
   994  	if f.rootBucket == "" {
   995  		return "S3 root"
   996  	}
   997  	if f.rootDirectory == "" {
   998  		return fmt.Sprintf("S3 bucket %s", f.rootBucket)
   999  	}
  1000  	return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory)
  1001  }
  1002  
  1003  // Features returns the optional features of this Fs
  1004  func (f *Fs) Features() *fs.Features {
  1005  	return f.features
  1006  }
  1007  
  1008  // retryErrorCodes is a slice of error codes that we will retry
  1009  // See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
  1010  var retryErrorCodes = []int{
  1011  	500, // Internal Server Error - "We encountered an internal error. Please try again."
  1012  	503, // Service Unavailable/Slow Down - "Reduce your request rate"
  1013  }
  1014  
  1015  // S3 is pretty resilient, and the built-in retry handling is probably sufficient
  1016  // as it should notice closed connections and timeouts which are the most likely
  1017  // sort of failure modes
  1018  func (f *Fs) shouldRetry(err error) (bool, error) {
  1019  	// If this is an awserr object, try and extract more useful information to determine if we should retry
  1020  	if awsError, ok := err.(awserr.Error); ok {
  1021  		// Simple case, check the original embedded error in case it's generically retryable
  1022  		if fserrors.ShouldRetry(awsError.OrigErr()) {
  1023  			return true, err
  1024  		}
  1025  		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
  1026  		if reqErr, ok := err.(awserr.RequestFailure); ok {
  1027  			// 301 if wrong region for bucket - can only update if running from a bucket
  1028  			if f.rootBucket != "" {
  1029  				if reqErr.StatusCode() == http.StatusMovedPermanently {
  1030  					urfbErr := f.updateRegionForBucket(f.rootBucket)
  1031  					if urfbErr != nil {
  1032  						fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
  1033  						return false, err
  1034  					}
  1035  					return true, err
  1036  				}
  1037  			}
  1038  			for _, e := range retryErrorCodes {
  1039  				if reqErr.StatusCode() == e {
  1040  					return true, err
  1041  				}
  1042  			}
  1043  		}
  1044  	}
  1045  	// Ok, not an awserr, check for generic failure conditions
  1046  	return fserrors.ShouldRetry(err), err
  1047  }
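
         // shouldRetry is written to plug straight into pacer.Call: the closure
         // returns (retry bool, err error) and the pacer re-runs it with backoff
         // while retry is true. A sketch of the pattern used throughout this
         // file (req is assumed to be in scope):
         //
         //	err = f.pacer.Call(func() (bool, error) {
         //		_, err = f.c.HeadObject(&req)
         //		return f.shouldRetry(err)
         //	})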
  1048  
  1049  // parsePath parses a remote 'url'
  1050  func parsePath(path string) (root string) {
  1051  	root = strings.Trim(path, "/")
  1052  	return
  1053  }
  1054  
  1055  // split returns bucket and bucketPath from the rootRelativePath
  1056  // relative to f.root
  1057  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
  1058  	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
  1059  	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
  1060  }
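
         // An illustration of the split semantics, assuming f.root is
         // "bucket/dir" and the default encoding (which leaves these names
         // unchanged):
         //
         //	bucketName, bucketPath := f.split("file.txt")
         //	// bucketName == "bucket", bucketPath == "dir/file.txt"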
  1061  
  1062  // split returns bucket and bucketPath from the object
  1063  func (o *Object) split() (bucket, bucketPath string) {
  1064  	return o.fs.split(o.remote)
  1065  }
  1066  
  1067  // s3Connection makes a connection to s3
  1068  func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
  1069  	// Make the auth
  1070  	v := credentials.Value{
  1071  		AccessKeyID:     opt.AccessKeyID,
  1072  		SecretAccessKey: opt.SecretAccessKey,
  1073  		SessionToken:    opt.SessionToken,
  1074  	}
  1075  
  1076  	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
  1077  	def := defaults.Get()
  1078  	def.Config.HTTPClient = lowTimeoutClient
  1079  
  1080  	// start a new AWS session
  1081  	awsSession, err := session.NewSession()
  1082  	if err != nil {
  1083  		return nil, nil, errors.Wrap(err, "NewSession")
  1084  	}
  1085  
  1086  	// first provider to supply a credential set "wins"
  1087  	providers := []credentials.Provider{
  1088  		// use static credentials if they're present (checked by provider)
  1089  		&credentials.StaticProvider{Value: v},
  1090  
  1091  		// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
  1092  		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
  1093  		&credentials.EnvProvider{},
  1094  
  1095  		// A SharedCredentialsProvider retrieves credentials
  1096  		// from the current user's home directory.  It checks
  1097  		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
  1098  		&credentials.SharedCredentialsProvider{},
  1099  
  1100  		// Pick up IAM role if we're in an ECS task
  1101  		defaults.RemoteCredProvider(*def.Config, def.Handlers),
  1102  
  1103  		// Pick up IAM role in case we're on EC2
  1104  		&ec2rolecreds.EC2RoleProvider{
  1105  			Client: ec2metadata.New(awsSession, &aws.Config{
  1106  				HTTPClient: lowTimeoutClient,
  1107  			}),
  1108  			ExpiryWindow: 3 * time.Minute,
  1109  		},
  1110  
  1111  		// Pick up IAM role if we are in EKS
  1112  		&stscreds.WebIdentityRoleProvider{
  1113  			ExpiryWindow: 3 * time.Minute,
  1114  		},
  1115  	}
  1116  	cred := credentials.NewChainCredentials(providers)
  1117  
  1118  	switch {
  1119  	case opt.EnvAuth:
  1120  		// No need for empty checks if "env_auth" is true
  1121  	case v.AccessKeyID == "" && v.SecretAccessKey == "":
  1122  	// if no access key/secret and IAM is explicitly disabled then fall back to anonymous interaction
  1123  		cred = credentials.AnonymousCredentials
  1124  	case v.AccessKeyID == "":
  1125  		return nil, nil, errors.New("access_key_id not found")
  1126  	case v.SecretAccessKey == "":
  1127  		return nil, nil, errors.New("secret_access_key not found")
  1128  	}
  1129  
  1130  	if opt.Region == "" && opt.Endpoint == "" {
  1131  		opt.Endpoint = "https://s3.amazonaws.com/"
  1132  	}
  1133  	if opt.Region == "" {
  1134  		opt.Region = "us-east-1"
  1135  	}
  1136  	if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
  1137  		opt.ForcePathStyle = false
  1138  	}
  1139  	awsConfig := aws.NewConfig().
  1140  		WithMaxRetries(0). // Rely on rclone's retry logic
  1141  		WithCredentials(cred).
  1142  		WithHTTPClient(fshttp.NewClient(fs.Config)).
  1143  		WithS3ForcePathStyle(opt.ForcePathStyle).
  1144  		WithS3UseAccelerate(opt.UseAccelerateEndpoint)
  1145  	if opt.Region != "" {
  1146  		awsConfig.WithRegion(opt.Region)
  1147  	}
  1148  	if opt.Endpoint != "" {
  1149  		awsConfig.WithEndpoint(opt.Endpoint)
  1150  	}
  1151  
  1152  	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
  1153  	awsSessionOpts := session.Options{
  1154  		Config: *awsConfig,
  1155  	}
  1156  	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
  1157  		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
  1158  		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
  1159  		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
  1160  		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
  1161  		awsSessionOpts.Config.Credentials = nil
  1162  	}
  1163  	ses, err := session.NewSessionWithOptions(awsSessionOpts)
  1164  	if err != nil {
  1165  		return nil, nil, err
  1166  	}
  1167  	c := s3.New(ses)
  1168  	if opt.V2Auth || opt.Region == "other-v2-signature" {
  1169  		fs.Debugf(nil, "Using v2 auth")
  1170  		signer := func(req *request.Request) {
  1171  			// Ignore AnonymousCredentials object
  1172  			if req.Config.Credentials == credentials.AnonymousCredentials {
  1173  				return
  1174  			}
  1175  			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
  1176  		}
  1177  		c.Handlers.Sign.Clear()
  1178  		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  1179  		c.Handlers.Sign.PushBack(signer)
  1180  	}
  1181  	return c, ses, nil
  1182  }
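
         // A hedged usage sketch for s3Connection - it only needs a populated
         // Options, everything else comes from the environment (the values
         // here are illustrative, not defaults):
         //
         //	opt := &Options{Provider: "AWS", EnvAuth: true, Region: "us-east-1"}
         //	c, ses, err := s3Connection(opt)
         //	// c is the *s3.S3 client and ses the *session.Session,
         //	// both of which NewFs stores on the Fs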
  1183  
  1184  func checkUploadChunkSize(cs fs.SizeSuffix) error {
  1185  	if cs < minChunkSize {
  1186  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
  1187  	}
  1188  	return nil
  1189  }
  1190  
  1191  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1192  	err = checkUploadChunkSize(cs)
  1193  	if err == nil {
  1194  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
  1195  	}
  1196  	return
  1197  }
  1198  
  1199  func checkUploadCutoff(cs fs.SizeSuffix) error {
  1200  	if cs > maxUploadCutoff {
  1201  		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
  1202  	}
  1203  	return nil
  1204  }
  1205  
  1206  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1207  	err = checkUploadCutoff(cs)
  1208  	if err == nil {
  1209  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
  1210  	}
  1211  	return
  1212  }
  1213  
  1214  // setRoot changes the root of the Fs
  1215  func (f *Fs) setRoot(root string) {
  1216  	f.root = parsePath(root)
  1217  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
  1218  }
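
         // For example (an illustration of bucket.Split's behaviour):
         // setRoot("bucket/dir/file") sets f.root to "bucket/dir/file",
         // f.rootBucket to "bucket" and f.rootDirectory to "dir/file".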
  1219  
  1220  // NewFs constructs an Fs from the path, bucket:path
  1221  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
  1222  	// Parse config into Options struct
  1223  	opt := new(Options)
  1224  	err := configstruct.Set(m, opt)
  1225  	if err != nil {
  1226  		return nil, err
  1227  	}
  1228  	err = checkUploadChunkSize(opt.ChunkSize)
  1229  	if err != nil {
  1230  		return nil, errors.Wrap(err, "s3: chunk size")
  1231  	}
  1232  	err = checkUploadCutoff(opt.UploadCutoff)
  1233  	if err != nil {
  1234  		return nil, errors.Wrap(err, "s3: upload cutoff")
  1235  	}
  1236  	if opt.ACL == "" {
  1237  		opt.ACL = "private"
  1238  	}
  1239  	if opt.BucketACL == "" {
  1240  		opt.BucketACL = opt.ACL
  1241  	}
  1242  	c, ses, err := s3Connection(opt)
  1243  	if err != nil {
  1244  		return nil, err
  1245  	}
  1246  
  1247  	f := &Fs{
  1248  		name:  name,
  1249  		opt:   *opt,
  1250  		c:     c,
  1251  		ses:   ses,
  1252  		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
  1253  		cache: bucket.NewCache(),
  1254  		srv:   fshttp.NewClient(fs.Config),
  1255  		pool: pool.New(
  1256  			time.Duration(opt.MemoryPoolFlushTime),
  1257  			int(opt.ChunkSize),
  1258  			opt.UploadConcurrency*fs.Config.Transfers,
  1259  			opt.MemoryPoolUseMmap,
  1260  		),
  1261  	}
  1262  
  1263  	f.setRoot(root)
  1264  	f.features = (&fs.Features{
  1265  		ReadMimeType:      true,
  1266  		WriteMimeType:     true,
  1267  		BucketBased:       true,
  1268  		BucketBasedRootOK: true,
  1269  		SetTier:           true,
  1270  		GetTier:           true,
  1271  	}).Fill(f)
  1272  	if f.rootBucket != "" && f.rootDirectory != "" {
  1273  		// Check to see if the object exists
  1274  		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
  1275  		req := s3.HeadObjectInput{
  1276  			Bucket: &f.rootBucket,
  1277  			Key:    &encodedDirectory,
  1278  		}
  1279  		err = f.pacer.Call(func() (bool, error) {
  1280  			_, err = f.c.HeadObject(&req)
  1281  			return f.shouldRetry(err)
  1282  		})
  1283  		if err == nil {
  1284  			newRoot := path.Dir(f.root)
  1285  			if newRoot == "." {
  1286  				newRoot = ""
  1287  			}
  1288  			f.setRoot(newRoot)
  1289  			// return an error with an fs which points to the parent
  1290  			return f, fs.ErrorIsFile
  1291  		}
  1292  	}
  1293  	// f.listMultipartUploads()
  1294  	return f, nil
  1295  }
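
         // A minimal usage sketch for NewFs (the remote name and bucket are
         // made up; configmap.Simple is the map-backed Mapper from
         // fs/config/configmap):
         //
         //	m := configmap.Simple{"provider": "AWS", "env_auth": "true"}
         //	f, err := NewFs("mys3", "bucket/path", m)
         //	if err == fs.ErrorIsFile {
         //		// the root pointed at an object - f now points at its parent
         //	}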
  1296  
  1297  // Return an Object from a path
  1298  //
  1299  // If it can't be found it returns the error fs.ErrorObjectNotFound.
  1300  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
  1301  	o := &Object{
  1302  		fs:     f,
  1303  		remote: remote,
  1304  	}
  1305  	if info != nil {
  1306  		// Set info but not meta
  1307  		if info.LastModified == nil {
  1308  			fs.Logf(o, "Failed to read last modified")
  1309  			o.lastModified = time.Now()
  1310  		} else {
  1311  			o.lastModified = *info.LastModified
  1312  		}
  1313  		o.etag = aws.StringValue(info.ETag)
  1314  		o.bytes = aws.Int64Value(info.Size)
  1315  		o.storageClass = aws.StringValue(info.StorageClass)
  1316  	} else {
  1317  		err := o.readMetaData(ctx) // reads info and meta, returning an error
  1318  		if err != nil {
  1319  			return nil, err
  1320  		}
  1321  	}
  1322  	return o, nil
  1323  }
  1324  
  1325  // NewObject finds the Object at remote.  If it can't be found
  1326  // it returns the error fs.ErrorObjectNotFound.
  1327  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1328  	return f.newObjectWithInfo(ctx, remote, nil)
  1329  }
  1330  
  1331  // Gets the bucket location
  1332  func (f *Fs) getBucketLocation(bucket string) (string, error) {
  1333  	req := s3.GetBucketLocationInput{
  1334  		Bucket: &bucket,
  1335  	}
  1336  	var resp *s3.GetBucketLocationOutput
  1337  	var err error
  1338  	err = f.pacer.Call(func() (bool, error) {
  1339  		resp, err = f.c.GetBucketLocation(&req)
  1340  		return f.shouldRetry(err)
  1341  	})
  1342  	if err != nil {
  1343  		return "", err
  1344  	}
  1345  	return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
  1346  }
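
         // Note: s3.NormalizeBucketLocation (used above) maps the empty
         // LocationConstraint to "us-east-1" and "EU" to "eu-west-1", so the
         // value returned is always a usable region ID.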
  1347  
  1348  // Updates the region for the bucket by reading the region from the
  1349  // bucket then updating the session.
  1350  func (f *Fs) updateRegionForBucket(bucket string) error {
  1351  	region, err := f.getBucketLocation(bucket)
  1352  	if err != nil {
  1353  		return errors.Wrap(err, "reading bucket location failed")
  1354  	}
  1355  	if aws.StringValue(f.c.Config.Endpoint) != "" {
  1356  		return errors.Errorf("can't set region to %q as endpoint is set", region)
  1357  	}
  1358  	if aws.StringValue(f.c.Config.Region) == region {
  1359  		return errors.Errorf("region is already %q - not updating", region)
  1360  	}
  1361  
  1362  	// Make a new session with the new region
  1363  	oldRegion := f.opt.Region
  1364  	f.opt.Region = region
  1365  	c, ses, err := s3Connection(&f.opt)
  1366  	if err != nil {
  1367  		return errors.Wrap(err, "creating new session failed")
  1368  	}
  1369  	f.c = c
  1370  	f.ses = ses
  1371  
  1372  	fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
  1373  	return nil
  1374  }
  1375  
  1376  // listFn is called from list to handle an object.
  1377  type listFn func(remote string, object *s3.Object, isDirectory bool) error
  1378  
  1379  // list lists the objects in the bucket and directory supplied,
  1380  // calling the function supplied for each one.  The remote has prefix
  1381  // removed from it and, if addBucket is set, the bucket is added
  1382  // to the start.
  1383  //
  1384  // Set recurse to read sub directories
  1385  func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
  1386  	if prefix != "" {
  1387  		prefix += "/"
  1388  	}
  1389  	if directory != "" {
  1390  		directory += "/"
  1391  	}
  1392  	delimiter := ""
  1393  	if !recurse {
  1394  		delimiter = "/"
  1395  	}
  1396  	var marker *string
  1397  	// URL encode the listings so we can use control characters in object names
  1398  	// See: https://github.com/aws/aws-sdk-go/issues/1914
  1399  	//
  1400  	// However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because
  1401  	// it doesn't encode CommonPrefixes.
  1402  	// See: https://tracker.ceph.com/issues/41870
  1403  	//
  1404  // This also does not work under IBM COS: see https://github.com/rclone/rclone/issues/3345
  1405  	// though maybe it does on some versions.
  1406  	//
  1407  // This does work with Minio but support was only added relatively recently
  1408  	// https://github.com/minio/minio/pull/7265
  1409  	//
  1410  // So we enable it only on providers we know support it properly; all others can retry when an
  1411  // XML syntax error is detected.
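        	//
        	// For example, a key containing the control character '\x01' comes
        	// back as "%01" in an encoded listing, and is decoded again below
        	// with url.QueryUnescape.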
  1412  	var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
  1413  	for {
  1414  		// FIXME need to implement ALL loop
  1415  		req := s3.ListObjectsInput{
  1416  			Bucket:    &bucket,
  1417  			Delimiter: &delimiter,
  1418  			Prefix:    &directory,
  1419  			MaxKeys:   &f.opt.ListChunk,
  1420  			Marker:    marker,
  1421  		}
  1422  		if urlEncodeListings {
  1423  			req.EncodingType = aws.String(s3.EncodingTypeUrl)
  1424  		}
  1425  		var resp *s3.ListObjectsOutput
  1426  		var err error
  1427  		err = f.pacer.Call(func() (bool, error) {
  1428  			resp, err = f.c.ListObjectsWithContext(ctx, &req)
  1429  			if err != nil && !urlEncodeListings {
  1430  				if awsErr, ok := err.(awserr.RequestFailure); ok {
  1431  					if origErr := awsErr.OrigErr(); origErr != nil {
  1432  						if _, ok := origErr.(*xml.SyntaxError); ok {
  1433  							// Retry the listing with URL encoding as there were characters that XML can't encode
  1434  							urlEncodeListings = true
  1435  							req.EncodingType = aws.String(s3.EncodingTypeUrl)
  1436  							fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded")
  1437  							return true, err
  1438  						}
  1439  					}
  1440  				}
  1441  			}
  1442  			return f.shouldRetry(err)
  1443  		})
  1444  		if err != nil {
  1445  			if awsErr, ok := err.(awserr.RequestFailure); ok {
  1446  				if awsErr.StatusCode() == http.StatusNotFound {
  1447  					err = fs.ErrorDirNotFound
  1448  				}
  1449  			}
  1450  			if f.rootBucket == "" {
  1451  				// if listing from the root, ignore wrong region requests and
  1452  				// return an empty directory
  1453  				if reqErr, ok := err.(awserr.RequestFailure); ok {
  1454  					// 301 if wrong region for bucket
  1455  					if reqErr.StatusCode() == http.StatusMovedPermanently {
  1456  						fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket)
  1457  						return nil
  1458  					}
  1459  				}
  1460  			}
  1461  			return err
  1462  		}
  1463  		if !recurse {
  1464  			for _, commonPrefix := range resp.CommonPrefixes {
  1465  				if commonPrefix.Prefix == nil {
  1466  					fs.Logf(f, "Nil common prefix received")
  1467  					continue
  1468  				}
  1469  				remote := *commonPrefix.Prefix
  1470  				if urlEncodeListings {
  1471  					remote, err = url.QueryUnescape(remote)
  1472  					if err != nil {
  1473  						fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err)
  1474  						continue
  1475  					}
  1476  				}
  1477  				remote = f.opt.Enc.ToStandardPath(remote)
  1478  				if !strings.HasPrefix(remote, prefix) {
  1479  					fs.Logf(f, "Odd name received %q", remote)
  1480  					continue
  1481  				}
  1482  				remote = remote[len(prefix):]
  1483  				if addBucket {
  1484  					remote = path.Join(bucket, remote)
  1485  				}
  1486  				if strings.HasSuffix(remote, "/") {
  1487  					remote = remote[:len(remote)-1]
  1488  				}
  1489  				err = fn(remote, &s3.Object{Key: &remote}, true)
  1490  				if err != nil {
  1491  					return err
  1492  				}
  1493  			}
  1494  		}
  1495  		for _, object := range resp.Contents {
  1496  			remote := aws.StringValue(object.Key)
  1497  			if urlEncodeListings {
  1498  				remote, err = url.QueryUnescape(remote)
  1499  				if err != nil {
  1500  					fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err)
  1501  					continue
  1502  				}
  1503  			}
  1504  			remote = f.opt.Enc.ToStandardPath(remote)
  1505  			if !strings.HasPrefix(remote, prefix) {
  1506  				fs.Logf(f, "Odd name received %q", remote)
  1507  				continue
  1508  			}
  1509  			remote = remote[len(prefix):]
  1510  			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
  1511  			if addBucket {
  1512  				remote = path.Join(bucket, remote)
  1513  			}
  1514  			// is this a directory marker?
  1515  			if isDirectory && object.Size != nil && *object.Size == 0 {
  1516  				continue // skip directory marker
  1517  			}
  1518  			err = fn(remote, object, false)
  1519  			if err != nil {
  1520  				return err
  1521  			}
  1522  		}
  1523  		if !aws.BoolValue(resp.IsTruncated) {
  1524  			break
  1525  		}
  1526  		// Use NextMarker if set, otherwise use last Key
  1527  		if resp.NextMarker == nil || *resp.NextMarker == "" {
  1528  			if len(resp.Contents) == 0 {
  1529  				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
  1530  			}
  1531  			marker = resp.Contents[len(resp.Contents)-1].Key
  1532  		} else {
  1533  			marker = resp.NextMarker
  1534  		}
  1535  		if urlEncodeListings {
  1536  			*marker, err = url.QueryUnescape(*marker)
  1537  			if err != nil {
  1538  				return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
  1539  			}
  1540  		}
  1541  	}
  1542  	return nil
  1543  }
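
        // Note the pagination contract used above: the v1 ListObjects API sets
        // IsTruncated when more results remain and the next page is requested
        // from NextMarker if the server returned one, falling back to the last
        // Key in Contents (decoded first if the listing was URL encoded).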
  1544  
  1545  // Convert a list item into a DirEntry
  1546  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
  1547  	if isDirectory {
  1548  		size := int64(0)
  1549  		if object.Size != nil {
  1550  			size = *object.Size
  1551  		}
  1552  		d := fs.NewDir(remote, time.Time{}).SetSize(size)
  1553  		return d, nil
  1554  	}
  1555  	o, err := f.newObjectWithInfo(ctx, remote, object)
  1556  	if err != nil {
  1557  		return nil, err
  1558  	}
  1559  	return o, nil
  1560  }
  1561  
  1562  // listDir lists files and directories into entries
  1563  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
  1564  	// List the objects and directories
  1565  	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *s3.Object, isDirectory bool) error {
  1566  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
  1567  		if err != nil {
  1568  			return err
  1569  		}
  1570  		if entry != nil {
  1571  			entries = append(entries, entry)
  1572  		}
  1573  		return nil
  1574  	})
  1575  	if err != nil {
  1576  		return nil, err
  1577  	}
  1578  	// bucket must be present if listing succeeded
  1579  	f.cache.MarkOK(bucket)
  1580  	return entries, nil
  1581  }
  1582  
  1583  // listBuckets lists the buckets into entries
  1584  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
  1585  	req := s3.ListBucketsInput{}
  1586  	var resp *s3.ListBucketsOutput
  1587  	err = f.pacer.Call(func() (bool, error) {
  1588  		resp, err = f.c.ListBucketsWithContext(ctx, &req)
  1589  		return f.shouldRetry(err)
  1590  	})
  1591  	if err != nil {
  1592  		return nil, err
  1593  	}
  1594  	for _, bucket := range resp.Buckets {
  1595  		bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name))
  1596  		f.cache.MarkOK(bucketName)
  1597  		d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate))
  1598  		entries = append(entries, d)
  1599  	}
  1600  	return entries, nil
  1601  }
  1602  
  1603  // List the objects and directories in dir into entries.  The
  1604  // entries can be returned in any order but should be for a
  1605  // complete directory.
  1606  //
  1607  // dir should be "" to list the root, and should not have
  1608  // trailing slashes.
  1609  //
  1610  // This should return ErrDirNotFound if the directory isn't
  1611  // found.
  1612  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1613  	bucket, directory := f.split(dir)
  1614  	if bucket == "" {
  1615  		if directory != "" {
  1616  			return nil, fs.ErrorListBucketRequired
  1617  		}
  1618  		return f.listBuckets(ctx)
  1619  	}
  1620  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
  1621  }
  1622  
  1623  // ListR lists the objects and directories of the Fs starting
  1624  // from dir recursively into out.
  1625  //
  1626  // dir should be "" to start from the root, and should not
  1627  // have trailing slashes.
  1628  //
  1629  // This should return ErrDirNotFound if the directory isn't
  1630  // found.
  1631  //
  1632  // It should call callback for each tranche of entries read.
  1633  // These need not be returned in any particular order.  If
  1634  // callback returns an error then the listing will stop
  1635  // immediately.
  1636  //
  1637  // Don't implement this unless you have a more efficient way
  1638  // of listing recursively than doing a directory traversal.
  1639  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
  1640  	bucket, directory := f.split(dir)
  1641  	list := walk.NewListRHelper(callback)
  1642  	listR := func(bucket, directory, prefix string, addBucket bool) error {
  1643  		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error {
  1644  			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
  1645  			if err != nil {
  1646  				return err
  1647  			}
  1648  			return list.Add(entry)
  1649  		})
  1650  	}
  1651  	if bucket == "" {
  1652  		entries, err := f.listBuckets(ctx)
  1653  		if err != nil {
  1654  			return err
  1655  		}
  1656  		for _, entry := range entries {
  1657  			err = list.Add(entry)
  1658  			if err != nil {
  1659  				return err
  1660  			}
  1661  			bucket := entry.Remote()
  1662  			err = listR(bucket, "", f.rootDirectory, true)
  1663  			if err != nil {
  1664  				return err
  1665  			}
  1666  			// bucket must be present if listing succeeded
  1667  			f.cache.MarkOK(bucket)
  1668  		}
  1669  	} else {
  1670  		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
  1671  		if err != nil {
  1672  			return err
  1673  		}
  1674  		// bucket must be present if listing succeeded
  1675  		f.cache.MarkOK(bucket)
  1676  	}
  1677  	return list.Flush()
  1678  }
  1679  
  1680  // Put the Object into the bucket
  1681  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1682  	// Temporary Object under construction
  1683  	o := &Object{
  1684  		fs:     f,
  1685  		remote: src.Remote(),
  1686  	}
  1687  	return o, o.Update(ctx, in, src, options...)
  1688  }
  1689  
  1690  // PutStream uploads the object of indeterminate size to the remote path with the modTime given
  1691  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1692  	return f.Put(ctx, in, src, options...)
  1693  }
  1694  
  1695  // Check if the bucket exists
  1696  //
  1697  // NB this can return incorrect results if called immediately after bucket deletion
  1698  func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) {
  1699  	req := s3.HeadBucketInput{
  1700  		Bucket: &bucket,
  1701  	}
  1702  	err := f.pacer.Call(func() (bool, error) {
  1703  		_, err := f.c.HeadBucketWithContext(ctx, &req)
  1704  		return f.shouldRetry(err)
  1705  	})
  1706  	if err == nil {
  1707  		return true, nil
  1708  	}
  1709  	if err, ok := err.(awserr.RequestFailure); ok {
  1710  		if err.StatusCode() == http.StatusNotFound {
  1711  			return false, nil
  1712  		}
  1713  	}
  1714  	return false, err
  1715  }
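
        // For example, a HeadBucket call on a missing bucket fails with a 404
        // RequestFailure which bucketExists maps to (false, nil), while other
        // failures (e.g. a 403 from missing permissions) are returned as errors.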
  1716  
  1717  // Mkdir creates the bucket if it doesn't exist
  1718  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1719  	bucket, _ := f.split(dir)
  1720  	return f.makeBucket(ctx, bucket)
  1721  }
  1722  
  1723  // makeBucket creates the bucket if it doesn't exist
  1724  func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
  1725  	return f.cache.Create(bucket, func() error {
  1726  		req := s3.CreateBucketInput{
  1727  			Bucket: &bucket,
  1728  			ACL:    &f.opt.BucketACL,
  1729  		}
  1730  		if f.opt.LocationConstraint != "" {
  1731  			req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
  1732  				LocationConstraint: &f.opt.LocationConstraint,
  1733  			}
  1734  		}
  1735  		err := f.pacer.Call(func() (bool, error) {
  1736  			_, err := f.c.CreateBucketWithContext(ctx, &req)
  1737  			return f.shouldRetry(err)
  1738  		})
  1739  		if err == nil {
  1740  			fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL)
  1741  		}
  1742  		if awsErr, ok := err.(awserr.Error); ok {
  1743  			if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" {
  1744  				err = nil
  1745  			}
  1746  		}
  1747  		return err
  1748  	}, func() (bool, error) {
  1749  		return f.bucketExists(ctx, bucket)
  1750  	})
  1751  }
  1752  
  1753  // Rmdir deletes the bucket if the fs is at the root
  1754  //
  1755  // Returns an error if it isn't empty
  1756  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1757  	bucket, directory := f.split(dir)
  1758  	if bucket == "" || directory != "" {
  1759  		return nil
  1760  	}
  1761  	return f.cache.Remove(bucket, func() error {
  1762  		req := s3.DeleteBucketInput{
  1763  			Bucket: &bucket,
  1764  		}
  1765  		err := f.pacer.Call(func() (bool, error) {
  1766  			_, err := f.c.DeleteBucketWithContext(ctx, &req)
  1767  			return f.shouldRetry(err)
  1768  		})
  1769  		if err == nil {
  1770  			fs.Infof(f, "Bucket %q deleted", bucket)
  1771  		}
  1772  		return err
  1773  	})
  1774  }
  1775  
  1776  // Precision of the remote
  1777  func (f *Fs) Precision() time.Duration {
  1778  	return time.Nanosecond
  1779  }
  1780  
  1781  // pathEscape escapes s as for a URL path.  It uses rest.URLPathEscape
  1782  // but also escapes '+' for S3 and Digital Ocean Spaces compatibility
  1783  func pathEscape(s string) string {
  1784  	return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
  1785  }
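
        // For example (assuming rest.URLPathEscape escapes each path segment
        // like net/url's PathEscape does), pathEscape("a+b c/d") yields
        // "a%2Bb%20c/d": the space is escaped by the path escaper and the '+'
        // by the extra replacement above, so S3 doesn't decode '+' as a space
        // in the copy source.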
  1786  
  1787  // copy does a server side copy
  1788  //
  1789  // It adds the boilerplate to the req passed in and calls the s3
  1790  // method
  1791  func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) error {
  1792  	req.Bucket = &dstBucket
  1793  	req.ACL = &f.opt.ACL
  1794  	req.Key = &dstPath
  1795  	source := pathEscape(path.Join(srcBucket, srcPath))
  1796  	req.CopySource = &source
  1797  	if f.opt.ServerSideEncryption != "" {
  1798  		req.ServerSideEncryption = &f.opt.ServerSideEncryption
  1799  	}
  1800  	if f.opt.SSEKMSKeyID != "" {
  1801  		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
  1802  	}
  1803  	if req.StorageClass == nil && f.opt.StorageClass != "" {
  1804  		req.StorageClass = &f.opt.StorageClass
  1805  	}
  1806  
  1807  	if srcSize >= int64(f.opt.CopyCutoff) {
  1808  		return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
  1809  	}
  1810  	return f.pacer.Call(func() (bool, error) {
  1811  		_, err := f.c.CopyObjectWithContext(ctx, req)
  1812  		return f.shouldRetry(err)
  1813  	})
  1814  }
  1815  
  1816  func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
  1817  	start := partIndex * partSize
  1818  	var ends string
  1819  	if partIndex == numParts-1 {
  1820  		if totalSize >= 1 {
  1821  			ends = strconv.FormatInt(totalSize-1, 10)
  1822  		}
  1823  	} else {
  1824  		ends = strconv.FormatInt(start+partSize-1, 10)
  1825  	}
  1826  	return fmt.Sprintf("bytes=%v-%v", start, ends)
  1827  }
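
        // For example, with a 5 MiB part size, 3 parts and a 12 MiB source:
        //
        //	calculateRange(5<<20, 0, 3, 12<<20) // "bytes=0-5242879"
        //	calculateRange(5<<20, 2, 3, 12<<20) // "bytes=10485760-12582911"
        //
        // so the final part is truncated to the remaining bytes of the source.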
  1828  
  1829  func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, srcSize int64) (err error) {
  1830  	var cout *s3.CreateMultipartUploadOutput
  1831  	if err := f.pacer.Call(func() (bool, error) {
  1832  		var err error
  1833  		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
  1834  			Bucket: &dstBucket,
  1835  			Key:    &dstPath,
  1836  		})
  1837  		return f.shouldRetry(err)
  1838  	}); err != nil {
  1839  		return err
  1840  	}
  1841  	uid := cout.UploadId
  1842  
  1843  	defer func() {
  1844  		if err != nil {
  1845  			// We can try to abort the upload, but ignore the error.
  1846  			fs.Debugf(nil, "Cancelling multipart copy")
  1847  			_ = f.pacer.Call(func() (bool, error) {
  1848  				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
  1849  					Bucket:       &dstBucket,
  1850  					Key:          &dstPath,
  1851  					UploadId:     uid,
  1852  					RequestPayer: req.RequestPayer,
  1853  				})
  1854  				return f.shouldRetry(err)
  1855  			})
  1856  		}
  1857  	}()
  1858  
  1859  	partSize := int64(f.opt.CopyCutoff)
  1860  	numParts := (srcSize-1)/partSize + 1
  1861  
  1862  	var parts []*s3.CompletedPart
  1863  	for partNum := int64(1); partNum <= numParts; partNum++ {
  1864  		if err := f.pacer.Call(func() (bool, error) {
  1865  			partNum := partNum
  1866  			uploadPartReq := &s3.UploadPartCopyInput{
  1867  				Bucket:          &dstBucket,
  1868  				Key:             &dstPath,
  1869  				PartNumber:      &partNum,
  1870  				UploadId:        uid,
  1871  				CopySourceRange: aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)),
  1872  				// Args copied from req
  1873  				CopySource:                     req.CopySource,
  1874  				CopySourceIfMatch:              req.CopySourceIfMatch,
  1875  				CopySourceIfModifiedSince:      req.CopySourceIfModifiedSince,
  1876  				CopySourceIfNoneMatch:          req.CopySourceIfNoneMatch,
  1877  				CopySourceIfUnmodifiedSince:    req.CopySourceIfUnmodifiedSince,
  1878  				CopySourceSSECustomerAlgorithm: req.CopySourceSSECustomerAlgorithm,
  1879  				CopySourceSSECustomerKey:       req.CopySourceSSECustomerKey,
  1880  				CopySourceSSECustomerKeyMD5:    req.CopySourceSSECustomerKeyMD5,
  1881  				RequestPayer:                   req.RequestPayer,
  1882  				SSECustomerAlgorithm:           req.SSECustomerAlgorithm,
  1883  				SSECustomerKey:                 req.SSECustomerKey,
  1884  				SSECustomerKeyMD5:              req.SSECustomerKeyMD5,
  1885  			}
  1886  			uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq)
  1887  			if err != nil {
  1888  				return f.shouldRetry(err)
  1889  			}
  1890  			parts = append(parts, &s3.CompletedPart{
  1891  				PartNumber: &partNum,
  1892  				ETag:       uout.CopyPartResult.ETag,
  1893  			})
  1894  			return false, nil
  1895  		}); err != nil {
  1896  			return err
  1897  		}
  1898  	}
  1899  
  1900  	return f.pacer.Call(func() (bool, error) {
  1901  		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
  1902  			Bucket: &dstBucket,
  1903  			Key:    &dstPath,
  1904  			MultipartUpload: &s3.CompletedMultipartUpload{
  1905  				Parts: parts,
  1906  			},
  1907  			RequestPayer: req.RequestPayer,
  1908  			UploadId:     uid,
  1909  		})
  1910  		return f.shouldRetry(err)
  1911  	})
  1912  }
  1913  
  1914  // Copy src to this remote using server side copy operations.
  1915  //
  1916  // This is stored with the remote path given
  1917  //
  1918  // It returns the destination Object and a possible error
  1919  //
  1920  // Will only be called if src.Fs().Name() == f.Name()
  1921  //
  1922  // If it isn't possible then return fs.ErrorCantCopy
  1923  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1924  	dstBucket, dstPath := f.split(remote)
  1925  	err := f.makeBucket(ctx, dstBucket)
  1926  	if err != nil {
  1927  		return nil, err
  1928  	}
  1929  	srcObj, ok := src.(*Object)
  1930  	if !ok {
  1931  		fs.Debugf(src, "Can't copy - not same remote type")
  1932  		return nil, fs.ErrorCantCopy
  1933  	}
  1934  	srcBucket, srcPath := srcObj.split()
  1935  	req := s3.CopyObjectInput{
  1936  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  1937  	}
  1938  	err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj.Size())
  1939  	if err != nil {
  1940  		return nil, err
  1941  	}
  1942  	return f.NewObject(ctx, remote)
  1943  }
  1944  
  1945  // Hashes returns the supported hash sets.
  1946  func (f *Fs) Hashes() hash.Set {
  1947  	return hash.Set(hash.MD5)
  1948  }
  1949  
  1950  func (f *Fs) getMemoryPool(size int64) *pool.Pool {
  1951  	if size == int64(f.opt.ChunkSize) {
  1952  		return f.pool
  1953  	}
  1954  
  1955  	return pool.New(
  1956  		time.Duration(f.opt.MemoryPoolFlushTime),
  1957  		int(size),
  1958  		f.opt.UploadConcurrency*fs.Config.Transfers,
  1959  		f.opt.MemoryPoolUseMmap,
  1960  	)
  1961  }
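
        // For example, uploads using the default chunk size share the long lived
        // f.pool, whereas a multipart upload which had to grow its part size (see
        // uploadMultipart) gets a dedicated pool sized for its larger parts.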
  1962  
  1963  // ------------------------------------------------------------
  1964  
  1965  // Fs returns the parent Fs
  1966  func (o *Object) Fs() fs.Info {
  1967  	return o.fs
  1968  }
  1969  
  1970  // Return a string version
  1971  func (o *Object) String() string {
  1972  	if o == nil {
  1973  		return "<nil>"
  1974  	}
  1975  	return o.remote
  1976  }
  1977  
  1978  // Remote returns the remote path
  1979  func (o *Object) Remote() string {
  1980  	return o.remote
  1981  }
  1982  
  1983  var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
  1984  
  1985  // Hash returns the MD5 sum of an object as a lowercase hex string
  1986  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1987  	if t != hash.MD5 {
  1988  		return "", hash.ErrUnsupported
  1989  	}
  1990  	hash := strings.Trim(strings.ToLower(o.etag), `"`)
  1991  	// Check the etag is a valid md5sum
  1992  	if !matchMd5.MatchString(hash) {
  1993  		err := o.readMetaData(ctx)
  1994  		if err != nil {
  1995  			return "", err
  1996  		}
  1997  
  1998  		if md5sum, ok := o.meta[metaMD5Hash]; ok {
  1999  			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
  2000  			if err != nil {
  2001  				return "", err
  2002  			}
  2003  			hash = hex.EncodeToString(md5sumBytes)
  2004  		} else {
  2005  			hash = ""
  2006  		}
  2007  	}
  2008  	return hash, nil
  2009  }
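
        // For example, a single part upload of "hello" has the ETag
        // "5d41402abc4b2a76b9719d911017c592" which is its MD5, whereas a
        // multipart upload produces an ETag like "9e107d9d372bb6826bd81d3542a419d6-38"
        // which fails the regexp above, so the MD5 is recovered from the
        // metaMD5Hash metadata instead (or "" if it wasn't stored).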
  2010  
  2011  // Size returns the size of an object in bytes
  2012  func (o *Object) Size() int64 {
  2013  	return o.bytes
  2014  }
  2015  
  2016  // readMetaData gets the metadata if it hasn't already been fetched
  2017  //
  2018  // it also sets the info
  2019  func (o *Object) readMetaData(ctx context.Context) (err error) {
  2020  	if o.meta != nil {
  2021  		return nil
  2022  	}
  2023  	bucket, bucketPath := o.split()
  2024  	req := s3.HeadObjectInput{
  2025  		Bucket: &bucket,
  2026  		Key:    &bucketPath,
  2027  	}
  2028  	var resp *s3.HeadObjectOutput
  2029  	err = o.fs.pacer.Call(func() (bool, error) {
  2030  		var err error
  2031  		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
  2032  		return o.fs.shouldRetry(err)
  2033  	})
  2034  	if err != nil {
  2035  		if awsErr, ok := err.(awserr.RequestFailure); ok {
  2036  			if awsErr.StatusCode() == http.StatusNotFound {
  2037  				// NotFound indicates bucket was OK
  2038  				// NoSuchBucket would be returned if bucket was bad
  2039  				if awsErr.Code() == "NotFound" {
  2040  					o.fs.cache.MarkOK(bucket)
  2041  				}
  2042  				return fs.ErrorObjectNotFound
  2043  			}
  2044  		}
  2045  		return err
  2046  	}
  2047  	o.fs.cache.MarkOK(bucket)
  2048  	var size int64
  2049  	// Ignore missing Content-Length assuming it is 0
  2050  	// Some versions of Ceph do this due to their Apache proxies
  2051  	if resp.ContentLength != nil {
  2052  		size = *resp.ContentLength
  2053  	}
  2054  	o.etag = aws.StringValue(resp.ETag)
  2055  	o.bytes = size
  2056  	o.meta = resp.Metadata
  2057  	if o.meta == nil {
  2058  		o.meta = map[string]*string{}
  2059  	}
  2060  	o.storageClass = aws.StringValue(resp.StorageClass)
  2061  	if resp.LastModified == nil {
  2062  		fs.Logf(o, "Failed to read last modified from HEAD")
  2063  		o.lastModified = time.Now()
  2064  	} else {
  2065  		o.lastModified = *resp.LastModified
  2066  	}
  2067  	o.mimeType = aws.StringValue(resp.ContentType)
  2068  	return nil
  2069  }
  2070  
  2071  // ModTime returns the modification time of the object
  2072  //
  2073  // It attempts to read the object's mtime and if that isn't present the
  2074  // LastModified returned in the http headers
  2075  func (o *Object) ModTime(ctx context.Context) time.Time {
  2076  	if fs.Config.UseServerModTime {
  2077  		return o.lastModified
  2078  	}
  2079  	err := o.readMetaData(ctx)
  2080  	if err != nil {
  2081  		fs.Logf(o, "Failed to read metadata: %v", err)
  2082  		return time.Now()
  2083  	}
  2084  	// read mtime out of metadata if available
  2085  	d, ok := o.meta[metaMtime]
  2086  	if !ok || d == nil {
  2087  		// fs.Debugf(o, "No metadata")
  2088  		return o.lastModified
  2089  	}
  2090  	modTime, err := swift.FloatStringToTime(*d)
  2091  	if err != nil {
  2092  		fs.Logf(o, "Failed to read mtime from object: %v", err)
  2093  		return o.lastModified
  2094  	}
  2095  	return modTime
  2096  }
  2097  
  2098  // SetModTime sets the modification time of the local fs object
  2099  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  2100  	err := o.readMetaData(ctx)
  2101  	if err != nil {
  2102  		return err
  2103  	}
  2104  	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
  2105  
  2106  	// Can't update metadata here, so return this error to force a recopy
  2107  	if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
  2108  		return fs.ErrorCantSetModTime
  2109  	}
  2110  
  2111  	// Copy the object to itself to update the metadata
  2112  	bucket, bucketPath := o.split()
  2113  	req := s3.CopyObjectInput{
  2114  		ContentType:       aws.String(fs.MimeType(ctx, o)), // Guess the content type
  2115  		Metadata:          o.meta,
  2116  		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
  2117  	}
  2118  	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
  2119  }
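
        // swift.TimeToFloatString stores the mtime as a unix time with a decimal
        // fraction, e.g. "1500000000.000000001", which swift.FloatStringToTime in
        // ModTime above parses back with nanosecond precision.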
  2120  
  2121  // Storable returns a boolean indicating whether this object is storable
  2122  func (o *Object) Storable() bool {
  2123  	return true
  2124  }
  2125  
  2126  // Open an object for read
  2127  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  2128  	bucket, bucketPath := o.split()
  2129  	req := s3.GetObjectInput{
  2130  		Bucket: &bucket,
  2131  		Key:    &bucketPath,
  2132  	}
  2133  	if o.fs.opt.SSECustomerAlgorithm != "" {
  2134  		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
  2135  	}
  2136  	if o.fs.opt.SSECustomerKey != "" {
  2137  		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
  2138  	}
  2139  	if o.fs.opt.SSECustomerKeyMD5 != "" {
  2140  		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
  2141  	}
  2142  	httpReq, resp := o.fs.c.GetObjectRequest(&req)
  2143  	fs.FixRangeOption(options, o.bytes)
  2144  	for _, option := range options {
  2145  		switch option.(type) {
  2146  		case *fs.RangeOption, *fs.SeekOption:
  2147  			_, value := option.Header()
  2148  			req.Range = &value
  2149  		case *fs.HTTPOption:
  2150  			key, value := option.Header()
  2151  			httpReq.HTTPRequest.Header.Add(key, value)
  2152  		default:
  2153  			if option.Mandatory() {
  2154  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  2155  			}
  2156  		}
  2157  	}
  2158  	err = o.fs.pacer.Call(func() (bool, error) {
  2159  		var err error
  2160  		httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
  2161  		err = httpReq.Send()
  2162  		return o.fs.shouldRetry(err)
  2163  	})
  2164  	if err, ok := err.(awserr.RequestFailure); ok {
  2165  		if err.Code() == "InvalidObjectState" {
  2166  			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
  2167  		}
  2168  	}
  2169  	if err != nil {
  2170  		return nil, err
  2171  	}
  2172  	return resp.Body, nil
  2173  }
  2174  
  2175  var warnStreamUpload sync.Once
  2176  
  2177  func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
  2178  	f := o.fs
  2179  
  2180  	// make concurrency machinery
  2181  	concurrency := f.opt.UploadConcurrency
  2182  	if concurrency < 1 {
  2183  		concurrency = 1
  2184  	}
  2185  	tokens := pacer.NewTokenDispenser(concurrency)
  2186  
  2187  	// calculate size of parts
  2188  	partSize := int(f.opt.ChunkSize)
  2189  
  2190  	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
  2191  	// buffers here (default 5 MiB). With the maximum number of parts (10,000) this limits the
  2192  	// file size to about 48 GiB (10,000 parts of 5 MiB), which seems a reasonable limit.
  2193  	if size == -1 {
  2194  		warnStreamUpload.Do(func() {
  2195  			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
  2196  				f.opt.ChunkSize, fs.SizeSuffix(partSize*maxUploadParts))
  2197  		})
  2198  	} else {
  2199  		// Adjust partSize until the number of parts is small enough.
  2200  		if size/int64(partSize) >= maxUploadParts {
  2201  			// Calculate partition size rounded up to the nearest MiB
  2202  			partSize = int((((size / maxUploadParts) >> 20) + 1) << 20)
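        			// e.g. for a 100 GiB source: 100 GiB / 10,000 parts
        			// is about 10.24 MiB, which rounds up to an 11 MiB
        			// part size (roughly 9,310 parts).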
  2203  		}
  2204  	}
  2205  
  2206  	memPool := f.getMemoryPool(int64(partSize))
  2207  
  2208  	var mReq s3.CreateMultipartUploadInput
  2209  	structs.SetFrom(&mReq, req)
  2210  	var cout *s3.CreateMultipartUploadOutput
  2211  	err = f.pacer.Call(func() (bool, error) {
  2212  		var err error
  2213  		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
  2214  		return f.shouldRetry(err)
  2215  	})
  2216  	if err != nil {
  2217  		return errors.Wrap(err, "multipart upload failed to initialise")
  2218  	}
  2219  	uid := cout.UploadId
  2220  
  2221  	defer func() {
  2222  		if o.fs.opt.LeavePartsOnError {
  2223  			return
  2224  		}
  2225  		if err != nil {
  2226  			// We can try to abort the upload, but ignore the error.
  2227  			fs.Debugf(o, "Cancelling multipart upload")
  2228  			errCancel := f.pacer.Call(func() (bool, error) {
  2229  				_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
  2230  					Bucket:       req.Bucket,
  2231  					Key:          req.Key,
  2232  					UploadId:     uid,
  2233  					RequestPayer: req.RequestPayer,
  2234  				})
  2235  				return f.shouldRetry(err)
  2236  			})
  2237  			if errCancel != nil {
  2238  				fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
  2239  			}
  2240  		}
  2241  	}()
  2242  
  2243  	var (
  2244  		g, gCtx  = errgroup.WithContext(ctx)
  2245  		finished = false
  2246  		partsMu  sync.Mutex // to protect parts
  2247  		parts    []*s3.CompletedPart
  2248  		off      int64
  2249  	)
  2250  
  2251  	for partNum := int64(1); !finished; partNum++ {
  2252  		// Get a block of memory from the pool and token which limits concurrency.
  2253  		tokens.Get()
  2254  		buf := memPool.Get()
  2255  
  2256  		free := func() {
  2257  			// return the memory and token
  2258  			memPool.Put(buf)
  2259  			tokens.Put()
  2260  		}
  2261  
  2262  		// Fail fast: if an errgroup managed function returns an error,
  2263  		// gCtx is cancelled and there is no point in uploading the other parts.
  2264  		if gCtx.Err() != nil {
  2265  			free()
  2266  			break
  2267  		}
  2268  
  2269  		// Read the chunk
  2270  		var n int
  2271  		n, err = readers.ReadFill(in, buf) // this can never return 0, nil
  2272  		if err == io.EOF {
  2273  			if n == 0 && partNum != 1 { // end if no data and if not first chunk
  2274  				free()
  2275  				break
  2276  			}
  2277  			finished = true
  2278  		} else if err != nil {
  2279  			free()
  2280  			return errors.Wrap(err, "multipart upload failed to read source")
  2281  		}
  2282  		buf = buf[:n]
  2283  
  2284  		partNum := partNum
  2285  		fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
  2286  		off += int64(n)
  2287  		g.Go(func() (err error) {
  2288  			defer free()
  2289  			partLength := int64(len(buf))
  2290  
  2291  			// create checksum of buffer for integrity checking
  2292  			md5sumBinary := md5.Sum(buf)
  2293  			md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
  2294  
  2295  			err = f.pacer.Call(func() (bool, error) {
  2296  				uploadPartReq := &s3.UploadPartInput{
  2297  					Body:                 bytes.NewReader(buf),
  2298  					Bucket:               req.Bucket,
  2299  					Key:                  req.Key,
  2300  					PartNumber:           &partNum,
  2301  					UploadId:             uid,
  2302  					ContentMD5:           &md5sum,
  2303  					ContentLength:        &partLength,
  2304  					RequestPayer:         req.RequestPayer,
  2305  					SSECustomerAlgorithm: req.SSECustomerAlgorithm,
  2306  					SSECustomerKey:       req.SSECustomerKey,
  2307  					SSECustomerKeyMD5:    req.SSECustomerKeyMD5,
  2308  				}
  2309  				uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
  2310  				if err != nil {
  2311  					if partNum <= int64(concurrency) {
  2312  						return f.shouldRetry(err)
  2313  					}
  2314  					// retry all chunks once we have done the first batch
  2315  					return true, err
  2316  				}
  2317  				partsMu.Lock()
  2318  				parts = append(parts, &s3.CompletedPart{
  2319  					PartNumber: &partNum,
  2320  					ETag:       uout.ETag,
  2321  				})
  2322  				partsMu.Unlock()
  2323  
  2324  				return false, nil
  2325  			})
  2326  			if err != nil {
  2327  				return errors.Wrap(err, "multipart upload failed to upload part")
  2328  			}
  2329  			return nil
  2330  		})
  2331  	}
  2332  	err = g.Wait()
  2333  	if err != nil {
  2334  		return err
  2335  	}
  2336  
  2337  	// sort the completed parts by part number
  2338  	sort.Slice(parts, func(i, j int) bool {
  2339  		return *parts[i].PartNumber < *parts[j].PartNumber
  2340  	})
  2341  
  2342  	err = f.pacer.Call(func() (bool, error) {
  2343  		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
  2344  			Bucket: req.Bucket,
  2345  			Key:    req.Key,
  2346  			MultipartUpload: &s3.CompletedMultipartUpload{
  2347  				Parts: parts,
  2348  			},
  2349  			RequestPayer: req.RequestPayer,
  2350  			UploadId:     uid,
  2351  		})
  2352  		return f.shouldRetry(err)
  2353  	})
  2354  	if err != nil {
  2355  		return errors.Wrap(err, "multipart upload failed to finalise")
  2356  	}
  2357  	return nil
  2358  }
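
        // The part uploads above are throttled with a token scheme: the
        // dispenser starts with `concurrency` tokens, one is taken before each
        // chunk is read into a buffer and put back once the part has been sent.
        // A minimal sketch of the same pattern using a plain buffered channel
        // (illustrative names, not rclone APIs):
        //
        //	tokens := make(chan struct{}, concurrency)
        //	for i := 0; i < concurrency; i++ {
        //		tokens <- struct{}{}
        //	}
        //	g, gCtx := errgroup.WithContext(ctx)
        //	for _, part := range parts {
        //		<-tokens // block until an upload slot is free
        //		part := part
        //		g.Go(func() error {
        //			defer func() { tokens <- struct{}{} }() // release the slot
        //			return uploadOne(gCtx, part)
        //		})
        //	}
        //	err := g.Wait()
        //
        // Reading each chunk before g.Go is what bounds memory use: at most
        // `concurrency` buffers are ever in flight.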
  2359  
  2360  // Update the Object from in with modTime and size
  2361  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  2362  	bucket, bucketPath := o.split()
  2363  	err := o.fs.makeBucket(ctx, bucket)
  2364  	if err != nil {
  2365  		return err
  2366  	}
  2367  	modTime := src.ModTime(ctx)
  2368  	size := src.Size()
  2369  
  2370  	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
  2371  
  2372  	// Set the mtime in the meta data
  2373  	metadata := map[string]*string{
  2374  		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
  2375  	}
  2376  
  2377  	// read the md5sum if available
  2378  	// - for non multipart
  2379  	//    - so we can add a ContentMD5
  2380  	// - for multipart provided checksums aren't disabled
  2381  	//    - so we can add the md5sum in the metadata as metaMD5Hash
  2382  	var md5sum string
  2383  	if !multipart || !o.fs.opt.DisableChecksum {
  2384  		hash, err := src.Hash(ctx, hash.MD5)
  2385  		if err == nil && matchMd5.MatchString(hash) {
  2386  			hashBytes, err := hex.DecodeString(hash)
  2387  			if err == nil {
  2388  				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
  2389  				if multipart {
  2390  					metadata[metaMD5Hash] = &md5sum
  2391  				}
  2392  			}
  2393  		}
  2394  	}
  2395  
  2396  	// Guess the content type
  2397  	mimeType := fs.MimeType(ctx, src)
  2398  	req := s3.PutObjectInput{
  2399  		Bucket:      &bucket,
  2400  		ACL:         &o.fs.opt.ACL,
  2401  		Key:         &bucketPath,
  2402  		ContentType: &mimeType,
  2403  		Metadata:    metadata,
  2404  	}
  2405  	if md5sum != "" {
  2406  		req.ContentMD5 = &md5sum
  2407  	}
  2408  	if o.fs.opt.ServerSideEncryption != "" {
  2409  		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
  2410  	}
  2411  	if o.fs.opt.SSECustomerAlgorithm != "" {
  2412  		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
  2413  	}
  2414  	if o.fs.opt.SSECustomerKey != "" {
  2415  		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
  2416  	}
  2417  	if o.fs.opt.SSECustomerKeyMD5 != "" {
  2418  		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
  2419  	}
  2420  	if o.fs.opt.SSEKMSKeyID != "" {
  2421  		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
  2422  	}
  2423  	if o.fs.opt.StorageClass != "" {
  2424  		req.StorageClass = &o.fs.opt.StorageClass
  2425  	}
  2426  	// Apply upload options
  2427  	for _, option := range options {
  2428  		key, value := option.Header()
  2429  		lowerKey := strings.ToLower(key)
  2430  		switch lowerKey {
  2431  		case "":
  2432  			// ignore
  2433  		case "cache-control":
  2434  			req.CacheControl = aws.String(value)
  2435  		case "content-disposition":
  2436  			req.ContentDisposition = aws.String(value)
  2437  		case "content-encoding":
  2438  			req.ContentEncoding = aws.String(value)
  2439  		case "content-language":
  2440  			req.ContentLanguage = aws.String(value)
  2441  		case "content-type":
  2442  			req.ContentType = aws.String(value)
  2443  		case "x-amz-tagging":
  2444  			req.Tagging = aws.String(value)
  2445  		default:
  2446  			const amzMetaPrefix = "x-amz-meta-"
  2447  			if strings.HasPrefix(lowerKey, amzMetaPrefix) {
  2448  				metaKey := lowerKey[len(amzMetaPrefix):]
  2449  				req.Metadata[metaKey] = aws.String(value)
  2450  			} else {
  2451  				fs.Errorf(o, "Don't know how to set key %q on upload", key)
  2452  			}
  2453  		}
  2454  	}
  2455  
  2456  	if multipart {
  2457  		err = o.uploadMultipart(ctx, &req, size, in)
  2458  		if err != nil {
  2459  			return err
  2460  		}
  2461  	} else {
  2462  
  2463  		// Create the request
  2464  		putObj, _ := o.fs.c.PutObjectRequest(&req)
  2465  
  2466  		// Sign it so we can upload using a presigned request.
  2467  		//
  2468  		// Note the SDK doesn't currently support streaming to
  2469  		// PutObject so we'll use this work-around.
  2470  		url, headers, err := putObj.PresignRequest(15 * time.Minute)
  2471  		if err != nil {
  2472  			return errors.Wrap(err, "s3 upload: sign request")
  2473  		}
  2474  
  2475  		if o.fs.opt.V2Auth && headers == nil {
  2476  			headers = putObj.HTTPRequest.Header
  2477  		}
  2478  
  2479  		// Set the request body to nil if empty so we don't use chunked encoding
  2480  		if size == 0 {
  2481  			in = nil
  2482  		}
  2483  
  2484  		// create the vanilla http request
  2485  		httpReq, err := http.NewRequest("PUT", url, in)
  2486  		if err != nil {
  2487  			return errors.Wrap(err, "s3 upload: new request")
  2488  		}
  2489  		httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
  2490  
  2491  		// set the headers we signed and the length
  2492  		httpReq.Header = headers
  2493  		httpReq.ContentLength = size
  2494  
  2495  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  2496  			resp, err := o.fs.srv.Do(httpReq)
  2497  			if err != nil {
  2498  				return o.fs.shouldRetry(err)
  2499  			}
  2500  			body, err := rest.ReadBody(resp)
  2501  			if err != nil {
  2502  				return o.fs.shouldRetry(err)
  2503  			}
  2504  			if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
  2505  				return false, nil
  2506  			}
  2507  			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
  2508  			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
  2509  		})
  2510  		if err != nil {
  2511  			return err
  2512  		}
  2513  	}
  2514  
  2515  	// Read the metadata from the newly created object
  2516  	o.meta = nil // wipe old metadata
  2517  	err = o.readMetaData(ctx)
  2518  	return err
  2519  }
  2520  
  2521  // Remove an object
  2522  func (o *Object) Remove(ctx context.Context) error {
  2523  	bucket, bucketPath := o.split()
  2524  	req := s3.DeleteObjectInput{
  2525  		Bucket: &bucket,
  2526  		Key:    &bucketPath,
  2527  	}
  2528  	err := o.fs.pacer.Call(func() (bool, error) {
  2529  		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
  2530  		return o.fs.shouldRetry(err)
  2531  	})
  2532  	return err
  2533  }
  2534  
  2535  // MimeType of an Object if known, "" otherwise
  2536  func (o *Object) MimeType(ctx context.Context) string {
  2537  	err := o.readMetaData(ctx)
  2538  	if err != nil {
  2539  		fs.Logf(o, "Failed to read metadata: %v", err)
  2540  		return ""
  2541  	}
  2542  	return o.mimeType
  2543  }
  2544  
  2545  // SetTier changes the storage class of the object
  2546  func (o *Object) SetTier(tier string) (err error) {
  2547  	ctx := context.TODO()
  2548  	tier = strings.ToUpper(tier)
  2549  	bucket, bucketPath := o.split()
  2550  	req := s3.CopyObjectInput{
  2551  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  2552  		StorageClass:      aws.String(tier),
  2553  	}
  2554  	err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o.bytes)
  2555  	if err != nil {
  2556  		return err
  2557  	}
  2558  	o.storageClass = tier
  2559  	return err
  2560  }
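
        // For example, o.SetTier("standard_ia") rewrites the object in place as
        // storage class "STANDARD_IA" using a server side copy, leaving the data
        // and metadata intact.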
  2561  
  2562  // GetTier returns the storage class as a string
  2563  func (o *Object) GetTier() string {
  2564  	if o.storageClass == "" {
  2565  		return "STANDARD"
  2566  	}
  2567  	return o.storageClass
  2568  }
  2569  
  2570  // Check the interfaces are satisfied
  2571  var (
  2572  	_ fs.Fs          = &Fs{}
  2573  	_ fs.Copier      = &Fs{}
  2574  	_ fs.PutStreamer = &Fs{}
  2575  	_ fs.ListRer     = &Fs{}
  2576  	_ fs.Object      = &Object{}
  2577  	_ fs.MimeTyper   = &Object{}
  2578  	_ fs.GetTierer   = &Object{}
  2579  	_ fs.SetTierer   = &Object{}
  2580  )