github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/s3/s3.go

     1  // Package s3 provides an interface to Amazon S3 object storage
     2  package s3
     3  
     4  // FIXME need to prevent anything but ListDir working for s3://
     5  
     6  /*
     7  Progress of port to aws-sdk
     8  
     9   * Don't really need o.meta at all?
    10  
    11  What happens if you CTRL-C a multipart upload
    12    * get an incomplete upload
    13    * disappears when you delete the bucket
    14  */
    15  
    16  import (
    17  	"context"
    18  	"encoding/base64"
    19  	"encoding/hex"
    20  	"fmt"
    21  	"io"
    22  	"net/http"
    23  	"path"
    24  	"regexp"
    25  	"strings"
    26  	"sync"
    27  	"time"
    28  
    29  	"github.com/aws/aws-sdk-go/aws"
    30  	"github.com/aws/aws-sdk-go/aws/awserr"
    31  	"github.com/aws/aws-sdk-go/aws/corehandlers"
    32  	"github.com/aws/aws-sdk-go/aws/credentials"
    33  	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
    34  	"github.com/aws/aws-sdk-go/aws/defaults"
    35  	"github.com/aws/aws-sdk-go/aws/ec2metadata"
    36  	"github.com/aws/aws-sdk-go/aws/request"
    37  	"github.com/aws/aws-sdk-go/aws/session"
    38  	"github.com/aws/aws-sdk-go/service/s3"
    39  	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    40  	"github.com/ncw/rclone/fs"
    41  	"github.com/ncw/rclone/fs/config/configmap"
    42  	"github.com/ncw/rclone/fs/config/configstruct"
    43  	"github.com/ncw/rclone/fs/fserrors"
    44  	"github.com/ncw/rclone/fs/fshttp"
    45  	"github.com/ncw/rclone/fs/hash"
    46  	"github.com/ncw/rclone/fs/walk"
    47  	"github.com/ncw/rclone/lib/pacer"
    48  	"github.com/ncw/rclone/lib/rest"
    49  	"github.com/ncw/swift"
    50  	"github.com/pkg/errors"
    51  )
    52  
    53  // Register with Fs
    54  func init() {
    55  	fs.Register(&fs.RegInfo{
    56  		Name:        "s3",
    57  		Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
    58  		NewFs:       NewFs,
    59  		Options: []fs.Option{{
    60  			Name: fs.ConfigProvider,
    61  			Help: "Choose your S3 provider.",
    62  			Examples: []fs.OptionExample{{
    63  				Value: "AWS",
    64  				Help:  "Amazon Web Services (AWS) S3",
    65  			}, {
    66  				Value: "Alibaba",
    67  				Help:  "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
    68  			}, {
    69  				Value: "Ceph",
    70  				Help:  "Ceph Object Storage",
    71  			}, {
    72  				Value: "DigitalOcean",
    73  				Help:  "Digital Ocean Spaces",
    74  			}, {
    75  				Value: "Dreamhost",
    76  				Help:  "Dreamhost DreamObjects",
    77  			}, {
    78  				Value: "IBMCOS",
    79  				Help:  "IBM COS S3",
    80  			}, {
    81  				Value: "Minio",
    82  				Help:  "Minio Object Storage",
    83  			}, {
    84  				Value: "Netease",
    85  				Help:  "Netease Object Storage (NOS)",
    86  			}, {
    87  				Value: "Wasabi",
    88  				Help:  "Wasabi Object Storage",
    89  			}, {
    90  				Value: "Other",
    91  				Help:  "Any other S3 compatible provider",
    92  			}},
    93  		}, {
    94  			Name:    "env_auth",
    95  			Help:    "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
    96  			Default: false,
    97  			Examples: []fs.OptionExample{{
    98  				Value: "false",
    99  				Help:  "Enter AWS credentials in the next step",
   100  			}, {
   101  				Value: "true",
   102  				Help:  "Get AWS credentials from the environment (env vars or IAM)",
   103  			}},
   104  		}, {
   105  			Name: "access_key_id",
   106  			Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
   107  		}, {
   108  			Name: "secret_access_key",
   109  			Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
   110  		}, {
   111  			Name:     "region",
   112  			Help:     "Region to connect to.",
   113  			Provider: "AWS",
   114  			Examples: []fs.OptionExample{{
   115  				Value: "us-east-1",
   116  				Help:  "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
   117  			}, {
   118  				Value: "us-east-2",
   119  				Help:  "US East (Ohio) Region\nNeeds location constraint us-east-2.",
   120  			}, {
   121  				Value: "us-west-2",
   122  				Help:  "US West (Oregon) Region\nNeeds location constraint us-west-2.",
   123  			}, {
   124  				Value: "us-west-1",
   125  				Help:  "US West (Northern California) Region\nNeeds location constraint us-west-1.",
   126  			}, {
   127  				Value: "ca-central-1",
   128  				Help:  "Canada (Central) Region\nNeeds location constraint ca-central-1.",
   129  			}, {
   130  				Value: "eu-west-1",
   131  				Help:  "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
   132  			}, {
   133  				Value: "eu-west-2",
   134  				Help:  "EU (London) Region\nNeeds location constraint eu-west-2.",
   135  			}, {
   136  				Value: "eu-north-1",
   137  				Help:  "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
   138  			}, {
   139  				Value: "eu-central-1",
   140  				Help:  "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
   141  			}, {
   142  				Value: "ap-southeast-1",
   143  				Help:  "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
   144  			}, {
   145  				Value: "ap-southeast-2",
   146  				Help:  "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
   147  			}, {
   148  				Value: "ap-northeast-1",
   149  				Help:  "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
   150  			}, {
   151  				Value: "ap-northeast-2",
   152  				Help:  "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
   153  			}, {
   154  				Value: "ap-south-1",
   155  				Help:  "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
   156  			}, {
   157  				Value: "sa-east-1",
   158  				Help:  "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
   159  			}},
   160  		}, {
   161  			Name:     "region",
   162  			Help:     "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
   163  			Provider: "!AWS,Alibaba",
   164  			Examples: []fs.OptionExample{{
   165  				Value: "",
   166  				Help:  "Use this if unsure. Will use v4 signatures and an empty region.",
   167  			}, {
   168  				Value: "other-v2-signature",
   169  				Help:  "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
   170  			}},
   171  		}, {
   172  			Name:     "endpoint",
   173  			Help:     "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
   174  			Provider: "AWS",
   175  		}, {
   176  			Name:     "endpoint",
   177  			Help:     "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
   178  			Provider: "IBMCOS",
   179  			Examples: []fs.OptionExample{{
   180  				Value: "s3-api.us-geo.objectstorage.softlayer.net",
   181  				Help:  "US Cross Region Endpoint",
   182  			}, {
   183  				Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
   184  				Help:  "US Cross Region Dallas Endpoint",
   185  			}, {
   186  				Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
   187  				Help:  "US Cross Region Washington DC Endpoint",
   188  			}, {
   189  				Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
   190  				Help:  "US Cross Region San Jose Endpoint",
   191  			}, {
   192  				Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
   193  				Help:  "US Cross Region Private Endpoint",
   194  			}, {
   195  				Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
   196  				Help:  "US Cross Region Dallas Private Endpoint",
   197  			}, {
   198  				Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
   199  				Help:  "US Cross Region Washington DC Private Endpoint",
   200  			}, {
   201  				Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
   202  				Help:  "US Cross Region San Jose Private Endpoint",
   203  			}, {
   204  				Value: "s3.us-east.objectstorage.softlayer.net",
   205  				Help:  "US Region East Endpoint",
   206  			}, {
   207  				Value: "s3.us-east.objectstorage.service.networklayer.com",
   208  				Help:  "US Region East Private Endpoint",
   209  			}, {
   210  				Value: "s3.us-south.objectstorage.softlayer.net",
   211  				Help:  "US Region South Endpoint",
   212  			}, {
   213  				Value: "s3.us-south.objectstorage.service.networklayer.com",
   214  				Help:  "US Region South Private Endpoint",
   215  			}, {
   216  				Value: "s3.eu-geo.objectstorage.softlayer.net",
   217  				Help:  "EU Cross Region Endpoint",
   218  			}, {
   219  				Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
   220  				Help:  "EU Cross Region Frankfurt Endpoint",
   221  			}, {
   222  				Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
   223  				Help:  "EU Cross Region Milan Endpoint",
   224  			}, {
   225  				Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
   226  				Help:  "EU Cross Region Amsterdam Endpoint",
   227  			}, {
   228  				Value: "s3.eu-geo.objectstorage.service.networklayer.com",
   229  				Help:  "EU Cross Region Private Endpoint",
   230  			}, {
   231  				Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
   232  				Help:  "EU Cross Region Frankfurt Private Endpoint",
   233  			}, {
   234  				Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
   235  				Help:  "EU Cross Region Milan Private Endpoint",
   236  			}, {
   237  				Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
   238  				Help:  "EU Cross Region Amsterdam Private Endpoint",
   239  			}, {
   240  				Value: "s3.eu-gb.objectstorage.softlayer.net",
   241  				Help:  "Great Britain Endpoint",
   242  			}, {
   243  				Value: "s3.eu-gb.objectstorage.service.networklayer.com",
   244  				Help:  "Great Britain Private Endpoint",
   245  			}, {
   246  				Value: "s3.ap-geo.objectstorage.softlayer.net",
   247  				Help:  "APAC Cross Regional Endpoint",
   248  			}, {
   249  				Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
   250  				Help:  "APAC Cross Regional Tokyo Endpoint",
   251  			}, {
   252  				Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
   253  				Help:  "APAC Cross Regional HongKong Endpoint",
   254  			}, {
   255  				Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
   256  				Help:  "APAC Cross Regional Seoul Endpoint",
   257  			}, {
   258  				Value: "s3.ap-geo.objectstorage.service.networklayer.com",
   259  				Help:  "APAC Cross Regional Private Endpoint",
   260  			}, {
   261  				Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
   262  				Help:  "APAC Cross Regional Tokyo Private Endpoint",
   263  			}, {
   264  				Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
   265  				Help:  "APAC Cross Regional HongKong Private Endpoint",
   266  			}, {
   267  				Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
   268  				Help:  "APAC Cross Regional Seoul Private Endpoint",
   269  			}, {
   270  				Value: "s3.mel01.objectstorage.softlayer.net",
   271  				Help:  "Melbourne Single Site Endpoint",
   272  			}, {
   273  				Value: "s3.mel01.objectstorage.service.networklayer.com",
   274  				Help:  "Melbourne Single Site Private Endpoint",
   275  			}, {
   276  				Value: "s3.tor01.objectstorage.softlayer.net",
   277  				Help:  "Toronto Single Site Endpoint",
   278  			}, {
   279  				Value: "s3.tor01.objectstorage.service.networklayer.com",
   280  				Help:  "Toronto Single Site Private Endpoint",
   281  			}},
   282  		}, {
   283  			// oss endpoints: https://help.aliyun.com/document_detail/31837.html
   284  			Name:     "endpoint",
   285  			Help:     "Endpoint for OSS API.",
   286  			Provider: "Alibaba",
   287  			Examples: []fs.OptionExample{{
   288  				Value: "oss-cn-hangzhou.aliyuncs.com",
   289  				Help:  "East China 1 (Hangzhou)",
   290  			}, {
   291  				Value: "oss-cn-shanghai.aliyuncs.com",
   292  				Help:  "East China 2 (Shanghai)",
   293  			}, {
   294  				Value: "oss-cn-qingdao.aliyuncs.com",
   295  				Help:  "North China 1 (Qingdao)",
   296  			}, {
   297  				Value: "oss-cn-beijing.aliyuncs.com",
   298  				Help:  "North China 2 (Beijing)",
   299  			}, {
   300  				Value: "oss-cn-zhangjiakou.aliyuncs.com",
   301  				Help:  "North China 3 (Zhangjiakou)",
   302  			}, {
   303  				Value: "oss-cn-huhehaote.aliyuncs.com",
   304  				Help:  "North China 5 (Huhehaote)",
   305  			}, {
   306  				Value: "oss-cn-shenzhen.aliyuncs.com",
   307  				Help:  "South China 1 (Shenzhen)",
   308  			}, {
   309  				Value: "oss-cn-hongkong.aliyuncs.com",
   310  				Help:  "Hong Kong (Hong Kong)",
   311  			}, {
   312  				Value: "oss-us-west-1.aliyuncs.com",
   313  				Help:  "US West 1 (Silicon Valley)",
   314  			}, {
   315  				Value: "oss-us-east-1.aliyuncs.com",
   316  				Help:  "US East 1 (Virginia)",
   317  			}, {
   318  				Value: "oss-ap-southeast-1.aliyuncs.com",
   319  				Help:  "Southeast Asia Southeast 1 (Singapore)",
   320  			}, {
   321  				Value: "oss-ap-southeast-2.aliyuncs.com",
   322  				Help:  "Asia Pacific Southeast 2 (Sydney)",
   323  			}, {
   324  				Value: "oss-ap-southeast-3.aliyuncs.com",
   325  				Help:  "Southeast Asia Southeast 3 (Kuala Lumpur)",
   326  			}, {
   327  				Value: "oss-ap-southeast-5.aliyuncs.com",
   328  				Help:  "Asia Pacific Southeast 5 (Jakarta)",
   329  			}, {
   330  				Value: "oss-ap-northeast-1.aliyuncs.com",
   331  				Help:  "Asia Pacific Northeast 1 (Japan)",
   332  			}, {
   333  				Value: "oss-ap-south-1.aliyuncs.com",
   334  				Help:  "Asia Pacific South 1 (Mumbai)",
   335  			}, {
   336  				Value: "oss-eu-central-1.aliyuncs.com",
   337  				Help:  "Central Europe 1 (Frankfurt)",
   338  			}, {
   339  				Value: "oss-eu-west-1.aliyuncs.com",
   340  				Help:  "West Europe (London)",
   341  			}, {
   342  				Value: "oss-me-east-1.aliyuncs.com",
   343  				Help:  "Middle East 1 (Dubai)",
   344  			}},
   345  		}, {
   346  			Name:     "endpoint",
   347  			Help:     "Endpoint for S3 API.\nRequired when using an S3 clone.",
   348  			Provider: "!AWS,IBMCOS,Alibaba",
   349  			Examples: []fs.OptionExample{{
   350  				Value:    "objects-us-east-1.dream.io",
   351  				Help:     "Dream Objects endpoint",
   352  				Provider: "Dreamhost",
   353  			}, {
   354  				Value:    "nyc3.digitaloceanspaces.com",
   355  				Help:     "Digital Ocean Spaces New York 3",
   356  				Provider: "DigitalOcean",
   357  			}, {
   358  				Value:    "ams3.digitaloceanspaces.com",
   359  				Help:     "Digital Ocean Spaces Amsterdam 3",
   360  				Provider: "DigitalOcean",
   361  			}, {
   362  				Value:    "sgp1.digitaloceanspaces.com",
   363  				Help:     "Digital Ocean Spaces Singapore 1",
   364  				Provider: "DigitalOcean",
   365  			}, {
   366  				Value:    "s3.wasabisys.com",
   367  				Help:     "Wasabi US East endpoint",
   368  				Provider: "Wasabi",
   369  			}, {
   370  				Value:    "s3.us-west-1.wasabisys.com",
   371  				Help:     "Wasabi US West endpoint",
   372  				Provider: "Wasabi",
   373  			}, {
   374  				Value:    "s3.eu-central-1.wasabisys.com",
   375  				Help:     "Wasabi EU Central endpoint",
   376  				Provider: "Wasabi",
   377  			}},
   378  		}, {
   379  			Name:     "location_constraint",
   380  			Help:     "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
   381  			Provider: "AWS",
   382  			Examples: []fs.OptionExample{{
   383  				Value: "",
   384  				Help:  "Empty for US Region, Northern Virginia or Pacific Northwest.",
   385  			}, {
   386  				Value: "us-east-2",
   387  				Help:  "US East (Ohio) Region.",
   388  			}, {
   389  				Value: "us-west-2",
   390  				Help:  "US West (Oregon) Region.",
   391  			}, {
   392  				Value: "us-west-1",
   393  				Help:  "US West (Northern California) Region.",
   394  			}, {
   395  				Value: "ca-central-1",
   396  				Help:  "Canada (Central) Region.",
   397  			}, {
   398  				Value: "eu-west-1",
   399  				Help:  "EU (Ireland) Region.",
   400  			}, {
   401  				Value: "eu-west-2",
   402  				Help:  "EU (London) Region.",
   403  			}, {
   404  				Value: "eu-north-1",
   405  				Help:  "EU (Stockholm) Region.",
   406  			}, {
   407  				Value: "EU",
   408  				Help:  "EU Region.",
   409  			}, {
   410  				Value: "ap-southeast-1",
   411  				Help:  "Asia Pacific (Singapore) Region.",
   412  			}, {
   413  				Value: "ap-southeast-2",
   414  				Help:  "Asia Pacific (Sydney) Region.",
   415  			}, {
   416  				Value: "ap-northeast-1",
   417  				Help:  "Asia Pacific (Tokyo) Region.",
   418  			}, {
   419  				Value: "ap-northeast-2",
   420  				Help:  "Asia Pacific (Seoul)",
   421  			}, {
   422  				Value: "ap-south-1",
   423  				Help:  "Asia Pacific (Mumbai)",
   424  			}, {
   425  				Value: "sa-east-1",
   426  				Help:  "South America (Sao Paulo) Region.",
   427  			}},
   428  		}, {
   429  			Name:     "location_constraint",
   430  			Help:     "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
   431  			Provider: "IBMCOS",
   432  			Examples: []fs.OptionExample{{
   433  				Value: "us-standard",
   434  				Help:  "US Cross Region Standard",
   435  			}, {
   436  				Value: "us-vault",
   437  				Help:  "US Cross Region Vault",
   438  			}, {
   439  				Value: "us-cold",
   440  				Help:  "US Cross Region Cold",
   441  			}, {
   442  				Value: "us-flex",
   443  				Help:  "US Cross Region Flex",
   444  			}, {
   445  				Value: "us-east-standard",
   446  				Help:  "US East Region Standard",
   447  			}, {
   448  				Value: "us-east-vault",
   449  				Help:  "US East Region Vault",
   450  			}, {
   451  				Value: "us-east-cold",
   452  				Help:  "US East Region Cold",
   453  			}, {
   454  				Value: "us-east-flex",
   455  				Help:  "US East Region Flex",
   456  			}, {
   457  				Value: "us-south-standard",
   458  				Help:  "US South Region Standard",
   459  			}, {
   460  				Value: "us-south-vault",
   461  				Help:  "US South Region Vault",
   462  			}, {
   463  				Value: "us-south-cold",
   464  				Help:  "US South Region Cold",
   465  			}, {
   466  				Value: "us-south-flex",
   467  				Help:  "US South Region Flex",
   468  			}, {
   469  				Value: "eu-standard",
   470  				Help:  "EU Cross Region Standard",
   471  			}, {
   472  				Value: "eu-vault",
   473  				Help:  "EU Cross Region Vault",
   474  			}, {
   475  				Value: "eu-cold",
   476  				Help:  "EU Cross Region Cold",
   477  			}, {
   478  				Value: "eu-flex",
   479  				Help:  "EU Cross Region Flex",
   480  			}, {
   481  				Value: "eu-gb-standard",
   482  				Help:  "Great Britain Standard",
   483  			}, {
   484  				Value: "eu-gb-vault",
   485  				Help:  "Great Britain Vault",
   486  			}, {
   487  				Value: "eu-gb-cold",
   488  				Help:  "Great Britain Cold",
   489  			}, {
   490  				Value: "eu-gb-flex",
   491  				Help:  "Great Britain Flex",
   492  			}, {
   493  				Value: "ap-standard",
   494  				Help:  "APAC Standard",
   495  			}, {
   496  				Value: "ap-vault",
   497  				Help:  "APAC Vault",
   498  			}, {
   499  				Value: "ap-cold",
   500  				Help:  "APAC Cold",
   501  			}, {
   502  				Value: "ap-flex",
   503  				Help:  "APAC Flex",
   504  			}, {
   505  				Value: "mel01-standard",
   506  				Help:  "Melbourne Standard",
   507  			}, {
   508  				Value: "mel01-vault",
   509  				Help:  "Melbourne Vault",
   510  			}, {
   511  				Value: "mel01-cold",
   512  				Help:  "Melbourne Cold",
   513  			}, {
   514  				Value: "mel01-flex",
   515  				Help:  "Melbourne Flex",
   516  			}, {
   517  				Value: "tor01-standard",
   518  				Help:  "Toronto Standard",
   519  			}, {
   520  				Value: "tor01-vault",
   521  				Help:  "Toronto Vault",
   522  			}, {
   523  				Value: "tor01-cold",
   524  				Help:  "Toronto Cold",
   525  			}, {
   526  				Value: "tor01-flex",
   527  				Help:  "Toronto Flex",
   528  			}},
   529  		}, {
   530  			Name:     "location_constraint",
   531  			Help:     "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
   532  			Provider: "!AWS,IBMCOS,Alibaba",
   533  		}, {
   534  			Name: "acl",
   535  			Help: `Canned ACL used when creating buckets and storing or copying objects.
   536  
   537  This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
   538  
   539  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
   540  
   541  Note that this ACL is applied when server side copying objects as S3
   542  doesn't copy the ACL from the source but rather writes a fresh one.`,
   543  			Examples: []fs.OptionExample{{
   544  				Value:    "private",
   545  				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default).",
   546  				Provider: "!IBMCOS",
   547  			}, {
   548  				Value:    "public-read",
   549  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
   550  				Provider: "!IBMCOS",
   551  			}, {
   552  				Value:    "public-read-write",
   553  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
   554  				Provider: "!IBMCOS",
   555  			}, {
   556  				Value:    "authenticated-read",
   557  				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
   558  				Provider: "!IBMCOS",
   559  			}, {
   560  				Value:    "bucket-owner-read",
   561  				Help:     "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
   562  				Provider: "!IBMCOS",
   563  			}, {
   564  				Value:    "bucket-owner-full-control",
   565  				Help:     "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
   566  				Provider: "!IBMCOS",
   567  			}, {
   568  				Value:    "private",
   569  				Help:     "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
   570  				Provider: "IBMCOS",
   571  			}, {
   572  				Value:    "public-read",
   573  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
   574  				Provider: "IBMCOS",
   575  			}, {
   576  				Value:    "public-read-write",
   577  				Help:     "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
   578  				Provider: "IBMCOS",
   579  			}, {
   580  				Value:    "authenticated-read",
   581  				Help:     "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
   582  				Provider: "IBMCOS",
   583  			}},
   584  		}, {
   585  			Name: "bucket_acl",
   586  			Help: `Canned ACL used when creating buckets.
   587  
   588  For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
   589  
   590  Note that this ACL is applied only when creating buckets. If it
   591  isn't set then "acl" is used instead.`,
   592  			Advanced: true,
   593  			Examples: []fs.OptionExample{{
   594  				Value: "private",
   595  				Help:  "Owner gets FULL_CONTROL. No one else has access rights (default).",
   596  			}, {
   597  				Value: "public-read",
   598  				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
   599  			}, {
   600  				Value: "public-read-write",
   601  				Help:  "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
   602  			}, {
   603  				Value: "authenticated-read",
   604  				Help:  "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
   605  			}},
   606  		}, {
   607  			Name:     "server_side_encryption",
   608  			Help:     "The server-side encryption algorithm used when storing this object in S3.",
   609  			Provider: "AWS",
   610  			Examples: []fs.OptionExample{{
   611  				Value: "",
   612  				Help:  "None",
   613  			}, {
   614  				Value: "AES256",
   615  				Help:  "AES256",
   616  			}, {
   617  				Value: "aws:kms",
   618  				Help:  "aws:kms",
   619  			}},
   620  		}, {
   621  			Name:     "sse_kms_key_id",
   622  			Help:     "If using KMS ID you must provide the ARN of Key.",
   623  			Provider: "AWS",
   624  			Examples: []fs.OptionExample{{
   625  				Value: "",
   626  				Help:  "None",
   627  			}, {
   628  				Value: "arn:aws:kms:us-east-1:*",
   629  				Help:  "arn:aws:kms:*",
   630  			}},
   631  		}, {
   632  			Name:     "storage_class",
   633  			Help:     "The storage class to use when storing new objects in S3.",
   634  			Provider: "AWS",
   635  			Examples: []fs.OptionExample{{
   636  				Value: "",
   637  				Help:  "Default",
   638  			}, {
   639  				Value: "STANDARD",
   640  				Help:  "Standard storage class",
   641  			}, {
   642  				Value: "REDUCED_REDUNDANCY",
   643  				Help:  "Reduced redundancy storage class",
   644  			}, {
   645  				Value: "STANDARD_IA",
   646  				Help:  "Standard Infrequent Access storage class",
   647  			}, {
   648  				Value: "ONEZONE_IA",
   649  				Help:  "One Zone Infrequent Access storage class",
   650  			}, {
   651  				Value: "GLACIER",
   652  				Help:  "Glacier storage class",
   653  			}, {
   654  				Value: "DEEP_ARCHIVE",
   655  				Help:  "Glacier Deep Archive storage class",
   656  			}, {
   657  				Value: "INTELLIGENT_TIERING",
   658  				Help:  "Intelligent-Tiering storage class",
   659  			}},
   660  		}, {
   661  			// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
   662  			Name:     "storage_class",
   663  			Help:     "The storage class to use when storing new objects in OSS.",
   664  			Provider: "Alibaba",
   665  			Examples: []fs.OptionExample{{
   666  				Value: "",
   667  				Help:  "Default",
   668  			}, {
   669  				Value: "STANDARD",
   670  				Help:  "Standard storage class",
   671  			}, {
   672  				Value: "GLACIER",
   673  				Help:  "Archive storage mode.",
   674  			}, {
   675  				Value: "STANDARD_IA",
   676  				Help:  "Infrequent access storage mode.",
   677  			}},
   678  		}, {
   679  			Name: "upload_cutoff",
   680  			Help: `Cutoff for switching to chunked upload
   681  
   682  Any files larger than this will be uploaded in chunks of chunk_size.
   683  The minimum is 0 and the maximum is 5GB.`,
   684  			Default:  defaultUploadCutoff,
   685  			Advanced: true,
   686  		}, {
   687  			Name: "chunk_size",
   688  			Help: `Chunk size to use for uploading.
   689  
   690  When uploading files larger than upload_cutoff they will be uploaded
   691  as multipart uploads using this chunk size.
   692  
   693  Note that "--s3-upload-concurrency" chunks of this size are buffered
   694  in memory per transfer.
   695  
   696  If you are transferring large files over high speed links and you have
   697  enough memory, then increasing this will speed up the transfers.`,
   698  			Default:  minChunkSize,
   699  			Advanced: true,
   700  		}, {
   701  			Name:     "disable_checksum",
   702  			Help:     "Don't store MD5 checksum with object metadata",
   703  			Default:  false,
   704  			Advanced: true,
   705  		}, {
   706  			Name:     "session_token",
   707  			Help:     "An AWS session token",
   708  			Advanced: true,
   709  		}, {
   710  			Name: "upload_concurrency",
   711  			Help: `Concurrency for multipart uploads.
   712  
   713  This is the number of chunks of the same file that are uploaded
   714  concurrently.
   715  
   716  If you are uploading small numbers of large files over a high speed link
   717  and these uploads do not fully utilize your bandwidth, then increasing
   718  this may help to speed up the transfers.`,
   719  			Default:  4,
   720  			Advanced: true,
   721  		}, {
   722  			Name: "force_path_style",
   723  			Help: `If true use path style access, if false use virtual hosted style.
   724  
   725  If this is true (the default) then rclone will use path style access,
   726  if false then rclone will use virtual hosted style. See [the AWS S3
   727  docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
   728  for more info.
   729  
   730  Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
   731  			Default:  true,
   732  			Advanced: true,
   733  		}, {
   734  			Name: "v2_auth",
   735  			Help: `If true use v2 authentication.
   736  
   737  If this is false (the default) then rclone will use v4 authentication.
   738  If it is set then rclone will use v2 authentication.
   739  
   740  Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
   741  			Default:  false,
   742  			Advanced: true,
   743  		}, {
   744  			Name:     "use_accelerate_endpoint",
   745  			Provider: "AWS",
   746  			Help: `If true use the AWS S3 accelerated endpoint.
   747  
   748  See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
   749  			Default:  false,
   750  			Advanced: true,
   751  		}},
   752  	})
   753  }
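// With the options above registered, the backend can be configured
// interactively with "rclone config" or driven entirely from flags and
// environment variables. A minimal sketch (remote name and values
// illustrative, not from this file):
//
//	rclone lsd --s3-provider AWS --s3-env-auth :s3:
//	rclone ls s3remote:mybucket/path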
   754  
   755  // Constants
   756  const (
   757  	metaMtime           = "Mtime"                       // the meta key to store mtime in - eg X-Amz-Meta-Mtime
   758  	metaMD5Hash         = "Md5chksum"                   // the meta key to store md5hash in
   759  	listChunkSize       = 1000                          // number of items to read at once
   760  	maxRetries          = 10                            // number of retries to make of operations
   761  	maxSizeForCopy      = 5 * 1024 * 1024 * 1024        // The maximum size of object we can COPY
   762  	maxFileSize         = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
   763  	minChunkSize        = fs.SizeSuffix(s3manager.MinUploadPartSize)
   764  	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
   765  	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
   766  	minSleep            = 10 * time.Millisecond // In case of error, start at 10ms sleep.
   767  )
   768  
   769  // Options defines the configuration for this backend
   770  type Options struct {
   771  	Provider              string        `config:"provider"`
   772  	EnvAuth               bool          `config:"env_auth"`
   773  	AccessKeyID           string        `config:"access_key_id"`
   774  	SecretAccessKey       string        `config:"secret_access_key"`
   775  	Region                string        `config:"region"`
   776  	Endpoint              string        `config:"endpoint"`
   777  	LocationConstraint    string        `config:"location_constraint"`
   778  	ACL                   string        `config:"acl"`
   779  	BucketACL             string        `config:"bucket_acl"`
   780  	ServerSideEncryption  string        `config:"server_side_encryption"`
   781  	SSEKMSKeyID           string        `config:"sse_kms_key_id"`
   782  	StorageClass          string        `config:"storage_class"`
   783  	UploadCutoff          fs.SizeSuffix `config:"upload_cutoff"`
   784  	ChunkSize             fs.SizeSuffix `config:"chunk_size"`
   785  	DisableChecksum       bool          `config:"disable_checksum"`
   786  	SessionToken          string        `config:"session_token"`
   787  	UploadConcurrency     int           `config:"upload_concurrency"`
   788  	ForcePathStyle        bool          `config:"force_path_style"`
   789  	V2Auth                bool          `config:"v2_auth"`
   790  	UseAccelerateEndpoint bool          `config:"use_accelerate_endpoint"`
   791  }
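// Each field above maps onto the config key named in its struct tag. A
// minimal config file stanza might look like this (values illustrative):
//
//	[s3remote]
//	type = s3
//	provider = AWS
//	env_auth = true
//	region = us-east-1
//	acl = private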
   792  
   793  // Fs represents a remote s3 server
   794  type Fs struct {
   795  	name          string           // the name of the remote
   796  	root          string           // root of the bucket - ignore all objects above this
   797  	opt           Options          // parsed options
   798  	features      *fs.Features     // optional features
   799  	c             *s3.S3           // the connection to the s3 server
   800  	ses           *session.Session // the s3 session
   801  	bucket        string           // the bucket we are working on
   802  	bucketOKMu    sync.Mutex       // mutex to protect bucket OK
   803  	bucketOK      bool             // true if we have created the bucket
   804  	bucketDeleted bool             // true if we have deleted the bucket
   805  	pacer         *fs.Pacer        // To pace the API calls
   806  	srv           *http.Client     // a plain http client
   807  }
   808  
   809  // Object describes an s3 object
   810  type Object struct {
   811  	// Will definitely have everything but meta which may be nil
   812  	//
   813  	// List will read everything but meta & mimeType - to fill
   814  	// that in you need to call readMetaData
   815  	fs           *Fs                // what this object is part of
   816  	remote       string             // The remote path
   817  	etag         string             // md5sum of the object
   818  	bytes        int64              // size of the object
   819  	lastModified time.Time          // Last modified
   820  	meta         map[string]*string // The object metadata if known - may be nil
   821  	mimeType     string             // MimeType of object - may be ""
   822  }
   823  
   824  // ------------------------------------------------------------
   825  
   826  // Name of the remote (as passed into NewFs)
   827  func (f *Fs) Name() string {
   828  	return f.name
   829  }
   830  
   831  // Root of the remote (as passed into NewFs)
   832  func (f *Fs) Root() string {
   833  	if f.root == "" {
   834  		return f.bucket
   835  	}
   836  	return f.bucket + "/" + f.root
   837  }
   838  
   839  // String converts this Fs to a string
   840  func (f *Fs) String() string {
   841  	if f.root == "" {
   842  		return fmt.Sprintf("S3 bucket %s", f.bucket)
   843  	}
   844  	return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
   845  }
   846  
   847  // Features returns the optional features of this Fs
   848  func (f *Fs) Features() *fs.Features {
   849  	return f.features
   850  }
   851  
   852  // retryErrorCodes is a slice of error codes that we will retry
   853  // See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
   854  var retryErrorCodes = []int{
   855  	// 409, // Conflict - various states that could be resolved on a retry
   856  	503, // Service Unavailable/Slow Down - "Reduce your request rate"
   857  }
   858  
   859  // S3 is pretty resilient, and the built-in retry handling is probably sufficient
   860  // as it should notice closed connections and timeouts, which are the most
   861  // likely failure modes.
   862  func (f *Fs) shouldRetry(err error) (bool, error) {
   863  	// If this is an awserr object, try and extract more useful information to determine if we should retry
   864  	if awsError, ok := err.(awserr.Error); ok {
   865  		// Simple case, check the original embedded error in case it's generically retryable
   866  		if fserrors.ShouldRetry(awsError.OrigErr()) {
   867  			return true, err
   868  		}
   869  		// Failing that, if it's a RequestFailure it's probably got an http status code we can check
   870  		if reqErr, ok := err.(awserr.RequestFailure); ok {
   871  			// 301 if wrong region for bucket
   872  			if reqErr.StatusCode() == http.StatusMovedPermanently {
   873  				urfbErr := f.updateRegionForBucket()
   874  				if urfbErr != nil {
   875  					fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
   876  					return false, err
   877  				}
   878  				return true, err
   879  			}
   880  			for _, e := range retryErrorCodes {
   881  				if reqErr.StatusCode() == e {
   882  					return true, err
   883  				}
   884  			}
   885  		}
   886  	}
   887  	// Ok, not an awserr, check for generic failure conditions
   888  	return fserrors.ShouldRetry(err), err
   889  }
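// shouldRetry is written to be used as the return value of the function
// passed to pacer.Call, which keeps retrying with backoff while the
// first return value is true. A typical call site in this file:
//
//	err = f.pacer.Call(func() (bool, error) {
//		resp, err = f.c.ListObjectsWithContext(ctx, &req)
//		return f.shouldRetry(err)
//	})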
   890  
   891  // Pattern to match an s3 path
   892  var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
   893  
   894  // s3ParsePath parses an s3 'url'
   895  func s3ParsePath(path string) (bucket, directory string, err error) {
   896  	parts := matcher.FindStringSubmatch(path)
   897  	if parts == nil {
   898  		err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
   899  	} else {
   900  		bucket, directory = parts[1], parts[2]
   901  		directory = strings.Trim(directory, "/")
   902  	}
   903  	return
   904  }
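// For example (paths illustrative):
//
//	bucket, directory, _ := s3ParsePath("mybucket/path/to/dir")
//	// bucket == "mybucket", directory == "path/to/dir"
//
// Leading slashes are skipped and trailing slashes trimmed, so a bare
// "mybucket" yields an empty directory.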
   905  
   906  // s3Connection makes a connection to s3
   907  func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
   908  	// Make the auth
   909  	v := credentials.Value{
   910  		AccessKeyID:     opt.AccessKeyID,
   911  		SecretAccessKey: opt.SecretAccessKey,
   912  		SessionToken:    opt.SessionToken,
   913  	}
   914  
   915  	lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
   916  	def := defaults.Get()
   917  	def.Config.HTTPClient = lowTimeoutClient
   918  
   919  	// first provider to supply a credential set "wins"
   920  	providers := []credentials.Provider{
   921  		// use static credentials if they're present (checked by provider)
   922  		&credentials.StaticProvider{Value: v},
   923  
   924  		// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
   925  		// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
   926  		&credentials.EnvProvider{},
   927  
   928  		// A SharedCredentialsProvider retrieves credentials
   929  		// from the current user's home directory.  It checks
   930  		// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
   931  		&credentials.SharedCredentialsProvider{},
   932  
   933  		// Pick up IAM role if we're in an ECS task
   934  		defaults.RemoteCredProvider(*def.Config, def.Handlers),
   935  
   936  		// Pick up IAM role in case we're on EC2
   937  		&ec2rolecreds.EC2RoleProvider{
   938  			Client: ec2metadata.New(session.New(), &aws.Config{
   939  				HTTPClient: lowTimeoutClient,
   940  			}),
   941  			ExpiryWindow: 3 * time.Minute, // refresh credentials this long before they expire
   942  		},
   943  	}
   944  	cred := credentials.NewChainCredentials(providers)
   945  
   946  	switch {
   947  	case opt.EnvAuth:
   948  		// No need for empty checks if "env_auth" is true
   949  	case v.AccessKeyID == "" && v.SecretAccessKey == "":
   950  		// if no access key/secret and IAM is explicitly disabled then fall back to anonymous access
   951  		cred = credentials.AnonymousCredentials
   952  	case v.AccessKeyID == "":
   953  		return nil, nil, errors.New("access_key_id not found")
   954  	case v.SecretAccessKey == "":
   955  		return nil, nil, errors.New("secret_access_key not found")
   956  	}
   957  
   958  	if opt.Region == "" && opt.Endpoint == "" {
   959  		opt.Endpoint = "https://s3.amazonaws.com/"
   960  	}
   961  	if opt.Region == "" {
   962  		opt.Region = "us-east-1"
   963  	}
   964  	if opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
   965  		opt.ForcePathStyle = false
   966  	}
   967  	awsConfig := aws.NewConfig().
   968  		WithMaxRetries(maxRetries).
   969  		WithCredentials(cred).
   970  		WithHTTPClient(fshttp.NewClient(fs.Config)).
   971  		WithS3ForcePathStyle(opt.ForcePathStyle).
   972  		WithS3UseAccelerate(opt.UseAccelerateEndpoint)
   973  	if opt.Region != "" {
   974  		awsConfig.WithRegion(opt.Region)
   975  	}
   976  	if opt.Endpoint != "" {
   977  		awsConfig.WithEndpoint(opt.Endpoint)
   978  	}
   979  
   980  	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
   981  	awsSessionOpts := session.Options{
   982  		Config: *awsConfig,
   983  	}
   984  	if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
   985  		// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
   986  		awsSessionOpts.SharedConfigState = session.SharedConfigEnable
   987  		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
   988  		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
   989  		awsSessionOpts.Config.Credentials = nil
   990  	}
   991  	ses, err := session.NewSessionWithOptions(awsSessionOpts)
   992  	if err != nil {
   993  		return nil, nil, err
   994  	}
   995  	c := s3.New(ses)
   996  	if opt.V2Auth || opt.Region == "other-v2-signature" {
   997  		fs.Debugf(nil, "Using v2 auth")
   998  		signer := func(req *request.Request) {
   999  			// Ignore AnonymousCredentials object
  1000  			if req.Config.Credentials == credentials.AnonymousCredentials {
  1001  				return
  1002  			}
  1003  			sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
  1004  		}
  1005  		c.Handlers.Sign.Clear()
  1006  		c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
  1007  		c.Handlers.Sign.PushBack(signer)
  1008  	}
  1009  	return c, ses, nil
  1010  }
  1011  
  1012  func checkUploadChunkSize(cs fs.SizeSuffix) error {
  1013  	if cs < minChunkSize {
  1014  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
  1015  	}
  1016  	return nil
  1017  }
  1018  
  1019  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1020  	err = checkUploadChunkSize(cs)
  1021  	if err == nil {
  1022  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
  1023  	}
  1024  	return
  1025  }
  1026  
  1027  func checkUploadCutoff(cs fs.SizeSuffix) error {
  1028  	if cs > maxUploadCutoff {
  1029  		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
  1030  	}
  1031  	return nil
  1032  }
  1033  
  1034  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
  1035  	err = checkUploadCutoff(cs)
  1036  	if err == nil {
  1037  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
  1038  	}
  1039  	return
  1040  }
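// Taken together, upload_cutoff and chunk_size decide how an upload is
// sent. A sketch with the defaults (200M cutoff, 5M chunks): a 100 MiB
// file goes up as a single PutObject request, while a 1 GiB file becomes
// a multipart upload of 5 MiB parts with upload_concurrency parts in
// flight at once.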
  1041  
  1042  // NewFs constructs an Fs from the path, bucket:path
  1043  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
  1044  	// Parse config into Options struct
  1045  	opt := new(Options)
  1046  	err := configstruct.Set(m, opt)
  1047  	if err != nil {
  1048  		return nil, err
  1049  	}
  1050  	err = checkUploadChunkSize(opt.ChunkSize)
  1051  	if err != nil {
  1052  		return nil, errors.Wrap(err, "s3: chunk size")
  1053  	}
  1054  	err = checkUploadCutoff(opt.UploadCutoff)
  1055  	if err != nil {
  1056  		return nil, errors.Wrap(err, "s3: upload cutoff")
  1057  	}
  1058  	bucket, directory, err := s3ParsePath(root)
  1059  	if err != nil {
  1060  		return nil, err
  1061  	}
  1062  	if opt.ACL == "" {
  1063  		opt.ACL = "private"
  1064  	}
  1065  	if opt.BucketACL == "" {
  1066  		opt.BucketACL = opt.ACL
  1067  	}
  1068  	c, ses, err := s3Connection(opt)
  1069  	if err != nil {
  1070  		return nil, err
  1071  	}
  1072  	f := &Fs{
  1073  		name:   name,
  1074  		root:   directory,
  1075  		opt:    *opt,
  1076  		c:      c,
  1077  		bucket: bucket,
  1078  		ses:    ses,
  1079  		pacer:  fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
  1080  		srv:    fshttp.NewClient(fs.Config),
  1081  	}
  1082  	f.features = (&fs.Features{
  1083  		ReadMimeType:  true,
  1084  		WriteMimeType: true,
  1085  		BucketBased:   true,
  1086  	}).Fill(f)
  1087  	if f.root != "" {
  1088  		f.root += "/"
  1089  		// Check to see if the object exists
  1090  		req := s3.HeadObjectInput{
  1091  			Bucket: &f.bucket,
  1092  			Key:    &directory,
  1093  		}
  1094  		err = f.pacer.Call(func() (bool, error) {
  1095  			_, err = f.c.HeadObject(&req)
  1096  			return f.shouldRetry(err)
  1097  		})
  1098  		if err == nil {
  1099  			f.root = path.Dir(directory)
  1100  			if f.root == "." {
  1101  				f.root = ""
  1102  			} else {
  1103  				f.root += "/"
  1104  			}
  1105  			// return an error with an fs which points to the parent
  1106  			return f, fs.ErrorIsFile
  1107  		}
  1108  	}
  1109  	// f.listMultipartUploads()
  1110  	return f, nil
  1111  }
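// As a sketch of how roots are interpreted (remote name and paths
// illustrative, m being the config mapper):
//
//	NewFs("s3remote", "", m)            // all buckets - List returns buckets
//	NewFs("s3remote", "bucket", m)      // the root of one bucket
//	NewFs("s3remote", "bucket/dir", m)  // a directory within a bucket
//	NewFs("s3remote", "bucket/file", m) // parent dir Fs plus fs.ErrorIsFile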
  1112  
  1113  // Return an Object from a path
  1114  //
  1115  // If it can't be found it returns the error fs.ErrorObjectNotFound.
  1116  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
  1117  	o := &Object{
  1118  		fs:     f,
  1119  		remote: remote,
  1120  	}
  1121  	if info != nil {
  1122  		// Set info but not meta
  1123  		if info.LastModified == nil {
  1124  			fs.Logf(o, "Failed to read last modified")
  1125  			o.lastModified = time.Now()
  1126  		} else {
  1127  			o.lastModified = *info.LastModified
  1128  		}
  1129  		o.etag = aws.StringValue(info.ETag)
  1130  		o.bytes = aws.Int64Value(info.Size)
  1131  	} else {
  1132  		err := o.readMetaData(ctx) // reads info and meta, returning an error
  1133  		if err != nil {
  1134  			return nil, err
  1135  		}
  1136  	}
  1137  	return o, nil
  1138  }
  1139  
  1140  // NewObject finds the Object at remote.  If it can't be found
  1141  // it returns the error fs.ErrorObjectNotFound.
  1142  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
  1143  	return f.newObjectWithInfo(ctx, remote, nil)
  1144  }
  1145  
  1146  // Gets the bucket location
  1147  func (f *Fs) getBucketLocation() (string, error) {
  1148  	req := s3.GetBucketLocationInput{
  1149  		Bucket: &f.bucket,
  1150  	}
  1151  	var resp *s3.GetBucketLocationOutput
  1152  	var err error
  1153  	err = f.pacer.Call(func() (bool, error) {
  1154  		resp, err = f.c.GetBucketLocation(&req)
  1155  		return f.shouldRetry(err)
  1156  	})
  1157  	if err != nil {
  1158  		return "", err
  1159  	}
  1160  	return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
  1161  }
  1162  
  1163  // Updates the region for the bucket by reading the region from the
  1164  // bucket then updating the session.
  1165  func (f *Fs) updateRegionForBucket() error {
  1166  	region, err := f.getBucketLocation()
  1167  	if err != nil {
  1168  		return errors.Wrap(err, "reading bucket location failed")
  1169  	}
  1170  	if aws.StringValue(f.c.Config.Endpoint) != "" {
  1171  		return errors.Errorf("can't set region to %q as endpoint is set", region)
  1172  	}
  1173  	if aws.StringValue(f.c.Config.Region) == region {
  1174  		return errors.Errorf("region is already %q - not updating", region)
  1175  	}
  1176  
  1177  	// Make a new session with the new region
  1178  	oldRegion := f.opt.Region
  1179  	f.opt.Region = region
  1180  	c, ses, err := s3Connection(&f.opt)
  1181  	if err != nil {
  1182  		return errors.Wrap(err, "creating new session failed")
  1183  	}
  1184  	f.c = c
  1185  	f.ses = ses
  1186  
  1187  	fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
  1188  	return nil
  1189  }
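// This is wired into shouldRetry above: a 301 response from a bucket in
// the wrong region triggers updateRegionForBucket, and if the switch
// succeeds the original request is retried against the new region.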
  1190  
  1191  // listFn is called from list to handle an object.
  1192  type listFn func(remote string, object *s3.Object, isDirectory bool) error
  1193  
  1194  // list the objects into the function supplied
  1195  //
  1196  // dir is the starting directory, "" for root
  1197  //
  1198  // Set recurse to read sub directories
  1199  func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
  1200  	root := f.root
  1201  	if dir != "" {
  1202  		root += dir + "/"
  1203  	}
  1204  	maxKeys := int64(listChunkSize)
  1205  	delimiter := ""
  1206  	if !recurse {
  1207  		delimiter = "/"
  1208  	}
  1209  	var marker *string
  1210  	for {
  1211  		// FIXME need to implement ALL loop
  1212  		req := s3.ListObjectsInput{
  1213  			Bucket:    &f.bucket,
  1214  			Delimiter: &delimiter,
  1215  			Prefix:    &root,
  1216  			MaxKeys:   &maxKeys,
  1217  			Marker:    marker,
  1218  		}
  1219  		var resp *s3.ListObjectsOutput
  1220  		var err error
  1221  		err = f.pacer.Call(func() (bool, error) {
  1222  			resp, err = f.c.ListObjectsWithContext(ctx, &req)
  1223  			return f.shouldRetry(err)
  1224  		})
  1225  		if err != nil {
  1226  			if awsErr, ok := err.(awserr.RequestFailure); ok {
  1227  				if awsErr.StatusCode() == http.StatusNotFound {
  1228  					err = fs.ErrorDirNotFound
  1229  				}
  1230  			}
  1231  			return err
  1232  		}
  1233  		rootLength := len(f.root)
  1234  		if !recurse {
  1235  			for _, commonPrefix := range resp.CommonPrefixes {
  1236  				if commonPrefix.Prefix == nil {
  1237  					fs.Logf(f, "Nil common prefix received")
  1238  					continue
  1239  				}
  1240  				remote := *commonPrefix.Prefix
  1241  				if !strings.HasPrefix(remote, f.root) {
  1242  					fs.Logf(f, "Odd name received %q", remote)
  1243  					continue
  1244  				}
  1245  				remote = remote[rootLength:]
  1246  				if strings.HasSuffix(remote, "/") {
  1247  					remote = remote[:len(remote)-1]
  1248  				}
  1249  				err = fn(remote, &s3.Object{Key: &remote}, true)
  1250  				if err != nil {
  1251  					return err
  1252  				}
  1253  			}
  1254  		}
  1255  		for _, object := range resp.Contents {
  1256  			key := aws.StringValue(object.Key)
  1257  			if !strings.HasPrefix(key, f.root) {
  1258  				fs.Logf(f, "Odd name received %q", key)
  1259  				continue
  1260  			}
  1261  			remote := key[rootLength:]
  1262  			// is this a directory marker?
  1263  			if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
  1264  				if recurse && remote != "" {
  1265  					// add a directory in if --fast-list since it will have no prefixes
  1266  					remote = remote[:len(remote)-1]
  1267  					err = fn(remote, &s3.Object{Key: &remote}, true)
  1268  					if err != nil {
  1269  						return err
  1270  					}
  1271  				}
  1272  				continue // skip directory marker
  1273  			}
  1274  			err = fn(remote, object, false)
  1275  			if err != nil {
  1276  				return err
  1277  			}
  1278  		}
  1279  		if !aws.BoolValue(resp.IsTruncated) {
  1280  			break
  1281  		}
  1282  		// Use NextMarker if set, otherwise use last Key
  1283  		if resp.NextMarker == nil || *resp.NextMarker == "" {
  1284  			if len(resp.Contents) == 0 {
  1285  				return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
  1286  			}
  1287  			marker = resp.Contents[len(resp.Contents)-1].Key
  1288  		} else {
  1289  			marker = resp.NextMarker
  1290  		}
  1291  	}
  1292  	return nil
  1293  }
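// Pagination sketch: with listChunkSize of 1000, a bucket holding 2500
// keys is read in three ListObjects calls; the second and third pass
// Marker (NextMarker if the server set one, otherwise the last Key of
// the previous page) so each page resumes where the last one ended.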
  1294  
  1295  // Convert a list item into a DirEntry
  1296  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
  1297  	if isDirectory {
  1298  		size := int64(0)
  1299  		if object.Size != nil {
  1300  			size = *object.Size
  1301  		}
  1302  		d := fs.NewDir(remote, time.Time{}).SetSize(size)
  1303  		return d, nil
  1304  	}
  1305  	o, err := f.newObjectWithInfo(ctx, remote, object)
  1306  	if err != nil {
  1307  		return nil, err
  1308  	}
  1309  	return o, nil
  1310  }
  1311  
  1312  // mark the bucket as being OK
  1313  func (f *Fs) markBucketOK() {
  1314  	if f.bucket != "" {
  1315  		f.bucketOKMu.Lock()
  1316  		f.bucketOK = true
  1317  		f.bucketDeleted = false
  1318  		f.bucketOKMu.Unlock()
  1319  	}
  1320  }
  1321  
  1322  // listDir lists files and directories to out
  1323  func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1324  	// List the objects and directories
  1325  	err = f.list(ctx, dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
  1326  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
  1327  		if err != nil {
  1328  			return err
  1329  		}
  1330  		if entry != nil {
  1331  			entries = append(entries, entry)
  1332  		}
  1333  		return nil
  1334  	})
  1335  	if err != nil {
  1336  		return nil, err
  1337  	}
  1338  	// bucket must be present if listing succeeded
  1339  	f.markBucketOK()
  1340  	return entries, nil
  1341  }
  1342  
  1343  // listBuckets lists the buckets to out
  1344  func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1345  	if dir != "" {
  1346  		return nil, fs.ErrorListBucketRequired
  1347  	}
  1348  	req := s3.ListBucketsInput{}
  1349  	var resp *s3.ListBucketsOutput
  1350  	err = f.pacer.Call(func() (bool, error) {
  1351  		resp, err = f.c.ListBucketsWithContext(ctx, &req)
  1352  		return f.shouldRetry(err)
  1353  	})
  1354  	if err != nil {
  1355  		return nil, err
  1356  	}
  1357  	for _, bucket := range resp.Buckets {
  1358  		d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
  1359  		entries = append(entries, d)
  1360  	}
  1361  	return entries, nil
  1362  }
  1363  
  1364  // List the objects and directories in dir into entries.  The
  1365  // entries can be returned in any order but should be for a
  1366  // complete directory.
  1367  //
  1368  // dir should be "" to list the root, and should not have
  1369  // trailing slashes.
  1370  //
  1371  // This should return ErrDirNotFound if the directory isn't
  1372  // found.
  1373  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
  1374  	if f.bucket == "" {
  1375  		return f.listBuckets(ctx, dir)
  1376  	}
  1377  	return f.listDir(ctx, dir)
  1378  }
  1379  
  1380  // ListR lists the objects and directories of the Fs starting
  1381  // from dir recursively into out.
  1382  //
  1383  // dir should be "" to start from the root, and should not
  1384  // have trailing slashes.
  1385  //
  1386  // This should return ErrDirNotFound if the directory isn't
  1387  // found.
  1388  //
  1389  // It should call callback for each tranche of entries read.
  1390  // These need not be returned in any particular order.  If
  1391  // callback returns an error then the listing will stop
  1392  // immediately.
  1393  //
  1394  // Don't implement this unless you have a more efficient way
  1395  // of listing recursively than doing a directory traversal.
  1396  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
  1397  	if f.bucket == "" {
  1398  		return fs.ErrorListBucketRequired
  1399  	}
  1400  	list := walk.NewListRHelper(callback)
  1401  	err = f.list(ctx, dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
  1402  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
  1403  		if err != nil {
  1404  			return err
  1405  		}
  1406  		return list.Add(entry)
  1407  	})
  1408  	if err != nil {
  1409  		return err
  1410  	}
  1411  	// bucket must be present if listing succeeded
  1412  	f.markBucketOK()
  1413  	return list.Flush()
  1414  }
  1415  
  1416  // Put the Object into the bucket
  1417  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1418  	// Temporary Object under construction
  1419  	o := &Object{
  1420  		fs:     f,
  1421  		remote: src.Remote(),
  1422  	}
  1423  	return o, o.Update(ctx, in, src, options...)
  1424  }
  1425  
  1426  // PutStream uploads to the remote path with the modTime given of indeterminate size
  1427  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1428  	return f.Put(ctx, in, src, options...)
  1429  }
  1430  
  1431  // Check if the bucket exists
  1432  //
  1433  // NB this can return incorrect results if called immediately after bucket deletion
  1434  func (f *Fs) dirExists(ctx context.Context) (bool, error) {
  1435  	req := s3.HeadBucketInput{
  1436  		Bucket: &f.bucket,
  1437  	}
  1438  	err := f.pacer.Call(func() (bool, error) {
  1439  		_, err := f.c.HeadBucketWithContext(ctx, &req)
  1440  		return f.shouldRetry(err)
  1441  	})
  1442  	if err == nil {
  1443  		return true, nil
  1444  	}
  1445  	if err, ok := err.(awserr.RequestFailure); ok {
  1446  		if err.StatusCode() == http.StatusNotFound {
  1447  			return false, nil
  1448  		}
  1449  	}
  1450  	return false, err
  1451  }
  1452  
  1453  // Mkdir creates the bucket if it doesn't exist
  1454  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1455  	f.bucketOKMu.Lock()
  1456  	defer f.bucketOKMu.Unlock()
  1457  	if f.bucketOK {
  1458  		return nil
  1459  	}
  1460  	if !f.bucketDeleted {
  1461  		exists, err := f.dirExists(ctx)
  1462  		if err == nil {
  1463  			f.bucketOK = exists
  1464  		}
  1465  		if err != nil || exists {
  1466  			return err
  1467  		}
  1468  	}
  1469  	req := s3.CreateBucketInput{
  1470  		Bucket: &f.bucket,
  1471  		ACL:    &f.opt.BucketACL,
  1472  	}
  1473  	if f.opt.LocationConstraint != "" {
  1474  		req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
  1475  			LocationConstraint: &f.opt.LocationConstraint,
  1476  		}
  1477  	}
  1478  	err := f.pacer.Call(func() (bool, error) {
  1479  		_, err := f.c.CreateBucketWithContext(ctx, &req)
  1480  		return f.shouldRetry(err)
  1481  	})
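        	// Treat "BucketAlreadyOwnedByYou" as success - the bucket
        	// exists and we own it, so there is nothing more to do.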
  1482  	if err, ok := err.(awserr.Error); ok {
  1483  		if err.Code() == "BucketAlreadyOwnedByYou" {
  1484  			err = nil
  1485  		}
  1486  	}
  1487  	if err == nil {
  1488  		f.bucketOK = true
  1489  		f.bucketDeleted = false
  1490  		fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
  1491  	}
  1492  	return err
  1493  }
  1494  
  1495  // Rmdir deletes the bucket if the fs is at the root
  1496  //
  1497  // Returns an error if it isn't empty
  1498  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1499  	f.bucketOKMu.Lock()
  1500  	defer f.bucketOKMu.Unlock()
  1501  	if f.root != "" || dir != "" {
  1502  		return nil
  1503  	}
  1504  	req := s3.DeleteBucketInput{
  1505  		Bucket: &f.bucket,
  1506  	}
  1507  	err := f.pacer.Call(func() (bool, error) {
  1508  		_, err := f.c.DeleteBucketWithContext(ctx, &req)
  1509  		return f.shouldRetry(err)
  1510  	})
  1511  	if err == nil {
  1512  		f.bucketOK = false
  1513  		f.bucketDeleted = true
  1514  		fs.Infof(f, "Bucket deleted")
  1515  	}
  1516  	return err
  1517  }
  1518  
  1519  // Precision of the remote
  1520  func (f *Fs) Precision() time.Duration {
  1521  	return time.Nanosecond
  1522  }
  1523  
  1524  // pathEscape escapes s as for a URL path.  It uses rest.URLPathEscape
  1525  // but also escapes '+' for S3 and Digital Ocean spaces compatibility
  1526  func pathEscape(s string) string {
  1527  	return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
  1528  }
  1529  
  1530  // Copy src to this remote using server side copy operations.
  1531  //
  1532  // This is stored with the remote path given
  1533  //
  1534  // It returns the destination Object and a possible error
  1535  //
  1536  // Will only be called if src.Fs().Name() == f.Name()
  1537  //
  1538  // If it isn't possible then return fs.ErrorCantCopy
  1539  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1540  	err := f.Mkdir(ctx, "")
  1541  	if err != nil {
  1542  		return nil, err
  1543  	}
  1544  	srcObj, ok := src.(*Object)
  1545  	if !ok {
  1546  		fs.Debugf(src, "Can't copy - not same remote type")
  1547  		return nil, fs.ErrorCantCopy
  1548  	}
  1549  	srcFs := srcObj.fs
  1550  	key := f.root + remote
  1551  	source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
  1552  	req := s3.CopyObjectInput{
  1553  		Bucket:            &f.bucket,
  1554  		ACL:               &f.opt.ACL,
  1555  		Key:               &key,
  1556  		CopySource:        &source,
  1557  		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
  1558  	}
  1559  	if f.opt.ServerSideEncryption != "" {
  1560  		req.ServerSideEncryption = &f.opt.ServerSideEncryption
  1561  	}
  1562  	if f.opt.SSEKMSKeyID != "" {
  1563  		req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
  1564  	}
  1565  	if f.opt.StorageClass != "" {
  1566  		req.StorageClass = &f.opt.StorageClass
  1567  	}
  1568  	err = f.pacer.Call(func() (bool, error) {
  1569  		_, err = f.c.CopyObjectWithContext(ctx, &req)
  1570  		return f.shouldRetry(err)
  1571  	})
  1572  	if err != nil {
  1573  		return nil, err
  1574  	}
  1575  	return f.NewObject(ctx, remote)
  1576  }
  1577  
  1578  // Hashes returns the supported hash sets.
  1579  func (f *Fs) Hashes() hash.Set {
  1580  	return hash.Set(hash.MD5)
  1581  }
  1582  
  1583  // ------------------------------------------------------------
  1584  
  1585  // Fs returns the parent Fs
  1586  func (o *Object) Fs() fs.Info {
  1587  	return o.fs
  1588  }
  1589  
  1590  // Return a string version
  1591  func (o *Object) String() string {
  1592  	if o == nil {
  1593  		return "<nil>"
  1594  	}
  1595  	return o.remote
  1596  }
  1597  
  1598  // Remote returns the remote path
  1599  func (o *Object) Remote() string {
  1600  	return o.remote
  1601  }
  1602  
  1603  var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
  1604  
  1605  // Hash returns the MD5 sum of an object as a lowercase hex string
  1606  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1607  	if t != hash.MD5 {
  1608  		return "", hash.ErrUnsupported
  1609  	}
  1610  	hash := strings.Trim(strings.ToLower(o.etag), `"`)
  1611  	// Check the etag is a valid md5sum
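        	// ETags of multipart uploads aren't MD5 sums (they have a
        	// "-<number of parts>" suffix), so fall back to the MD5 we
        	// stored in the object metadata at upload time, if any.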
  1612  	if !matchMd5.MatchString(hash) {
  1613  		err := o.readMetaData(ctx)
  1614  		if err != nil {
  1615  			return "", err
  1616  		}
  1617  
  1618  		if md5sum, ok := o.meta[metaMD5Hash]; ok {
  1619  			md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
  1620  			if err != nil {
  1621  				return "", err
  1622  			}
  1623  			hash = hex.EncodeToString(md5sumBytes)
  1624  		} else {
  1625  			hash = ""
  1626  		}
  1627  	}
  1628  	return hash, nil
  1629  }
  1630  
  1631  // Size returns the size of an object in bytes
  1632  func (o *Object) Size() int64 {
  1633  	return o.bytes
  1634  }
  1635  
  1636  // readMetaData gets the metadata if it hasn't already been fetched
  1637  //
  1638  // it also sets the info
  1639  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1640  	if o.meta != nil {
  1641  		return nil
  1642  	}
  1643  	key := o.fs.root + o.remote
  1644  	req := s3.HeadObjectInput{
  1645  		Bucket: &o.fs.bucket,
  1646  		Key:    &key,
  1647  	}
  1648  	var resp *s3.HeadObjectOutput
  1649  	err = o.fs.pacer.Call(func() (bool, error) {
  1650  		var err error
  1651  		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
  1652  		return o.fs.shouldRetry(err)
  1653  	})
  1654  	if err != nil {
  1655  		if awsErr, ok := err.(awserr.RequestFailure); ok {
  1656  			if awsErr.StatusCode() == http.StatusNotFound {
  1657  				return fs.ErrorObjectNotFound
  1658  			}
  1659  		}
  1660  		return err
  1661  	}
  1662  	var size int64
  1663  	// Ignore missing Content-Length assuming it is 0
  1664  	// Some versions of Ceph do this due to their Apache proxies
  1665  	if resp.ContentLength != nil {
  1666  		size = *resp.ContentLength
  1667  	}
  1668  	o.etag = aws.StringValue(resp.ETag)
  1669  	o.bytes = size
  1670  	o.meta = resp.Metadata
  1671  	if resp.LastModified == nil {
  1672  		fs.Logf(o, "Failed to read last modified from HEAD")
  1673  		o.lastModified = time.Now()
  1674  	} else {
  1675  		o.lastModified = *resp.LastModified
  1676  	}
  1677  	o.mimeType = aws.StringValue(resp.ContentType)
  1678  	return nil
  1679  }
  1680  
  1681  // ModTime returns the modification time of the object
  1682  //
  1683  // It attempts to read the object's mtime and if that isn't present the
  1684  // LastModified returned in the http headers
  1685  func (o *Object) ModTime(ctx context.Context) time.Time {
  1686  	if fs.Config.UseServerModTime {
  1687  		return o.lastModified
  1688  	}
  1689  	err := o.readMetaData(ctx)
  1690  	if err != nil {
  1691  		fs.Logf(o, "Failed to read metadata: %v", err)
  1692  		return time.Now()
  1693  	}
  1694  	// read mtime out of metadata if available
  1695  	d, ok := o.meta[metaMtime]
  1696  	if !ok || d == nil {
  1697  		// fs.Debugf(o, "No metadata")
  1698  		return o.lastModified
  1699  	}
  1700  	modTime, err := swift.FloatStringToTime(*d)
  1701  	if err != nil {
  1702  		fs.Logf(o, "Failed to read mtime from object: %v", err)
  1703  		return o.lastModified
  1704  	}
  1705  	return modTime
  1706  }
  1707  
  1708  // SetModTime sets the modification time of the local fs object
  1709  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1710  	err := o.readMetaData(ctx)
  1711  	if err != nil {
  1712  		return err
  1713  	}
  1714  	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
  1715  
  1716  	if o.bytes >= maxSizeForCopy {
  1717  		fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
  1718  		return nil
  1719  	}
  1720  
  1721  	// Guess the content type
  1722  	mimeType := fs.MimeType(ctx, o)
  1723  
  1724  	// Copy the object to itself to update the metadata
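        	// (S3 has no way to update metadata in place - a copy is
        	// required - and single copy operations are limited in size,
        	// hence the maxSizeForCopy check above)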
  1725  	key := o.fs.root + o.remote
  1726  	sourceKey := o.fs.bucket + "/" + key
  1727  	directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
  1728  	req := s3.CopyObjectInput{
  1729  		Bucket:            &o.fs.bucket,
  1730  		ACL:               &o.fs.opt.ACL,
  1731  		Key:               &key,
  1732  		ContentType:       &mimeType,
  1733  		CopySource:        aws.String(pathEscape(sourceKey)),
  1734  		Metadata:          o.meta,
  1735  		MetadataDirective: &directive,
  1736  	}
  1737  	if o.fs.opt.ServerSideEncryption != "" {
  1738  		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
  1739  	}
  1740  	if o.fs.opt.SSEKMSKeyID != "" {
  1741  		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
  1742  	}
  1743  	if o.fs.opt.StorageClass == "GLACIER" || o.fs.opt.StorageClass == "DEEP_ARCHIVE" {
  1744  		return fs.ErrorCantSetModTime
  1745  	}
  1746  	if o.fs.opt.StorageClass != "" {
  1747  		req.StorageClass = &o.fs.opt.StorageClass
  1748  	}
  1749  	err = o.fs.pacer.Call(func() (bool, error) {
  1750  		_, err := o.fs.c.CopyObjectWithContext(ctx, &req)
  1751  		return o.fs.shouldRetry(err)
  1752  	})
  1753  	return err
  1754  }
  1755  
  1756  // Storable returns a boolean indicating if this object is storable
  1757  func (o *Object) Storable() bool {
  1758  	return true
  1759  }
  1760  
  1761  // Open an object for read
  1762  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1763  	key := o.fs.root + o.remote
  1764  	req := s3.GetObjectInput{
  1765  		Bucket: &o.fs.bucket,
  1766  		Key:    &key,
  1767  	}
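        	// Apply any Range or Seek options as an HTTP Range header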
  1768  	for _, option := range options {
  1769  		switch option.(type) {
  1770  		case *fs.RangeOption, *fs.SeekOption:
  1771  			_, value := option.Header()
  1772  			req.Range = &value
  1773  		default:
  1774  			if option.Mandatory() {
  1775  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  1776  			}
  1777  		}
  1778  	}
  1779  	var resp *s3.GetObjectOutput
  1780  	err = o.fs.pacer.Call(func() (bool, error) {
  1781  		var err error
  1782  		resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
  1783  		return o.fs.shouldRetry(err)
  1784  	})
  1785  	if err, ok := err.(awserr.RequestFailure); ok {
  1786  		if err.Code() == "InvalidObjectState" {
  1787  			return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
  1788  		}
  1789  	}
  1790  	if err != nil {
  1791  		return nil, err
  1792  	}
  1793  	return resp.Body, nil
  1794  }
  1795  
  1796  // Update the Object from in with modTime and size
  1797  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1798  	err := o.fs.Mkdir(ctx, "")
  1799  	if err != nil {
  1800  		return err
  1801  	}
  1802  	modTime := src.ModTime(ctx)
  1803  	size := src.Size()
  1804  
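        	// Streamed uploads (unknown size) and anything at or above
        	// upload_cutoff go via the multipart uploader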
  1805  	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
  1806  	var uploader *s3manager.Uploader
  1807  	if multipart {
  1808  		uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
  1809  			u.Concurrency = o.fs.opt.UploadConcurrency
  1810  			u.LeavePartsOnError = false
  1811  			u.S3 = o.fs.c
  1812  			u.PartSize = int64(o.fs.opt.ChunkSize)
  1813  
  1814  			if size < 0 {
  1815  				// Make parts as small as possible while still being able to upload to the
  1816  				// S3 file size limit. Rounded up to nearest MB.
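        				// (assuming maxFileSize is the 5 TiB S3 object size
        				// limit and MaxUploadParts is 10,000, this works out
        				// as 525 MiB parts)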
  1817  				u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
  1818  				return
  1819  			}
  1820  			// Adjust PartSize until the number of parts is small enough.
  1821  			if size/u.PartSize >= s3manager.MaxUploadParts {
  1822  				// Calculate partition size rounded up to the nearest MB
  1823  				u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
  1824  			}
  1825  		})
  1826  	}
  1827  
  1828  	// Set the mtime in the meta data
  1829  	metadata := map[string]*string{
  1830  		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
  1831  	}
  1832  
  1833  	// read the md5sum if available - always for single part uploads,
  1834  	// and for multipart uploads unless disable_checksum is set.
  1835  	var md5sum string
  1836  	if !multipart || !o.fs.opt.DisableChecksum {
  1837  		hash, err := src.Hash(ctx, hash.MD5)
  1838  		if err == nil && matchMd5.MatchString(hash) {
  1839  			hashBytes, err := hex.DecodeString(hash)
  1840  			if err == nil {
  1841  				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
  1842  				if multipart {
  1843  					metadata[metaMD5Hash] = &md5sum
  1844  				}
  1845  			}
  1846  		}
  1847  	}
  1848  
  1849  	// Guess the content type
  1850  	mimeType := fs.MimeType(ctx, src)
  1851  
  1852  	key := o.fs.root + o.remote
  1853  	if multipart {
  1854  		req := s3manager.UploadInput{
  1855  			Bucket:      &o.fs.bucket,
  1856  			ACL:         &o.fs.opt.ACL,
  1857  			Key:         &key,
  1858  			Body:        in,
  1859  			ContentType: &mimeType,
  1860  			Metadata:    metadata,
  1861  			//ContentLength: &size,
  1862  		}
  1863  		if o.fs.opt.ServerSideEncryption != "" {
  1864  			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
  1865  		}
  1866  		if o.fs.opt.SSEKMSKeyID != "" {
  1867  			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
  1868  		}
  1869  		if o.fs.opt.StorageClass != "" {
  1870  			req.StorageClass = &o.fs.opt.StorageClass
  1871  		}
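        		// Can't retry at this level as the uploader will have
        		// consumed the input reader, which can't be rewound.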
  1872  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1873  			_, err = uploader.UploadWithContext(ctx, &req)
  1874  			return o.fs.shouldRetry(err)
  1875  		})
  1876  		if err != nil {
  1877  			return err
  1878  		}
  1879  	} else {
  1880  		req := s3.PutObjectInput{
  1881  			Bucket:      &o.fs.bucket,
  1882  			ACL:         &o.fs.opt.ACL,
  1883  			Key:         &key,
  1884  			ContentType: &mimeType,
  1885  			Metadata:    metadata,
  1886  		}
  1887  		if md5sum != "" {
  1888  			req.ContentMD5 = &md5sum
  1889  		}
  1890  		if o.fs.opt.ServerSideEncryption != "" {
  1891  			req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
  1892  		}
  1893  		if o.fs.opt.SSEKMSKeyID != "" {
  1894  			req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
  1895  		}
  1896  		if o.fs.opt.StorageClass != "" {
  1897  			req.StorageClass = &o.fs.opt.StorageClass
  1898  		}
  1899  
  1900  		// Create the request
  1901  		putObj, _ := o.fs.c.PutObjectRequest(&req)
  1902  
  1903  		// Sign it so we can upload using a presigned request.
  1904  		//
  1905  		// Note the SDK doesn't currently support streaming to
  1906  		// PutObject so we'll use this work-around.
  1907  		url, headers, err := putObj.PresignRequest(15 * time.Minute)
  1908  		if err != nil {
  1909  			return errors.Wrap(err, "s3 upload: sign request")
  1910  		}
  1911  
  1912  		// Set the body to nil if empty so net/http doesn't use chunked encoding
  1913  		if size == 0 {
  1914  			in = nil
  1915  		}
  1916  
  1917  		// create the vanilla http request
  1918  		httpReq, err := http.NewRequest("PUT", url, in)
  1919  		if err != nil {
  1920  			return errors.Wrap(err, "s3 upload: new request")
  1921  		}
  1922  		httpReq = httpReq.WithContext(ctx)
  1923  
  1924  		// set the headers we signed and the length
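        		// (setting ContentLength explicitly stops net/http using
        		// chunked transfer encoding, which S3 doesn't accept)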
  1925  		httpReq.Header = headers
  1926  		httpReq.ContentLength = size
  1927  
  1928  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1929  			resp, err := o.fs.srv.Do(httpReq)
  1930  			if err != nil {
  1931  				return o.fs.shouldRetry(err)
  1932  			}
  1933  			body, err := rest.ReadBody(resp)
  1934  			if err != nil {
  1935  				return o.fs.shouldRetry(err)
  1936  			}
  1937  			if resp.StatusCode >= 200 && resp.StatusCode < 300 {
  1938  				return false, nil
  1939  			}
  1940  			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
  1941  			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
  1942  		})
  1943  		if err != nil {
  1944  			return err
  1945  		}
  1946  	}
  1947  
  1948  	// Read the metadata from the newly created object
  1949  	o.meta = nil // wipe old metadata
  1950  	err = o.readMetaData(ctx)
  1951  	return err
  1952  }
  1953  
  1954  // Remove an object
  1955  func (o *Object) Remove(ctx context.Context) error {
  1956  	key := o.fs.root + o.remote
  1957  	req := s3.DeleteObjectInput{
  1958  		Bucket: &o.fs.bucket,
  1959  		Key:    &key,
  1960  	}
  1961  	err := o.fs.pacer.Call(func() (bool, error) {
  1962  		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
  1963  		return o.fs.shouldRetry(err)
  1964  	})
  1965  	return err
  1966  }
  1967  
  1968  // MimeType of an Object if known, "" otherwise
  1969  func (o *Object) MimeType(ctx context.Context) string {
  1970  	err := o.readMetaData(ctx)
  1971  	if err != nil {
  1972  		fs.Logf(o, "Failed to read metadata: %v", err)
  1973  		return ""
  1974  	}
  1975  	return o.mimeType
  1976  }
  1977  
  1978  // Check the interfaces are satisfied
  1979  var (
  1980  	_ fs.Fs          = &Fs{}
  1981  	_ fs.Copier      = &Fs{}
  1982  	_ fs.PutStreamer = &Fs{}
  1983  	_ fs.ListRer     = &Fs{}
  1984  	_ fs.Object      = &Object{}
  1985  	_ fs.MimeTyper   = &Object{}
  1986  )