github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/swift/swift.go

     1  // Package swift provides an interface to the Swift object storage system
     2  package swift
     3  
     4  import (
     5  	"bufio"
     6  	"bytes"
     7  	"context"
     8  	"fmt"
     9  	"io"
    10  	"path"
    11  	"regexp"
    12  	"strconv"
    13  	"strings"
    14  	"sync"
    15  	"time"
    16  
    17  	"github.com/ncw/rclone/fs"
    18  	"github.com/ncw/rclone/fs/config/configmap"
    19  	"github.com/ncw/rclone/fs/config/configstruct"
    20  	"github.com/ncw/rclone/fs/fserrors"
    21  	"github.com/ncw/rclone/fs/fshttp"
    22  	"github.com/ncw/rclone/fs/hash"
    23  	"github.com/ncw/rclone/fs/operations"
    24  	"github.com/ncw/rclone/fs/walk"
    25  	"github.com/ncw/rclone/lib/pacer"
    26  	"github.com/ncw/swift"
    27  	"github.com/pkg/errors"
    28  )
    29  
    30  // Constants
    31  const (
    32  	directoryMarkerContentType = "application/directory" // content type of directory marker objects
    33  	listChunks                 = 1000                    // chunk size to read directory listings
    34  	defaultChunkSize           = 5 * fs.GibiByte
    35  	minSleep                   = 10 * time.Millisecond // In case of error, start at 10ms sleep.
    36  )
    37  
    38  // SharedOptions are shared between swift and hubic
    39  var SharedOptions = []fs.Option{{
    40  	Name: "chunk_size",
    41  	Help: `Above this size files will be chunked into a _segments container.
    42  
    43  Files above this size will be uploaded in chunks to a _segments
    44  container.  The default for this is 5GB which is its maximum value.`,
    45  	Default:  defaultChunkSize,
    46  	Advanced: true,
    47  }, {
    48  	Name: "no_chunk",
    49  	Help: `Don't chunk files during streaming upload.
    50  
    51  When doing streaming uploads (eg using rcat or mount) setting this
    52  flag will cause the swift backend to not upload chunked files.
    53  
    54  This will limit the maximum upload size to 5GB. However, non-chunked
    55  files are easier to deal with and have an MD5SUM.
    56  
    57  Rclone will still chunk files bigger than chunk_size when doing normal
    58  copy operations.`,
    59  	Default:  false,
    60  	Advanced: true,
    61  }}
    62  
    63  // Register with Fs
    64  func init() {
    65  	fs.Register(&fs.RegInfo{
    66  		Name:        "swift",
    67  		Description: "Openstack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
    68  		NewFs:       NewFs,
    69  		Options: append([]fs.Option{{
    70  			Name:    "env_auth",
    71  			Help:    "Get swift credentials from environment variables in standard OpenStack form.",
    72  			Default: false,
    73  			Examples: []fs.OptionExample{
    74  				{
    75  					Value: "false",
    76  					Help:  "Enter swift credentials in the next step",
    77  				}, {
    78  					Value: "true",
    79  					Help:  "Get swift credentials from environment vars. Leave other fields blank if using this.",
    80  				},
    81  			},
    82  		}, {
    83  			Name: "user",
    84  			Help: "User name to log in (OS_USERNAME).",
    85  		}, {
    86  			Name: "key",
    87  			Help: "API key or password (OS_PASSWORD).",
    88  		}, {
    89  			Name: "auth",
    90  			Help: "Authentication URL for server (OS_AUTH_URL).",
    91  			Examples: []fs.OptionExample{{
    92  				Help:  "Rackspace US",
    93  				Value: "https://auth.api.rackspacecloud.com/v1.0",
    94  			}, {
    95  				Help:  "Rackspace UK",
    96  				Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
    97  			}, {
    98  				Help:  "Rackspace v2",
    99  				Value: "https://identity.api.rackspacecloud.com/v2.0",
   100  			}, {
   101  				Help:  "Memset Memstore UK",
   102  				Value: "https://auth.storage.memset.com/v1.0",
   103  			}, {
   104  				Help:  "Memset Memstore UK v2",
   105  				Value: "https://auth.storage.memset.com/v2.0",
   106  			}, {
   107  				Help:  "OVH",
   108  				Value: "https://auth.cloud.ovh.net/v2.0",
   109  			}},
   110  		}, {
   111  			Name: "user_id",
   112  			Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
   113  		}, {
   114  			Name: "domain",
   115  			Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
   116  		}, {
   117  			Name: "tenant",
   118  			Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME)",
   119  		}, {
   120  			Name: "tenant_id",
   121  			Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID)",
   122  		}, {
   123  			Name: "tenant_domain",
   124  			Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME)",
   125  		}, {
   126  			Name: "region",
   127  			Help: "Region name - optional (OS_REGION_NAME)",
   128  		}, {
   129  			Name: "storage_url",
   130  			Help: "Storage URL - optional (OS_STORAGE_URL)",
   131  		}, {
   132  			Name: "auth_token",
   133  			Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN)",
   134  		}, {
   135  			Name: "application_credential_id",
   136  			Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID)",
   137  		}, {
   138  			Name: "application_credential_name",
   139  			Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME)",
   140  		}, {
   141  			Name: "application_credential_secret",
   142  			Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET)",
   143  		}, {
   144  			Name:    "auth_version",
   145  			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION)",
   146  			Default: 0,
   147  		}, {
   148  			Name:    "endpoint_type",
   149  			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE)",
   150  			Default: "public",
   151  			Examples: []fs.OptionExample{{
   152  				Help:  "Public (default, choose this if not sure)",
   153  				Value: "public",
   154  			}, {
   155  				Help:  "Internal (use internal service net)",
   156  				Value: "internal",
   157  			}, {
   158  				Help:  "Admin",
   159  				Value: "admin",
   160  			}},
   161  		}, {
   162  			Name: "storage_policy",
   163  			Help: `The storage policy to use when creating a new container
   164  
   165  This applies the specified storage policy when creating a new
   166  container. The policy cannot be changed afterwards. The allowed
   167  configuration values and their meaning depend on your Swift storage
   168  provider.`,
   169  			Default: "",
   170  			Examples: []fs.OptionExample{{
   171  				Help:  "Default",
   172  				Value: "",
   173  			}, {
   174  				Help:  "OVH Public Cloud Storage",
   175  				Value: "pcs",
   176  			}, {
   177  				Help:  "OVH Public Cloud Archive",
   178  				Value: "pca",
   179  			}},
   180  		}}, SharedOptions...),
   181  	})
   182  }
   183  
   184  // Options defines the configuration for this backend
   185  type Options struct {
   186  	EnvAuth                     bool          `config:"env_auth"`
   187  	User                        string        `config:"user"`
   188  	Key                         string        `config:"key"`
   189  	Auth                        string        `config:"auth"`
   190  	UserID                      string        `config:"user_id"`
   191  	Domain                      string        `config:"domain"`
   192  	Tenant                      string        `config:"tenant"`
   193  	TenantID                    string        `config:"tenant_id"`
   194  	TenantDomain                string        `config:"tenant_domain"`
   195  	Region                      string        `config:"region"`
   196  	StorageURL                  string        `config:"storage_url"`
   197  	AuthToken                   string        `config:"auth_token"`
   198  	AuthVersion                 int           `config:"auth_version"`
   199  	ApplicationCredentialID     string        `config:"application_credential_id"`
   200  	ApplicationCredentialName   string        `config:"application_credential_name"`
   201  	ApplicationCredentialSecret string        `config:"application_credential_secret"`
   202  	StoragePolicy               string        `config:"storage_policy"`
   203  	EndpointType                string        `config:"endpoint_type"`
   204  	ChunkSize                   fs.SizeSuffix `config:"chunk_size"`
   205  	NoChunk                     bool          `config:"no_chunk"`
   206  }
   207  
   208  // Fs represents a remote swift server
   209  type Fs struct {
   210  	name              string            // name of this remote
   211  	root              string            // the path we are working on if any
   212  	features          *fs.Features      // optional features
   213  	opt               Options           // options for this backend
   214  	c                 *swift.Connection // the connection to the swift server
   215  	container         string            // the container we are working on
   216  	containerOKMu     sync.Mutex        // mutex to protect container OK
   217  	containerOK       bool              // true if we have created the container
   218  	segmentsContainer string            // container to store the segments (if any) in
   219  	noCheckContainer  bool              // don't check the container before creating it
   220  	pacer             *fs.Pacer         // To pace the API calls
   221  }
   222  
   223  // Object describes a swift object
   224  //
   225  // Will definitely have info but maybe not meta
   226  type Object struct {
   227  	fs           *Fs    // what this object is part of
   228  	remote       string // The remote path
   229  	size         int64
   230  	lastModified time.Time
   231  	contentType  string
   232  	md5          string
   233  	headers      swift.Headers // The object headers if known
   234  }
   235  
   236  // ------------------------------------------------------------
   237  
   238  // Name of the remote (as passed into NewFs)
   239  func (f *Fs) Name() string {
   240  	return f.name
   241  }
   242  
   243  // Root of the remote (as passed into NewFs)
   244  func (f *Fs) Root() string {
   245  	if f.root == "" {
   246  		return f.container
   247  	}
   248  	return f.container + "/" + f.root
   249  }
   250  
   251  // String converts this Fs to a string
   252  func (f *Fs) String() string {
   253  	if f.root == "" {
   254  		return fmt.Sprintf("Swift container %s", f.container)
   255  	}
   256  	return fmt.Sprintf("Swift container %s path %s", f.container, f.root)
   257  }
   258  
   259  // Features returns the optional features of this Fs
   260  func (f *Fs) Features() *fs.Features {
   261  	return f.features
   262  }
   263  
   264  // retryErrorCodes is a slice of error codes that we will retry
   265  var retryErrorCodes = []int{
   266  	401, // Unauthorized (eg "Token has expired")
   267  	408, // Request Timeout
   268  	409, // Conflict - various states that could be resolved on a retry
   269  	429, // Rate exceeded.
   270  	500, // Get occasional 500 Internal Server Error
   271  	503, // Service Unavailable/Slow Down - "Reduce your request rate"
   272  	504, // Gateway Time-out
   273  }
   274  
   275  // shouldRetry returns a boolean as to whether this err deserves to be
   276  // retried.  It returns the err as a convenience
   277  func shouldRetry(err error) (bool, error) {
    278  	// If this is a swift.Error object extract the HTTP error code
   279  	if swiftError, ok := err.(*swift.Error); ok {
   280  		for _, e := range retryErrorCodes {
   281  			if swiftError.StatusCode == e {
   282  				return true, err
   283  			}
   284  		}
   285  	}
   286  	// Check for generic failure conditions
   287  	return fserrors.ShouldRetry(err), err
   288  }
   289  
   290  // shouldRetryHeaders returns a boolean as to whether this err
   291  // deserves to be retried.  It reads the headers passed in looking for
   292  // `Retry-After`. It returns the err as a convenience
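        //
        // For example, a 429 response carrying "Retry-After: 5" causes an
        // immediate 5 second sleep followed by a retry, while anything over
        // 60 seconds is returned as a RetryAfter error for the caller to
        // schedule.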
   293  func shouldRetryHeaders(headers swift.Headers, err error) (bool, error) {
   294  	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
   295  		if value := headers["Retry-After"]; value != "" {
   296  			retryAfter, parseErr := strconv.Atoi(value)
   297  			if parseErr != nil {
   298  				fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
   299  			} else {
   300  				duration := time.Second * time.Duration(retryAfter)
   301  				if duration <= 60*time.Second {
   302  					// Do a short sleep immediately
   303  					fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
   304  					time.Sleep(duration)
   305  					return true, err
   306  				}
   307  				// Too long to sleep here - return a retry error for the caller to handle
   308  				return false, fserrors.NewErrorRetryAfter(duration)
   309  			}
   310  		}
   311  	}
   312  	return shouldRetry(err)
   313  }
   314  
   315  // Pattern to match a swift path
   316  var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
   317  
   318  // parsePath parses a swift 'url' into a container and directory
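        //
        // For example (illustrative paths only):
        //
        //	parsePath("container/dir/subdir") -> ("container", "dir/subdir", nil)
        //	parsePath("/container")           -> ("container", "", nil)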
   319  func parsePath(path string) (container, directory string, err error) {
   320  	parts := matcher.FindStringSubmatch(path)
   321  	if parts == nil {
   322  		err = errors.Errorf("couldn't find container in swift path %q", path)
   323  	} else {
   324  		container, directory = parts[1], parts[2]
   325  		directory = strings.Trim(directory, "/")
   326  	}
   327  	return
   328  }
   329  
   330  // swiftConnection makes a connection to swift
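        //
        // If opt.EnvAuth is set, the standard OpenStack environment variables
        // (OS_USERNAME, OS_PASSWORD, OS_AUTH_URL, etc) are read via
        // c.ApplyEnvironment before authenticating.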
   331  func swiftConnection(opt *Options, name string) (*swift.Connection, error) {
   332  	c := &swift.Connection{
   333  		// Keep these in the same order as the Config for ease of checking
   334  		UserName:                    opt.User,
   335  		ApiKey:                      opt.Key,
   336  		AuthUrl:                     opt.Auth,
   337  		UserId:                      opt.UserID,
   338  		Domain:                      opt.Domain,
   339  		Tenant:                      opt.Tenant,
   340  		TenantId:                    opt.TenantID,
   341  		TenantDomain:                opt.TenantDomain,
   342  		Region:                      opt.Region,
   343  		StorageUrl:                  opt.StorageURL,
   344  		AuthToken:                   opt.AuthToken,
   345  		AuthVersion:                 opt.AuthVersion,
   346  		ApplicationCredentialId:     opt.ApplicationCredentialID,
   347  		ApplicationCredentialName:   opt.ApplicationCredentialName,
   348  		ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
   349  		EndpointType:                swift.EndpointType(opt.EndpointType),
   350  		ConnectTimeout:              10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
   351  		Timeout:                     10 * fs.Config.Timeout,        // Use the timeouts in the transport
   352  		Transport:                   fshttp.NewTransport(fs.Config),
   353  	}
   354  	if opt.EnvAuth {
   355  		err := c.ApplyEnvironment()
   356  		if err != nil {
   357  			return nil, errors.Wrap(err, "failed to read environment variables")
   358  		}
   359  	}
   360  	StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
   361  	if !c.Authenticated() {
   362  		if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
   363  			if c.UserName == "" && c.UserId == "" {
   364  				return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
   365  			}
   366  			if c.ApiKey == "" {
   367  				return nil, errors.New("key not found")
   368  			}
   369  		}
   370  		if c.AuthUrl == "" {
   371  			return nil, errors.New("auth not found")
   372  		}
   373  		err := c.Authenticate() // fills in c.StorageUrl and c.AuthToken
   374  		if err != nil {
   375  			return nil, err
   376  		}
   377  	}
   378  	// Make sure we re-auth with the AuthToken and StorageUrl
   379  	// provided by wrapping the existing auth, so we can just
   380  	// override one or the other or both.
   381  	if StorageUrl != "" || AuthToken != "" {
   382  		// Re-write StorageURL and AuthToken if they are being
   383  		// overridden as c.Authenticate above will have
   384  		// overwritten them.
   385  		if StorageUrl != "" {
   386  			c.StorageUrl = StorageUrl
   387  		}
   388  		if AuthToken != "" {
   389  			c.AuthToken = AuthToken
   390  		}
   391  		c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
   392  	}
   393  	return c, nil
   394  }
   395  
   396  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   397  	const minChunkSize = fs.Byte
   398  	if cs < minChunkSize {
   399  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   400  	}
   401  	return nil
   402  }
   403  
   404  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   405  	err = checkUploadChunkSize(cs)
   406  	if err == nil {
   407  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   408  	}
   409  	return
   410  }
   411  
   412  // NewFsWithConnection constructs an Fs from the path, container:path
   413  // and authenticated connection.
   414  //
   415  // if noCheckContainer is set then the Fs won't check the container
   416  // exists before creating it.
   417  func NewFsWithConnection(opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
   418  	container, directory, err := parsePath(root)
   419  	if err != nil {
   420  		return nil, err
   421  	}
   422  	f := &Fs{
   423  		name:              name,
   424  		opt:               *opt,
   425  		c:                 c,
   426  		container:         container,
   427  		segmentsContainer: container + "_segments",
   428  		root:              directory,
   429  		noCheckContainer:  noCheckContainer,
   430  		pacer:             fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
   431  	}
   432  	f.features = (&fs.Features{
   433  		ReadMimeType:  true,
   434  		WriteMimeType: true,
   435  		BucketBased:   true,
   436  	}).Fill(f)
   437  	if f.root != "" {
   438  		f.root += "/"
   439  		// Check to see if the object exists - ignoring directory markers
   440  		var info swift.Object
   441  		err = f.pacer.Call(func() (bool, error) {
   442  			var rxHeaders swift.Headers
   443  			info, rxHeaders, err = f.c.Object(container, directory)
   444  			return shouldRetryHeaders(rxHeaders, err)
   445  		})
   446  		if err == nil && info.ContentType != directoryMarkerContentType {
   447  			f.root = path.Dir(directory)
   448  			if f.root == "." {
   449  				f.root = ""
   450  			} else {
   451  				f.root += "/"
   452  			}
   453  			// return an error with an fs which points to the parent
   454  			return f, fs.ErrorIsFile
   455  		}
   456  	}
   457  	return f, nil
   458  }
   459  
   460  // NewFs constructs an Fs from the path, container:path
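        //
        // A minimal usage sketch - the remote name, credentials and auth URL
        // below are illustrative only:
        //
        //	m := configmap.Simple{
        //		"user": "myuser",
        //		"key":  "mypassword",
        //		"auth": "https://auth.example.com/v1.0",
        //	}
        //	f, err := NewFs("myswift", "mycontainer/path", m)
        //	if err != nil {
        //		// handle err
        //	}
        //	entries, err := f.List(context.Background(), "")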
   461  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   462  	// Parse config into Options struct
   463  	opt := new(Options)
   464  	err := configstruct.Set(m, opt)
   465  	if err != nil {
   466  		return nil, err
   467  	}
   468  	err = checkUploadChunkSize(opt.ChunkSize)
   469  	if err != nil {
   470  		return nil, errors.Wrap(err, "swift: chunk size")
   471  	}
   472  
   473  	c, err := swiftConnection(opt, name)
   474  	if err != nil {
   475  		return nil, err
   476  	}
   477  	return NewFsWithConnection(opt, name, root, c, false)
   478  }
   479  
   480  // Return an Object from a path
   481  //
   482  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   483  func (f *Fs) newObjectWithInfo(remote string, info *swift.Object) (fs.Object, error) {
   484  	o := &Object{
   485  		fs:     f,
   486  		remote: remote,
   487  	}
   488  	// Note that due to a quirk of swift, dynamic large objects are
   489  	// returned as 0 bytes in the listing.  Correct this here by
   490  	// making sure we read the full metadata for all 0 byte files.
   491  	// We don't read the metadata for directory marker objects.
    492  	if info != nil && info.Bytes == 0 && info.ContentType != directoryMarkerContentType {
   493  		info = nil
   494  	}
   495  	if info != nil {
   496  		// Set info but not headers
   497  		err := o.decodeMetaData(info)
   498  		if err != nil {
   499  			return nil, err
   500  		}
   501  	} else {
   502  		err := o.readMetaData() // reads info and headers, returning an error
   503  		if err != nil {
   504  			return nil, err
   505  		}
   506  	}
   507  	return o, nil
   508  }
   509  
   510  // NewObject finds the Object at remote.  If it can't be found it
   511  // returns the error fs.ErrorObjectNotFound.
   512  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   513  	return f.newObjectWithInfo(remote, nil)
   514  }
   515  
   516  // listFn is called from list and listContainerRoot to handle an object.
   517  type listFn func(remote string, object *swift.Object, isDirectory bool) error
   518  
    519  // listContainerRoot lists the objects in the container under the root
    520  // supplied, passing each one to the function fn
    521  //
    522  // Set recurse to read sub directories
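        //
        // When recurse is false the '/' delimiter is used, so sub directories
        // come back as pseudo objects with names ending in '/' and are passed
        // to fn with isDirectory set.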
   523  func (f *Fs) listContainerRoot(container, root string, dir string, recurse bool, fn listFn) error {
   524  	prefix := root
   525  	if dir != "" {
   526  		prefix += dir + "/"
   527  	}
   528  	// Options for ObjectsWalk
   529  	opts := swift.ObjectsOpts{
   530  		Prefix: prefix,
   531  		Limit:  listChunks,
   532  	}
   533  	if !recurse {
   534  		opts.Delimiter = '/'
   535  	}
   536  	rootLength := len(root)
   537  	return f.c.ObjectsWalk(container, &opts, func(opts *swift.ObjectsOpts) (interface{}, error) {
   538  		var objects []swift.Object
   539  		var err error
   540  		err = f.pacer.Call(func() (bool, error) {
   541  			objects, err = f.c.Objects(container, opts)
   542  			return shouldRetry(err)
   543  		})
   544  		if err == nil {
   545  			for i := range objects {
   546  				object := &objects[i]
   547  				isDirectory := false
   548  				if !recurse {
   549  					isDirectory = strings.HasSuffix(object.Name, "/")
   550  				}
   551  				if !strings.HasPrefix(object.Name, prefix) {
   552  					fs.Logf(f, "Odd name received %q", object.Name)
   553  					continue
   554  				}
   555  				if object.Name == prefix {
   556  					// If we have zero length directory markers ending in / then swift
   557  					// will return them in the listing for the directory which causes
   558  					// duplicate directories.  Ignore them here.
   559  					continue
   560  				}
   561  				remote := object.Name[rootLength:]
   562  				err = fn(remote, object, isDirectory)
   563  				if err != nil {
   564  					break
   565  				}
   566  			}
   567  		}
   568  		return objects, err
   569  	})
   570  }
   571  
   572  type addEntryFn func(fs.DirEntry) error
   573  
   574  // list the objects into the function supplied
   575  func (f *Fs) list(dir string, recurse bool, fn addEntryFn) error {
   576  	err := f.listContainerRoot(f.container, f.root, dir, recurse, func(remote string, object *swift.Object, isDirectory bool) (err error) {
   577  		if isDirectory {
   578  			remote = strings.TrimRight(remote, "/")
   579  			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
   580  			err = fn(d)
   581  		} else {
   582  			// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
   583  			var o fs.Object
   584  			o, err = f.newObjectWithInfo(remote, object)
   585  			if err != nil {
   586  				return err
   587  			}
   588  			if o.Storable() {
   589  				err = fn(o)
   590  			}
   591  		}
   592  		return err
   593  	})
   594  	if err == swift.ContainerNotFound {
   595  		err = fs.ErrorDirNotFound
   596  	}
   597  	return err
   598  }
   599  
   600  // mark the container as being OK
   601  func (f *Fs) markContainerOK() {
   602  	if f.container != "" {
   603  		f.containerOKMu.Lock()
   604  		f.containerOK = true
   605  		f.containerOKMu.Unlock()
   606  	}
   607  }
   608  
   609  // listDir lists a single directory
   610  func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
   611  	if f.container == "" {
   612  		return nil, fs.ErrorListBucketRequired
   613  	}
   614  	// List the objects
   615  	err = f.list(dir, false, func(entry fs.DirEntry) error {
   616  		entries = append(entries, entry)
   617  		return nil
   618  	})
   619  	if err != nil {
   620  		return nil, err
   621  	}
   622  	// container must be present if listing succeeded
   623  	f.markContainerOK()
   624  	return entries, nil
   625  }
   626  
   627  // listContainers lists the containers
   628  func (f *Fs) listContainers(dir string) (entries fs.DirEntries, err error) {
   629  	if dir != "" {
   630  		return nil, fs.ErrorListBucketRequired
   631  	}
   632  	var containers []swift.Container
   633  	err = f.pacer.Call(func() (bool, error) {
   634  		containers, err = f.c.ContainersAll(nil)
   635  		return shouldRetry(err)
   636  	})
   637  	if err != nil {
   638  		return nil, errors.Wrap(err, "container listing failed")
   639  	}
   640  	for _, container := range containers {
   641  		d := fs.NewDir(container.Name, time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
   642  		entries = append(entries, d)
   643  	}
   644  	return entries, nil
   645  }
   646  
   647  // List the objects and directories in dir into entries.  The
   648  // entries can be returned in any order but should be for a
   649  // complete directory.
   650  //
   651  // dir should be "" to list the root, and should not have
   652  // trailing slashes.
   653  //
   654  // This should return ErrDirNotFound if the directory isn't
   655  // found.
   656  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   657  	if f.container == "" {
   658  		return f.listContainers(dir)
   659  	}
   660  	return f.listDir(dir)
   661  }
   662  
   663  // ListR lists the objects and directories of the Fs starting
   664  // from dir recursively into out.
   665  //
   666  // dir should be "" to start from the root, and should not
   667  // have trailing slashes.
   668  //
   669  // This should return ErrDirNotFound if the directory isn't
   670  // found.
   671  //
   672  // It should call callback for each tranche of entries read.
   673  // These need not be returned in any particular order.  If
   674  // callback returns an error then the listing will stop
   675  // immediately.
   676  //
   677  // Don't implement this unless you have a more efficient way
    678  // of listing recursively than doing a directory traversal.
   679  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
   680  	if f.container == "" {
   681  		return errors.New("container needed for recursive list")
   682  	}
   683  	list := walk.NewListRHelper(callback)
   684  	err = f.list(dir, true, func(entry fs.DirEntry) error {
   685  		return list.Add(entry)
   686  	})
   687  	if err != nil {
   688  		return err
   689  	}
   690  	// container must be present if listing succeeded
   691  	f.markContainerOK()
   692  	return list.Flush()
   693  }
   694  
   695  // About gets quota information
   696  func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
   697  	var containers []swift.Container
   698  	var err error
   699  	err = f.pacer.Call(func() (bool, error) {
   700  		containers, err = f.c.ContainersAll(nil)
   701  		return shouldRetry(err)
   702  	})
   703  	if err != nil {
   704  		return nil, errors.Wrap(err, "container listing failed")
   705  	}
   706  	var total, objects int64
   707  	for _, c := range containers {
   708  		total += c.Bytes
   709  		objects += c.Count
   710  	}
   711  	usage := &fs.Usage{
   712  		Used:    fs.NewUsageValue(total),   // bytes in use
   713  		Objects: fs.NewUsageValue(objects), // objects in use
   714  	}
   715  	return usage, nil
   716  }
   717  
   718  // Put the object into the container
   719  //
   720  // Copy the reader in to the new object which is returned
   721  //
   722  // The new object may have been created if an error is returned
   723  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   724  	// Temporary Object under construction
    725  	o := &Object{
   726  		fs:      f,
   727  		remote:  src.Remote(),
   728  		headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
   729  	}
    730  	return o, o.Update(ctx, in, src, options...)
   731  }
   732  
   733  // PutStream uploads to the remote path with the modTime given of indeterminate size
   734  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   735  	return f.Put(ctx, in, src, options...)
   736  }
   737  
   738  // Mkdir creates the container if it doesn't exist
   739  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   740  	f.containerOKMu.Lock()
   741  	defer f.containerOKMu.Unlock()
   742  	if f.containerOK {
   743  		return nil
   744  	}
   745  	// if we are at the root, then it is OK
   746  	if f.container == "" {
   747  		return nil
   748  	}
   749  	// Check to see if container exists first
   750  	var err error = swift.ContainerNotFound
   751  	if !f.noCheckContainer {
   752  		err = f.pacer.Call(func() (bool, error) {
   753  			var rxHeaders swift.Headers
   754  			_, rxHeaders, err = f.c.Container(f.container)
   755  			return shouldRetryHeaders(rxHeaders, err)
   756  		})
   757  	}
   758  	if err == swift.ContainerNotFound {
   759  		headers := swift.Headers{}
   760  		if f.opt.StoragePolicy != "" {
   761  			headers["X-Storage-Policy"] = f.opt.StoragePolicy
   762  		}
   763  		err = f.pacer.Call(func() (bool, error) {
   764  			err = f.c.ContainerCreate(f.container, headers)
   765  			return shouldRetry(err)
   766  		})
   767  	}
   768  	if err == nil {
   769  		f.containerOK = true
   770  	}
   771  	return err
   772  }
   773  
   774  // Rmdir deletes the container if the fs is at the root
   775  //
   776  // Returns an error if it isn't empty
   777  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   778  	f.containerOKMu.Lock()
   779  	defer f.containerOKMu.Unlock()
   780  	if f.root != "" || dir != "" {
   781  		return nil
   782  	}
   783  	var err error
   784  	err = f.pacer.Call(func() (bool, error) {
   785  		err = f.c.ContainerDelete(f.container)
   786  		return shouldRetry(err)
   787  	})
   788  	if err == nil {
   789  		f.containerOK = false
   790  	}
   791  	return err
   792  }
   793  
   794  // Precision of the remote
   795  func (f *Fs) Precision() time.Duration {
   796  	return time.Nanosecond
   797  }
   798  
   799  // Purge deletes all the files and directories
   800  //
   801  // Implemented here so we can make sure we delete directory markers
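        //
        // Objects to delete are streamed through a buffered channel to
        // operations.DeleteFiles while the listing is still in progress.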
   802  func (f *Fs) Purge(ctx context.Context) error {
   803  	// Delete all the files including the directory markers
   804  	toBeDeleted := make(chan fs.Object, fs.Config.Transfers)
   805  	delErr := make(chan error, 1)
   806  	go func() {
   807  		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
   808  	}()
   809  	err := f.list("", true, func(entry fs.DirEntry) error {
   810  		if o, ok := entry.(*Object); ok {
   811  			toBeDeleted <- o
   812  		}
   813  		return nil
   814  	})
   815  	close(toBeDeleted)
   816  	delError := <-delErr
   817  	if err == nil {
   818  		err = delError
   819  	}
   820  	if err != nil {
   821  		return err
   822  	}
   823  	return f.Rmdir(ctx, "")
   824  }
   825  
   826  // Copy src to this remote using server side copy operations.
   827  //
   828  // This is stored with the remote path given
   829  //
   830  // It returns the destination Object and a possible error
   831  //
   832  // Will only be called if src.Fs().Name() == f.Name()
   833  //
   834  // If it isn't possible then return fs.ErrorCantCopy
   835  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   836  	err := f.Mkdir(ctx, "")
   837  	if err != nil {
   838  		return nil, err
   839  	}
   840  	srcObj, ok := src.(*Object)
   841  	if !ok {
   842  		fs.Debugf(src, "Can't copy - not same remote type")
   843  		return nil, fs.ErrorCantCopy
   844  	}
   845  	srcFs := srcObj.fs
   846  	err = f.pacer.Call(func() (bool, error) {
   847  		var rxHeaders swift.Headers
   848  		rxHeaders, err = f.c.ObjectCopy(srcFs.container, srcFs.root+srcObj.remote, f.container, f.root+remote, nil)
   849  		return shouldRetryHeaders(rxHeaders, err)
   850  	})
   851  	if err != nil {
   852  		return nil, err
   853  	}
   854  	return f.NewObject(ctx, remote)
   855  }
   856  
   857  // Hashes returns the supported hash sets.
   858  func (f *Fs) Hashes() hash.Set {
   859  	return hash.Set(hash.MD5)
   860  }
   861  
   862  // ------------------------------------------------------------
   863  
   864  // Fs returns the parent Fs
   865  func (o *Object) Fs() fs.Info {
   866  	return o.fs
   867  }
   868  
   869  // Return a string version
   870  func (o *Object) String() string {
   871  	if o == nil {
   872  		return "<nil>"
   873  	}
   874  	return o.remote
   875  }
   876  
   877  // Remote returns the remote path
   878  func (o *Object) Remote() string {
   879  	return o.remote
   880  }
   881  
   882  // Hash returns the Md5sum of an object returning a lowercase hex string
   883  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
   884  	if t != hash.MD5 {
   885  		return "", hash.ErrUnsupported
   886  	}
   887  	isDynamicLargeObject, err := o.isDynamicLargeObject()
   888  	if err != nil {
   889  		return "", err
   890  	}
   891  	isStaticLargeObject, err := o.isStaticLargeObject()
   892  	if err != nil {
   893  		return "", err
   894  	}
   895  	if isDynamicLargeObject || isStaticLargeObject {
   896  		fs.Debugf(o, "Returning empty Md5sum for swift large object")
   897  		return "", nil
   898  	}
   899  	return strings.ToLower(o.md5), nil
   900  }
   901  
   902  // hasHeader checks for the header passed in returning false if the
   903  // object isn't found.
   904  func (o *Object) hasHeader(header string) (bool, error) {
   905  	err := o.readMetaData()
   906  	if err != nil {
   907  		if err == fs.ErrorObjectNotFound {
   908  			return false, nil
   909  		}
   910  		return false, err
   911  	}
    912  	_, found := o.headers[header]
    913  	return found, nil
   914  }
   915  
   916  // isDynamicLargeObject checks for X-Object-Manifest header
   917  func (o *Object) isDynamicLargeObject() (bool, error) {
   918  	return o.hasHeader("X-Object-Manifest")
   919  }
   920  
    921  // isStaticLargeObject checks for the X-Static-Large-Object header
   922  func (o *Object) isStaticLargeObject() (bool, error) {
   923  	return o.hasHeader("X-Static-Large-Object")
   924  }
   925  
   926  // Size returns the size of an object in bytes
   927  func (o *Object) Size() int64 {
   928  	return o.size
   929  }
   930  
   931  // decodeMetaData sets the metadata in the object from a swift.Object
   932  //
   933  // Sets
   934  //  o.lastModified
   935  //  o.size
   936  //  o.md5
   937  //  o.contentType
   938  func (o *Object) decodeMetaData(info *swift.Object) (err error) {
   939  	o.lastModified = info.LastModified
   940  	o.size = info.Bytes
   941  	o.md5 = info.Hash
   942  	o.contentType = info.ContentType
   943  	return nil
   944  }
   945  
   946  // readMetaData gets the metadata if it hasn't already been fetched
   947  //
   948  // it also sets the info
   949  //
   950  // it returns fs.ErrorObjectNotFound if the object isn't found
   951  func (o *Object) readMetaData() (err error) {
   952  	if o.headers != nil {
   953  		return nil
   954  	}
   955  	var info swift.Object
   956  	var h swift.Headers
   957  	err = o.fs.pacer.Call(func() (bool, error) {
   958  		info, h, err = o.fs.c.Object(o.fs.container, o.fs.root+o.remote)
   959  		return shouldRetryHeaders(h, err)
   960  	})
   961  	if err != nil {
   962  		if err == swift.ObjectNotFound {
   963  			return fs.ErrorObjectNotFound
   964  		}
   965  		return err
   966  	}
   967  	o.headers = h
   968  	err = o.decodeMetaData(&info)
   969  	if err != nil {
   970  		return err
   971  	}
   972  	return nil
   973  }
   974  
   975  // ModTime returns the modification time of the object
    976  //
    977  // It attempts to read the object's mtime from the metadata and, if
    978  // that isn't present, falls back to the LastModified time from the
    979  // HTTP headers.
   980  func (o *Object) ModTime(ctx context.Context) time.Time {
   981  	if fs.Config.UseServerModTime {
   982  		return o.lastModified
   983  	}
   984  	err := o.readMetaData()
   985  	if err != nil {
   986  		fs.Debugf(o, "Failed to read metadata: %s", err)
   987  		return o.lastModified
   988  	}
   989  	modTime, err := o.headers.ObjectMetadata().GetModTime()
   990  	if err != nil {
   991  		// fs.Logf(o, "Failed to read mtime from object: %v", err)
   992  		return o.lastModified
   993  	}
   994  	return modTime
   995  }
   996  
   997  // SetModTime sets the modification time of the local fs object
   998  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
   999  	err := o.readMetaData()
  1000  	if err != nil {
  1001  		return err
  1002  	}
  1003  	meta := o.headers.ObjectMetadata()
  1004  	meta.SetModTime(modTime)
  1005  	newHeaders := meta.ObjectHeaders()
  1006  	for k, v := range newHeaders {
  1007  		o.headers[k] = v
  1008  	}
  1009  	// Include any other metadata from request
  1010  	for k, v := range o.headers {
  1011  		if strings.HasPrefix(k, "X-Object-") {
  1012  			newHeaders[k] = v
  1013  		}
  1014  	}
  1015  	return o.fs.pacer.Call(func() (bool, error) {
  1016  		err = o.fs.c.ObjectUpdate(o.fs.container, o.fs.root+o.remote, newHeaders)
  1017  		return shouldRetry(err)
  1018  	})
  1019  }
  1020  
  1021  // Storable returns if this object is storable
  1022  //
  1023  // Objects whose Content-Type is directoryMarkerContentType are
  1024  // directory markers and are not storable.
  1025  func (o *Object) Storable() bool {
  1026  	return o.contentType != directoryMarkerContentType
  1027  }
  1028  
  1029  // Open an object for read
  1030  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1031  	headers := fs.OpenOptionHeaders(options)
  1032  	_, isRanging := headers["Range"]
  1033  	err = o.fs.pacer.Call(func() (bool, error) {
  1034  		var rxHeaders swift.Headers
  1035  		in, rxHeaders, err = o.fs.c.ObjectOpen(o.fs.container, o.fs.root+o.remote, !isRanging, headers)
  1036  		return shouldRetryHeaders(rxHeaders, err)
  1037  	})
  1038  	return
  1039  }
  1040  
  1041  // min returns the smallest of x, y
  1042  func min(x, y int64) int64 {
  1043  	if x < y {
  1044  		return x
  1045  	}
  1046  	return y
  1047  }
  1048  
  1049  // removeSegments removes any old segments from o
  1050  //
  1051  // if except is passed in then segments with that prefix won't be deleted
  1052  func (o *Object) removeSegments(except string) error {
  1053  	segmentsRoot := o.fs.root + o.remote + "/"
  1054  	err := o.fs.listContainerRoot(o.fs.segmentsContainer, segmentsRoot, "", true, func(remote string, object *swift.Object, isDirectory bool) error {
  1055  		if isDirectory {
  1056  			return nil
  1057  		}
  1058  		if except != "" && strings.HasPrefix(remote, except) {
  1059  			// fs.Debugf(o, "Ignoring current segment file %q in container %q", segmentsRoot+remote, o.fs.segmentsContainer)
  1060  			return nil
  1061  		}
  1062  		segmentPath := segmentsRoot + remote
  1063  		fs.Debugf(o, "Removing segment file %q in container %q", segmentPath, o.fs.segmentsContainer)
  1064  		var err error
  1065  		return o.fs.pacer.Call(func() (bool, error) {
  1066  			err = o.fs.c.ObjectDelete(o.fs.segmentsContainer, segmentPath)
  1067  			return shouldRetry(err)
  1068  		})
  1069  	})
  1070  	if err != nil {
  1071  		return err
  1072  	}
  1073  	// remove the segments container if empty, ignore errors
  1074  	err = o.fs.pacer.Call(func() (bool, error) {
  1075  		err = o.fs.c.ContainerDelete(o.fs.segmentsContainer)
  1076  		return shouldRetry(err)
  1077  	})
  1078  	if err == nil {
  1079  		fs.Debugf(o, "Removed empty container %q", o.fs.segmentsContainer)
  1080  	}
  1081  	return nil
  1082  }
  1083  
  1084  // urlEncode encodes a string so that it is a valid URL
  1085  //
  1086  // We don't use any of Go's standard methods as we need `/` not
  1087  // encoded but we need '&' encoded.
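        //
        // For example:
        //
        //	urlEncode("dir/my file&1.txt") -> "dir/my%20file%261.txt"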
  1088  func urlEncode(str string) string {
  1089  	var buf bytes.Buffer
  1090  	for i := 0; i < len(str); i++ {
  1091  		c := str[i]
  1092  		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' {
  1093  			_ = buf.WriteByte(c)
  1094  		} else {
  1095  			_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
  1096  		}
  1097  	}
  1098  	return buf.String()
  1099  }
  1100  
  1101  // updateChunks updates the existing object by uploading chunks to a
  1102  // separate container.  It returns a string which prefixes the current segments.
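        //
        // Segments are written to the <container>_segments container as
        //
        //	<root><remote>/<timestamp>/<size>/<nnnnnnnn>
        //
        // and an empty manifest object carrying an X-Object-Manifest header
        // pointing at that prefix is written to the main container.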
  1103  func (o *Object) updateChunks(in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
  1104  	// Create the segmentsContainer if it doesn't exist
  1105  	var err error
  1106  	err = o.fs.pacer.Call(func() (bool, error) {
  1107  		var rxHeaders swift.Headers
  1108  		_, rxHeaders, err = o.fs.c.Container(o.fs.segmentsContainer)
  1109  		return shouldRetryHeaders(rxHeaders, err)
  1110  	})
  1111  	if err == swift.ContainerNotFound {
  1112  		headers := swift.Headers{}
  1113  		if o.fs.opt.StoragePolicy != "" {
  1114  			headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
  1115  		}
  1116  		err = o.fs.pacer.Call(func() (bool, error) {
  1117  			err = o.fs.c.ContainerCreate(o.fs.segmentsContainer, headers)
  1118  			return shouldRetry(err)
  1119  		})
  1120  	}
  1121  	if err != nil {
  1122  		return "", err
  1123  	}
  1124  	// Upload the chunks
  1125  	left := size
  1126  	i := 0
  1127  	uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
  1128  	segmentsPath := fmt.Sprintf("%s%s/%s", o.fs.root, o.remote, uniquePrefix)
  1129  	in := bufio.NewReader(in0)
  1130  	segmentInfos := make([]string, 0, ((size / int64(o.fs.opt.ChunkSize)) + 1))
  1131  	for {
  1132  		// can we read at least one byte?
  1133  		if _, err := in.Peek(1); err != nil {
  1134  			if left > 0 {
  1135  				return "", err // read less than expected
  1136  			}
  1137  			fs.Debugf(o, "Finished uploading segments into %q (%v)", o.fs.segmentsContainer, err)
  1138  			break
  1139  		}
  1140  		n := int64(o.fs.opt.ChunkSize)
  1141  		if size != -1 {
  1142  			n = min(left, n)
  1143  			headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
  1144  			left -= n
  1145  		}
  1146  		segmentReader := io.LimitReader(in, n)
  1147  		segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
  1148  		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, o.fs.segmentsContainer)
  1149  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1150  			var rxHeaders swift.Headers
  1151  			rxHeaders, err = o.fs.c.ObjectPut(o.fs.segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
  1152  			if err == nil {
  1153  				segmentInfos = append(segmentInfos, segmentPath)
  1154  			}
  1155  			return shouldRetryHeaders(rxHeaders, err)
  1156  		})
  1157  		if err != nil {
  1158  			deleteChunks(o, segmentInfos)
  1159  			segmentInfos = nil
  1160  			return "", err
  1161  		}
  1162  		i++
  1163  	}
  1164  	// Upload the manifest
  1165  	headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", o.fs.segmentsContainer, segmentsPath))
  1166  	headers["Content-Length"] = "0" // set Content-Length as we know it
  1167  	emptyReader := bytes.NewReader(nil)
  1168  	manifestName := o.fs.root + o.remote
  1169  	err = o.fs.pacer.Call(func() (bool, error) {
  1170  		var rxHeaders swift.Headers
  1171  		rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, manifestName, emptyReader, true, "", contentType, headers)
  1172  		return shouldRetryHeaders(rxHeaders, err)
  1173  	})
  1174  	if err != nil {
  1175  		deleteChunks(o, segmentInfos)
  1176  		segmentInfos = nil
  1177  	}
  1178  	return uniquePrefix + "/", err
  1179  }
  1180  
  1181  func deleteChunks(o *Object, segmentInfos []string) {
  1182  	if len(segmentInfos) > 0 {
  1183  		for _, v := range segmentInfos {
  1184  			fs.Debugf(o, "Deleting segment file %q in container %q", v, o.fs.segmentsContainer)
  1185  			e := o.fs.c.ObjectDelete(o.fs.segmentsContainer, v)
  1186  			if e != nil {
  1187  				fs.Errorf(o, "Error occurred deleting segment file %q in container %q: %v", v, o.fs.segmentsContainer, e)
  1188  			}
  1189  		}
  1190  	}
  1191  }
  1192  
  1193  // Update the object with the contents of the io.Reader, modTime and size
  1194  //
  1195  // The new object may have been created if an error is returned
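        //
        // Files larger than chunk_size, or of unknown size when no_chunk is
        // not set, are uploaded as dynamic large objects via updateChunks;
        // everything else is uploaded with a single ObjectPut.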
  1196  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1197  	if o.fs.container == "" {
  1198  		return fserrors.FatalError(errors.New("container name needed in remote"))
  1199  	}
  1200  	err := o.fs.Mkdir(ctx, "")
  1201  	if err != nil {
  1202  		return err
  1203  	}
  1204  	size := src.Size()
  1205  	modTime := src.ModTime(ctx)
  1206  
  1207  	// Note whether this is a dynamic large object before starting
  1208  	isDynamicLargeObject, err := o.isDynamicLargeObject()
  1209  	if err != nil {
  1210  		return err
  1211  	}
  1212  
  1213  	// Set the mtime
  1214  	m := swift.Metadata{}
  1215  	m.SetModTime(modTime)
  1216  	contentType := fs.MimeType(ctx, src)
  1217  	headers := m.ObjectHeaders()
  1218  	uniquePrefix := ""
  1219  	if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
  1220  		uniquePrefix, err = o.updateChunks(in, headers, size, contentType)
  1221  		if err != nil {
  1222  			return err
  1223  		}
  1224  		o.headers = nil // wipe old metadata
  1225  	} else {
  1226  		if size >= 0 {
  1227  			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
  1228  		}
  1229  		var rxHeaders swift.Headers
  1230  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1231  			rxHeaders, err = o.fs.c.ObjectPut(o.fs.container, o.fs.root+o.remote, in, true, "", contentType, headers)
  1232  			return shouldRetryHeaders(rxHeaders, err)
  1233  		})
  1234  		if err != nil {
  1235  			return err
  1236  		}
  1237  		// set Metadata since ObjectPut checked the hash and length so we know the
  1238  		// object has been safely uploaded
  1239  		o.lastModified = modTime
  1240  		o.size = size
  1241  		o.md5 = rxHeaders["ETag"]
  1242  		o.contentType = contentType
  1243  		o.headers = headers
  1244  	}
  1245  
  1246  	// If file was a dynamic large object then remove old/all segments
  1247  	if isDynamicLargeObject {
  1248  		err = o.removeSegments(uniquePrefix)
  1249  		if err != nil {
  1250  			fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
  1251  		}
  1252  	}
  1253  
  1254  	// Read the metadata from the newly created object if necessary
  1255  	return o.readMetaData()
  1256  }
  1257  
  1258  // Remove an object
  1259  func (o *Object) Remove(ctx context.Context) error {
  1260  	isDynamicLargeObject, err := o.isDynamicLargeObject()
  1261  	if err != nil {
  1262  		return err
  1263  	}
  1264  	// Remove file/manifest first
  1265  	err = o.fs.pacer.Call(func() (bool, error) {
  1266  		err = o.fs.c.ObjectDelete(o.fs.container, o.fs.root+o.remote)
  1267  		return shouldRetry(err)
  1268  	})
  1269  	if err != nil {
  1270  		return err
  1271  	}
  1272  	// ...then segments if required
  1273  	if isDynamicLargeObject {
  1274  		err = o.removeSegments("")
  1275  		if err != nil {
  1276  			return err
  1277  		}
  1278  	}
  1279  	return nil
  1280  }
  1281  
  1282  // MimeType of an Object if known, "" otherwise
  1283  func (o *Object) MimeType(ctx context.Context) string {
  1284  	return o.contentType
  1285  }
  1286  
  1287  // Check the interfaces are satisfied
  1288  var (
  1289  	_ fs.Fs          = &Fs{}
  1290  	_ fs.Purger      = &Fs{}
  1291  	_ fs.PutStreamer = &Fs{}
  1292  	_ fs.Copier      = &Fs{}
  1293  	_ fs.ListRer     = &Fs{}
  1294  	_ fs.Object      = &Object{}
  1295  	_ fs.MimeTyper   = &Object{}
  1296  )