// Package swift provides an interface to the Swift object storage system
package swift

import (
	"bufio"
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ncw/swift/v2"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/random"
	"github.com/rclone/rclone/lib/readers"
)

// Constants
const (
	directoryMarkerContentType = "application/directory" // content type of directory marker objects
	listChunks                 = 1000                    // chunk size to read directory listings
	defaultChunkSize           = 5 * fs.Gibi
	minSleep                   = 10 * time.Millisecond // In case of error, start at 10ms sleep.
	segmentsContainerSuffix    = "_segments"
	segmentsDirectory          = ".file-segments"
	segmentsDirectorySlash     = segmentsDirectory + "/"
)
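
// Editorial note (not in the original source): a large object uploaded to
// container/dir/file stores its segments either as
//
//	container_segments/dir/file/<timestamp>-<random>/00000000, 00000001, ...
//
// when use_segments_container is true, or as
//
//	container/.file-segments/dir/file/<timestamp>-<random>/00000000, ...
//
// when it is false. See newSegmentedUpload and segmentPath below, which
// build these names from the constants above.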

// Auth URLs which imply storing large object segments in the
// segmentsDirectory rather than in a separate segments container
// (see the use_segments_container option)
var needFileSegmentsDirectory = regexp.MustCompile(`(?s)\.(ain?\.net|blomp\.com|praetector\.com|signmy\.name|rackfactory\.com)($|/)`)

// SharedOptions are shared between swift and backends which depend on swift
var SharedOptions = []fs.Option{{
	Name: "chunk_size",
	Help: strings.ReplaceAll(`Above this size files will be chunked.

Above this size files will be chunked into a |`+segmentsContainerSuffix+`| container
or a |`+segmentsDirectory+`| directory. (See the |use_segments_container| option
for more info). Default for this is 5 GiB which is its maximum value, so
only files above this size will be chunked.

Rclone uploads chunked files as dynamic large objects (DLO).
`, "|", "`"),
	Default:  defaultChunkSize,
	Advanced: true,
}, {
	Name: "no_chunk",
	Help: strings.ReplaceAll(`Don't chunk files during streaming upload.

When doing streaming uploads (e.g. using |rcat| or |mount| with
|--vfs-cache-mode off|) setting this flag will cause the swift backend
to not upload chunked files.

This will limit the maximum streamed upload size to 5 GiB. This is
useful because non-chunked files are easier to deal with and have an
MD5SUM.

Rclone will still chunk files bigger than |chunk_size| when doing
normal copy operations.`, "|", "`"),
	Default:  false,
	Advanced: true,
}, {
	Name: "no_large_objects",
	Help: strings.ReplaceAll(`Disable support for static and dynamic large objects

Swift cannot transparently store files bigger than 5 GiB. There are
two schemes for chunking large files, static large objects (SLO) or
dynamic large objects (DLO), and the API does not allow rclone to
determine whether a file is a static or dynamic large object without
doing a HEAD on the object. Since these need to be treated
differently, this means rclone has to issue HEAD requests for objects
for example when reading checksums.

When |no_large_objects| is set, rclone will assume that there are no
static or dynamic large objects stored. This means it can stop doing
the extra HEAD calls which in turn increases performance greatly
especially when doing a swift to swift transfer with |--checksum| set.

Setting this option implies |no_chunk| and also that no files will be
uploaded in chunks, so files bigger than 5 GiB will just fail on
upload.

If you set this option and there **are** static or dynamic large objects,
then this will give incorrect hashes for them. Downloads will succeed,
but other operations such as Remove and Copy will fail.
`, "|", "`"),
	Default:  false,
	Advanced: true,
}, {
	Name: "use_segments_container",
	Help: strings.ReplaceAll(`Choose destination for large object segments

Swift cannot transparently store files bigger than 5 GiB and rclone
will chunk files larger than |chunk_size| (default 5 GiB) in order to
upload them.

If this value is |true| the chunks will be stored in an additional
container named the same as the destination container but with
|`+segmentsContainerSuffix+`| appended. This means that there won't be any duplicated
data in the original container but having another container may not be
acceptable.

If this value is |false| the chunks will be stored in a
|`+segmentsDirectory+`| directory in the root of the container. This
directory will be omitted when listing the container. Some
providers (e.g. Blomp) require this mode as creating additional
containers isn't allowed. If it is desired to see the |`+segmentsDirectory+`|
directory in the root then this flag must be set to |true|.

If this value is |unset| (the default), then rclone will choose the value
to use. It will be |false| unless rclone detects any |auth_url|s that
it knows need it to be |true|. In this case you'll see a message in
the DEBUG log.
`, "|", "`"),
	Default:  fs.Tristate{},
	Advanced: true,
}, {
	Name:     config.ConfigEncoding,
	Help:     config.ConfigEncodingHelp,
	Advanced: true,
	Default: (encoder.EncodeInvalidUtf8 |
		encoder.EncodeSlash),
}}
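
// A minimal sketch (values are hypothetical, not from the original source) of
// how these shared options appear in an rclone.conf remote definition:
//
//	[myswift]
//	type = swift
//	env_auth = true
//	chunk_size = 5Gi
//	use_segments_container = false
//
// With use_segments_container = false, segments of chunked uploads land in
// the .file-segments directory of the destination container.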

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "swift",
		Description: "OpenStack Swift (Rackspace Cloud Files, Blomp Cloud Storage, Memset Memstore, OVH)",
		NewFs:       NewFs,
		Options: append([]fs.Option{{
			Name:    "env_auth",
			Help:    "Get swift credentials from environment variables in standard OpenStack form.",
			Default: false,
			Examples: []fs.OptionExample{
				{
					Value: "false",
					Help:  "Enter swift credentials in the next step.",
				}, {
					Value: "true",
					Help:  "Get swift credentials from environment vars.\nLeave other fields blank if using this.",
				},
			},
		}, {
			Name:      "user",
			Help:      "User name to log in (OS_USERNAME).",
			Sensitive: true,
		}, {
			Name:      "key",
			Help:      "API key or password (OS_PASSWORD).",
			Sensitive: true,
		}, {
			Name: "auth",
			Help: "Authentication URL for server (OS_AUTH_URL).",
			Examples: []fs.OptionExample{{
				Value: "https://auth.api.rackspacecloud.com/v1.0",
				Help:  "Rackspace US",
			}, {
				Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
				Help:  "Rackspace UK",
			}, {
				Value: "https://identity.api.rackspacecloud.com/v2.0",
				Help:  "Rackspace v2",
			}, {
				Value: "https://auth.storage.memset.com/v1.0",
				Help:  "Memset Memstore UK",
			}, {
				Value: "https://auth.storage.memset.com/v2.0",
				Help:  "Memset Memstore UK v2",
			}, {
				Value: "https://auth.cloud.ovh.net/v3",
				Help:  "OVH",
			}, {
				Value: "https://authenticate.ain.net",
				Help:  "Blomp Cloud Storage",
			}},
		}, {
			Name:      "user_id",
			Help:      "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
			Sensitive: true,
		}, {
			Name:      "domain",
			Help:      "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME).",
			Sensitive: true,
		}, {
			Name:      "tenant",
			Help:      "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
			Sensitive: true,
		}, {
			Name:      "tenant_id",
			Help:      "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
			Sensitive: true,
		}, {
			Name:      "tenant_domain",
			Help:      "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
			Sensitive: true,
		}, {
			Name: "region",
			Help: "Region name - optional (OS_REGION_NAME).",
		}, {
			Name: "storage_url",
			Help: "Storage URL - optional (OS_STORAGE_URL).",
		}, {
			Name:      "auth_token",
			Help:      "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
			Sensitive: true,
		}, {
			Name:      "application_credential_id",
			Help:      "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
			Sensitive: true,
		}, {
			Name:      "application_credential_name",
			Help:      "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
			Sensitive: true,
		}, {
			Name:      "application_credential_secret",
			Help:      "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
			Sensitive: true,
		}, {
			Name:    "auth_version",
			Help:    "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
			Default: 0,
		}, {
			Name:    "endpoint_type",
			Help:    "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).",
			Default: "public",
			Examples: []fs.OptionExample{{
				Value: "public",
				Help:  "Public (default, choose this if not sure)",
			}, {
				Value: "internal",
				Help:  "Internal (use internal service net)",
			}, {
				Value: "admin",
				Help:  "Admin",
			}},
		}, {
			Name: "leave_parts_on_error",
			Help: `If true avoid calling abort upload on a failure.

It should be set to true for resuming uploads across different sessions.`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "storage_policy",
			Help: `The storage policy to use when creating a new container.

This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
			Default: "",
			Examples: []fs.OptionExample{{
				Value: "",
				Help:  "Default",
			}, {
				Value: "pcs",
				Help:  "OVH Public Cloud Storage",
			}, {
				Value: "pca",
				Help:  "OVH Public Cloud Archive",
			}},
		}}, SharedOptions...),
	})
}

// Options defines the configuration for this backend
type Options struct {
	EnvAuth                     bool                 `config:"env_auth"`
	User                        string               `config:"user"`
	Key                         string               `config:"key"`
	Auth                        string               `config:"auth"`
	UserID                      string               `config:"user_id"`
	Domain                      string               `config:"domain"`
	Tenant                      string               `config:"tenant"`
	TenantID                    string               `config:"tenant_id"`
	TenantDomain                string               `config:"tenant_domain"`
	Region                      string               `config:"region"`
	StorageURL                  string               `config:"storage_url"`
	AuthToken                   string               `config:"auth_token"`
	AuthVersion                 int                  `config:"auth_version"`
	ApplicationCredentialID     string               `config:"application_credential_id"`
	ApplicationCredentialName   string               `config:"application_credential_name"`
	ApplicationCredentialSecret string               `config:"application_credential_secret"`
	LeavePartsOnError           bool                 `config:"leave_parts_on_error"`
	StoragePolicy               string               `config:"storage_policy"`
	EndpointType                string               `config:"endpoint_type"`
	ChunkSize                   fs.SizeSuffix        `config:"chunk_size"`
	NoChunk                     bool                 `config:"no_chunk"`
	NoLargeObjects              bool                 `config:"no_large_objects"`
	UseSegmentsContainer        fs.Tristate          `config:"use_segments_container"`
	Enc                         encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote swift server
type Fs struct {
	name             string            // name of this remote
	root             string            // the path we are working on if any
	features         *fs.Features      // optional features
	opt              Options           // options for this backend
	ci               *fs.ConfigInfo    // global config
	c                *swift.Connection // the connection to the swift server
	rootContainer    string            // container part of root (if any)
	rootDirectory    string            // directory part of root (if any)
	cache            *bucket.Cache     // cache of container status
	noCheckContainer bool              // don't check the container before creating it
	pacer            *fs.Pacer         // To pace the API calls
}

// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
	fs           *Fs    // what this object is part of
	remote       string // The remote path
	size         int64
	lastModified time.Time
	contentType  string
	md5          string
	headers      swift.Headers // The object headers if known
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootContainer == "" {
		return "Swift root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("Swift container %s", f.rootContainer)
	}
	return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (e.g. "Token has expired")
	408, // Request Timeout
	409, // Conflict - various states that could be resolved on a retry
	429, // Rate exceeded.
	500, // Occasional 500 Internal Server Error
	503, // Service Unavailable/Slow Down - "Reduce your request rate"
	504, // Gateway Time-out
}

// shouldRetry returns a boolean as to whether this err deserves to be
// retried.  It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
	if fserrors.ContextError(ctx, &err) {
		return false, err
	}
	// If this is a swift.Error object extract the HTTP error code
	if swiftError, ok := err.(*swift.Error); ok {
		for _, e := range retryErrorCodes {
			if swiftError.StatusCode == e {
				return true, err
			}
		}
	}
	// Check for generic failure conditions
	return fserrors.ShouldRetry(err), err
}
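
// exampleStatWithRetry is an illustrative sketch (not part of the original
// source; the helper and its name are hypothetical and it is not called
// anywhere): it shows how the backend composes the pacer with
// shouldRetryHeaders, the pattern used throughout this file.
func exampleStatWithRetry(ctx context.Context, f *Fs, container, objectPath string) (info swift.Object, err error) {
	err = f.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		// Attempt the call; shouldRetryHeaders decides whether the pacer
		// sleeps and retries based on the error and any Retry-After header.
		info, rxHeaders, err = f.c.Object(ctx, container, objectPath)
		return shouldRetryHeaders(ctx, rxHeaders, err)
	})
	return info, err
}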

// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried.  It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
	if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
		if value := headers["Retry-After"]; value != "" {
			retryAfter, parseErr := strconv.Atoi(value)
			if parseErr != nil {
				fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
			} else {
				duration := time.Second * time.Duration(retryAfter)
				if duration <= 60*time.Second {
					// Do a short sleep immediately
					fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
					time.Sleep(duration)
					return true, err
				}
				// Delay a long sleep for a retry
				return false, fserrors.NewErrorRetryAfter(duration)
			}
		}
	}
	return shouldRetry(ctx, err)
}
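
// Worked example (editorial): a 429 response carrying "Retry-After: 30"
// makes shouldRetryHeaders sleep 30s itself and report the error as
// retriable; "Retry-After: 120" exceeds the 60s threshold, so it returns a
// non-retriable error wrapped by fserrors.NewErrorRetryAfter and the retry
// is deferred to a higher level.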

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
	container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(container), f.opt.Enc.FromStandardPath(containerPath)
}

// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
	return o.fs.split(o.remote)
}
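
// For example (editorial): with f.root = "mycontainer/photos",
// f.split("2020/a.jpg") returns container "mycontainer" and containerPath
// "photos/2020/a.jpg" (after the configured encoding is applied).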

// swiftConnection makes a connection to swift
func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
	ci := fs.GetConfig(ctx)
	c := &swift.Connection{
		// Keep these in the same order as the Config for ease of checking
		UserName:                    opt.User,
		ApiKey:                      opt.Key,
		AuthUrl:                     opt.Auth,
		UserId:                      opt.UserID,
		Domain:                      opt.Domain,
		Tenant:                      opt.Tenant,
		TenantId:                    opt.TenantID,
		TenantDomain:                opt.TenantDomain,
		Region:                      opt.Region,
		StorageUrl:                  opt.StorageURL,
		AuthToken:                   opt.AuthToken,
		AuthVersion:                 opt.AuthVersion,
		ApplicationCredentialId:     opt.ApplicationCredentialID,
		ApplicationCredentialName:   opt.ApplicationCredentialName,
		ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
		EndpointType:                swift.EndpointType(opt.EndpointType),
		ConnectTimeout:              10 * ci.ConnectTimeout, // Use the timeouts in the transport
		Timeout:                     10 * ci.Timeout,        // Use the timeouts in the transport
		Transport:                   fshttp.NewTransport(ctx),
	}
	if opt.EnvAuth {
		err := c.ApplyEnvironment()
		if err != nil {
			return nil, fmt.Errorf("failed to read environment variables: %w", err)
		}
	}
	StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
	if !c.Authenticated() {
		if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
			if c.UserName == "" && c.UserId == "" {
				return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
			}
			if c.ApiKey == "" {
				return nil, errors.New("key not found")
			}
		}
		if c.AuthUrl == "" {
			return nil, errors.New("auth not found")
		}
		err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
		if err != nil {
			return nil, err
		}
	}
	// Make sure we re-auth with the AuthToken and StorageUrl
	// provided by wrapping the existing auth, so we can just
	// override one or the other or both.
	if StorageUrl != "" || AuthToken != "" {
		// Re-write StorageURL and AuthToken if they are being
		// overridden as c.Authenticate above will have
		// overwritten them.
		if StorageUrl != "" {
			c.StorageUrl = StorageUrl
		}
		if AuthToken != "" {
			c.AuthToken = AuthToken
		}
		c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
	}
	return c, nil
}
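
// Editorial note: newAuth (defined elsewhere in this package) wraps the
// connection's existing authenticator so that a user-supplied storage_url
// and/or auth_token take precedence over whatever authentication returns,
// while token refreshes continue to work.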

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	const minChunkSize = fs.SizeSuffixBase
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}

// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// If noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
	ci := fs.GetConfig(ctx)
	f := &Fs{
		name:             name,
		opt:              *opt,
		ci:               ci,
		c:                c,
		noCheckContainer: noCheckContainer,
		pacer:            fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
		cache:            bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SlowModTime:       true,
	}).Fill(ctx, f)
	if !f.opt.UseSegmentsContainer.Valid {
		f.opt.UseSegmentsContainer.Value = !needFileSegmentsDirectory.MatchString(opt.Auth)
		f.opt.UseSegmentsContainer.Valid = true
		fs.Debugf(f, "Auto set use_segments_container to %v", f.opt.UseSegmentsContainer.Value)
	}
	if f.rootContainer != "" && f.rootDirectory != "" {
		// Check to see if the object exists - ignoring directory markers
		var info swift.Object
		var err error
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		err = f.pacer.Call(func() (bool, error) {
			var rxHeaders swift.Headers
			info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
			return shouldRetryHeaders(ctx, rxHeaders, err)
		})
		if err == nil && info.ContentType != directoryMarkerContentType {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}

// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("swift: chunk size: %w", err)
	}

	c, err := swiftConnection(ctx, opt, name)
	if err != nil {
		return nil, err
	}
	return NewFsWithConnection(ctx, opt, name, root, c, false)
}
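
// exampleNewFs is an illustrative sketch (not part of the original source;
// names and values are hypothetical): constructing the backend directly.
// Calling NewFs with a bare configmap bypasses rclone's layered config, so
// option defaults such as chunk_size must be supplied explicitly.
func exampleNewFs(ctx context.Context) (fs.Fs, error) {
	m := configmap.Simple{
		"env_auth":   "true", // read OS_* credentials from the environment
		"chunk_size": "5Gi",  // defaults are not applied when the config system is bypassed
	}
	return NewFs(ctx, "myswift", "mycontainer/path", m)
}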

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	// Note that due to a quirk of swift, dynamic large objects are
	// returned as 0 bytes in the listing.  Correct this here by
	// making sure we read the full metadata for all 0 byte files.
	// We don't read the metadata for directory marker objects.
	if info != nil && info.Bytes == 0 && info.ContentType != directoryMarkerContentType && !o.fs.opt.NoLargeObjects {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err == fs.ErrorObjectNotFound {
			// We have a dangling large object here so just return the original metadata
			fs.Errorf(o, "dangling large object with no contents")
		} else if err != nil {
			return nil, err
		} else {
			return o, nil
		}
	}
	if info != nil {
		// Set info but not headers
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list and listContainerRoot to handle an object.
type listFn func(remote string, object *swift.Object, isDirectory bool) error

// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied.  The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}
	if directory != "" && !strings.HasSuffix(directory, "/") {
		directory += "/"
	}
	// Options for ObjectsWalk
	opts := swift.ObjectsOpts{
		Prefix: directory,
		Limit:  listChunks,
	}
	if !recurse {
		opts.Delimiter = '/'
	}
	return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
		var objects []swift.Object
		var err error
		err = f.pacer.Call(func() (bool, error) {
			objects, err = f.c.Objects(ctx, container, opts)
			return shouldRetry(ctx, err)
		})
		if err == nil {
			for i := range objects {
				object := &objects[i]
				if !includeDirMarkers && !f.opt.UseSegmentsContainer.Value && (object.Name == segmentsDirectory || strings.HasPrefix(object.Name, segmentsDirectorySlash)) {
					// Don't show segments in listing unless showing directory markers
					continue
				}
				isDirectory := false
				if !recurse {
					isDirectory = strings.HasSuffix(object.Name, "/")
				}
				remote := f.opt.Enc.ToStandardPath(object.Name)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				if !includeDirMarkers && remote == prefix {
					// If we have zero length directory markers ending in / then swift
					// will return them in the listing for the directory which causes
					// duplicate directories.  Ignore them here.
					continue
				}
				remote = remote[len(prefix):]
				if addContainer {
					remote = path.Join(container, remote)
				}
				err = fn(remote, object, isDirectory)
				if err != nil {
					break
				}
			}
		}
		return objects, err
	})
}
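
// Editorial note on the listing semantics above: when recurse is off a '/'
// delimiter is set, so swift returns pseudo-directory entries with a
// trailing slash alongside objects; listContainerRoot reports those as
// directories and strips prefix from every remote before calling fn.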

type addEntryFn func(fs.DirEntry) error

// list the objects into the function supplied
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
	err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
		if isDirectory {
			remote = strings.TrimRight(remote, "/")
			d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
			err = fn(d)
		} else {
			// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
			var o fs.Object
			o, err = f.newObjectWithInfo(ctx, remote, object)
			if err != nil {
				return err
			}
			if includeDirMarkers || o.Storable() {
				err = fn(o)
			}
		}
		return err
	})
	if err == swift.ContainerNotFound {
		err = fs.ErrorDirNotFound
	}
	return err
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
	if container == "" {
		return nil, fs.ErrorListBucketRequired
	}
	// List the objects
	err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
		entries = append(entries, entry)
		return nil
	})
	if err != nil {
		return nil, err
	}
	// container must be present if listing succeeded
	f.cache.MarkOK(container)
	return entries, nil
}

// listContainers lists the containers
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
	var containers []swift.Container
	err = f.pacer.Call(func() (bool, error) {
		containers, err = f.c.ContainersAll(ctx, nil)
		return shouldRetry(ctx, err)
	})
	if err != nil {
		return nil, fmt.Errorf("container listing failed: %w", err)
	}
	for _, container := range containers {
		f.cache.MarkOK(container.Name)
		d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
		entries = append(entries, d)
	}
	return entries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	container, directory := f.split(dir)
	if container == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listContainers(ctx)
	}
	return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	container, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(container, directory, prefix string, addContainer bool) error {
		return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
			return list.Add(entry)
		})
	}
	if container == "" {
		entries, err := f.listContainers(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			container := entry.Remote()
			err = listR(container, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// container must be present if listing succeeded
			f.cache.MarkOK(container)
		}
	} else {
		err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
		if err != nil {
			return err
		}
		// container must be present if listing succeeded
		f.cache.MarkOK(container)
	}
	return list.Flush()
}

// About gets quota information
func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
	var total, objects int64
	if f.rootContainer != "" {
		var container swift.Container
		err = f.pacer.Call(func() (bool, error) {
			container, _, err = f.c.Container(ctx, f.rootContainer)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("container info failed: %w", err)
		}
		total = container.Bytes
		objects = container.Count
	} else {
		var containers []swift.Container
		err = f.pacer.Call(func() (bool, error) {
			containers, err = f.c.ContainersAll(ctx, nil)
			return shouldRetry(ctx, err)
		})
		if err != nil {
			return nil, fmt.Errorf("container listing failed: %w", err)
		}
		for _, c := range containers {
			total += c.Bytes
			objects += c.Count
		}
	}
	usage = &fs.Usage{
		Used:    fs.NewUsageValue(total),   // bytes in use
		Objects: fs.NewUsageValue(objects), // objects in use
	}
	return usage, nil
}

// Put the object into the container
//
// Copy the reader into the new object which is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:      f,
		remote:  src.Remote(),
		headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	container, _ := f.split(dir)
	return f.makeContainer(ctx, container)
}

// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
	return f.cache.Create(container, func() error {
		// Check to see if container exists first
		var err error = swift.ContainerNotFound
		if !f.noCheckContainer {
			err = f.pacer.Call(func() (bool, error) {
				var rxHeaders swift.Headers
				_, rxHeaders, err = f.c.Container(ctx, container)
				return shouldRetryHeaders(ctx, rxHeaders, err)
			})
		}
		if err == swift.ContainerNotFound {
			headers := swift.Headers{}
			if f.opt.StoragePolicy != "" {
				headers["X-Storage-Policy"] = f.opt.StoragePolicy
			}
			err = f.pacer.Call(func() (bool, error) {
				err = f.c.ContainerCreate(ctx, container, headers)
				return shouldRetry(ctx, err)
			})
			if err == nil {
				fs.Infof(f, "Container %q created", container)
			}
		}
		return err
	}, nil)
}

// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	container, directory := f.split(dir)
	if container == "" || directory != "" {
		return nil
	}
	err := f.cache.Remove(container, func() error {
		err := f.pacer.Call(func() (bool, error) {
			err := f.c.ContainerDelete(ctx, container)
			return shouldRetry(ctx, err)
		})
		if err == nil {
			fs.Infof(f, "Container %q removed", container)
		}
		return err
	})
	return err
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Purge deletes all the files in the directory
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context, dir string) error {
	container, directory := f.split(dir)
	if container == "" {
		return fs.ErrorListBucketRequired
	}
	// Delete all the files including the directory markers
	toBeDeleted := make(chan fs.Object, f.ci.Transfers)
	delErr := make(chan error, 1)
	go func() {
		delErr <- operations.DeleteFiles(ctx, toBeDeleted)
	}()
	err := f.list(ctx, container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
		if o, ok := entry.(*Object); ok {
			toBeDeleted <- o
		}
		return nil
	})
	close(toBeDeleted)
	delError := <-delErr
	if err == nil {
		err = delError
	}
	if err != nil {
		return err
	}
	return f.Rmdir(ctx, dir)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstContainer, dstPath := f.split(remote)
	err := f.makeContainer(ctx, dstContainer)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	isLargeObject, err := srcObj.isLargeObject(ctx)
	if err != nil {
		return nil, err
	}
	if isLargeObject {
		// handle large object
		err = f.copyLargeObject(ctx, srcObj, dstContainer, dstPath)
	} else {
		srcContainer, srcPath := srcObj.split()
		err = f.pacer.Call(func() (bool, error) {
			var rxHeaders swift.Headers
			rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
			return shouldRetryHeaders(ctx, rxHeaders, err)
		})
	}
	if err != nil {
		return nil, err
	}
	return f.NewObject(ctx, remote)
}

// Represents a segmented upload or copy
type segmentedUpload struct {
	f            *Fs        // parent
	dstContainer string     // container for the file to live once uploaded
	container    string     // container for the segments
	dstPath      string     // path for the object to live once uploaded
	path         string     // unique path for the segments
	mu           sync.Mutex // protects the variables below
	segments     []string   // segments successfully uploaded
}

// Create a new segmented upload using the correct container and path
func (f *Fs) newSegmentedUpload(ctx context.Context, dstContainer string, dstPath string) (su *segmentedUpload, err error) {
	randomString, err := random.Password(128)
	if err != nil {
		return nil, fmt.Errorf("failed to create random string for upload: %w", err)
	}
	uniqueString := time.Now().Format("2006-01-02-150405") + "-" + randomString
	su = &segmentedUpload{
		f:            f,
		dstPath:      dstPath,
		path:         dstPath + "/" + uniqueString,
		dstContainer: dstContainer,
		container:    dstContainer,
	}
	if f.opt.UseSegmentsContainer.Value {
		su.container += segmentsContainerSuffix
		err = f.makeContainer(ctx, su.container)
		if err != nil {
			return nil, err
		}
	} else {
		su.path = segmentsDirectorySlash + su.path
	}
	return su, nil
}

// Return the path of the i-th segment
func (su *segmentedUpload) segmentPath(i int) string {
	return fmt.Sprintf("%s/%08d", su.path, i)
}
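
// For example (editorial): segment 3 of an upload whose unique path is
// "dir/file/2006-01-02-150405-XXXX" is named
// "dir/file/2006-01-02-150405-XXXX/00000003".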

// Mark segment as successfully uploaded
func (su *segmentedUpload) uploaded(segment string) {
	su.mu.Lock()
	defer su.mu.Unlock()
	su.segments = append(su.segments, segment)
}

// Return the full path including the container
func (su *segmentedUpload) fullPath() string {
	return fmt.Sprintf("%s/%s", su.container, su.path)
}

// Remove segments when upload/copy process fails
func (su *segmentedUpload) onFail() {
	f := su.f
	if f.opt.LeavePartsOnError {
		return
	}
	ctx := context.Background()
	fs.Debugf(f, "Segment operation failed: bulk deleting failed segments")
	if len(su.container) == 0 {
		fs.Debugf(f, "Invalid segments container")
		return
	}
	if len(su.segments) == 0 {
		fs.Debugf(f, "No segments to delete")
		return
	}
	_, err := f.c.BulkDelete(ctx, su.container, su.segments)
	if err != nil {
		fs.Errorf(f, "Failed to bulk delete failed segments: %v", err)
	}
}

// upload the manifest when upload is done
func (su *segmentedUpload) uploadManifest(ctx context.Context, contentType string, headers swift.Headers) (err error) {
	delete(headers, "Etag") // remove Etag if present as it is wrong for the manifest
	headers["X-Object-Manifest"] = urlEncode(su.fullPath())
	headers["Content-Length"] = "0" // set Content-Length as we know it
	emptyReader := bytes.NewReader(nil)
	fs.Debugf(su.f, "uploading manifest %q to %q", su.dstPath, su.dstContainer)
	err = su.f.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		rxHeaders, err = su.f.c.ObjectPut(ctx, su.dstContainer, su.dstPath, emptyReader, true, "", contentType, headers)
		return shouldRetryHeaders(ctx, rxHeaders, err)
	})
	return err
}
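
// Editorial note: setting X-Object-Manifest to "<container>/<path>" is what
// makes this zero-byte object a dynamic large object manifest; swift serves
// it by concatenating every object under that prefix in name order, which is
// why segment names use a fixed-width counter.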

// Copy a large object src into (dstContainer, dstPath)
func (f *Fs) copyLargeObject(ctx context.Context, src *Object, dstContainer string, dstPath string) (err error) {
	su, err := f.newSegmentedUpload(ctx, dstContainer, dstPath)
	if err != nil {
		return err
	}
	srcSegmentsContainer, srcSegments, err := src.getSegmentsLargeObject(ctx)
	if err != nil {
		return fmt.Errorf("copy large object: %w", err)
	}
	if len(srcSegments) == 0 {
		return errors.New("could not copy object, segment list is empty")
	}
	defer atexit.OnError(&err, su.onFail)()
	for i, srcSegment := range srcSegments {
		dstSegment := su.segmentPath(i)
		err = f.pacer.Call(func() (bool, error) {
			var rxHeaders swift.Headers
			rxHeaders, err = f.c.ObjectCopy(ctx, srcSegmentsContainer, srcSegment, su.container, dstSegment, nil)
			return shouldRetryHeaders(ctx, rxHeaders, err)
		})
		if err != nil {
			return err
		}
		su.uploaded(dstSegment)
	}
	return su.uploadManifest(ctx, src.contentType, src.headers)
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	isDynamicLargeObject, err := o.isDynamicLargeObject(ctx)
	if err != nil {
		return "", err
	}
	isStaticLargeObject, err := o.isStaticLargeObject(ctx)
	if err != nil {
		return "", err
	}
	if isDynamicLargeObject || isStaticLargeObject {
		fs.Debugf(o, "Returning empty Md5sum for swift large object")
		return "", nil
	}
	return strings.ToLower(o.md5), nil
}

// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
	err := o.readMetaData(ctx)
	if err != nil {
		if err == fs.ErrorObjectNotFound {
			return false, nil
		}
		return false, err
	}
	_, found := o.headers[header]
	return found, nil
}

// isDynamicLargeObject checks for the X-Object-Manifest header
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
	if o.fs.opt.NoLargeObjects {
		return false, nil
	}
	return o.hasHeader(ctx, "X-Object-Manifest")
}

// isStaticLargeObject checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
	if o.fs.opt.NoLargeObjects {
		return false, nil
	}
	return o.hasHeader(ctx, "X-Static-Large-Object")
}

func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
	if o.fs.opt.NoLargeObjects {
		return false, nil
	}
	result, err = o.hasHeader(ctx, "X-Static-Large-Object")
	if result || err != nil {
		return
	}
	result, err = o.hasHeader(ctx, "X-Object-Manifest")
	if result || err != nil {
		return
	}
	return false, nil
}

func (o *Object) isInContainerVersioning(ctx context.Context, container string) (bool, error) {
	_, headers, err := o.fs.c.Container(ctx, container)
	if err != nil {
		return false, err
	}
	xHistoryLocation := headers["X-History-Location"]
	if len(xHistoryLocation) > 0 {
		return true, nil
	}
	return false, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
//
//	o.lastModified
//	o.size
//	o.md5
//	o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
	o.lastModified = info.LastModified
	o.size = info.Bytes
	o.md5 = info.Hash
	o.contentType = info.ContentType
	return nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.headers != nil {
		return nil
	}
	var info swift.Object
	var h swift.Headers
	container, containerPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		info, h, err = o.fs.c.Object(ctx, container, containerPath)
		return shouldRetryHeaders(ctx, h, err)
	})
	if err != nil {
		if err == swift.ObjectNotFound {
			return fs.ErrorObjectNotFound
		}
		return err
	}
	o.headers = h
	err = o.decodeMetaData(&info)
	if err != nil {
		return err
	}
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
	if o.fs.ci.UseServerModTime {
		return o.lastModified
	}
	err := o.readMetaData(ctx)
	if err != nil {
		fs.Debugf(o, "Failed to read metadata: %s", err)
		return o.lastModified
	}
	modTime, err := o.headers.ObjectMetadata().GetModTime()
	if err != nil {
		// fs.Logf(o, "Failed to read mtime from object: %v", err)
		return o.lastModified
	}
	return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData(ctx)
	if err != nil {
		return err
	}
	meta := o.headers.ObjectMetadata()
	meta.SetModTime(modTime)
	newHeaders := meta.ObjectHeaders()
	for k, v := range newHeaders {
		o.headers[k] = v
	}
	// Include any other metadata from request
	for k, v := range o.headers {
		if strings.HasPrefix(k, "X-Object-") {
			newHeaders[k] = v
		}
	}
	container, containerPath := o.split()
	return o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
		return shouldRetry(ctx, err)
	})
}

// Storable returns if this object is storable
//
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
	return o.contentType != directoryMarkerContentType
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
	fs.FixRangeOption(options, o.size)
	headers := fs.OpenOptionHeaders(options)
	_, isRanging := headers["Range"]
	container, containerPath := o.split()
	err = o.fs.pacer.Call(func() (bool, error) {
		var rxHeaders swift.Headers
		in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
		return shouldRetryHeaders(ctx, rxHeaders, err)
	})
	return
}

// min returns the smallest of x, y
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}
  1386  
  1387  // Get the segments for a large object
  1388  //
  1389  // It returns the names of the segments and the container that they live in
  1390  func (o *Object) getSegmentsLargeObject(ctx context.Context) (container string, segments []string, err error) {
  1391  	container, objectName := o.split()
  1392  	container, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
  1393  	if err != nil {
  1394  		return container, segments, fmt.Errorf("failed to get list segments of object: %w", err)
  1395  	}
  1396  	segments = make([]string, len(segmentObjects))
  1397  	for i := range segmentObjects {
  1398  		segments[i] = segmentObjects[i].Name
  1399  	}
  1400  	return container, segments, nil
  1401  }
  1402  
  1403  // Remove the segments for a large object
  1404  func (o *Object) removeSegmentsLargeObject(ctx context.Context, container string, segments []string) error {
  1405  	if len(segments) == 0 {
  1406  		return nil
  1407  	}
  1408  	_, err := o.fs.c.BulkDelete(ctx, container, segments)
  1409  	if err != nil {
  1410  		return fmt.Errorf("failed to bulk delete segments: %w", err)
  1411  	}
  1412  	return nil
  1413  }
  1414  
  1415  // urlEncode encodes a string so that it is a valid URL
  1416  //
  1417  // We don't use any of Go's standard methods because we need `/` left
  1418  // unencoded but `&` encoded.
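        //
        // For example:
        //
        //	urlEncode("dir/a file&name.txt") // -> "dir/a%20file%26name.txt"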
  1419  func urlEncode(str string) string {
  1420  	var buf bytes.Buffer
  1421  	for i := 0; i < len(str); i++ {
  1422  		c := str[i]
  1423  		if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
  1424  			_ = buf.WriteByte(c)
  1425  		} else {
  1426  			_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
  1427  		}
  1428  	}
  1429  	return buf.String()
  1430  }
  1431  
  1432  // updateChunks updates the existing object by uploading it in chunks
  1433  // (segments) to a separate container or directory, then writing a manifest.
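        //
        // The result is a dynamic large object (DLO): the data lives in the
        // numbered segment objects and the object itself carries an
        // X-Object-Manifest header pointing at their common prefix, which
        // Swift concatenates back into a single object on read.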
  1434  func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (err error) {
  1435  	container, containerPath := o.split()
  1436  	su, err := o.fs.newSegmentedUpload(ctx, container, containerPath)
  1437  	if err != nil {
  1438  		return err
  1439  	}
  1440  	// Upload the chunks
  1441  	left := size
  1442  	i := 0
  1443  	in := bufio.NewReader(in0)
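        	// If the upload fails part way through then su.onFail tidies up
        	// the segments uploaded so far (atexit also runs it if rclone is
        	// interrupted mid upload)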
  1444  	defer atexit.OnError(&err, su.onFail)()
  1445  	for {
  1446  		// can we read at least one byte?
  1447  		if _, err = in.Peek(1); err != nil {
  1448  			if left > 0 {
  1449  				return err // read less than expected
  1450  			}
  1451  			fs.Debugf(o, "Uploading segments into %q seems done (%v)", su.container, err)
  1452  			break
  1453  		}
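        		// Each segment is at most chunk_size bytes. When the total
        		// size is known the final segment is trimmed and Content-Length
        		// can be set; for a streamed upload (size == -1) the LimitReader
        		// simply runs dry on the final segment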
  1454  		n := int64(o.fs.opt.ChunkSize)
  1455  		if size != -1 {
  1456  			n = min(left, n)
  1457  			headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
  1458  			left -= n
  1459  		}
  1460  		segmentReader := io.LimitReader(in, n)
  1461  		segmentPath := su.segmentPath(i)
  1462  		fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, su.container)
  1463  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1464  			var rxHeaders swift.Headers
  1465  			rxHeaders, err = o.fs.c.ObjectPut(ctx, su.container, segmentPath, segmentReader, true, "", "", headers)
  1466  			return shouldRetryHeaders(ctx, rxHeaders, err)
  1467  		})
  1468  		if err != nil {
  1469  			return err
  1470  		}
  1471  		su.uploaded(segmentPath)
  1472  		i++
  1473  	}
  1474  	return su.uploadManifest(ctx, contentType, headers)
  1475  }
  1476  
  1477  // Update the object with the contents of the io.Reader, modTime and size
  1478  //
  1479  // The new object may have been created if an error is returned
  1480  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1481  	container, containerPath := o.split()
  1482  	if container == "" {
  1483  		return fserrors.FatalError(errors.New("can't upload files to the root"))
  1484  	}
  1485  	err := o.fs.makeContainer(ctx, container)
  1486  	if err != nil {
  1487  		return err
  1488  	}
  1489  	size := src.Size()
  1490  	modTime := src.ModTime(ctx)
  1491  
  1492  	// Note whether this is a dynamic large object before starting
  1493  	isLargeObject, err := o.isLargeObject(ctx)
  1494  	if err != nil {
  1495  		return err
  1496  	}
  1497  
  1498  	// Capture segments before upload
  1499  	var segmentsContainer string
  1500  	var segments []string
  1501  	if isLargeObject {
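        		// The error is deliberately discarded - if the old segments
        		// cannot be listed we just skip deleting them after the upload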
  1502  		segmentsContainer, segments, _ = o.getSegmentsLargeObject(ctx)
  1503  	}
  1504  
  1505  	// Set the mtime in the object's metadata (the X-Object-Meta-Mtime header)
  1506  	m := swift.Metadata{}
  1507  	m.SetModTime(modTime)
  1508  	contentType := fs.MimeType(ctx, src)
  1509  	headers := m.ObjectHeaders()
  1510  	fs.OpenOptionAddHeaders(options, headers)
  1511  
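        	// Decide whether to upload as a large object: files bigger than
        	// chunk_size are always chunked, and files streamed with unknown
        	// size are chunked unless no_chunk is set; no_large_objects
        	// disables chunking entirely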
  1512  	if (size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk)) && !o.fs.opt.NoLargeObjects {
  1513  		err = o.updateChunks(ctx, in, headers, size, contentType)
  1514  		if err != nil {
  1515  			return err
  1516  		}
  1517  		o.headers = nil // wipe old metadata
  1518  	} else {
  1519  		var inCount *readers.CountingReader
  1520  		if size >= 0 {
  1521  			headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
  1522  		} else {
  1523  			// otherwise count the size for later
  1524  			inCount = readers.NewCountingReader(in)
  1525  			in = inCount
  1526  		}
  1527  		var rxHeaders swift.Headers
  1528  		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1529  			rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
  1530  			return shouldRetryHeaders(ctx, rxHeaders, err)
  1531  		})
  1532  		if err != nil {
  1533  			return err
  1534  		}
  1535  		// Set the metadata now - ObjectPut has checked the hash and
  1536  		// length, so we know the object has been safely uploaded
  1537  		o.lastModified = modTime
  1538  		o.size = size
  1539  		o.md5 = rxHeaders["Etag"]
  1540  		o.contentType = contentType
  1541  		o.headers = headers
  1542  		if inCount != nil {
  1543  			// update the size if streaming from the reader
  1544  			o.size = int64(inCount.BytesRead())
  1545  		}
  1546  	}
  1547  	// If the file was a large object and the container does not have versioning enabled then remove the old segments
  1548  	if isLargeObject && len(segmentsContainer) > 0 {
  1549  		isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
  1550  		if !isInContainerVersioning {
  1551  			err := o.removeSegmentsLargeObject(ctx, segmentsContainer, segments)
  1552  			if err != nil {
  1553  				fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
  1554  			}
  1555  		}
  1556  	}
  1557  
  1558  	// Read the metadata from the newly created object if necessary
  1559  	return o.readMetaData(ctx)
  1560  }
  1561  
  1562  // Remove an object
  1563  func (o *Object) Remove(ctx context.Context) (err error) {
  1564  	container, containerPath := o.split()
  1565  
  1566  	// Check whether the object is a large object
  1567  	isLargeObject, err := o.isLargeObject(ctx)
  1568  	if err != nil {
  1569  		return err
  1570  	}
  1571  	// Check whether the container has versioning enabled - if so the segments must be kept when the object is deleted
  1572  	isInContainerVersioning := false
  1573  	if isLargeObject {
  1574  		isInContainerVersioning, err = o.isInContainerVersioning(ctx, container)
  1575  		if err != nil {
  1576  			return err
  1577  		}
  1578  	}
  1579  	// Capture the names of the segments if this is a large object
  1580  	var segmentsContainer string
  1581  	var segments []string
  1582  	if isLargeObject {
  1583  		segmentsContainer, segments, err = o.getSegmentsLargeObject(ctx)
  1584  		if err != nil {
  1585  			return err
  1586  		}
  1587  	}
  1588  	// Remove file/manifest first
  1589  	err = o.fs.pacer.Call(func() (bool, error) {
  1590  		err = o.fs.c.ObjectDelete(ctx, container, containerPath)
  1591  		if err == swift.ObjectNotFound {
  1592  			fs.Errorf(o, "Dangling object - ignoring: %v", err)
  1593  			err = nil
  1594  		}
  1595  		return shouldRetry(ctx, err)
  1596  	})
  1597  	if err != nil {
  1598  		return err
  1599  	}
  1600  
  1601  	if !isLargeObject || isInContainerVersioning {
  1602  		return nil
  1603  	}
  1604  
  1605  	// Otherwise this was a large object in a container without
  1606  	// versioning enabled, so remove the segments which backed it
  1607  	// as well
  1608  	return o.removeSegmentsLargeObject(ctx, segmentsContainer, segments)
  1609  }
  1610  
  1611  // MimeType of an Object if known, "" otherwise
  1612  func (o *Object) MimeType(ctx context.Context) string {
  1613  	return o.contentType
  1614  }
  1615  
  1616  // Check the interfaces are satisfied
  1617  var (
  1618  	_ fs.Fs          = &Fs{}
  1619  	_ fs.Purger      = &Fs{}
  1620  	_ fs.PutStreamer = &Fs{}
  1621  	_ fs.Copier      = &Fs{}
  1622  	_ fs.ListRer     = &Fs{}
  1623  	_ fs.Object      = &Object{}
  1624  	_ fs.MimeTyper   = &Object{}
  1625  )