github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/b2/b2.go

     1  // Package b2 provides an interface to the Backblaze B2 object storage system.
     2  package b2
     3  
     4  // FIXME should we remove sha1 checks from here as rclone now supports
     5  // checking SHA1s?
     6  
     7  import (
     8  	"bufio"
     9  	"bytes"
    10  	"context"
    11  	"crypto/sha1"
    12  	"encoding/json"
    13  	"errors"
    14  	"fmt"
    15  	gohash "hash"
    16  	"io"
    17  	"net/http"
    18  	"path"
    19  	"strconv"
    20  	"strings"
    21  	"sync"
    22  	"time"
    23  
    24  	"github.com/rclone/rclone/backend/b2/api"
    25  	"github.com/rclone/rclone/fs"
    26  	"github.com/rclone/rclone/fs/accounting"
    27  	"github.com/rclone/rclone/fs/config"
    28  	"github.com/rclone/rclone/fs/config/configmap"
    29  	"github.com/rclone/rclone/fs/config/configstruct"
    30  	"github.com/rclone/rclone/fs/fserrors"
    31  	"github.com/rclone/rclone/fs/fshttp"
    32  	"github.com/rclone/rclone/fs/hash"
    33  	"github.com/rclone/rclone/fs/walk"
    34  	"github.com/rclone/rclone/lib/bucket"
    35  	"github.com/rclone/rclone/lib/encoder"
    36  	"github.com/rclone/rclone/lib/multipart"
    37  	"github.com/rclone/rclone/lib/pacer"
    38  	"github.com/rclone/rclone/lib/pool"
    39  	"github.com/rclone/rclone/lib/rest"
    40  )
    41  
    42  const (
    43  	defaultEndpoint     = "https://api.backblazeb2.com"
    44  	headerPrefix        = "x-bz-info-" // lower case as that is what the server returns
    45  	timeKey             = "src_last_modified_millis"
    46  	timeHeader          = headerPrefix + timeKey
    47  	sha1Key             = "large_file_sha1"
    48  	sha1Header          = "X-Bz-Content-Sha1"
    49  	testModeHeader      = "X-Bz-Test-Mode"
    50  	idHeader            = "X-Bz-File-Id"
    51  	nameHeader          = "X-Bz-File-Name"
    52  	timestampHeader     = "X-Bz-Upload-Timestamp"
    53  	retryAfterHeader    = "Retry-After"
    54  	minSleep            = 10 * time.Millisecond
    55  	maxSleep            = 5 * time.Minute
    56  	decayConstant       = 1 // bigger for slower decay, exponential
    57  	maxParts            = 10000
    58  	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
    59  	minChunkSize        = 5 * fs.Mebi
    60  	defaultChunkSize    = 96 * fs.Mebi
    61  	defaultUploadCutoff = 200 * fs.Mebi
    62  	largeFileCopyCutoff = 4 * fs.Gibi // 5E9 is the max
    63  	defaultMaxAge       = 24 * time.Hour
    64  )
    65  
    66  // Globals
    67  var (
    68  	errNotWithVersions  = errors.New("can't modify or delete files in --b2-versions mode")
    69  	errNotWithVersionAt = errors.New("can't modify or delete files in --b2-version-at mode")
    70  )
    71  
    72  // Register with Fs
    73  func init() {
    74  	fs.Register(&fs.RegInfo{
    75  		Name:        "b2",
    76  		Description: "Backblaze B2",
    77  		NewFs:       NewFs,
    78  		CommandHelp: commandHelp,
    79  		Options: []fs.Option{{
    80  			Name:      "account",
    81  			Help:      "Account ID or Application Key ID.",
    82  			Required:  true,
    83  			Sensitive: true,
    84  		}, {
    85  			Name:      "key",
    86  			Help:      "Application Key.",
    87  			Required:  true,
    88  			Sensitive: true,
    89  		}, {
    90  			Name:     "endpoint",
    91  			Help:     "Endpoint for the service.\n\nLeave blank normally.",
    92  			Advanced: true,
    93  		}, {
    94  			Name: "test_mode",
    95  			Help: `A flag string for X-Bz-Test-Mode header for debugging.
    96  
    97  This is for debugging purposes only. Setting it to one of the strings
    98  below will cause b2 to return specific errors:
    99  
   100    * "fail_some_uploads"
   101    * "expire_some_account_authorization_tokens"
   102    * "force_cap_exceeded"
   103  
   104  These will be set in the "X-Bz-Test-Mode" header which is documented
   105  in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
   106  			Default:  "",
   107  			Hide:     fs.OptionHideConfigurator,
   108  			Advanced: true,
   109  		}, {
   110  			Name:     "versions",
   111  			Help:     "Include old versions in directory listings.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
   112  			Default:  false,
   113  			Advanced: true,
   114  		}, {
   115  			Name:     "version_at",
   116  			Help:     "Show file versions as they were at the specified time.\n\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
   117  			Default:  fs.Time{},
   118  			Advanced: true,
   119  		}, {
   120  			Name:    "hard_delete",
   121  			Help:    "Permanently delete files on remote removal, otherwise hide files.",
   122  			Default: false,
   123  		}, {
   124  			Name: "upload_cutoff",
   125  			Help: `Cutoff for switching to chunked upload.
   126  
   127  Files above this size will be uploaded in chunks of "--b2-chunk-size".
   128  
   129  This value should be set no larger than 4.657 GiB (== 5 GB).`,
   130  			Default:  defaultUploadCutoff,
   131  			Advanced: true,
   132  		}, {
   133  			Name: "copy_cutoff",
   134  			Help: `Cutoff for switching to multipart copy.
   135  
   136  Any files larger than this that need to be server-side copied will be
   137  copied in chunks of this size.
   138  
   139  The minimum is 0 and the maximum is 4.6 GiB.`,
   140  			Default:  largeFileCopyCutoff,
   141  			Advanced: true,
   142  		}, {
   143  			Name: "chunk_size",
   144  			Help: `Upload chunk size.
   145  
   146  When uploading large files, chunk the file into this size.
   147  
   148  Must fit in memory. These chunks are buffered in memory and there
   149  might be a maximum of "--transfers" chunks in progress at once.
   150  
   151  5,000,000 Bytes is the minimum size.`,
   152  			Default:  defaultChunkSize,
   153  			Advanced: true,
   154  		}, {
   155  			Name: "upload_concurrency",
   156  			Help: `Concurrency for multipart uploads.
   157  
   158  This is the number of chunks of the same file that are uploaded
   159  concurrently.
   160  
   161  Note that chunks are stored in memory and there may be up to
   162  "--transfers" * "--b2-upload-concurrency" chunks stored at once
   163  in memory.`,
   164  			Default:  4,
   165  			Advanced: true,
   166  		}, {
   167  			Name: "disable_checksum",
   168  			Help: `Disable checksums for large (> upload cutoff) files.
   169  
   170  Normally rclone will calculate the SHA1 checksum of the input before
   171  uploading it so it can add it to metadata on the object. This is great
   172  for data integrity checking but can cause long delays for large files
   173  to start uploading.`,
   174  			Default:  false,
   175  			Advanced: true,
   176  		}, {
   177  			Name: "download_url",
   178  			Help: `Custom endpoint for downloads.
   179  
   180  This is usually set to a Cloudflare CDN URL as Backblaze offers
   181  free egress for data downloaded through the Cloudflare network.
   182  Rclone works with private buckets by sending an "Authorization" header.
   183  If the custom endpoint rewrites the requests for authentication,
   184  e.g., in Cloudflare Workers, this header needs to be handled properly.
   185  Leave blank if you want to use the endpoint provided by Backblaze.
   186  
   187  The URL provided here SHOULD have the protocol and SHOULD NOT have
   188  a trailing slash or specify the /file/bucket subpath as rclone will
   189  request files with "{download_url}/file/{bucket_name}/{path}".
   190  
   191  Example:
   192  > https://mysubdomain.mydomain.tld
   193  (No trailing "/", "file" or "bucket")`,
   194  			Advanced: true,
   195  		}, {
   196  			Name: "download_auth_duration",
   197  			Help: `Time before the public link authorization token will expire in s or suffix ms|s|m|h|d.
   198  
   199  This is used in combination with "rclone link" for making files
   200  accessible to the public and sets the duration before the download
   201  authorization token will expire.
   202  
   203  The minimum value is 1 second. The maximum value is one week.`,
   204  			Default:  fs.Duration(7 * 24 * time.Hour),
   205  			Advanced: true,
   206  		}, {
   207  			Name:     "memory_pool_flush_time",
   208  			Default:  fs.Duration(time.Minute),
   209  			Advanced: true,
   210  			Hide:     fs.OptionHideBoth,
   211  			Help:     `How often internal memory buffer pools will be flushed. (no longer used)`,
   212  		}, {
   213  			Name:     "memory_pool_use_mmap",
   214  			Default:  false,
   215  			Advanced: true,
   216  			Hide:     fs.OptionHideBoth,
   217  			Help:     `Whether to use mmap buffers in internal memory pool. (no longer used)`,
   218  		}, {
   219  			Name: "lifecycle",
   220  			Help: `Set the number of days deleted files should be kept when creating a bucket.
   221  
   222  On bucket creation, this parameter is used to create a lifecycle rule
   223  for the entire bucket.
   224  
   225  If lifecycle is 0 (the default) it does not create a lifecycle rule, so
   226  the default B2 behaviour applies, which is to create versions of files
   227  on delete and overwrite and to keep them indefinitely.
   228  
   229  If lifecycle is >0 then it creates a single rule setting the number of
   230  days before a file that is deleted or overwritten is deleted
   231  permanently. This is known as daysFromHidingToDeleting in the b2 docs.
   232  
   233  The minimum value for this parameter is 1 day.
   234  
   235  You can additionally enable hard_delete in the config, which means
   236  deletions won't create versions, though overwrites will still cause
   237  versions to be made.
   238  
   239  See: [rclone backend lifecycle](#lifecycle) for setting lifecycles after bucket creation.
   240  `,
   241  			Default:  0,
   242  			Advanced: true,
   243  		}, {
   244  			Name:     config.ConfigEncoding,
   245  			Help:     config.ConfigEncodingHelp,
   246  			Advanced: true,
   247  			// See: https://www.backblaze.com/b2/docs/files.html
   248  			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
   249  			// FIXME: allow /, but not leading, trailing or double
   250  			Default: (encoder.Display |
   251  				encoder.EncodeBackSlash |
   252  				encoder.EncodeInvalidUtf8),
   253  		}},
   254  	})
   255  }
   256  
   257  // Options defines the configuration for this backend
   258  type Options struct {
   259  	Account                       string               `config:"account"`
   260  	Key                           string               `config:"key"`
   261  	Endpoint                      string               `config:"endpoint"`
   262  	TestMode                      string               `config:"test_mode"`
   263  	Versions                      bool                 `config:"versions"`
   264  	VersionAt                     fs.Time              `config:"version_at"`
   265  	HardDelete                    bool                 `config:"hard_delete"`
   266  	UploadCutoff                  fs.SizeSuffix        `config:"upload_cutoff"`
   267  	CopyCutoff                    fs.SizeSuffix        `config:"copy_cutoff"`
   268  	ChunkSize                     fs.SizeSuffix        `config:"chunk_size"`
   269  	UploadConcurrency             int                  `config:"upload_concurrency"`
   270  	DisableCheckSum               bool                 `config:"disable_checksum"`
   271  	DownloadURL                   string               `config:"download_url"`
   272  	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
   273  	Lifecycle                     int                  `config:"lifecycle"`
   274  	Enc                           encoder.MultiEncoder `config:"encoding"`
   275  }
   276  
   277  // Fs represents a remote b2 server
   278  type Fs struct {
   279  	name            string                                 // name of this remote
   280  	root            string                                 // the path we are working on if any
   281  	opt             Options                                // parsed config options
   282  	ci              *fs.ConfigInfo                         // global config
   283  	features        *fs.Features                           // optional features
   284  	srv             *rest.Client                           // the connection to the b2 server
   285  	rootBucket      string                                 // bucket part of root (if any)
   286  	rootDirectory   string                                 // directory part of root (if any)
   287  	cache           *bucket.Cache                          // cache for bucket creation status
   288  	bucketIDMutex   sync.Mutex                             // mutex to protect _bucketID
   289  	_bucketID       map[string]string                      // the ID of the bucket we are working on
   290  	bucketTypeMutex sync.Mutex                             // mutex to protect _bucketType
   291  	_bucketType     map[string]string                      // the Type of the bucket we are working on
   292  	info            api.AuthorizeAccountResponse           // result of authorize call
   293  	uploadMu        sync.Mutex                             // lock for the uploads variable
   294  	uploads         map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
   295  	authMu          sync.Mutex                             // lock for authorizing the account
   296  	pacer           *fs.Pacer                              // To pace and retry the API calls
   297  	uploadToken     *pacer.TokenDispenser                  // control concurrency
   298  }
   299  
   300  // Object describes a b2 object
   301  type Object struct {
   302  	fs       *Fs       // what this object is part of
   303  	remote   string    // The remote path
   304  	id       string    // b2 id of the file
   305  	modTime  time.Time // The modified time of the object if known
   306  	sha1     string    // SHA-1 hash if known
   307  	size     int64     // Size of the object
   308  	mimeType string    // Content-Type of the object
   309  }
   310  
   311  // ------------------------------------------------------------
   312  
   313  // Name of the remote (as passed into NewFs)
   314  func (f *Fs) Name() string {
   315  	return f.name
   316  }
   317  
   318  // Root of the remote (as passed into NewFs)
   319  func (f *Fs) Root() string {
   320  	return f.root
   321  }
   322  
   323  // String converts this Fs to a string
   324  func (f *Fs) String() string {
   325  	if f.rootBucket == "" {
   326  		return "B2 root"
   327  	}
   328  	if f.rootDirectory == "" {
   329  		return fmt.Sprintf("B2 bucket %s", f.rootBucket)
   330  	}
   331  	return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory)
   332  }
   333  
   334  // Features returns the optional features of this Fs
   335  func (f *Fs) Features() *fs.Features {
   336  	return f.features
   337  }
   338  
   339  // parsePath parses a remote 'url'
   340  func parsePath(path string) (root string) {
   341  	root = strings.Trim(path, "/")
   342  	return
   343  }
   344  
   345  // split returns bucket and bucketPath from the rootRelativePath
   346  // relative to f.root
   347  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
   348  	return bucket.Split(path.Join(f.root, rootRelativePath))
   349  }
   350  
   351  // split returns bucket and bucketPath from the object
   352  func (o *Object) split() (bucket, bucketPath string) {
   353  	return o.fs.split(o.remote)
   354  }
   355  
   356  // retryErrorCodes is a slice of error codes that we will retry
   357  var retryErrorCodes = []int{
   358  	401, // Unauthorized (e.g. "Token has expired")
   359  	408, // Request Timeout
   360  	429, // Rate exceeded.
   361  	500, // We get occasional 500 Internal Server Errors
   362  	503, // Service Unavailable
   363  	504, // Gateway Time-out
   364  }
   365  
   366  // shouldRetryNoReauth returns a boolean as to whether this resp and err
   367  // deserve to be retried.  It returns the err as a convenience
   368  func (f *Fs) shouldRetryNoReauth(ctx context.Context, resp *http.Response, err error) (bool, error) {
   369  	if fserrors.ContextError(ctx, &err) {
   370  		return false, err
   371  	}
   372  	// For 429 or 503 errors look at the Retry-After: header and
   373  	// set the retry appropriately, starting with a minimum of 1
   374  	// second if it isn't set.
   375  	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
   376  		var retryAfter = 1
   377  		retryAfterString := resp.Header.Get(retryAfterHeader)
   378  		if retryAfterString != "" {
   379  			var err error
   380  			retryAfter, err = strconv.Atoi(retryAfterString)
   381  			if err != nil {
   382  				fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
   383  			}
   384  		}
   385  		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
   386  	}
   387  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   388  }
   389  
   390  // shouldRetry returns a boolean as to whether this resp and err
   391  // deserve to be retried.  It returns the err as a convenience
   392  func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
   393  	if resp != nil && resp.StatusCode == 401 {
   394  		fs.Debugf(f, "Unauthorized: %v", err)
   395  		// Reauth
   396  		authErr := f.authorizeAccount(ctx)
   397  		if authErr != nil {
   398  			err = authErr
   399  		}
   400  		return true, err
   401  	}
   402  	return f.shouldRetryNoReauth(ctx, resp, err)
   403  }
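
// A minimal illustrative sketch of the calling convention shouldRetry
// expects: wrap the API call in f.pacer.Call and return shouldRetry's
// verdict so the pacer knows whether to back off and try again.
// exampleCall is hypothetical, but the pattern mirrors the real calls
// throughout this file.
func (f *Fs) exampleCall(ctx context.Context) error {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_list_buckets",
	}
	var response api.ListBucketsResponse
	return f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &response)
		return f.shouldRetry(ctx, resp, err)
	})
}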
   404  
   405  // errorHandler parses a non 2xx error response into an error
   406  func errorHandler(resp *http.Response) error {
   407  	body, err := rest.ReadBody(resp)
   408  	if err != nil {
   409  		fs.Errorf(nil, "Couldn't read error out of body: %v", err)
   410  		body = nil
   411  	}
   412  	// Decode error response if there was one - they can be blank
   413  	errResponse := new(api.Error)
   414  	if len(body) > 0 {
   415  		err = json.Unmarshal(body, errResponse)
   416  		if err != nil {
   417  			fs.Errorf(nil, "Couldn't decode error response: %v", err)
   418  		}
   419  	}
   420  	if errResponse.Code == "" {
   421  		errResponse.Code = "unknown"
   422  	}
   423  	if errResponse.Status == 0 {
   424  		errResponse.Status = resp.StatusCode
   425  	}
   426  	if errResponse.Message == "" {
   427  		errResponse.Message = "Unknown " + resp.Status
   428  	}
   429  	return errResponse
   430  }
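
// A minimal illustrative sketch of the JSON error body that
// errorHandler decodes; the field values here are made up. api.Error
// implements the error interface, so the decoded struct can be
// returned directly as the error.
func exampleDecodeError() error {
	body := []byte(`{"status":503,"code":"service_unavailable","message":"no uploads right now"}`)
	errResponse := new(api.Error)
	if err := json.Unmarshal(body, errResponse); err != nil {
		return err
	}
	return errResponse
}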
   431  
   432  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   433  	if cs < minChunkSize {
   434  		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
   435  	}
   436  	return nil
   437  }
   438  
   439  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   440  	err = checkUploadChunkSize(cs)
   441  	if err == nil {
   442  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   443  	}
   444  	return
   445  }
   446  
   447  func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
   448  	if cs < opt.ChunkSize {
   449  		return fmt.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
   450  	}
   451  	return nil
   452  }
   453  
   454  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   455  	err = checkUploadCutoff(&f.opt, cs)
   456  	if err == nil {
   457  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   458  	}
   459  	return
   460  }
   461  
   462  func (f *Fs) setCopyCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   463  	err = checkUploadChunkSize(cs)
   464  	if err == nil {
   465  		old, f.opt.CopyCutoff = f.opt.CopyCutoff, cs
   466  	}
   467  	return
   468  }
   469  
   470  // setRoot changes the root of the Fs
   471  func (f *Fs) setRoot(root string) {
   472  	f.root = parsePath(root)
   473  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
   474  }
   475  
   476  // NewFs constructs an Fs from the path, bucket:path
   477  func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
   478  	// Parse config into Options struct
   479  	opt := new(Options)
   480  	err := configstruct.Set(m, opt)
   481  	if err != nil {
   482  		return nil, err
   483  	}
   484  	if opt.UploadCutoff < opt.ChunkSize {
   485  		opt.UploadCutoff = opt.ChunkSize
   486  		fs.Infof(nil, "b2: raising upload cutoff to chunk size: %v", opt.UploadCutoff)
   487  	}
   488  	err = checkUploadCutoff(opt, opt.UploadCutoff)
   489  	if err != nil {
   490  		return nil, fmt.Errorf("b2: upload cutoff: %w", err)
   491  	}
   492  	err = checkUploadChunkSize(opt.ChunkSize)
   493  	if err != nil {
   494  		return nil, fmt.Errorf("b2: chunk size: %w", err)
   495  	}
   496  	if opt.Account == "" {
   497  		return nil, errors.New("account not found")
   498  	}
   499  	if opt.Key == "" {
   500  		return nil, errors.New("key not found")
   501  	}
   502  	if opt.Endpoint == "" {
   503  		opt.Endpoint = defaultEndpoint
   504  	}
   505  	ci := fs.GetConfig(ctx)
   506  	f := &Fs{
   507  		name:        name,
   508  		opt:         *opt,
   509  		ci:          ci,
   510  		srv:         rest.NewClient(fshttp.NewClient(ctx)).SetErrorHandler(errorHandler),
   511  		cache:       bucket.NewCache(),
   512  		_bucketID:   make(map[string]string, 1),
   513  		_bucketType: make(map[string]string, 1),
   514  		uploads:     make(map[string][]*api.GetUploadURLResponse),
   515  		pacer:       fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   516  		uploadToken: pacer.NewTokenDispenser(ci.Transfers),
   517  	}
   518  	f.setRoot(root)
   519  	f.features = (&fs.Features{
   520  		ReadMimeType:          true,
   521  		WriteMimeType:         true,
   522  		BucketBased:           true,
   523  		BucketBasedRootOK:     true,
   524  		ChunkWriterDoesntSeek: true,
   525  	}).Fill(ctx, f)
   526  	// Set the test flag if required
   527  	if opt.TestMode != "" {
   528  		testMode := strings.TrimSpace(opt.TestMode)
   529  		f.srv.SetHeader(testModeHeader, testMode)
   530  		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
   531  	}
   532  	err = f.authorizeAccount(ctx)
   533  	if err != nil {
   534  		return nil, fmt.Errorf("failed to authorize account: %w", err)
   535  	}
   536  	// If this is a key limited to a single bucket, it must exist already
   537  	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
   538  		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
   539  		if allowedBucket == "" {
   540  			return nil, errors.New("bucket that application key is restricted to no longer exists")
   541  		}
   542  		if allowedBucket != f.rootBucket {
   543  			return nil, fmt.Errorf("you must use bucket %q with this application key", allowedBucket)
   544  		}
   545  		f.cache.MarkOK(f.rootBucket)
   546  		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
   547  	}
   548  	if f.rootBucket != "" && f.rootDirectory != "" {
   549  		// Check to see if the (bucket,directory) is actually an existing file
   550  		oldRoot := f.root
   551  		newRoot, leaf := path.Split(oldRoot)
   552  		f.setRoot(newRoot)
   553  		_, err := f.NewObject(ctx, leaf)
   554  		if err != nil {
   555  			// File doesn't exist so return old f
   556  			f.setRoot(oldRoot)
   557  			return f, nil
   558  		}
   559  		// return an error with an fs which points to the parent
   560  		return f, fs.ErrorIsFile
   561  	}
   562  	return f, nil
   563  }
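
// A minimal illustrative sketch of constructing this backend directly
// with NewFs. The account and key values are placeholders; in normal
// use rclone assembles the configmap from the config file and flags.
func exampleNewFs(ctx context.Context) (fs.Fs, error) {
	m := configmap.Simple{
		"account": "0123456789ab",
		"key":     "applicationKeyGoesHere",
	}
	return NewFs(ctx, "myB2", "mybucket/path", m)
}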
   564  
   565  // authorizeAccount gets the API endpoint and auth token.  Can be used
   566  // for reauthentication too.
   567  func (f *Fs) authorizeAccount(ctx context.Context) error {
   568  	f.authMu.Lock()
   569  	defer f.authMu.Unlock()
   570  	opts := rest.Opts{
   571  		Method:       "GET",
   572  		Path:         "/b2api/v1/b2_authorize_account",
   573  		RootURL:      f.opt.Endpoint,
   574  		UserName:     f.opt.Account,
   575  		Password:     f.opt.Key,
   576  		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
   577  	}
   578  	err := f.pacer.Call(func() (bool, error) {
   579  		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
   580  		return f.shouldRetryNoReauth(ctx, resp, err)
   581  	})
   582  	if err != nil {
   583  		return fmt.Errorf("failed to authenticate: %w", err)
   584  	}
   585  	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
   586  	return nil
   587  }
   588  
   589  // hasPermission returns whether the current AuthorizationToken has the selected permission
   590  func (f *Fs) hasPermission(permission string) bool {
   591  	for _, capability := range f.info.Allowed.Capabilities {
   592  		if capability == permission {
   593  			return true
   594  		}
   595  	}
   596  	return false
   597  }
   598  
   599  // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
   600  //
   601  // This should be returned with returnUploadURL when finished
   602  func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
   603  	f.uploadMu.Lock()
   604  	defer f.uploadMu.Unlock()
   605  	bucketID, err := f.getBucketID(ctx, bucket)
   606  	if err != nil {
   607  		return nil, err
   608  	}
   609  	// look for a stored upload URL for the correct bucketID
   610  	uploads := f.uploads[bucketID]
   611  	if len(uploads) > 0 {
   612  		upload, uploads = uploads[0], uploads[1:]
   613  		f.uploads[bucketID] = uploads
   614  		return upload, nil
   615  	}
   616  	// get a new upload URL since not found
   617  	opts := rest.Opts{
   618  		Method: "POST",
   619  		Path:   "/b2_get_upload_url",
   620  	}
   621  	var request = api.GetUploadURLRequest{
   622  		BucketID: bucketID,
   623  	}
   624  	err = f.pacer.Call(func() (bool, error) {
   625  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
   626  		return f.shouldRetry(ctx, resp, err)
   627  	})
   628  	if err != nil {
   629  		return nil, fmt.Errorf("failed to get upload URL: %w", err)
   630  	}
   631  	return upload, nil
   632  }
   633  
   634  // returnUploadURL returns the UploadURL to the cache
   635  func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
   636  	if upload == nil {
   637  		return
   638  	}
   639  	f.uploadMu.Lock()
   640  	f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
   641  	f.uploadMu.Unlock()
   642  }
   643  
   644  // clearUploadURL clears the current UploadURL and the AuthorizationToken
   645  func (f *Fs) clearUploadURL(bucketID string) {
   646  	f.uploadMu.Lock()
   647  	delete(f.uploads, bucketID)
   648  	f.uploadMu.Unlock()
   649  }
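
// A minimal illustrative sketch of the upload URL lifecycle: take a
// URL from the pool with getUploadURL, use it, and hand it back with
// returnUploadURL only if the upload succeeded, since a URL should be
// discarded after a failed upload. doUpload is a hypothetical stand-in
// for the actual upload step.
func (f *Fs) exampleUploadWithURL(ctx context.Context, bucket string, doUpload func(*api.GetUploadURLResponse) error) error {
	upload, err := f.getUploadURL(ctx, bucket)
	if err != nil {
		return err
	}
	err = doUpload(upload)
	if err == nil {
		// Only return the URL for reuse after a successful upload
		f.returnUploadURL(upload)
	}
	return err
}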
   650  
   651  // getRW gets an RW buffer and an upload token
   652  //
   653  // If noBuf is set then it just gets an upload token
   654  func (f *Fs) getRW(noBuf bool) (rw *pool.RW) {
   655  	f.uploadToken.Get()
   656  	if !noBuf {
   657  		rw = multipart.NewRW()
   658  	}
   659  	return rw
   660  }
   661  
   662  // putRW returns an RW buffer to the memory pool and returns an
   663  // upload token
   664  //
   665  // If rw is nil then it just returns the upload token
   666  func (f *Fs) putRW(rw *pool.RW) {
   667  	if rw != nil {
   668  		_ = rw.Close()
   669  	}
   670  	f.uploadToken.Put()
   671  }
   672  
   673  // Return an Object from a path
   674  //
   675  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   676  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
   677  	o := &Object{
   678  		fs:     f,
   679  		remote: remote,
   680  	}
   681  	if info != nil {
   682  		err := o.decodeMetaData(info)
   683  		if err != nil {
   684  			return nil, err
   685  		}
   686  	} else {
   687  		err := o.readMetaData(ctx) // reads info and headers, returning an error
   688  		if err != nil {
   689  			return nil, err
   690  		}
   691  	}
   692  	return o, nil
   693  }
   694  
   695  // NewObject finds the Object at remote.  If it can't be found
   696  // it returns the error fs.ErrorObjectNotFound.
   697  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   698  	return f.newObjectWithInfo(ctx, remote, nil)
   699  }
   700  
   701  // listFn is called from list to handle an object
   702  type listFn func(remote string, object *api.File, isDirectory bool) error
   703  
   704  // errEndList is a sentinel used to end the list iteration early.
   705  // listFn should return it to end the iteration with no errors.
   706  var errEndList = errors.New("end list")
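
// A minimal illustrative sketch of ending a listing early: a listFn
// can return errEndList to stop f.list without it reporting an error.
func (f *Fs) exampleFindFirstFile(ctx context.Context, bucket, directory string) (first *api.File, err error) {
	err = f.list(ctx, bucket, directory, "", false, false, 1, false, false, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			first = object
			return errEndList // translated to a nil error by f.list
		}
		return nil
	})
	return first, err
}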
   707  
   708  // list lists the objects into the function supplied from
   709  // the bucket and root supplied
   710  //
   711  // (bucket, directory) is the starting directory
   712  //
   713  // If prefix is set then it is removed from all file names.
   714  //
   715  // If addBucket is set then it adds the bucket to the start of the
   716  // remotes generated.
   717  //
   718  // If recurse is set the function will recursively list.
   719  //
   720  // If limit is > 0 then it limits to that many files (must be less
   721  // than 1000).
   722  //
   723  // If hidden is set then it will list the hidden (deleted) files too.
   724  //
   725  // if findFile is set it will look for files called (bucket, directory)
   726  func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
   727  	if !findFile {
   728  		if prefix != "" {
   729  			prefix += "/"
   730  		}
   731  		if directory != "" {
   732  			directory += "/"
   733  		}
   734  	}
   735  	delimiter := ""
   736  	if !recurse {
   737  		delimiter = "/"
   738  	}
   739  	bucketID, err := f.getBucketID(ctx, bucket)
   740  	if err != nil {
   741  		return err
   742  	}
   743  	chunkSize := 1000
   744  	if limit > 0 {
   745  		chunkSize = limit
   746  	}
   747  	var request = api.ListFileNamesRequest{
   748  		BucketID:     bucketID,
   749  		MaxFileCount: chunkSize,
   750  		Prefix:       f.opt.Enc.FromStandardPath(directory),
   751  		Delimiter:    delimiter,
   752  	}
   753  	if directory != "" {
   754  		request.StartFileName = f.opt.Enc.FromStandardPath(directory)
   755  	}
   756  	opts := rest.Opts{
   757  		Method: "POST",
   758  		Path:   "/b2_list_file_names",
   759  	}
   760  	if hidden || f.opt.VersionAt.IsSet() {
   761  		opts.Path = "/b2_list_file_versions"
   762  	}
   763  
   764  	lastFileName := ""
   765  
   766  	for {
   767  		var response api.ListFileNamesResponse
   768  		err := f.pacer.Call(func() (bool, error) {
   769  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
   770  			return f.shouldRetry(ctx, resp, err)
   771  		})
   772  		if err != nil {
   773  			return err
   774  		}
   775  		for i := range response.Files {
   776  			file := &response.Files[i]
   777  			file.Name = f.opt.Enc.ToStandardPath(file.Name)
   778  			// Finish if file name no longer has prefix
   779  			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
   780  				return nil
   781  			}
   782  			if !strings.HasPrefix(file.Name, prefix) {
   783  				fs.Debugf(f, "Odd name received %q", file.Name)
   784  				continue
   785  			}
   786  			remote := file.Name[len(prefix):]
   787  			// Check for directory
   788  			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
   789  			if isDirectory && len(remote) > 1 {
   790  				remote = remote[:len(remote)-1]
   791  			}
   792  			if addBucket {
   793  				remote = path.Join(bucket, remote)
   794  			}
   795  
   796  			if f.opt.VersionAt.IsSet() {
   797  				if time.Time(file.UploadTimestamp).After(time.Time(f.opt.VersionAt)) {
   798  					// Ignore versions that were created after the specified time
   799  					continue
   800  				}
   801  
   802  				if file.Name == lastFileName {
   803  					// Ignore versions before the already returned version
   804  					continue
   805  				}
   806  			}
   807  
   808  			// Send object
   809  			lastFileName = file.Name
   810  			err = fn(remote, file, isDirectory)
   811  			if err != nil {
   812  				if err == errEndList {
   813  					return nil
   814  				}
   815  				return err
   816  			}
   817  		}
   818  		// end if no NextFileName
   819  		if response.NextFileName == nil {
   820  			break
   821  		}
   822  		request.StartFileName = *response.NextFileName
   823  		if response.NextFileID != nil {
   824  			request.StartFileID = *response.NextFileID
   825  		}
   826  	}
   827  	return nil
   828  }
   829  
   830  // Convert a list item into a DirEntry
   831  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
   832  	if isDirectory {
   833  		d := fs.NewDir(remote, time.Time{})
   834  		return d, nil
   835  	}
   836  	if remote == *last {
   837  		remote = object.UploadTimestamp.AddVersion(remote)
   838  	} else {
   839  		*last = remote
   840  	}
   841  	// hide objects represent deleted files which we don't list
   842  	if object.Action == "hide" {
   843  		return nil, nil
   844  	}
   845  	o, err := f.newObjectWithInfo(ctx, remote, object)
   846  	if err != nil {
   847  		return nil, err
   848  	}
   849  	return o, nil
   850  }
   851  
   852  // listDir lists a single directory
   853  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
   854  	last := ""
   855  	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
   856  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
   857  		if err != nil {
   858  			return err
   859  		}
   860  		if entry != nil {
   861  			entries = append(entries, entry)
   862  		}
   863  		return nil
   864  	})
   865  	if err != nil {
   866  		return nil, err
   867  	}
   868  	// bucket must be present if listing succeeded
   869  	f.cache.MarkOK(bucket)
   870  	return entries, nil
   871  }
   872  
   873  // listBuckets returns all the buckets to out
   874  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
   875  	err = f.listBucketsToFn(ctx, "", func(bucket *api.Bucket) error {
   876  		d := fs.NewDir(bucket.Name, time.Time{})
   877  		entries = append(entries, d)
   878  		return nil
   879  	})
   880  	if err != nil {
   881  		return nil, err
   882  	}
   883  	return entries, nil
   884  }
   885  
   886  // List the objects and directories in dir into entries.  The
   887  // entries can be returned in any order but should be for a
   888  // complete directory.
   889  //
   890  // dir should be "" to list the root, and should not have
   891  // trailing slashes.
   892  //
   893  // This should return ErrDirNotFound if the directory isn't
   894  // found.
   895  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   896  	bucket, directory := f.split(dir)
   897  	if bucket == "" {
   898  		if directory != "" {
   899  			return nil, fs.ErrorListBucketRequired
   900  		}
   901  		return f.listBuckets(ctx)
   902  	}
   903  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
   904  }
   905  
   906  // ListR lists the objects and directories of the Fs starting
   907  // from dir recursively into out.
   908  //
   909  // dir should be "" to start from the root, and should not
   910  // have trailing slashes.
   911  //
   912  // This should return ErrDirNotFound if the directory isn't
   913  // found.
   914  //
   915  // It should call callback for each tranche of entries read.
   916  // These need not be returned in any particular order.  If
   917  // callback returns an error then the listing will stop
   918  // immediately.
   919  //
   920  // Don't implement this unless you have a more efficient way
   921  // of listing recursively than doing a directory traversal.
   922  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
   923  	bucket, directory := f.split(dir)
   924  	list := walk.NewListRHelper(callback)
   925  	listR := func(bucket, directory, prefix string, addBucket bool) error {
   926  		last := ""
   927  		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
   928  			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
   929  			if err != nil {
   930  				return err
   931  			}
   932  			return list.Add(entry)
   933  		})
   934  	}
   935  	if bucket == "" {
   936  		entries, err := f.listBuckets(ctx)
   937  		if err != nil {
   938  			return err
   939  		}
   940  		for _, entry := range entries {
   941  			err = list.Add(entry)
   942  			if err != nil {
   943  				return err
   944  			}
   945  			bucket := entry.Remote()
   946  			err = listR(bucket, "", f.rootDirectory, true)
   947  			if err != nil {
   948  				return err
   949  			}
   950  			// bucket must be present if listing succeeded
   951  			f.cache.MarkOK(bucket)
   952  		}
   953  	} else {
   954  		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
   955  		if err != nil {
   956  			return err
   957  		}
   958  		// bucket must be present if listing succeeded
   959  		f.cache.MarkOK(bucket)
   960  	}
   961  	return list.Flush()
   962  }
   963  
   964  // listBucketFn is called from listBucketsToFn to handle a bucket
   965  type listBucketFn func(*api.Bucket) error
   966  
   967  // listBucketsToFn lists the buckets to the function supplied
   968  func (f *Fs) listBucketsToFn(ctx context.Context, bucketName string, fn listBucketFn) error {
   969  	var account = api.ListBucketsRequest{
   970  		AccountID: f.info.AccountID,
   971  		BucketID:  f.info.Allowed.BucketID,
   972  	}
   973  	if bucketName != "" && account.BucketID == "" {
   974  		account.BucketName = f.opt.Enc.FromStandardName(bucketName)
   975  	}
   976  
   977  	var response api.ListBucketsResponse
   978  	opts := rest.Opts{
   979  		Method: "POST",
   980  		Path:   "/b2_list_buckets",
   981  	}
   982  	err := f.pacer.Call(func() (bool, error) {
   983  		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
   984  		return f.shouldRetry(ctx, resp, err)
   985  	})
   986  	if err != nil {
   987  		return err
   988  	}
   989  	f.bucketIDMutex.Lock()
   990  	f.bucketTypeMutex.Lock()
   991  	f._bucketID = make(map[string]string, 1)
   992  	f._bucketType = make(map[string]string, 1)
   993  	for i := range response.Buckets {
   994  		bucket := &response.Buckets[i]
   995  		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
   996  		f.cache.MarkOK(bucket.Name)
   997  		f._bucketID[bucket.Name] = bucket.ID
   998  		f._bucketType[bucket.Name] = bucket.Type
   999  	}
  1000  	f.bucketTypeMutex.Unlock()
  1001  	f.bucketIDMutex.Unlock()
  1002  	for i := range response.Buckets {
  1003  		bucket := &response.Buckets[i]
  1004  		err = fn(bucket)
  1005  		if err != nil {
  1006  			return err
  1007  		}
  1008  	}
  1009  	return nil
  1010  }
  1011  
  1012  // getbucketType finds the bucketType for the current bucket name,
  1013  // which can be one of allPublic, allPrivate, or snapshot
  1014  func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType string, err error) {
  1015  	f.bucketTypeMutex.Lock()
  1016  	bucketType = f._bucketType[bucket]
  1017  	f.bucketTypeMutex.Unlock()
  1018  	if bucketType != "" {
  1019  		return bucketType, nil
  1020  	}
  1021  	err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
  1022  		// listBucketsToFn reads bucket Types
  1023  		return nil
  1024  	})
  1025  	f.bucketTypeMutex.Lock()
  1026  	bucketType = f._bucketType[bucket]
  1027  	f.bucketTypeMutex.Unlock()
  1028  	if bucketType == "" {
  1029  		err = fs.ErrorDirNotFound
  1030  	}
  1031  	return bucketType, err
  1032  }
  1033  
  1034  // setBucketType sets the Type for the current bucket name
  1035  func (f *Fs) setBucketType(bucket string, Type string) {
  1036  	f.bucketTypeMutex.Lock()
  1037  	f._bucketType[bucket] = Type
  1038  	f.bucketTypeMutex.Unlock()
  1039  }
  1040  
  1041  // clearBucketType clears the Type for the current bucket name
  1042  func (f *Fs) clearBucketType(bucket string) {
  1043  	f.bucketTypeMutex.Lock()
  1044  	delete(f._bucketType, bucket)
  1045  	f.bucketTypeMutex.Unlock()
  1046  }
  1047  
  1048  // getBucketID finds the ID for the current bucket name
  1049  func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
  1050  	f.bucketIDMutex.Lock()
  1051  	bucketID = f._bucketID[bucket]
  1052  	f.bucketIDMutex.Unlock()
  1053  	if bucketID != "" {
  1054  		return bucketID, nil
  1055  	}
  1056  	err = f.listBucketsToFn(ctx, bucket, func(bucket *api.Bucket) error {
  1057  		// listBucketsToFn sets IDs
  1058  		return nil
  1059  	})
  1060  	f.bucketIDMutex.Lock()
  1061  	bucketID = f._bucketID[bucket]
  1062  	f.bucketIDMutex.Unlock()
  1063  	if bucketID == "" {
  1064  		err = fs.ErrorDirNotFound
  1065  	}
  1066  	return bucketID, err
  1067  }
  1068  
  1069  // setBucketID sets the ID for the current bucket name
  1070  func (f *Fs) setBucketID(bucket, ID string) {
  1071  	f.bucketIDMutex.Lock()
  1072  	f._bucketID[bucket] = ID
  1073  	f.bucketIDMutex.Unlock()
  1074  }
  1075  
  1076  // clearBucketID clears the ID for the current bucket name
  1077  func (f *Fs) clearBucketID(bucket string) {
  1078  	f.bucketIDMutex.Lock()
  1079  	delete(f._bucketID, bucket)
  1080  	f.bucketIDMutex.Unlock()
  1081  }
  1082  
  1083  // Put the object into the bucket
  1084  //
  1085  // Copy the reader in to the new object which is returned.
  1086  //
  1087  // The new object may have been created if an error is returned
  1088  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1089  	// Temporary Object under construction
  1090  	o := &Object{
  1091  		fs:     f,
  1092  		remote: src.Remote(),
  1093  	}
  1094  	return o, o.Update(ctx, in, src, options...)
  1095  }
  1096  
  1097  // PutStream uploads to the remote path with the given modTime, for content of indeterminate size
  1098  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
  1099  	return f.Put(ctx, in, src, options...)
  1100  }
  1101  
  1102  // Mkdir creates the bucket if it doesn't exist
  1103  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
  1104  	bucket, _ := f.split(dir)
  1105  	return f.makeBucket(ctx, bucket)
  1106  }
  1107  
  1108  // makeBucket creates the bucket if it doesn't exist
  1109  func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
  1110  	return f.cache.Create(bucket, func() error {
  1111  		opts := rest.Opts{
  1112  			Method: "POST",
  1113  			Path:   "/b2_create_bucket",
  1114  		}
  1115  		var request = api.CreateBucketRequest{
  1116  			AccountID: f.info.AccountID,
  1117  			Name:      f.opt.Enc.FromStandardName(bucket),
  1118  			Type:      "allPrivate",
  1119  		}
  1120  		if f.opt.Lifecycle > 0 {
  1121  			request.LifecycleRules = []api.LifecycleRule{{
  1122  				DaysFromHidingToDeleting: &f.opt.Lifecycle,
  1123  			}}
  1124  		}
  1125  		var response api.Bucket
  1126  		err := f.pacer.Call(func() (bool, error) {
  1127  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1128  			return f.shouldRetry(ctx, resp, err)
  1129  		})
  1130  		if err != nil {
  1131  			if apiErr, ok := err.(*api.Error); ok {
  1132  				if apiErr.Code == "duplicate_bucket_name" {
  1133  					// Check this is our bucket - buckets are globally unique and this
  1134  					// might be someone else's.
  1135  					_, getBucketErr := f.getBucketID(ctx, bucket)
  1136  					if getBucketErr == nil {
  1137  						// found so it is our bucket
  1138  						return nil
  1139  					}
  1140  					if getBucketErr != fs.ErrorDirNotFound {
  1141  						fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
  1142  					}
  1143  				}
  1144  			}
  1145  			return fmt.Errorf("failed to create bucket: %w", err)
  1146  		}
  1147  		f.setBucketID(bucket, response.ID)
  1148  		f.setBucketType(bucket, response.Type)
  1149  		return nil
  1150  	}, nil)
  1151  }
  1152  
  1153  // Rmdir deletes the bucket if the fs is at the root
  1154  //
  1155  // Returns an error if it isn't empty
  1156  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1157  	bucket, directory := f.split(dir)
  1158  	if bucket == "" || directory != "" {
  1159  		return nil
  1160  	}
  1161  	return f.cache.Remove(bucket, func() error {
  1162  		opts := rest.Opts{
  1163  			Method: "POST",
  1164  			Path:   "/b2_delete_bucket",
  1165  		}
  1166  		bucketID, err := f.getBucketID(ctx, bucket)
  1167  		if err != nil {
  1168  			return err
  1169  		}
  1170  		var request = api.DeleteBucketRequest{
  1171  			ID:        bucketID,
  1172  			AccountID: f.info.AccountID,
  1173  		}
  1174  		var response api.Bucket
  1175  		err = f.pacer.Call(func() (bool, error) {
  1176  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1177  			return f.shouldRetry(ctx, resp, err)
  1178  		})
  1179  		if err != nil {
  1180  			return fmt.Errorf("failed to delete bucket: %w", err)
  1181  		}
  1182  		f.clearBucketID(bucket)
  1183  		f.clearBucketType(bucket)
  1184  		f.clearUploadURL(bucketID)
  1185  		return nil
  1186  	})
  1187  }
  1188  
  1189  // Precision of the remote
  1190  func (f *Fs) Precision() time.Duration {
  1191  	return time.Millisecond
  1192  }
  1193  
  1194  // hide hides a file on the remote
  1195  func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
  1196  	bucketID, err := f.getBucketID(ctx, bucket)
  1197  	if err != nil {
  1198  		return err
  1199  	}
  1200  	opts := rest.Opts{
  1201  		Method: "POST",
  1202  		Path:   "/b2_hide_file",
  1203  	}
  1204  	var request = api.HideFileRequest{
  1205  		BucketID: bucketID,
  1206  		Name:     f.opt.Enc.FromStandardPath(bucketPath),
  1207  	}
  1208  	var response api.File
  1209  	err = f.pacer.Call(func() (bool, error) {
  1210  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1211  		return f.shouldRetry(ctx, resp, err)
  1212  	})
  1213  	if err != nil {
  1214  		if apiErr, ok := err.(*api.Error); ok {
  1215  			if apiErr.Code == "already_hidden" {
  1216  				// sometimes eventual consistency causes this, so
  1217  				// ignore this error since it is harmless
  1218  				return nil
  1219  			}
  1220  		}
  1221  		return fmt.Errorf("failed to hide %q: %w", bucketPath, err)
  1222  	}
  1223  	return nil
  1224  }
  1225  
  1226  // deleteByID deletes a file version given Name and ID
  1227  func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
  1228  	opts := rest.Opts{
  1229  		Method: "POST",
  1230  		Path:   "/b2_delete_file_version",
  1231  	}
  1232  	var request = api.DeleteFileRequest{
  1233  		ID:   ID,
  1234  		Name: f.opt.Enc.FromStandardPath(Name),
  1235  	}
  1236  	var response api.File
  1237  	err := f.pacer.Call(func() (bool, error) {
  1238  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1239  		return f.shouldRetry(ctx, resp, err)
  1240  	})
  1241  	if err != nil {
  1242  		return fmt.Errorf("failed to delete %q: %w", Name, err)
  1243  	}
  1244  	return nil
  1245  }
  1246  
  1247  // purge deletes all the files and directories
  1248  //
  1249  // if oldOnly is true then it deletes only non current files.
  1250  //
  1251  // Implemented here so we can make sure we delete old versions.
  1252  func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) error {
  1253  	bucket, directory := f.split(dir)
  1254  	if bucket == "" {
  1255  		return errors.New("can't purge from root")
  1256  	}
  1257  	var errReturn error
  1258  	var checkErrMutex sync.Mutex
  1259  	var checkErr = func(err error) {
  1260  		if err == nil {
  1261  			return
  1262  		}
  1263  		checkErrMutex.Lock()
  1264  		defer checkErrMutex.Unlock()
  1265  		if errReturn == nil {
  1266  			errReturn = err
  1267  		}
  1268  	}
  1269  	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
  1270  		return time.Since(time.Time(timestamp)) > maxAge
  1271  	}
  1272  
  1273  	// Delete files in parallel with up to Config.Transfers workers
  1274  	toBeDeleted := make(chan *api.File, f.ci.Transfers)
  1275  	var wg sync.WaitGroup
  1276  	wg.Add(f.ci.Transfers)
  1277  	for i := 0; i < f.ci.Transfers; i++ {
  1278  		go func() {
  1279  			defer wg.Done()
  1280  			for object := range toBeDeleted {
  1281  				oi, err := f.newObjectWithInfo(ctx, object.Name, object)
  1282  				if err != nil {
  1283  					fs.Errorf(object.Name, "Can't create object %v", err)
  1284  					continue
  1285  				}
  1286  				tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
  1287  				err = f.deleteByID(ctx, object.ID, object.Name)
  1288  				checkErr(err)
  1289  				tr.Done(ctx, err)
  1290  			}
  1291  		}()
  1292  	}
  1293  	if oldOnly {
  1294  		if deleteHidden && deleteUnfinished {
  1295  			fs.Infof(f, "cleaning bucket %q of all hidden files, and pending multipart uploads older than %v", bucket, maxAge)
  1296  		} else if deleteHidden {
  1297  			fs.Infof(f, "cleaning bucket %q of all hidden files", bucket)
  1298  		} else if deleteUnfinished {
  1299  			fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
  1300  		} else {
  1301  			fs.Errorf(f, "cleaning bucket %q of nothing. This should never happen!", bucket)
  1302  			return nil
  1303  		}
  1304  	} else {
  1305  		fs.Infof(f, "cleaning bucket %q of all files", bucket)
  1306  	}
  1307  
  1308  	last := ""
  1309  	checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
  1310  		if !isDirectory {
  1311  			oi, err := f.newObjectWithInfo(ctx, object.Name, object)
  1312  			if err != nil {
  1313  				fs.Errorf(object, "Can't create object %+v", err)
  1314  			}
  1315  			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
  1316  			if oldOnly && last != remote {
  1317  				// Check current version of the file
  1318  				if deleteHidden && object.Action == "hide" {
  1319  					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
  1320  					toBeDeleted <- object
  1321  				} else if deleteUnfinished && object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
  1322  					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
  1323  					toBeDeleted <- object
  1324  				} else {
  1325  					fs.Debugf(remote, "Not deleting current version (id %q) %q dated %v (%v ago)", object.ID, object.Action, time.Time(object.UploadTimestamp).Local(), time.Since(time.Time(object.UploadTimestamp)))
  1326  				}
  1327  			} else {
  1328  				fs.Debugf(remote, "Deleting (id %q)", object.ID)
  1329  				toBeDeleted <- object
  1330  			}
  1331  			last = remote
  1332  			tr.Done(ctx, nil)
  1333  		}
  1334  		return nil
  1335  	}))
  1336  	close(toBeDeleted)
  1337  	wg.Wait()
  1338  
  1339  	if !oldOnly {
  1340  		checkErr(f.Rmdir(ctx, dir))
  1341  	}
  1342  	return errReturn
  1343  }
  1344  
  1345  // Purge deletes all the files and directories including the old versions.
  1346  func (f *Fs) Purge(ctx context.Context, dir string) error {
  1347  	return f.purge(ctx, dir, false, false, false, defaultMaxAge)
  1348  }
  1349  
  1350  // CleanUp deletes all hidden files and pending multipart uploads older than 24 hours.
  1351  func (f *Fs) CleanUp(ctx context.Context) error {
  1352  	return f.purge(ctx, "", true, true, true, defaultMaxAge)
  1353  }
  1354  
  1355  // cleanUp deletes all hidden files and/or pending multipart uploads older than the specified age.
  1356  func (f *Fs) cleanUp(ctx context.Context, deleteHidden bool, deleteUnfinished bool, maxAge time.Duration) (err error) {
  1357  	return f.purge(ctx, "", true, deleteHidden, deleteUnfinished, maxAge)
  1358  }
  1359  
  1360  // copy does a server-side copy from dstObj <- srcObj
  1361  //
  1362  // If newInfo is nil then the metadata will be copied otherwise it
  1363  // will be replaced with newInfo
  1364  func (f *Fs) copy(ctx context.Context, dstObj *Object, srcObj *Object, newInfo *api.File) (err error) {
  1365  	if srcObj.size > int64(f.opt.CopyCutoff) {
  1366  		if newInfo == nil {
  1367  			newInfo, err = srcObj.getMetaData(ctx)
  1368  			if err != nil {
  1369  				return err
  1370  			}
  1371  		}
  1372  		up, err := f.newLargeUpload(ctx, dstObj, nil, srcObj, f.opt.CopyCutoff, true, newInfo)
  1373  		if err != nil {
  1374  			return err
  1375  		}
  1376  		err = up.Copy(ctx)
  1377  		if err != nil {
  1378  			return err
  1379  		}
  1380  		return dstObj.decodeMetaDataFileInfo(up.info)
  1381  	}
  1382  
  1383  	dstBucket, dstPath := dstObj.split()
  1384  	err = f.makeBucket(ctx, dstBucket)
  1385  	if err != nil {
  1386  		return err
  1387  	}
  1388  
  1389  	destBucketID, err := f.getBucketID(ctx, dstBucket)
  1390  	if err != nil {
  1391  		return err
  1392  	}
  1393  
  1394  	opts := rest.Opts{
  1395  		Method: "POST",
  1396  		Path:   "/b2_copy_file",
  1397  	}
  1398  	var request = api.CopyFileRequest{
  1399  		SourceID:     srcObj.id,
  1400  		Name:         f.opt.Enc.FromStandardPath(dstPath),
  1401  		DestBucketID: destBucketID,
  1402  	}
  1403  	if newInfo == nil {
  1404  		request.MetadataDirective = "COPY"
  1405  	} else {
  1406  		request.MetadataDirective = "REPLACE"
  1407  		request.ContentType = newInfo.ContentType
  1408  		request.Info = newInfo.Info
  1409  	}
  1410  	var response api.FileInfo
  1411  	err = f.pacer.Call(func() (bool, error) {
  1412  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1413  		return f.shouldRetry(ctx, resp, err)
  1414  	})
  1415  	if err != nil {
  1416  		return err
  1417  	}
  1418  	return dstObj.decodeMetaDataFileInfo(&response)
  1419  }
  1420  
  1421  // Copy src to this remote using server-side copy operations.
  1422  //
  1423  // This is stored with the remote path given.
  1424  //
  1425  // It returns the destination Object and a possible error.
  1426  //
  1427  // Will only be called if src.Fs().Name() == f.Name()
  1428  //
  1429  // If it isn't possible then return fs.ErrorCantCopy
  1430  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1431  	srcObj, ok := src.(*Object)
  1432  	if !ok {
  1433  		fs.Debugf(src, "Can't copy - not same remote type")
  1434  		return nil, fs.ErrorCantCopy
  1435  	}
  1436  	// Temporary Object under construction
  1437  	dstObj := &Object{
  1438  		fs:     f,
  1439  		remote: remote,
  1440  	}
  1441  	err := f.copy(ctx, dstObj, srcObj, nil)
  1442  	if err != nil {
  1443  		return nil, err
  1444  	}
  1445  	return dstObj, nil
  1446  }
  1447  
  1448  // Hashes returns the supported hash sets.
  1449  func (f *Fs) Hashes() hash.Set {
  1450  	return hash.Set(hash.SHA1)
  1451  }
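
// A minimal illustrative sketch of producing the lower case hex SHA-1
// that B2 expects in the X-Bz-Content-Sha1 header for single part
// uploads.
func exampleSHA1(in io.Reader) (string, error) {
	hasher := sha1.New()
	if _, err := io.Copy(hasher, in); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", hasher.Sum(nil)), nil
}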
  1452  
  1453  // getDownloadAuthorization returns an authorization token for downloading
  1454  // without an account.
  1455  func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string) (authorization string, err error) {
  1456  	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
  1457  	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
  1458  		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
  1459  	}
  1460  	if !f.hasPermission("shareFiles") {
  1461  		return "", errors.New("sharing a file link requires the shareFiles permission")
  1462  	}
  1463  	bucketID, err := f.getBucketID(ctx, bucket)
  1464  	if err != nil {
  1465  		return "", err
  1466  	}
  1467  	opts := rest.Opts{
  1468  		Method: "POST",
  1469  		Path:   "/b2_get_download_authorization",
  1470  	}
  1471  	var request = api.GetDownloadAuthorizationRequest{
  1472  		BucketID:               bucketID,
  1473  		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.rootDirectory, remote)),
  1474  		ValidDurationInSeconds: validDurationInSeconds,
  1475  	}
  1476  	var response api.GetDownloadAuthorizationResponse
  1477  	err = f.pacer.Call(func() (bool, error) {
  1478  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1479  		return f.shouldRetry(ctx, resp, err)
  1480  	})
  1481  	if err != nil {
  1482  		return "", fmt.Errorf("failed to get download authorization: %w", err)
  1483  	}
  1484  	return response.AuthorizationToken, nil
  1485  }
  1486  
  1487  // PublicLink returns a link for downloading without an account
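        //
        // A usage sketch; note that for private buckets the link is signed
        // with a token whose lifetime comes from --b2-download-auth-duration
        // rather than from the expire parameter:
        //
        //	link, err := f.PublicLink(ctx, "dir/file.txt", fs.Duration(time.Hour), false)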
  1488  func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) {
  1489  	bucket, bucketPath := f.split(remote)
  1490  	var RootURL string
  1491  	if f.opt.DownloadURL == "" {
  1492  		RootURL = f.info.DownloadURL
  1493  	} else {
  1494  		RootURL = f.opt.DownloadURL
  1495  	}
  1496  	_, err = f.NewObject(ctx, remote)
  1497  	if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
  1498  		err2 := f.list(ctx, bucket, bucketPath, f.rootDirectory, f.rootBucket == "", false, 1, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
  1499  			err = nil
  1500  			return nil
  1501  		})
  1502  		if err2 != nil {
  1503  			return "", err2
  1504  		}
  1505  	}
  1506  	if err != nil {
  1507  		return "", err
  1508  	}
  1509  	absPath := "/" + urlEncode(bucketPath)
  1510  	link = RootURL + "/file/" + urlEncode(bucket) + absPath
  1511  	bucketType, err := f.getbucketType(ctx, bucket)
  1512  	if err != nil {
  1513  		return "", err
  1514  	}
  1515  	if bucketType == "allPrivate" || bucketType == "snapshot" {
  1516  		AuthorizationToken, err := f.getDownloadAuthorization(ctx, bucket, remote)
  1517  		if err != nil {
  1518  			return "", err
  1519  		}
  1520  		link += "?Authorization=" + AuthorizationToken
  1521  	}
  1522  	return link, nil
  1523  }
  1524  
  1525  // ------------------------------------------------------------
  1526  
  1527  // Fs returns the parent Fs
  1528  func (o *Object) Fs() fs.Info {
  1529  	return o.fs
  1530  }
  1531  
  1532  // Return a string version
  1533  func (o *Object) String() string {
  1534  	if o == nil {
  1535  		return "<nil>"
  1536  	}
  1537  	return o.remote
  1538  }
  1539  
  1540  // Remote returns the remote path
  1541  func (o *Object) Remote() string {
  1542  	return o.remote
  1543  }
  1544  
  1545  // Hash returns the SHA-1 of an object as a lowercase hex string
  1546  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1547  	if t != hash.SHA1 {
  1548  		return "", hash.ErrUnsupported
  1549  	}
  1550  	if o.sha1 == "" {
  1551  		// Error is logged in readMetaData
  1552  		err := o.readMetaData(ctx)
  1553  		if err != nil {
  1554  			return "", err
  1555  		}
  1556  	}
  1557  	return o.sha1, nil
  1558  }
  1559  
  1560  // Size returns the size of an object in bytes
  1561  func (o *Object) Size() int64 {
  1562  	return o.size
  1563  }
  1564  
  1565  // Clean the SHA1
  1566  //
  1567  // Make sure it is lower case.
  1568  //
  1569  // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
  1570  // Some tools (e.g. Cyberduck) use this
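        //
        // For example:
        //
        //	cleanSHA1("UNVERIFIED:DA39A3EE5E6B4B0D3255BFEF95601890AFD80709")
        //	// -> "da39a3ee5e6b4b0d3255bfef95601890afd80709"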
  1571  func cleanSHA1(sha1 string) string {
  1572  	const unverified = "unverified:"
  1573  	return strings.TrimPrefix(strings.ToLower(sha1), unverified)
  1574  }
  1575  
  1576  // decodeMetaDataRaw sets the metadata from the data passed in
  1577  //
  1578  // Sets
  1579  //
  1580  //	o.id
  1581  //	o.modTime
  1582  //	o.size
  1583  //	o.sha1
  1584  func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
  1585  	o.id = ID
  1586  	o.sha1 = SHA1
  1587  	o.mimeType = mimeType
  1588  	// Read SHA1 from metadata if it exists and isn't set
  1589  	if o.sha1 == "" || o.sha1 == "none" {
  1590  		o.sha1 = Info[sha1Key]
  1591  	}
  1592  	o.sha1 = cleanSHA1(o.sha1)
  1593  	o.size = Size
  1594  	// Use the UploadTimestamp if we can't get the file info
  1595  	o.modTime = time.Time(UploadTimestamp)
  1596  	return o.parseTimeString(Info[timeKey])
  1597  }
  1598  
  1599  // decodeMetaData sets the metadata in the object from an api.File
  1600  //
  1601  // Sets
  1602  //
  1603  //	o.id
  1604  //	o.modTime
  1605  //	o.size
  1606  //	o.sha1
  1607  func (o *Object) decodeMetaData(info *api.File) (err error) {
  1608  	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  1609  }
  1610  
  1611  // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
  1612  //
  1613  // Sets
  1614  //
  1615  //	o.id
  1616  //	o.modTime
  1617  //	o.size
  1618  //	o.sha1
  1619  func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
  1620  	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  1621  }
  1622  
  1623  // getMetaDataListing gets the metadata for the object unconditionally from the listing
  1624  //
  1625  // Note that listing is a class C transaction, which costs more than
  1626  // the class B transaction used in getMetaData.
  1627  func (o *Object) getMetaDataListing(ctx context.Context) (info *api.File, err error) {
  1628  	bucket, bucketPath := o.split()
  1629  	maxSearched := 1
  1630  	var timestamp api.Timestamp
  1631  	if o.fs.opt.Versions {
  1632  		timestamp, bucketPath = api.RemoveVersion(bucketPath)
  1633  		maxSearched = maxVersions
  1634  	}
  1635  
  1636  	err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxSearched, o.fs.opt.Versions, true, func(remote string, object *api.File, isDirectory bool) error {
  1637  		if isDirectory {
  1638  			return nil
  1639  		}
  1640  		if remote == bucketPath {
  1641  			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
  1642  				return nil
  1643  			}
  1644  			info = object
  1645  		}
  1646  		return errEndList // read only 1 item
  1647  	})
  1648  	if err != nil {
  1649  		if err == fs.ErrorDirNotFound {
  1650  			return nil, fs.ErrorObjectNotFound
  1651  		}
  1652  		return nil, err
  1653  	}
  1654  	if info == nil {
  1655  		return nil, fs.ErrorObjectNotFound
  1656  	}
  1657  	return info, nil
  1658  }
  1659  
  1660  // getMetaData gets the metadata from the object unconditionally
  1661  func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
  1662  	// If using versions and we have a version suffix, we need to list the directory to find the correct version
  1663  	if o.fs.opt.Versions {
  1664  		timestamp, _ := api.RemoveVersion(o.remote)
  1665  		if !timestamp.IsZero() {
  1666  			return o.getMetaDataListing(ctx)
  1667  		}
  1668  	}
  1669  	_, info, err = o.getOrHead(ctx, "HEAD", nil)
  1670  	return info, err
  1671  }
  1672  
  1673  // readMetaData gets the metadata if it hasn't already been fetched
  1674  //
  1675  // Sets
  1676  //
  1677  //	o.id
  1678  //	o.modTime
  1679  //	o.size
  1680  //	o.sha1
  1681  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1682  	if o.id != "" {
  1683  		return nil
  1684  	}
  1685  	info, err := o.getMetaData(ctx)
  1686  	if err != nil {
  1687  		return err
  1688  	}
  1689  	return o.decodeMetaData(info)
  1690  }
  1691  
  1692  // timeString returns modTime as the number of milliseconds
  1693  // elapsed since January 1, 1970 UTC as a decimal string.
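        //
        // For example (parseTimeString below reverses this):
        //
        //	timeString(time.Unix(1445443578, 384e6)) // -> "1445443578384"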
  1694  func timeString(modTime time.Time) string {
  1695  	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
  1696  }
  1697  
  1698  // parseTimeString converts a decimal string number of milliseconds
  1699  // elapsed since January 1, 1970 UTC into a time.Time and stores it in
  1700  // the modTime variable.
  1701  func (o *Object) parseTimeString(timeString string) (err error) {
  1702  	if timeString == "" {
  1703  		return nil
  1704  	}
  1705  	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
  1706  	if err != nil {
  1707  		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
  1708  		return nil
  1709  	}
  1710  	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
  1711  	return nil
  1712  }
  1713  
  1714  // ModTime returns the modification time of the object
  1715  //
  1716  // It attempts to read the object's mtime and if that isn't present
  1717  // falls back to the LastModified returned in the HTTP headers.
  1718  //
  1719  // SHA-1 will also be updated once the request has completed.
  1720  func (o *Object) ModTime(ctx context.Context) (result time.Time) {
  1721  	// The error is logged in readMetaData
  1722  	_ = o.readMetaData(ctx)
  1723  	return o.modTime
  1724  }
  1725  
  1726  // SetModTime sets the modification time of the Object
  1727  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1728  	info, err := o.getMetaData(ctx)
  1729  	if err != nil {
  1730  		return err
  1731  	}
  1732  	info.Info[timeKey] = timeString(modTime)
  1733  
  1734  	// Copy to the same name, overwriting the metadata only
  1735  	return o.fs.copy(ctx, o, o, info)
  1736  }
  1737  
  1738  // Storable returns if this object is storable
  1739  func (o *Object) Storable() bool {
  1740  	return true
  1741  }
  1742  
  1743  // openFile represents an Object open for reading
  1744  type openFile struct {
  1745  	o     *Object        // Object we are reading for
  1746  	resp  *http.Response // response of the GET
  1747  	body  io.Reader      // reading from here
  1748  	hash  gohash.Hash    // currently accumulating SHA1
  1749  	bytes int64          // number of bytes read on this connection
  1750  	eof   bool           // whether we have read end of file
  1751  }
  1752  
  1753  // newOpenFile wraps the response body so the sha1sum can be checked on Close
  1754  func newOpenFile(o *Object, resp *http.Response) *openFile {
  1755  	file := &openFile{
  1756  		o:    o,
  1757  		resp: resp,
  1758  		hash: sha1.New(),
  1759  	}
  1760  	file.body = io.TeeReader(resp.Body, file.hash)
  1761  	return file
  1762  }
  1763  
  1764  // Read bytes from the object - see io.Reader
  1765  func (file *openFile) Read(p []byte) (n int, err error) {
  1766  	n, err = file.body.Read(p)
  1767  	file.bytes += int64(n)
  1768  	if err == io.EOF {
  1769  		file.eof = true
  1770  	}
  1771  	return
  1772  }
  1773  
  1774  // Close closes the object and checks the length and SHA1 if all of
  1775  // the object was read
  1776  func (file *openFile) Close() (err error) {
  1777  	// Close the body at the end
  1778  	defer fs.CheckClose(file.resp.Body, &err)
  1779  
  1780  	// If not end of file then can't check SHA1
  1781  	if !file.eof {
  1782  		return nil
  1783  	}
  1784  
  1785  	// Check to see we read the correct number of bytes
  1786  	if file.o.Size() != file.bytes {
  1787  		return fmt.Errorf("corrupted on transfer: lengths differ want %d vs got %d", file.o.Size(), file.bytes)
  1788  	}
  1789  
  1790  	// Check the SHA1
  1791  	receivedSHA1 := file.o.sha1
  1792  	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
  1793  	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
  1794  		return fmt.Errorf("corrupted on transfer: SHA1 hashes differ want %q vs got %q", receivedSHA1, calculatedSHA1)
  1795  	}
  1796  
  1797  	return nil
  1798  }
  1799  
  1800  // Check it satisfies the interfaces
  1801  var _ io.ReadCloser = &openFile{}
  1802  
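        // getOrHead performs a GET or HEAD request for the object and
        // rebuilds its metadata from the response headers, since B2
        // returns the file info as X-Bz-* headers rather than JSON.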
  1803  func (o *Object) getOrHead(ctx context.Context, method string, options []fs.OpenOption) (resp *http.Response, info *api.File, err error) {
  1804  	opts := rest.Opts{
  1805  		Method:     method,
  1806  		Options:    options,
  1807  		NoResponse: method == "HEAD",
  1808  	}
  1809  
  1810  	// Use the downloadUrl from Backblaze unless a custom
  1811  	// --b2-download-url has been configured
  1812  	if o.fs.opt.DownloadURL == "" {
  1813  		opts.RootURL = o.fs.info.DownloadURL
  1814  	} else {
  1815  		opts.RootURL = o.fs.opt.DownloadURL
  1816  	}
  1817  
  1818  	// Download by id if set and not using DownloadURL otherwise by name
  1819  	if o.id != "" && o.fs.opt.DownloadURL == "" {
  1820  		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
  1821  	} else {
  1822  		bucket, bucketPath := o.split()
  1823  		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
  1824  	}
  1825  	err = o.fs.pacer.Call(func() (bool, error) {
  1826  		resp, err = o.fs.srv.Call(ctx, &opts)
  1827  		return o.fs.shouldRetry(ctx, resp, err)
  1828  	})
  1829  	if err != nil {
  1830  		// 404 for files, 400 for directories
  1831  		if resp != nil && (resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusBadRequest) {
  1832  			return nil, nil, fs.ErrorObjectNotFound
  1833  		}
  1834  		return nil, nil, fmt.Errorf("failed to %s for download: %w", method, err)
  1835  	}
  1836  
  1837  	// NB resp may be open here - don't return err != nil without closing
  1838  
  1839  	// Convert the Headers into an api.File
  1840  	var uploadTimestamp api.Timestamp
  1841  	err = uploadTimestamp.UnmarshalJSON([]byte(resp.Header.Get(timestampHeader)))
  1842  	if err != nil {
  1843  		fs.Debugf(o, "Bad "+timestampHeader+" header: %v", err)
  1844  	}
  1845  	var Info = make(map[string]string)
  1846  	for k, vs := range resp.Header {
  1847  		k = strings.ToLower(k)
  1848  		for _, v := range vs {
  1849  			if strings.HasPrefix(k, headerPrefix) {
  1850  				Info[k[len(headerPrefix):]] = v
  1851  			}
  1852  		}
  1853  	}
  1854  	info = &api.File{
  1855  		ID:              resp.Header.Get(idHeader),
  1856  		Name:            resp.Header.Get(nameHeader),
  1857  		Action:          "upload",
  1858  		Size:            resp.ContentLength,
  1859  		UploadTimestamp: uploadTimestamp,
  1860  		SHA1:            resp.Header.Get(sha1Header),
  1861  		ContentType:     resp.Header.Get("Content-Type"),
  1862  		Info:            Info,
  1863  	}
  1864  	// When reading files from B2 via Cloudflare using
  1865  	// --b2-download-url, Cloudflare strips the Content-Length
  1866  	// header (presumably so it can inject stuff), so use the old
  1867  	// length read from the listing.
  1868  	if info.Size < 0 {
  1869  		info.Size = o.size
  1870  	}
  1871  	return resp, info, nil
  1872  }
  1873  
  1874  // Open an object for read
  1875  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1876  	fs.FixRangeOption(options, o.size)
  1877  
  1878  	resp, info, err := o.getOrHead(ctx, "GET", options)
  1879  	if err != nil {
  1880  		return nil, err
  1881  	}
  1882  
  1883  	// Don't check length or hash or metadata on partial content
  1884  	if resp.StatusCode == http.StatusPartialContent {
  1885  		return resp.Body, nil
  1886  	}
  1887  
  1888  	err = o.decodeMetaData(info)
  1889  	if err != nil {
  1890  		_ = resp.Body.Close()
  1891  		return nil, err
  1892  	}
  1893  	return newOpenFile(o, resp), nil
  1894  }
  1895  
  1896  // dontEncode is the characters that do not need percent-encoding
  1897  //
  1898  // The characters that do not need percent-encoding are a subset of
  1899  // the printable ASCII characters: upper-case letters, lower-case
  1900  // letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
  1901  // "*", ";", "=", ":", and "@". All other byte values in a UTF-8 string must
  1902  // be replaced with "%" and the two-digit hex value of the byte.
  1903  const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
  1904  	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
  1905  	`0123456789` +
  1906  	`._-/~!$'()*;=:@`)
  1907  
  1908  // noNeedToEncode is a bitmap of characters which don't need % encoding
  1909  var noNeedToEncode [256]bool
  1910  
  1911  func init() {
  1912  	for _, c := range dontEncode {
  1913  		noNeedToEncode[c] = true
  1914  	}
  1915  }
  1916  
  1917  // urlEncode encodes in with % encoding
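        //
        // For example:
        //
        //	urlEncode("a b&c") // -> "a%20b%26c"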
  1918  func urlEncode(in string) string {
  1919  	var out bytes.Buffer
  1920  	for i := 0; i < len(in); i++ {
  1921  		c := in[i]
  1922  		if noNeedToEncode[c] {
  1923  			_ = out.WriteByte(c)
  1924  		} else {
  1925  			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c)) // %02X so bytes < 0x10 get a leading zero
  1926  		}
  1927  	}
  1928  	return out.String()
  1929  }
  1930  
  1931  // Update the object with the contents of the io.Reader, modTime and size
  1932  //
  1933  // The new object may have been created if an error is returned
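        //
        // Three paths are taken depending on the size: streams of unknown
        // length are buffered a chunk at a time and streamed if a second
        // chunk shows up, sizes over --b2-upload-cutoff go through the
        // multipart chunk writer, and everything else is a single
        // b2_upload_file call.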
  1934  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1935  	if o.fs.opt.Versions {
  1936  		return errNotWithVersions
  1937  	}
  1938  	if o.fs.opt.VersionAt.IsSet() {
  1939  		return errNotWithVersionAt
  1940  	}
  1941  	size := src.Size()
  1942  
  1943  	bucket, bucketPath := o.split()
  1944  	err = o.fs.makeBucket(ctx, bucket)
  1945  	if err != nil {
  1946  		return err
  1947  	}
  1948  	if size < 0 {
  1949  		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
  1950  		rw := o.fs.getRW(false)
  1951  
  1952  		n, err := io.CopyN(rw, in, int64(o.fs.opt.ChunkSize))
  1953  		if err == nil {
  1954  			bufReader := bufio.NewReader(in)
  1955  			in = bufReader
  1956  			_, err = bufReader.Peek(1)
  1957  		}
  1958  
  1959  		if err == nil {
  1960  			fs.Debugf(o, "File is big enough for chunked streaming")
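        		// If CopyN filled a whole chunk and Peek found at least one
        		// more byte, the upload spans two or more chunks and must be
        		// streamed; io.EOF means everything fitted in the buffer, so
        		// a direct upload is cheaper.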
  1961  			up, err := o.fs.newLargeUpload(ctx, o, in, src, o.fs.opt.ChunkSize, false, nil)
  1962  			if err != nil {
  1963  				o.fs.putRW(rw)
  1964  				return err
  1965  			}
  1966  			// NB Stream returns the buffer and token
  1967  			err = up.Stream(ctx, rw)
  1968  			if err != nil {
  1969  				return err
  1970  			}
  1971  			return o.decodeMetaDataFileInfo(up.info)
  1972  		} else if err == io.EOF {
  1973  			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
  1974  			defer o.fs.putRW(rw)
  1975  			size = n
  1976  			in = rw
  1977  		} else {
  1978  			o.fs.putRW(rw)
  1979  			return err
  1980  		}
  1981  	} else if size > int64(o.fs.opt.UploadCutoff) {
  1982  		chunkWriter, err := multipart.UploadMultipart(ctx, src, in, multipart.UploadMultipartOptions{
  1983  			Open:        o.fs,
  1984  			OpenOptions: options,
  1985  		})
  1986  		if err != nil {
  1987  			return err
  1988  		}
  1989  		up := chunkWriter.(*largeUpload)
  1990  		return o.decodeMetaDataFileInfo(up.info)
  1991  	}
  1992  
  1993  	modTime := src.ModTime(ctx)
  1994  
  1995  	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
  1996  	if calculatedSha1 == "" {
  1997  		calculatedSha1 = "hex_digits_at_end"
  1998  		har := newHashAppendingReader(in, sha1.New())
  1999  		size += int64(har.AdditionalLength())
  2000  		in = har
  2001  	}
  2002  
  2003  	// Get upload URL
  2004  	upload, err := o.fs.getUploadURL(ctx, bucket)
  2005  	if err != nil {
  2006  		return err
  2007  	}
  2008  	defer func() {
  2009  		// return it like this because we might nil it out
  2010  		o.fs.returnUploadURL(upload)
  2011  	}()
  2012  
  2013  	// Headers for upload file
  2014  	//
  2015  	// Authorization
  2016  	// required
  2017  	// An upload authorization token, from b2_get_upload_url.
  2018  	//
  2019  	// X-Bz-File-Name
  2020  	// required
  2021  	//
  2022  	// The name of the file, in percent-encoded UTF-8. See Files for requirements on file names. See String Encoding.
  2023  	//
  2024  	// Content-Type
  2025  	// required
  2026  	//
  2027  	// The MIME type of the content of the file, which will be returned in
  2028  	// the Content-Type header when downloading the file. Use the
  2029  	// Content-Type b2/x-auto to automatically set the stored Content-Type
  2030  	// post upload. In the case where a file extension is absent or the
  2031  	// lookup fails, the Content-Type is set to application/octet-stream. The
  2032  	// Content-Type mappings can be perused here.
  2033  	//
  2034  	// X-Bz-Content-Sha1
  2035  	// required
  2036  	//
  2037  	// The SHA1 checksum of the content of the file. B2 will check this when
  2038  	// the file is uploaded, to make sure that the file arrived correctly. It
  2039  	// will be returned in the X-Bz-Content-Sha1 header when the file is
  2040  	// downloaded.
  2041  	//
  2042  	// X-Bz-Info-src_last_modified_millis
  2043  	// optional
  2044  	//
  2045  	// If the original source of the file being uploaded has a last modified
  2046  	// time concept, Backblaze recommends using this spelling of one of your
  2047  	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
  2048  	// different B2 clients and the B2 web user interface to interoperate
  2049  	// correctly. The value should be a base 10 number which represents a UTC
  2050  	// time when the original source file was last modified. It is a base 10
  2051  	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
  2052  	// in a 64 bit integer such as the type "long" in the programming
  2053  	// language Java. It is intended to be compatible with Java's time
  2054  	// long. For example, it can be passed directly into the Java call
  2055  	// Date.setTime(long time).
  2056  	//
  2057  	// X-Bz-Info-*
  2058  	// optional
  2059  	//
  2060  	// Up to 10 of these headers may be present. The * part of the header
  2061  	// name is replaced with the name of a custom field in the file
  2062  	// information stored with the file, and the value is an arbitrary UTF-8
  2063  	// string, percent-encoded. The same info headers sent with the upload
  2064  	// will be returned with the download.
  2065  
  2066  	opts := rest.Opts{
  2067  		Method:  "POST",
  2068  		RootURL: upload.UploadURL,
  2069  		Body:    in,
  2070  		Options: options,
  2071  		ExtraHeaders: map[string]string{
  2072  			"Authorization":  upload.AuthorizationToken,
  2073  			"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
  2074  			"Content-Type":   fs.MimeType(ctx, src),
  2075  			sha1Header:       calculatedSha1,
  2076  			timeHeader:       timeString(modTime),
  2077  		},
  2078  		ContentLength: &size,
  2079  	}
  2080  	var response api.FileInfo
  2081  	// Don't retry, return a retry error instead
  2082  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  2083  		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &response)
  2084  		retry, err := o.fs.shouldRetry(ctx, resp, err)
  2085  		// On retryable error clear UploadURL
  2086  		if retry {
  2087  			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
  2088  			upload = nil
  2089  		}
  2090  		return retry, err
  2091  	})
  2092  	if err != nil {
  2093  		return err
  2094  	}
  2095  	return o.decodeMetaDataFileInfo(&response)
  2096  }
  2097  
  2098  // OpenChunkWriter returns the chunk size and a ChunkWriter
  2099  //
  2100  // Pass in the remote and the src object
  2101  // You can also use options to hint at the desired chunk size
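        //
        // A rough usage sketch (the chunk reader is hypothetical and the
        // ChunkWriter method set is as defined by the fs package):
        //
        //	info, w, err := f.OpenChunkWriter(ctx, "path/file.bin", src)
        //	_, err = w.WriteChunk(ctx, 0, chunk0)
        //	err = w.Close(ctx)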
  2102  func (f *Fs) OpenChunkWriter(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
  2103  	// FIXME what if file is smaller than 1 chunk?
  2104  	if f.opt.Versions {
  2105  		return info, nil, errNotWithVersions
  2106  	}
  2107  	if f.opt.VersionAt.IsSet() {
  2108  		return info, nil, errNotWithVersionAt
  2109  	}
  2110  	//size := src.Size()
  2111  
  2112  	// Temporary Object under construction
  2113  	o := &Object{
  2114  		fs:     f,
  2115  		remote: remote,
  2116  	}
  2117  
  2118  	bucket, _ := o.split()
  2119  	err = f.makeBucket(ctx, bucket)
  2120  	if err != nil {
  2121  		return info, nil, err
  2122  	}
  2123  
  2124  	info = fs.ChunkWriterInfo{
  2125  		ChunkSize:   int64(f.opt.ChunkSize),
  2126  		Concurrency: o.fs.opt.UploadConcurrency,
  2127  		//LeavePartsOnError: o.fs.opt.LeavePartsOnError,
  2128  	}
  2129  	up, err := f.newLargeUpload(ctx, o, nil, src, f.opt.ChunkSize, false, nil)
  2130  	return info, up, err
  2131  }
  2132  
  2133  // Remove an object
  2134  func (o *Object) Remove(ctx context.Context) error {
  2135  	bucket, bucketPath := o.split()
  2136  	if o.fs.opt.Versions {
  2137  		return errNotWithVersions
  2138  	}
  2139  	if o.fs.opt.VersionAt.IsSet() {
  2140  		return errNotWithVersionAt
  2141  	}
  2142  	if o.fs.opt.HardDelete {
  2143  		return o.fs.deleteByID(ctx, o.id, bucketPath)
  2144  	}
  2145  	return o.fs.hide(ctx, bucket, bucketPath)
  2146  }
  2147  
  2148  // MimeType of an Object if known, "" otherwise
  2149  func (o *Object) MimeType(ctx context.Context) string {
  2150  	return o.mimeType
  2151  }
  2152  
  2153  // ID returns the ID of the Object if known, or "" if not
  2154  func (o *Object) ID() string {
  2155  	return o.id
  2156  }
  2157  
  2158  var lifecycleHelp = fs.CommandHelp{
  2159  	Name:  "lifecycle",
  2160  	Short: "Read or set the lifecycle for a bucket",
  2161  	Long: `This command can be used to read or set the lifecycle for a bucket.
  2162  
  2163  Usage Examples:
  2164  
  2165  To show the current lifecycle rules:
  2166  
  2167      rclone backend lifecycle b2:bucket
  2168  
  2169  This will dump something like this, showing the lifecycle rules:
  2170  
  2171      [
  2172          {
  2173              "daysFromHidingToDeleting": 1,
  2174              "daysFromUploadingToHiding": null,
  2175              "fileNamePrefix": ""
  2176          }
  2177      ]
  2178  
  2179  If there are no lifecycle rules (the default) then it will just return [].
  2180  
  2181  To set new lifecycle rules:
  2182  
  2183      rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=30
  2184      rclone backend lifecycle b2:bucket -o daysFromUploadingToHiding=5 -o daysFromHidingToDeleting=1
  2185  
  2186  This will run and then print the new lifecycle rules as above.
  2187  
  2188  Rclone only lets you set lifecycles for the whole bucket with the
  2189  fileNamePrefix = "".
  2190  
  2191  You can't disable versioning with B2. The best you can do is to set
  2192  daysFromHidingToDeleting to 1 day. You can also enable hard_delete
  2193  in the config, which means deletions won't create versions, but
  2194  overwrites will still create them.
  2195  
  2196      rclone backend lifecycle b2:bucket -o daysFromHidingToDeleting=1
  2197  
  2198  See: https://www.backblaze.com/docs/cloud-storage-lifecycle-rules
  2199  `,
  2200  	Opts: map[string]string{
  2201  		"daysFromHidingToDeleting":  "After a file has been hidden for this many days it is deleted. 0 is off.",
  2202  		"daysFromUploadingToHiding": "This many days after uploading a file is hidden",
  2203  	},
  2204  }
  2205  
  2206  func (f *Fs) lifecycleCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  2207  	var newRule api.LifecycleRule
  2208  	if daysStr := opt["daysFromHidingToDeleting"]; daysStr != "" {
  2209  		days, err := strconv.Atoi(daysStr)
  2210  		if err != nil {
  2211  			return nil, fmt.Errorf("bad daysFromHidingToDeleting: %w", err)
  2212  		}
  2213  		newRule.DaysFromHidingToDeleting = &days
  2214  	}
  2215  	if daysStr := opt["daysFromUploadingToHiding"]; daysStr != "" {
  2216  		days, err := strconv.Atoi(daysStr)
  2217  		if err != nil {
  2218  			return nil, fmt.Errorf("bad daysFromUploadingToHiding: %w", err)
  2219  		}
  2220  		newRule.DaysFromUploadingToHiding = &days
  2221  	}
  2222  	bucketName, _ := f.split("")
  2223  	if bucketName == "" {
  2224  		return nil, errors.New("bucket required")
  2226  	}
  2227  
  2228  	var bucket *api.Bucket
  2229  	if newRule.DaysFromHidingToDeleting != nil || newRule.DaysFromUploadingToHiding != nil {
  2230  		bucketID, err := f.getBucketID(ctx, bucketName)
  2231  		if err != nil {
  2232  			return nil, err
  2233  		}
  2234  		opts := rest.Opts{
  2235  			Method: "POST",
  2236  			Path:   "/b2_update_bucket",
  2237  		}
  2238  		var request = api.UpdateBucketRequest{
  2239  			ID:             bucketID,
  2240  			AccountID:      f.info.AccountID,
  2241  			LifecycleRules: []api.LifecycleRule{newRule},
  2242  		}
  2243  		var response api.Bucket
  2244  		err = f.pacer.Call(func() (bool, error) {
  2245  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  2246  			return f.shouldRetry(ctx, resp, err)
  2247  		})
  2248  		if err != nil {
  2249  			return nil, err
  2250  		}
  2251  		bucket = &response
  2252  	} else {
  2253  		err = f.listBucketsToFn(ctx, bucketName, func(b *api.Bucket) error {
  2254  			bucket = b
  2255  			return nil
  2256  		})
  2257  		if err != nil {
  2258  			return nil, err
  2259  		}
  2260  	}
  2261  	if bucket == nil {
  2262  		return nil, fs.ErrorDirNotFound
  2263  	}
  2264  	return bucket.LifecycleRules, nil
  2265  }
  2266  
  2267  var cleanupHelp = fs.CommandHelp{
  2268  	Name:  "cleanup",
  2269  	Short: "Remove unfinished large file uploads.",
  2270  	Long: `This command removes unfinished large file uploads of age greater than
  2271  max-age, which defaults to 24 hours.
  2272  
  2273  Note that you can use --interactive/-i or --dry-run with this command to see what
  2274  it would do.
  2275  
  2276      rclone backend cleanup b2:bucket/path/to/object
  2277      rclone backend cleanup -o max-age=7w b2:bucket/path/to/object
  2278  
  2279  Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
  2280  `,
  2281  	Opts: map[string]string{
  2282  		"max-age": "Max age of upload to delete",
  2283  	},
  2284  }
  2285  
  2286  func (f *Fs) cleanupCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  2287  	maxAge := defaultMaxAge
  2288  	if opt["max-age"] != "" {
  2289  		maxAge, err = fs.ParseDuration(opt["max-age"])
  2290  		if err != nil {
  2291  			return nil, fmt.Errorf("bad max-age: %w", err)
  2292  		}
  2293  	}
  2294  	return nil, f.cleanUp(ctx, false, true, maxAge)
  2295  }
  2296  
  2297  var cleanupHiddenHelp = fs.CommandHelp{
  2298  	Name:  "cleanup-hidden",
  2299  	Short: "Remove old versions of files.",
  2300  	Long: `This command removes any old hidden versions of files.
  2301  
  2302  Note that you can use --interactive/-i or --dry-run with this command to see what
  2303  it would do.
  2304  
  2305      rclone backend cleanup-hidden b2:bucket/path/to/dir
  2306  `,
  2307  }
  2308  
  2309  func (f *Fs) cleanupHiddenCommand(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  2310  	return nil, f.cleanUp(ctx, true, false, 0)
  2311  }
  2312  
  2313  var commandHelp = []fs.CommandHelp{
  2314  	lifecycleHelp,
  2315  	cleanupHelp,
  2316  	cleanupHiddenHelp,
  2317  }
  2318  
  2319  // Command the backend to run a named command
  2320  //
  2321  // The command run is name,
  2322  // args may be used to read arguments from, and
  2323  // opts may be used to read optional arguments from.
  2324  //
  2325  // The result should be capable of being JSON encoded
  2326  // If it is a string or a []string it will be shown to the user
  2327  // otherwise it will be JSON encoded and shown to the user like that
  2328  func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
  2329  	switch name {
  2330  	case "lifecycle":
  2331  		return f.lifecycleCommand(ctx, name, arg, opt)
  2332  	case "cleanup":
  2333  		return f.cleanupCommand(ctx, name, arg, opt)
  2334  	case "cleanup-hidden":
  2335  		return f.cleanupHiddenCommand(ctx, name, arg, opt)
  2336  	default:
  2337  		return nil, fs.ErrorCommandNotFound
  2338  	}
  2339  }
  2340  
  2341  // Check the interfaces are satisfied
  2342  var (
  2343  	_ fs.Fs              = &Fs{}
  2344  	_ fs.Purger          = &Fs{}
  2345  	_ fs.Copier          = &Fs{}
  2346  	_ fs.PutStreamer     = &Fs{}
  2347  	_ fs.CleanUpper      = &Fs{}
  2348  	_ fs.ListRer         = &Fs{}
  2349  	_ fs.PublicLinker    = &Fs{}
  2350  	_ fs.OpenChunkWriter = &Fs{}
  2351  	_ fs.Commander       = &Fs{}
  2352  	_ fs.Object          = &Object{}
  2353  	_ fs.MimeTyper       = &Object{}
  2354  	_ fs.IDer            = &Object{}
  2355  )