github.com/xhghs/rclone@v1.51.1-0.20200430155106-e186a28cced8/backend/b2/b2.go

// Package b2 provides an interface to the Backblaze B2 object storage system
package b2

// FIXME should we remove sha1 checks from here as rclone now supports
// checking SHA1s?

import (
	"bufio"
	"bytes"
	"context"
	"crypto/sha1"
	"fmt"
	gohash "hash"
	"io"
	"net/http"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/b2/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/pacer"
	"github.com/rclone/rclone/lib/rest"
)

const (
	defaultEndpoint     = "https://api.backblazeb2.com"
	headerPrefix        = "x-bz-info-" // lower case as that is what the server returns
	timeKey             = "src_last_modified_millis"
	timeHeader          = headerPrefix + timeKey
	sha1Key             = "large_file_sha1"
	sha1Header          = "X-Bz-Content-Sha1"
	sha1InfoHeader      = headerPrefix + sha1Key
	testModeHeader      = "X-Bz-Test-Mode"
	retryAfterHeader    = "Retry-After"
	minSleep            = 10 * time.Millisecond
	maxSleep            = 5 * time.Minute
	decayConstant       = 1 // bigger for slower decay, exponential
	maxParts            = 10000
	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
	minChunkSize        = 5 * fs.MebiByte
	defaultChunkSize    = 96 * fs.MebiByte
	defaultUploadCutoff = 200 * fs.MebiByte
)

// Globals
var (
	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "b2",
		Description: "Backblaze B2",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "account",
			Help:     "Account ID or Application Key ID",
			Required: true,
		}, {
			Name:     "key",
			Help:     "Application Key",
			Required: true,
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for the service.\nLeave blank normally.",
			Advanced: true,
		}, {
			Name: "test_mode",
			Help: `A flag string for X-Bz-Test-Mode header for debugging.

This is for debugging purposes only. Setting it to one of the strings
below will cause b2 to return specific errors:

  * "fail_some_uploads"
  * "expire_some_account_authorization_tokens"
  * "force_cap_exceeded"

These will be set in the "X-Bz-Test-Mode" header which is documented
in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
			Default:  "",
			Hide:     fs.OptionHideConfigurator,
			Advanced: true,
		}, {
			Name:     "versions",
			Help:     "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
			Default:  false,
			Advanced: true,
		}, {
			Name:    "hard_delete",
			Help:    "Permanently delete files on remote removal, otherwise hide files.",
			Default: false,
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload.

Files above this size will be uploaded in chunks of "--b2-chunk-size".

This value should be set no larger than 4.657GiB (== 5GB).`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Upload chunk size. Must fit in memory.

When uploading large files, chunk the file into this size.  Note that
these chunks are buffered in memory and there might be a maximum of
   122  "--transfers" chunks in progress at once.  5,000,000 Bytes is the
   123  minimum size.`,
   124  			Default:  defaultChunkSize,
   125  			Advanced: true,
   126  		}, {
   127  			Name:     "disable_checksum",
   128  			Help:     `Disable checksums for large (> upload cutoff) files`,
   129  			Default:  false,
   130  			Advanced: true,
   131  		}, {
   132  			Name: "download_url",
   133  			Help: `Custom endpoint for downloads.
   134  
   135  This is usually set to a Cloudflare CDN URL as Backblaze offers
   136  free egress for data downloaded through the Cloudflare network.
   137  This is probably only useful for a public bucket.
   138  Leave blank if you want to use the endpoint provided by Backblaze.`,
   139  			Advanced: true,
   140  		}, {
   141  			Name: "download_auth_duration",
   142  			Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
   143  
   144  The duration before the download authorization token will expire.
   145  The minimum value is 1 second. The maximum value is one week.`,
   146  			Default:  fs.Duration(7 * 24 * time.Hour),
   147  			Advanced: true,
   148  		}, {
   149  			Name:     config.ConfigEncoding,
   150  			Help:     config.ConfigEncodingHelp,
   151  			Advanced: true,
   152  			// See: https://www.backblaze.com/b2/docs/files.html
   153  			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
   154  			// FIXME: allow /, but not leading, trailing or double
   155  			Default: (encoder.Display |
   156  				encoder.EncodeBackSlash |
   157  				encoder.EncodeInvalidUtf8),
   158  		}},
   159  	})
   160  }
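
// The options above surface as a remote section in the rclone config file.
// A minimal sketch (illustrative remote name and credentials, not taken from
// this source) might look like:
//
//	[myb2]
//	type = b2
//	account = 0123456789ab
//	key = K001xxxxxxxxxxxxxxxxxxxxxxxxxx
//	hard_delete = true
//
// Note that with the defaults above a chunked upload may hold up to
// --transfers buffers of chunk_size (96 MiB each) in memory at once.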

// Options defines the configuration for this backend
type Options struct {
	Account                       string               `config:"account"`
	Key                           string               `config:"key"`
	Endpoint                      string               `config:"endpoint"`
	TestMode                      string               `config:"test_mode"`
	Versions                      bool                 `config:"versions"`
	HardDelete                    bool                 `config:"hard_delete"`
	UploadCutoff                  fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize                     fs.SizeSuffix        `config:"chunk_size"`
	DisableCheckSum               bool                 `config:"disable_checksum"`
	DownloadURL                   string               `config:"download_url"`
	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
	Enc                           encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote b2 server
type Fs struct {
	name            string                                 // name of this remote
	root            string                                 // the path we are working on if any
	opt             Options                                // parsed config options
	features        *fs.Features                           // optional features
	srv             *rest.Client                           // the connection to the b2 server
	rootBucket      string                                 // bucket part of root (if any)
	rootDirectory   string                                 // directory part of root (if any)
	cache           *bucket.Cache                          // cache for bucket creation status
	bucketIDMutex   sync.Mutex                             // mutex to protect _bucketID
	_bucketID       map[string]string                      // the ID of the bucket we are working on
	bucketTypeMutex sync.Mutex                             // mutex to protect _bucketType
	_bucketType     map[string]string                      // the Type of the bucket we are working on
	info            api.AuthorizeAccountResponse           // result of authorize call
	uploadMu        sync.Mutex                             // lock for upload variable
	uploads         map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
	authMu          sync.Mutex                             // lock for authorizing the account
	pacer           *fs.Pacer                              // To pace and retry the API calls
	bufferTokens    chan []byte                            // control concurrency of multipart uploads
}

// Object describes a b2 object
type Object struct {
	fs       *Fs       // what this object is part of
	remote   string    // The remote path
	id       string    // b2 id of the file
	modTime  time.Time // The modified time of the object if known
	sha1     string    // SHA-1 hash if known
	size     int64     // Size of the object
	mimeType string    // Content-Type of the object
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "B2 root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("B2 bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	return bucket.Split(path.Join(f.root, rootRelativePath))
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
	401, // Unauthorized (eg "Token has expired")
	408, // Request Timeout
	429, // Rate exceeded.
	500, // Get occasional 500 Internal Server Error
	503, // Service Unavailable
	504, // Gateway Time-out
}

// shouldRetryNoReauth returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
	// For 429 or 503 errors look at the Retry-After: header and
	// set the retry appropriately, starting with a minimum of 1
	// second if it isn't set.
	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
		var retryAfter = 1
		retryAfterString := resp.Header.Get(retryAfterHeader)
		if retryAfterString != "" {
			var err error
			retryAfter, err = strconv.Atoi(retryAfterString)
			if err != nil {
				fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
			}
		}
		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
	}
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
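
// For example, a 429 response carrying "Retry-After: 5" makes the pacer sleep
// for 5 seconds before the next attempt, while a 429 with a missing or
// malformed Retry-After header falls back to the 1 second minimum above.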

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried.  It returns the err as a convenience
func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
	if resp != nil && resp.StatusCode == 401 {
		fs.Debugf(f, "Unauthorized: %v", err)
		// Reauth
		authErr := f.authorizeAccount(ctx)
		if authErr != nil {
			err = authErr
		}
		return true, err
	}
	return f.shouldRetryNoReauth(resp, err)
}

// errorHandler parses a non 2xx error response into an error
func errorHandler(resp *http.Response) error {
	// Decode error response
	errResponse := new(api.Error)
	err := rest.DecodeJSON(resp, &errResponse)
	if err != nil {
		fs.Debugf(nil, "Couldn't decode error response: %v", err)
	}
	if errResponse.Code == "" {
		errResponse.Code = "unknown"
	}
	if errResponse.Status == 0 {
		errResponse.Status = resp.StatusCode
	}
	if errResponse.Message == "" {
		errResponse.Message = "Unknown " + resp.Status
	}
	return errResponse
}
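
// For example, a 400 response whose body is (illustrative)
//
//	{"status": 400, "code": "duplicate_bucket_name", "message": "Bucket name is already in use"}
//
// comes back from errorHandler as an *api.Error carrying those three fields,
// which is what lets makeBucket below switch on apiErr.Code.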

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
		f.fillBufferTokens() // reset the buffer tokens
	}
	return
}

func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
	if cs < opt.ChunkSize {
		return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(&f.opt, cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	ctx := context.Background()
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadCutoff(opt, opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "b2: upload cutoff")
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "b2: chunk size")
	}
	if opt.Account == "" {
		return nil, errors.New("account not found")
	}
	if opt.Key == "" {
		return nil, errors.New("key not found")
	}
	if opt.Endpoint == "" {
		opt.Endpoint = defaultEndpoint
	}
	f := &Fs{
		name:        name,
		opt:         *opt,
		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
		cache:       bucket.NewCache(),
		_bucketID:   make(map[string]string, 1),
		_bucketType: make(map[string]string, 1),
		uploads:     make(map[string][]*api.GetUploadURLResponse),
		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)
	// Set the test flag if required
	if opt.TestMode != "" {
		testMode := strings.TrimSpace(opt.TestMode)
		f.srv.SetHeader(testModeHeader, testMode)
		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
	}
	f.fillBufferTokens()
	err = f.authorizeAccount(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to authorize account")
	}
	// If this is a key limited to a single bucket, it must exist already
	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
		if allowedBucket == "" {
			return nil, errors.New("bucket that application key is restricted to no longer exists")
		}
		if allowedBucket != f.rootBucket {
			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
		}
		f.cache.MarkOK(f.rootBucket)
		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
	}
	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the (bucket,directory) is actually an existing file
		oldRoot := f.root
		newRoot, leaf := path.Split(oldRoot)
		f.setRoot(newRoot)
		_, err := f.NewObject(ctx, leaf)
		if err != nil {
			if err == fs.ErrorObjectNotFound {
				// File doesn't exist so return old f
				f.setRoot(oldRoot)
				return f, nil
			}
			return nil, err
		}
		// return an error with an fs which points to the parent
		return f, fs.ErrorIsFile
	}
	return f, nil
}
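
// As a consequence of the check above, constructing an Fs on a path that is
// actually a file, e.g.
//
//	f, err := NewFs("myb2", "bucket/dir/file.txt", m)
//
// returns an Fs rooted at "bucket/dir" together with fs.ErrorIsFile, which is
// how rclone detects a remote that points at a single file.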

// authorizeAccount gets the API endpoint and auth token.  Can be used
// for reauthentication too.
func (f *Fs) authorizeAccount(ctx context.Context) error {
	f.authMu.Lock()
	defer f.authMu.Unlock()
	opts := rest.Opts{
		Method:       "GET",
		Path:         "/b2api/v1/b2_authorize_account",
		RootURL:      f.opt.Endpoint,
		UserName:     f.opt.Account,
		Password:     f.opt.Key,
		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
		return f.shouldRetryNoReauth(resp, err)
	})
	if err != nil {
		return errors.Wrap(err, "failed to authenticate")
	}
	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
	return nil
}

// hasPermission returns if the current AuthorizationToken has the selected permission
func (f *Fs) hasPermission(permission string) bool {
	for _, capability := range f.info.Allowed.Capabilities {
		if capability == permission {
			return true
		}
	}
	return false
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
	f.uploadMu.Lock()
	defer f.uploadMu.Unlock()
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return nil, err
	}
	// look for a stored upload URL for the correct bucketID
	uploads := f.uploads[bucketID]
	if len(uploads) > 0 {
		upload, uploads = uploads[0], uploads[1:]
		f.uploads[bucketID] = uploads
		return upload, nil
	}
	// get a new upload URL since not found
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_get_upload_url",
	}
	var request = api.GetUploadURLRequest{
		BucketID: bucketID,
	}
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, errors.Wrap(err, "failed to get upload URL")
	}
	return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
	if upload == nil {
		return
	}
	f.uploadMu.Lock()
	f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
	f.uploadMu.Unlock()
}
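
// A typical call site pairs the two calls above (sketch only, not the
// backend's actual upload path):
//
//	upload, err := f.getUploadURL(ctx, bucket)
//	if err != nil {
//		return err
//	}
//	// ... POST the data to upload.UploadURL with upload.AuthorizationToken ...
//	f.returnUploadURL(upload)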

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (f *Fs) clearUploadURL(bucketID string) {
	f.uploadMu.Lock()
	delete(f.uploads, bucketID)
	f.uploadMu.Unlock()
}

// Fill up (or reset) the buffer tokens
func (f *Fs) fillBufferTokens() {
	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		f.bufferTokens <- nil
	}
}

// getUploadBlock gets a block from the pool of size chunkSize
func (f *Fs) getUploadBlock() []byte {
	buf := <-f.bufferTokens
	if buf == nil {
		buf = make([]byte, f.opt.ChunkSize)
	}
	// fs.Debugf(f, "Getting upload block %p", buf)
	return buf
}

// putUploadBlock returns a block to the pool of size chunkSize
func (f *Fs) putUploadBlock(buf []byte) {
	buf = buf[:cap(buf)]
	if len(buf) != int(f.opt.ChunkSize) {
		panic("bad blocksize returned to pool")
	}
	// fs.Debugf(f, "Returning upload block %p", buf)
	f.bufferTokens <- buf
}
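
// Because fillBufferTokens seeds the channel with nil placeholders, the pool
// never holds more than --transfers buffers and each one is allocated lazily
// on first use. Callers follow a get/put pattern (sketch):
//
//	buf := f.getUploadBlock()
//	defer f.putUploadBlock(buf)
//	// ... fill buf with up to chunk_size bytes and upload it ...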

// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		err := o.decodeMetaData(info)
		if err != nil {
			return nil, err
		}
	} else {
		err := o.readMetaData(ctx) // reads info and headers, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object
type listFn func(remote string, object *api.File, isDirectory bool) error

// errEndList is a sentinel used to end the list iteration now.
// listFn should return it to end the iteration with no errors.
var errEndList = errors.New("end list")
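
// For example, a callback can stop a listing early without reporting an
// error (sketch):
//
//	err := f.list(ctx, bucket, directory, "", false, false, 1, false, false,
//		func(remote string, object *api.File, isDirectory bool) error {
//			return errEndList // stop after the first entry
//		})
//
// getMetaData below uses exactly this pattern to read a single item.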

// list lists the objects into the function supplied from
// the bucket and root supplied
//
// (bucket, directory) is the starting directory
//
// If prefix is set then it is removed from all file names
//
// If addBucket is set then it adds the bucket to the start of the
// remotes generated
//
// If recurse is set the function will recursively list
//
// If limit is > 0 then it limits to that many files (must be less
// than 1000)
//
// If hidden is set then it will list the hidden (deleted) files too.
//
// if findFile is set it will look for files called (bucket, directory)
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
	if !findFile {
		if prefix != "" {
			prefix += "/"
		}
		if directory != "" {
			directory += "/"
		}
	}
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return err
	}
	chunkSize := 1000
	if limit > 0 {
		chunkSize = limit
	}
	var request = api.ListFileNamesRequest{
		BucketID:     bucketID,
		MaxFileCount: chunkSize,
		Prefix:       f.opt.Enc.FromStandardPath(directory),
		Delimiter:    delimiter,
	}
	if directory != "" {
		request.StartFileName = f.opt.Enc.FromStandardPath(directory)
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_list_file_names",
	}
	if hidden {
		opts.Path = "/b2_list_file_versions"
	}
	for {
		var response api.ListFileNamesResponse
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return err
		}
		for i := range response.Files {
			file := &response.Files[i]
			file.Name = f.opt.Enc.ToStandardPath(file.Name)
			// Finish if file name no longer has prefix
			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
				return nil
			}
			if !strings.HasPrefix(file.Name, prefix) {
				fs.Debugf(f, "Odd name received %q", file.Name)
				continue
			}
			remote := file.Name[len(prefix):]
			// Check for directory
			isDirectory := strings.HasSuffix(remote, "/")
			if isDirectory {
				remote = remote[:len(remote)-1]
			}
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			// Send object
			err = fn(remote, file, isDirectory)
			if err != nil {
				if err == errEndList {
					return nil
				}
				return err
			}
		}
		// end if no NextFileName
		if response.NextFileName == nil {
			break
		}
		request.StartFileName = *response.NextFileName
		if response.NextFileID != nil {
			request.StartFileID = *response.NextFileID
		}
	}
	return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{})
		return d, nil
	}
	if remote == *last {
		remote = object.UploadTimestamp.AddVersion(remote)
	} else {
		*last = remote
	}
	// hide objects represent deleted files which we don't list
	if object.Action == "hide" {
		return nil, nil
	}
	o, err := f.newObjectWithInfo(ctx, remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	last := ""
	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}

// listBuckets returns all the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
		d := fs.NewDir(bucket.Name, time.Time{})
		entries = append(entries, d)
		return nil
	})
	if err != nil {
		return nil, err
	}
	return entries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		last := ""
		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}

// listBucketFn is called from listBucketsToFn to handle a bucket
type listBucketFn func(*api.Bucket) error

// listBucketsToFn lists the buckets to the function supplied
func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
	var account = api.ListBucketsRequest{
		AccountID: f.info.AccountID,
		BucketID:  f.info.Allowed.BucketID,
	}

	var response api.ListBucketsResponse
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_list_buckets",
	}
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return err
	}
	f.bucketIDMutex.Lock()
	f.bucketTypeMutex.Lock()
	f._bucketID = make(map[string]string, 1)
	f._bucketType = make(map[string]string, 1)
	for i := range response.Buckets {
		bucket := &response.Buckets[i]
		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
		f.cache.MarkOK(bucket.Name)
		f._bucketID[bucket.Name] = bucket.ID
		f._bucketType[bucket.Name] = bucket.Type
	}
	f.bucketTypeMutex.Unlock()
	f.bucketIDMutex.Unlock()
	for i := range response.Buckets {
		bucket := &response.Buckets[i]
		err = fn(bucket)
		if err != nil {
			return err
		}
	}
	return nil
}

// getbucketType finds the bucketType for the current bucket name
// can be one of allPublic, allPrivate, or snapshot
func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType string, err error) {
	f.bucketTypeMutex.Lock()
	bucketType = f._bucketType[bucket]
	f.bucketTypeMutex.Unlock()
	if bucketType != "" {
		return bucketType, nil
	}
	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
		// listBucketsToFn reads bucket Types
		return nil
	})
	f.bucketTypeMutex.Lock()
	bucketType = f._bucketType[bucket]
	f.bucketTypeMutex.Unlock()
	if bucketType == "" {
		err = fs.ErrorDirNotFound
	}
	return bucketType, err
}

// setBucketType sets the Type for the current bucket name
func (f *Fs) setBucketType(bucket string, Type string) {
	f.bucketTypeMutex.Lock()
	f._bucketType[bucket] = Type
	f.bucketTypeMutex.Unlock()
}

// clearBucketType clears the Type for the current bucket name
func (f *Fs) clearBucketType(bucket string) {
	f.bucketTypeMutex.Lock()
	delete(f._bucketType, bucket)
	f.bucketTypeMutex.Unlock()
}

// getBucketID finds the ID for the current bucket name
func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
	f.bucketIDMutex.Lock()
	bucketID = f._bucketID[bucket]
	f.bucketIDMutex.Unlock()
	if bucketID != "" {
		return bucketID, nil
	}
	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
		// listBucketsToFn sets IDs
		return nil
	})
	f.bucketIDMutex.Lock()
	bucketID = f._bucketID[bucket]
	f.bucketIDMutex.Unlock()
	if bucketID == "" {
		err = fs.ErrorDirNotFound
	}
	return bucketID, err
}

// setBucketID sets the ID for the current bucket name
func (f *Fs) setBucketID(bucket, ID string) {
	f.bucketIDMutex.Lock()
	f._bucketID[bucket] = ID
	f.bucketIDMutex.Unlock()
}

// clearBucketID clears the ID for the current bucket name
func (f *Fs) clearBucketID(bucket string) {
	f.bucketIDMutex.Lock()
	delete(f._bucketID, bucket)
	f.bucketIDMutex.Unlock()
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	return f.cache.Create(bucket, func() error {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_create_bucket",
		}
		var request = api.CreateBucketRequest{
			AccountID: f.info.AccountID,
			Name:      f.opt.Enc.FromStandardName(bucket),
			Type:      "allPrivate",
		}
		var response api.Bucket
		err := f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			if apiErr, ok := err.(*api.Error); ok {
				if apiErr.Code == "duplicate_bucket_name" {
					// Check this is our bucket - buckets are globally unique and this
					// might be someone else's.
					_, getBucketErr := f.getBucketID(ctx, bucket)
					if getBucketErr == nil {
						// found so it is our bucket
						return nil
					}
					if getBucketErr != fs.ErrorDirNotFound {
						fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
					}
				}
			}
			return errors.Wrap(err, "failed to create bucket")
		}
		f.setBucketID(bucket, response.ID)
		f.setBucketType(bucket, response.Type)
		return nil
	}, nil)
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
		return nil
	}
	return f.cache.Remove(bucket, func() error {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_delete_bucket",
		}
		bucketID, err := f.getBucketID(ctx, bucket)
		if err != nil {
			return err
		}
		var request = api.DeleteBucketRequest{
			ID:        bucketID,
			AccountID: f.info.AccountID,
		}
		var response api.Bucket
		err = f.pacer.Call(func() (bool, error) {
			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
			return f.shouldRetry(ctx, resp, err)
		})
		if err != nil {
			return errors.Wrap(err, "failed to delete bucket")
		}
		f.clearBucketID(bucket)
		f.clearBucketType(bucket)
		f.clearUploadURL(bucketID)
		return nil
	})
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	return time.Millisecond
}

// hide hides a file on the remote
func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return err
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_hide_file",
	}
	var request = api.HideFileRequest{
		BucketID: bucketID,
		Name:     f.opt.Enc.FromStandardPath(bucketPath),
	}
	var response api.File
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		if apiErr, ok := err.(*api.Error); ok {
			if apiErr.Code == "already_hidden" {
				// sometimes eventual consistency causes this, so
				// ignore this error since it is harmless
				return nil
			}
		}
		return errors.Wrapf(err, "failed to hide %q", bucketPath)
	}
	return nil
}

// deleteByID deletes a file version given Name and ID
func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_delete_file_version",
	}
	var request = api.DeleteFileRequest{
		ID:   ID,
		Name: f.opt.Enc.FromStandardPath(Name),
	}
	var response api.File
	err := f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return errors.Wrapf(err, "failed to delete %q", Name)
	}
	return nil
}

// purge deletes all the files and directories
//
// if oldOnly is true then it deletes only non current files.
//
// Implemented here so we can make sure we delete old versions.
func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
	if bucket == "" {
		return errors.New("can't purge from root")
	}
	var errReturn error
	var checkErrMutex sync.Mutex
	var checkErr = func(err error) {
		if err == nil {
			return
		}
		checkErrMutex.Lock()
		defer checkErrMutex.Unlock()
		if errReturn == nil {
			errReturn = err
		}
	}
	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
		return time.Since(time.Time(timestamp)) > 24*time.Hour
	}

	// Delete Config.Transfers in parallel
	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
	var wg sync.WaitGroup
	wg.Add(fs.Config.Transfers)
	for i := 0; i < fs.Config.Transfers; i++ {
		go func() {
			defer wg.Done()
			for object := range toBeDeleted {
				oi, err := f.newObjectWithInfo(ctx, object.Name, object)
				if err != nil {
					fs.Errorf(object.Name, "Can't create object %v", err)
					continue
				}
				tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
				err = f.deleteByID(ctx, object.ID, object.Name)
				checkErr(err)
				tr.Done(err)
			}
		}()
	}
	last := ""
	checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			oi, err := f.newObjectWithInfo(ctx, object.Name, object)
			if err != nil {
				fs.Errorf(object, "Can't create object %+v", err)
			}
			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
			if oldOnly && last != remote {
				// Check current version of the file
				if object.Action == "hide" {
					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
					toBeDeleted <- object
				} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
					toBeDeleted <- object
				} else {
					fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
				}
			} else {
				fs.Debugf(remote, "Deleting (id %q)", object.ID)
				toBeDeleted <- object
			}
			last = remote
			tr.Done(nil)
		}
		return nil
	}))
	close(toBeDeleted)
	wg.Wait()

	if !oldOnly {
		checkErr(f.Rmdir(ctx, ""))
	}
	return errReturn
}

// Purge deletes all the files and directories including the old versions.
func (f *Fs) Purge(ctx context.Context) error {
	return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
}

// CleanUp deletes all the hidden files.
func (f *Fs) CleanUp(ctx context.Context) error {
	return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	err := f.makeBucket(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	destBucketID, err := f.getBucketID(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_copy_file",
	}
	var request = api.CopyFileRequest{
		SourceID:          srcObj.id,
		Name:              f.opt.Enc.FromStandardPath(dstPath),
		MetadataDirective: "COPY",
		DestBucketID:      destBucketID,
	}
	var response api.FileInfo
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return nil, err
	}
	o := &Object{
		fs:     f,
		remote: remote,
	}
	err = o.decodeMetaDataFileInfo(&response)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.SHA1)
}

// getDownloadAuthorization returns authorization token for downloading
// without account.
func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string) (authorization string, err error) {
	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
	}
	if !f.hasPermission("shareFiles") {
		return "", errors.New("sharing a file link requires the shareFiles permission")
	}
	bucketID, err := f.getBucketID(ctx, bucket)
	if err != nil {
		return "", err
	}
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_get_download_authorization",
	}
	var request = api.GetDownloadAuthorizationRequest{
		BucketID:               bucketID,
		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
		ValidDurationInSeconds: validDurationInSeconds,
	}
	var response api.GetDownloadAuthorizationResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
		return f.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", errors.Wrap(err, "failed to get download authorization")
	}
	return response.AuthorizationToken, nil
}

// PublicLink returns a link for downloading without account
func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
	bucket, bucketPath := f.split(remote)
	var RootURL string
	if f.opt.DownloadURL == "" {
		RootURL = f.info.DownloadURL
	} else {
		RootURL = f.opt.DownloadURL
	}
	_, err = f.NewObject(ctx, remote)
	if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
		err2 := f.list(ctx, bucket, bucketPath, f.rootDirectory, f.rootBucket == "", false, 1, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
			err = nil
			return nil
		})
		if err2 != nil {
			return "", err2
		}
	}
	if err != nil {
		return "", err
	}
	absPath := "/" + bucketPath
	link = RootURL + "/file/" + urlEncode(bucket) + absPath
	bucketType, err := f.getbucketType(ctx, bucket)
	if err != nil {
		return "", err
	}
	if bucketType == "allPrivate" || bucketType == "snapshot" {
		AuthorizationToken, err := f.getDownloadAuthorization(ctx, bucket, remote)
		if err != nil {
			return "", err
		}
		link += "?Authorization=" + AuthorizationToken
	}
	return link, nil
}
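
// The resulting link has the shape (illustrative host and token):
//
//	https://f002.backblazeb2.com/file/my-bucket/path/to/file.txt?Authorization=3_20200430...
//
// with the Authorization query parameter only appended for allPrivate and
// snapshot buckets.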

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.SHA1 {
		return "", hash.ErrUnsupported
	}
	if o.sha1 == "" {
		// Error is logged in readMetaData
		err := o.readMetaData(ctx)
		if err != nil {
			return "", err
		}
	}
	return o.sha1, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// decodeMetaDataRaw sets the metadata from the data passed in
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.sha1
func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
	o.id = ID
	o.sha1 = SHA1
	o.mimeType = mimeType
	// Read SHA1 from metadata if it exists and isn't set
	if o.sha1 == "" || o.sha1 == "none" {
		o.sha1 = Info[sha1Key]
	}
	// Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
	// Some tools (eg Cyberduck) use this
	const unverified = "unverified:"
	if strings.HasPrefix(o.sha1, unverified) {
		o.sha1 = o.sha1[len(unverified):]
	}
	o.size = Size
	// Use the UploadTimestamp if can't get file info
	o.modTime = time.Time(UploadTimestamp)
	return o.parseTimeString(Info[timeKey])
}

// decodeMetaData sets the metadata in the object from an api.File
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.sha1
func (o *Object) decodeMetaData(info *api.File) (err error) {
	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.sha1
func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
}

// getMetaData gets the metadata from the object unconditionally
func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
	bucket, bucketPath := o.split()
	maxSearched := 1
	var timestamp api.Timestamp
	if o.fs.opt.Versions {
		timestamp, bucketPath = api.RemoveVersion(bucketPath)
		maxSearched = maxVersions
	}

	err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxSearched, o.fs.opt.Versions, true, func(remote string, object *api.File, isDirectory bool) error {
		if isDirectory {
			return nil
		}
		if remote == bucketPath {
			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
				return nil
			}
			info = object
		}
		return errEndList // read only 1 item
	})
	if err != nil {
		if err == fs.ErrorDirNotFound {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}
	if info == nil {
		return nil, fs.ErrorObjectNotFound
	}
	return info, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// Sets
//  o.id
//  o.modTime
//  o.size
//  o.sha1
func (o *Object) readMetaData(ctx context.Context) (err error) {
	if o.id != "" {
		return nil
	}
	info, err := o.getMetaData(ctx)
	if err != nil {
		return err
	}
	return o.decodeMetaData(info)
}

// timeString returns modTime as the number of milliseconds
// elapsed since January 1, 1970 UTC as a decimal string.
func timeString(modTime time.Time) string {
	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
}

// parseTimeString converts a decimal string number of milliseconds
// elapsed since January 1, 1970 UTC into a time.Time and stores it in
// the modTime variable.
func (o *Object) parseTimeString(timeString string) (err error) {
	if timeString == "" {
		return nil
	}
	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
	if err != nil {
		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
		return nil
	}
	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
	return nil
}
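
// For example, a modification time of 2020-01-02T03:04:05.678Z is rendered by
// timeString as "1577934245678", and parseTimeString converts that string
// back to the same instant in UTC.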

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
//
// SHA-1 will also be updated once the request has completed.
  1502  func (o *Object) ModTime(ctx context.Context) (result time.Time) {
  1503  	// The error is logged in readMetaData
  1504  	_ = o.readMetaData(ctx)
  1505  	return o.modTime
  1506  }
  1507  
  1508  // SetModTime sets the modification time of the Object
  1509  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1510  	info, err := o.getMetaData(ctx)
  1511  	if err != nil {
  1512  		return err
  1513  	}
  1514  	_, bucketPath := o.split()
  1515  	info.Info[timeKey] = timeString(modTime)
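        	// The B2 API has no call to update metadata in place, so copy the
        	// file onto itself with MetadataDirective "REPLACE" to rewrite the
        	// stored mod time.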
  1516  	opts := rest.Opts{
  1517  		Method: "POST",
  1518  		Path:   "/b2_copy_file",
  1519  	}
  1520  	var request = api.CopyFileRequest{
  1521  		SourceID:          o.id,
  1522  		Name:              o.fs.opt.Enc.FromStandardPath(bucketPath), // copy to same name
  1523  		MetadataDirective: "REPLACE",
  1524  		ContentType:       info.ContentType,
  1525  		Info:              info.Info,
  1526  	}
  1527  	var response api.FileInfo
  1528  	err = o.fs.pacer.Call(func() (bool, error) {
  1529  		resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
  1530  		return o.fs.shouldRetry(ctx, resp, err)
  1531  	})
  1532  	if err != nil {
  1533  		return err
  1534  	}
  1535  	return o.decodeMetaDataFileInfo(&response)
  1536  }
  1537  
  1538  // Storable returns if this object is storable
  1539  func (o *Object) Storable() bool {
  1540  	return true
  1541  }
  1542  
  1543  // openFile represents an Object open for reading
  1544  type openFile struct {
  1545  	o     *Object        // Object we are reading for
  1546  	resp  *http.Response // response of the GET
  1547  	body  io.Reader      // reading from here
  1548  	hash  gohash.Hash    // currently accumulating SHA1
  1549  	bytes int64          // number of bytes read on this connection
  1550  	eof   bool           // whether we have read end of file
  1551  }
  1552  
  1553  // newOpenFile wraps an io.ReadCloser and checks the sha1sum
  1554  func newOpenFile(o *Object, resp *http.Response) *openFile {
  1555  	file := &openFile{
  1556  		o:    o,
  1557  		resp: resp,
  1558  		hash: sha1.New(),
  1559  	}
  1560  	file.body = io.TeeReader(resp.Body, file.hash)
  1561  	return file
  1562  }
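        // A minimal sketch of this pattern: each byte handed to the caller is
        // also written into the hash, so Close can verify the checksum without
        // re-reading the body:
        //
        //	h := sha1.New()
        //	r := io.TeeReader(resp.Body, h)
        //	_, _ = io.Copy(ioutil.Discard, r) // or any other consumer
        //	sum := fmt.Sprintf("%x", h.Sum(nil))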
  1563  
  1564  // Read bytes from the object - see io.Reader
  1565  func (file *openFile) Read(p []byte) (n int, err error) {
  1566  	n, err = file.body.Read(p)
  1567  	file.bytes += int64(n)
  1568  	if err == io.EOF {
  1569  		file.eof = true
  1570  	}
  1571  	return
  1572  }
  1573  
  1574  // Close closes the object and checks the length and SHA1 if all of
  1575  // the object was read
  1576  func (file *openFile) Close() (err error) {
  1577  	// Close the body at the end
  1578  	defer fs.CheckClose(file.resp.Body, &err)
  1579  
  1580  	// If not end of file then can't check SHA1
  1581  	if !file.eof {
  1582  		return nil
  1583  	}
  1584  
  1585  	// Check that we read the correct number of bytes
  1586  	if file.o.Size() != file.bytes {
  1587  		return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
  1588  	}
  1589  
  1590  	// Check the SHA1
  1591  	receivedSHA1 := file.o.sha1
  1592  	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
  1593  	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
  1594  		return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
  1595  	}
  1596  
  1597  	return nil
  1598  }
  1599  
  1600  // Check it satisfies the interfaces
  1601  var _ io.ReadCloser = &openFile{}
  1602  
  1603  // Open an object for read
  1604  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1605  	fs.FixRangeOption(options, o.size)
  1606  	opts := rest.Opts{
  1607  		Method:  "GET",
  1608  		Options: options,
  1609  	}
  1610  
  1611  	// Use the custom downloadUrl if one is set, otherwise fall back
  1612  	// to the downloadUrl returned by Backblaze at authorization time
  1613  	if o.fs.opt.DownloadURL == "" {
  1614  		opts.RootURL = o.fs.info.DownloadURL
  1615  	} else {
  1616  		opts.RootURL = o.fs.opt.DownloadURL
  1617  	}
  1618  
  1619  	// Download by id if set otherwise by name
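        	// giving either
        	//   <RootURL>/b2api/v1/b2_download_file_by_id?fileId=<id>
        	// or
        	//   <RootURL>/file/<bucket>/<path>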
  1620  	if o.id != "" {
  1621  		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
  1622  	} else {
  1623  		bucket, bucketPath := o.split()
  1624  		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
  1625  	}
  1626  	var resp *http.Response
  1627  	err = o.fs.pacer.Call(func() (bool, error) {
  1628  		resp, err = o.fs.srv.Call(ctx, &opts)
  1629  		return o.fs.shouldRetry(ctx, resp, err)
  1630  	})
  1631  	if err != nil {
  1632  		return nil, errors.Wrap(err, "failed to open for download")
  1633  	}
  1634  
  1635  	// Parse the time out of the headers if possible
  1636  	err = o.parseTimeString(resp.Header.Get(timeHeader))
  1637  	if err != nil {
  1638  		_ = resp.Body.Close()
  1639  		return nil, err
  1640  	}
  1641  	// Read sha1 from header if it isn't set
  1642  	if o.sha1 == "" {
  1643  		o.sha1 = resp.Header.Get(sha1Header)
  1644  		fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
  1645  		// if the sha1 header is "none" (as it is for large files), then
  1646  		// we need to read it from the file info metadata
  1647  		if o.sha1 == "none" {
  1648  			o.sha1 = resp.Header.Get(sha1InfoHeader)
  1649  			fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
  1650  		}
  1651  	}
  1652  	// Don't check length or hash on partial content
  1653  	if resp.StatusCode == http.StatusPartialContent {
  1654  		return resp.Body, nil
  1655  	}
  1656  	return newOpenFile(o, resp), nil
  1657  }
  1658  
  1659  // dontEncode is the characters that do not need percent-encoding
  1660  //
  1661  // The characters that do not need percent-encoding are a subset of
  1662  // the printable ASCII characters: upper-case letters, lower-case
  1663  // letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
  1664  // "*", ";", "=", ":", and "@". All other byte values in the UTF-8
  1665  // encoding must be replaced with "%" and the two-digit hex value of the byte.
  1666  const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
  1667  	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
  1668  	`0123456789` +
  1669  	`._-/~!$'()*;=:@`)
  1670  
  1671  // noNeedToEncode is a lookup table of characters which don't need % encoding
  1672  var noNeedToEncode [256]bool
  1673  
  1674  func init() {
  1675  	for _, c := range dontEncode {
  1676  		noNeedToEncode[c] = true
  1677  	}
  1678  }
  1679  
  1680  // urlEncode encodes in with % encoding
  1681  func urlEncode(in string) string {
  1682  	var out bytes.Buffer
  1683  	for i := 0; i < len(in); i++ {
  1684  		c := in[i]
  1685  		if noNeedToEncode[c] {
  1686  			_ = out.WriteByte(c)
  1687  		} else {
  1688  			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c)) // %02X zero-pads bytes < 0x10 (space-padding would break the encoding)
  1689  		}
  1690  	}
  1691  	return out.String()
  1692  }
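        // For example: "/" is deliberately left unencoded so that paths keep
        // their structure, while a space becomes "%20":
        //
        //	urlEncode("dir name/file.txt") == "dir%20name/file.txt"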
  1693  
  1694  // Update the object with the contents of the io.Reader, modTime and size
  1695  //
  1696  // The new object may have been created if an error is returned
  1697  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1698  	if o.fs.opt.Versions {
  1699  		return errNotWithVersions
  1700  	}
  1701  	size := src.Size()
  1702  
  1703  	bucket, bucketPath := o.split()
  1704  	err = o.fs.makeBucket(ctx, bucket)
  1705  	if err != nil {
  1706  		return err
  1707  	}
  1708  	if size == -1 {
  1709  		// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
  1710  		buf := o.fs.getUploadBlock()
  1711  		n, err := io.ReadFull(in, buf)
  1712  		if err == nil {
  1713  			bufReader := bufio.NewReader(in)
  1714  			in = bufReader
  1715  			_, err = bufReader.Peek(1)
  1716  		}
  1717  
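        		// At this point err classifies the read-ahead:
        		//   nil                          - a full chunk was read and more
        		//                                  data follows: stream a large upload
        		//   io.EOF / io.ErrUnexpectedEOF - the whole file fitted in one
        		//                                  chunk: do a direct upload
        		//   anything else                - a genuine read error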
  1718  		if err == nil {
  1719  			fs.Debugf(o, "File is big enough for chunked streaming")
  1720  			up, err := o.fs.newLargeUpload(ctx, o, in, src)
  1721  			if err != nil {
  1722  				o.fs.putUploadBlock(buf)
  1723  				return err
  1724  			}
  1725  			return up.Stream(ctx, buf)
  1726  		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
  1727  			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
  1728  			defer o.fs.putUploadBlock(buf)
  1729  			size = int64(n)
  1730  			in = bytes.NewReader(buf[:n])
  1731  		} else {
  1732  			return err
  1733  		}
  1734  	} else if size > int64(o.fs.opt.UploadCutoff) {
  1735  		up, err := o.fs.newLargeUpload(ctx, o, in, src)
  1736  		if err != nil {
  1737  			return err
  1738  		}
  1739  		return up.Upload(ctx)
  1740  	}
  1741  
  1742  	modTime := src.ModTime(ctx)
  1743  
  1744  	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
  1745  	if calculatedSha1 == "" {
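        		// B2 accepts the literal value "hex_digits_at_end" in
        		// X-Bz-Content-Sha1 to mean the 40 hex digits of the SHA1
        		// follow the content - hence size grows by AdditionalLength()
        		// below.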
  1746  		calculatedSha1 = "hex_digits_at_end"
  1747  		har := newHashAppendingReader(in, sha1.New())
  1748  		size += int64(har.AdditionalLength())
  1749  		in = har
  1750  	}
  1751  
  1752  	// Get upload URL
  1753  	upload, err := o.fs.getUploadURL(ctx, bucket)
  1754  	if err != nil {
  1755  		return err
  1756  	}
  1757  	defer func() {
  1758  		// return it like this because we might nil it out
  1759  		o.fs.returnUploadURL(upload)
  1760  	}()
  1761  
  1762  	// Headers for upload file
  1763  	//
  1764  	// Authorization
  1765  	// required
  1766  	// An upload authorization token, from b2_get_upload_url.
  1767  	//
  1768  	// X-Bz-File-Name
  1769  	// required
  1770  	//
  1771  	// The name of the file, in percent-encoded UTF-8. See Files for requirements on file names. See String Encoding.
  1772  	//
  1773  	// Content-Type
  1774  	// required
  1775  	//
  1776  	// The MIME type of the content of the file, which will be returned in
  1777  	// the Content-Type header when downloading the file. Use the
  1778  	// Content-Type b2/x-auto to automatically set the stored Content-Type
  1779  	// post upload. In the case where a file extension is absent or the
  1780  	// lookup fails, the Content-Type is set to application/octet-stream. The
  1781  	// Content-Type mappings can be perused in the B2 documentation.
  1782  	//
  1783  	// X-Bz-Content-Sha1
  1784  	// required
  1785  	//
  1786  	// The SHA1 checksum of the content of the file. B2 will check this when
  1787  	// the file is uploaded, to make sure that the file arrived correctly. It
  1788  	// will be returned in the X-Bz-Content-Sha1 header when the file is
  1789  	// downloaded.
  1790  	//
  1791  	// X-Bz-Info-src_last_modified_millis
  1792  	// optional
  1793  	//
  1794  	// If the original source of the file being uploaded has a last modified
  1795  	// time concept, Backblaze recommends using this spelling of one of your
  1796  	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
  1797  	// different B2 clients and the B2 web user interface to interoperate
  1798  	// correctly. The value should be a base 10 number which represents a UTC
  1799  	// time when the original source file was last modified. It is a base 10
  1800  	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
  1801  	// in a 64 bit integer such as the type "long" in the programming
  1802  	// language Java. It is intended to be compatible with Java's time
  1803  	// long. For example, it can be passed directly into the Java call
  1804  	// Date.setTime(long time).
  1805  	//
  1806  	// X-Bz-Info-*
  1807  	// optional
  1808  	//
  1809  	// Up to 10 of these headers may be present. The * part of the header
  1810  	// name is replaced with the name of a custom field in the file
  1811  	// information stored with the file, and the value is an arbitrary UTF-8
  1812  	// string, percent-encoded. The same info headers sent with the upload
  1813  	// will be returned with the download.
  1814  
  1815  	opts := rest.Opts{
  1816  		Method:  "POST",
  1817  		RootURL: upload.UploadURL,
  1818  		Body:    in,
  1819  		ExtraHeaders: map[string]string{
  1820  			"Authorization":  upload.AuthorizationToken,
  1821  			"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
  1822  			"Content-Type":   fs.MimeType(ctx, src),
  1823  			sha1Header:       calculatedSha1,
  1824  			timeHeader:       timeString(modTime),
  1825  		},
  1826  		ContentLength: &size,
  1827  	}
  1828  	var response api.FileInfo
  1829  	// Don't retry, return a retry error instead
  1830  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1831  		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &response)
  1832  		retry, err := o.fs.shouldRetry(ctx, resp, err)
  1833  		// On retryable error clear UploadURL
  1834  		if retry {
  1835  			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
  1836  			upload = nil
  1837  		}
  1838  		return retry, err
  1839  	})
  1840  	if err != nil {
  1841  		return err
  1842  	}
  1843  	return o.decodeMetaDataFileInfo(&response)
  1844  }
  1845  
  1846  // Remove an object
  1847  func (o *Object) Remove(ctx context.Context) error {
  1848  	bucket, bucketPath := o.split()
  1849  	if o.fs.opt.Versions {
  1850  		return errNotWithVersions
  1851  	}
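        	// Without --b2-hard-delete a remove only hides the file, leaving
        	// any previous versions in place; a hard delete removes this
        	// version permanently.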
  1852  	if o.fs.opt.HardDelete {
  1853  		return o.fs.deleteByID(ctx, o.id, bucketPath)
  1854  	}
  1855  	return o.fs.hide(ctx, bucket, bucketPath)
  1856  }
  1857  
  1858  // MimeType of an Object if known, "" otherwise
  1859  func (o *Object) MimeType(ctx context.Context) string {
  1860  	return o.mimeType
  1861  }
  1862  
  1863  // ID returns the ID of the Object if known, or "" if not
  1864  func (o *Object) ID() string {
  1865  	return o.id
  1866  }
  1867  
  1868  // Check the interfaces are satisfied
  1869  var (
  1870  	_ fs.Fs           = &Fs{}
  1871  	_ fs.Purger       = &Fs{}
  1872  	_ fs.Copier       = &Fs{}
  1873  	_ fs.PutStreamer  = &Fs{}
  1874  	_ fs.CleanUpper   = &Fs{}
  1875  	_ fs.ListRer      = &Fs{}
  1876  	_ fs.PublicLinker = &Fs{}
  1877  	_ fs.Object       = &Object{}
  1878  	_ fs.MimeTyper    = &Object{}
  1879  	_ fs.IDer         = &Object{}
  1880  )