github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/b2/b2.go

     1  // Package b2 provides an interface to the Backblaze B2 object storage system
     2  package b2
     3  
     4  // FIXME should we remove sha1 checks from here as rclone now supports
     5  // checking SHA1s?
     6  
     7  import (
     8  	"bufio"
     9  	"bytes"
    10  	"context"
    11  	"crypto/sha1"
    12  	"fmt"
    13  	gohash "hash"
    14  	"io"
    15  	"net/http"
    16  	"path"
    17  	"strconv"
    18  	"strings"
    19  	"sync"
    20  	"time"
    21  
    22  	"github.com/pkg/errors"
    23  	"github.com/rclone/rclone/backend/b2/api"
    24  	"github.com/rclone/rclone/fs"
    25  	"github.com/rclone/rclone/fs/accounting"
    26  	"github.com/rclone/rclone/fs/config"
    27  	"github.com/rclone/rclone/fs/config/configmap"
    28  	"github.com/rclone/rclone/fs/config/configstruct"
    29  	"github.com/rclone/rclone/fs/fserrors"
    30  	"github.com/rclone/rclone/fs/fshttp"
    31  	"github.com/rclone/rclone/fs/hash"
    32  	"github.com/rclone/rclone/fs/walk"
    33  	"github.com/rclone/rclone/lib/bucket"
    34  	"github.com/rclone/rclone/lib/encoder"
    35  	"github.com/rclone/rclone/lib/pacer"
    36  	"github.com/rclone/rclone/lib/rest"
    37  )
    38  
    39  const (
    40  	defaultEndpoint     = "https://api.backblazeb2.com"
    41  	headerPrefix        = "x-bz-info-" // lower case as that is what the server returns
    42  	timeKey             = "src_last_modified_millis"
    43  	timeHeader          = headerPrefix + timeKey
    44  	sha1Key             = "large_file_sha1"
    45  	sha1Header          = "X-Bz-Content-Sha1"
    46  	sha1InfoHeader      = headerPrefix + sha1Key
    47  	testModeHeader      = "X-Bz-Test-Mode"
    48  	retryAfterHeader    = "Retry-After"
    49  	minSleep            = 10 * time.Millisecond
    50  	maxSleep            = 5 * time.Minute
    51  	decayConstant       = 1 // bigger for slower decay, exponential
    52  	maxParts            = 10000
    53  	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
    54  	minChunkSize        = 5 * fs.MebiByte
    55  	defaultChunkSize    = 96 * fs.MebiByte
    56  	defaultUploadCutoff = 200 * fs.MebiByte
    57  )
    58  
    59  // Globals
    60  var (
    61  	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
    62  )
    63  
    64  // Register with Fs
    65  func init() {
    66  	fs.Register(&fs.RegInfo{
    67  		Name:        "b2",
    68  		Description: "Backblaze B2",
    69  		NewFs:       NewFs,
    70  		Options: []fs.Option{{
    71  			Name:     "account",
    72  			Help:     "Account ID or Application Key ID",
    73  			Required: true,
    74  		}, {
    75  			Name:     "key",
    76  			Help:     "Application Key",
    77  			Required: true,
    78  		}, {
    79  			Name:     "endpoint",
    80  			Help:     "Endpoint for the service.\nLeave blank normally.",
    81  			Advanced: true,
    82  		}, {
    83  			Name: "test_mode",
    84  			Help: `A flag string for X-Bz-Test-Mode header for debugging.
    85  
    86  This is for debugging purposes only. Setting it to one of the strings
    87  below will cause b2 to return specific errors:
    88  
    89    * "fail_some_uploads"
    90    * "expire_some_account_authorization_tokens"
    91    * "force_cap_exceeded"
    92  
    93  These will be set in the "X-Bz-Test-Mode" header which is documented
    94  in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
    95  			Default:  "",
    96  			Hide:     fs.OptionHideConfigurator,
    97  			Advanced: true,
    98  		}, {
    99  			Name:     "versions",
   100  			Help:     "Include old versions in directory listings.\nNote that when using this, no file write operations are permitted,\nso you can't upload files or delete them.",
   101  			Default:  false,
   102  			Advanced: true,
   103  		}, {
   104  			Name:    "hard_delete",
   105  			Help:    "Permanently delete files on remote removal, otherwise hide files.",
   106  			Default: false,
   107  		}, {
   108  			Name: "upload_cutoff",
   109  			Help: `Cutoff for switching to chunked upload.
   110  
   111  Files above this size will be uploaded in chunks of "--b2-chunk-size".
   112  
   113  This value should be set no larger than 4.657GiB (== 5GB).`,
   114  			Default:  defaultUploadCutoff,
   115  			Advanced: true,
   116  		}, {
   117  			Name: "chunk_size",
   118  			Help: `Upload chunk size. Must fit in memory.
   119  
   120  When uploading large files, chunk the file into this size.  Note that
   121  these chunks are buffered in memory and there might be a maximum of
   122  "--transfers" chunks in progress at once.  5,000,000 Bytes is the
   123  minimum size.`,
   124  			Default:  defaultChunkSize,
   125  			Advanced: true,
   126  		}, {
   127  			Name: "disable_checksum",
   128  			Help: `Disable checksums for large (> upload cutoff) files.
   129  
   130  Normally rclone will calculate the SHA1 checksum of the input before
   131  uploading it so it can add it to metadata on the object. This is great
   132  for data integrity checking but can cause long delays for large files
   133  to start uploading.`,
   134  			Default:  false,
   135  			Advanced: true,
   136  		}, {
   137  			Name: "download_url",
   138  			Help: `Custom endpoint for downloads.
   139  
   140  This is usually set to a Cloudflare CDN URL as Backblaze offers
   141  free egress for data downloaded through the Cloudflare network.
   142  This is probably only useful for a public bucket.
   143  Leave blank if you want to use the endpoint provided by Backblaze.`,
   144  			Advanced: true,
   145  		}, {
   146  			Name: "download_auth_duration",
   147  			Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
   148  
   149  The duration before the download authorization token will expire.
   150  The minimum value is 1 second. The maximum value is one week.`,
   151  			Default:  fs.Duration(7 * 24 * time.Hour),
   152  			Advanced: true,
   153  		}, {
   154  			Name:     config.ConfigEncoding,
   155  			Help:     config.ConfigEncodingHelp,
   156  			Advanced: true,
   157  			// See: https://www.backblaze.com/b2/docs/files.html
   158  			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
   159  			// FIXME: allow /, but not leading, trailing or double
   160  			Default: (encoder.Display |
   161  				encoder.EncodeBackSlash |
   162  				encoder.EncodeInvalidUtf8),
   163  		}},
   164  	})
   165  }
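        // As a rough illustration (not part of the original source; all values
        // below are made-up placeholders), the options registered above map to
        // an rclone.conf section such as:
        //
        //	[myb2]
        //	type = b2
        //	account = 0123456789ab
        //	key = K001xxxxxxxxxxxxxxxxxxxxxxxxxxx
        //	hard_delete = true
        //	chunk_size = 96M
        //	upload_cutoff = 200M
        //
        // chunk_size and upload_cutoff here just restate the defaults from the
        // constants above as fs.SizeSuffix strings.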
   166  
   167  // Options defines the configuration for this backend
   168  type Options struct {
   169  	Account                       string               `config:"account"`
   170  	Key                           string               `config:"key"`
   171  	Endpoint                      string               `config:"endpoint"`
   172  	TestMode                      string               `config:"test_mode"`
   173  	Versions                      bool                 `config:"versions"`
   174  	HardDelete                    bool                 `config:"hard_delete"`
   175  	UploadCutoff                  fs.SizeSuffix        `config:"upload_cutoff"`
   176  	ChunkSize                     fs.SizeSuffix        `config:"chunk_size"`
   177  	DisableCheckSum               bool                 `config:"disable_checksum"`
   178  	DownloadURL                   string               `config:"download_url"`
   179  	DownloadAuthorizationDuration fs.Duration          `config:"download_auth_duration"`
   180  	Enc                           encoder.MultiEncoder `config:"encoding"`
   181  }
   182  
   183  // Fs represents a remote b2 server
   184  type Fs struct {
   185  	name            string                                 // name of this remote
   186  	root            string                                 // the path we are working on if any
   187  	opt             Options                                // parsed config options
   188  	features        *fs.Features                           // optional features
   189  	srv             *rest.Client                           // the connection to the b2 server
   190  	rootBucket      string                                 // bucket part of root (if any)
   191  	rootDirectory   string                                 // directory part of root (if any)
   192  	cache           *bucket.Cache                          // cache for bucket creation status
   193  	bucketIDMutex   sync.Mutex                             // mutex to protect _bucketID
   194  	_bucketID       map[string]string                      // the ID of the bucket we are working on
   195  	bucketTypeMutex sync.Mutex                             // mutex to protect _bucketType
   196  	_bucketType     map[string]string                      // the Type of the bucket we are working on
   197  	info            api.AuthorizeAccountResponse           // result of authorize call
   198  	uploadMu        sync.Mutex                             // lock for the uploads variable
   199  	uploads         map[string][]*api.GetUploadURLResponse // Upload URLs by bucketID
   200  	authMu          sync.Mutex                             // lock for authorizing the account
   201  	pacer           *fs.Pacer                              // To pace and retry the API calls
   202  	bufferTokens    chan []byte                            // control concurrency of multipart uploads
   203  }
   204  
   205  // Object describes a b2 object
   206  type Object struct {
   207  	fs       *Fs       // what this object is part of
   208  	remote   string    // The remote path
   209  	id       string    // b2 id of the file
   210  	modTime  time.Time // The modified time of the object if known
   211  	sha1     string    // SHA-1 hash if known
   212  	size     int64     // Size of the object
   213  	mimeType string    // Content-Type of the object
   214  }
   215  
   216  // ------------------------------------------------------------
   217  
   218  // Name of the remote (as passed into NewFs)
   219  func (f *Fs) Name() string {
   220  	return f.name
   221  }
   222  
   223  // Root of the remote (as passed into NewFs)
   224  func (f *Fs) Root() string {
   225  	return f.root
   226  }
   227  
   228  // String converts this Fs to a string
   229  func (f *Fs) String() string {
   230  	if f.rootBucket == "" {
   231  		return "B2 root"
   232  	}
   233  	if f.rootDirectory == "" {
   234  		return fmt.Sprintf("B2 bucket %s", f.rootBucket)
   235  	}
   236  	return fmt.Sprintf("B2 bucket %s path %s", f.rootBucket, f.rootDirectory)
   237  }
   238  
   239  // Features returns the optional features of this Fs
   240  func (f *Fs) Features() *fs.Features {
   241  	return f.features
   242  }
   243  
   244  // parsePath parses a remote 'url'
   245  func parsePath(path string) (root string) {
   246  	root = strings.Trim(path, "/")
   247  	return
   248  }
   249  
   250  // split returns bucket and bucketPath from the rootRelativePath
   251  // relative to f.root
   252  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
   253  	return bucket.Split(path.Join(f.root, rootRelativePath))
   254  }
   255  
   256  // split returns bucket and bucketPath from the object
   257  func (o *Object) split() (bucket, bucketPath string) {
   258  	return o.fs.split(o.remote)
   259  }
   260  
   261  // retryErrorCodes is a slice of error codes that we will retry
   262  var retryErrorCodes = []int{
   263  	401, // Unauthorized (eg "Token has expired")
   264  	408, // Request Timeout
   265  	429, // Rate exceeded.
   266  	500, // We get occasional 500 Internal Server Error
   267  	503, // Service Unavailable
   268  	504, // Gateway Time-out
   269  }
   270  
   271  // shouldRetryNoReauth returns a boolean as to whether this resp and err
   272  // deserve to be retried.  It returns the err as a convenience
   273  func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
   274  	// For 429 or 503 errors look at the Retry-After: header and
   275  	// set the retry appropriately, starting with a minimum of 1
   276  	// second if it isn't set.
   277  	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
   278  		var retryAfter = 1
   279  		retryAfterString := resp.Header.Get(retryAfterHeader)
   280  		if retryAfterString != "" {
   281  			var err error
   282  			retryAfter, err = strconv.Atoi(retryAfterString)
   283  			if err != nil {
   284  				fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
   285  			}
   286  		}
   287  		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
   288  	}
   289  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   290  }
   291  
   292  // shouldRetry returns a boolean as to whether this resp and err
   293  // deserve to be retried.  It returns the err as a convenience
   294  func (f *Fs) shouldRetry(ctx context.Context, resp *http.Response, err error) (bool, error) {
   295  	if resp != nil && resp.StatusCode == 401 {
   296  		fs.Debugf(f, "Unauthorized: %v", err)
   297  		// Reauth
   298  		authErr := f.authorizeAccount(ctx)
   299  		if authErr != nil {
   300  			err = authErr
   301  		}
   302  		return true, err
   303  	}
   304  	return f.shouldRetryNoReauth(resp, err)
   305  }
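        // shouldRetry is the retry predicate handed to the pacer throughout
        // this file; the call sites below all follow this exact pattern:
        //
        //	err = f.pacer.Call(func() (bool, error) {
        //		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
        //		return f.shouldRetry(ctx, resp, err)
        //	})
        //
        // The pacer re-invokes the closure, backing off between attempts, for
        // as long as it returns (true, err); a 401 reauthorizes the account
        // first so the retry goes out with a fresh token.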
   306  
   307  // errorHandler parses a non-2xx error response into an error
   308  func errorHandler(resp *http.Response) error {
   309  	// Decode error response
   310  	errResponse := new(api.Error)
   311  	err := rest.DecodeJSON(resp, &errResponse)
   312  	if err != nil {
   313  		fs.Debugf(nil, "Couldn't decode error response: %v", err)
   314  	}
   315  	if errResponse.Code == "" {
   316  		errResponse.Code = "unknown"
   317  	}
   318  	if errResponse.Status == 0 {
   319  		errResponse.Status = resp.StatusCode
   320  	}
   321  	if errResponse.Message == "" {
   322  		errResponse.Message = "Unknown " + resp.Status
   323  	}
   324  	return errResponse
   325  }
   326  
   327  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   328  	if cs < minChunkSize {
   329  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   330  	}
   331  	return nil
   332  }
   333  
   334  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   335  	err = checkUploadChunkSize(cs)
   336  	if err == nil {
   337  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   338  		f.fillBufferTokens() // reset the buffer tokens
   339  	}
   340  	return
   341  }
   342  
   343  func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
   344  	if cs < opt.ChunkSize {
   345  		return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
   346  	}
   347  	return nil
   348  }
   349  
   350  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   351  	err = checkUploadCutoff(&f.opt, cs)
   352  	if err == nil {
   353  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   354  	}
   355  	return
   356  }
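        // A worked example of how these two limits interact, using the default
        // values from the constants above: a 1 GiB upload exceeds the 200 MiB
        // cutoff, so it goes via the chunked (large file) path in
        // ceil(1024/96) == 11 chunks of at most 96 MiB each, comfortably under
        // the maxParts == 10000 limit; a 100 MiB upload stays on the
        // single-call path. checkUploadCutoff enforces cutoff >= chunk size,
        // so the chunked path is never selected for a file smaller than one
        // chunk.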
   357  
   358  // setRoot changes the root of the Fs
   359  func (f *Fs) setRoot(root string) {
   360  	f.root = parsePath(root)
   361  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
   362  }
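        // For example (illustrative paths): setRoot("bucket/dir/sub") leaves
        // f.rootBucket == "bucket" and f.rootDirectory == "dir/sub";
        // setRoot("bucket") leaves f.rootDirectory == ""; and setRoot("")
        // gives a root Fs whose listing enumerates all buckets.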
   363  
   364  // NewFs constructs an Fs from the path, bucket:path
   365  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   366  	ctx := context.Background()
   367  	// Parse config into Options struct
   368  	opt := new(Options)
   369  	err := configstruct.Set(m, opt)
   370  	if err != nil {
   371  		return nil, err
   372  	}
   373  	err = checkUploadCutoff(opt, opt.UploadCutoff)
   374  	if err != nil {
   375  		return nil, errors.Wrap(err, "b2: upload cutoff")
   376  	}
   377  	err = checkUploadChunkSize(opt.ChunkSize)
   378  	if err != nil {
   379  		return nil, errors.Wrap(err, "b2: chunk size")
   380  	}
   381  	if opt.Account == "" {
   382  		return nil, errors.New("account not found")
   383  	}
   384  	if opt.Key == "" {
   385  		return nil, errors.New("key not found")
   386  	}
   387  	if opt.Endpoint == "" {
   388  		opt.Endpoint = defaultEndpoint
   389  	}
   390  	f := &Fs{
   391  		name:        name,
   392  		opt:         *opt,
   393  		srv:         rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
   394  		cache:       bucket.NewCache(),
   395  		_bucketID:   make(map[string]string, 1),
   396  		_bucketType: make(map[string]string, 1),
   397  		uploads:     make(map[string][]*api.GetUploadURLResponse),
   398  		pacer:       fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   399  	}
   400  	f.setRoot(root)
   401  	f.features = (&fs.Features{
   402  		ReadMimeType:      true,
   403  		WriteMimeType:     true,
   404  		BucketBased:       true,
   405  		BucketBasedRootOK: true,
   406  	}).Fill(f)
   407  	// Set the test flag if required
   408  	if opt.TestMode != "" {
   409  		testMode := strings.TrimSpace(opt.TestMode)
   410  		f.srv.SetHeader(testModeHeader, testMode)
   411  		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
   412  	}
   413  	f.fillBufferTokens()
   414  	err = f.authorizeAccount(ctx)
   415  	if err != nil {
   416  		return nil, errors.Wrap(err, "failed to authorize account")
   417  	}
   418  	// If this is a key limited to a single bucket, it must exist already
   419  	if f.rootBucket != "" && f.info.Allowed.BucketID != "" {
   420  		allowedBucket := f.opt.Enc.ToStandardName(f.info.Allowed.BucketName)
   421  		if allowedBucket == "" {
   422  			return nil, errors.New("bucket that application key is restricted to no longer exists")
   423  		}
   424  		if allowedBucket != f.rootBucket {
   425  			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
   426  		}
   427  		f.cache.MarkOK(f.rootBucket)
   428  		f.setBucketID(f.rootBucket, f.info.Allowed.BucketID)
   429  	}
   430  	if f.rootBucket != "" && f.rootDirectory != "" {
   431  		// Check to see if the (bucket,directory) is actually an existing file
   432  		oldRoot := f.root
   433  		newRoot, leaf := path.Split(oldRoot)
   434  		f.setRoot(newRoot)
   435  		_, err := f.NewObject(ctx, leaf)
   436  		if err != nil {
   437  			if err == fs.ErrorObjectNotFound {
   438  				// File doesn't exist so return old f
   439  				f.setRoot(oldRoot)
   440  				return f, nil
   441  			}
   442  			return nil, err
   443  		}
   444  		// return an error with an fs which points to the parent
   445  		return f, fs.ErrorIsFile
   446  	}
   447  	return f, nil
   448  }
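        // A sketch of the "root points at a file" handling above, with
        // hypothetical names: NewFs("myb2", "bucket/dir/file.txt", m) where
        // file.txt exists returns an Fs rooted at "bucket/dir" together with
        // fs.ErrorIsFile, letting callers treat the path as a single-object
        // source; if file.txt does not exist, the root stays
        // "bucket/dir/file.txt" and the error is nil.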
   449  
   450  // authorizeAccount gets the API endpoint and auth token.  Can be used
   451  // for reauthentication too.
   452  func (f *Fs) authorizeAccount(ctx context.Context) error {
   453  	f.authMu.Lock()
   454  	defer f.authMu.Unlock()
   455  	opts := rest.Opts{
   456  		Method:       "GET",
   457  		Path:         "/b2api/v1/b2_authorize_account",
   458  		RootURL:      f.opt.Endpoint,
   459  		UserName:     f.opt.Account,
   460  		Password:     f.opt.Key,
   461  		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
   462  	}
   463  	err := f.pacer.Call(func() (bool, error) {
   464  		resp, err := f.srv.CallJSON(ctx, &opts, nil, &f.info)
   465  		return f.shouldRetryNoReauth(resp, err)
   466  	})
   467  	if err != nil {
   468  		return errors.Wrap(err, "failed to authenticate")
   469  	}
   470  	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
   471  	return nil
   472  }
   473  
   474  // hasPermission returns whether the current AuthorizationToken has the selected permission
   475  func (f *Fs) hasPermission(permission string) bool {
   476  	for _, capability := range f.info.Allowed.Capabilities {
   477  		if capability == permission {
   478  			return true
   479  		}
   480  	}
   481  	return false
   482  }
   483  
   484  // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
   485  //
   486  // This should be returned with returnUploadURL when finished
   487  func (f *Fs) getUploadURL(ctx context.Context, bucket string) (upload *api.GetUploadURLResponse, err error) {
   488  	f.uploadMu.Lock()
   489  	defer f.uploadMu.Unlock()
   490  	bucketID, err := f.getBucketID(ctx, bucket)
   491  	if err != nil {
   492  		return nil, err
   493  	}
   494  	// look for a stored upload URL for the correct bucketID
   495  	uploads := f.uploads[bucketID]
   496  	if len(uploads) > 0 {
   497  		upload, uploads = uploads[0], uploads[1:]
   498  		f.uploads[bucketID] = uploads
   499  		return upload, nil
   500  	}
   501  	// get a new upload URL since not found
   502  	opts := rest.Opts{
   503  		Method: "POST",
   504  		Path:   "/b2_get_upload_url",
   505  	}
   506  	var request = api.GetUploadURLRequest{
   507  		BucketID: bucketID,
   508  	}
   509  	err = f.pacer.Call(func() (bool, error) {
   510  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &upload)
   511  		return f.shouldRetry(ctx, resp, err)
   512  	})
   513  	if err != nil {
   514  		return nil, errors.Wrap(err, "failed to get upload URL")
   515  	}
   516  	return upload, nil
   517  }
   518  
   519  // returnUploadURL returns the UploadURL to the cache
   520  func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
   521  	if upload == nil {
   522  		return
   523  	}
   524  	f.uploadMu.Lock()
   525  	f.uploads[upload.BucketID] = append(f.uploads[upload.BucketID], upload)
   526  	f.uploadMu.Unlock()
   527  }
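        // The upload URL pool is used in a check-out/check-in pairing, roughly
        // like this (field names as declared in backend/b2/api):
        //
        //	upload, err := f.getUploadURL(ctx, bucket)
        //	if err != nil {
        //		return err
        //	}
        //	// ... POST the data to upload.UploadURL, authenticating
        //	// with upload.AuthorizationToken ...
        //	f.returnUploadURL(upload) // recycle the URL on success
        //
        // A caller whose upload invalidated the URL simply drops it (or calls
        // clearUploadURL) rather than returning it to the pool.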
   528  
   529  // clearUploadURL clears the current UploadURL and the AuthorizationToken
   530  func (f *Fs) clearUploadURL(bucketID string) {
   531  	f.uploadMu.Lock()
   532  	delete(f.uploads, bucketID)
   533  	f.uploadMu.Unlock()
   534  }
   535  
   536  // Fill up (or reset) the buffer tokens
   537  func (f *Fs) fillBufferTokens() {
   538  	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
   539  	for i := 0; i < fs.Config.Transfers; i++ {
   540  		f.bufferTokens <- nil
   541  	}
   542  }
   543  
   544  // getUploadBlock gets a block from the pool of size chunkSize
   545  func (f *Fs) getUploadBlock() []byte {
   546  	buf := <-f.bufferTokens
   547  	if buf == nil {
   548  		buf = make([]byte, f.opt.ChunkSize)
   549  	}
   550  	// fs.Debugf(f, "Getting upload block %p", buf)
   551  	return buf
   552  }
   553  
   554  // putUploadBlock returns a block to the pool of size chunkSize
   555  func (f *Fs) putUploadBlock(buf []byte) {
   556  	buf = buf[:cap(buf)]
   557  	if len(buf) != int(f.opt.ChunkSize) {
   558  		panic("bad blocksize returned to pool")
   559  	}
   560  	// fs.Debugf(f, "Returning upload block %p", buf)
   561  	f.bufferTokens <- buf
   562  }
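        // fillBufferTokens, getUploadBlock and putUploadBlock together form a
        // simple buffer pool: the channel holds fs.Config.Transfers tokens, so
        // at most that many chunk buffers can be checked out at once, bounding
        // multipart upload memory at roughly Transfers * ChunkSize (with
        // rclone's default --transfers 4 and the 96 MiB default chunk size,
        // about 384 MiB). Buffers are allocated lazily on first use and then
        // recycled.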
   563  
   564  // Return an Object from a path
   565  //
   566  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   567  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
   568  	o := &Object{
   569  		fs:     f,
   570  		remote: remote,
   571  	}
   572  	if info != nil {
   573  		err := o.decodeMetaData(info)
   574  		if err != nil {
   575  			return nil, err
   576  		}
   577  	} else {
   578  		err := o.readMetaData(ctx) // reads info and headers, returning an error
   579  		if err != nil {
   580  			return nil, err
   581  		}
   582  	}
   583  	return o, nil
   584  }
   585  
   586  // NewObject finds the Object at remote.  If it can't be found
   587  // it returns the error fs.ErrorObjectNotFound.
   588  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   589  	return f.newObjectWithInfo(ctx, remote, nil)
   590  }
   591  
   592  // listFn is called from list to handle an object
   593  type listFn func(remote string, object *api.File, isDirectory bool) error
   594  
   595  // errEndList is a sentinel used to end the list iteration now.
   596  // listFn should return it to end the iteration with no errors.
   597  var errEndList = errors.New("end list")
   598  
   599  // list lists the objects into the function supplied from
   600  // the bucket and root supplied
   601  //
   602  // (bucket, directory) is the starting directory
   603  //
   604  // If prefix is set then it is removed from all file names
   605  //
   606  // If addBucket is set then it adds the bucket to the start of the
   607  // remotes generated
   608  //
   609  // If recurse is set the function will recursively list
   610  //
   611  // If limit is > 0 then it limits to that many files (must be less
   612  // than 1000)
   613  //
   614  // If hidden is set then it will list the hidden (deleted) files too.
   615  //
   616  // If findFile is set it will look for files called (bucket, directory)
   617  func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, limit int, hidden bool, findFile bool, fn listFn) error {
   618  	if !findFile {
   619  		if prefix != "" {
   620  			prefix += "/"
   621  		}
   622  		if directory != "" {
   623  			directory += "/"
   624  		}
   625  	}
   626  	delimiter := ""
   627  	if !recurse {
   628  		delimiter = "/"
   629  	}
   630  	bucketID, err := f.getBucketID(ctx, bucket)
   631  	if err != nil {
   632  		return err
   633  	}
   634  	chunkSize := 1000
   635  	if limit > 0 {
   636  		chunkSize = limit
   637  	}
   638  	var request = api.ListFileNamesRequest{
   639  		BucketID:     bucketID,
   640  		MaxFileCount: chunkSize,
   641  		Prefix:       f.opt.Enc.FromStandardPath(directory),
   642  		Delimiter:    delimiter,
   643  	}
   644  	if directory != "" {
   645  		request.StartFileName = f.opt.Enc.FromStandardPath(directory)
   646  	}
   647  	opts := rest.Opts{
   648  		Method: "POST",
   649  		Path:   "/b2_list_file_names",
   650  	}
   651  	if hidden {
   652  		opts.Path = "/b2_list_file_versions"
   653  	}
   654  	for {
   655  		var response api.ListFileNamesResponse
   656  		err := f.pacer.Call(func() (bool, error) {
   657  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
   658  			return f.shouldRetry(ctx, resp, err)
   659  		})
   660  		if err != nil {
   661  			return err
   662  		}
   663  		for i := range response.Files {
   664  			file := &response.Files[i]
   665  			file.Name = f.opt.Enc.ToStandardPath(file.Name)
   666  			// Finish if file name no longer has prefix
   667  			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
   668  				return nil
   669  			}
   670  			if !strings.HasPrefix(file.Name, prefix) {
   671  				fs.Debugf(f, "Odd name received %q", file.Name)
   672  				continue
   673  			}
   674  			remote := file.Name[len(prefix):]
   675  			// Check for directory
   676  			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
   677  			if isDirectory && remote != "" {
   678  				remote = remote[:len(remote)-1]
   679  			}
   680  			if addBucket {
   681  				remote = path.Join(bucket, remote)
   682  			}
   683  			// Send object
   684  			err = fn(remote, file, isDirectory)
   685  			if err != nil {
   686  				if err == errEndList {
   687  					return nil
   688  				}
   689  				return err
   690  			}
   691  		}
   692  		// end if no NextFileName
   693  		if response.NextFileName == nil {
   694  			break
   695  		}
   696  		request.StartFileName = *response.NextFileName
   697  		if response.NextFileID != nil {
   698  			request.StartFileID = *response.NextFileID
   699  		}
   700  	}
   701  	return nil
   702  }
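        // Listing is paginated: each b2_list_file_names (or
        // b2_list_file_versions) call returns at most chunkSize entries plus a
        // NextFileName/NextFileID cursor, which the loop above feeds back in
        // through request.StartFileName/StartFileID until NextFileName comes
        // back nil. At the 1000-entry page size, a bucket holding 2500 files
        // takes three API calls to list.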
   703  
   704  // Convert a list item into a DirEntry
   705  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
   706  	if isDirectory {
   707  		d := fs.NewDir(remote, time.Time{})
   708  		return d, nil
   709  	}
   710  	if remote == *last {
   711  		remote = object.UploadTimestamp.AddVersion(remote)
   712  	} else {
   713  		*last = remote
   714  	}
   715  	// hide objects represent deleted files which we don't list
   716  	if object.Action == "hide" {
   717  		return nil, nil
   718  	}
   719  	o, err := f.newObjectWithInfo(ctx, remote, object)
   720  	if err != nil {
   721  		return nil, err
   722  	}
   723  	return o, nil
   724  }
   725  
   726  // listDir lists a single directory
   727  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
   728  	last := ""
   729  	err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
   730  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
   731  		if err != nil {
   732  			return err
   733  		}
   734  		if entry != nil {
   735  			entries = append(entries, entry)
   736  		}
   737  		return nil
   738  	})
   739  	if err != nil {
   740  		return nil, err
   741  	}
   742  	// bucket must be present if listing succeeded
   743  	f.cache.MarkOK(bucket)
   744  	return entries, nil
   745  }
   746  
   747  // listBuckets returns all the buckets as directory entries
   748  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
   749  	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
   750  		d := fs.NewDir(bucket.Name, time.Time{})
   751  		entries = append(entries, d)
   752  		return nil
   753  	})
   754  	if err != nil {
   755  		return nil, err
   756  	}
   757  	return entries, nil
   758  }
   759  
   760  // List the objects and directories in dir into entries.  The
   761  // entries can be returned in any order but should be for a
   762  // complete directory.
   763  //
   764  // dir should be "" to list the root, and should not have
   765  // trailing slashes.
   766  //
   767  // This should return ErrDirNotFound if the directory isn't
   768  // found.
   769  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   770  	bucket, directory := f.split(dir)
   771  	if bucket == "" {
   772  		if directory != "" {
   773  			return nil, fs.ErrorListBucketRequired
   774  		}
   775  		return f.listBuckets(ctx)
   776  	}
   777  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
   778  }
   779  
   780  // ListR lists the objects and directories of the Fs starting
   781  // from dir recursively into out.
   782  //
   783  // dir should be "" to start from the root, and should not
   784  // have trailing slashes.
   785  //
   786  // This should return ErrDirNotFound if the directory isn't
   787  // found.
   788  //
   789  // It should call callback for each tranche of entries read.
   790  // These need not be returned in any particular order.  If
   791  // callback returns an error then the listing will stop
   792  // immediately.
   793  //
   794  // Don't implement this unless you have a more efficient way
   795  // of listing recursively than doing a directory traversal.
   796  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
   797  	bucket, directory := f.split(dir)
   798  	list := walk.NewListRHelper(callback)
   799  	listR := func(bucket, directory, prefix string, addBucket bool) error {
   800  		last := ""
   801  		return f.list(ctx, bucket, directory, prefix, addBucket, true, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
   802  			entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
   803  			if err != nil {
   804  				return err
   805  			}
   806  			return list.Add(entry)
   807  		})
   808  	}
   809  	if bucket == "" {
   810  		entries, err := f.listBuckets(ctx)
   811  		if err != nil {
   812  			return err
   813  		}
   814  		for _, entry := range entries {
   815  			err = list.Add(entry)
   816  			if err != nil {
   817  				return err
   818  			}
   819  			bucket := entry.Remote()
   820  			err = listR(bucket, "", f.rootDirectory, true)
   821  			if err != nil {
   822  				return err
   823  			}
   824  			// bucket must be present if listing succeeded
   825  			f.cache.MarkOK(bucket)
   826  		}
   827  	} else {
   828  		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
   829  		if err != nil {
   830  			return err
   831  		}
   832  		// bucket must be present if listing succeeded
   833  		f.cache.MarkOK(bucket)
   834  	}
   835  	return list.Flush()
   836  }
   837  
   838  // listBucketFn is called from listBucketsToFn to handle a bucket
   839  type listBucketFn func(*api.Bucket) error
   840  
   841  // listBucketsToFn lists the buckets to the function supplied
   842  func (f *Fs) listBucketsToFn(ctx context.Context, fn listBucketFn) error {
   843  	var account = api.ListBucketsRequest{
   844  		AccountID: f.info.AccountID,
   845  		BucketID:  f.info.Allowed.BucketID,
   846  	}
   847  
   848  	var response api.ListBucketsResponse
   849  	opts := rest.Opts{
   850  		Method: "POST",
   851  		Path:   "/b2_list_buckets",
   852  	}
   853  	err := f.pacer.Call(func() (bool, error) {
   854  		resp, err := f.srv.CallJSON(ctx, &opts, &account, &response)
   855  		return f.shouldRetry(ctx, resp, err)
   856  	})
   857  	if err != nil {
   858  		return err
   859  	}
   860  	f.bucketIDMutex.Lock()
   861  	f.bucketTypeMutex.Lock()
   862  	f._bucketID = make(map[string]string, 1)
   863  	f._bucketType = make(map[string]string, 1)
   864  	for i := range response.Buckets {
   865  		bucket := &response.Buckets[i]
   866  		bucket.Name = f.opt.Enc.ToStandardName(bucket.Name)
   867  		f.cache.MarkOK(bucket.Name)
   868  		f._bucketID[bucket.Name] = bucket.ID
   869  		f._bucketType[bucket.Name] = bucket.Type
   870  	}
   871  	f.bucketTypeMutex.Unlock()
   872  	f.bucketIDMutex.Unlock()
   873  	for i := range response.Buckets {
   874  		bucket := &response.Buckets[i]
   875  		err = fn(bucket)
   876  		if err != nil {
   877  			return err
   878  		}
   879  	}
   880  	return nil
   881  }
   882  
   883  // getbucketType finds the bucketType for the current bucket name
   884  // can be one of allPublic, allPrivate, or snapshot
   885  func (f *Fs) getbucketType(ctx context.Context, bucket string) (bucketType string, err error) {
   886  	f.bucketTypeMutex.Lock()
   887  	bucketType = f._bucketType[bucket]
   888  	f.bucketTypeMutex.Unlock()
   889  	if bucketType != "" {
   890  		return bucketType, nil
   891  	}
   892  	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
   893  		// listBucketsToFn reads bucket Types
   894  		return nil
   895  	})
   896  	f.bucketTypeMutex.Lock()
   897  	bucketType = f._bucketType[bucket]
   898  	f.bucketTypeMutex.Unlock()
   899  	if bucketType == "" {
   900  		err = fs.ErrorDirNotFound
   901  	}
   902  	return bucketType, err
   903  }
   904  
   905  // setBucketType sets the Type for the current bucket name
   906  func (f *Fs) setBucketType(bucket string, Type string) {
   907  	f.bucketTypeMutex.Lock()
   908  	f._bucketType[bucket] = Type
   909  	f.bucketTypeMutex.Unlock()
   910  }
   911  
   912  // clearBucketType clears the Type for the current bucket name
   913  func (f *Fs) clearBucketType(bucket string) {
   914  	f.bucketTypeMutex.Lock()
   915  	delete(f._bucketType, bucket)
   916  	f.bucketTypeMutex.Unlock()
   917  }
   918  
   919  // getBucketID finds the ID for the current bucket name
   920  func (f *Fs) getBucketID(ctx context.Context, bucket string) (bucketID string, err error) {
   921  	f.bucketIDMutex.Lock()
   922  	bucketID = f._bucketID[bucket]
   923  	f.bucketIDMutex.Unlock()
   924  	if bucketID != "" {
   925  		return bucketID, nil
   926  	}
   927  	err = f.listBucketsToFn(ctx, func(bucket *api.Bucket) error {
   928  		// listBucketsToFn sets IDs
   929  		return nil
   930  	})
   931  	f.bucketIDMutex.Lock()
   932  	bucketID = f._bucketID[bucket]
   933  	f.bucketIDMutex.Unlock()
   934  	if bucketID == "" {
   935  		err = fs.ErrorDirNotFound
   936  	}
   937  	return bucketID, err
   938  }
   939  
   940  // setBucketID sets the ID for the current bucket name
   941  func (f *Fs) setBucketID(bucket, ID string) {
   942  	f.bucketIDMutex.Lock()
   943  	f._bucketID[bucket] = ID
   944  	f.bucketIDMutex.Unlock()
   945  }
   946  
   947  // clearBucketID clears the ID for the current bucket name
   948  func (f *Fs) clearBucketID(bucket string) {
   949  	f.bucketIDMutex.Lock()
   950  	delete(f._bucketID, bucket)
   951  	f.bucketIDMutex.Unlock()
   952  }
   953  
   954  // Put the object into the bucket
   955  //
   956  // Copy the reader in to the new object which is returned
   957  //
   958  // The new object may have been created if an error is returned
   959  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   960  	// Temporary Object under construction
   961  	o := &Object{
   962  		fs:     f,
   963  		remote: src.Remote(),
   964  	}
   965  	return o, o.Update(ctx, in, src, options...)
   966  }
   967  
   968  // PutStream uploads an object of indeterminate size to the remote path with the modTime given
   969  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   970  	return f.Put(ctx, in, src, options...)
   971  }
   972  
   973  // Mkdir creates the bucket if it doesn't exist
   974  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   975  	bucket, _ := f.split(dir)
   976  	return f.makeBucket(ctx, bucket)
   977  }
   978  
   979  // makeBucket creates the bucket if it doesn't exist
   980  func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
   981  	return f.cache.Create(bucket, func() error {
   982  		opts := rest.Opts{
   983  			Method: "POST",
   984  			Path:   "/b2_create_bucket",
   985  		}
   986  		var request = api.CreateBucketRequest{
   987  			AccountID: f.info.AccountID,
   988  			Name:      f.opt.Enc.FromStandardName(bucket),
   989  			Type:      "allPrivate",
   990  		}
   991  		var response api.Bucket
   992  		err := f.pacer.Call(func() (bool, error) {
   993  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
   994  			return f.shouldRetry(ctx, resp, err)
   995  		})
   996  		if err != nil {
   997  			if apiErr, ok := err.(*api.Error); ok {
   998  				if apiErr.Code == "duplicate_bucket_name" {
   999  					// Check this is our bucket - buckets are globally unique and this
  1000  					// might be someone else's.
  1001  					_, getBucketErr := f.getBucketID(ctx, bucket)
  1002  					if getBucketErr == nil {
  1003  						// found so it is our bucket
  1004  						return nil
  1005  					}
  1006  					if getBucketErr != fs.ErrorDirNotFound {
  1007  						fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
  1008  					}
  1009  				}
  1010  			}
  1011  			return errors.Wrap(err, "failed to create bucket")
  1012  		}
  1013  		f.setBucketID(bucket, response.ID)
  1014  		f.setBucketType(bucket, response.Type)
  1015  		return nil
  1016  	}, nil)
  1017  }
  1018  
  1019  // Rmdir deletes the bucket if the fs is at the root
  1020  //
  1021  // Returns an error if it isn't empty
  1022  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
  1023  	bucket, directory := f.split(dir)
  1024  	if bucket == "" || directory != "" {
  1025  		return nil
  1026  	}
  1027  	return f.cache.Remove(bucket, func() error {
  1028  		opts := rest.Opts{
  1029  			Method: "POST",
  1030  			Path:   "/b2_delete_bucket",
  1031  		}
  1032  		bucketID, err := f.getBucketID(ctx, bucket)
  1033  		if err != nil {
  1034  			return err
  1035  		}
  1036  		var request = api.DeleteBucketRequest{
  1037  			ID:        bucketID,
  1038  			AccountID: f.info.AccountID,
  1039  		}
  1040  		var response api.Bucket
  1041  		err = f.pacer.Call(func() (bool, error) {
  1042  			resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1043  			return f.shouldRetry(ctx, resp, err)
  1044  		})
  1045  		if err != nil {
  1046  			return errors.Wrap(err, "failed to delete bucket")
  1047  		}
  1048  		f.clearBucketID(bucket)
  1049  		f.clearBucketType(bucket)
  1050  		f.clearUploadURL(bucketID)
  1051  		return nil
  1052  	})
  1053  }
  1054  
  1055  // Precision of the remote
  1056  func (f *Fs) Precision() time.Duration {
  1057  	return time.Millisecond
  1058  }
  1059  
  1060  // hide hides a file on the remote
  1061  func (f *Fs) hide(ctx context.Context, bucket, bucketPath string) error {
  1062  	bucketID, err := f.getBucketID(ctx, bucket)
  1063  	if err != nil {
  1064  		return err
  1065  	}
  1066  	opts := rest.Opts{
  1067  		Method: "POST",
  1068  		Path:   "/b2_hide_file",
  1069  	}
  1070  	var request = api.HideFileRequest{
  1071  		BucketID: bucketID,
  1072  		Name:     f.opt.Enc.FromStandardPath(bucketPath),
  1073  	}
  1074  	var response api.File
  1075  	err = f.pacer.Call(func() (bool, error) {
  1076  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1077  		return f.shouldRetry(ctx, resp, err)
  1078  	})
  1079  	if err != nil {
  1080  		if apiErr, ok := err.(*api.Error); ok {
  1081  			if apiErr.Code == "already_hidden" {
  1082  				// sometimes eventual consistency causes this, so
  1083  				// ignore this error since it is harmless
  1084  				return nil
  1085  			}
  1086  		}
  1087  		return errors.Wrapf(err, "failed to hide %q", bucketPath)
  1088  	}
  1089  	return nil
  1090  }
  1091  
  1092  // deleteByID deletes a file version given Name and ID
  1093  func (f *Fs) deleteByID(ctx context.Context, ID, Name string) error {
  1094  	opts := rest.Opts{
  1095  		Method: "POST",
  1096  		Path:   "/b2_delete_file_version",
  1097  	}
  1098  	var request = api.DeleteFileRequest{
  1099  		ID:   ID,
  1100  		Name: f.opt.Enc.FromStandardPath(Name),
  1101  	}
  1102  	var response api.File
  1103  	err := f.pacer.Call(func() (bool, error) {
  1104  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1105  		return f.shouldRetry(ctx, resp, err)
  1106  	})
  1107  	if err != nil {
  1108  		return errors.Wrapf(err, "failed to delete %q", Name)
  1109  	}
  1110  	return nil
  1111  }
  1112  
  1113  // purge deletes all the files and directories
  1114  //
  1115  // If oldOnly is true then it deletes only non-current files.
  1116  //
  1117  // Implemented here so we can make sure we delete old versions.
  1118  func (f *Fs) purge(ctx context.Context, bucket, directory string, oldOnly bool) error {
  1119  	if bucket == "" {
  1120  		return errors.New("can't purge from root")
  1121  	}
  1122  	var errReturn error
  1123  	var checkErrMutex sync.Mutex
  1124  	var checkErr = func(err error) {
  1125  		if err == nil {
  1126  			return
  1127  		}
  1128  		checkErrMutex.Lock()
  1129  		defer checkErrMutex.Unlock()
  1130  		if errReturn == nil {
  1131  			errReturn = err
  1132  		}
  1133  	}
  1134  	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
  1135  		if time.Since(time.Time(timestamp)).Hours() > 24 {
  1136  			return true
  1137  		}
  1138  		return false
  1139  	}
  1140  
  1141  	// Delete files in parallel, up to Config.Transfers at a time
  1142  	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
  1143  	var wg sync.WaitGroup
  1144  	wg.Add(fs.Config.Transfers)
  1145  	for i := 0; i < fs.Config.Transfers; i++ {
  1146  		go func() {
  1147  			defer wg.Done()
  1148  			for object := range toBeDeleted {
  1149  				oi, err := f.newObjectWithInfo(ctx, object.Name, object)
  1150  				if err != nil {
  1151  					fs.Errorf(object.Name, "Can't create object %v", err)
  1152  					continue
  1153  				}
  1154  				tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
  1155  				err = f.deleteByID(ctx, object.ID, object.Name)
  1156  				checkErr(err)
  1157  				tr.Done(err)
  1158  			}
  1159  		}()
  1160  	}
  1161  	last := ""
  1162  	checkErr(f.list(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", true, 0, true, false, func(remote string, object *api.File, isDirectory bool) error {
  1163  		if !isDirectory {
  1164  			oi, err := f.newObjectWithInfo(ctx, object.Name, object)
  1165  			if err != nil {
  1166  				fs.Errorf(object, "Can't create object %+v", err)
  1167  			}
  1168  			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
  1169  			if oldOnly && last != remote {
  1170  				// Check current version of the file
  1171  				if object.Action == "hide" {
  1172  					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
  1173  					toBeDeleted <- object
  1174  				} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
  1175  					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
  1176  					toBeDeleted <- object
  1177  				} else {
  1178  					fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
  1179  				}
  1180  			} else {
  1181  				fs.Debugf(remote, "Deleting (id %q)", object.ID)
  1182  				toBeDeleted <- object
  1183  			}
  1184  			last = remote
  1185  			tr.Done(nil)
  1186  		}
  1187  		return nil
  1188  	}))
  1189  	close(toBeDeleted)
  1190  	wg.Wait()
  1191  
  1192  	if !oldOnly {
  1193  		checkErr(f.Rmdir(ctx, ""))
  1194  	}
  1195  	return errReturn
  1196  }
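        // The deletion fan-out above is a bounded worker pool:
        // fs.Config.Transfers goroutines consume the toBeDeleted channel while
        // the lister produces into it, and close(toBeDeleted) followed by
        // wg.Wait() drains it, so at most Transfers b2_delete_file_version
        // calls are in flight at once.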
  1197  
  1198  // Purge deletes all the files and directories including the old versions.
  1199  func (f *Fs) Purge(ctx context.Context) error {
  1200  	return f.purge(ctx, f.rootBucket, f.rootDirectory, false)
  1201  }
  1202  
  1203  // CleanUp deletes all the hidden files.
  1204  func (f *Fs) CleanUp(ctx context.Context) error {
  1205  	return f.purge(ctx, f.rootBucket, f.rootDirectory, true)
  1206  }
  1207  
  1208  // Copy src to this remote using server side copy operations.
  1209  //
  1210  // This is stored with the remote path given
  1211  //
  1212  // It returns the destination Object and a possible error
  1213  //
  1214  // Will only be called if src.Fs().Name() == f.Name()
  1215  //
  1216  // If it isn't possible then return fs.ErrorCantCopy
  1217  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1218  	dstBucket, dstPath := f.split(remote)
  1219  	err := f.makeBucket(ctx, dstBucket)
  1220  	if err != nil {
  1221  		return nil, err
  1222  	}
  1223  	srcObj, ok := src.(*Object)
  1224  	if !ok {
  1225  		fs.Debugf(src, "Can't copy - not same remote type")
  1226  		return nil, fs.ErrorCantCopy
  1227  	}
  1228  	destBucketID, err := f.getBucketID(ctx, dstBucket)
  1229  	if err != nil {
  1230  		return nil, err
  1231  	}
  1232  	opts := rest.Opts{
  1233  		Method: "POST",
  1234  		Path:   "/b2_copy_file",
  1235  	}
  1236  	var request = api.CopyFileRequest{
  1237  		SourceID:          srcObj.id,
  1238  		Name:              f.opt.Enc.FromStandardPath(dstPath),
  1239  		MetadataDirective: "COPY",
  1240  		DestBucketID:      destBucketID,
  1241  	}
  1242  	var response api.FileInfo
  1243  	err = f.pacer.Call(func() (bool, error) {
  1244  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1245  		return f.shouldRetry(ctx, resp, err)
  1246  	})
  1247  	if err != nil {
  1248  		return nil, err
  1249  	}
  1250  	o := &Object{
  1251  		fs:     f,
  1252  		remote: remote,
  1253  	}
  1254  	err = o.decodeMetaDataFileInfo(&response)
  1255  	if err != nil {
  1256  		return nil, err
  1257  	}
  1258  	return o, nil
  1259  }
  1260  
  1261  // Hashes returns the supported hash sets.
  1262  func (f *Fs) Hashes() hash.Set {
  1263  	return hash.Set(hash.SHA1)
  1264  }
  1265  
  1266  // getDownloadAuthorization returns an authorization token for downloading
  1267  // without an account.
  1268  func (f *Fs) getDownloadAuthorization(ctx context.Context, bucket, remote string) (authorization string, err error) {
  1269  	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
  1270  	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
  1271  		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
  1272  	}
  1273  	if !f.hasPermission("shareFiles") {
  1274  		return "", errors.New("sharing a file link requires the shareFiles permission")
  1275  	}
  1276  	bucketID, err := f.getBucketID(ctx, bucket)
  1277  	if err != nil {
  1278  		return "", err
  1279  	}
  1280  	opts := rest.Opts{
  1281  		Method: "POST",
  1282  		Path:   "/b2_get_download_authorization",
  1283  	}
  1284  	var request = api.GetDownloadAuthorizationRequest{
  1285  		BucketID:               bucketID,
  1286  		FileNamePrefix:         f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
  1287  		ValidDurationInSeconds: validDurationInSeconds,
  1288  	}
  1289  	var response api.GetDownloadAuthorizationResponse
  1290  	err = f.pacer.Call(func() (bool, error) {
  1291  		resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
  1292  		return f.shouldRetry(ctx, resp, err)
  1293  	})
  1294  	if err != nil {
  1295  		return "", errors.Wrap(err, "failed to get download authorization")
  1296  	}
  1297  	return response.AuthorizationToken, nil
  1298  }
  1299  
  1300  // PublicLink returns a link for downloading without an account
  1301  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  1302  	bucket, bucketPath := f.split(remote)
  1303  	var RootURL string
  1304  	if f.opt.DownloadURL == "" {
  1305  		RootURL = f.info.DownloadURL
  1306  	} else {
  1307  		RootURL = f.opt.DownloadURL
  1308  	}
  1309  	_, err = f.NewObject(ctx, remote)
  1310  	if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile {
  1311  		err2 := f.list(ctx, bucket, bucketPath, f.rootDirectory, f.rootBucket == "", false, 1, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
  1312  			err = nil
  1313  			return nil
  1314  		})
  1315  		if err2 != nil {
  1316  			return "", err2
  1317  		}
  1318  	}
  1319  	if err != nil {
  1320  		return "", err
  1321  	}
  1322  	absPath := "/" + bucketPath
  1323  	link = RootURL + "/file/" + urlEncode(bucket) + absPath
  1324  	bucketType, err := f.getbucketType(ctx, bucket)
  1325  	if err != nil {
  1326  		return "", err
  1327  	}
  1328  	if bucketType == "allPrivate" || bucketType == "snapshot" {
  1329  		AuthorizationToken, err := f.getDownloadAuthorization(ctx, bucket, remote)
  1330  		if err != nil {
  1331  			return "", err
  1332  		}
  1333  		link += "?Authorization=" + AuthorizationToken
  1334  	}
  1335  	return link, nil
  1336  }
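        // The generated link has the shape (host purely illustrative):
        //
        //	https://f002.backblazeb2.com/file/<bucket>/<path>
        //
        // with "?Authorization=<token>" appended for allPrivate and snapshot
        // buckets, and with the --b2-download-url host substituted for the
        // Backblaze one when that option is set.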
  1337  
  1338  // ------------------------------------------------------------
  1339  
  1340  // Fs returns the parent Fs
  1341  func (o *Object) Fs() fs.Info {
  1342  	return o.fs
  1343  }
  1344  
  1345  // Return a string version
  1346  func (o *Object) String() string {
  1347  	if o == nil {
  1348  		return "<nil>"
  1349  	}
  1350  	return o.remote
  1351  }
  1352  
  1353  // Remote returns the remote path
  1354  func (o *Object) Remote() string {
  1355  	return o.remote
  1356  }
  1357  
  1358  // Hash returns the SHA-1 of an object returning a lowercase hex string
  1359  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1360  	if t != hash.SHA1 {
  1361  		return "", hash.ErrUnsupported
  1362  	}
  1363  	if o.sha1 == "" {
  1364  		// Error is logged in readMetaData
  1365  		err := o.readMetaData(ctx)
  1366  		if err != nil {
  1367  			return "", err
  1368  		}
  1369  	}
  1370  	return o.sha1, nil
  1371  }
  1372  
  1373  // Size returns the size of an object in bytes
  1374  func (o *Object) Size() int64 {
  1375  	return o.size
  1376  }
  1377  
  1378  // Clean the SHA1
  1379  //
  1380  // Make sure it is lower case
  1381  //
  1382  // Remove unverified prefix - see https://www.backblaze.com/b2/docs/uploading.html
  1383  // Some tools (eg Cyberduck) use this
  1384  func cleanSHA1(sha1 string) (out string) {
  1385  	out = strings.ToLower(sha1)
  1386  	const unverified = "unverified:"
  1387  	if strings.HasPrefix(out, unverified) {
  1388  		out = out[len(unverified):]
  1389  	}
  1390  	return out
  1391  }
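        // For example, a SHA1 stored by another tool as
        // "UNVERIFIED:3CA69AA80F1EB2A28551E33F79288E7665A3C4D0" (an arbitrary
        // digest, for illustration) comes back from cleanSHA1 as
        // "3ca69aa80f1eb2a28551e33f79288e7665a3c4d0".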
  1392  
  1393  // decodeMetaDataRaw sets the metadata from the data passed in
  1394  //
  1395  // Sets
  1396  //  o.id
  1397  //  o.modTime
  1398  //  o.size
  1399  //  o.sha1
  1400  func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
  1401  	o.id = ID
  1402  	o.sha1 = SHA1
  1403  	o.mimeType = mimeType
  1404  	// Read SHA1 from metadata if it exists and o.sha1 isn't already set
  1405  	if o.sha1 == "" || o.sha1 == "none" {
  1406  		o.sha1 = Info[sha1Key]
  1407  	}
  1408  	o.sha1 = cleanSHA1(o.sha1)
  1409  	o.size = Size
  1410  	// Use the UploadTimestamp if we can't get file info
  1411  	o.modTime = time.Time(UploadTimestamp)
  1412  	return o.parseTimeString(Info[timeKey])
  1413  }
  1414  
  1415  // decodeMetaData sets the metadata in the object from an api.File
  1416  //
  1417  // Sets
  1418  //  o.id
  1419  //  o.modTime
  1420  //  o.size
  1421  //  o.sha1
  1422  func (o *Object) decodeMetaData(info *api.File) (err error) {
  1423  	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  1424  }
  1425  
  1426  // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
  1427  //
  1428  // Sets
  1429  //  o.id
  1430  //  o.modTime
  1431  //  o.size
  1432  //  o.sha1
  1433  func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
  1434  	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  1435  }
  1436  
  1437  // getMetaData gets the metadata from the object unconditionally
  1438  func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
  1439  	bucket, bucketPath := o.split()
  1440  	maxSearched := 1
  1441  	var timestamp api.Timestamp
  1442  	if o.fs.opt.Versions {
  1443  		timestamp, bucketPath = api.RemoveVersion(bucketPath)
  1444  		maxSearched = maxVersions
  1445  	}
  1446  
  1447  	err = o.fs.list(ctx, bucket, bucketPath, "", false, true, maxSearched, o.fs.opt.Versions, true, func(remote string, object *api.File, isDirectory bool) error {
  1448  		if isDirectory {
  1449  			return nil
  1450  		}
  1451  		if remote == bucketPath {
  1452  			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
  1453  				return nil
  1454  			}
  1455  			info = object
  1456  		}
  1457  		return errEndList // read only 1 item
  1458  	})
  1459  	if err != nil {
  1460  		if err == fs.ErrorDirNotFound {
  1461  			return nil, fs.ErrorObjectNotFound
  1462  		}
  1463  		return nil, err
  1464  	}
  1465  	if info == nil {
  1466  		return nil, fs.ErrorObjectNotFound
  1467  	}
  1468  	return info, nil
  1469  }
  1470  
  1471  // readMetaData gets the metadata if it hasn't already been fetched
  1472  //
  1473  // Sets
  1474  //  o.id
  1475  //  o.modTime
  1476  //  o.size
  1477  //  o.sha1
  1478  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1479  	if o.id != "" {
  1480  		return nil
  1481  	}
  1482  	info, err := o.getMetaData(ctx)
  1483  	if err != nil {
  1484  		return err
  1485  	}
  1486  	return o.decodeMetaData(info)
  1487  }
  1488  
  1489  // timeString returns modTime as the number of milliseconds
  1490  // elapsed since January 1, 1970 UTC as a decimal string.
  1491  func timeString(modTime time.Time) string {
  1492  	return strconv.FormatInt(modTime.UnixNano()/1e6, 10)
  1493  }
  1494  
  1495  // parseTimeString converts a decimal string number of milliseconds
  1496  // elapsed since January 1, 1970 UTC into a time.Time and stores it in
  1497  // the modTime variable.
  1498  func (o *Object) parseTimeString(timeString string) (err error) {
  1499  	if timeString == "" {
  1500  		return nil
  1501  	}
  1502  	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
  1503  	if err != nil {
  1504  		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
  1505  		return nil
  1506  	}
  1507  	o.modTime = time.Unix(unixMilliseconds/1e3, (unixMilliseconds%1e3)*1e6).UTC()
  1508  	return nil
  1509  }
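
        // Conversely, parseTimeString("1000000000000") sets o.modTime to
        // 2001-09-09T01:46:40Z. Note that an unparseable value is logged
        // and ignored rather than failing the whole operation.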
  1510  
  1511  // ModTime returns the modification time of the object
  1512  //
  1513  // It attempts to read the object's mtime, falling back to the
  1514  // LastModified returned in the HTTP headers if that isn't present
  1515  //
  1516  // SHA-1 will also be updated once the request has completed.
  1517  func (o *Object) ModTime(ctx context.Context) (result time.Time) {
  1518  	// The error is logged in readMetaData
  1519  	_ = o.readMetaData(ctx)
  1520  	return o.modTime
  1521  }
  1522  
  1523  // SetModTime sets the modification time of the Object
  1524  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1525  	info, err := o.getMetaData(ctx)
  1526  	if err != nil {
  1527  		return err
  1528  	}
  1529  	_, bucketPath := o.split()
  1530  	info.Info[timeKey] = timeString(modTime)
  1531  	opts := rest.Opts{
  1532  		Method: "POST",
  1533  		Path:   "/b2_copy_file",
  1534  	}
  1535  	var request = api.CopyFileRequest{
  1536  		SourceID:          o.id,
  1537  		Name:              o.fs.opt.Enc.FromStandardPath(bucketPath), // copy to same name
  1538  		MetadataDirective: "REPLACE",
  1539  		ContentType:       info.ContentType,
  1540  		Info:              info.Info,
  1541  	}
  1542  	var response api.FileInfo
  1543  	err = o.fs.pacer.Call(func() (bool, error) {
  1544  		resp, err := o.fs.srv.CallJSON(ctx, &opts, &request, &response)
  1545  		return o.fs.shouldRetry(ctx, resp, err)
  1546  	})
  1547  	if err != nil {
  1548  		return err
  1549  	}
  1550  	return o.decodeMetaDataFileInfo(&response)
  1551  }
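
        // Note that B2 has no call to update metadata in place: the
        // b2_copy_file request above copies the file onto itself with the
        // "REPLACE" metadata directive, writing a new version of the file
        // that carries the updated src_last_modified_millis value.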
  1552  
  1553  // Storable returns if this object is storable
  1554  func (o *Object) Storable() bool {
  1555  	return true
  1556  }
  1557  
  1558  // openFile represents an Object open for reading
  1559  type openFile struct {
  1560  	o     *Object        // Object we are reading for
  1561  	resp  *http.Response // response of the GET
  1562  	body  io.Reader      // reading from here
  1563  	hash  gohash.Hash    // currently accumulating SHA1
  1564  	bytes int64          // number of bytes read on this connection
  1565  	eof   bool           // whether we have read end of file
  1566  }
  1567  
  1568  // newOpenFile wraps the body of the GET response, accumulating the SHA1 for checking on Close
  1569  func newOpenFile(o *Object, resp *http.Response) *openFile {
  1570  	file := &openFile{
  1571  		o:    o,
  1572  		resp: resp,
  1573  		hash: sha1.New(),
  1574  	}
  1575  	file.body = io.TeeReader(resp.Body, file.hash)
  1576  	return file
  1577  }
  1578  
  1579  // Read bytes from the object - see io.Reader
  1580  func (file *openFile) Read(p []byte) (n int, err error) {
  1581  	n, err = file.body.Read(p)
  1582  	file.bytes += int64(n)
  1583  	if err == io.EOF {
  1584  		file.eof = true
  1585  	}
  1586  	return
  1587  }
  1588  
  1589  // Close closes the object and checks the length and SHA1 if all of
  1590  // the object was read
  1591  func (file *openFile) Close() (err error) {
  1592  	// Close the body at the end
  1593  	defer fs.CheckClose(file.resp.Body, &err)
  1594  
  1595  	// If we didn't read to the end of the file then we can't check the SHA1
  1596  	if !file.eof {
  1597  		return nil
  1598  	}
  1599  
  1600  	// Check that we read the correct number of bytes
  1601  	if file.o.Size() != file.bytes {
  1602  		return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
  1603  	}
  1604  
  1605  	// Check the SHA1
  1606  	receivedSHA1 := file.o.sha1
  1607  	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
  1608  	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
  1609  		return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
  1610  	}
  1611  
  1612  	return nil
  1613  }
  1614  
  1615  // Check it satisfies the interfaces
  1616  var _ io.ReadCloser = &openFile{}
  1617  
  1618  // Open an object for read
  1619  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1620  	fs.FixRangeOption(options, o.size)
  1621  	opts := rest.Opts{
  1622  		Method:  "GET",
  1623  		Options: options,
  1624  	}
  1625  
  1626  	// Use the download URL supplied by Backblaze unless the user
  1627  	// has configured a custom --b2-download-url
  1628  	if o.fs.opt.DownloadURL == "" {
  1629  		opts.RootURL = o.fs.info.DownloadURL
  1630  	} else {
  1631  		opts.RootURL = o.fs.opt.DownloadURL
  1632  	}
  1633  
  1634  	// Download by ID if set, otherwise by name
  1635  	if o.id != "" {
  1636  		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
  1637  	} else {
  1638  		bucket, bucketPath := o.split()
  1639  		opts.Path += "/file/" + urlEncode(o.fs.opt.Enc.FromStandardName(bucket)) + "/" + urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath))
  1640  	}
  1641  	var resp *http.Response
  1642  	err = o.fs.pacer.Call(func() (bool, error) {
  1643  		resp, err = o.fs.srv.Call(ctx, &opts)
  1644  		return o.fs.shouldRetry(ctx, resp, err)
  1645  	})
  1646  	if err != nil {
  1647  		return nil, errors.Wrap(err, "failed to open for download")
  1648  	}
  1649  
  1650  	// Parse the time out of the headers if possible
  1651  	err = o.parseTimeString(resp.Header.Get(timeHeader))
  1652  	if err != nil {
  1653  		_ = resp.Body.Close()
  1654  		return nil, err
  1655  	}
  1656  	// Read sha1 from header if it isn't set
  1657  	if o.sha1 == "" {
  1658  		o.sha1 = resp.Header.Get(sha1Header)
  1659  		fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
  1660  		// if the sha1 header is "none" (as it is for large files)
  1661  		// then we need to read it from the file info instead
  1662  		if o.sha1 == "none" {
  1663  			o.sha1 = resp.Header.Get(sha1InfoHeader)
  1664  			fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
  1665  		}
  1666  		o.sha1 = cleanSHA1(o.sha1)
  1667  	}
  1668  	// Don't check length or hash on partial content
  1669  	if resp.StatusCode == http.StatusPartialContent {
  1670  		return resp.Body, nil
  1671  	}
  1672  	return newOpenFile(o, resp), nil
  1673  }
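
        // For example, with the standard download URL a by-name download of
        // "dir/file.txt" from bucket "mybucket" fetches
        //
        //	<downloadURL>/file/mybucket/dir/file.txt
        //
        // while a by-id download fetches
        //
        //	<downloadURL>/b2api/v1/b2_download_file_by_id?fileId=<id>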
  1674  
  1675  // dontEncode is the set of characters that do not need percent-encoding
  1676  //
  1677  // The characters that do not need percent-encoding are a subset of
  1678  // the printable ASCII characters: upper-case letters, lower-case
  1679  // letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
  1680  // "*", ";", "=", ":", and "@". All other byte values in a UTF-8 string
  1681  // must be replaced with "%" and the two-digit hex value of the byte.
  1682  const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
  1683  	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
  1684  	`0123456789` +
  1685  	`._-/~!$'()*;=:@`)
  1686  
  1687  // noNeedToEncode is a bitmap of characters which don't need % encoding
  1688  var noNeedToEncode [256]bool
  1689  
  1690  func init() {
  1691  	for _, c := range dontEncode {
  1692  		noNeedToEncode[c] = true
  1693  	}
  1694  }
  1695  
  1696  // urlEncode encodes in with % encoding
  1697  func urlEncode(in string) string {
  1698  	var out bytes.Buffer
  1699  	for i := 0; i < len(in); i++ {
  1700  		c := in[i]
  1701  		if noNeedToEncode[c] {
  1702  			_ = out.WriteByte(c)
  1703  		} else {
  1704  			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c)) // %02X zero-pads single hex digits
  1705  		}
  1706  	}
  1707  	return out.String()
  1708  }
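
        // For example, urlEncode("a b/ä") leaves "a", "b" and "/" alone but
        // encodes the space as "%20" and each byte of the UTF-8 "ä"
        // (0xC3 0xA4) separately, giving "a%20b/%C3%A4".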
  1709  
  1710  // Update the object with the contents of the io.Reader, modTime and size
  1711  //
  1712  // The new object may have been created if an error is returned
  1713  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1714  	if o.fs.opt.Versions {
  1715  		return errNotWithVersions
  1716  	}
  1717  	size := src.Size()
  1718  
  1719  	bucket, bucketPath := o.split()
  1720  	err = o.fs.makeBucket(ctx, bucket)
  1721  	if err != nil {
  1722  		return err
  1723  	}
  1724  	if size == -1 {
  1725  		// Check if the file is large enough for a chunked upload (it needs to span at least two chunks)
  1726  		buf := o.fs.getUploadBlock()
  1727  		n, err := io.ReadFull(in, buf)
  1728  		if err == nil {
  1729  			bufReader := bufio.NewReader(in)
  1730  			in = bufReader
  1731  			_, err = bufReader.Peek(1)
  1732  		}
  1733  
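        		// err is nil only if ReadFull filled a whole chunk and
        		// Peek saw at least one more byte, i.e. the stream spans
        		// two or more chunks; io.EOF / io.ErrUnexpectedEOF mean
        		// that everything fitted into buf.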
  1734  		if err == nil {
  1735  			fs.Debugf(o, "File is big enough for chunked streaming")
  1736  			up, err := o.fs.newLargeUpload(ctx, o, in, src)
  1737  			if err != nil {
  1738  				o.fs.putUploadBlock(buf)
  1739  				return err
  1740  			}
  1741  			return up.Stream(ctx, buf)
  1742  		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
  1743  			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
  1744  			defer o.fs.putUploadBlock(buf)
  1745  			size = int64(n)
  1746  			in = bytes.NewReader(buf[:n])
  1747  		} else {
  1748  			return err
  1749  		}
  1750  	} else if size > int64(o.fs.opt.UploadCutoff) {
  1751  		up, err := o.fs.newLargeUpload(ctx, o, in, src)
  1752  		if err != nil {
  1753  			return err
  1754  		}
  1755  		return up.Upload(ctx)
  1756  	}
  1757  
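        	// Beyond this point we are uploading in a single call: either
        	// the size was no more than --b2-upload-cutoff, or an
        	// unknown-size stream turned out to fit in one chunk.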
  1758  	modTime := src.ModTime(ctx)
  1759  
  1760  	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
  1761  	if calculatedSha1 == "" {
  1762  		calculatedSha1 = "hex_digits_at_end"
  1763  		har := newHashAppendingReader(in, sha1.New())
  1764  		size += int64(har.AdditionalLength())
  1765  		in = har
  1766  	}
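
        	// With X-Bz-Content-Sha1 set to "hex_digits_at_end" the server
        	// reads the 40 hex digits of the SHA1 from the tail of the
        	// upload body, which is why har.AdditionalLength() extra bytes
        	// were added to the size above.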
  1767  
  1768  	// Get upload URL
  1769  	upload, err := o.fs.getUploadURL(ctx, bucket)
  1770  	if err != nil {
  1771  		return err
  1772  	}
  1773  	defer func() {
  1774  		// returned in a closure because the retry loop below may set upload to nil
  1775  		o.fs.returnUploadURL(upload)
  1776  	}()
  1777  
  1778  	// Headers for upload file
  1779  	//
  1780  	// Authorization
  1781  	// required
  1782  	// An upload authorization token, from b2_get_upload_url.
  1783  	//
  1784  	// X-Bz-File-Name
  1785  	// required
  1786  	//
  1787  	// The name of the file, in percent-encoded UTF-8. See the B2 docs on Files and on String Encoding for the requirements on file names.
  1788  	//
  1789  	// Content-Type
  1790  	// required
  1791  	//
  1792  	// The MIME type of the content of the file, which will be returned in
  1793  	// the Content-Type header when downloading the file. Use the
  1794  	// Content-Type b2/x-auto to automatically set the stored Content-Type
  1795  	// post upload. In the case where a file extension is absent or the
  1796  	// lookup fails, the Content-Type is set to application/octet-stream. The
  1797  	// Content-Type mappings can be perused in the B2 documentation.
  1798  	//
  1799  	// X-Bz-Content-Sha1
  1800  	// required
  1801  	//
  1802  	// The SHA1 checksum of the content of the file. B2 will check this when
  1803  	// the file is uploaded, to make sure that the file arrived correctly. It
  1804  	// will be returned in the X-Bz-Content-Sha1 header when the file is
  1805  	// downloaded.
  1806  	//
  1807  	// X-Bz-Info-src_last_modified_millis
  1808  	// optional
  1809  	//
  1810  	// If the original source of the file being uploaded has a last modified
  1811  	// time concept, Backblaze recommends using this spelling of one of your
  1812  	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
  1813  	// different B2 clients and the B2 web user interface to interoperate
  1814  	// correctly. The value should be a base 10 number which represents a UTC
  1815  	// time when the original source file was last modified. It is a base 10
  1816  	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
  1817  	// in a 64 bit integer such as the type "long" in the programming
  1818  	// language Java. It is intended to be compatible with Java's time
  1819  	// long. For example, it can be passed directly into the Java call
  1820  	// Date.setTime(long time).
  1821  	//
  1822  	// X-Bz-Info-*
  1823  	// optional
  1824  	//
  1825  	// Up to 10 of these headers may be present. The * part of the header
  1826  	// name is replaced with the name of a custom field in the file
  1827  	// information stored with the file, and the value is an arbitrary UTF-8
  1828  	// string, percent-encoded. The same info headers sent with the upload
  1829  	// will be returned with the download.
  1830  
  1831  	opts := rest.Opts{
  1832  		Method:  "POST",
  1833  		RootURL: upload.UploadURL,
  1834  		Body:    in,
  1835  		Options: options,
  1836  		ExtraHeaders: map[string]string{
  1837  			"Authorization":  upload.AuthorizationToken,
  1838  			"X-Bz-File-Name": urlEncode(o.fs.opt.Enc.FromStandardPath(bucketPath)),
  1839  			"Content-Type":   fs.MimeType(ctx, src),
  1840  			sha1Header:       calculatedSha1,
  1841  			timeHeader:       timeString(modTime),
  1842  		},
  1843  		ContentLength: &size,
  1844  	}
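
        	// The resulting request might look like this (values are
        	// hypothetical):
        	//
        	//	POST <uploadURL> HTTP/1.1
        	//	Authorization: <uploadAuthToken>
        	//	X-Bz-File-Name: dir/my%20file.txt
        	//	Content-Type: text/plain
        	//	X-Bz-Content-Sha1: hex_digits_at_end
        	//	x-bz-info-src_last_modified_millis: 1000000000000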
  1845  	var response api.FileInfo
  1846  	// Don't retry, return a retry error instead
  1847  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1848  		resp, err := o.fs.srv.CallJSON(ctx, &opts, nil, &response)
  1849  		retry, err := o.fs.shouldRetry(ctx, resp, err)
  1850  		// On retryable error clear UploadURL
  1851  		if retry {
  1852  			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
  1853  			upload = nil
  1854  		}
  1855  		return retry, err
  1856  	})
  1857  	if err != nil {
  1858  		return err
  1859  	}
  1860  	return o.decodeMetaDataFileInfo(&response)
  1861  }
  1862  
  1863  // Remove an object
  1864  func (o *Object) Remove(ctx context.Context) error {
  1865  	bucket, bucketPath := o.split()
  1866  	if o.fs.opt.Versions {
  1867  		return errNotWithVersions
  1868  	}
  1869  	if o.fs.opt.HardDelete {
  1870  		return o.fs.deleteByID(ctx, o.id, bucketPath)
  1871  	}
  1872  	return o.fs.hide(ctx, bucket, bucketPath)
  1873  }
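
        // By default Remove hides the file, leaving old versions in the
        // bucket; with --b2-hard-delete the current version is deleted
        // outright.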
  1874  
  1875  // MimeType of an Object if known, "" otherwise
  1876  func (o *Object) MimeType(ctx context.Context) string {
  1877  	return o.mimeType
  1878  }
  1879  
  1880  // ID returns the ID of the Object if known, or "" if not
  1881  func (o *Object) ID() string {
  1882  	return o.id
  1883  }
  1884  
  1885  // Check the interfaces are satisfied
  1886  var (
  1887  	_ fs.Fs           = &Fs{}
  1888  	_ fs.Purger       = &Fs{}
  1889  	_ fs.Copier       = &Fs{}
  1890  	_ fs.PutStreamer  = &Fs{}
  1891  	_ fs.CleanUpper   = &Fs{}
  1892  	_ fs.ListRer      = &Fs{}
  1893  	_ fs.PublicLinker = &Fs{}
  1894  	_ fs.Object       = &Object{}
  1895  	_ fs.MimeTyper    = &Object{}
  1896  	_ fs.IDer         = &Object{}
  1897  )