github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/b2/b2.go

     1  // Package b2 provides an interface to the Backblaze B2 object storage system
     2  package b2
     3  
     4  // FIXME should we remove sha1 checks from here as rclone now supports
     5  // checking SHA1s?
     6  
     7  import (
     8  	"bufio"
     9  	"bytes"
    10  	"context"
    11  	"crypto/sha1"
    12  	"fmt"
    13  	gohash "hash"
    14  	"io"
    15  	"net/http"
    16  	"path"
    17  	"regexp"
    18  	"strconv"
    19  	"strings"
    20  	"sync"
    21  	"time"
    22  
    23  	"github.com/ncw/rclone/backend/b2/api"
    24  	"github.com/ncw/rclone/fs"
    25  	"github.com/ncw/rclone/fs/accounting"
    26  	"github.com/ncw/rclone/fs/config/configmap"
    27  	"github.com/ncw/rclone/fs/config/configstruct"
    28  	"github.com/ncw/rclone/fs/fserrors"
    29  	"github.com/ncw/rclone/fs/fshttp"
    30  	"github.com/ncw/rclone/fs/hash"
    31  	"github.com/ncw/rclone/fs/walk"
    32  	"github.com/ncw/rclone/lib/pacer"
    33  	"github.com/ncw/rclone/lib/rest"
    34  	"github.com/pkg/errors"
    35  )
    36  
    37  const (
    38  	defaultEndpoint     = "https://api.backblazeb2.com"
    39  	headerPrefix        = "x-bz-info-" // lower case as that is what the server returns
    40  	timeKey             = "src_last_modified_millis"
    41  	timeHeader          = headerPrefix + timeKey
    42  	sha1Key             = "large_file_sha1"
    43  	sha1Header          = "X-Bz-Content-Sha1"
    44  	sha1InfoHeader      = headerPrefix + sha1Key
    45  	testModeHeader      = "X-Bz-Test-Mode"
    46  	retryAfterHeader    = "Retry-After"
    47  	minSleep            = 10 * time.Millisecond
    48  	maxSleep            = 5 * time.Minute
    49  	decayConstant       = 1 // bigger for slower decay, exponential
    50  	maxParts            = 10000
    51  	maxVersions         = 100 // maximum number of versions we search in --b2-versions mode
    52  	minChunkSize        = 5 * fs.MebiByte
    53  	defaultChunkSize    = 96 * fs.MebiByte
    54  	defaultUploadCutoff = 200 * fs.MebiByte
    55  )
    56  
    57  // Globals
    58  var (
    59  	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
    60  )
    61  
    62  // Register with Fs
    63  func init() {
    64  	fs.Register(&fs.RegInfo{
    65  		Name:        "b2",
    66  		Description: "Backblaze B2",
    67  		NewFs:       NewFs,
    68  		Options: []fs.Option{{
    69  			Name:     "account",
    70  			Help:     "Account ID or Application Key ID",
    71  			Required: true,
    72  		}, {
    73  			Name:     "key",
    74  			Help:     "Application Key",
    75  			Required: true,
    76  		}, {
    77  			Name:     "endpoint",
    78  			Help:     "Endpoint for the service.\nLeave blank normally.",
    79  			Advanced: true,
    80  		}, {
    81  			Name: "test_mode",
    82  			Help: `A flag string for X-Bz-Test-Mode header for debugging.
    83  
    84  This is for debugging purposes only. Setting it to one of the strings
    85  below will cause b2 to return specific errors:
    86  
    87    * "fail_some_uploads"
    88    * "expire_some_account_authorization_tokens"
    89    * "force_cap_exceeded"
    90  
    91  These will be set in the "X-Bz-Test-Mode" header which is documented
    92  in the [b2 integrations checklist](https://www.backblaze.com/b2/docs/integration_checklist.html).`,
    93  			Default:  "",
    94  			Hide:     fs.OptionHideConfigurator,
    95  			Advanced: true,
    96  		}, {
    97  			Name:     "versions",
    98  			Help:     "Include old versions in directory listings.\nNote that when using this no file write operations are permitted,\nso you can't upload files or delete them.",
    99  			Default:  false,
   100  			Advanced: true,
   101  		}, {
   102  			Name:    "hard_delete",
   103  			Help:    "Permanently delete files on remote removal, otherwise hide files.",
   104  			Default: false,
   105  		}, {
   106  			Name: "upload_cutoff",
   107  			Help: `Cutoff for switching to chunked upload.
   108  
   109  Files above this size will be uploaded in chunks of "--b2-chunk-size".
   110  
   111  This value should be set no larger than 4.657GiB (== 5GB).`,
   112  			Default:  defaultUploadCutoff,
   113  			Advanced: true,
   114  		}, {
   115  			Name: "chunk_size",
   116  			Help: `Upload chunk size. Must fit in memory.
   117  
   118  When uploading large files, chunk the file into this size.  Note that
    119  these chunks are buffered in memory and there might be a maximum of
   120  "--transfers" chunks in progress at once.  5,000,000 Bytes is the
   121  minimum size.`,
   122  			Default:  defaultChunkSize,
   123  			Advanced: true,
   124  		}, {
   125  			Name:     "disable_checksum",
   126  			Help:     `Disable checksums for large (> upload cutoff) files`,
   127  			Default:  false,
   128  			Advanced: true,
   129  		}, {
   130  			Name: "download_url",
   131  			Help: `Custom endpoint for downloads.
   132  
   133  This is usually set to a Cloudflare CDN URL as Backblaze offers
   134  free egress for data downloaded through the Cloudflare network.
   135  This is probably only useful for a public bucket.
   136  Leave blank if you want to use the endpoint provided by Backblaze.`,
   137  			Advanced: true,
   138  		}, {
   139  			Name: "download_auth_duration",
   140  			Help: `Time before the authorization token will expire in s or suffix ms|s|m|h|d.
   141  
   142  The duration before the download authorization token will expire.
   143  The minimum value is 1 second. The maximum value is one week.`,
   144  			Default:  fs.Duration(7 * 24 * time.Hour),
   145  			Advanced: true,
   146  		}},
   147  	})
   148  }
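
         // Illustrative sketch (not part of the original file): the options
         // registered above can also be supplied programmatically through a
         // configmap.Simple, which is how rclone's integration tests construct
         // backends. The account and key values below are placeholders.
         //
         //	m := configmap.Simple{
         //		"account": "0123456789ab", // placeholder application key ID
         //		"key":     "K001xxxxxxxx", // placeholder application key
         //	}
         //	f, err := NewFs("myb2", "mybucket/path", m)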
   149  
   150  // Options defines the configuration for this backend
   151  type Options struct {
   152  	Account                       string        `config:"account"`
   153  	Key                           string        `config:"key"`
   154  	Endpoint                      string        `config:"endpoint"`
   155  	TestMode                      string        `config:"test_mode"`
   156  	Versions                      bool          `config:"versions"`
   157  	HardDelete                    bool          `config:"hard_delete"`
   158  	UploadCutoff                  fs.SizeSuffix `config:"upload_cutoff"`
   159  	ChunkSize                     fs.SizeSuffix `config:"chunk_size"`
   160  	DisableCheckSum               bool          `config:"disable_checksum"`
   161  	DownloadURL                   string        `config:"download_url"`
   162  	DownloadAuthorizationDuration fs.Duration   `config:"download_auth_duration"`
   163  }
   164  
   165  // Fs represents a remote b2 server
   166  type Fs struct {
   167  	name            string                       // name of this remote
   168  	root            string                       // the path we are working on if any
   169  	opt             Options                      // parsed config options
   170  	features        *fs.Features                 // optional features
   171  	srv             *rest.Client                 // the connection to the b2 server
   172  	bucket          string                       // the bucket we are working on
   173  	bucketOKMu      sync.Mutex                   // mutex to protect bucket OK
   174  	bucketOK        bool                         // true if we have created the bucket
   175  	bucketIDMutex   sync.Mutex                   // mutex to protect _bucketID
   176  	_bucketID       string                       // the ID of the bucket we are working on
   177  	bucketTypeMutex sync.Mutex                   // mutex to protect _bucketType
   178  	_bucketType     string                       // the Type of the bucket we are working on
   179  	info            api.AuthorizeAccountResponse // result of authorize call
   180  	uploadMu        sync.Mutex                   // lock for upload variable
   181  	uploads         []*api.GetUploadURLResponse  // result of get upload URL calls
   182  	authMu          sync.Mutex                   // lock for authorizing the account
   183  	pacer           *fs.Pacer                    // To pace and retry the API calls
   184  	bufferTokens    chan []byte                  // control concurrency of multipart uploads
   185  }
   186  
   187  // Object describes a b2 object
   188  type Object struct {
   189  	fs       *Fs       // what this object is part of
   190  	remote   string    // The remote path
   191  	id       string    // b2 id of the file
   192  	modTime  time.Time // The modified time of the object if known
   193  	sha1     string    // SHA-1 hash if known
   194  	size     int64     // Size of the object
   195  	mimeType string    // Content-Type of the object
   196  }
   197  
   198  // ------------------------------------------------------------
   199  
   200  // Name of the remote (as passed into NewFs)
   201  func (f *Fs) Name() string {
   202  	return f.name
   203  }
   204  
   205  // Root of the remote (as passed into NewFs)
   206  func (f *Fs) Root() string {
   207  	if f.root == "" {
   208  		return f.bucket
   209  	}
   210  	return f.bucket + "/" + f.root
   211  }
   212  
   213  // String converts this Fs to a string
   214  func (f *Fs) String() string {
   215  	if f.root == "" {
   216  		return fmt.Sprintf("B2 bucket %s", f.bucket)
   217  	}
   218  	return fmt.Sprintf("B2 bucket %s path %s", f.bucket, f.root)
   219  }
   220  
   221  // Features returns the optional features of this Fs
   222  func (f *Fs) Features() *fs.Features {
   223  	return f.features
   224  }
   225  
   226  // Pattern to match a b2 path
   227  var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
   228  
    229  // parsePath parses a b2 'url'
   230  func parsePath(path string) (bucket, directory string, err error) {
   231  	parts := matcher.FindStringSubmatch(path)
   232  	if parts == nil {
   233  		err = errors.Errorf("couldn't find bucket in b2 path %q", path)
   234  	} else {
   235  		bucket, directory = parts[1], parts[2]
   236  		directory = strings.Trim(directory, "/")
   237  	}
   238  	return
   239  }
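
         // Illustrative example (not part of the original file) of how
         // parsePath splits a b2 path:
         //
         //	bucket, dir, _ := parsePath("mybucket/photos/2019")
         //	// bucket == "mybucket", dir == "photos/2019"
         //
         //	bucket, dir, _ = parsePath("/mybucket")
         //	// leading slashes are stripped: bucket == "mybucket", dir == ""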
   240  
   241  // retryErrorCodes is a slice of error codes that we will retry
   242  var retryErrorCodes = []int{
   243  	401, // Unauthorized (eg "Token has expired")
   244  	408, // Request Timeout
   245  	429, // Rate exceeded.
   246  	500, // Get occasional 500 Internal Server Error
   247  	503, // Service Unavailable
   248  	504, // Gateway Time-out
   249  }
   250  
    251  // shouldRetryNoReauth returns a boolean as to whether this resp and err
   252  // deserve to be retried.  It returns the err as a convenience
   253  func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
   254  	// For 429 or 503 errors look at the Retry-After: header and
   255  	// set the retry appropriately, starting with a minimum of 1
   256  	// second if it isn't set.
   257  	if resp != nil && (resp.StatusCode == 429 || resp.StatusCode == 503) {
   258  		var retryAfter = 1
   259  		retryAfterString := resp.Header.Get(retryAfterHeader)
   260  		if retryAfterString != "" {
   261  			var err error
   262  			retryAfter, err = strconv.Atoi(retryAfterString)
   263  			if err != nil {
   264  				fs.Errorf(f, "Malformed %s header %q: %v", retryAfterHeader, retryAfterString, err)
   265  			}
   266  		}
   267  		return true, pacer.RetryAfterError(err, time.Duration(retryAfter)*time.Second)
   268  	}
   269  	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
   270  }
   271  
   272  // shouldRetry returns a boolean as to whether this resp and err
   273  // deserve to be retried.  It returns the err as a convenience
   274  func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
   275  	if resp != nil && resp.StatusCode == 401 {
   276  		fs.Debugf(f, "Unauthorized: %v", err)
   277  		// Reauth
   278  		authErr := f.authorizeAccount()
   279  		if authErr != nil {
   280  			err = authErr
   281  		}
   282  		return true, err
   283  	}
   284  	return f.shouldRetryNoReauth(resp, err)
   285  }
   286  
   287  // errorHandler parses a non 2xx error response into an error
   288  func errorHandler(resp *http.Response) error {
   289  	// Decode error response
   290  	errResponse := new(api.Error)
   291  	err := rest.DecodeJSON(resp, &errResponse)
   292  	if err != nil {
   293  		fs.Debugf(nil, "Couldn't decode error response: %v", err)
   294  	}
   295  	if errResponse.Code == "" {
   296  		errResponse.Code = "unknown"
   297  	}
   298  	if errResponse.Status == 0 {
   299  		errResponse.Status = resp.StatusCode
   300  	}
   301  	if errResponse.Message == "" {
   302  		errResponse.Message = "Unknown " + resp.Status
   303  	}
   304  	return errResponse
   305  }
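
         // The b2 API reports failures as JSON with status/code/message
         // fields, which api.Error carries. Callers can type-assert on it,
         // as Mkdir and hide do below. Illustrative sketch:
         //
         //	if apiErr, ok := err.(*api.Error); ok {
         //		fs.Debugf(nil, "b2 error %d %s: %s", apiErr.Status, apiErr.Code, apiErr.Message)
         //	}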
   306  
   307  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   308  	if cs < minChunkSize {
   309  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   310  	}
   311  	return nil
   312  }
   313  
   314  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   315  	err = checkUploadChunkSize(cs)
   316  	if err == nil {
   317  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   318  		f.fillBufferTokens() // reset the buffer tokens
   319  	}
   320  	return
   321  }
   322  
   323  func checkUploadCutoff(opt *Options, cs fs.SizeSuffix) error {
   324  	if cs < opt.ChunkSize {
   325  		return errors.Errorf("%v is less than chunk size %v", cs, opt.ChunkSize)
   326  	}
   327  	return nil
   328  }
   329  
   330  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   331  	err = checkUploadCutoff(&f.opt, cs)
   332  	if err == nil {
   333  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   334  	}
   335  	return
   336  }
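
         // Illustrative sketch (example values, not part of the original
         // file) of the invariant these checks maintain: chunk_size must be
         // at least minChunkSize (5 MiB) and upload_cutoff must be at least
         // chunk_size.
         //
         //	var cutoff fs.SizeSuffix
         //	_ = cutoff.Set("200M")                   // fs.SizeSuffix parses "200M" as 200 MiB
         //	err := checkUploadCutoff(&f.opt, cutoff) // non-nil if 200 MiB < f.opt.ChunkSize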
   337  
   338  // NewFs constructs an Fs from the path, bucket:path
   339  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   340  	ctx := context.Background()
   341  	// Parse config into Options struct
   342  	opt := new(Options)
   343  	err := configstruct.Set(m, opt)
   344  	if err != nil {
   345  		return nil, err
   346  	}
   347  	err = checkUploadCutoff(opt, opt.UploadCutoff)
   348  	if err != nil {
   349  		return nil, errors.Wrap(err, "b2: upload cutoff")
   350  	}
   351  	err = checkUploadChunkSize(opt.ChunkSize)
   352  	if err != nil {
   353  		return nil, errors.Wrap(err, "b2: chunk size")
   354  	}
   355  	bucket, directory, err := parsePath(root)
   356  	if err != nil {
   357  		return nil, err
   358  	}
   359  	if opt.Account == "" {
   360  		return nil, errors.New("account not found")
   361  	}
   362  	if opt.Key == "" {
   363  		return nil, errors.New("key not found")
   364  	}
   365  	if opt.Endpoint == "" {
   366  		opt.Endpoint = defaultEndpoint
   367  	}
   368  	f := &Fs{
   369  		name:   name,
   370  		opt:    *opt,
   371  		bucket: bucket,
   372  		root:   directory,
   373  		srv:    rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
   374  		pacer:  fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
   375  	}
   376  	f.features = (&fs.Features{
   377  		ReadMimeType:  true,
   378  		WriteMimeType: true,
   379  		BucketBased:   true,
   380  	}).Fill(f)
   381  	// Set the test flag if required
   382  	if opt.TestMode != "" {
   383  		testMode := strings.TrimSpace(opt.TestMode)
   384  		f.srv.SetHeader(testModeHeader, testMode)
   385  		fs.Debugf(f, "Setting test header \"%s: %s\"", testModeHeader, testMode)
   386  	}
   387  	f.fillBufferTokens()
   388  	err = f.authorizeAccount()
   389  	if err != nil {
   390  		return nil, errors.Wrap(err, "failed to authorize account")
   391  	}
   392  	// If this is a key limited to a single bucket, it must exist already
   393  	if f.bucket != "" && f.info.Allowed.BucketID != "" {
   394  		allowedBucket := f.info.Allowed.BucketName
   395  		if allowedBucket == "" {
   396  			return nil, errors.New("bucket that application key is restricted to no longer exists")
   397  		}
   398  		if allowedBucket != f.bucket {
   399  			return nil, errors.Errorf("you must use bucket %q with this application key", allowedBucket)
   400  		}
   401  		f.markBucketOK()
   402  		f.setBucketID(f.info.Allowed.BucketID)
   403  	}
   404  	if f.root != "" {
   405  		f.root += "/"
   406  		// Check to see if the (bucket,directory) is actually an existing file
   407  		oldRoot := f.root
   408  		remote := path.Base(directory)
   409  		f.root = path.Dir(directory)
   410  		if f.root == "." {
   411  			f.root = ""
   412  		} else {
   413  			f.root += "/"
   414  		}
   415  		_, err := f.NewObject(ctx, remote)
   416  		if err != nil {
   417  			if err == fs.ErrorObjectNotFound {
   418  				// File doesn't exist so return old f
   419  				f.root = oldRoot
   420  				return f, nil
   421  			}
   422  			return nil, err
   423  		}
   424  		// return an error with an fs which points to the parent
   425  		return f, fs.ErrorIsFile
   426  	}
   427  	return f, nil
   428  }
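
         // Note the file-root behaviour above: if bucket:path names an
         // existing object, NewFs returns an Fs rooted at the parent
         // directory together with fs.ErrorIsFile. Illustrative sketch
         // (m as in the sketch after init above):
         //
         //	f, err := NewFs("myb2", "mybucket/dir/file.txt", m)
         //	if err == fs.ErrorIsFile {
         //		// file.txt exists; f is rooted at "mybucket/dir"
         //	}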
   429  
   430  // authorizeAccount gets the API endpoint and auth token.  Can be used
   431  // for reauthentication too.
   432  func (f *Fs) authorizeAccount() error {
   433  	f.authMu.Lock()
   434  	defer f.authMu.Unlock()
   435  	opts := rest.Opts{
   436  		Method:       "GET",
   437  		Path:         "/b2api/v1/b2_authorize_account",
   438  		RootURL:      f.opt.Endpoint,
   439  		UserName:     f.opt.Account,
   440  		Password:     f.opt.Key,
   441  		ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
   442  	}
   443  	err := f.pacer.Call(func() (bool, error) {
   444  		resp, err := f.srv.CallJSON(&opts, nil, &f.info)
   445  		return f.shouldRetryNoReauth(resp, err)
   446  	})
   447  	if err != nil {
   448  		return errors.Wrap(err, "failed to authenticate")
   449  	}
   450  	f.srv.SetRoot(f.info.APIURL+"/b2api/v1").SetHeader("Authorization", f.info.AuthorizationToken)
   451  	return nil
   452  }
   453  
    454  // hasPermission reports whether the current AuthorizationToken has the selected permission
   455  func (f *Fs) hasPermission(permission string) bool {
   456  	for _, capability := range f.info.Allowed.Capabilities {
   457  		if capability == permission {
   458  			return true
   459  		}
   460  	}
   461  	return false
   462  }
   463  
   464  // getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
   465  //
   466  // This should be returned with returnUploadURL when finished
   467  func (f *Fs) getUploadURL() (upload *api.GetUploadURLResponse, err error) {
   468  	f.uploadMu.Lock()
   469  	defer f.uploadMu.Unlock()
   470  	bucketID, err := f.getBucketID()
   471  	if err != nil {
   472  		return nil, err
   473  	}
   474  	if len(f.uploads) == 0 {
   475  		opts := rest.Opts{
   476  			Method: "POST",
   477  			Path:   "/b2_get_upload_url",
   478  		}
   479  		var request = api.GetUploadURLRequest{
   480  			BucketID: bucketID,
   481  		}
   482  		err := f.pacer.Call(func() (bool, error) {
   483  			resp, err := f.srv.CallJSON(&opts, &request, &upload)
   484  			return f.shouldRetry(resp, err)
   485  		})
   486  		if err != nil {
   487  			return nil, errors.Wrap(err, "failed to get upload URL")
   488  		}
   489  	} else {
   490  		upload, f.uploads = f.uploads[0], f.uploads[1:]
   491  	}
   492  	return upload, nil
   493  }
   494  
   495  // returnUploadURL returns the UploadURL to the cache
   496  func (f *Fs) returnUploadURL(upload *api.GetUploadURLResponse) {
   497  	if upload == nil {
   498  		return
   499  	}
   500  	f.uploadMu.Lock()
   501  	f.uploads = append(f.uploads, upload)
   502  	f.uploadMu.Unlock()
   503  }
   504  
   505  // clearUploadURL clears the current UploadURL and the AuthorizationToken
   506  func (f *Fs) clearUploadURL() {
   507  	f.uploadMu.Lock()
   508  	f.uploads = nil
   509  	f.uploadMu.Unlock()
   510  }
   511  
   512  // Fill up (or reset) the buffer tokens
   513  func (f *Fs) fillBufferTokens() {
   514  	f.bufferTokens = make(chan []byte, fs.Config.Transfers)
   515  	for i := 0; i < fs.Config.Transfers; i++ {
   516  		f.bufferTokens <- nil
   517  	}
   518  }
   519  
   520  // getUploadBlock gets a block from the pool of size chunkSize
   521  func (f *Fs) getUploadBlock() []byte {
   522  	buf := <-f.bufferTokens
   523  	if buf == nil {
   524  		buf = make([]byte, f.opt.ChunkSize)
   525  	}
   526  	// fs.Debugf(f, "Getting upload block %p", buf)
   527  	return buf
   528  }
   529  
   530  // putUploadBlock returns a block to the pool of size chunkSize
   531  func (f *Fs) putUploadBlock(buf []byte) {
   532  	buf = buf[:cap(buf)]
   533  	if len(buf) != int(f.opt.ChunkSize) {
   534  		panic("bad blocksize returned to pool")
   535  	}
   536  	// fs.Debugf(f, "Returning upload block %p", buf)
   537  	f.bufferTokens <- buf
   538  }
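
         // Illustrative sketch of the token pool in use (this is what Update
         // does below for streamed uploads): one buffer is held per in-flight
         // chunk, so at most fs.Config.Transfers chunk buffers exist at once.
         //
         //	buf := f.getUploadBlock()      // blocks until a token is available
         //	n, err := io.ReadFull(in, buf) // fill one chunk from the source
         //	// ... upload buf[:n] ...
         //	f.putUploadBlock(buf)          // return the buffer for reuse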
   539  
   540  // Return an Object from a path
   541  //
   542  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   543  func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.File) (fs.Object, error) {
   544  	o := &Object{
   545  		fs:     f,
   546  		remote: remote,
   547  	}
   548  	if info != nil {
   549  		err := o.decodeMetaData(info)
   550  		if err != nil {
   551  			return nil, err
   552  		}
   553  	} else {
   554  		err := o.readMetaData(ctx) // reads info and headers, returning an error
   555  		if err != nil {
   556  			return nil, err
   557  		}
   558  	}
   559  	return o, nil
   560  }
   561  
   562  // NewObject finds the Object at remote.  If it can't be found
   563  // it returns the error fs.ErrorObjectNotFound.
   564  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   565  	return f.newObjectWithInfo(ctx, remote, nil)
   566  }
   567  
   568  // listFn is called from list to handle an object
   569  type listFn func(remote string, object *api.File, isDirectory bool) error
   570  
   571  // errEndList is a sentinel used to end the list iteration now.
   572  // listFn should return it to end the iteration with no errors.
   573  var errEndList = errors.New("end list")
   574  
   575  // list lists the objects into the function supplied from
   576  // the bucket and root supplied
   577  //
   578  // dir is the starting directory, "" for root
   579  //
   580  // level is the depth to search to
   581  //
   582  // If prefix is set then startFileName is used as a prefix which all
   583  // files must have
   584  //
   585  // If limit is > 0 then it limits to that many files (must be less
   586  // than 1000)
   587  //
   588  // If hidden is set then it will list the hidden (deleted) files too.
   589  func (f *Fs) list(ctx context.Context, dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
   590  	root := f.root
   591  	if dir != "" {
   592  		root += dir + "/"
   593  	}
   594  	delimiter := ""
   595  	if !recurse {
   596  		delimiter = "/"
   597  	}
   598  	bucketID, err := f.getBucketID()
   599  	if err != nil {
   600  		return err
   601  	}
   602  	chunkSize := 1000
   603  	if limit > 0 {
   604  		chunkSize = limit
   605  	}
   606  	var request = api.ListFileNamesRequest{
   607  		BucketID:     bucketID,
   608  		MaxFileCount: chunkSize,
   609  		Prefix:       root,
   610  		Delimiter:    delimiter,
   611  	}
   612  	prefix = root + prefix
   613  	if prefix != "" {
   614  		request.StartFileName = prefix
   615  	}
   616  	opts := rest.Opts{
   617  		Method: "POST",
   618  		Path:   "/b2_list_file_names",
   619  	}
   620  	if hidden {
   621  		opts.Path = "/b2_list_file_versions"
   622  	}
   623  	for {
   624  		var response api.ListFileNamesResponse
   625  		err := f.pacer.Call(func() (bool, error) {
   626  			resp, err := f.srv.CallJSON(&opts, &request, &response)
   627  			return f.shouldRetry(resp, err)
   628  		})
   629  		if err != nil {
   630  			return err
   631  		}
   632  		for i := range response.Files {
   633  			file := &response.Files[i]
   634  			// Finish if file name no longer has prefix
   635  			if prefix != "" && !strings.HasPrefix(file.Name, prefix) {
   636  				return nil
   637  			}
   638  			if !strings.HasPrefix(file.Name, f.root) {
   639  				fs.Debugf(f, "Odd name received %q", file.Name)
   640  				continue
   641  			}
   642  			remote := file.Name[len(f.root):]
   643  			// Check for directory
   644  			isDirectory := strings.HasSuffix(remote, "/")
   645  			if isDirectory {
   646  				remote = remote[:len(remote)-1]
   647  			}
   648  			// Send object
   649  			err = fn(remote, file, isDirectory)
   650  			if err != nil {
   651  				if err == errEndList {
   652  					return nil
   653  				}
   654  				return err
   655  			}
   656  		}
   657  		// end if no NextFileName
   658  		if response.NextFileName == nil {
   659  			break
   660  		}
   661  		request.StartFileName = *response.NextFileName
   662  		if response.NextFileID != nil {
   663  			request.StartFileID = *response.NextFileID
   664  		}
   665  	}
   666  	return nil
   667  }
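
         // Illustrative sketch (not part of the original file) of driving
         // list with a listFn; when recurse is false, directory placeholders
         // arrive with isDirectory true:
         //
         //	err := f.list(ctx, "dir", false, "", 0, false,
         //		func(remote string, object *api.File, isDirectory bool) error {
         //			fmt.Printf("%q dir=%v\n", remote, isDirectory)
         //			return nil // or errEndList to stop the listing early
         //		})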
   668  
   669  // Convert a list item into a DirEntry
   670  func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
   671  	if isDirectory {
   672  		d := fs.NewDir(remote, time.Time{})
   673  		return d, nil
   674  	}
   675  	if remote == *last {
   676  		remote = object.UploadTimestamp.AddVersion(remote)
   677  	} else {
   678  		*last = remote
   679  	}
   680  	// hide objects represent deleted files which we don't list
   681  	if object.Action == "hide" {
   682  		return nil, nil
   683  	}
   684  	o, err := f.newObjectWithInfo(ctx, remote, object)
   685  	if err != nil {
   686  		return nil, err
   687  	}
   688  	return o, nil
   689  }
   690  
   691  // mark the bucket as being OK
   692  func (f *Fs) markBucketOK() {
   693  	if f.bucket != "" {
   694  		f.bucketOKMu.Lock()
   695  		f.bucketOK = true
   696  		f.bucketOKMu.Unlock()
   697  	}
   698  }
   699  
   700  // listDir lists a single directory
   701  func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   702  	last := ""
   703  	err = f.list(ctx, dir, false, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
   704  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
   705  		if err != nil {
   706  			return err
   707  		}
   708  		if entry != nil {
   709  			entries = append(entries, entry)
   710  		}
   711  		return nil
   712  	})
   713  	if err != nil {
   714  		return nil, err
   715  	}
   716  	// bucket must be present if listing succeeded
   717  	f.markBucketOK()
   718  	return entries, nil
   719  }
   720  
   721  // listBuckets returns all the buckets to out
   722  func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
   723  	if dir != "" {
   724  		return nil, fs.ErrorListBucketRequired
   725  	}
   726  	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
   727  		d := fs.NewDir(bucket.Name, time.Time{})
   728  		entries = append(entries, d)
   729  		return nil
   730  	})
   731  	if err != nil {
   732  		return nil, err
   733  	}
   734  	return entries, nil
   735  }
   736  
   737  // List the objects and directories in dir into entries.  The
   738  // entries can be returned in any order but should be for a
   739  // complete directory.
   740  //
   741  // dir should be "" to list the root, and should not have
   742  // trailing slashes.
   743  //
   744  // This should return ErrDirNotFound if the directory isn't
   745  // found.
   746  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   747  	if f.bucket == "" {
   748  		return f.listBuckets(dir)
   749  	}
   750  	return f.listDir(ctx, dir)
   751  }
   752  
   753  // ListR lists the objects and directories of the Fs starting
   754  // from dir recursively into out.
   755  //
   756  // dir should be "" to start from the root, and should not
   757  // have trailing slashes.
   758  //
   759  // This should return ErrDirNotFound if the directory isn't
   760  // found.
   761  //
   762  // It should call callback for each tranche of entries read.
   763  // These need not be returned in any particular order.  If
   764  // callback returns an error then the listing will stop
   765  // immediately.
   766  //
   767  // Don't implement this unless you have a more efficient way
    768  // of listing recursively than doing a directory traversal.
   769  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
   770  	if f.bucket == "" {
   771  		return fs.ErrorListBucketRequired
   772  	}
   773  	list := walk.NewListRHelper(callback)
   774  	last := ""
   775  	err = f.list(ctx, dir, true, "", 0, f.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
   776  		entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
   777  		if err != nil {
   778  			return err
   779  		}
   780  		return list.Add(entry)
   781  	})
   782  	if err != nil {
   783  		return err
   784  	}
   785  	// bucket must be present if listing succeeded
   786  	f.markBucketOK()
   787  	return list.Flush()
   788  }
   789  
   790  // listBucketFn is called from listBucketsToFn to handle a bucket
   791  type listBucketFn func(*api.Bucket) error
   792  
   793  // listBucketsToFn lists the buckets to the function supplied
   794  func (f *Fs) listBucketsToFn(fn listBucketFn) error {
   795  	var account = api.ListBucketsRequest{
   796  		AccountID: f.info.AccountID,
   797  		BucketID:  f.info.Allowed.BucketID,
   798  	}
   799  
   800  	var response api.ListBucketsResponse
   801  	opts := rest.Opts{
   802  		Method: "POST",
   803  		Path:   "/b2_list_buckets",
   804  	}
   805  	err := f.pacer.Call(func() (bool, error) {
   806  		resp, err := f.srv.CallJSON(&opts, &account, &response)
   807  		return f.shouldRetry(resp, err)
   808  	})
   809  	if err != nil {
   810  		return err
   811  	}
   812  	for i := range response.Buckets {
   813  		err = fn(&response.Buckets[i])
   814  		if err != nil {
   815  			return err
   816  		}
   817  	}
   818  	return nil
   819  }
   820  
   821  // getbucketType finds the bucketType for the current bucket name
    822  // can be one of allPublic, allPrivate, or snapshot
   823  func (f *Fs) getbucketType() (bucketType string, err error) {
   824  	f.bucketTypeMutex.Lock()
   825  	defer f.bucketTypeMutex.Unlock()
   826  	if f._bucketType != "" {
   827  		return f._bucketType, nil
   828  	}
   829  	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
   830  		if bucket.Name == f.bucket {
   831  			bucketType = bucket.Type
   832  		}
   833  		return nil
   834  
   835  	})
   836  	if bucketType == "" {
   837  		err = fs.ErrorDirNotFound
   838  	}
   839  	f._bucketType = bucketType
   840  	return bucketType, err
   841  }
   842  
   843  // setBucketType sets the Type for the current bucket name
   844  func (f *Fs) setBucketType(Type string) {
   845  	f.bucketTypeMutex.Lock()
   846  	f._bucketType = Type
   847  	f.bucketTypeMutex.Unlock()
   848  }
   849  
   850  // clearBucketType clears the Type for the current bucket name
   851  func (f *Fs) clearBucketType() {
   852  	f.bucketTypeMutex.Lock()
   853  	f._bucketType = ""
   854  	f.bucketTypeMutex.Unlock()
   855  }
   856  
   857  // getBucketID finds the ID for the current bucket name
   858  func (f *Fs) getBucketID() (bucketID string, err error) {
   859  	f.bucketIDMutex.Lock()
   860  	defer f.bucketIDMutex.Unlock()
   861  	if f._bucketID != "" {
   862  		return f._bucketID, nil
   863  	}
   864  	err = f.listBucketsToFn(func(bucket *api.Bucket) error {
   865  		if bucket.Name == f.bucket {
   866  			bucketID = bucket.ID
   867  		}
   868  		return nil
   869  
   870  	})
   871  	if bucketID == "" {
   872  		err = fs.ErrorDirNotFound
   873  	}
   874  	f._bucketID = bucketID
   875  	return bucketID, err
   876  }
   877  
   878  // setBucketID sets the ID for the current bucket name
   879  func (f *Fs) setBucketID(ID string) {
   880  	f.bucketIDMutex.Lock()
   881  	f._bucketID = ID
   882  	f.bucketIDMutex.Unlock()
   883  }
   884  
   885  // clearBucketID clears the ID for the current bucket name
   886  func (f *Fs) clearBucketID() {
   887  	f.bucketIDMutex.Lock()
   888  	f._bucketID = ""
   889  	f.bucketIDMutex.Unlock()
   890  }
   891  
   892  // Put the object into the bucket
   893  //
   894  // Copy the reader in to the new object which is returned
   895  //
   896  // The new object may have been created if an error is returned
   897  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   898  	// Temporary Object under construction
    899  	o := &Object{
    900  		fs:     f,
    901  		remote: src.Remote(),
    902  	}
    903  	return o, o.Update(ctx, in, src, options...)
   904  }
   905  
   906  // PutStream uploads to the remote path with the modTime given of indeterminate size
   907  func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   908  	return f.Put(ctx, in, src, options...)
   909  }
   910  
   911  // Mkdir creates the bucket if it doesn't exist
   912  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   913  	f.bucketOKMu.Lock()
   914  	defer f.bucketOKMu.Unlock()
   915  	if f.bucketOK {
   916  		return nil
   917  	}
   918  	opts := rest.Opts{
   919  		Method: "POST",
   920  		Path:   "/b2_create_bucket",
   921  	}
   922  	var request = api.CreateBucketRequest{
   923  		AccountID: f.info.AccountID,
   924  		Name:      f.bucket,
   925  		Type:      "allPrivate",
   926  	}
   927  	var response api.Bucket
   928  	err := f.pacer.Call(func() (bool, error) {
   929  		resp, err := f.srv.CallJSON(&opts, &request, &response)
   930  		return f.shouldRetry(resp, err)
   931  	})
   932  	if err != nil {
   933  		if apiErr, ok := err.(*api.Error); ok {
   934  			if apiErr.Code == "duplicate_bucket_name" {
   935  				// Check this is our bucket - buckets are globally unique and this
    936  				// might be someone else's.
   937  				_, getBucketErr := f.getBucketID()
   938  				if getBucketErr == nil {
   939  					// found so it is our bucket
   940  					f.bucketOK = true
   941  					return nil
   942  				}
   943  				if getBucketErr != fs.ErrorDirNotFound {
   944  					fs.Debugf(f, "Error checking bucket exists: %v", getBucketErr)
   945  				}
   946  			}
   947  		}
   948  		return errors.Wrap(err, "failed to create bucket")
   949  	}
   950  	f.setBucketID(response.ID)
   951  	f.setBucketType(response.Type)
   952  	f.bucketOK = true
   953  	return nil
   954  }
   955  
   956  // Rmdir deletes the bucket if the fs is at the root
   957  //
   958  // Returns an error if it isn't empty
   959  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   960  	f.bucketOKMu.Lock()
   961  	defer f.bucketOKMu.Unlock()
   962  	if f.root != "" || dir != "" {
   963  		return nil
   964  	}
   965  	opts := rest.Opts{
   966  		Method: "POST",
   967  		Path:   "/b2_delete_bucket",
   968  	}
   969  	bucketID, err := f.getBucketID()
   970  	if err != nil {
   971  		return err
   972  	}
   973  	var request = api.DeleteBucketRequest{
   974  		ID:        bucketID,
   975  		AccountID: f.info.AccountID,
   976  	}
   977  	var response api.Bucket
   978  	err = f.pacer.Call(func() (bool, error) {
   979  		resp, err := f.srv.CallJSON(&opts, &request, &response)
   980  		return f.shouldRetry(resp, err)
   981  	})
   982  	if err != nil {
   983  		return errors.Wrap(err, "failed to delete bucket")
   984  	}
   985  	f.bucketOK = false
   986  	f.clearBucketID()
   987  	f.clearBucketType()
   988  	f.clearUploadURL()
   989  	return nil
   990  }
   991  
   992  // Precision of the remote
   993  func (f *Fs) Precision() time.Duration {
   994  	return time.Millisecond
   995  }
   996  
   997  // hide hides a file on the remote
   998  func (f *Fs) hide(Name string) error {
   999  	bucketID, err := f.getBucketID()
  1000  	if err != nil {
  1001  		return err
  1002  	}
  1003  	opts := rest.Opts{
  1004  		Method: "POST",
  1005  		Path:   "/b2_hide_file",
  1006  	}
  1007  	var request = api.HideFileRequest{
  1008  		BucketID: bucketID,
  1009  		Name:     Name,
  1010  	}
  1011  	var response api.File
  1012  	err = f.pacer.Call(func() (bool, error) {
  1013  		resp, err := f.srv.CallJSON(&opts, &request, &response)
  1014  		return f.shouldRetry(resp, err)
  1015  	})
  1016  	if err != nil {
  1017  		if apiErr, ok := err.(*api.Error); ok {
  1018  			if apiErr.Code == "already_hidden" {
  1019  				// sometimes eventual consistency causes this, so
  1020  				// ignore this error since it is harmless
  1021  				return nil
  1022  			}
  1023  		}
  1024  		return errors.Wrapf(err, "failed to hide %q", Name)
  1025  	}
  1026  	return nil
  1027  }
  1028  
  1029  // deleteByID deletes a file version given Name and ID
  1030  func (f *Fs) deleteByID(ID, Name string) error {
  1031  	opts := rest.Opts{
  1032  		Method: "POST",
  1033  		Path:   "/b2_delete_file_version",
  1034  	}
  1035  	var request = api.DeleteFileRequest{
  1036  		ID:   ID,
  1037  		Name: Name,
  1038  	}
  1039  	var response api.File
  1040  	err := f.pacer.Call(func() (bool, error) {
  1041  		resp, err := f.srv.CallJSON(&opts, &request, &response)
  1042  		return f.shouldRetry(resp, err)
  1043  	})
  1044  	if err != nil {
  1045  		return errors.Wrapf(err, "failed to delete %q", Name)
  1046  	}
  1047  	return nil
  1048  }
  1049  
  1050  // purge deletes all the files and directories
  1051  //
  1052  // if oldOnly is true then it deletes only non current files.
  1053  //
  1054  // Implemented here so we can make sure we delete old versions.
  1055  func (f *Fs) purge(ctx context.Context, oldOnly bool) error {
  1056  	var errReturn error
  1057  	var checkErrMutex sync.Mutex
  1058  	var checkErr = func(err error) {
  1059  		if err == nil {
  1060  			return
  1061  		}
  1062  		checkErrMutex.Lock()
  1063  		defer checkErrMutex.Unlock()
  1064  		if errReturn == nil {
  1065  			errReturn = err
  1066  		}
  1067  	}
   1068  	// An unfinished large file upload is stale if it started more
   1069  	// than 24 hours ago - purge then deletes its "start" marker.
   1070  	var isUnfinishedUploadStale = func(timestamp api.Timestamp) bool {
   1071  		return time.Since(time.Time(timestamp)).Hours() > 24
   1072  	}
   1073  
  1074  
  1075  	// Delete Config.Transfers in parallel
  1076  	toBeDeleted := make(chan *api.File, fs.Config.Transfers)
  1077  	var wg sync.WaitGroup
  1078  	wg.Add(fs.Config.Transfers)
  1079  	for i := 0; i < fs.Config.Transfers; i++ {
  1080  		go func() {
  1081  			defer wg.Done()
  1082  			for object := range toBeDeleted {
  1083  				accounting.Stats.Checking(object.Name)
  1084  				checkErr(f.deleteByID(object.ID, object.Name))
  1085  				accounting.Stats.DoneChecking(object.Name)
  1086  			}
  1087  		}()
  1088  	}
  1089  	last := ""
  1090  	checkErr(f.list(ctx, "", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
  1091  		if !isDirectory {
  1092  			accounting.Stats.Checking(remote)
  1093  			if oldOnly && last != remote {
  1094  				if object.Action == "hide" {
  1095  					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
  1096  					toBeDeleted <- object
  1097  				} else if object.Action == "start" && isUnfinishedUploadStale(object.UploadTimestamp) {
  1098  					fs.Debugf(remote, "Deleting current version (id %q) as it is a start marker (upload started at %s)", object.ID, time.Time(object.UploadTimestamp).Local())
  1099  					toBeDeleted <- object
  1100  				} else {
  1101  					fs.Debugf(remote, "Not deleting current version (id %q) %q", object.ID, object.Action)
  1102  				}
  1103  			} else {
  1104  				fs.Debugf(remote, "Deleting (id %q)", object.ID)
  1105  				toBeDeleted <- object
  1106  			}
  1107  			last = remote
  1108  			accounting.Stats.DoneChecking(remote)
  1109  		}
  1110  		return nil
  1111  	}))
  1112  	close(toBeDeleted)
  1113  	wg.Wait()
  1114  
  1115  	if !oldOnly {
  1116  		checkErr(f.Rmdir(ctx, ""))
  1117  	}
  1118  	return errReturn
  1119  }
  1120  
  1121  // Purge deletes all the files and directories including the old versions.
  1122  func (f *Fs) Purge(ctx context.Context) error {
  1123  	return f.purge(ctx, false)
  1124  }
  1125  
  1126  // CleanUp deletes all the hidden files.
  1127  func (f *Fs) CleanUp(ctx context.Context) error {
  1128  	return f.purge(ctx, true)
  1129  }
  1130  
  1131  // Copy src to this remote using server side copy operations.
  1132  //
  1133  // This is stored with the remote path given
  1134  //
  1135  // It returns the destination Object and a possible error
  1136  //
  1137  // Will only be called if src.Fs().Name() == f.Name()
  1138  //
  1139  // If it isn't possible then return fs.ErrorCantCopy
  1140  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
  1141  	err := f.Mkdir(ctx, "")
  1142  	if err != nil {
  1143  		return nil, err
  1144  	}
  1145  	srcObj, ok := src.(*Object)
  1146  	if !ok {
  1147  		fs.Debugf(src, "Can't copy - not same remote type")
  1148  		return nil, fs.ErrorCantCopy
  1149  	}
  1150  	destBucketID, err := f.getBucketID()
  1151  	if err != nil {
  1152  		return nil, err
  1153  	}
  1154  	opts := rest.Opts{
  1155  		Method: "POST",
  1156  		Path:   "/b2_copy_file",
  1157  	}
  1158  	var request = api.CopyFileRequest{
  1159  		SourceID:          srcObj.id,
  1160  		Name:              f.root + remote,
  1161  		MetadataDirective: "COPY",
  1162  		DestBucketID:      destBucketID,
  1163  	}
  1164  	var response api.FileInfo
  1165  	err = f.pacer.Call(func() (bool, error) {
  1166  		resp, err := f.srv.CallJSON(&opts, &request, &response)
  1167  		return f.shouldRetry(resp, err)
  1168  	})
  1169  	if err != nil {
  1170  		return nil, err
  1171  	}
  1172  	o := &Object{
  1173  		fs:     f,
  1174  		remote: remote,
  1175  	}
  1176  	err = o.decodeMetaDataFileInfo(&response)
  1177  	if err != nil {
  1178  		return nil, err
  1179  	}
  1180  	return o, nil
  1181  }
  1182  
  1183  // Hashes returns the supported hash sets.
  1184  func (f *Fs) Hashes() hash.Set {
  1185  	return hash.Set(hash.SHA1)
  1186  }
  1187  
   1188  // getDownloadAuthorization returns an authorization token for downloading
   1189  // without an account.
  1190  func (f *Fs) getDownloadAuthorization(remote string) (authorization string, err error) {
  1191  	validDurationInSeconds := time.Duration(f.opt.DownloadAuthorizationDuration).Nanoseconds() / 1e9
  1192  	if validDurationInSeconds <= 0 || validDurationInSeconds > 604800 {
  1193  		return "", errors.New("--b2-download-auth-duration must be between 1 sec and 1 week")
  1194  	}
  1195  	if !f.hasPermission("shareFiles") {
  1196  		return "", errors.New("sharing a file link requires the shareFiles permission")
  1197  	}
  1198  	bucketID, err := f.getBucketID()
  1199  	if err != nil {
  1200  		return "", err
  1201  	}
  1202  	opts := rest.Opts{
  1203  		Method: "POST",
  1204  		Path:   "/b2_get_download_authorization",
  1205  	}
  1206  	var request = api.GetDownloadAuthorizationRequest{
  1207  		BucketID:               bucketID,
  1208  		FileNamePrefix:         path.Join(f.root, remote),
  1209  		ValidDurationInSeconds: validDurationInSeconds,
  1210  	}
  1211  	var response api.GetDownloadAuthorizationResponse
  1212  	err = f.pacer.Call(func() (bool, error) {
  1213  		resp, err := f.srv.CallJSON(&opts, &request, &response)
  1214  		return f.shouldRetry(resp, err)
  1215  	})
  1216  	if err != nil {
  1217  		return "", errors.Wrap(err, "failed to get download authorization")
  1218  	}
  1219  	return response.AuthorizationToken, nil
  1220  }
  1221  
   1222  // PublicLink returns a link for downloading without an account.
  1223  func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
  1224  	var RootURL string
  1225  	if f.opt.DownloadURL == "" {
  1226  		RootURL = f.info.DownloadURL
  1227  	} else {
  1228  		RootURL = f.opt.DownloadURL
  1229  	}
  1230  	absPath := "/" + path.Join(f.root, remote)
  1231  	link = RootURL + "/file/" + urlEncode(f.bucket) + absPath
  1232  	bucketType, err := f.getbucketType()
  1233  	if err != nil {
  1234  		return "", err
  1235  	}
  1236  	if bucketType == "allPrivate" || bucketType == "snapshot" {
  1237  		AuthorizationToken, err := f.getDownloadAuthorization(remote)
  1238  		if err != nil {
  1239  			return "", err
  1240  		}
  1241  		link += "?Authorization=" + AuthorizationToken
  1242  	}
  1243  	return link, nil
  1244  }
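
         // Illustrative sketch (placeholder host and token): for an
         // allPrivate or snapshot bucket the link carries a time-limited
         // Authorization query parameter; for an allPublic bucket it is a
         // bare download URL.
         //
         //	link, _ := f.PublicLink(ctx, "photos/cat.jpg")
         //	// e.g. https://f002.backblazeb2.com/file/mybucket/photos/cat.jpg?Authorization=...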
  1245  
  1246  // ------------------------------------------------------------
  1247  
  1248  // Fs returns the parent Fs
  1249  func (o *Object) Fs() fs.Info {
  1250  	return o.fs
  1251  }
  1252  
  1253  // Return a string version
  1254  func (o *Object) String() string {
  1255  	if o == nil {
  1256  		return "<nil>"
  1257  	}
  1258  	return o.remote
  1259  }
  1260  
  1261  // Remote returns the remote path
  1262  func (o *Object) Remote() string {
  1263  	return o.remote
  1264  }
  1265  
   1266  // Hash returns the SHA-1 of an object as a lowercase hex string
  1267  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1268  	if t != hash.SHA1 {
  1269  		return "", hash.ErrUnsupported
  1270  	}
  1271  	if o.sha1 == "" {
  1272  		// Error is logged in readMetaData
  1273  		err := o.readMetaData(ctx)
  1274  		if err != nil {
  1275  			return "", err
  1276  		}
  1277  	}
  1278  	return o.sha1, nil
  1279  }
  1280  
  1281  // Size returns the size of an object in bytes
  1282  func (o *Object) Size() int64 {
  1283  	return o.size
  1284  }
  1285  
  1286  // decodeMetaDataRaw sets the metadata from the data passed in
  1287  //
  1288  // Sets
  1289  //  o.id
  1290  //  o.modTime
  1291  //  o.size
  1292  //  o.sha1
  1293  func (o *Object) decodeMetaDataRaw(ID, SHA1 string, Size int64, UploadTimestamp api.Timestamp, Info map[string]string, mimeType string) (err error) {
  1294  	o.id = ID
  1295  	o.sha1 = SHA1
  1296  	o.mimeType = mimeType
  1297  	// Read SHA1 from metadata if it exists and isn't set
  1298  	if o.sha1 == "" || o.sha1 == "none" {
  1299  		o.sha1 = Info[sha1Key]
  1300  	}
  1301  	o.size = Size
   1302  	// Use the UploadTimestamp if we can't get the modified time from the file info
  1303  	o.modTime = time.Time(UploadTimestamp)
  1304  	return o.parseTimeString(Info[timeKey])
  1305  }
  1306  
  1307  // decodeMetaData sets the metadata in the object from an api.File
  1308  //
  1309  // Sets
  1310  //  o.id
  1311  //  o.modTime
  1312  //  o.size
  1313  //  o.sha1
  1314  func (o *Object) decodeMetaData(info *api.File) (err error) {
  1315  	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  1316  }
  1317  
  1318  // decodeMetaDataFileInfo sets the metadata in the object from an api.FileInfo
  1319  //
  1320  // Sets
  1321  //  o.id
  1322  //  o.modTime
  1323  //  o.size
  1324  //  o.sha1
  1325  func (o *Object) decodeMetaDataFileInfo(info *api.FileInfo) (err error) {
  1326  	return o.decodeMetaDataRaw(info.ID, info.SHA1, info.Size, info.UploadTimestamp, info.Info, info.ContentType)
  1327  }
  1328  
  1329  // getMetaData gets the metadata from the object unconditionally
  1330  func (o *Object) getMetaData(ctx context.Context) (info *api.File, err error) {
  1331  	maxSearched := 1
  1332  	var timestamp api.Timestamp
  1333  	baseRemote := o.remote
  1334  	if o.fs.opt.Versions {
  1335  		timestamp, baseRemote = api.RemoveVersion(baseRemote)
  1336  		maxSearched = maxVersions
  1337  	}
  1338  
  1339  	err = o.fs.list(ctx, "", true, baseRemote, maxSearched, o.fs.opt.Versions, func(remote string, object *api.File, isDirectory bool) error {
  1340  		if isDirectory {
  1341  			return nil
  1342  		}
  1343  		if remote == baseRemote {
  1344  			if !timestamp.IsZero() && !timestamp.Equal(object.UploadTimestamp) {
  1345  				return nil
  1346  			}
  1347  			info = object
  1348  		}
  1349  		return errEndList // read only 1 item
  1350  	})
  1351  	if err != nil {
  1352  		if err == fs.ErrorDirNotFound {
  1353  			return nil, fs.ErrorObjectNotFound
  1354  		}
  1355  		return nil, err
  1356  	}
  1357  	if info == nil {
  1358  		return nil, fs.ErrorObjectNotFound
  1359  	}
  1360  	return info, nil
  1361  }
  1362  
  1363  // readMetaData gets the metadata if it hasn't already been fetched
  1364  //
  1365  // Sets
  1366  //  o.id
  1367  //  o.modTime
  1368  //  o.size
  1369  //  o.sha1
  1370  func (o *Object) readMetaData(ctx context.Context) (err error) {
  1371  	if o.id != "" {
  1372  		return nil
  1373  	}
  1374  	info, err := o.getMetaData(ctx)
  1375  	if err != nil {
  1376  		return err
  1377  	}
  1378  	return o.decodeMetaData(info)
  1379  }
  1380  
  1381  // timeString returns modTime as the number of milliseconds
  1382  // elapsed since January 1, 1970 UTC as a decimal string.
  1383  func timeString(modTime time.Time) string {
  1384  	return strconv.FormatInt(modTime.UnixNano()/1E6, 10)
  1385  }
  1386  
  1387  // parseTimeString converts a decimal string number of milliseconds
  1388  // elapsed since January 1, 1970 UTC into a time.Time and stores it in
  1389  // the modTime variable.
  1390  func (o *Object) parseTimeString(timeString string) (err error) {
  1391  	if timeString == "" {
  1392  		return nil
  1393  	}
  1394  	unixMilliseconds, err := strconv.ParseInt(timeString, 10, 64)
  1395  	if err != nil {
  1396  		fs.Debugf(o, "Failed to parse mod time string %q: %v", timeString, err)
  1397  		return nil
  1398  	}
  1399  	o.modTime = time.Unix(unixMilliseconds/1E3, (unixMilliseconds%1E3)*1E6).UTC()
  1400  	return nil
  1401  }
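
         // Illustrative round trip between timeString and parseTimeString
         // (the decimal string is what is stored under
         // src_last_modified_millis in the file info):
         //
         //	t := time.Date(2019, 7, 24, 20, 11, 58, 0, time.UTC)
         //	s := timeString(t) // "1563999118000" - milliseconds since the Unix epoch
         //	var o Object
         //	_ = o.parseTimeString(s) // o.modTime is t again, in UTC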
  1402  
  1403  // ModTime returns the modification time of the object
  1404  //
   1405  // It attempts to read the object's mtime and if that isn't present the
  1406  // LastModified returned in the http headers
  1407  //
  1408  // SHA-1 will also be updated once the request has completed.
  1409  func (o *Object) ModTime(ctx context.Context) (result time.Time) {
  1410  	// The error is logged in readMetaData
  1411  	_ = o.readMetaData(ctx)
  1412  	return o.modTime
  1413  }
  1414  
  1415  // SetModTime sets the modification time of the Object
  1416  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
  1417  	info, err := o.getMetaData(ctx)
  1418  	if err != nil {
  1419  		return err
  1420  	}
  1421  	info.Info[timeKey] = timeString(modTime)
  1422  	opts := rest.Opts{
  1423  		Method: "POST",
  1424  		Path:   "/b2_copy_file",
  1425  	}
  1426  	var request = api.CopyFileRequest{
  1427  		SourceID:          o.id,
  1428  		Name:              o.fs.root + o.remote, // copy to same name
  1429  		MetadataDirective: "REPLACE",
  1430  		ContentType:       info.ContentType,
  1431  		Info:              info.Info,
  1432  	}
  1433  	var response api.FileInfo
  1434  	err = o.fs.pacer.Call(func() (bool, error) {
  1435  		resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
  1436  		return o.fs.shouldRetry(resp, err)
  1437  	})
  1438  	if err != nil {
  1439  		return err
  1440  	}
  1441  	return o.decodeMetaDataFileInfo(&response)
  1442  }
  1443  
  1444  // Storable returns if this object is storable
  1445  func (o *Object) Storable() bool {
  1446  	return true
  1447  }
  1448  
  1449  // openFile represents an Object open for reading
  1450  type openFile struct {
  1451  	o     *Object        // Object we are reading for
  1452  	resp  *http.Response // response of the GET
  1453  	body  io.Reader      // reading from here
  1454  	hash  gohash.Hash    // currently accumulating SHA1
  1455  	bytes int64          // number of bytes read on this connection
  1456  	eof   bool           // whether we have read end of file
  1457  }
  1458  
  1459  // newOpenFile wraps an io.ReadCloser and checks the sha1sum
  1460  func newOpenFile(o *Object, resp *http.Response) *openFile {
  1461  	file := &openFile{
  1462  		o:    o,
  1463  		resp: resp,
  1464  		hash: sha1.New(),
  1465  	}
  1466  	file.body = io.TeeReader(resp.Body, file.hash)
  1467  	return file
  1468  }
  1469  
  1470  // Read bytes from the object - see io.Reader
  1471  func (file *openFile) Read(p []byte) (n int, err error) {
  1472  	n, err = file.body.Read(p)
  1473  	file.bytes += int64(n)
  1474  	if err == io.EOF {
  1475  		file.eof = true
  1476  	}
  1477  	return
  1478  }
  1479  
   1480  // Close closes the object and checks the length and SHA1 if all the object
  1481  // was read
  1482  func (file *openFile) Close() (err error) {
  1483  	// Close the body at the end
  1484  	defer fs.CheckClose(file.resp.Body, &err)
  1485  
  1486  	// If not end of file then can't check SHA1
  1487  	if !file.eof {
  1488  		return nil
  1489  	}
  1490  
  1491  	// Check to see we read the correct number of bytes
  1492  	if file.o.Size() != file.bytes {
  1493  		return errors.Errorf("object corrupted on transfer - length mismatch (want %d got %d)", file.o.Size(), file.bytes)
  1494  	}
  1495  
  1496  	// Check the SHA1
  1497  	receivedSHA1 := file.o.sha1
  1498  	calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
  1499  	if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
  1500  		return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
  1501  	}
  1502  
  1503  	return nil
  1504  }
  1505  
  1506  // Check it satisfies the interfaces
  1507  var _ io.ReadCloser = &openFile{}
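
         // Illustrative sketch of the read path Open sets up below: every
         // byte read flows through the SHA1 via the TeeReader, and Close
         // verifies length and hash only if EOF was reached (ioutil is
         // assumed imported).
         //
         //	rc, err := o.Open(ctx) // an *openFile unless partial content
         //	if err == nil {
         //		_, err = io.Copy(ioutil.Discard, rc) // hash accumulates as we read
         //		if cerr := rc.Close(); cerr != nil {
         //			err = cerr // length or SHA1 mismatch
         //		}
         //	}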
  1508  
  1509  // Open an object for read
  1510  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
  1511  	opts := rest.Opts{
  1512  		Method:  "GET",
  1513  		Options: options,
  1514  	}
  1515  
   1516  	// Use the download URL from Backblaze if a custom one is not set,
   1517  	// otherwise use the custom download URL
  1518  	if o.fs.opt.DownloadURL == "" {
  1519  		opts.RootURL = o.fs.info.DownloadURL
  1520  	} else {
  1521  		opts.RootURL = o.fs.opt.DownloadURL
  1522  	}
  1523  
  1524  	// Download by id if set otherwise by name
  1525  	if o.id != "" {
  1526  		opts.Path += "/b2api/v1/b2_download_file_by_id?fileId=" + urlEncode(o.id)
  1527  	} else {
  1528  		opts.Path += "/file/" + urlEncode(o.fs.bucket) + "/" + urlEncode(o.fs.root+o.remote)
  1529  	}
  1530  	var resp *http.Response
  1531  	err = o.fs.pacer.Call(func() (bool, error) {
  1532  		resp, err = o.fs.srv.Call(&opts)
  1533  		return o.fs.shouldRetry(resp, err)
  1534  	})
  1535  	if err != nil {
  1536  		return nil, errors.Wrap(err, "failed to open for download")
  1537  	}
  1538  
  1539  	// Parse the time out of the headers if possible
  1540  	err = o.parseTimeString(resp.Header.Get(timeHeader))
  1541  	if err != nil {
  1542  		_ = resp.Body.Close()
  1543  		return nil, err
  1544  	}
  1545  	// Read sha1 from header if it isn't set
  1546  	if o.sha1 == "" {
  1547  		o.sha1 = resp.Header.Get(sha1Header)
  1548  		fs.Debugf(o, "Reading sha1 from header - %q", o.sha1)
   1549  		// if the sha1 header is "none" (as it is for large files), then
   1550  		// we need to read it from the file info metadata
  1551  		if o.sha1 == "none" {
  1552  			o.sha1 = resp.Header.Get(sha1InfoHeader)
  1553  			fs.Debugf(o, "Reading sha1 from info - %q", o.sha1)
  1554  		}
  1555  	}
  1556  	// Don't check length or hash on partial content
  1557  	if resp.StatusCode == http.StatusPartialContent {
  1558  		return resp.Body, nil
  1559  	}
  1560  	return newOpenFile(o, resp), nil
  1561  }
  1562  
  1563  // dontEncode is the characters that do not need percent-encoding
  1564  //
  1565  // The characters that do not need percent-encoding are a subset of
  1566  // the printable ASCII characters: upper-case letters, lower-case
  1567  // letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")",
  1568  // "*", ";", "=", ":", and "@". All other byte values in a UTF-8 string
  1569  // must be replaced with "%" and the two-digit hex value of the byte.
  1570  const dontEncode = (`abcdefghijklmnopqrstuvwxyz` +
  1571  	`ABCDEFGHIJKLMNOPQRSTUVWXYZ` +
  1572  	`0123456789` +
  1573  	`._-/~!$'()*;=:@`)
  1574  
  1575  // noNeedToEncode is a bitmap of characters which don't need % encoding
  1576  var noNeedToEncode [256]bool
  1577  
  1578  func init() {
  1579  	for _, c := range dontEncode {
  1580  		noNeedToEncode[c] = true
  1581  	}
  1582  }
  1583  
  1584  // urlEncode encodes in with % encoding
  1585  func urlEncode(in string) string {
  1586  	var out bytes.Buffer
  1587  	for i := 0; i < len(in); i++ {
  1588  		c := in[i]
  1589  		if noNeedToEncode[c] {
  1590  			_ = out.WriteByte(c)
  1591  		} else {
  1592  			_, _ = out.WriteString(fmt.Sprintf("%%%02X", c)) // %02X zero-pads so bytes below 0x10 encode correctly
  1593  		}
  1594  	}
  1595  	return out.String()
  1596  }
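
// A quick illustration of the encoding (not part of the backend):
// characters in dontEncode pass through, while everything else,
// including space and "+", becomes a percent escape.
func urlEncodeExample() {
	fmt.Println(urlEncode("my dir/file+name.txt"))
	// Output: my%20dir/file%2Bname.txt
}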
  1597  
  1598  // Update the object with the contents of the io.Reader, modTime and size
  1599  //
  1600  // The new object may have been created if an error is returned
  1601  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
  1602  	if o.fs.opt.Versions {
  1603  		return errNotWithVersions
  1604  	}
  1605  	err = o.fs.Mkdir(ctx, "")
  1606  	if err != nil {
  1607  		return err
  1608  	}
  1609  	size := src.Size()
  1610  
  1611  	if size == -1 {
  1612  		// Check if the file is large enough for a chunked upload - it must have more than one chunk's worth of data to be streamed
  1613  		buf := o.fs.getUploadBlock()
  1614  		n, err := io.ReadFull(in, buf)
  1615  		if err == nil {
  1616  			bufReader := bufio.NewReader(in)
  1617  			in = bufReader
  1618  			_, err = bufReader.Peek(1)
  1619  		}
  1620  
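		// A nil err here means the peek found more data after the
		// first full chunk, so the source must be chunk streamed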
  1621  		if err == nil {
  1622  			fs.Debugf(o, "File is big enough for chunked streaming")
  1623  			up, err := o.fs.newLargeUpload(ctx, o, in, src)
  1624  			if err != nil {
  1625  				o.fs.putUploadBlock(buf)
  1626  				return err
  1627  			}
  1628  			return up.Stream(buf)
  1629  		} else if err == io.EOF || err == io.ErrUnexpectedEOF {
  1630  			fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
  1631  			defer o.fs.putUploadBlock(buf)
  1632  			size = int64(n)
  1633  			in = bytes.NewReader(buf[:n])
  1634  		} else {
  1635  			return err
  1636  		}
  1637  	} else if size > int64(o.fs.opt.UploadCutoff) {
  1638  		up, err := o.fs.newLargeUpload(ctx, o, in, src)
  1639  		if err != nil {
  1640  			return err
  1641  		}
  1642  		return up.Upload()
  1643  	}
  1644  
  1645  	modTime := src.ModTime(ctx)
  1646  
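	// If the source can't give us a SHA1 up front we use b2's
	// "hex_digits_at_end" mode: that literal string goes in the
	// X-Bz-Content-Sha1 header and the 40 hex digits of the SHA1 are
	// appended to the body, so the declared size grows to match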
  1647  	calculatedSha1, _ := src.Hash(ctx, hash.SHA1)
  1648  	if calculatedSha1 == "" {
  1649  		calculatedSha1 = "hex_digits_at_end"
  1650  		har := newHashAppendingReader(in, sha1.New())
  1651  		size += int64(har.AdditionalLength())
  1652  		in = har
  1653  	}
  1654  
  1655  	// Get upload URL
  1656  	upload, err := o.fs.getUploadURL()
  1657  	if err != nil {
  1658  		return err
  1659  	}
  1660  	defer func() {
  1661  		// return the upload URL like this because the retry logic below may nil it out
  1662  		o.fs.returnUploadURL(upload)
  1663  	}()
  1664  
  1665  	// Headers for upload file
  1666  	//
  1667  	// Authorization
  1668  	// required
  1669  	// An upload authorization token, from b2_get_upload_url.
  1670  	//
  1671  	// X-Bz-File-Name
  1672  	// required
  1673  	//
  1674  	// The name of the file, in percent-encoded UTF-8. See Files for requirements on file names. See String Encoding.
  1675  	//
  1676  	// Content-Type
  1677  	// required
  1678  	//
  1679  	// The MIME type of the content of the file, which will be returned in
  1680  	// the Content-Type header when downloading the file. Use the
  1681  	// Content-Type b2/x-auto to automatically set the stored Content-Type
  1682  	// post upload. In the case where a file extension is absent or the
  1683  	// lookup fails, the Content-Type is set to application/octet-stream. The
  1684  	// Content-Type mappings can be perused in the b2 documentation.
  1685  	//
  1686  	// X-Bz-Content-Sha1
  1687  	// required
  1688  	//
  1689  	// The SHA1 checksum of the content of the file. B2 will check this when
  1690  	// the file is uploaded, to make sure that the file arrived correctly. It
  1691  	// will be returned in the X-Bz-Content-Sha1 header when the file is
  1692  	// downloaded.
  1693  	//
  1694  	// X-Bz-Info-src_last_modified_millis
  1695  	// optional
  1696  	//
  1697  	// If the original source of the file being uploaded has a last modified
  1698  	// time concept, Backblaze recommends using this spelling of one of your
  1699  	// ten X-Bz-Info-* headers (see below). Using a standard spelling allows
  1700  	// different B2 clients and the B2 web user interface to interoperate
  1701  	// correctly. The value should be a base 10 number which represents a UTC
  1702  	// time when the original source file was last modified. It is a base 10
  1703  	// number of milliseconds since midnight, January 1, 1970 UTC. This fits
  1704  	// in a 64 bit integer such as the type "long" in the programming
  1705  	// language Java. It is intended to be compatible with Java's time
  1706  	// long. For example, it can be passed directly into the Java call
  1707  	// Date.setTime(long time).
  1708  	//
  1709  	// X-Bz-Info-*
  1710  	// optional
  1711  	//
  1712  	// Up to 10 of these headers may be present. The * part of the header
  1713  	// name is replaced with the name of a custom field in the file
  1714  	// information stored with the file, and the value is an arbitrary UTF-8
  1715  	// string, percent-encoded. The same info headers sent with the upload
  1716  	// will be returned with the download.
  1717  
  1718  	opts := rest.Opts{
  1719  		Method:  "POST",
  1720  		RootURL: upload.UploadURL,
  1721  		Body:    in,
  1722  		ExtraHeaders: map[string]string{
  1723  			"Authorization":  upload.AuthorizationToken,
  1724  			"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
  1725  			"Content-Type":   fs.MimeType(ctx, src),
  1726  			sha1Header:       calculatedSha1,
  1727  			timeHeader:       timeString(modTime),
  1728  		},
  1729  		ContentLength: &size,
  1730  	}
  1731  	var response api.FileInfo
  1732  	// Don't retry, return a retry error instead
  1733  	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
  1734  		resp, err := o.fs.srv.CallJSON(&opts, nil, &response)
  1735  		retry, err := o.fs.shouldRetry(resp, err)
  1736  		// On retryable error clear UploadURL
  1737  		if retry {
  1738  			fs.Debugf(o, "Clearing upload URL because of error: %v", err)
  1739  			upload = nil
  1740  		}
  1741  		return retry, err
  1742  	})
  1743  	if err != nil {
  1744  		return err
  1745  	}
  1746  	return o.decodeMetaDataFileInfo(&response)
  1747  }
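
// To summarise the routing in Update above (illustrative, not part of
// the backend; uploadPathSketch is a hypothetical helper and the
// defaults come from the constants at the top of this file):
func uploadPathSketch(size int64, cutoff fs.SizeSuffix) string {
	switch {
	case size < 0:
		// Unknown size: read one chunk, stream if more data follows,
		// otherwise fall back to a direct upload of that single chunk
		return "peek one chunk (96M default), then stream or direct"
	case size > int64(cutoff):
		return "large file (multipart) upload"
	default:
		return "single POST to the upload URL"
	}
}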
  1748  
  1749  // Remove an object - this hides the file unless --b2-hard-delete is set, in which case the version is deleted
  1750  func (o *Object) Remove(ctx context.Context) error {
  1751  	if o.fs.opt.Versions {
  1752  		return errNotWithVersions
  1753  	}
  1754  	if o.fs.opt.HardDelete {
  1755  		return o.fs.deleteByID(o.id, o.fs.root+o.remote)
  1756  	}
  1757  	return o.fs.hide(o.fs.root + o.remote)
  1758  }
  1759  
  1760  // MimeType of an Object if known, "" otherwise
  1761  func (o *Object) MimeType(ctx context.Context) string {
  1762  	return o.mimeType
  1763  }
  1764  
  1765  // ID returns the ID of the Object if known, or "" if not
  1766  func (o *Object) ID() string {
  1767  	return o.id
  1768  }
  1769  
  1770  // Check the interfaces are satisfied
  1771  var (
  1772  	_ fs.Fs           = &Fs{}
  1773  	_ fs.Purger       = &Fs{}
  1774  	_ fs.Copier       = &Fs{}
  1775  	_ fs.PutStreamer  = &Fs{}
  1776  	_ fs.CleanUpper   = &Fs{}
  1777  	_ fs.ListRer      = &Fs{}
  1778  	_ fs.PublicLinker = &Fs{}
  1779  	_ fs.Object       = &Object{}
  1780  	_ fs.MimeTyper    = &Object{}
  1781  	_ fs.IDer         = &Object{}
  1782  )