github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/qingstor/qingstor.go

     1  // Package qingstor provides an interface to QingStor object storage
     2  // Home: https://www.qingcloud.com/
     3  
     4  // +build !plan9
     5  
     6  package qingstor
     7  
     8  import (
     9  	"context"
    10  	"fmt"
    11  	"io"
    12  	"net/http"
    13  	"path"
    14  	"regexp"
    15  	"strconv"
    16  	"strings"
    17  	"time"
    18  
    19  	"github.com/pkg/errors"
    20  	"github.com/rclone/rclone/fs"
    21  	"github.com/rclone/rclone/fs/config"
    22  	"github.com/rclone/rclone/fs/config/configmap"
    23  	"github.com/rclone/rclone/fs/config/configstruct"
    24  	"github.com/rclone/rclone/fs/fshttp"
    25  	"github.com/rclone/rclone/fs/hash"
    26  	"github.com/rclone/rclone/fs/walk"
    27  	"github.com/rclone/rclone/lib/bucket"
    28  	"github.com/rclone/rclone/lib/encoder"
    29  	qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
    30  	qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
    31  	qs "github.com/yunify/qingstor-sdk-go/v3/service"
    32  )
    33  
    34  // Register with Fs
    35  func init() {
    36  	fs.Register(&fs.RegInfo{
    37  		Name:        "qingstor",
    38  		Description: "QingCloud Object Storage",
    39  		NewFs:       NewFs,
    40  		Options: []fs.Option{{
    41  			Name:    "env_auth",
    42  			Help:    "Get QingStor credentials from the runtime environment. Only applies if access_key_id and secret_access_key are blank.",
    43  			Default: false,
    44  			Examples: []fs.OptionExample{{
    45  				Value: "false",
    46  				Help:  "Enter QingStor credentials in the next step",
    47  			}, {
    48  				Value: "true",
    49  				Help:  "Get QingStor credentials from the environment (env vars or IAM)",
    50  			}},
    51  		}, {
    52  			Name: "access_key_id",
    53  			Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
    54  		}, {
    55  			Name: "secret_access_key",
    56  			Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
    57  		}, {
    58  			Name: "endpoint",
    59  			Help: "Enter an endpoint URL to connect to the QingStor API.\nLeave blank to use the default value \"https://qingstor.com:443\".",
    60  		}, {
    61  			Name: "zone",
    62  			Help: "Zone to connect to.\nDefault is \"pek3a\".",
    63  			Examples: []fs.OptionExample{{
    64  				Value: "pek3a",
    65  				Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
    66  			}, {
    67  				Value: "sh1a",
    68  				Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
    69  			}, {
    70  				Value: "gd2a",
    71  				Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
    72  			}},
    73  		}, {
    74  			Name:     "connection_retries",
    75  			Help:     "Number of connection retries.",
    76  			Default:  3,
    77  			Advanced: true,
    78  		}, {
    79  			Name: "upload_cutoff",
    80  			Help: `Cutoff for switching to chunked upload
    81  
    82  Any files larger than this will be uploaded in chunks of chunk_size.
    83  The minimum is 0 and the maximum is 5GB.`,
    84  			Default:  defaultUploadCutoff,
    85  			Advanced: true,
    86  		}, {
    87  			Name: "chunk_size",
    88  			Help: `Chunk size to use for uploading.
    89  
    90  When uploading files larger than upload_cutoff they will be uploaded
    91  as multipart uploads using this chunk size.
    92  
    93  Note that "--qingstor-upload-concurrency" chunks of this size are buffered
    94  in memory per transfer.
    95  
    96  If you are transferring large files over high speed links and you have
    97  enough memory, then increasing this will speed up the transfers.`,
    98  			Default:  minChunkSize,
    99  			Advanced: true,
   100  		}, {
   101  			Name: "upload_concurrency",
   102  			Help: `Concurrency for multipart uploads.
   103  
   104  This is the number of chunks of the same file that are uploaded
   105  concurrently.
   106  
   107  NB if you set this to > 1 then the checksums of multipart uploads
   108  become corrupted (the uploads themselves are not corrupted though).
   109  
   110  If you are uploading small numbers of large files over a high speed link
   111  and these uploads do not fully utilize your bandwidth, then increasing
   112  this may help to speed up the transfers.`,
   113  			Default:  1,
   114  			Advanced: true,
   115  		}, {
   116  			Name:     config.ConfigEncoding,
   117  			Help:     config.ConfigEncodingHelp,
   118  			Advanced: true,
   119  			Default: (encoder.EncodeInvalidUtf8 |
   120  				encoder.EncodeCtl |
   121  				encoder.EncodeSlash),
   122  		}},
   123  	})
   124  }
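
        // A hedged sketch of the rclone.conf stanza the options above produce
        // (all values are placeholders, not real credentials):
        //
        //	[myqingstor]
        //	type = qingstor
        //	env_auth = false
        //	access_key_id = AKIDEXAMPLE
        //	secret_access_key = SECRETEXAMPLE
        //	zone = pek3a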
   125  
   126  // Constants
   127  const (
   128  	listLimitSize       = 1000                   // Number of items to read at once
   129  	maxSizeForCopy      = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
   130  	minChunkSize        = fs.SizeSuffix(minMultiPartSize)
   131  	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
   132  	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
   133  )
   134  
   135  // timestampToTime converts a Unix timestamp to a UTC time.Time
   136  func timestampToTime(tp int64) time.Time {
   137  	timeLayout := time.RFC3339Nano
   138  	ts := time.Unix(tp, 0).Format(timeLayout)
   139  	tm, _ := time.Parse(timeLayout, ts)
   140  	return tm.UTC()
   141  }
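
        // For example (the instant is just illustrative):
        //
        //	timestampToTime(1600000000) // 2020-09-13 12:26:40 +0000 UTC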
   142  
   143  // Options defines the configuration for this backend
   144  type Options struct {
   145  	EnvAuth           bool                 `config:"env_auth"`
   146  	AccessKeyID       string               `config:"access_key_id"`
   147  	SecretAccessKey   string               `config:"secret_access_key"`
   148  	Endpoint          string               `config:"endpoint"`
   149  	Zone              string               `config:"zone"`
   150  	ConnectionRetries int                  `config:"connection_retries"`
   151  	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
   152  	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
   153  	UploadConcurrency int                  `config:"upload_concurrency"`
   154  	Enc               encoder.MultiEncoder `config:"encoding"`
   155  }
   156  
   157  // Fs represents a remote qingstor server
   158  type Fs struct {
   159  	name          string        // The name of the remote
   160  	root          string        // The root is a subdir and may point at a single object
   161  	opt           Options       // parsed options
   162  	features      *fs.Features  // optional features
   163  	svc           *qs.Service   // The connection to the qingstor server
   164  	zone          string        // The zone we are working on
   165  	rootBucket    string        // bucket part of root (if any)
   166  	rootDirectory string        // directory part of root (if any)
   167  	cache         *bucket.Cache // cache for bucket creation status
   168  }
   169  
   170  // Object describes a qingstor object
   171  type Object struct {
   172  	// Will definitely have everything but mimeType which may be ""
   173  	//
   174  	// List will read everything but mimeType - to fill
   175  	// that in you need to call readMetaData
   176  	fs           *Fs       // what this object is part of
   177  	remote       string    // The remote path of the object
   178  	etag         string    // md5sum of the object
   179  	size         int64     // length of the object content
   180  	mimeType     string    // ContentType of object - may be ""
   181  	lastModified time.Time // Last modified
   182  	encrypted    bool      // whether the object is encrypted
   183  	algo         string    // Custom encryption algorithm
   184  }
   185  
   186  // ------------------------------------------------------------
   187  
   188  // parsePath parses a remote 'url'
   189  func parsePath(path string) (root string) {
   190  	root = strings.Trim(path, "/")
   191  	return
   192  }
   193  
   194  // split returns bucket and bucketPath from the rootRelativePath
   195  // relative to f.root
   196  func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
   197  	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
   198  	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
   199  }
   200  
   201  // split returns bucket and bucketPath from the object
   202  func (o *Object) split() (bucket, bucketPath string) {
   203  	return o.fs.split(o.remote)
   204  }
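
        // A hedged illustration of the split helpers: with f.root set to
        // "mybucket/dir" (a placeholder), splitting behaves like:
        //
        //	bucketName, bucketPath := f.split("file.txt")
        //	// bucketName == "mybucket", bucketPath == "dir/file.txt"
        //
        // with both halves passed through the configured name encoder.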
   205  
   206  // qsParseEndpoint splits a URL into three parts: protocol, host and port
   207  func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
   208  	/*
   209  	  Pattern to match an endpoint,
   210  	  eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", "443"
   211  	      "http(s)://qingstor.com"     --> "http(s)", "qingstor.com", ""
   212  	      "qingstor.com"               --> "", "qingstor.com", ""
   213  	*/
   214  	defer func() {
   215  		if r := recover(); r != nil {
   216  			switch x := r.(type) {
   217  			case error:
   218  				err = x
   219  			default:
   220  				err = fmt.Errorf("%v", x)
   221  			}
   222  		}
   223  	}()
   224  	var matcher = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
   225  	parts := matcher.FindStringSubmatch(endpoint)
   226  	protocol, host, port = parts[1], parts[2], parts[3]
   227  	return
   228  }
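
        // A minimal usage sketch (the endpoint value is a placeholder):
        //
        //	protocol, host, port, err := qsParseEndpoint("https://qingstor.com:443")
        //	// protocol == "https", host == "qingstor.com", port == "443", err == nil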
   229  
   230  // qsServiceConnection makes a connection to QingStor
   231  func qsServiceConnection(opt *Options) (*qs.Service, error) {
   232  	accessKeyID := opt.AccessKeyID
   233  	secretAccessKey := opt.SecretAccessKey
   234  
   235  	switch {
   236  	case opt.EnvAuth:
   237  		// No need for empty checks if "env_auth" is true
   238  	case accessKeyID == "" && secretAccessKey == "":
   239  		// if no access key/secret is provided then fall back to anonymous access
   240  	case accessKeyID == "":
   241  		return nil, errors.New("access_key_id not found")
   242  	case secretAccessKey == "":
   243  		return nil, errors.New("secret_access_key not found")
   244  	}
   245  
   246  	protocol := "https"
   247  	host := "qingstor.com"
   248  	port := 443
   249  
   250  	endpoint := opt.Endpoint
   251  	if endpoint != "" {
   252  		_protocol, _host, _port, err := qsParseEndpoint(endpoint)
   253  
   254  		if err != nil {
   255  			return nil, fmt.Errorf("the endpoint %q has an invalid format", endpoint)
   256  		}
   257  
   258  		if _protocol != "" {
   259  			protocol = _protocol
   260  		}
   261  		host = _host
   262  		if _port != "" {
   263  			port, _ = strconv.Atoi(_port)
   264  		} else if protocol == "http" {
   265  			port = 80
   266  		}
   267  
   268  	}
   269  
   270  	cf, err := qsConfig.NewDefault()
   271  	if err != nil {
   272  		return nil, err
   273  	}
   274  	cf.AccessKeyID = accessKeyID
   275  	cf.SecretAccessKey = secretAccessKey
   276  	cf.Protocol = protocol
   277  	cf.Host = host
   278  	cf.Port = port
   279  	// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
   280  	cf.Connection = fshttp.NewClient(fs.Config)
   281  
   282  	return qs.Init(cf)
   283  }
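
        // A hedged sketch of building a connection by hand (the keys are
        // placeholders, not real credentials):
        //
        //	svc, err := qsServiceConnection(&Options{
        //		AccessKeyID:     "AKIDEXAMPLE",
        //		SecretAccessKey: "SECRETEXAMPLE",
        //		Endpoint:        "https://qingstor.com:443",
        //	})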
   284  
   285  func checkUploadChunkSize(cs fs.SizeSuffix) error {
   286  	if cs < minChunkSize {
   287  		return errors.Errorf("%s is less than %s", cs, minChunkSize)
   288  	}
   289  	return nil
   290  }
   291  
   292  func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   293  	err = checkUploadChunkSize(cs)
   294  	if err == nil {
   295  		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
   296  	}
   297  	return
   298  }
   299  
   300  func checkUploadCutoff(cs fs.SizeSuffix) error {
   301  	if cs > maxUploadCutoff {
   302  		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
   303  	}
   304  	return nil
   305  }
   306  
   307  func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
   308  	err = checkUploadCutoff(cs)
   309  	if err == nil {
   310  		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
   311  	}
   312  	return
   313  }
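
        // For instance, the defaults satisfy both validators:
        //
        //	_ = checkUploadChunkSize(minChunkSize)        // nil
        //	_ = checkUploadCutoff(defaultUploadCutoff)    // nil
        //	err := checkUploadCutoff(maxUploadCutoff + 1) // non-nil: above the 5GB cap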
   314  
   315  // setRoot changes the root of the Fs
   316  func (f *Fs) setRoot(root string) {
   317  	f.root = parsePath(root)
   318  	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
   319  }
   320  
   321  // NewFs constructs an Fs from the path, bucket:path
   322  func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
   323  	// Parse config into Options struct
   324  	opt := new(Options)
   325  	err := configstruct.Set(m, opt)
   326  	if err != nil {
   327  		return nil, err
   328  	}
   329  	err = checkUploadChunkSize(opt.ChunkSize)
   330  	if err != nil {
   331  		return nil, errors.Wrap(err, "qingstor: chunk size")
   332  	}
   333  	err = checkUploadCutoff(opt.UploadCutoff)
   334  	if err != nil {
   335  		return nil, errors.Wrap(err, "qingstor: upload cutoff")
   336  	}
   337  	svc, err := qsServiceConnection(opt)
   338  	if err != nil {
   339  		return nil, err
   340  	}
   341  
   342  	if opt.Zone == "" {
   343  		opt.Zone = "pek3a"
   344  	}
   345  
   346  	f := &Fs{
   347  		name:  name,
   348  		opt:   *opt,
   349  		svc:   svc,
   350  		zone:  opt.Zone,
   351  		cache: bucket.NewCache(),
   352  	}
   353  	f.setRoot(root)
   354  	f.features = (&fs.Features{
   355  		ReadMimeType:      true,
   356  		WriteMimeType:     true,
   357  		BucketBased:       true,
   358  		BucketBasedRootOK: true,
   359  	}).Fill(f)
   360  
   361  	if f.rootBucket != "" && f.rootDirectory != "" {
   362  		// Check to see if the object exists
   363  		bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone)
   364  		if err != nil {
   365  			return nil, err
   366  		}
   367  		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
   368  		_, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
   369  		if err == nil {
   370  			newRoot := path.Dir(f.root)
   371  			if newRoot == "." {
   372  				newRoot = ""
   373  			}
   374  			f.setRoot(newRoot)
   375  			// return an error with an fs which points to the parent
   376  			return f, fs.ErrorIsFile
   377  		}
   378  	}
   379  	return f, nil
   380  }
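
        // A hedged example of constructing the backend directly rather than via
        // the rclone config machinery (the remote name, bucket and settings are
        // placeholders):
        //
        //	m := configmap.Simple{"env_auth": "true", "zone": "pek3a"}
        //	f, err := NewFs("myqingstor", "mybucket/path", m)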
   381  
   382  // Name of the remote (as passed into NewFs)
   383  func (f *Fs) Name() string {
   384  	return f.name
   385  }
   386  
   387  // Root of the remote (as passed into NewFs)
   388  func (f *Fs) Root() string {
   389  	return f.root
   390  }
   391  
   392  // String converts this Fs to a string
   393  func (f *Fs) String() string {
   394  	if f.rootBucket == "" {
   395  		return "QingStor root"
   396  	}
   397  	if f.rootDirectory == "" {
   398  		return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
   399  	}
   400  	return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory)
   401  }
   402  
   403  // Precision of the remote
   404  func (f *Fs) Precision() time.Duration {
   405  	//return time.Nanosecond
   406  	// Modification times are not currently supported
   407  	return fs.ModTimeNotSupported
   408  }
   409  
   410  // Hashes returns the supported hash sets.
   411  func (f *Fs) Hashes() hash.Set {
   412  	return hash.Set(hash.MD5)
   413  	//return hash.HashSet(hash.HashNone)
   414  }
   415  
   416  // Features returns the optional features of this Fs
   417  func (f *Fs) Features() *fs.Features {
   418  	return f.features
   419  }
   420  
   421  // Put creates a new object
   422  func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
   423  	fsObj := &Object{
   424  		fs:     f,
   425  		remote: src.Remote(),
   426  	}
   427  	return fsObj, fsObj.Update(ctx, in, src, options...)
   428  }
   429  
   430  // Copy src to this remote using server side copy operations.
   431  //
   432  // This is stored with the remote path given
   433  //
   434  // It returns the destination Object and a possible error
   435  //
   436  // Will only be called if src.Fs().Name() == f.Name()
   437  //
   438  // If it isn't possible then return fs.ErrorCantCopy
   439  func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
   440  	dstBucket, dstPath := f.split(remote)
   441  	err := f.makeBucket(ctx, dstBucket)
   442  	if err != nil {
   443  		return nil, err
   444  	}
   445  	srcObj, ok := src.(*Object)
   446  	if !ok {
   447  		fs.Debugf(src, "Can't copy - not same remote type")
   448  		return nil, fs.ErrorCantCopy
   449  	}
   450  	srcBucket, srcPath := srcObj.split()
   451  	source := path.Join("/", srcBucket, srcPath)
   452  
   453  	// fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
   454  	req := qs.PutObjectInput{
   455  		XQSCopySource: &source,
   456  	}
   457  	bucketInit, err := f.svc.Bucket(dstBucket, f.zone)
   458  
   459  	if err != nil {
   460  		return nil, err
   461  	}
   462  	_, err = bucketInit.PutObject(dstPath, &req)
   463  	if err != nil {
   464  		// fs.Debugf(f, "Copy Failed, API Error: %v", err)
   465  		return nil, err
   466  	}
   467  	return f.NewObject(ctx, remote)
   468  }
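
        // For example (names are placeholders): copying "dir/src.txt" from bucket
        // "srcbucket" issues a PutObject on the destination key with
        // XQSCopySource set to "/srcbucket/dir/src.txt", so the data never
        // travels through the client.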
   469  
   470  // NewObject finds the Object at remote.  If it can't be found
   471  // it returns the error fs.ErrorObjectNotFound.
   472  func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
   473  	return f.newObjectWithInfo(remote, nil)
   474  }
   475  
   476  // Return an Object from a path
   477  //
   478  // If it can't be found it returns the error fs.ErrorObjectNotFound.
   479  func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
   480  	o := &Object{
   481  		fs:     f,
   482  		remote: remote,
   483  	}
   484  	if info != nil {
   485  		// Set info
   486  		if info.Size != nil {
   487  			o.size = *info.Size
   488  		}
   489  
   490  		if info.Etag != nil {
   491  			o.etag = qs.StringValue(info.Etag)
   492  		}
   493  		if info.Modified == nil {
   494  			fs.Logf(o, "Failed to read last modified")
   495  			o.lastModified = time.Now()
   496  		} else {
   497  			o.lastModified = timestampToTime(int64(*info.Modified))
   498  		}
   499  
   500  		if info.MimeType != nil {
   501  			o.mimeType = qs.StringValue(info.MimeType)
   502  		}
   503  
   504  		if info.Encrypted != nil {
   505  			o.encrypted = qs.BoolValue(info.Encrypted)
   506  		}
   507  
   508  	} else {
   509  		err := o.readMetaData() // reads info and meta, returning an error
   510  		if err != nil {
   511  			return nil, err
   512  		}
   513  	}
   514  	return o, nil
   515  }
   516  
   517  // listFn is called from list to handle an object.
   518  type listFn func(remote string, object *qs.KeyType, isDirectory bool) error
   519  
   520  // list the objects into the function supplied
   521  //
   522  // dir is the starting directory, "" for root
   523  //
   524  // Set recurse to read sub directories
   525  func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
   526  	if prefix != "" {
   527  		prefix += "/"
   528  	}
   529  	if directory != "" {
   530  		directory += "/"
   531  	}
   532  	delimiter := ""
   533  	if !recurse {
   534  		delimiter = "/"
   535  	}
   536  	maxLimit := int(listLimitSize)
   537  	var marker *string
   538  	for {
   539  		bucketInit, err := f.svc.Bucket(bucket, f.zone)
   540  		if err != nil {
   541  			return err
   542  		}
   543  		req := qs.ListObjectsInput{
   544  			Delimiter: &delimiter,
   545  			Prefix:    &directory,
   546  			Limit:     &maxLimit,
   547  			Marker:    marker,
   548  		}
   549  		resp, err := bucketInit.ListObjects(&req)
   550  		if err != nil {
   551  			if e, ok := err.(*qsErr.QingStorError); ok {
   552  				if e.StatusCode == http.StatusNotFound {
   553  					err = fs.ErrorDirNotFound
   554  				}
   555  			}
   556  			return err
   557  		}
   558  		if !recurse {
   559  			for _, commonPrefix := range resp.CommonPrefixes {
   560  				if commonPrefix == nil {
   561  					fs.Logf(f, "Nil common prefix received")
   562  					continue
   563  				}
   564  				remote := *commonPrefix
   565  				remote = f.opt.Enc.ToStandardPath(remote)
   566  				if !strings.HasPrefix(remote, prefix) {
   567  					fs.Logf(f, "Odd name received %q", remote)
   568  					continue
   569  				}
   570  				remote = remote[len(prefix):]
   571  				if addBucket {
   572  					remote = path.Join(bucket, remote)
   573  				}
   574  				if strings.HasSuffix(remote, "/") {
   575  					remote = remote[:len(remote)-1]
   576  				}
   577  				err = fn(remote, &qs.KeyType{Key: &remote}, true)
   578  				if err != nil {
   579  					return err
   580  				}
   581  			}
   582  		}
   583  
   584  		for _, object := range resp.Keys {
   585  			remote := qs.StringValue(object.Key)
   586  			remote = f.opt.Enc.ToStandardPath(remote)
   587  			if !strings.HasPrefix(remote, prefix) {
   588  				fs.Logf(f, "Odd name received %q", remote)
   589  				continue
   590  			}
   591  			remote = remote[len(prefix):]
   592  			if addBucket {
   593  				remote = path.Join(bucket, remote)
   594  			}
   595  			err = fn(remote, object, false)
   596  			if err != nil {
   597  				return err
   598  			}
   599  		}
   600  		if resp.HasMore != nil && !*resp.HasMore {
   601  			break
   602  		}
   603  		// Use NextMarker if set, otherwise stop to avoid looping forever
   604  		if resp.NextMarker == nil || *resp.NextMarker == "" {
   605  			fs.Errorf(f, "Expecting NextMarker but didn't find one")
   606  			break
   607  		} else {
   608  			marker = resp.NextMarker
   609  		}
   610  	}
   611  	return nil
   612  }
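
        // A sketch of driving list with a callback (bucket and directory are
        // placeholders):
        //
        //	err := f.list(ctx, "mybucket", "photos", "", false, false,
        //		func(remote string, object *qs.KeyType, isDirectory bool) error {
        //			fs.Debugf(f, "entry %q dir=%v", remote, isDirectory)
        //			return nil
        //		})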
   613  
   614  // Convert a list item into a BasicInfo
   615  func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
   616  	if isDirectory {
   617  		size := int64(0)
   618  		if object.Size != nil {
   619  			size = *object.Size
   620  		}
   621  		d := fs.NewDir(remote, time.Time{}).SetSize(size)
   622  		return d, nil
   623  	}
   624  	o, err := f.newObjectWithInfo(remote, object)
   625  	if err != nil {
   626  		return nil, err
   627  	}
   628  	return o, nil
   629  }
   630  
   631  // listDir lists files and directories to out
   632  func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
   633  	// List the objects and directories
   634  	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
   635  		entry, err := f.itemToDirEntry(remote, object, isDirectory)
   636  		if err != nil {
   637  			return err
   638  		}
   639  		if entry != nil {
   640  			entries = append(entries, entry)
   641  		}
   642  		return nil
   643  	})
   644  	if err != nil {
   645  		return nil, err
   646  	}
   647  	// bucket must be present if listing succeeded
   648  	f.cache.MarkOK(bucket)
   649  	return entries, nil
   650  }
   651  
   652  // listBuckets lists the buckets to out
   653  func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
   654  	req := qs.ListBucketsInput{
   655  		Location: &f.zone,
   656  	}
   657  	resp, err := f.svc.ListBuckets(&req)
   658  	if err != nil {
   659  		return nil, err
   660  	}
   661  
   662  	for _, bucket := range resp.Buckets {
   663  		d := fs.NewDir(f.opt.Enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
   664  		entries = append(entries, d)
   665  	}
   666  	return entries, nil
   667  }
   668  
   669  // List the objects and directories in dir into entries.  The
   670  // entries can be returned in any order but should be for a
   671  // complete directory.
   672  //
   673  // dir should be "" to list the root, and should not have
   674  // trailing slashes.
   675  //
   676  // This should return ErrDirNotFound if the directory isn't
   677  // found.
   678  func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
   679  	bucket, directory := f.split(dir)
   680  	if bucket == "" {
   681  		if directory != "" {
   682  			return nil, fs.ErrorListBucketRequired
   683  		}
   684  		return f.listBuckets(ctx)
   685  	}
   686  	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
   687  }
   688  
   689  // ListR lists the objects and directories of the Fs starting
   690  // from dir recursively into out.
   691  //
   692  // dir should be "" to start from the root, and should not
   693  // have trailing slashes.
   694  //
   695  // This should return ErrDirNotFound if the directory isn't
   696  // found.
   697  //
   698  // It should call callback for each tranche of entries read.
   699  // These need not be returned in any particular order.  If
   700  // callback returns an error then the listing will stop
   701  // immediately.
   702  //
   703  // Don't implement this unless you have a more efficient way
   704  // of listing recursively than doing a directory traversal.
   705  func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
   706  	bucket, directory := f.split(dir)
   707  	list := walk.NewListRHelper(callback)
   708  	listR := func(bucket, directory, prefix string, addBucket bool) error {
   709  		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
   710  			entry, err := f.itemToDirEntry(remote, object, isDirectory)
   711  			if err != nil {
   712  				return err
   713  			}
   714  			return list.Add(entry)
   715  		})
   716  	}
   717  	if bucket == "" {
   718  		entries, err := f.listBuckets(ctx)
   719  		if err != nil {
   720  			return err
   721  		}
   722  		for _, entry := range entries {
   723  			err = list.Add(entry)
   724  			if err != nil {
   725  				return err
   726  			}
   727  			bucket := entry.Remote()
   728  			err = listR(bucket, "", f.rootDirectory, true)
   729  			if err != nil {
   730  				return err
   731  			}
   732  			// bucket must be present if listing succeeded
   733  			f.cache.MarkOK(bucket)
   734  		}
   735  	} else {
   736  		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
   737  		if err != nil {
   738  			return err
   739  		}
   740  		// bucket must be present if listing succeeded
   741  		f.cache.MarkOK(bucket)
   742  	}
   743  	return list.Flush()
   744  }
   745  
   746  // Mkdir creates the bucket if it doesn't exist
   747  func (f *Fs) Mkdir(ctx context.Context, dir string) error {
   748  	bucket, _ := f.split(dir)
   749  	return f.makeBucket(ctx, bucket)
   750  }
   751  
   752  // makeBucket creates the bucket if it doesn't exist
   753  func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
   754  	return f.cache.Create(bucket, func() error {
   755  		bucketInit, err := f.svc.Bucket(bucket, f.zone)
   756  		if err != nil {
   757  			return err
   758  		}
   759  		/* When a bucket is deleted, QingStor takes about 60 seconds to sync its
   760  		status, so we need to wait for the sync to finish before operating on a
   761  		just-deleted bucket */
   762  		wasDeleted := false
   763  		retries := 0
   764  		for retries <= 120 {
   765  			statistics, err := bucketInit.GetStatistics()
   766  			if statistics == nil || err != nil {
   767  				break
   768  			}
   769  			switch *statistics.Status {
   770  			case "deleted":
   771  				fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries)
   772  				time.Sleep(time.Second * 1)
   773  				retries++
   774  				wasDeleted = true
   775  				continue
   776  			default:
   777  				break
   778  			}
   779  			break
   780  		}
   781  
   782  		retries = 0
   783  		for retries <= 120 {
   784  			_, err = bucketInit.Put()
   785  			if e, ok := err.(*qsErr.QingStorError); ok {
   786  				if e.StatusCode == http.StatusConflict {
   787  					if wasDeleted {
   788  						fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries)
   789  						time.Sleep(time.Second * 1)
   790  						retries++
   791  						continue
   792  					}
   793  					err = nil
   794  				}
   795  			}
   796  			break
   797  		}
   798  		return err
   799  	}, nil)
   800  }
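
        // With a one second sleep per attempt, each retry loop above waits up to
        // two minutes, comfortably covering the roughly 60 second window QingStor
        // needs to sync a deleted bucket.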
   801  
   802  // bucketIsEmpty checks if the bucket is empty
   803  func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
   804  	bucketInit, err := f.svc.Bucket(bucket, f.zone)
   805  	if err != nil {
   806  		return true, err
   807  	}
   808  
   809  	statistics, err := bucketInit.GetStatistics()
   810  	if err != nil {
   811  		return true, err
   812  	}
   813  
   814  	if *statistics.Count == 0 {
   815  		return true, nil
   816  	}
   817  	return false, nil
   818  }
   819  
   820  // Rmdir deletes the bucket if it is empty
   821  func (f *Fs) Rmdir(ctx context.Context, dir string) error {
   822  	bucket, directory := f.split(dir)
   823  	if bucket == "" || directory != "" {
   824  		return nil
   825  	}
   826  	isEmpty, err := f.bucketIsEmpty(bucket)
   827  	if err != nil {
   828  		return err
   829  	}
   830  	if !isEmpty {
   831  		// fs.Debugf(f, "The bucket %s you tried to delete not empty.", bucket)
   832  		return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
   833  	}
   834  	return f.cache.Remove(bucket, func() error {
   835  		// fs.Debugf(f, "Deleting the bucket %s", bucket)
   836  		bucketInit, err := f.svc.Bucket(bucket, f.zone)
   837  		if err != nil {
   838  			return err
   839  		}
   840  		retries := 0
   841  		for retries <= 10 {
   842  			_, delErr := bucketInit.Delete()
   843  			if delErr != nil {
   844  				if e, ok := delErr.(*qsErr.QingStorError); ok {
   845  					switch e.Code {
   846  					// The status of "lease" takes a few seconds to "ready" when creating a new bucket
   847  					// wait for lease status ready
   848  					case "lease_not_ready":
   849  						fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
   850  						retries++
   851  						time.Sleep(time.Second * 1)
   852  						continue
   853  					default:
   854  						err = e
   855  						break
   856  					}
   857  				}
   858  			} else {
   859  				err = delErr
   860  			}
   861  			break
   862  		}
   863  		return err
   864  	})
   865  }
   866  
   867  // cleanUpBucket removes all pending multipart uploads for a given bucket
   868  func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
   869  	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
   870  	bucketInit, err := f.svc.Bucket(bucket, f.zone)
   871  	if err != nil {
   872  		return err
   873  	}
   874  	maxLimit := int(listLimitSize)
   875  	var marker *string
   876  	for {
   877  		req := qs.ListMultipartUploadsInput{
   878  			Limit:     &maxLimit,
   879  			KeyMarker: marker,
   880  		}
   881  		var resp *qs.ListMultipartUploadsOutput
   882  		resp, err = bucketInit.ListMultipartUploads(&req)
   883  		if err != nil {
   884  			return errors.Wrap(err, "clean up bucket list multipart uploads")
   885  		}
   886  		for _, upload := range resp.Uploads {
   887  			if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
   888  				age := time.Since(*upload.Created)
   889  				if age > 24*time.Hour {
   890  					fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
   891  					req := qs.AbortMultipartUploadInput{
   892  						UploadID: upload.UploadID,
   893  					}
   894  					_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
   895  					if abortErr != nil {
   896  						err = errors.Wrapf(abortErr, "failed to remove multipart upload for %q", *upload.Key)
   897  						fs.Errorf(f, "%v", err)
   898  					}
   899  				} else {
   900  					fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
   901  				}
   902  			}
   903  		}
   904  		if resp.HasMore != nil && !*resp.HasMore {
   905  			break
   906  		}
   907  		// Use NextKeyMarker if set, otherwise stop to avoid looping forever
   908  		if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
   909  			fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
   910  			break
   911  		} else {
   912  			marker = resp.NextKeyMarker
   913  		}
   914  	}
   915  	return err
   916  }
   917  
   918  // CleanUp removes all pending multipart uploads
   919  func (f *Fs) CleanUp(ctx context.Context) (err error) {
   920  	if f.rootBucket != "" {
   921  		return f.cleanUpBucket(ctx, f.rootBucket)
   922  	}
   923  	entries, err := f.listBuckets(ctx)
   924  	if err != nil {
   925  		return err
   926  	}
   927  	for _, entry := range entries {
   928  		cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
   929  		if cleanErr != nil {
   930  			fs.Errorf(f, "Failed to cleanup bucket %q: %v", entry.Remote(), cleanErr)
   931  			err = cleanErr
   932  		}
   933  	}
   934  	return err
   935  }
   936  
   937  // readMetaData gets the metadata if it hasn't already been fetched
   938  //
   939  // it also sets the info
   940  func (o *Object) readMetaData() (err error) {
   941  	bucket, bucketPath := o.split()
   942  	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
   943  	if err != nil {
   944  		return err
   945  	}
   946  	// fs.Debugf(o, "Read metadata of key: %s", key)
   947  	resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{})
   948  	if err != nil {
   949  		// fs.Debugf(o, "Read metadata failed, API Error: %v", err)
   950  		if e, ok := err.(*qsErr.QingStorError); ok {
   951  			if e.StatusCode == http.StatusNotFound {
   952  				return fs.ErrorObjectNotFound
   953  			}
   954  		}
   955  		return err
   956  	}
   957  	// Ignore missing Content-Length assuming it is 0
   958  	if resp.ContentLength != nil {
   959  		o.size = *resp.ContentLength
   960  	}
   961  
   962  	if resp.ETag != nil {
   963  		o.etag = qs.StringValue(resp.ETag)
   964  	}
   965  
   966  	if resp.LastModified == nil {
   967  		fs.Logf(o, "Failed to read last modified from HEAD")
   968  		o.lastModified = time.Now()
   969  	} else {
   970  		o.lastModified = *resp.LastModified
   971  	}
   972  
   973  	if resp.ContentType != nil {
   974  		o.mimeType = qs.StringValue(resp.ContentType)
   975  	}
   976  
   977  	if resp.XQSEncryptionCustomerAlgorithm != nil {
   978  		o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
   979  		o.encrypted = true
   980  	}
   981  
   982  	return nil
   983  }
   984  
   985  // ModTime returns the modification date of the file
   986  // It should return a best guess if one isn't available
   987  func (o *Object) ModTime(ctx context.Context) time.Time {
   988  	err := o.readMetaData()
   989  	if err != nil {
   990  		fs.Logf(o, "Failed to read metadata, %v", err)
   991  		return time.Now()
   992  	}
   993  	modTime := o.lastModified
   994  	return modTime
   995  }
   996  
   997  // SetModTime sets the modification time of the local fs object
   998  func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
   999  	err := o.readMetaData()
  1000  	if err != nil {
  1001  		return err
  1002  	}
  1003  	o.lastModified = modTime
  1004  	mimeType := fs.MimeType(ctx, o)
  1005  
  1006  	if o.size >= maxSizeForCopy {
  1007  		fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
  1008  		return nil
  1009  	}
  1010  	// Copy the object to itself to update the metadata
  1011  	bucket, bucketPath := o.split()
  1012  	sourceKey := path.Join("/", bucket, bucketPath)
  1013  
  1014  	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
  1015  	if err != nil {
  1016  		return err
  1017  	}
  1018  
  1019  	req := qs.PutObjectInput{
  1020  		XQSCopySource: &sourceKey,
  1021  		ContentType:   &mimeType,
  1022  	}
  1023  	_, err = bucketInit.PutObject(bucketPath, &req)
  1024  
  1025  	return err
  1026  }
  1027  
  1028  // Open opens the file for read.  Call Close() on the returned io.ReadCloser
  1029  func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
  1030  	bucket, bucketPath := o.split()
  1031  	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
  1032  	if err != nil {
  1033  		return nil, err
  1034  	}
  1035  
  1036  	req := qs.GetObjectInput{}
  1037  	fs.FixRangeOption(options, o.size)
  1038  	for _, option := range options {
  1039  		switch option.(type) {
  1040  		case *fs.RangeOption, *fs.SeekOption:
  1041  			_, value := option.Header()
  1042  			req.Range = &value
  1043  		default:
  1044  			if option.Mandatory() {
  1045  				fs.Logf(o, "Unsupported mandatory option: %v", option)
  1046  			}
  1047  		}
  1048  	}
  1049  	resp, err := bucketInit.GetObject(bucketPath, &req)
  1050  	if err != nil {
  1051  		return nil, err
  1052  	}
  1053  	return resp.Body, nil
  1054  }
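
        // A hedged sketch of a ranged read (the offsets are illustrative):
        //
        //	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 99})
        //	if err == nil {
        //		// rc streams the first 100 bytes of the object; close it when done
        //		err = rc.Close()
        //	}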
  1055  
  1056  // Update in to the object
  1057  func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
  1058  	// The maximum size of upload object is multipartUploadSize * MaxMultipleParts
  1059  	bucket, bucketPath := o.split()
  1060  	err := o.fs.makeBucket(ctx, bucket)
  1061  	if err != nil {
  1062  		return err
  1063  	}
  1064  
  1065  	// Guess the content type
  1066  	mimeType := fs.MimeType(ctx, src)
  1067  
  1068  	req := uploadInput{
  1069  		body:        in,
  1070  		qsSvc:       o.fs.svc,
  1071  		bucket:      bucket,
  1072  		zone:        o.fs.zone,
  1073  		key:         bucketPath,
  1074  		mimeType:    mimeType,
  1075  		partSize:    int64(o.fs.opt.ChunkSize),
  1076  		concurrency: o.fs.opt.UploadConcurrency,
  1077  	}
  1078  	uploader := newUploader(&req)
  1079  
  1080  	size := src.Size()
  1081  	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
  1082  	if multipart {
  1083  		err = uploader.upload()
  1084  	} else {
  1085  		err = uploader.singlePartUpload(in, size)
  1086  	}
  1087  	if err != nil {
  1088  		return err
  1089  	}
  1090  	// Read Metadata of object
  1091  	err = o.readMetaData()
  1092  	return err
  1093  }
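
        // For example, with the default upload_cutoff of 200M, a 100M file takes
        // the single part path, while a 1G file or a stream of unknown size
        // (size < 0) goes through the multipart uploader in chunk_size parts.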
  1094  
  1095  // Remove this object
  1096  func (o *Object) Remove(ctx context.Context) error {
  1097  	bucket, bucketPath := o.split()
  1098  	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
  1099  	if err != nil {
  1100  		return err
  1101  	}
  1102  	_, err = bucketInit.DeleteObject(bucketPath)
  1103  	return err
  1104  }
  1105  
  1106  // Fs returns read only access to the Fs that this object is part of
  1107  func (o *Object) Fs() fs.Info {
  1108  	return o.fs
  1109  }
  1110  
  1111  var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
  1112  
  1113  // Hash returns the selected checksum of the file
  1114  // If no checksum is available it returns ""
  1115  func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
  1116  	if t != hash.MD5 {
  1117  		return "", hash.ErrUnsupported
  1118  	}
  1119  	etag := strings.Trim(strings.ToLower(o.etag), `"`)
  1120  	// Check the etag is a valid md5sum
  1121  	if !matchMd5.MatchString(etag) {
  1122  		fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
  1123  		return "", nil
  1124  	}
  1125  	return etag, nil
  1126  }
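
        // For instance, an etag of "9e107d9d372bb6826bd81d3542a419d6" passes the
        // matchMd5 check and is returned as the MD5 hash, while a multipart style
        // etag ending in a part count such as "-3" fails the match, so Hash
        // returns "" with no error.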
  1127  
  1128  // Storable says whether this object can be stored
  1129  func (o *Object) Storable() bool {
  1130  	return true
  1131  }
  1132  
  1133  // String returns a description of the Object
  1134  func (o *Object) String() string {
  1135  	if o == nil {
  1136  		return "<nil>"
  1137  	}
  1138  	return o.remote
  1139  }
  1140  
  1141  // Remote returns the remote path
  1142  func (o *Object) Remote() string {
  1143  	return o.remote
  1144  }
  1145  
  1146  // Size returns the size of the file
  1147  func (o *Object) Size() int64 {
  1148  	return o.size
  1149  }
  1150  
  1151  // MimeType of an Object if known, "" otherwise
  1152  func (o *Object) MimeType(ctx context.Context) string {
  1153  	err := o.readMetaData()
  1154  	if err != nil {
  1155  		fs.Logf(o, "Failed to read metadata: %v", err)
  1156  		return ""
  1157  	}
  1158  	return o.mimeType
  1159  }
  1160  
  1161  // Check the interfaces are satisfied
  1162  var (
  1163  	_ fs.Fs         = &Fs{}
  1164  	_ fs.CleanUpper = &Fs{}
  1165  	_ fs.Copier     = &Fs{}
  1166  	_ fs.Object     = &Object{}
  1167  	_ fs.ListRer    = &Fs{}
  1168  	_ fs.MimeTyper  = &Object{}
  1169  )