//go:build !plan9 && !js

// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/
package qingstor

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/walk"
	"github.com/rclone/rclone/lib/bucket"
	"github.com/rclone/rclone/lib/encoder"
	qsConfig "github.com/yunify/qingstor-sdk-go/v3/config"
	qsErr "github.com/yunify/qingstor-sdk-go/v3/request/errors"
	qs "github.com/yunify/qingstor-sdk-go/v3/service"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "qingstor",
		Description: "QingCloud Object Storage",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:    "env_auth",
			Help:    "Get QingStor credentials from runtime.\n\nOnly applies if access_key_id and secret_access_key are blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter QingStor credentials in the next step.",
			}, {
				Value: "true",
				Help:  "Get QingStor credentials from the environment (env vars or IAM).",
			}},
		}, {
			Name:      "access_key_id",
			Help:      "QingStor Access Key ID.\n\nLeave blank for anonymous access or runtime credentials.",
			Sensitive: true,
		}, {
			Name:      "secret_access_key",
			Help:      "QingStor Secret Access Key (password).\n\nLeave blank for anonymous access or runtime credentials.",
			Sensitive: true,
		}, {
			Name: "endpoint",
			Help: "Enter an endpoint URL to connect to the QingStor API.\n\nLeave blank to use the default value \"https://qingstor.com:443\".",
		}, {
			Name: "zone",
			Help: "Zone to connect to.\n\nDefault is \"pek3a\".",
			Examples: []fs.OptionExample{{
				Value: "pek3a",
				Help:  "The Beijing (China) Three Zone.\nNeeds location constraint pek3a.",
			}, {
				Value: "sh1a",
				Help:  "The Shanghai (China) First Zone.\nNeeds location constraint sh1a.",
			}, {
				Value: "gd2a",
				Help:  "The Guangdong (China) Second Zone.\nNeeds location constraint gd2a.",
			}},
		}, {
			Name:     "connection_retries",
			Help:     "Number of connection retries.",
			Default:  3,
			Advanced: true,
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload.

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5 GiB.`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high-speed links and you have
enough memory, then increasing this will speed up the transfers.`,
			Default:  minChunkSize,
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large files over high-speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
			Default:  1,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.EncodeInvalidUtf8 |
				encoder.EncodeCtl |
				encoder.EncodeSlash),
		}},
	})
}
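
// An illustrative remote configuration (hypothetical section name and
// credentials) exercising the options registered above, as it would appear
// in rclone.conf:
//
//	[myqingstor]
//	type = qingstor
//	env_auth = false
//	access_key_id = AKIDEXAMPLE
//	secret_access_key = SECRETEXAMPLE
//	zone = pek3a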

// Constants
const (
	listLimitSize       = 1000                   // Number of items to read at once
	maxSizeForCopy      = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	minChunkSize        = fs.SizeSuffix(minMultiPartSize)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// Globals

// timestampToTime converts a unix timestamp to a UTC time.Time
func timestampToTime(tp int64) time.Time {
	return time.Unix(tp, 0).UTC()
}

// Options defines the configuration for this backend
type Options struct {
	EnvAuth           bool                 `config:"env_auth"`
	AccessKeyID       string               `config:"access_key_id"`
	SecretAccessKey   string               `config:"secret_access_key"`
	Endpoint          string               `config:"endpoint"`
	Zone              string               `config:"zone"`
	ConnectionRetries int                  `config:"connection_retries"`
	UploadCutoff      fs.SizeSuffix        `config:"upload_cutoff"`
	ChunkSize         fs.SizeSuffix        `config:"chunk_size"`
	UploadConcurrency int                  `config:"upload_concurrency"`
	Enc               encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote qingstor server
type Fs struct {
	name          string        // The name of the remote
	root          string        // The root is a subdir; it is a special object
	opt           Options       // parsed options
	features      *fs.Features  // optional features
	svc           *qs.Service   // The connection to the qingstor server
	zone          string        // The zone we are working on
	rootBucket    string        // bucket part of root (if any)
	rootDirectory string        // directory part of root (if any)
	cache         *bucket.Cache // cache for bucket creation status
}

// Object describes a qingstor object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta & mimeType - to fill
	// that in you need to call readMetaData
	fs           *Fs       // what this object is part of
	remote       string    // the remote path of the object
	etag         string    // md5sum of the object
	size         int64     // length of the object content
	mimeType     string    // ContentType of object - may be ""
	lastModified time.Time // Last modified
	encrypted    bool      // whether the object is encrypted
	algo         string    // Custom encryption algorithm
}

// ------------------------------------------------------------

// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
	root = strings.Trim(path, "/")
	return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
	return o.fs.split(o.remote)
}
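
// An illustrative walk-through of split (hypothetical paths): with
// f.root == "mybucket/photos", the relative remote "2021/a.jpg" is joined to
// the root and then split on the first "/":
//
//	bucketName, bucketPath := f.split("2021/a.jpg")
//	// bucketName == "mybucket", bucketPath == "photos/2021/a.jpg"
//	// (both run through the configured name/path encoders)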

// Split a URL into three parts: protocol, host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
	/*
	  Pattern to match an endpoint,
	  e.g.: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", "443"
	      "http(s)://qingstor.com"     --> "http(s)", "qingstor.com", ""
	      "qingstor.com"               --> "", "qingstor.com", ""
	*/
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case error:
				err = x
			default:
				err = nil
			}
		}
	}()
	var matcher = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
	parts := matcher.FindStringSubmatch(endpoint)
	protocol, host, port = parts[1], parts[2], parts[3]
	return
}
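
// An illustrative call (values are examples only):
//
//	protocol, host, port, err := qsParseEndpoint("https://qingstor.com:443")
//	// protocol == "https", host == "qingstor.com", port == "443", err == nil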

// qsServiceConnection makes a connection to qingstor
func qsServiceConnection(ctx context.Context, opt *Options) (*qs.Service, error) {
	accessKeyID := opt.AccessKeyID
	secretAccessKey := opt.SecretAccessKey

	switch {
	case opt.EnvAuth:
		// No need for empty checks if "env_auth" is true
	case accessKeyID == "" && secretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anonymous interaction
	case accessKeyID == "":
		return nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, errors.New("secret_access_key not found")
	}

	protocol := "https"
	host := "qingstor.com"
	port := 443

	endpoint := opt.Endpoint
	if endpoint != "" {
		_protocol, _host, _port, err := qsParseEndpoint(endpoint)

		if err != nil {
			return nil, fmt.Errorf("the endpoint %q has an invalid format", endpoint)
		}

		if _protocol != "" {
			protocol = _protocol
		}
		host = _host
		if _port != "" {
			port, _ = strconv.Atoi(_port)
		} else if protocol == "http" {
			port = 80
		}

	}

	cf, err := qsConfig.NewDefault()
	if err != nil {
		return nil, err
	}
	cf.AccessKeyID = accessKeyID
	cf.SecretAccessKey = secretAccessKey
	cf.Protocol = protocol
	cf.Host = host
	cf.Port = port
	// unsupported in v3.1: cf.ConnectionRetries = opt.ConnectionRetries
	cf.Connection = fshttp.NewClient(ctx)

	return qs.Init(cf)
}
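
// An illustrative sketch (hypothetical values) of building a service handle
// directly from Options, as NewFs does below:
//
//	svc, err := qsServiceConnection(ctx, &Options{
//		EnvAuth:  true,
//		Endpoint: "https://qingstor.com:443",
//	})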

func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return fmt.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return fmt.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
	f.root = parsePath(root)
	f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, fmt.Errorf("qingstor: chunk size: %w", err)
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, fmt.Errorf("qingstor: upload cutoff: %w", err)
	}
	svc, err := qsServiceConnection(ctx, opt)
	if err != nil {
		return nil, err
	}

	if opt.Zone == "" {
		opt.Zone = "pek3a"
	}

	f := &Fs{
		name:  name,
		opt:   *opt,
		svc:   svc,
		zone:  opt.Zone,
		cache: bucket.NewCache(),
	}
	f.setRoot(root)
	f.features = (&fs.Features{
		ReadMimeType:      true,
		WriteMimeType:     true,
		BucketBased:       true,
		BucketBasedRootOK: true,
		SlowModTime:       true,
	}).Fill(ctx, f)

	if f.rootBucket != "" && f.rootDirectory != "" {
		// Check to see if the object exists
		bucketInit, err := svc.Bucket(f.rootBucket, opt.Zone)
		if err != nil {
			return nil, err
		}
		encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
		_, err = bucketInit.HeadObject(encodedDirectory, &qs.HeadObjectInput{})
		if err == nil {
			newRoot := path.Dir(f.root)
			if newRoot == "." {
				newRoot = ""
			}
			f.setRoot(newRoot)
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
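
// An illustrative construction (hypothetical remote name and values); in
// normal use rclone calls NewFs through the registry entry in init above:
//
//	m := configmap.Simple{"env_auth": "true", "zone": "pek3a"}
//	f, err := NewFs(ctx, "myqingstor", "mybucket/dir", m)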

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.rootBucket == "" {
		return "QingStor root"
	}
	if f.rootDirectory == "" {
		return fmt.Sprintf("QingStor bucket %s", f.rootBucket)
	}
	return fmt.Sprintf("QingStor bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	// return time.Nanosecond
	// Not currently supported
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
	//return hash.HashSet(hash.HashNone)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Put creates a new object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	fsObj := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fsObj, fsObj.Update(ctx, in, src, options...)
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	dstBucket, dstPath := f.split(remote)
	err := f.makeBucket(ctx, dstBucket)
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcBucket, srcPath := srcObj.split()
	source := path.Join("/", srcBucket, srcPath)

	// fs.Debugf(f, "Copied, source key is: %s, and dst key is: %s", source, key)
	req := qs.PutObjectInput{
		XQSCopySource: &source,
	}
	bucketInit, err := f.svc.Bucket(dstBucket, f.zone)

	if err != nil {
		return nil, err
	}
	_, err = bucketInit.PutObject(dstPath, &req)
	if err != nil {
		// fs.Debugf(f, "Copy Failed, API Error: %v", err)
		return nil, err
	}
	return f.NewObject(ctx, remote)
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info
		if info.Size != nil {
			o.size = *info.Size
		}

		if info.Etag != nil {
			o.etag = qs.StringValue(info.Etag)
		}
		if info.Modified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = timestampToTime(int64(*info.Modified))
		}

		if info.MimeType != nil {
			o.mimeType = qs.StringValue(info.MimeType)
		}

		if info.Encrypted != nil {
			o.encrypted = qs.BoolValue(info.Encrypted)
		}

	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error {
	if prefix != "" {
		prefix += "/"
	}
	if directory != "" {
		directory += "/"
	}
	delimiter := ""
	if !recurse {
		delimiter = "/"
	}
	maxLimit := int(listLimitSize)
	var marker *string
	for {
		bucketInit, err := f.svc.Bucket(bucket, f.zone)
		if err != nil {
			return err
		}
		req := qs.ListObjectsInput{
			Delimiter: &delimiter,
			Prefix:    &directory,
			Limit:     &maxLimit,
			Marker:    marker,
		}
		resp, err := bucketInit.ListObjects(&req)
		if err != nil {
			if e, ok := err.(*qsErr.QingStorError); ok {
				if e.StatusCode == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			for _, commonPrefix := range resp.CommonPrefixes {
				if commonPrefix == nil {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := *commonPrefix
				remote = f.opt.Enc.ToStandardPath(remote)
				if !strings.HasPrefix(remote, prefix) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[len(prefix):]
				if addBucket {
					remote = path.Join(bucket, remote)
				}
				remote = strings.TrimSuffix(remote, "/")
				err = fn(remote, &qs.KeyType{Key: &remote}, true)
				if err != nil {
					return err
				}
			}
		}

		for _, object := range resp.Keys {
			remote := qs.StringValue(object.Key)
			remote = f.opt.Enc.ToStandardPath(remote)
			if !strings.HasPrefix(remote, prefix) {
				fs.Logf(f, "Odd name received %q", remote)
				continue
			}
			remote = remote[len(prefix):]
			if addBucket {
				remote = path.Join(bucket, remote)
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if resp.HasMore != nil && !*resp.HasMore {
			break
		}
		// Use NextMarker if set, otherwise stop
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			fs.Errorf(f, "Expecting NextMarker but didn't find one")
			break
		} else {
			marker = resp.NextMarker
		}
	}
	return nil
}
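
// An illustrative invocation (hypothetical bucket name): driving list with a
// callback that just prints each entry it is handed:
//
//	err := f.list(ctx, "mybucket", "", "", false, false,
//		func(remote string, object *qs.KeyType, isDirectory bool) error {
//			fmt.Printf("%s dir=%v\n", remote, isDirectory)
//			return nil
//		})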

// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		size := int64(0)
		if object.Size != nil {
			size = *object.Size
		}
		d := fs.NewDir(remote, time.Time{}).SetSize(size)
		return d, nil
	}
	o, err := f.newObjectWithInfo(remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
	// List the objects and directories
	err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.cache.MarkOK(bucket)
	return entries, nil
}

// listBuckets lists the buckets to out
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	req := qs.ListBucketsInput{
		Location: &f.zone,
	}
	resp, err := f.svc.ListBuckets(&req)
	if err != nil {
		return nil, err
	}

	for _, bucket := range resp.Buckets {
		d := fs.NewDir(f.opt.Enc.ToStandardName(qs.StringValue(bucket.Name)), qs.TimeValue(bucket.Created))
		entries = append(entries, d)
	}
	return entries, nil
}

// List the objects and directories in dir into entries.  The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	bucket, directory := f.split(dir)
	if bucket == "" {
		if directory != "" {
			return nil, fs.ErrorListBucketRequired
		}
		return f.listBuckets(ctx)
	}
	return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order.  If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	bucket, directory := f.split(dir)
	list := walk.NewListRHelper(callback)
	listR := func(bucket, directory, prefix string, addBucket bool) error {
		return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
			entry, err := f.itemToDirEntry(remote, object, isDirectory)
			if err != nil {
				return err
			}
			return list.Add(entry)
		})
	}
	if bucket == "" {
		entries, err := f.listBuckets(ctx)
		if err != nil {
			return err
		}
		for _, entry := range entries {
			err = list.Add(entry)
			if err != nil {
				return err
			}
			bucket := entry.Remote()
			err = listR(bucket, "", f.rootDirectory, true)
			if err != nil {
				return err
			}
			// bucket must be present if listing succeeded
			f.cache.MarkOK(bucket)
		}
	} else {
		err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
		if err != nil {
			return err
		}
		// bucket must be present if listing succeeded
		f.cache.MarkOK(bucket)
	}
	return list.Flush()
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	bucket, _ := f.split(dir)
	return f.makeBucket(ctx, bucket)
}

// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
	return f.cache.Create(bucket, func() error {
		bucketInit, err := f.svc.Bucket(bucket, f.zone)
		if err != nil {
			return err
		}
		/* When a bucket is deleted, QingStor takes about 60 seconds to sync
		its status, so we need to wait for the sync to finish before
		operating on a just-deleted bucket.
		*/
		wasDeleted := false
		retries := 0
		for retries <= 120 {
			statistics, err := bucketInit.GetStatistics()
			if statistics == nil || err != nil {
				break
			}
			switch *statistics.Status {
			case "deleted":
				fs.Debugf(f, "Wait for qingstor bucket to be deleted, retries: %d", retries)
				time.Sleep(time.Second * 1)
				retries++
				wasDeleted = true
				continue
			}
			break
		}

		retries = 0
		for retries <= 120 {
			_, err = bucketInit.Put()
			if e, ok := err.(*qsErr.QingStorError); ok {
				if e.StatusCode == http.StatusConflict {
					if wasDeleted {
						fs.Debugf(f, "Wait for qingstor bucket to be creatable, retries: %d", retries)
						time.Sleep(time.Second * 1)
						retries++
						continue
					}
					err = nil
				}
			}
			break
		}
		return err
	}, nil)
}

// bucketIsEmpty checks if the bucket is empty
func (f *Fs) bucketIsEmpty(bucket string) (bool, error) {
	bucketInit, err := f.svc.Bucket(bucket, f.zone)
	if err != nil {
		return true, err
	}

	statistics, err := bucketInit.GetStatistics()
	if err != nil {
		return true, err
	}

	if *statistics.Count == 0 {
		return true, nil
	}
	return false, nil
}

// Rmdir deletes the bucket if it is empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	bucket, directory := f.split(dir)
	if bucket == "" || directory != "" {
		return nil
	}
	isEmpty, err := f.bucketIsEmpty(bucket)
	if err != nil {
		return err
	}
	if !isEmpty {
		// fs.Debugf(f, "The bucket %s you tried to delete is not empty.", bucket)
		return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
	}
	return f.cache.Remove(bucket, func() error {
		// fs.Debugf(f, "Deleting the bucket %s", bucket)
		bucketInit, err := f.svc.Bucket(bucket, f.zone)
		if err != nil {
			return err
		}
		retries := 0
		for retries <= 10 {
			_, delErr := bucketInit.Delete()
			if delErr != nil {
				if e, ok := delErr.(*qsErr.QingStorError); ok {
					switch e.Code {
					// The "lease" status takes a few seconds to become "ready" when creating
					// a new bucket, so wait for the lease status to be ready
					case "lease_not_ready":
						fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
						retries++
						time.Sleep(time.Second * 1)
						continue
					default:
						err = e
					}
				}
			} else {
				err = delErr
			}
			break
		}
		return err
	})
}

// cleanUpBucket removes all pending multipart uploads for a given bucket
func (f *Fs) cleanUpBucket(ctx context.Context, bucket string) (err error) {
	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than 24 hours", bucket)
	bucketInit, err := f.svc.Bucket(bucket, f.zone)
	if err != nil {
		return err
	}
	// maxLimit := int(listLimitSize)
	var marker *string
	for {
		req := qs.ListMultipartUploadsInput{
			// The default is 200 but this errors if more than 200 is put in so leave at the default
			// Limit:     &maxLimit,
			KeyMarker: marker,
		}
		var resp *qs.ListMultipartUploadsOutput
		resp, err = bucketInit.ListMultipartUploads(&req)
		if err != nil {
			return fmt.Errorf("clean up bucket list multipart uploads: %w", err)
		}
		for _, upload := range resp.Uploads {
			if upload.Created != nil && upload.Key != nil && upload.UploadID != nil {
				age := time.Since(*upload.Created)
				if age > 24*time.Hour {
					fs.Infof(f, "removing pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
					req := qs.AbortMultipartUploadInput{
						UploadID: upload.UploadID,
					}
					_, abortErr := bucketInit.AbortMultipartUpload(*upload.Key, &req)
					if abortErr != nil {
						err = fmt.Errorf("failed to remove multipart upload for %q: %w", *upload.Key, abortErr)
						fs.Errorf(f, "%v", err)
					}
				} else {
					fs.Debugf(f, "ignoring pending multipart upload for %q dated %v (%v ago)", *upload.Key, upload.Created, age)
				}
			}
		}
		if resp.HasMore != nil && !*resp.HasMore {
			break
		}
		// Use NextKeyMarker if set, otherwise stop
		if resp.NextKeyMarker == nil || *resp.NextKeyMarker == "" {
			fs.Errorf(f, "Expecting NextKeyMarker but didn't find one")
			break
		} else {
			marker = resp.NextKeyMarker
		}
	}
	return err
}

// CleanUp removes all pending multipart uploads
func (f *Fs) CleanUp(ctx context.Context) (err error) {
	if f.rootBucket != "" {
		return f.cleanUpBucket(ctx, f.rootBucket)
	}
	entries, err := f.listBuckets(ctx)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		cleanErr := f.cleanUpBucket(ctx, f.opt.Enc.FromStandardName(entry.Remote()))
		if cleanErr != nil {
			fs.Errorf(f, "Failed to clean up bucket: %v", cleanErr)
			err = cleanErr
		}
	}
	return err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	bucket, bucketPath := o.split()
	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return err
	}
	// fs.Debugf(o, "Read metadata of key: %s", key)
	resp, err := bucketInit.HeadObject(bucketPath, &qs.HeadObjectInput{})
	if err != nil {
		// fs.Debugf(o, "Read metadata failed, API Error: %v", err)
		if e, ok := err.(*qsErr.QingStorError); ok {
			if e.StatusCode == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	// Ignore missing Content-Length assuming it is 0
	if resp.ContentLength != nil {
		o.size = *resp.ContentLength
	}

	if resp.ETag != nil {
		o.etag = qs.StringValue(resp.ETag)
	}

	if resp.LastModified == nil {
		fs.Logf(o, "Failed to read last modified from HEAD")
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}

	if resp.ContentType != nil {
		o.mimeType = qs.StringValue(resp.ContentType)
	}

	if resp.XQSEncryptionCustomerAlgorithm != nil {
		o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
		o.encrypted = true
	}

	return nil
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata, %v", err)
		return time.Now()
	}
	return o.lastModified
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	o.lastModified = modTime
	mimeType := fs.MimeType(ctx, o)

	if o.size >= maxSizeForCopy {
		fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
		return nil
	}
	// Copy the object to itself to update the metadata
	bucket, bucketPath := o.split()
	sourceKey := path.Join("/", bucket, bucketPath)

	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return err
	}

	req := qs.PutObjectInput{
		XQSCopySource: &sourceKey,
		ContentType:   &mimeType,
	}
	_, err = bucketInit.PutObject(bucketPath, &req)

	return err
}

// Open opens the file for read.  Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	bucket, bucketPath := o.split()
	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return nil, err
	}

	req := qs.GetObjectInput{}
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	resp, err := bucketInit.GetObject(bucketPath, &req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
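
// An illustrative partial read (values are examples only): fetching the
// first 1 KiB of the object with a range option; close the reader when done:
//
//	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})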

// Update uploads the object with the contents of in
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The maximum size of upload object is multipartUploadSize * MaxMultipleParts
	bucket, bucketPath := o.split()
	err := o.fs.makeBucket(ctx, bucket)
	if err != nil {
		return err
	}

	// Guess the content type
	mimeType := fs.MimeType(ctx, src)

	req := uploadInput{
		body:        in,
		qsSvc:       o.fs.svc,
		bucket:      bucket,
		zone:        o.fs.zone,
		key:         bucketPath,
		mimeType:    mimeType,
		partSize:    int64(o.fs.opt.ChunkSize),
		concurrency: o.fs.opt.UploadConcurrency,
	}
	uploader := newUploader(&req)

	size := src.Size()
	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	if multipart {
		err = uploader.upload()
	} else {
		err = uploader.singlePartUpload(in, size)
	}
	if err != nil {
		return err
	}
	// Read the metadata of the newly uploaded object
	err = o.readMetaData()
	return err
}

// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	bucket, bucketPath := o.split()
	bucketInit, err := o.fs.svc.Bucket(bucket, o.fs.zone)
	if err != nil {
		return err
	}
	_, err = bucketInit.DeleteObject(bucketPath)
	return err
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	etag := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(etag) {
		fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
		return "", nil
	}
	return etag, nil
}
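
// Illustrative values (examples only, not from the source): a single-part
// upload leaves a plain md5 ETag which Hash returns, while a multipart ETag
// such as "9b2cf535f27731c974343645a3985328-3" fails the md5 check above,
// so Hash returns "" rather than a misleading checksum.
//
//	o.etag = `"5d41402abc4b2a76b9719d911017c592"` // md5 of "hello"
//	sum, _ := o.Hash(ctx, hash.MD5)              // "5d41402abc4b2a76b9719d911017c592"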

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// String returns a description of the Object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs         = &Fs{}
	_ fs.CleanUpper = &Fs{}
	_ fs.Copier     = &Fs{}
	_ fs.Object     = &Object{}
	_ fs.ListRer    = &Fs{}
	_ fs.MimeTyper  = &Object{}
)