github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/backend/qingstor/qingstor.go

// Package qingstor provides an interface to QingStor object storage
// Home: https://www.qingcloud.com/

// +build !plan9

package qingstor

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"path"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/configmap"
	"github.com/ncw/rclone/fs/config/configstruct"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/pkg/errors"
	qsConfig "github.com/yunify/qingstor-sdk-go/config"
	qsErr "github.com/yunify/qingstor-sdk-go/request/errors"
	qs "github.com/yunify/qingstor-sdk-go/service"
)

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "qingstor",
		Description: "QingCloud Object Storage",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:    "env_auth",
			Help:    "Get QingStor credentials from runtime. Only applies if access_key_id and secret_access_key are blank.",
			Default: false,
			Examples: []fs.OptionExample{{
				Value: "false",
				Help:  "Enter QingStor credentials in the next step",
			}, {
				Value: "true",
				Help:  "Get QingStor credentials from the environment (env vars or IAM)",
			}},
		}, {
			Name: "access_key_id",
			Help: "QingStor Access Key ID\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "secret_access_key",
			Help: "QingStor Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
		}, {
			Name: "endpoint",
			Help: "Enter an endpoint URL to connect to the QingStor API.\nLeave blank to use the default value \"https://qingstor.com:443\"",
		}, {
			Name: "zone",
			Help: "Zone to connect to.\nDefault is \"pek3a\".",
			Examples: []fs.OptionExample{{
				Value: "pek3a",
				Help:  "The Beijing (China) Three Zone\nNeeds location constraint pek3a.",
			}, {
				Value: "sh1a",
				Help:  "The Shanghai (China) First Zone\nNeeds location constraint sh1a.",
			}, {
				Value: "gd2a",
				Help:  "The Guangdong (China) Second Zone\nNeeds location constraint gd2a.",
			}},
		}, {
			Name:     "connection_retries",
			Help:     "Number of connection retries.",
			Default:  3,
			Advanced: true,
		}, {
			Name: "upload_cutoff",
			Help: `Cutoff for switching to chunked upload

Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
			Default:  defaultUploadCutoff,
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Chunk size to use for uploading.

When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.

Note that "--qingstor-upload-concurrency" chunks of this size are buffered
in memory per transfer.

If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
			Default:  minChunkSize,
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

NB if you set this to > 1 then the checksums of multipart uploads
become corrupted (the uploads themselves are not corrupted though).

If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
			Default:  1,
			Advanced: true,
		}},
	})
}
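// For illustration only: a remote using this backend might be configured in
// rclone.conf roughly as below. The remote name and credential values are
// hypothetical; the option keys are the ones registered above.
//
//	[myqingstor]
//	type = qingstor
//	env_auth = false
//	access_key_id = AKIDEXAMPLE
//	secret_access_key = SECRETEXAMPLE
//	zone = pek3a
//	endpoint = https://qingstor.com:443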
// Constants
const (
	listLimitSize       = 1000                   // Number of items to read at once
	maxSizeForCopy      = 1024 * 1024 * 1024 * 5 // The maximum size of object we can COPY
	minChunkSize        = fs.SizeSuffix(minMultiPartSize)
	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
	maxUploadCutoff     = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
)

// timestampToTime converts a Unix timestamp into a UTC time.Time
func timestampToTime(tp int64) time.Time {
	timeLayout := time.RFC3339Nano
	ts := time.Unix(tp, 0).Format(timeLayout)
	tm, _ := time.Parse(timeLayout, ts)
	return tm.UTC()
}

// Options defines the configuration for this backend
type Options struct {
	EnvAuth           bool          `config:"env_auth"`
	AccessKeyID       string        `config:"access_key_id"`
	SecretAccessKey   string        `config:"secret_access_key"`
	Endpoint          string        `config:"endpoint"`
	Zone              string        `config:"zone"`
	ConnectionRetries int           `config:"connection_retries"`
	UploadCutoff      fs.SizeSuffix `config:"upload_cutoff"`
	ChunkSize         fs.SizeSuffix `config:"chunk_size"`
	UploadConcurrency int           `config:"upload_concurrency"`
}

// Fs represents a remote qingstor server
type Fs struct {
	name          string       // The name of the remote
	root          string       // The root is a subdir, is a special object
	opt           Options      // parsed options
	features      *fs.Features // optional features
	svc           *qs.Service  // The connection to the qingstor server
	zone          string       // The zone we are working on
	bucket        string       // The bucket we are working on
	bucketOKMu    sync.Mutex   // mutex to protect bucketOK and bucketDeleted
	bucketOK      bool         // true if we have created the bucket
	bucketDeleted bool         // true if we have deleted the bucket
}

// Object describes a qingstor object
type Object struct {
	// Will definitely have everything but meta which may be nil
	//
	// List will read everything but meta & mimeType - to fill
	// that in you need to call readMetaData
	fs           *Fs       // what this object is part of
	remote       string    // the remote path of the object
	etag         string    // md5sum of the object
	size         int64     // length of the object content
	mimeType     string    // ContentType of object - may be ""
	lastModified time.Time // Last modified
	encrypted    bool      // whether the object is encrypted
	algo         string    // custom encryption algorithm
}

// ------------------------------------------------------------

// Pattern to match a qingstor path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)

// qsParsePath parses a qingstor 'url'
func qsParsePath(path string) (bucket, key string, err error) {
	// Pattern to match a qingstor path
	parts := matcher.FindStringSubmatch(path)
	if parts == nil {
		err = errors.Errorf("couldn't parse bucket out of qingstor path %q", path)
	} else {
		bucket, key = parts[1], parts[2]
		key = strings.Trim(key, "/")
	}
	return
}
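// A sketch of how qsParsePath splits paths, derived from the pattern above
// (the example paths are hypothetical):
//
//	qsParsePath("bucket")         // -> bucket="bucket", key=""
//	qsParsePath("bucket/a/b.txt") // -> bucket="bucket", key="a/b.txt"
//	qsParsePath("/bucket/a/")     // -> bucket="bucket", key="a" (slashes trimmed)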
// Split a URL into three parts: protocol, host and port
func qsParseEndpoint(endpoint string) (protocol, host, port string, err error) {
	/*
	   Pattern to match an endpoint,
	   eg: "http(s)://qingstor.com:443" --> "http(s)", "qingstor.com", 443
	       "http(s)://qingstor.com"     --> "http(s)", "qingstor.com", ""
	       "qingstor.com"               --> "", "qingstor.com", ""
	*/
	defer func() {
		if r := recover(); r != nil {
			switch x := r.(type) {
			case error:
				err = x
			default:
				err = nil
			}
		}
	}()
	var endpointMatcher = regexp.MustCompile(`^(?:(http|https)://)*(\w+\.(?:[\w\.])*)(?::(\d{0,5}))*$`)
	parts := endpointMatcher.FindStringSubmatch(endpoint)
	protocol, host, port = parts[1], parts[2], parts[3]
	return
}

// qsServiceConnection makes a connection to qingstor
func qsServiceConnection(opt *Options) (*qs.Service, error) {
	accessKeyID := opt.AccessKeyID
	secretAccessKey := opt.SecretAccessKey

	switch {
	case opt.EnvAuth:
		// No need for empty checks if "env_auth" is true
	case accessKeyID == "" && secretAccessKey == "":
		// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
	case accessKeyID == "":
		return nil, errors.New("access_key_id not found")
	case secretAccessKey == "":
		return nil, errors.New("secret_access_key not found")
	}

	protocol := "https"
	host := "qingstor.com"
	port := 443

	endpoint := opt.Endpoint
	if endpoint != "" {
		_protocol, _host, _port, err := qsParseEndpoint(endpoint)
		if err != nil {
			return nil, fmt.Errorf("the endpoint %q has an invalid format", endpoint)
		}

		if _protocol != "" {
			protocol = _protocol
		}
		host = _host
		if _port != "" {
			port, _ = strconv.Atoi(_port)
		} else if protocol == "http" {
			port = 80
		}
	}

	cf, err := qsConfig.NewDefault()
	if err != nil {
		return nil, err
	}
	cf.AccessKeyID = accessKeyID
	cf.SecretAccessKey = secretAccessKey
	cf.Protocol = protocol
	cf.Host = host
	cf.Port = port
	cf.ConnectionRetries = opt.ConnectionRetries
	cf.Connection = fshttp.NewClient(fs.Config)

	return qs.Init(cf)
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
	if cs < minChunkSize {
		return errors.Errorf("%s is less than %s", cs, minChunkSize)
	}
	return nil
}

func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadChunkSize(cs)
	if err == nil {
		old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	}
	return
}

func checkUploadCutoff(cs fs.SizeSuffix) error {
	if cs > maxUploadCutoff {
		return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
	}
	return nil
}

func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	err = checkUploadCutoff(cs)
	if err == nil {
		old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
	}
	return
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
	// Parse config into Options struct
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	err = checkUploadChunkSize(opt.ChunkSize)
	if err != nil {
		return nil, errors.Wrap(err, "qingstor: chunk size")
	}
	err = checkUploadCutoff(opt.UploadCutoff)
	if err != nil {
		return nil, errors.Wrap(err, "qingstor: upload cutoff")
	}
	bucket, key, err := qsParsePath(root)
	if err != nil {
		return nil, err
	}
	svc, err := qsServiceConnection(opt)
	if err != nil {
		return nil, err
	}

	if opt.Zone == "" {
		opt.Zone = "pek3a"
	}

	f := &Fs{
		name:   name,
		root:   key,
		opt:    *opt,
		svc:    svc,
		zone:   opt.Zone,
		bucket: bucket,
	}
	f.features = (&fs.Features{
		ReadMimeType:  true,
		WriteMimeType: true,
		BucketBased:   true,
	}).Fill(f)

	if f.root != "" {
		if !strings.HasSuffix(f.root, "/") {
			f.root += "/"
		}
		// Check to see if the object exists
		bucketInit, err := svc.Bucket(bucket, opt.Zone)
		if err != nil {
			return nil, err
		}
		_, err = bucketInit.HeadObject(key, &qs.HeadObjectInput{})
		if err == nil {
			f.root = path.Dir(key)
			if f.root == "." {
				f.root = ""
			} else {
				f.root += "/"
			}
			// return an error with an fs which points to the parent
			return f, fs.ErrorIsFile
		}
	}
	return f, nil
}
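// Illustrative NewFs behaviour (the remote name "qs" and the paths are
// hypothetical; m is the backend configuration):
//
//	NewFs("qs", "bucket", m)          // Fs rooted at the bucket
//	NewFs("qs", "bucket/dir", m)      // Fs rooted at dir/ inside the bucket
//	NewFs("qs", "bucket/dir/file", m) // if dir/file is an existing object, an
//	                                  // Fs rooted at dir/ plus fs.ErrorIsFile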
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	if f.root == "" {
		return f.bucket
	}
	return f.bucket + "/" + f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	if f.root == "" {
		return fmt.Sprintf("QingStor bucket %s", f.bucket)
	}
	return fmt.Sprintf("QingStor bucket %s root %s", f.bucket, f.root)
}

// Precision of the remote
func (f *Fs) Precision() time.Duration {
	// return time.Nanosecond
	// Not supported temporarily
	return fs.ModTimeNotSupported
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
	// return hash.HashSet(hash.HashNone)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Put creates a new object
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	fsObj := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return fsObj, fsObj.Update(ctx, in, src, options...)
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir(ctx, "")
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	srcFs := srcObj.fs
	key := f.root + remote
	source := path.Join("/"+srcFs.bucket, srcFs.root+srcObj.remote)

	fs.Debugf(f, "Copying, source key: %s, destination key: %s", source, key)
	req := qs.PutObjectInput{
		XQSCopySource: &source,
	}
	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
	if err != nil {
		return nil, err
	}
	_, err = bucketInit.PutObject(key, &req)
	if err != nil {
		fs.Debugf(f, "Copy failed, API error: %v", err)
		return nil, err
	}
	return f.NewObject(ctx, remote)
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// Return an Object from a path
//
// If it can't be found it returns the error ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *qs.KeyType) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		// Set info
		if info.Size != nil {
			o.size = *info.Size
		}
		if info.Etag != nil {
			o.etag = qs.StringValue(info.Etag)
		}
		if info.Modified == nil {
			fs.Logf(o, "Failed to read last modified")
			o.lastModified = time.Now()
		} else {
			o.lastModified = timestampToTime(int64(*info.Modified))
		}
		if info.MimeType != nil {
			o.mimeType = qs.StringValue(info.MimeType)
		}
		if info.Encrypted != nil {
			o.encrypted = qs.BoolValue(info.Encrypted)
		}
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *qs.KeyType, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
	prefix := f.root
	if dir != "" {
		prefix += dir + "/"
	}

	delimiter := ""
	if !recurse {
		delimiter = "/"
	}

	maxLimit := int(listLimitSize)
	var marker *string

	for {
		bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
		if err != nil {
			return err
		}
		// FIXME need to implement ALL loop
		req := qs.ListObjectsInput{
			Delimiter: &delimiter,
			Prefix:    &prefix,
			Limit:     &maxLimit,
			Marker:    marker,
		}
		resp, err := bucketInit.ListObjects(&req)
		if err != nil {
			if e, ok := err.(*qsErr.QingStorError); ok {
				if e.StatusCode == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		rootLength := len(f.root)
		if !recurse {
			for _, commonPrefix := range resp.CommonPrefixes {
				if commonPrefix == nil {
					fs.Logf(f, "Nil common prefix received")
					continue
				}
				remote := *commonPrefix
				if !strings.HasPrefix(remote, f.root) {
					fs.Logf(f, "Odd name received %q", remote)
					continue
				}
				remote = remote[rootLength:]
				if strings.HasSuffix(remote, "/") {
					remote = remote[:len(remote)-1]
				}

				err = fn(remote, &qs.KeyType{Key: &remote}, true)
				if err != nil {
					return err
				}
			}
		}

		for _, object := range resp.Keys {
			key := qs.StringValue(object.Key)
			if !strings.HasPrefix(key, f.root) {
				fs.Logf(f, "Odd name received %q", key)
				continue
			}
			remote := key[rootLength:]
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		// Continue from NextMarker if set, otherwise stop
		if resp.NextMarker == nil || *resp.NextMarker == "" {
			// marker = resp.Keys[len(resp.Keys)-1].Key
			break
		}
		marker = resp.NextMarker
	}
	return nil
}

// Convert a list item into a BasicInfo
func (f *Fs) itemToDirEntry(remote string, object *qs.KeyType, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		size := int64(0)
		if object.Size != nil {
			size = *object.Size
		}
		d := fs.NewDir(remote, time.Time{}).SetSize(size)
		return d, nil
	}
	o, err := f.newObjectWithInfo(remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// markBucketOK marks the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketDeleted = false
		f.bucketOKMu.Unlock()
	}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	// List the objects and directories
	err = f.list(ctx, dir, false, func(remote string, object *qs.KeyType, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return entries, nil
}

// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}

	req := qs.ListBucketsInput{
		Location: &f.zone,
	}
	resp, err := f.svc.ListBuckets(&req)
	if err != nil {
		return nil, err
	}

	for _, bucket := range resp.Buckets {
		d := fs.NewDir(qs.StringValue(bucket.Name), qs.TimeValue(bucket.Created))
		entries = append(entries, d)
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	if f.bucket == "" {
		return f.listBuckets(dir)
	}
	return f.listDir(ctx, dir)
}
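// How a listing request is dispatched, assuming a hypothetical remote
// configured as "qs:" with the zone set in its config:
//
//	qs:            -> listBuckets: all buckets in f.zone
//	qs:bucket      -> listDir: entries in the root of the bucket
//	qs:bucket/dir  -> listDir: entries under the dir/ prefix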
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := walk.NewListRHelper(callback)
	err = f.list(ctx, dir, true, func(remote string, object *qs.KeyType, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

// dirExists checks if the bucket exists
func (f *Fs) dirExists() (bool, error) {
	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
	if err != nil {
		return false, err
	}

	_, err = bucketInit.Head()
	if err == nil {
		return true, nil
	}

	if e, ok := err.(*qsErr.QingStorError); ok {
		if e.StatusCode == http.StatusNotFound {
			err = nil
		}
	}
	return false, err
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}

	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
	if err != nil {
		return err
	}
	/* When a bucket is deleted, qingstor needs about 60 seconds to sync its
	   status, so we wait for the sync to finish if we try to operate on a
	   just deleted bucket.
	*/
	retries := 0
	for retries <= 120 {
		statistics, err := bucketInit.GetStatistics()
		if statistics == nil || err != nil {
			break
		}
		if *statistics.Status == "deleted" {
			fs.Debugf(f, "Waiting for qingstor to sync bucket status, retries: %d", retries)
			time.Sleep(time.Second * 1)
			retries++
			continue
		}
		break
	}

	if !f.bucketDeleted {
		exists, err := f.dirExists()
		if err == nil {
			f.bucketOK = exists
		}
		if err != nil || exists {
			return err
		}
	}

	_, err = bucketInit.Put()
	if e, ok := err.(*qsErr.QingStorError); ok {
		if e.StatusCode == http.StatusConflict {
			err = nil
		}
	}

	if err == nil {
		f.bucketOK = true
		f.bucketDeleted = false
	}

	return err
}

// dirIsEmpty checks if the bucket is empty
func (f *Fs) dirIsEmpty() (bool, error) {
	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
	if err != nil {
		return true, err
	}

	statistics, err := bucketInit.GetStatistics()
	if err != nil {
		return true, err
	}

	if *statistics.Count == 0 {
		return true, nil
	}
	return false, nil
}

// Rmdir deletes a bucket
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}

	isEmpty, err := f.dirIsEmpty()
	if err != nil {
		return err
	}
	if !isEmpty {
		fs.Debugf(f, "The bucket %s you tried to delete is not empty.", f.bucket)
		return errors.New("BucketNotEmpty: The bucket you tried to delete is not empty")
	}

	fs.Debugf(f, "Deleting the bucket %s", f.bucket)
	bucketInit, err := f.svc.Bucket(f.bucket, f.zone)
	if err != nil {
		return err
	}
	retries := 0
	for retries <= 10 {
		_, delErr := bucketInit.Delete()
		if delErr != nil {
			if e, ok := delErr.(*qsErr.QingStorError); ok {
				switch e.Code {
				// The status of the "lease" takes a few seconds to become
				// "ready" when creating a new bucket, so wait for the
				// lease status to be ready
				case "lease_not_ready":
					fs.Debugf(f, "QingStor bucket lease not ready, retries: %d", retries)
					retries++
					time.Sleep(time.Second * 1)
					continue
				default:
					err = e
				}
			}
		} else {
			err = delErr
		}
		break
	}

	if err == nil {
		f.bucketOK = false
		f.bucketDeleted = true
	}
	return err
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
	if err != nil {
		return err
	}

	key := o.fs.root + o.remote
	fs.Debugf(o, "Read metadata of key: %s", key)
	resp, err := bucketInit.HeadObject(key, &qs.HeadObjectInput{})
	if err != nil {
		fs.Debugf(o, "Read metadata failed, API Error: %v", err)
		if e, ok := err.(*qsErr.QingStorError); ok {
			if e.StatusCode == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	// Ignore missing Content-Length assuming it is 0
	if resp.ContentLength != nil {
		o.size = *resp.ContentLength
	}

	if resp.ETag != nil {
		o.etag = qs.StringValue(resp.ETag)
	}

	if resp.LastModified == nil {
		fs.Logf(o, "Failed to read last modified from HEAD")
		o.lastModified = time.Now()
	} else {
		o.lastModified = *resp.LastModified
	}

	if resp.ContentType != nil {
		o.mimeType = qs.StringValue(resp.ContentType)
	}

	if resp.XQSEncryptionCustomerAlgorithm != nil {
		o.algo = qs.StringValue(resp.XQSEncryptionCustomerAlgorithm)
		o.encrypted = true
	}

	return nil
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	modTime := o.lastModified
	return modTime
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
	err := o.readMetaData()
	if err != nil {
		return err
	}
	o.lastModified = modTime
	mimeType := fs.MimeType(ctx, o)

	if o.size >= maxSizeForCopy {
		fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
		return nil
	}
	// Copy the object to itself to update the metadata
	key := o.fs.root + o.remote
	sourceKey := path.Join("/", o.fs.bucket, key)

	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
	if err != nil {
		return err
	}

	req := qs.PutObjectInput{
		XQSCopySource: &sourceKey,
		ContentType:   &mimeType,
	}
	_, err = bucketInit.PutObject(key, &req)

	return err
}
// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
	if err != nil {
		return nil, err
	}

	key := o.fs.root + o.remote
	req := qs.GetObjectInput{}
	for _, option := range options {
		switch option.(type) {
		case *fs.RangeOption, *fs.SeekOption:
			_, value := option.Header()
			req.Range = &value
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	resp, err := bucketInit.GetObject(key, &req)
	if err != nil {
		return nil, err
	}
	return resp.Body, nil
}
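// A sketch of a ranged read using the option handling above (the object and
// byte range are hypothetical):
//
//	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
//	// sends "Range: bytes=0-1023" with the GET; callers must rc.Close()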
// Update the object with the contents of the io.Reader
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	// The maximum size of an uploaded object is multipartUploadSize * MaxMultipleParts
	err := o.fs.Mkdir(ctx, "")
	if err != nil {
		return err
	}

	key := o.fs.root + o.remote
	// Guess the content type
	mimeType := fs.MimeType(ctx, src)

	req := uploadInput{
		body:        in,
		qsSvc:       o.fs.svc,
		bucket:      o.fs.bucket,
		zone:        o.fs.zone,
		key:         key,
		mimeType:    mimeType,
		partSize:    int64(o.fs.opt.ChunkSize),
		concurrency: o.fs.opt.UploadConcurrency,
	}
	uploader := newUploader(&req)

	size := src.Size()
	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
	if multipart {
		err = uploader.upload()
	} else {
		err = uploader.singlePartUpload(in, size)
	}
	if err != nil {
		return err
	}
	// Read metadata of the object
	err = o.readMetaData()
	return err
}
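// With the defaults registered above (upload_cutoff defaults to 200M), the
// upload path is chosen roughly like this for a hypothetical source:
//
//	size = 10 MiB           -> singlePartUpload
//	size = 1 GiB            -> multipart upload in chunk_size parts
//	size < 0 (unknown size) -> multipart upload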
// Remove this object
func (o *Object) Remove(ctx context.Context) error {
	bucketInit, err := o.fs.svc.Bucket(o.fs.bucket, o.fs.zone)
	if err != nil {
		return err
	}

	key := o.fs.root + o.remote
	_, err = bucketInit.DeleteObject(key)
	return err
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
	return o.fs
}

var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	etag := strings.Trim(strings.ToLower(o.etag), `"`)
	// Check the etag is a valid md5sum
	if !matchMd5.MatchString(etag) {
		fs.Debugf(o, "Invalid md5sum (probably multipart uploaded) - ignoring: %q", etag)
		return "", nil
	}
	return etag, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
	return true
}

// String returns a description of the Object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Size returns the size of the file
func (o *Object) Size() int64 {
	return o.size
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
	err := o.readMetaData()
	if err != nil {
		fs.Logf(o, "Failed to read metadata: %v", err)
		return ""
	}
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs        = &Fs{}
	_ fs.Copier    = &Fs{}
	_ fs.Object    = &Object{}
	_ fs.ListRer   = &Fs{}
	_ fs.MimeTyper = &Object{}
)