github.com/npaton/distribution@v2.3.1-rc.0+incompatible/registry/storage/driver/oss/oss.go

// Package oss provides a storagedriver.StorageDriver implementation to
// store blobs in Aliyun OSS cloud storage.
//
// This package leverages the denverdino/aliyungo client library for
// interfacing with OSS.
//
// Because OSS is a key-value store, the Stat call does not support last
// modification time for directories (directories are an abstraction for
// key-value stores).
//
// +build include_oss

package oss

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/docker/distribution/context"

	"github.com/Sirupsen/logrus"
	"github.com/denverdino/aliyungo/oss"
	storagedriver "github.com/docker/distribution/registry/storage/driver"
	"github.com/docker/distribution/registry/storage/driver/base"
	"github.com/docker/distribution/registry/storage/driver/factory"
)

const driverName = "oss"

// minChunkSize defines the minimum multipart upload chunk size.
// The OSS API requires multipart upload chunks to be at least 5MB.
const minChunkSize = 5 << 20

const defaultChunkSize = 2 * minChunkSize
const defaultTimeout = 2 * time.Minute // 2 minute timeout per chunk

// listMax is the largest number of objects you can request from OSS in a list call
const listMax = 1000

// DriverParameters encapsulates all of the driver parameters after all values have been set
type DriverParameters struct {
	AccessKeyID     string
	AccessKeySecret string
	Bucket          string
	Region          oss.Region
	Internal        bool
	Encrypt         bool
	Secure          bool
	ChunkSize       int64
	RootDirectory   string
	Endpoint        string
}

func init() {
	factory.Register(driverName, &ossDriverFactory{})
}

// ossDriverFactory implements the factory.StorageDriverFactory interface
type ossDriverFactory struct{}

func (factory *ossDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
	return FromParameters(parameters)
}

type driver struct {
	Client        *oss.Client
	Bucket        *oss.Bucket
	ChunkSize     int64
	Encrypt       bool
	RootDirectory string

	pool  sync.Pool // pool of []byte buffers used for WriteStream
	zeros []byte    // shared, zero-valued buffer used for WriteStream
}

type baseEmbed struct {
	base.Base
}

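// Illustrative registration sketch (hypothetical consumer code, not part of
// this file): init registers the driver with the factory, so a registry
// binary built with the include_oss tag can construct it by name:
//
//	import _ "github.com/docker/distribution/registry/storage/driver/oss"
//	...
//	d, err := factory.Create("oss", parameters)
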
// Driver is a storagedriver.StorageDriver implementation backed by Aliyun OSS.
// Objects are stored at absolute keys in the provided bucket.
type Driver struct {
	baseEmbed
}

// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - accesskeyid
// - accesskeysecret
// - region
// - bucket
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
	// Providing no values for these is valid in case the user is authenticating
	// with an IAM on an ec2 instance (in which case the instance credentials will
	// be summoned when GetAuth is called)
	accessKey, ok := parameters["accesskeyid"]
	if !ok {
		return nil, fmt.Errorf("No accesskeyid parameter provided")
	}
	secretKey, ok := parameters["accesskeysecret"]
	if !ok {
		return nil, fmt.Errorf("No accesskeysecret parameter provided")
	}

	regionName, ok := parameters["region"]
	if !ok || fmt.Sprint(regionName) == "" {
		return nil, fmt.Errorf("No region parameter provided")
	}

	bucket, ok := parameters["bucket"]
	if !ok || fmt.Sprint(bucket) == "" {
		return nil, fmt.Errorf("No bucket parameter provided")
	}

	internalBool := false
	internal, ok := parameters["internal"]
	if ok {
		internalBool, ok = internal.(bool)
		if !ok {
			return nil, fmt.Errorf("The internal parameter should be a boolean")
		}
	}

	encryptBool := false
	encrypt, ok := parameters["encrypt"]
	if ok {
		encryptBool, ok = encrypt.(bool)
		if !ok {
			return nil, fmt.Errorf("The encrypt parameter should be a boolean")
		}
	}

	secureBool := true
	secure, ok := parameters["secure"]
	if ok {
		secureBool, ok = secure.(bool)
		if !ok {
			return nil, fmt.Errorf("The secure parameter should be a boolean")
		}
	}

	chunkSize := int64(defaultChunkSize)
	chunkSizeParam, ok := parameters["chunksize"]
	if ok {
		switch v := chunkSizeParam.(type) {
		case string:
			vv, err := strconv.ParseInt(v, 0, 64)
			if err != nil {
				return nil, fmt.Errorf("chunksize parameter must be an integer, %v invalid", chunkSizeParam)
			}
			chunkSize = vv
		case int64:
			chunkSize = v
		case int, uint, int32, uint32, uint64:
			chunkSize = reflect.ValueOf(v).Convert(reflect.TypeOf(chunkSize)).Int()
		default:
			return nil, fmt.Errorf("invalid value for chunksize: %#v", chunkSizeParam)
		}

		if chunkSize < minChunkSize {
			return nil, fmt.Errorf("The chunksize %#v parameter should be a number that is larger than or equal to %d", chunkSize, minChunkSize)
		}
	}

	rootDirectory, ok := parameters["rootdirectory"]
	if !ok {
		rootDirectory = ""
	}

	endpoint, ok := parameters["endpoint"]
	if !ok {
		endpoint = ""
	}

	params := DriverParameters{
		AccessKeyID:     fmt.Sprint(accessKey),
		AccessKeySecret: fmt.Sprint(secretKey),
		Bucket:          fmt.Sprint(bucket),
		Region:          oss.Region(fmt.Sprint(regionName)),
		ChunkSize:       chunkSize,
		RootDirectory:   fmt.Sprint(rootDirectory),
		Encrypt:         encryptBool,
		Secure:          secureBool,
		Internal:        internalBool,
		Endpoint:        fmt.Sprint(endpoint),
	}

	return New(params)
}

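// Illustrative parameters map (hypothetical values, not part of the upstream
// code) showing what a registry configuration would hand to FromParameters:
//
//	d, err := FromParameters(map[string]interface{}{
//		"accesskeyid":     "my-access-key-id",
//		"accesskeysecret": "my-access-key-secret",
//		"region":          "oss-cn-hangzhou",
//		"bucket":          "my-registry-bucket",
//		"rootdirectory":   "registry",
//		"chunksize":       "10485760", // optional; must be >= minChunkSize (5MB)
//	})
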
// New constructs a new Driver with the given Aliyun credentials, region, encryption flag, and
// bucket name
func New(params DriverParameters) (*Driver, error) {

	client := oss.NewOSSClient(params.Region, params.Internal, params.AccessKeyID, params.AccessKeySecret, params.Secure)
	client.SetEndpoint(params.Endpoint)
	bucket := client.Bucket(params.Bucket)
	client.SetDebug(false)

	// Validate that the given credentials have at least read permissions in the
	// given bucket scope.
	if _, err := bucket.List(strings.TrimRight(params.RootDirectory, "/"), "", "", 1); err != nil {
		return nil, err
	}

	// TODO(tg123): Currently multipart uploads have no timestamps, so this would be unwise
	// if you initiated a new OSS client while another one is running on the same bucket.

	d := &driver{
		Client:        client,
		Bucket:        bucket,
		ChunkSize:     params.ChunkSize,
		Encrypt:       params.Encrypt,
		RootDirectory: params.RootDirectory,
		zeros:         make([]byte, params.ChunkSize),
	}

	d.pool.New = func() interface{} {
		return make([]byte, d.ChunkSize)
	}

	return &Driver{
		baseEmbed: baseEmbed{
			Base: base.Base{
				StorageDriver: d,
			},
		},
	}, nil
}

// Implement the storagedriver.StorageDriver interface

func (d *driver) Name() string {
	return driverName
}

// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
	content, err := d.Bucket.Get(d.ossPath(path))
	if err != nil {
		return nil, parseError(path, err)
	}
	return content, nil
}

// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
	return parseError(path, d.Bucket.Put(d.ossPath(path), contents, d.getContentType(), getPermissions(), d.getOptions()))
}

// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) ReadStream(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
	headers := make(http.Header)
	headers.Add("Range", "bytes="+strconv.FormatInt(offset, 10)+"-")

	resp, err := d.Bucket.GetResponseWithHeaders(d.ossPath(path), headers)
	if err != nil {
		return nil, parseError(path, err)
	}

	// Due to the Aliyun OSS API, status 200 and the whole object will be returned
	// instead of an InvalidRange error when the range is invalid.
	//
	// The OSS server will always return http.StatusPartialContent if the range is acceptable.
	if resp.StatusCode != http.StatusPartialContent {
		resp.Body.Close()
		return ioutil.NopCloser(bytes.NewReader(nil)), nil
	}

	return resp.Body, nil
}

// WriteStream stores the contents of the provided io.Reader at a
// location designated by the given path. The driver will know it has
// received the full contents when the reader returns io.EOF. The number
// of successfully READ bytes will be returned, even if an error is
// returned. May be used to resume writing a stream by providing a nonzero
// offset. Offsets past the current size will write from the position
// beyond the end of the file.
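//
// Illustrative resume scenario (hypothetical numbers, assuming the default
// 10MB chunk size): writing at offset 25MB on top of an existing 25MB object
// copies bytes 0 through offset-1 into the multipart upload via PutPartCopy,
// then streams further parts of up to chunkSize bytes from the reader until
// it returns io.EOF.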
func (d *driver) WriteStream(ctx context.Context, path string, offset int64, reader io.Reader) (totalRead int64, err error) {
	partNumber := 1
	bytesRead := 0
	var putErrChan chan error
	parts := []oss.Part{}
	var part oss.Part
	done := make(chan struct{}) // stopgap to free up waiting goroutines

	multi, err := d.Bucket.InitMulti(d.ossPath(path), d.getContentType(), getPermissions(), d.getOptions())
	if err != nil {
		return 0, err
	}

	buf := d.getbuf()

	// We never want to leave a dangling multipart upload, our only consistent state is
	// when there is a whole object at path. This is in order to remain consistent with
	// the stat call.
	//
	// Note that if the machine dies before executing the defer, we will be left with a dangling
	// multipart upload, which will eventually be cleaned up, but we will lose all of the progress
	// made prior to the machine crashing.
	defer func() {
		if putErrChan != nil {
			if putErr := <-putErrChan; putErr != nil {
				err = putErr
			}
		}

		if len(parts) > 0 {
			if multi == nil {
				// Parts should be empty if the multi is not initialized
				panic("Unreachable")
			} else {
				if multi.Complete(parts) != nil {
					multi.Abort()
				}
			}
		}

		d.putbuf(buf) // needs to be here to pick up new buf value
		close(done)   // free up any waiting goroutines
	}()

	// Fills from 0 to total from current
	fromSmallCurrent := func(total int64) error {
		current, err := d.ReadStream(ctx, path, 0)
		if err != nil {
			return err
		}

		bytesRead = 0
		for int64(bytesRead) < total {
			// The loop should very rarely enter a second iteration
			nn, err := current.Read(buf[bytesRead:total])
			bytesRead += nn
			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}

		}
		return nil
	}

	// Fills from parameter to chunkSize from reader
	fromReader := func(from int64) error {
		bytesRead = 0
		for from+int64(bytesRead) < d.ChunkSize {
			nn, err := reader.Read(buf[from+int64(bytesRead):])
			totalRead += int64(nn)
			bytesRead += nn

			if err != nil {
				if err != io.EOF {
					return err
				}

				break
			}
		}

		if putErrChan == nil {
			putErrChan = make(chan error)
		} else {
			if putErr := <-putErrChan; putErr != nil {
				putErrChan = nil
				return putErr
			}
		}

		go func(bytesRead int, from int64, buf []byte) {
			defer d.putbuf(buf) // this buffer gets dropped after this call

			// DRAGONS(stevvooe): There are a few things one might want to know
			// about this section. First, the putErrChan is expecting an error
			// and a nil or just a nil to come through the channel. This is
			// covered by the silly defer below. The other aspect is the OSS
			// retry backoff to deal with RequestTimeout errors. Even though
			// the underlying OSS library should handle it, it doesn't seem to
			// be part of the shouldRetry function (see denverdino/aliyungo/oss).
			defer func() {
				select {
				case putErrChan <- nil: // for some reason, we do this no matter what.
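				// done is closed by WriteStream's deferred cleanup, so selecting
				// on it keeps this goroutine from blocking forever if the caller
				// has already returned.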
				case <-done:
					return // ensure we don't leak the goroutine
				}
			}()

			if bytesRead <= 0 {
				return
			}

			var err error
			var part oss.Part

			part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf[0:int64(bytesRead)+from]), defaultTimeout)

			if err != nil {
				logrus.Errorf("error putting part, aborting: %v", err)
				select {
				case putErrChan <- err:
				case <-done:
					return // don't leak the goroutine
				}
			}

			// parts and partNumber are safe, because this function is the
			// only one modifying them and we force it to be executed
			// serially.
			parts = append(parts, part)
			partNumber++
		}(bytesRead, from, buf)

		buf = d.getbuf() // use a new buffer for the next call
		return nil
	}

	if offset > 0 {
		resp, err := d.Bucket.Head(d.ossPath(path), nil)
		if err != nil {
			if ossErr, ok := err.(*oss.Error); !ok || ossErr.StatusCode != http.StatusNotFound {
				return 0, err
			}
		}

		currentLength := int64(0)
		if err == nil {
			currentLength = resp.ContentLength
		}

		if currentLength >= offset {
			if offset < d.ChunkSize {
				// chunkSize > currentLength >= offset
				if err = fromSmallCurrent(offset); err != nil {
					return totalRead, err
				}

				if err = fromReader(offset); err != nil {
					return totalRead, err
				}

				if totalRead+offset < d.ChunkSize {
					return totalRead, nil
				}
			} else {
				// currentLength >= offset >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					oss.CopyOptions{CopySourceOptions: "bytes=0-" + strconv.FormatInt(offset-1, 10)},
					d.Bucket.Path(d.ossPath(path)))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++
			}
		} else {
			// Fills between parameters with 0s but only when to - from <= chunkSize
			fromZeroFillSmall := func(from, to int64) error {
				bytesRead = 0
				for from+int64(bytesRead) < to {
					nn, err := bytes.NewReader(d.zeros).Read(buf[from+int64(bytesRead) : to])
					bytesRead += nn
					if err != nil {
						return err
					}
				}

				return nil
			}

			// Fills between parameters with 0s, making new parts
			fromZeroFillLarge := func(from, to int64) error {
				bytesRead64 := int64(0)
				for to-(from+bytesRead64) >= d.ChunkSize {
					part, err := multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(d.zeros), defaultTimeout)
					if err != nil {
						return err
					}
					bytesRead64 += d.ChunkSize

					parts = append(parts, part)
					partNumber++
				}

				return fromZeroFillSmall(0, (to-from)%d.ChunkSize)
			}

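			// Illustrative zero-fill (hypothetical numbers, assuming the default
			// 10MB chunk size): padding from byte 10MB up to offset 34MB uploads
			// two full 10MB zero parts via fromZeroFillLarge and leaves the
			// remaining 4MB for fromZeroFillSmall to copy into buf.
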
			// currentLength < offset
			if currentLength < d.ChunkSize {
				if offset < d.ChunkSize {
					// chunkSize > offset > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset); err != nil {
						return totalRead, err
					}

					if totalRead+offset < d.ChunkSize {
						return totalRead, nil
					}
				} else {
					// offset >= chunkSize > currentLength
					if err = fromSmallCurrent(currentLength); err != nil {
						return totalRead, err
					}

					if err = fromZeroFillSmall(currentLength, d.ChunkSize); err != nil {
						return totalRead, err
					}

					part, err = multi.PutPartWithTimeout(int(partNumber), bytes.NewReader(buf), defaultTimeout)
					if err != nil {
						return totalRead, err
					}

					parts = append(parts, part)
					partNumber++

					// Zero fill from chunkSize up to offset, then some reader
					if err = fromZeroFillLarge(d.ChunkSize, offset); err != nil {
						return totalRead, err
					}

					if err = fromReader(offset % d.ChunkSize); err != nil {
						return totalRead, err
					}

					if totalRead+(offset%d.ChunkSize) < d.ChunkSize {
						return totalRead, nil
					}
				}
			} else {
				// offset > currentLength >= chunkSize
				_, part, err = multi.PutPartCopy(partNumber,
					oss.CopyOptions{},
					d.Bucket.Path(d.ossPath(path)))
				if err != nil {
					return 0, err
				}

				parts = append(parts, part)
				partNumber++

				// Zero fill from currentLength up to offset, then some reader
				if err = fromZeroFillLarge(currentLength, offset); err != nil {
					return totalRead, err
				}

				if err = fromReader((offset - currentLength) % d.ChunkSize); err != nil {
					return totalRead, err
				}

				if totalRead+((offset-currentLength)%d.ChunkSize) < d.ChunkSize {
					return totalRead, nil
				}
			}

		}
	}

	for {
		if err = fromReader(0); err != nil {
			return totalRead, err
		}

		if int64(bytesRead) < d.ChunkSize {
			break
		}
	}

	return totalRead, nil
}

// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
	listResponse, err := d.Bucket.List(d.ossPath(path), "", "", 1)
	if err != nil {
		return nil, err
	}

	fi := storagedriver.FileInfoFields{
		Path: path,
	}

	if len(listResponse.Contents) == 1 {
		if listResponse.Contents[0].Key != d.ossPath(path) {
			fi.IsDir = true
		} else {
			fi.IsDir = false
			fi.Size = listResponse.Contents[0].Size

			timestamp, err := time.Parse(time.RFC3339Nano, listResponse.Contents[0].LastModified)
			if err != nil {
				return nil, err
			}
			fi.ModTime = timestamp
		}
	} else if len(listResponse.CommonPrefixes) == 1 {
		fi.IsDir = true
	} else {
		return nil, storagedriver.PathNotFoundError{Path: path}
	}

	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}

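// Illustrative Stat semantics (hypothetical keys): if the bucket only holds an
// object at key "docker/registry/v2/repositories/app/_layers/...", then
// Stat("/docker/registry/v2/repositories/app") reports a directory, because
// the first key listed under that prefix does not match the path exactly;
// since directories are only an abstraction over the key space, no ModTime is
// set for them.
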
// List returns a list of the objects that are direct descendants of the given path.
func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
	path := opath
	if path != "/" && opath[len(path)-1] != '/' {
		path = path + "/"
	}

	// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
	// In those cases, there is no root prefix to replace and we must actually add a "/" to all
	// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
	prefix := ""
	if d.ossPath("") == "" {
		prefix = "/"
	}

	listResponse, err := d.Bucket.List(d.ossPath(path), "/", "", listMax)
	if err != nil {
		return nil, parseError(opath, err)
	}

	files := []string{}
	directories := []string{}

	for {
		for _, key := range listResponse.Contents {
			files = append(files, strings.Replace(key.Key, d.ossPath(""), prefix, 1))
		}

		for _, commonPrefix := range listResponse.CommonPrefixes {
			directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.ossPath(""), prefix, 1))
		}

		if listResponse.IsTruncated {
			listResponse, err = d.Bucket.List(d.ossPath(path), "/", listResponse.NextMarker, listMax)
			if err != nil {
				return nil, err
			}
		} else {
			break
		}
	}

	if opath != "/" {
		if len(files) == 0 && len(directories) == 0 {
			// Treat empty response as missing directory, since we don't actually
			// have directories in OSS.
			return nil, storagedriver.PathNotFoundError{Path: opath}
		}
	}

	return append(files, directories...), nil
}

// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
	logrus.Infof("Move from %s to %s", d.ossPath(sourcePath), d.ossPath(destPath))

	err := d.Bucket.CopyLargeFile(d.ossPath(sourcePath), d.ossPath(destPath),
		d.getContentType(),
		getPermissions(),
		oss.Options{})
	if err != nil {
		logrus.Errorf("Failed to move from %s to %s: %v", d.ossPath(sourcePath), d.ossPath(destPath), err)
		return parseError(sourcePath, err)
	}

	return d.Delete(ctx, sourcePath)
}

// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
	listResponse, err := d.Bucket.List(d.ossPath(path), "", "", listMax)
	if err != nil || len(listResponse.Contents) == 0 {
		return storagedriver.PathNotFoundError{Path: path}
	}

	ossObjects := make([]oss.Object, listMax)

	for len(listResponse.Contents) > 0 {
		for index, key := range listResponse.Contents {
			ossObjects[index].Key = key.Key
		}

		err := d.Bucket.DelMulti(oss.Delete{Quiet: false, Objects: ossObjects[0:len(listResponse.Contents)]})
		if err != nil {
			return err
		}

		listResponse, err = d.Bucket.List(d.ossPath(path), "", "", listMax)
		if err != nil {
			return err
		}
	}

	return nil
}

// URLFor returns a URL which may be used to retrieve the content stored at the given path.
// May return an ErrUnsupportedMethod in certain StorageDriver implementations.
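//
// Illustrative options (hypothetical values): a signed GET URL that expires
// in one hour can be requested with
//
//	signedURL, err := d.URLFor(ctx, path, map[string]interface{}{
//		"method": "GET",
//		"expiry": time.Now().Add(1 * time.Hour),
//	})
//
// Any method other than "GET" yields ErrUnsupportedMethod.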
func (d *driver) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {
	methodString := "GET"
	method, ok := options["method"]
	if ok {
		methodString, ok = method.(string)
		if !ok || (methodString != "GET") {
			return "", storagedriver.ErrUnsupportedMethod{}
		}
	}

	expiresTime := time.Now().Add(20 * time.Minute)

	expires, ok := options["expiry"]
	if ok {
		et, ok := expires.(time.Time)
		if ok {
			expiresTime = et
		}
	}
	logrus.Infof("methodString: %s, expiresTime: %v", methodString, expiresTime)
	testURL := d.Bucket.SignedURLWithMethod(methodString, d.ossPath(path), expiresTime, nil, nil)
	logrus.Infof("testURL: %s", testURL)
	return testURL, nil
}

// ossPath maps a storagedriver path to its object key in the bucket by
// prefixing it with the configured root directory.
func (d *driver) ossPath(path string) string {
	return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
}

// parseError translates an OSS "not found" error into a
// storagedriver.PathNotFoundError and passes any other error through.
func parseError(path string, err error) error {
	if ossErr, ok := err.(*oss.Error); ok && ossErr.StatusCode == http.StatusNotFound && (ossErr.Code == "NoSuchKey" || ossErr.Code == "") {
		return storagedriver.PathNotFoundError{Path: path}
	}

	return err
}

func hasCode(err error, code string) bool {
	ossErr, ok := err.(*oss.Error)
	return ok && ossErr.Code == code
}

func (d *driver) getOptions() oss.Options {
	return oss.Options{ServerSideEncryption: d.Encrypt}
}

func getPermissions() oss.ACL {
	return oss.Private
}

func (d *driver) getContentType() string {
	return "application/octet-stream"
}

// getbuf returns a buffer from the driver's pool with length d.ChunkSize.
func (d *driver) getbuf() []byte {
	return d.pool.Get().([]byte)
}

// putbuf zeroes a buffer and returns it to the driver's pool.
func (d *driver) putbuf(p []byte) {
	copy(p, d.zeros)
	d.pool.Put(p)
}
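
// Illustrative key layout (hypothetical values): with RootDirectory set to
// "registry", ossPath("/docker/registry/v2/blobs/...") produces the bucket key
// "registry/docker/registry/v2/blobs/..."; with an empty RootDirectory the
// leading "/" is simply stripped.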