yunion.io/x/cloudmux@v0.3.10-0-alpha.1/pkg/multicloud/google/bucket.go

// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package google

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
	"time"

	"cloud.google.com/go/storage"

	"yunion.io/x/jsonutils"
	"yunion.io/x/log"
	"yunion.io/x/pkg/errors"
	"yunion.io/x/pkg/utils"

	"yunion.io/x/cloudmux/pkg/cloudprovider"
	"yunion.io/x/cloudmux/pkg/multicloud"
)

type SLifecycleRuleAction struct {
	Type string
}

type SLifecycleRuleCondition struct {
	Age int
}

type SLifecycleRule struct {
	Action    SLifecycleRuleAction
	Condition SLifecycleRuleCondition
}

type SBucketPolicyOnly struct {
	Enabled bool
}

type SUniformBucketLevelAccess struct {
	Enabled bool
}

type SIamConfiguration struct {
	BucketPolicyOnly         SBucketPolicyOnly
	UniformBucketLevelAccess SUniformBucketLevelAccess
}

type SLifecycle struct {
	Rule []SLifecycleRule
}

type SBucket struct {
	multicloud.SBaseBucket
	GoogleTags

	region *SRegion

	Kind             string
	SelfLink         string
	Name             string
	ProjectNumber    string
	Metageneration   string
	Location         string
	StorageClass     string
	Etag             string
	TimeCreated      time.Time
	Updated          time.Time
	Lifecycle        SLifecycle
	IamConfiguration SIamConfiguration
	LocationType     string
}

func (b *SBucket) GetProjectId() string {
	return b.region.GetProjectId()
}

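// GetAcl derives a canned ACL for the bucket from its IAM policy: bindings that
// grant storage roles to allUsers map to public-read or public-read-write,
// bindings for allAuthenticatedUsers map to auth-read, and anything else is
// treated as private.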
func (b *SBucket) GetAcl() cloudprovider.TBucketACLType {
	iam, err := b.region.GetBucketIam(b.Name)
	if err != nil {
		return cloudprovider.ACLUnknown
	}
	acl := cloudprovider.ACLPrivate
	allUsers := []SBucketBinding{}
	allAuthUsers := []SBucketBinding{}
	for _, binding := range iam.Bindings {
		if utils.IsInStringArray("allUsers", binding.Members) {
			allUsers = append(allUsers, binding)
		}
		if utils.IsInStringArray("allAuthenticatedUsers", binding.Members) {
			allAuthUsers = append(allAuthUsers, binding)
		}
	}

	for _, binding := range allUsers {
		switch binding.Role {
		case "roles/storage.admin", "roles/storage.objectAdmin":
			acl = cloudprovider.ACLPublicReadWrite
		case "roles/storage.objectViewer":
			if acl != cloudprovider.ACLPublicReadWrite {
				acl = cloudprovider.ACLPublicRead
			}
		}
	}

	for _, binding := range allAuthUsers {
		switch binding.Role {
		case "roles/storage.admin", "roles/storage.objectAdmin", "roles/storage.objectViewer":
			acl = cloudprovider.ACLAuthRead
		}
	}
	return acl
}

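// SetBucketAcl rewrites the bucket IAM policy to match a canned ACL: existing
// bindings for allUsers/allAuthenticatedUsers are dropped, then a single
// objectViewer or objectAdmin binding is appended for the requested ACL.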
func (region *SRegion) SetBucketAcl(bucket string, acl cloudprovider.TBucketACLType) error {
	iam, err := region.GetBucketIam(bucket)
	if err != nil {
		return errors.Wrap(err, "GetBucketIam")
	}
	bindings := []SBucketBinding{}
	for _, binding := range iam.Bindings {
		if !utils.IsInStringArray(string(storage.AllUsers), binding.Members) && !utils.IsInStringArray(string(storage.AllAuthenticatedUsers), binding.Members) {
			bindings = append(bindings, binding)
		}
	}
	switch acl {
	case cloudprovider.ACLPrivate:
		if len(bindings) == len(iam.Bindings) {
			return nil
		}
	case cloudprovider.ACLAuthRead:
		bindings = append(bindings, SBucketBinding{
			Role:    "roles/storage.objectViewer",
			Members: []string{"allAuthenticatedUsers"},
		})
	case cloudprovider.ACLPublicRead:
		bindings = append(bindings, SBucketBinding{
			Role:    "roles/storage.objectViewer",
			Members: []string{"allUsers"},
		})
	case cloudprovider.ACLPublicReadWrite:
		bindings = append(bindings, SBucketBinding{
			Role:    "roles/storage.objectAdmin",
			Members: []string{"allUsers"},
		})
	default:
		return fmt.Errorf("unknown acl %s", acl)
	}
	iam.Bindings = bindings
	_, err = region.SetBucketIam(bucket, iam)
	if err != nil {
		return errors.Wrap(err, "SetBucketIam")
	}
	return nil
}

func (b *SBucket) SetAcl(acl cloudprovider.TBucketACLType) error {
	return b.region.SetBucketAcl(b.Name, acl)
}

func (b *SBucket) GetGlobalId() string {
	return b.Name
}

func (b *SBucket) GetName() string {
	return b.Name
}

func (b *SBucket) GetLocation() string {
	return strings.ToLower(b.Location)
}

func (b *SBucket) GetIRegion() cloudprovider.ICloudRegion {
	return b.region
}

func (b *SBucket) GetCreatedAt() time.Time {
	return b.TimeCreated
}

func (b *SBucket) GetStorageClass() string {
	return b.StorageClass
}

func (b *SBucket) GetAccessUrls() []cloudprovider.SBucketAccessUrl {
	return []cloudprovider.SBucketAccessUrl{
		{
			Url:         fmt.Sprintf("https://www.googleapis.com/storage/v1/b/%s", b.Name),
			Description: "bucket domain",
			Primary:     true,
		},
		{
			Url:         fmt.Sprintf("https://www.googleapis.com/upload/storage/v1/b/%s/o", b.Name),
			Description: "object upload endpoint",
		},
		{
			Url:         fmt.Sprintf("https://www.googleapis.com/batch/storage/v1/b/%s", b.Name),
			Description: "batch operation",
		},
	}
}

func (b *SBucket) GetStats() cloudprovider.SBucketStats {
	stats, _ := cloudprovider.GetIBucketStats(b)
	return stats
}

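// AbortMultipartUpload cancels the resumable upload session identified by uploadId.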
func (b *SBucket) AbortMultipartUpload(ctx context.Context, key string, uploadId string) error {
	resource := fmt.Sprintf("b/%s/o?uploadType=resumable&upload_id=%s", b.Name, uploadId)
	return b.region.client.storageAbortUpload(resource)
}

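// CompleteMultipartUpload has nothing to finalize: a GCS resumable upload is
// committed once its final chunk has been uploaded, so this only verifies that
// the assembled object is now readable.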
func (b *SBucket) CompleteMultipartUpload(ctx context.Context, key string, uploadId string, partEtags []string) error {
	resource := fmt.Sprintf("b/%s/o/%s", b.Name, url.PathEscape(key))
	err := b.region.StorageGet(resource, nil)
	if err != nil {
		return errors.Wrapf(err, "failed to get object %s", key)
	}
	return nil
}

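// CopyObject performs a server-side copy via the objects "copyTo" action and then
// applies the requested canned ACL and metadata to the destination object.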
func (b *SBucket) CopyObject(ctx context.Context, destKey string, srcBucket, srcKey string, cannedAcl cloudprovider.TBucketACLType, storageClassStr string, meta http.Header) error {
	resource := fmt.Sprintf("b/%s/o/%s", srcBucket, url.PathEscape(srcKey))
	action := fmt.Sprintf("copyTo/b/%s/o/%s", b.Name, url.PathEscape(destKey))
	err := b.region.StorageDo(resource, action, nil, nil)
	if err != nil {
		return errors.Wrap(err, "CopyObject")
	}
	err = b.region.SetObjectAcl(b.Name, destKey, cannedAcl)
	if err != nil {
		return errors.Wrapf(err, "SetObjectAcl(%s)", cannedAcl)
	}
	err = b.region.SetObjectMeta(b.Name, destKey, meta)
	if err != nil {
		return errors.Wrap(err, "SetObjectMeta")
	}
	return nil
}

func (b *SBucket) CopyPart(ctx context.Context, key string, uploadId string, partNumber int, srcBucket string, srcKey string, srcOffset int64, srcLength int64) (string, error) {
	return "", cloudprovider.ErrNotSupported
}

func (region *SRegion) DeleteObject(bucket, key string) error {
	resource := fmt.Sprintf("b/%s/o/%s", bucket, url.PathEscape(key))
	return region.StorageDelete(resource)
}

func (b *SBucket) DeleteObject(ctx context.Context, key string) error {
	return b.region.DeleteObject(b.Name, key)
}

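// DownloadObjectRange streams object content via a media download (alt=media),
// translating the requested start/end offsets into an HTTP Range header.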
func (region *SRegion) DownloadObjectRange(bucket, object string, start, end int64) (io.ReadCloser, error) {
	resource := fmt.Sprintf("b/%s/o/%s?alt=media", bucket, url.PathEscape(object))
	header := http.Header{}
	if start <= 0 {
		if end > 0 {
			header.Set("Range", fmt.Sprintf("bytes=0-%d", end))
		}
		// no range requested: download the whole object without a Range header
	} else {
		if end > start {
			header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
		} else {
			header.Set("Range", fmt.Sprintf("bytes=%d-", start))
		}
	}
	return region.client.storageDownload(resource, header)
}

func (b *SBucket) GetObject(ctx context.Context, key string, rangeOpt *cloudprovider.SGetObjectRange) (io.ReadCloser, error) {
	// rangeOpt may be nil when the caller wants the full object
	start, end := int64(0), int64(0)
	if rangeOpt != nil {
		start, end = rangeOpt.Start, rangeOpt.End
	}
	return b.region.DownloadObjectRange(b.Name, key, start, end)
}

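// SingedUrl generates a V4 signed URL for the object using the client's service
// account credentials. GCS limits signed URL expiration to 7 days, and only GET
// and PUT are supported here.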
func (region *SRegion) SingedUrl(bucket, key string, method string, expire time.Duration) (string, error) {
	if expire > time.Hour*24*7 {
		return "", fmt.Errorf("expiration time can't be longer than 604800 seconds (7 days)")
	}
	opts := &storage.SignedURLOptions{
		Scheme:         storage.SigningSchemeV4,
		Method:         method,
		GoogleAccessID: region.client.clientEmail,
		PrivateKey:     []byte(region.client.privateKey),
		Expires:        time.Now().Add(expire),
	}
	switch method {
	case "GET":
	case "PUT":
		opts.Headers = []string{"Content-Type:application/octet-stream"}
	default:
		return "", errors.Wrapf(cloudprovider.ErrNotSupported, "unsupported method %s", method)
	}
	return storage.SignedURL(bucket, key, opts)
}

func (b *SBucket) GetTempUrl(method string, key string, expire time.Duration) (string, error) {
	return b.region.SingedUrl(b.Name, key, method, expire)
}

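// ListObjects lists objects under a prefix, mapping GCS prefixes to common
// prefixes and skipping directory placeholder objects whose names end with a slash.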
func (b *SBucket) ListObjects(prefix string, marker string, delimiter string, maxCount int) (cloudprovider.SListObjectResult, error) {
	result := cloudprovider.SListObjectResult{}
	objs, err := b.region.GetObjects(b.Name, prefix, marker, delimiter, maxCount)
	if err != nil {
		return result, errors.Wrap(err, "GetObjects")
	}
	result.NextMarker = objs.NextPageToken
	log.Debugf("obj count: %d", len(objs.Items))
	result.Objects = []cloudprovider.ICloudObject{}
	result.CommonPrefixes = []cloudprovider.ICloudObject{}
	for i := range objs.Items {
		// skip directory placeholder objects
		if strings.HasSuffix(objs.Items[i].Name, "/") {
			continue
		}
		objs.Items[i].bucket = b
		result.Objects = append(result.Objects, &objs.Items[i])
	}
	for i := range objs.Prefixes {
		obj := &SObject{
			bucket: b,
			Name:   objs.Prefixes[i],
		}
		result.CommonPrefixes = append(result.CommonPrefixes, obj)
	}
	return result, nil
}

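// NewMultipartUpload starts a GCS resumable upload session and returns its
// upload_id. Object metadata, storage class and a predefined ACL are sent in the
// session-initiation request body; the session id is taken from the Location
// header returned by the API.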
func (region *SRegion) NewMultipartUpload(bucket, key string, cannedAcl cloudprovider.TBucketACLType, storageClassStr string, meta http.Header) (string, error) {
	body := map[string]string{"name": key}
	if len(storageClassStr) > 0 {
		body["storageClass"] = storageClassStr
	}
	for k := range meta {
		switch k {
		case cloudprovider.META_HEADER_CONTENT_TYPE:
			body["contentType"] = meta.Get(k)
		case cloudprovider.META_HEADER_CONTENT_ENCODING:
			body["contentEncoding"] = meta.Get(k)
		case cloudprovider.META_HEADER_CONTENT_DISPOSITION:
			body["contentDisposition"] = meta.Get(k)
		case cloudprovider.META_HEADER_CONTENT_LANGUAGE:
			body["contentLanguage"] = meta.Get(k)
		case cloudprovider.META_HEADER_CACHE_CONTROL:
			body["cacheControl"] = meta.Get(k)
		default:
			body[fmt.Sprintf("metadata.%s", k)] = meta.Get(k)
		}
	}
	switch cannedAcl {
	case cloudprovider.ACLPrivate:
	case cloudprovider.ACLAuthRead:
		body["predefinedAcl"] = "authenticatedRead"
	case cloudprovider.ACLPublicRead:
		body["predefinedAcl"] = "publicRead"
	case cloudprovider.ACLPublicReadWrite:
		return "", cloudprovider.ErrNotSupported
	}
	resource := fmt.Sprintf("b/%s/o?uploadType=resumable", bucket)
	input := strings.NewReader(jsonutils.Marshal(body).String())
	header := http.Header{}
	header.Set("Content-Type", "application/json; charset=UTF-8")
	header.Set("Content-Length", fmt.Sprintf("%d", input.Len()))
	resp, err := region.client.storageUpload(resource, header, input)
	if err != nil {
		return "", errors.Wrap(err, "storageUpload")
	}
	defer resp.Body.Close()
	// Location holds the session URI; upload_id is one of its &-separated
	// query parameters, so ParseQuery can pick it out directly
	location := resp.Header.Get("Location")
	query, err := url.ParseQuery(location)
	if err != nil {
		return "", errors.Wrapf(err, "url.ParseQuery(%s)", location)
	}
	return query.Get("upload_id"), nil
}

func (b *SBucket) NewMultipartUpload(ctx context.Context, key string, cannedAcl cloudprovider.TBucketACLType, storageClassStr string, meta http.Header) (string, error) {
	return b.region.NewMultipartUpload(b.Name, key, cannedAcl, storageClassStr, meta)
}

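// UploadPart uploads one chunk of a resumable upload session. The chunk's
// position is described by a Content-Range header of the form
// "bytes offset-(offset+partSize-1)/totalSize"; GCS answers intermediate chunks
// with 308 and the final chunk with 200 or 201.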
func (region *SRegion) UploadPart(bucket, uploadId string, partIndex int, offset int64, part io.Reader, partSize int64, totalSize int64) error {
	resource := fmt.Sprintf("b/%s/o?uploadType=resumable&upload_id=%s", bucket, uploadId)
	header := http.Header{}
	header.Set("Content-Length", fmt.Sprintf("%d", partSize))
	header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", offset, offset+partSize-1, totalSize))
	resp, err := region.client.storageUploadPart(resource, header, part)
	if err != nil {
		return errors.Wrap(err, "storageUploadPart")
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 500 {
		content, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("status code: %d %s", resp.StatusCode, content)
	}
	return nil
}

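// CheckUploadRange queries the status of a resumable upload session by sending an
// empty chunk with "Content-Range: bytes */*" and logs the response for debugging.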
func (region *SRegion) CheckUploadRange(bucket string, uploadId string) error {
	resource := fmt.Sprintf("b/%s/o?uploadType=resumable&upload_id=%s", bucket, uploadId)
	header := http.Header{}
	header.Set("Content-Range", "bytes */*")
	resp, err := region.client.storageUploadPart(resource, header, nil)
	if err != nil {
		return errors.Wrap(err, "storageUploadPart")
	}
	defer resp.Body.Close()

	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return errors.Wrap(err, "ReadAll")
	}
	log.Debugf("CheckUploadRange status code: %d header: %v content: %s", resp.StatusCode, resp.Header, content)
	return nil
}

func (b *SBucket) UploadPart(ctx context.Context, key string, uploadId string, partIndex int, part io.Reader, partSize int64, offset, totalSize int64) (string, error) {
	return "", b.region.UploadPart(b.Name, uploadId, partIndex, offset, part, partSize, totalSize)
}

func (b *SBucket) PutObject(ctx context.Context, key string, body io.Reader, sizeBytes int64, cannedAcl cloudprovider.TBucketACLType, storageClassStr string, meta http.Header) error {
	return b.region.PutObject(b.Name, key, body, sizeBytes, cannedAcl, meta)
}

func (region *SRegion) GetBucket(name string) (*SBucket, error) {
	resource := "b/" + name
	bucket := &SBucket{}
	err := region.StorageGet(resource, bucket)
	if err != nil {
		return nil, errors.Wrap(err, "GetBucket")
	}
	return bucket, nil
}

func (region *SRegion) GetBuckets(maxResults int, pageToken string) ([]SBucket, error) {
	buckets := []SBucket{}
	params := map[string]string{
		"project": region.GetProjectId(),
	}
	err := region.StorageList("b", params, maxResults, pageToken, &buckets)
	if err != nil {
		return nil, err
	}
	return buckets, nil
}

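// CreateBucket creates a bucket in the region's location with an optional storage
// class, mapping the canned ACL onto the predefinedAcl query parameter while
// keeping default object ACLs private.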
func (region *SRegion) CreateBucket(name string, storageClass string, acl cloudprovider.TBucketACLType) (*SBucket, error) {
	body := map[string]interface{}{
		"name":     name,
		"location": region.Name,
	}
	if len(storageClass) > 0 {
		body["storageClass"] = storageClass
	}
	params := url.Values{}
	params.Set("predefinedDefaultObjectAcl", "private")
	switch acl {
	case cloudprovider.ACLPrivate, cloudprovider.ACLUnknown:
		params.Set("predefinedAcl", "private")
	case cloudprovider.ACLAuthRead:
		params.Set("predefinedAcl", "authenticatedRead")
	case cloudprovider.ACLPublicRead:
		params.Set("predefinedAcl", "publicRead")
	case cloudprovider.ACLPublicReadWrite:
		params.Set("predefinedAcl", "publicReadWrite")
	}
	params.Set("project", region.GetProjectId())
	bucket := &SBucket{}
	resource := fmt.Sprintf("b?%s", params.Encode())
	err := region.StorageInsert(resource, jsonutils.Marshal(body), bucket)
	if err != nil {
		return nil, err
	}
	return bucket, nil
}

func (region *SRegion) UploadObject(bucket string, params url.Values, header http.Header, input io.Reader) error {
	resource := fmt.Sprintf("b/%s/o", bucket)
	if len(params) > 0 {
		resource = fmt.Sprintf("%s?%s", resource, params.Encode())
	}
	resp, err := region.client.storageUpload(resource, header, input)
	if err != nil {
		return errors.Wrap(err, "storageUpload")
	}
	defer resp.Body.Close()
	return nil
}

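// PutObject uploads an object in a single request (uploadType=media) and then
// applies the canned ACL and metadata in follow-up calls.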
func (region *SRegion) PutObject(bucket string, name string, input io.Reader, sizeBytes int64, cannedAcl cloudprovider.TBucketACLType, meta http.Header) error {
	params := url.Values{}
	params.Set("name", name)
	params.Set("uploadType", "media")
	header := http.Header{}
	header.Set("Content-Length", fmt.Sprintf("%v", sizeBytes))
	err := region.UploadObject(bucket, params, header, input)
	if err != nil {
		return errors.Wrap(err, "UploadObject")
	}
	err = region.SetObjectAcl(bucket, name, cannedAcl)
	if err != nil {
		return errors.Wrap(err, "SetObjectAcl")
	}
	return region.SetObjectMeta(bucket, name, meta)
}

func (region *SRegion) DeleteBucket(name string) error {
	return region.StorageDelete("b/" + name)
}