github.com/djmaze/goofys@v0.24.2/internal/backend_azblob.go

// Copyright 2019 Ka-Hing Cheung
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	. "github.com/djmaze/goofys/api/common"

	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/Azure/azure-pipeline-go/pipeline"
	"github.com/Azure/azure-storage-blob-go/azblob"

	"github.com/google/uuid"
	"github.com/jacobsa/fuse"
	"github.com/sirupsen/logrus"
)

const AzuriteEndpoint = "http://127.0.0.1:8080/devstoreaccount1/"
const AzureDirBlobMetadataKey = "hdi_isfolder"
const AzureBlobMetaDataHeaderPrefix = "x-ms-meta-"

// The Azure Blob Store API does not treat headers as case insensitive,
// which is particularly a problem for the `AzureDirBlobMetadataKey` header.
// pipelineWrapper wraps an implementation of `Pipeline` and changes the Do
// function to update the request headers before invoking Do on the wrapped
// Pipeline object.
type pipelineWrapper struct {
	p pipeline.Pipeline
}

type requestWrapper struct {
	pipeline.Request
}

var pipelineHTTPClient = newDefaultHTTPClient()

// Clone of https://github.com/Azure/azure-pipeline-go/blob/master/pipeline/core.go#L202
func newDefaultHTTPClient() *http.Client {
	return &http.Client{
		Transport: GetHTTPTransport(),
	}
}

// newAzBlobHTTPClientFactory creates a pipeline.Factory object that fixes
// headers related to the Azure blob store and sends HTTP requests via the
// package-level pipelineHTTPClient.
func newAzBlobHTTPClientFactory() pipeline.Factory {
	return pipeline.FactoryFunc(
		func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
			return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
				// Fix the Azure Blob store metadata headers.
				// Problem:
				// - Golang canonicalizes headers and converts them into camel case
				//   because HTTP headers are supposed to be case insensitive. E.g. after
				//   canonicalization, 'foo-bar' becomes 'Foo-Bar'.
				// - The Azure API treats HTTP headers in a case-sensitive manner.
				// Solution: Convert the problematic headers to lower case.
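				// For example, Go canonicalizes the directory marker metadata
				// header to "X-Ms-Meta-Hdi_isfolder"; the loop below rewrites
				// the key to "x-ms-meta-hdi_isfolder" so the Azure service
				// matches it exactly.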
				for key, value := range request.Header {
					keyLower := strings.ToLower(key)
					// We are modifying the map while iterating over it, so we check
					// keyLower != key to avoid a potential infinite loop.
					// See https://golang.org/ref/spec#RangeClause for more info.
					if keyLower != key && strings.Contains(keyLower, AzureBlobMetaDataHeaderPrefix) {
						request.Header.Del(key)
						request.Header[keyLower] = value
					}
				}
				// Send the HTTP request.
				r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
				if err != nil {
					err = pipeline.NewError(err, "HTTP request failed")
				}
				return pipeline.NewHTTPResponse(r), err
			}
		})
}

type AZBlob struct {
	config *AZBlobConfig
	cap    Capabilities

	mu sync.Mutex
	u  *azblob.ServiceURL
	c  *azblob.ContainerURL

	pipeline pipeline.Pipeline

	bucket           string
	bareURL          string
	sasTokenProvider SASTokenProvider
	tokenExpire      time.Time
	tokenRenewBuffer time.Duration
	tokenRenewGate   *Ticket
}

var azbLog = GetLogger("azblob")

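// NewAZBlob constructs a blob backend for a single container. With shared key
// credentials the service and container URLs are built right away; with a SAS
// token provider they are built lazily by refreshToken/updateToken. A minimal
// wiring sketch, assuming shared-key access (values are illustrative only;
// AZBlobConfig comes from api/common):
//
//	cfg := &AZBlobConfig{
//		Endpoint:    "https://myaccount.blob.core.windows.net/",
//		AccountName: "myaccount",
//		AccountKey:  "<base64-encoded key>",
//	}
//	backend, err := NewAZBlob("mycontainer", cfg)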
func NewAZBlob(container string, config *AZBlobConfig) (*AZBlob, error) {
	po := azblob.PipelineOptions{
		Log: pipeline.LogOptions{
			Log: func(level pipeline.LogLevel, msg string) {
				// naive casting kind of works because pipeline.LogInfo maps
				// to 5, which is logrus.DebugLevel
				if level == pipeline.LogError {
					// somehow some http errors
					// are logged at Error; we
					// already log unhandled
					// errors so no need to do
					// that here
					level = pipeline.LogInfo
				}
				azbLog.Log(logrus.Level(uint32(level)), msg)
			},
			ShouldLog: func(level pipeline.LogLevel) bool {
				if level == pipeline.LogError {
					// somehow some http errors
					// are logged at Error; we
					// already log unhandled
					// errors so no need to do
					// that here
					level = pipeline.LogInfo
				}
				return azbLog.IsLevelEnabled(logrus.Level(uint32(level)))
			},
		},
		RequestLog: azblob.RequestLogOptions{
			LogWarningIfTryOverThreshold: time.Duration(-1),
		},
		HTTPSender: newAzBlobHTTPClientFactory(),
	}

	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), po)
	bareURL := config.Endpoint

	var bu *azblob.ServiceURL
	var bc *azblob.ContainerURL

	if config.SasToken == nil {
		credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
		if err != nil {
			return nil, fmt.Errorf("Unable to construct credential: %v", err)
		}

		p = azblob.NewPipeline(credential, po)

		u, err := url.Parse(bareURL)
		if err != nil {
			return nil, err
		}

		serviceURL := azblob.NewServiceURL(*u, p)
		containerURL := serviceURL.NewContainerURL(container)

		bu = &serviceURL
		bc = &containerURL
	}

	b := &AZBlob{
		config: config,
		cap: Capabilities{
			MaxMultipartSize: 100 * 1024 * 1024,
			Name:             "wasb",
		},
		pipeline:         p,
		bucket:           container,
		bareURL:          bareURL,
		sasTokenProvider: config.SasToken,
		u:                bu,
		c:                bc,
		tokenRenewBuffer: config.TokenRenewBuffer,
		tokenRenewGate:   Ticket{Total: 1}.Init(),
	}

	return b, nil
}

func (b *AZBlob) Delegate() interface{} {
	return b
}

func (b *AZBlob) Capabilities() *Capabilities {
	return &b.cap
}

func (b *AZBlob) Bucket() string {
	return b.bucket
}

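// refreshToken returns a container URL that is safe to use. Without a SAS
// token provider it simply returns the static container URL. Otherwise a
// missing URL or an already expired token forces a synchronous renewal
// (serialized by tokenRenewGate), a token nearing expiry starts at most one
// background renewal, and a still-fresh token is used as is.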
func (b *AZBlob) refreshToken() (*azblob.ContainerURL, error) {
	if b.sasTokenProvider == nil {
		return b.c, nil
	}

	b.mu.Lock()

	if b.c == nil {
		b.mu.Unlock()
		return b.updateToken()
	} else if b.tokenExpire.Before(time.Now().UTC()) {
		// our token has fully expired, renew inline before using it
		b.mu.Unlock()
		b.tokenRenewGate.Take(1, true)
		defer b.tokenRenewGate.Return(1)

		b.mu.Lock()
		// check again, because it may have been renewed in the meantime
		if b.tokenExpire.Before(time.Now().UTC()) {
			b.mu.Unlock()
			azbLog.Warnf("token expired: %v", b.tokenExpire)
			_, err := b.updateToken()
			if err != nil {
				azbLog.Errorf("Unable to refresh token: %v", err)
				return nil, syscall.EACCES
			}
		} else {
			// another concurrent goroutine renewed it for us
			b.mu.Unlock()
		}
	} else if b.tokenExpire.Add(b.tokenRenewBuffer).Before(time.Now().UTC()) {
		b.mu.Unlock()
		// only allow one token renewal at a time
		if b.tokenRenewGate.Take(1, false) {

			go func() {
				defer b.tokenRenewGate.Return(1)
				_, err := b.updateToken()
				if err != nil {
					azbLog.Errorf("Unable to refresh token: %v", err)
				}
			}()

			// if we cannot renew the token, treat it as a
			// transient failure because the token is
			// still valid for a while. When the grace
			// period is over we will get an error when we
			// actually access the blob store
		} else {
			// another goroutine is already renewing
			azbLog.Infof("token renewal already in progress")
		}
	} else {
		b.mu.Unlock()
	}
	return b.c, nil
}

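// parseSasToken extracts the expiry from the "se" (signed expiry) query
// parameter of a SAS token, e.g. "sv=...&se=2021-06-30T02:23:26Z&sig=...".
// If the parameter is missing or cannot be parsed, the token is treated as
// never expiring (TIME_MAX).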
func parseSasToken(token string) (expire time.Time) {
	expire = TIME_MAX

	parts, err := url.ParseQuery(token)
	if err != nil {
		return
	}

	se := parts.Get("se")
	if se == "" {
		azbLog.Error("token missing 'se' param")
		return
	}

	expire, err = time.Parse("2006-01-02T15:04:05Z", se)
	if err != nil {
		// sometimes they only have the date
		expire, err = time.Parse("2006-01-02", se)
		if err != nil {
			expire = TIME_MAX
		}
	}
	return
}

func (b *AZBlob) updateToken() (*azblob.ContainerURL, error) {
	token, err := b.sasTokenProvider()
	if err != nil {
		azbLog.Errorf("Unable to generate SAS token: %v", err)
		return nil, syscall.EACCES
	}

	expire := parseSasToken(token)
	azbLog.Infof("token for %v refreshed, next expire at %v", b.bucket, expire.String())

	sUrl := b.bareURL + "?" + token
	u, err := url.Parse(sUrl)
	if err != nil {
		azbLog.Errorf("Unable to construct service URL: %v", sUrl)
		return nil, fuse.EINVAL
	}

	serviceURL := azblob.NewServiceURL(*u, b.pipeline)
	containerURL := serviceURL.NewContainerURL(b.bucket)

	b.mu.Lock()
	defer b.mu.Unlock()

	b.u = &serviceURL
	b.c = &containerURL
	b.tokenExpire = expire

	return b.c, nil
}

func (b *AZBlob) testBucket(key string) (err error) {
	_, err = b.HeadBlob(&HeadBlobInput{Key: key})
	if err != nil {
		err = mapAZBError(err)
		if err == fuse.ENOENT {
			err = nil
		}
	}

	return
}

func (b *AZBlob) Init(key string) error {
	_, err := b.refreshToken()
	if err != nil {
		return err
	}

	err = b.testBucket(key)
	return err
}

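// mapAZBError translates azblob.StorageError service codes into the closest
// errno-style errors used by goofys; codes without an explicit mapping fall
// back to mapHttpError on the HTTP status code.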
func mapAZBError(err error) error {
	if err == nil {
		return nil
	}

	if stgErr, ok := err.(azblob.StorageError); ok {
		switch stgErr.ServiceCode() {
		case azblob.ServiceCodeBlobAlreadyExists:
			return syscall.EACCES
		case azblob.ServiceCodeBlobNotFound:
			return fuse.ENOENT
		case azblob.ServiceCodeContainerAlreadyExists:
			return syscall.EEXIST
		case azblob.ServiceCodeContainerBeingDeleted:
			return syscall.EAGAIN
		case azblob.ServiceCodeContainerDisabled:
			return syscall.EACCES
		case azblob.ServiceCodeContainerNotFound:
			return syscall.ENODEV
		case azblob.ServiceCodeCopyAcrossAccountsNotSupported:
			return fuse.EINVAL
		case azblob.ServiceCodeSourceConditionNotMet:
			return fuse.EINVAL
		case azblob.ServiceCodeSystemInUse:
			return syscall.EAGAIN
		case azblob.ServiceCodeTargetConditionNotMet:
			return fuse.EINVAL
		case azblob.ServiceCodeBlobBeingRehydrated:
			return syscall.EAGAIN
		case azblob.ServiceCodeBlobArchived:
			return fuse.EINVAL
		case azblob.ServiceCodeAccountBeingCreated:
			return syscall.EAGAIN
		case azblob.ServiceCodeAuthenticationFailed:
			return syscall.EACCES
		case azblob.ServiceCodeConditionNotMet:
			return syscall.EBUSY
		case azblob.ServiceCodeInternalError:
			return syscall.EAGAIN
		case azblob.ServiceCodeInvalidAuthenticationInfo:
			return syscall.EACCES
		case azblob.ServiceCodeOperationTimedOut:
			return syscall.EAGAIN
		case azblob.ServiceCodeResourceNotFound:
			return fuse.ENOENT
		case azblob.ServiceCodeServerBusy:
			return syscall.EAGAIN
		case "AuthorizationFailure": // from Azurite emulator
			return syscall.EACCES
		default:
			err = mapHttpError(stgErr.Response().StatusCode)
			if err != nil {
				return err
			} else {
				azbLog.Errorf("code=%v status=%v err=%v", stgErr.ServiceCode(), stgErr.Response().Status, stgErr)
				return stgErr
			}
		}
	} else {
		return err
	}
}

// pMetadata lower-cases the metadata keys and converts the values into
// string pointers.
func pMetadata(m map[string]string) map[string]*string {
	metadata := make(map[string]*string)
	for k, v := range m {
		// copy the value so each map entry gets its own pointer
		v := v
		metadata[strings.ToLower(k)] = &v
	}
	return metadata
}

// nilMetadata lower-cases the metadata keys and converts the values back to
// plain strings.
func nilMetadata(m map[string]*string) map[string]string {
	metadata := make(map[string]string)
	for k, v := range m {
		k = strings.ToLower(k)
		metadata[k] = NilStr(v)
	}
	return metadata
}

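// HeadBlob stats a single blob. A key ending in "/" is resolved by statting
// the same key without the slash and requiring the hdi_isfolder directory
// marker, mirroring the wasb/blobfuse folder convention used throughout this
// backend.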
func (b *AZBlob) HeadBlob(param *HeadBlobInput) (*HeadBlobOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	if strings.HasSuffix(param.Key, "/") {
		dirBlob, err := b.HeadBlob(&HeadBlobInput{Key: param.Key[:len(param.Key)-1]})
		if err == nil {
			if !dirBlob.IsDirBlob {
				// we asked for a dir suffix, but this isn't one
				err = fuse.ENOENT
			}
		}
		return dirBlob, err
	}

	blob := c.NewBlobURL(param.Key)
	resp, err := blob.GetProperties(context.TODO(), azblob.BlobAccessConditions{})
	if err != nil {
		return nil, mapAZBError(err)
	}

	metadata := resp.NewMetadata()
	isDir := strings.HasSuffix(param.Key, "/")
	if !isDir && metadata != nil {
		_, isDir = metadata[AzureDirBlobMetadataKey]
	}
	// don't expose this to user land
	delete(metadata, AzureDirBlobMetadataKey)

	return &HeadBlobOutput{
		BlobItemOutput: BlobItemOutput{
			Key:          &param.Key,
			ETag:         PString(string(resp.ETag())),
			LastModified: PTime(resp.LastModified()),
			Size:         uint64(resp.ContentLength()),
			StorageClass: PString(resp.AccessTier()),
		},
		ContentType: PString(resp.ContentType()),
		Metadata:    pMetadata(metadata),
		IsDirBlob:   isDir,
	}, nil
}

func nilUint32(v *uint32) uint32 {
	if v == nil {
		return 0
	} else {
		return *v
	}
}

func (b *AZBlob) ListBlobs(param *ListBlobsInput) (*ListBlobsOutput, error) {
	// azure blob does not support startAfter
	if param.StartAfter != nil {
		return nil, syscall.ENOTSUP
	}

	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	prefixes := make([]BlobPrefixOutput, 0)
	items := make([]BlobItemOutput, 0)

	var blobItems []azblob.BlobItem
	var nextMarker *string

	options := azblob.ListBlobsSegmentOptions{
		Prefix:     NilStr(param.Prefix),
		MaxResults: int32(nilUint32(param.MaxKeys)),
		Details: azblob.BlobListingDetails{
			// The blobfuse (following wasb) convention uses
			// an empty blob with "hdi_isfolder" metadata
			// set to represent a folder. So we include
			// metadata in the listing to discover that and
			// convert the result back to what we expect
			// (which is a "dir/" blob)
			// https://github.com/Azure/azure-storage-fuse/issues/222
			// https://blogs.msdn.microsoft.com/mostlytrue/2014/04/22/wasb-back-stories-masquerading-a-key-value-store/
			Metadata: true,
		},
	}
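	// For example, an empty blob named "photos" carrying hdi_isfolder=true is
	// reported back below as the prefix "photos/" (when listing with a
	// delimiter) or as an item named "photos/", which is what the rest of
	// goofys expects for a directory.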

	if param.Delimiter != nil {
		resp, err := c.ListBlobsHierarchySegment(context.TODO(),
			azblob.Marker{
				param.ContinuationToken,
			},
			NilStr(param.Delimiter),
			options)
		if err != nil {
			return nil, mapAZBError(err)
		}

		for i := range resp.Segment.BlobPrefixes {
			p := resp.Segment.BlobPrefixes[i]
			prefixes = append(prefixes, BlobPrefixOutput{Prefix: &p.Name})
		}

		if b.config.Endpoint == AzuriteEndpoint &&
			// XXX in Azurite this is not sorted
			!sort.IsSorted(sortBlobPrefixOutput(prefixes)) {
			sort.Sort(sortBlobPrefixOutput(prefixes))
		}

		blobItems = resp.Segment.BlobItems
		nextMarker = resp.NextMarker.Val
	} else {
		resp, err := c.ListBlobsFlatSegment(context.TODO(),
			azblob.Marker{
				param.ContinuationToken,
			},
			options)
		if err != nil {
			return nil, mapAZBError(err)
		}

		blobItems = resp.Segment.BlobItems
		nextMarker = resp.NextMarker.Val

		if b.config.Endpoint == AzuriteEndpoint &&
			!sort.IsSorted(sortBlobItemOutput(items)) {
			sort.Sort(sortBlobItemOutput(items))
		}
	}

	if len(blobItems) == 1 && len(blobItems[0].Name) <= len(options.Prefix) && strings.HasSuffix(options.Prefix, "/") {
		// There is only 1 result and that one result does not have the desired prefix. This can
		// happen if we ask for ListBlobs under /some/path/ and the result is List(/some/path). This
		// means the prefix we are listing is a blob => So return an empty response to indicate that
		// this prefix should not be treated as a directory by goofys.
		// NOTE: This undesired behaviour happens only on azblob when hierarchical namespaces are
		// enabled.
		return &ListBlobsOutput{}, nil
	}
	var sortItems bool

	for idx := range blobItems {
		i := &blobItems[idx]
		p := &i.Properties

		if i.Metadata[AzureDirBlobMetadataKey] != "" {
			i.Name = i.Name + "/"

			if param.Delimiter != nil {
				// do we already have such a prefix?
				n := len(prefixes)
				if idx := sort.Search(n, func(idx int) bool {
					return *prefixes[idx].Prefix >= i.Name
				}); idx >= n || *prefixes[idx].Prefix != i.Name {
					if idx >= n {
						prefixes = append(prefixes, BlobPrefixOutput{
							Prefix: &i.Name,
						})
					} else {
						prefixes = append(prefixes, BlobPrefixOutput{})
						copy(prefixes[idx+1:], prefixes[idx:])
						prefixes[idx].Prefix = &i.Name
					}
				}
				continue
			} else {
				sortItems = true
			}
		}

		items = append(items, BlobItemOutput{
			Key:          &i.Name,
			ETag:         PString(string(p.Etag)),
			LastModified: PTime(p.LastModified),
			Size:         uint64(*p.ContentLength),
			StorageClass: PString(string(p.AccessTier)),
		})
	}

	if strings.HasSuffix(options.Prefix, "/") {
		// because azure doesn't use dir/ blobs, the dir/ blob itself would not
		// show up, so we make another request to fill that in
		dirBlob, err := b.HeadBlob(&HeadBlobInput{options.Prefix})
		if err == nil {
			*dirBlob.Key += "/"
			items = append(items, dirBlob.BlobItemOutput)
			sortItems = true
		} else if err == fuse.ENOENT {
			err = nil
		} else {
			return nil, err
		}
	}

	// items are supposed to be alphabetical, but if there was a directory we would
	// have changed the ordering. XXX re-sort for now, but we could probably
	// insert in order instead
	if sortItems {
		sort.Sort(sortBlobItemOutput(items))
	}

	if nextMarker != nil && *nextMarker == "" {
		nextMarker = nil
	}

	return &ListBlobsOutput{
		Prefixes:              prefixes,
		Items:                 items,
		NextContinuationToken: nextMarker,
		IsTruncated:           nextMarker != nil,
	}, nil
}

func (b *AZBlob) DeleteBlob(param *DeleteBlobInput) (*DeleteBlobOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	if strings.HasSuffix(param.Key, "/") {
		return b.DeleteBlob(&DeleteBlobInput{Key: param.Key[:len(param.Key)-1]})
	}

	blob := c.NewBlobURL(param.Key)
	_, err = blob.Delete(context.TODO(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
	if err != nil {
		return nil, mapAZBError(err)
	}
	return &DeleteBlobOutput{}, nil
}

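// DeleteBlobs deletes the given keys concurrently, one goroutine per key,
// bounded by SmallActionsGate. ENOENT from an individual delete is ignored;
// any other error stops further deletes and is returned.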
func (b *AZBlob) DeleteBlobs(param *DeleteBlobsInput) (ret *DeleteBlobsOutput, deleteError error) {
	var wg sync.WaitGroup
	defer func() {
		wg.Wait()
		if deleteError != nil {
			ret = nil
		} else {
			ret = &DeleteBlobsOutput{}
		}
	}()

	for _, i := range param.Items {
		SmallActionsGate.Take(1, true)
		wg.Add(1)

		go func(key string) {
			defer func() {
				SmallActionsGate.Return(1)
				wg.Done()
			}()

			_, err := b.DeleteBlob(&DeleteBlobInput{key})
			if err != nil {
				err = mapAZBError(err)
				if err != fuse.ENOENT {
					deleteError = err
				}
			}
		}(i)

		if deleteError != nil {
			return
		}
	}

	return
}

func (b *AZBlob) RenameBlob(param *RenameBlobInput) (*RenameBlobOutput, error) {
	return nil, syscall.ENOTSUP
}

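// CopyBlob performs a server-side copy via StartCopyFromURL. If the service
// reports the copy as pending, it polls GetProperties until the status
// changes or a different copy ID shows up (in which case the previous copy
// is assumed to have finished).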
func (b *AZBlob) CopyBlob(param *CopyBlobInput) (*CopyBlobOutput, error) {
	if strings.HasSuffix(param.Source, "/") && strings.HasSuffix(param.Destination, "/") {
		param.Source = param.Source[:len(param.Source)-1]
		param.Destination = param.Destination[:len(param.Destination)-1]
		return b.CopyBlob(param)
	}

	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	src := c.NewBlobURL(param.Source)
	dest := c.NewBlobURL(param.Destination)
	resp, err := dest.StartCopyFromURL(context.TODO(), src.URL(), nilMetadata(param.Metadata),
		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
	if err != nil {
		return nil, mapAZBError(err)
	}

	if resp.CopyStatus() == azblob.CopyStatusPending {
		time.Sleep(50 * time.Millisecond)

		var copy *azblob.BlobGetPropertiesResponse
		for copy, err = dest.GetProperties(context.TODO(), azblob.BlobAccessConditions{}); err == nil; copy, err = dest.GetProperties(context.TODO(), azblob.BlobAccessConditions{}) {
			// if there's a new copy, we can only assume the last one was done
			if copy.CopyStatus() != azblob.CopyStatusPending || copy.CopyID() != resp.CopyID() {
				break
			}
		}
		if err != nil {
			return nil, mapAZBError(err)
		}
	}

	return &CopyBlobOutput{}, nil
}

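// GetBlob downloads the requested byte range (Start/Count) of a blob and
// honors an optional IfMatch ETag precondition; the returned Body streams
// the response.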
func (b *AZBlob) GetBlob(param *GetBlobInput) (*GetBlobOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	blob := c.NewBlobURL(param.Key)
	var ifMatch azblob.ETag
	if param.IfMatch != nil {
		ifMatch = azblob.ETag(*param.IfMatch)
	}

	resp, err := blob.Download(context.TODO(),
		int64(param.Start), int64(param.Count),
		azblob.BlobAccessConditions{
			ModifiedAccessConditions: azblob.ModifiedAccessConditions{
				IfMatch: ifMatch,
			},
		}, false)
	if err != nil {
		return nil, mapAZBError(err)
	}

	metadata := pMetadata(resp.NewMetadata())
	delete(metadata, AzureDirBlobMetadataKey)

	return &GetBlobOutput{
		HeadBlobOutput: HeadBlobOutput{
			BlobItemOutput: BlobItemOutput{
				Key:          &param.Key,
				ETag:         PString(string(resp.ETag())),
				LastModified: PTime(resp.LastModified()),
				Size:         uint64(resp.ContentLength()),
			},
			ContentType: PString(resp.ContentType()),
			Metadata:    metadata,
		},
		Body: resp.Body(azblob.RetryReaderOptions{}),
	}, nil
}

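// PutBlob uploads a block blob in a single request. Directory blobs are
// translated to the wasb convention: for example, a PutBlob for "photos/"
// with DirBlob set writes an empty blob named "photos" whose metadata has
// hdi_isfolder set to "true".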
func (b *AZBlob) PutBlob(param *PutBlobInput) (*PutBlobOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	if param.DirBlob && strings.HasSuffix(param.Key, "/") {
		// turn this into an empty blob with "hdi_isfolder" metadata
		param.Key = param.Key[:len(param.Key)-1]
		if param.Metadata != nil {
			param.Metadata[AzureDirBlobMetadataKey] = PString("true")
		} else {
			param.Metadata = map[string]*string{
				AzureDirBlobMetadataKey: PString("true"),
			}
		}
		return b.PutBlob(param)
	}

	body := param.Body
	if body == nil {
		body = bytes.NewReader([]byte(""))
	}

	blob := c.NewBlobURL(param.Key).ToBlockBlobURL()
	resp, err := blob.Upload(context.TODO(),
		body,
		azblob.BlobHTTPHeaders{
			ContentType: NilStr(param.ContentType),
		},
		nilMetadata(param.Metadata), azblob.BlobAccessConditions{})
	if err != nil {
		return nil, mapAZBError(err)
	}

	return &PutBlobOutput{
		ETag:         PString(string(resp.ETag())),
		LastModified: PTime(resp.LastModified()),
	}, nil
}

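// MultipartBlobBegin only prepares client-side state: block blob uploads need
// no explicit "initiate" call, so the returned UploadId is a format string
// ("<uuid>::%05d") that MultipartBlobAdd expands per part. For example, part
// 3 of an upload becomes the block id "<uuid>::00003", which is then
// base64-encoded before being staged.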
func (b *AZBlob) MultipartBlobBegin(param *MultipartBlobBeginInput) (*MultipartBlobCommitInput, error) {
	// we can have up to 50K parts, so %05d should be sufficient
	uploadId := uuid.New().String() + "::%05d"

	// initiating the upload is done implicitly on the server side
	return &MultipartBlobCommitInput{
		Key:      &param.Key,
		Metadata: param.Metadata,
		UploadId: &uploadId,
		Parts:    make([]*string, 50000), // at most 50K parts
	}, nil
}

func (b *AZBlob) MultipartBlobAdd(param *MultipartBlobAddInput) (*MultipartBlobAddOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	blob := c.NewBlockBlobURL(*param.Commit.Key)
	blockId := fmt.Sprintf(*param.Commit.UploadId, param.PartNumber)
	base64BlockId := base64.StdEncoding.EncodeToString([]byte(blockId))

	atomic.AddUint32(&param.Commit.NumParts, 1)

	_, err = blob.StageBlock(context.TODO(), base64BlockId, param.Body,
		azblob.LeaseAccessConditions{}, nil)
	if err != nil {
		return nil, mapAZBError(err)
	}

	param.Commit.Parts[param.PartNumber-1] = &base64BlockId

	return &MultipartBlobAddOutput{}, nil
}

func (b *AZBlob) MultipartBlobAbort(param *MultipartBlobCommitInput) (*MultipartBlobAbortOutput, error) {
	// no-op: the server will garbage collect the uncommitted blocks
	return &MultipartBlobAbortOutput{}, nil
}

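// MultipartBlobCommit assembles the final blob by committing the first
// NumParts staged block ids, in part order, via CommitBlockList.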
func (b *AZBlob) MultipartBlobCommit(param *MultipartBlobCommitInput) (*MultipartBlobCommitOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	blob := c.NewBlockBlobURL(*param.Key)
	parts := make([]string, param.NumParts)

	for i := uint32(0); i < param.NumParts; i++ {
		parts[i] = *param.Parts[i]
	}

	resp, err := blob.CommitBlockList(context.TODO(), parts,
		azblob.BlobHTTPHeaders{}, nilMetadata(param.Metadata),
		azblob.BlobAccessConditions{})
	if err != nil {
		return nil, mapAZBError(err)
	}

	return &MultipartBlobCommitOutput{
		ETag:         PString(string(resp.ETag())),
		LastModified: PTime(resp.LastModified()),
	}, nil
}

func (b *AZBlob) MultipartExpire(param *MultipartExpireInput) (*MultipartExpireOutput, error) {
	return nil, syscall.ENOTSUP
}

func (b *AZBlob) RemoveBucket(param *RemoveBucketInput) (*RemoveBucketOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	_, err = c.Delete(context.TODO(), azblob.ContainerAccessConditions{})
	if err != nil {
		return nil, mapAZBError(err)
	}
	return &RemoveBucketOutput{}, nil
}

func (b *AZBlob) MakeBucket(param *MakeBucketInput) (*MakeBucketOutput, error) {
	c, err := b.refreshToken()
	if err != nil {
		return nil, err
	}

	_, err = c.Create(context.TODO(), nil, azblob.PublicAccessNone)
	if err != nil {
		return nil, mapAZBError(err)
	}
	return &MakeBucketOutput{}, nil
}