github.com/valexz/goofys@v0.24.0/internal/backend_azblob.go

     1  // Copyright 2019 Ka-Hing Cheung
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package internal
    16  
    17  import (
    18  	. "github.com/kahing/goofys/api/common"
    19  
    20  	"bytes"
    21  	"context"
    22  	"encoding/base64"
    23  	"fmt"
    24  	"net/http"
    25  	"net/url"
    26  	"sort"
    27  	"strings"
    28  	"sync"
    29  	"sync/atomic"
    30  	"syscall"
    31  	"time"
    32  
    33  	"github.com/Azure/azure-pipeline-go/pipeline"
    34  	"github.com/Azure/azure-storage-blob-go/azblob"
    35  
    36  	"github.com/google/uuid"
    37  	"github.com/jacobsa/fuse"
    38  	"github.com/sirupsen/logrus"
    39  )
    40  
    41  const AzuriteEndpoint = "http://127.0.0.1:8080/devstoreaccount1/"
    42  const AzureDirBlobMetadataKey = "hdi_isfolder"
    43  const AzureBlobMetaDataHeaderPrefix = "x-ms-meta-"
    44  
    45  // The Azure Blob Store API does not treat headers as case insensitive.
    46  // This is particularly a problem with the `AzureDirBlobMetadataKey` header.
    47  // pipelineWrapper wraps an implementation of `Pipeline` and
    48  // changes the Do function to update the input request headers before invoking
    49  // Do on the wrapped Pipeline object.
    50  type pipelineWrapper struct {
    51  	p pipeline.Pipeline
    52  }
    53  
    54  type requestWrapper struct {
    55  	pipeline.Request
    56  }
    57  
    58  var pipelineHTTPClient = newDefaultHTTPClient()
    59  
    60  // Clone of https://github.com/Azure/azure-pipeline-go/blob/master/pipeline/core.go#L202
    61  func newDefaultHTTPClient() *http.Client {
    62  	return &http.Client{
    63  		Transport: GetHTTPTransport(),
    64  	}
    65  }
    66  
    67  // Creates a pipeline.Factory object that fixes headers related to azure blob store
    68  // and sends HTTP requests to Go's default http.Client.
    69  func newAzBlobHTTPClientFactory() pipeline.Factory {
    70  	return pipeline.FactoryFunc(
    71  		func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
    72  			return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
    73  				// Fix the Azure Blob store metadata headers.
    74  				// Problem:
    75  				// - Golang canonicalizes headers and converts them into camel case
    76  				//   because HTTP headers are supposed to be case insensitive. E.g. after
    77  				//   canonicalization, 'foo-bar' becomes 'Foo-Bar'.
    78  				// - The Azure API treats HTTP headers in a case-sensitive manner.
    79  				// Solution: Convert the problematic headers to lower case.
    80  				for key, value := range request.Header {
    81  					keyLower := strings.ToLower(key)
    82  				// We are modifying the map while iterating over it, so we check for
    83  				// keyLower != key to avoid a potential infinite loop.
    84  					// See https://golang.org/ref/spec#RangeClause for more info.
    85  					if keyLower != key && strings.Contains(keyLower, AzureBlobMetaDataHeaderPrefix) {
    86  						request.Header.Del(key)
    87  						request.Header[keyLower] = value
    88  					}
    89  				}
    90  				// Send the HTTP request.
    91  				r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
    92  				if err != nil {
    93  					err = pipeline.NewError(err, "HTTP request failed")
    94  				}
    95  				return pipeline.NewHTTPResponse(r), err
    96  			}
    97  		})
    98  }
    99  
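        // AZBlob is the Azure Blob Storage backend. When a SAS token provider is
        // configured, the service and container URLs are (re)built lazily by
        // refreshToken; otherwise they are created once in NewAZBlob from
        // shared-key credentials.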
   100  type AZBlob struct {
   101  	config *AZBlobConfig
   102  	cap    Capabilities
   103  
   104  	mu sync.Mutex
   105  	u  *azblob.ServiceURL
   106  	c  *azblob.ContainerURL
   107  
   108  	pipeline pipeline.Pipeline
   109  
   110  	bucket           string
   111  	bareURL          string
   112  	sasTokenProvider SASTokenProvider
   113  	tokenExpire      time.Time
   114  	tokenRenewBuffer time.Duration
   115  	tokenRenewGate   *Ticket
   116  }
   117  
   118  var azbLog = GetLogger("azblob")
   119  
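        // NewAZBlob builds an AZBlob backend for the given container. With shared-key
        // credentials the service and container URLs are constructed eagerly; with a
        // SAS token provider they are constructed on first use by refreshToken.
        //
        // Illustrative call (the endpoint and credential values below are
        // placeholders, not defaults):
        //
        //	azb, err := NewAZBlob("mycontainer", &AZBlobConfig{
        //		Endpoint:    "https://myaccount.blob.core.windows.net/",
        //		AccountName: "myaccount",
        //		AccountKey:  "<base64 account key>",
        //	})
        //	if err != nil {
        //		// handle configuration/credential errors
        //	}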
   120  func NewAZBlob(container string, config *AZBlobConfig) (*AZBlob, error) {
   121  	po := azblob.PipelineOptions{
   122  		Log: pipeline.LogOptions{
   123  			Log: func(level pipeline.LogLevel, msg string) {
   124  			// the naive cast works because pipeline.LogInfo maps
   125  			// to 5, which is logrus.DebugLevel
   126  				if level == pipeline.LogError {
   127  					// some HTTP errors are logged
   128  					// at Error level; we already
   129  					// log unhandled errors
   130  					// elsewhere, so downgrade
   131  					// them to Info here
   132  					level = pipeline.LogInfo
   133  				}
   134  				azbLog.Log(logrus.Level(uint32(level)), msg)
   135  			},
   136  			ShouldLog: func(level pipeline.LogLevel) bool {
   137  				if level == pipeline.LogError {
   138  					// some HTTP errors are logged
   139  					// at Error level; we already
   140  					// log unhandled errors
   141  					// elsewhere, so downgrade
   142  					// them to Info here
   143  					level = pipeline.LogInfo
   144  				}
   145  				return azbLog.IsLevelEnabled(logrus.Level(uint32(level)))
   146  			},
   147  		},
   148  		RequestLog: azblob.RequestLogOptions{
   149  			LogWarningIfTryOverThreshold: time.Duration(-1),
   150  		},
   151  		HTTPSender: newAzBlobHTTPClientFactory(),
   152  	}
   153  
   154  	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), po)
   155  	bareURL := config.Endpoint
   156  
   157  	var bu *azblob.ServiceURL
   158  	var bc *azblob.ContainerURL
   159  
   160  	if config.SasToken == nil {
   161  		credential, err := azblob.NewSharedKeyCredential(config.AccountName, config.AccountKey)
   162  		if err != nil {
   163  			return nil, fmt.Errorf("Unable to construct credential: %v", err)
   164  		}
   165  
   166  		p = azblob.NewPipeline(credential, po)
   167  
   168  		u, err := url.Parse(bareURL)
   169  		if err != nil {
   170  			return nil, err
   171  		}
   172  
   173  		serviceURL := azblob.NewServiceURL(*u, p)
   174  		containerURL := serviceURL.NewContainerURL(container)
   175  
   176  		bu = &serviceURL
   177  		bc = &containerURL
   178  	}
   179  
   180  	b := &AZBlob{
   181  		config: config,
   182  		cap: Capabilities{
   183  			MaxMultipartSize: 100 * 1024 * 1024,
   184  			Name:             "wasb",
   185  		},
   186  		pipeline:         p,
   187  		bucket:           container,
   188  		bareURL:          bareURL,
   189  		sasTokenProvider: config.SasToken,
   190  		u:                bu,
   191  		c:                bc,
   192  		tokenRenewBuffer: config.TokenRenewBuffer,
   193  		tokenRenewGate:   Ticket{Total: 1}.Init(),
   194  	}
   195  
   196  	return b, nil
   197  }
   198  
   199  func (b *AZBlob) Delegate() interface{} {
   200  	return b
   201  }
   202  
   203  func (b *AZBlob) Capabilities() *Capabilities {
   204  	return &b.cap
   205  }
   206  
   207  func (b *AZBlob) Bucket() string {
   208  	return b.bucket
   209  }
   210  
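        // refreshToken returns a usable ContainerURL. With a SAS token provider it
        // renews the token inline when it has already expired, or kicks off a
        // background renewal once the expiry is within tokenRenewBuffer.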
   211  func (b *AZBlob) refreshToken() (*azblob.ContainerURL, error) {
   212  	if b.sasTokenProvider == nil {
   213  		return b.c, nil
   214  	}
   215  
   216  	b.mu.Lock()
   217  
   218  	if b.c == nil {
   219  		b.mu.Unlock()
   220  		return b.updateToken()
   221  	} else if b.tokenExpire.Before(time.Now().UTC()) {
   222  		// our token totally expired, renew inline before using it
   223  		b.mu.Unlock()
   224  		b.tokenRenewGate.Take(1, true)
   225  		defer b.tokenRenewGate.Return(1)
   226  
   227  		b.mu.Lock()
   228  		// check again; it may have been renewed in the meantime
   229  		if b.tokenExpire.Before(time.Now().UTC()) {
   230  			b.mu.Unlock()
   231  			azbLog.Warnf("token expired: %v", b.tokenExpire)
   232  			_, err := b.updateToken()
   233  			if err != nil {
   234  				azbLog.Errorf("Unable to refresh token: %v", err)
   235  				return nil, syscall.EACCES
   236  			}
   237  		} else {
   238  			// another concurrent goroutine renewed it for us
   239  			b.mu.Unlock()
   240  		}
   241  	} else if b.tokenExpire.Add(-b.tokenRenewBuffer).Before(time.Now().UTC()) { // approaching expiry: renew in the background
   242  		b.mu.Unlock()
   243  		// only allow one token renew at a time
   244  		if b.tokenRenewGate.Take(1, false) {
   245  
   246  			go func() {
   247  				defer b.tokenRenewGate.Return(1)
   248  				_, err := b.updateToken()
   249  				if err != nil {
   250  					azbLog.Errorf("Unable to refresh token: %v", err)
   251  				}
   252  			}()
   253  
   254  			// if we cannot renew token, treat it as a
   255  			// transient failure because the token is
   256  			// still valid for a while. When the grace
   257  			// period is over we will get an error when we
   258  			// actually access the blob store
   259  		} else {
   260  			// another goroutine is already renewing
   261  			azbLog.Infof("token renewal already in progress")
   262  		}
   263  	} else {
   264  		b.mu.Unlock()
   265  	}
   266  	return b.c, nil
   267  }
   268  
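        // parseSasToken extracts the expiry from the token's "se" query parameter,
        // falling back to TIME_MAX if it is missing or cannot be parsed.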
   269  func parseSasToken(token string) (expire time.Time) {
   270  	expire = TIME_MAX
   271  
   272  	parts, err := url.ParseQuery(token)
   273  	if err != nil {
   274  		return
   275  	}
   276  
   277  	se := parts.Get("se")
   278  	if se == "" {
   279  		azbLog.Error("token missing 'se' param")
   280  		return
   281  	}
   282  
   283  	expire, err = time.Parse("2006-01-02T15:04:05Z", se)
   284  	if err != nil {
   285  		// sometimes they only have the date
   286  		expire, err = time.Parse("2006-01-02", se)
   287  		if err != nil {
   288  			expire = TIME_MAX
   289  		}
   290  	}
   291  	return
   292  }
   293  
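        // updateToken fetches a fresh SAS token from the provider and rebuilds the
        // service and container URLs under the lock, recording the new expiry.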
   294  func (b *AZBlob) updateToken() (*azblob.ContainerURL, error) {
   295  	token, err := b.sasTokenProvider()
   296  	if err != nil {
   297  		azbLog.Errorf("Unable to generate SAS token: %v", err)
   298  		return nil, syscall.EACCES
   299  	}
   300  
   301  	expire := parseSasToken(token)
   302  	azbLog.Infof("token for %v refreshed, next expire at %v", b.bucket, expire.String())
   303  
   304  	sUrl := b.bareURL + "?" + token
   305  	u, err := url.Parse(sUrl)
   306  	if err != nil {
   307  		azbLog.Errorf("Unable to construct service URL: %v", sUrl)
   308  		return nil, fuse.EINVAL
   309  	}
   310  
   311  	serviceURL := azblob.NewServiceURL(*u, b.pipeline)
   312  	containerURL := serviceURL.NewContainerURL(b.bucket)
   313  
   314  	b.mu.Lock()
   315  	defer b.mu.Unlock()
   316  
   317  	b.u = &serviceURL
   318  	b.c = &containerURL
   319  	b.tokenExpire = expire
   320  
   321  	return b.c, nil
   322  }
   323  
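        // testBucket verifies that the container is reachable by issuing a HeadBlob;
        // a missing key (ENOENT) is not treated as an error.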
   324  func (b *AZBlob) testBucket(key string) (err error) {
   325  	_, err = b.HeadBlob(&HeadBlobInput{Key: key})
   326  	if err != nil {
   327  		err = mapAZBError(err)
   328  		if err == fuse.ENOENT {
   329  			err = nil
   330  		}
   331  	}
   332  
   333  	return
   334  }
   335  
   336  func (b *AZBlob) Init(key string) error {
   337  	_, err := b.refreshToken()
   338  	if err != nil {
   339  		return err
   340  	}
   341  
   342  	err = b.testBucket(key)
   343  	return err
   344  }
   345  
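        // mapAZBError translates azblob.StorageError service codes into the
        // errno-style errors used by goofys, falling back to the HTTP status code
        // for unrecognized service codes.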
   346  func mapAZBError(err error) error {
   347  	if err == nil {
   348  		return nil
   349  	}
   350  
   351  	if stgErr, ok := err.(azblob.StorageError); ok {
   352  		switch stgErr.ServiceCode() {
   353  		case azblob.ServiceCodeBlobAlreadyExists:
   354  			return syscall.EACCES
   355  		case azblob.ServiceCodeBlobNotFound:
   356  			return fuse.ENOENT
   357  		case azblob.ServiceCodeContainerAlreadyExists:
   358  			return syscall.EEXIST
   359  		case azblob.ServiceCodeContainerBeingDeleted:
   360  			return syscall.EAGAIN
   361  		case azblob.ServiceCodeContainerDisabled:
   362  			return syscall.EACCES
   363  		case azblob.ServiceCodeContainerNotFound:
   364  			return syscall.ENODEV
   365  		case azblob.ServiceCodeCopyAcrossAccountsNotSupported:
   366  			return fuse.EINVAL
   367  		case azblob.ServiceCodeSourceConditionNotMet:
   368  			return fuse.EINVAL
   369  		case azblob.ServiceCodeSystemInUse:
   370  			return syscall.EAGAIN
   371  		case azblob.ServiceCodeTargetConditionNotMet:
   372  			return fuse.EINVAL
   373  		case azblob.ServiceCodeBlobBeingRehydrated:
   374  			return syscall.EAGAIN
   375  		case azblob.ServiceCodeBlobArchived:
   376  			return fuse.EINVAL
   377  		case azblob.ServiceCodeAccountBeingCreated:
   378  			return syscall.EAGAIN
   379  		case azblob.ServiceCodeAuthenticationFailed:
   380  			return syscall.EACCES
   381  		case azblob.ServiceCodeConditionNotMet:
   382  			return syscall.EBUSY
   383  		case azblob.ServiceCodeInternalError:
   384  			return syscall.EAGAIN
   385  		case azblob.ServiceCodeInvalidAuthenticationInfo:
   386  			return syscall.EACCES
   387  		case azblob.ServiceCodeOperationTimedOut:
   388  			return syscall.EAGAIN
   389  		case azblob.ServiceCodeResourceNotFound:
   390  			return fuse.ENOENT
   391  		case azblob.ServiceCodeServerBusy:
   392  			return syscall.EAGAIN
   393  		case "AuthorizationFailure": // from Azurite emulator
   394  			return syscall.EACCES
   395  		default:
   396  			err = mapHttpError(stgErr.Response().StatusCode)
   397  			if err != nil {
   398  				return err
   399  			} else {
   400  				azbLog.Errorf("code=%v status=%v err=%v", stgErr.ServiceCode(), stgErr.Response().Status, stgErr)
   401  				return stgErr
   402  			}
   403  		}
   404  	} else {
   405  		return err
   406  	}
   407  }
   408  
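        // pMetadata lower-cases metadata keys and converts the values to *string.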
   409  func pMetadata(m map[string]string) map[string]*string {
   410  	metadata := make(map[string]*string)
   411  	for k, v := range m {
   412  		// copy v so that each entry points at its own value
   413  		v := v
   414  		metadata[strings.ToLower(k)] = &v
   415  	}
   416  	return metadata
   417  }
   418  
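        // nilMetadata lower-cases metadata keys and dereferences the values, mapping nil to "".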
   419  func nilMetadata(m map[string]*string) map[string]string {
   420  	metadata := make(map[string]string)
   421  	for k, v := range m {
   422  		k = strings.ToLower(k)
   423  		metadata[k] = nilStr(v)
   424  	}
   425  	return metadata
   426  }
   427  
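        // HeadBlob returns a blob's properties and metadata. A key with a trailing "/"
        // is resolved via the corresponding "hdi_isfolder" directory marker blob.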
   428  func (b *AZBlob) HeadBlob(param *HeadBlobInput) (*HeadBlobOutput, error) {
   429  	c, err := b.refreshToken()
   430  	if err != nil {
   431  		return nil, err
   432  	}
   433  
   434  	if strings.HasSuffix(param.Key, "/") {
   435  		dirBlob, err := b.HeadBlob(&HeadBlobInput{Key: param.Key[:len(param.Key)-1]})
   436  		if err == nil {
   437  			if !dirBlob.IsDirBlob {
   438  				// we asked for a directory blob, but this isn't one
   439  				err = fuse.ENOENT
   440  			}
   441  		}
   442  		return dirBlob, err
   443  	}
   444  
   445  	blob := c.NewBlobURL(param.Key)
   446  	resp, err := blob.GetProperties(context.TODO(), azblob.BlobAccessConditions{})
   447  	if err != nil {
   448  		return nil, mapAZBError(err)
   449  	}
   450  
   451  	metadata := resp.NewMetadata()
   452  	isDir := strings.HasSuffix(param.Key, "/")
   453  	if !isDir && metadata != nil {
   454  		_, isDir = metadata[AzureDirBlobMetadataKey]
   455  	}
   456  	// don't expose this to user land
   457  	delete(metadata, AzureDirBlobMetadataKey)
   458  
   459  	return &HeadBlobOutput{
   460  		BlobItemOutput: BlobItemOutput{
   461  			Key:          &param.Key,
   462  			ETag:         PString(string(resp.ETag())),
   463  			LastModified: PTime(resp.LastModified()),
   464  			Size:         uint64(resp.ContentLength()),
   465  			StorageClass: PString(resp.AccessTier()),
   466  		},
   467  		ContentType: PString(resp.ContentType()),
   468  		Metadata:    pMetadata(metadata),
   469  		IsDirBlob:   isDir,
   470  	}, nil
   471  }
   472  
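        // nilStr dereferences an optional string, mapping nil to "".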
   473  func nilStr(v *string) string {
   474  	if v == nil {
   475  		return ""
   476  	} else {
   477  		return *v
   478  	}
   479  }
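        // nilUint32 dereferences an optional uint32, mapping nil to 0.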
   480  func nilUint32(v *uint32) uint32 {
   481  	if v == nil {
   482  		return 0
   483  	} else {
   484  		return *v
   485  	}
   486  }
   487  
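        // ListBlobs lists blobs with an optional prefix and delimiter. Empty blobs
        // carrying the "hdi_isfolder" metadata are converted into the "dir/" style
        // entries the rest of goofys expects. StartAfter is not supported by Azure
        // Blob Storage.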
   488  func (b *AZBlob) ListBlobs(param *ListBlobsInput) (*ListBlobsOutput, error) {
   489  	// azure blob does not support startAfter
   490  	if param.StartAfter != nil {
   491  		return nil, syscall.ENOTSUP
   492  	}
   493  
   494  	c, err := b.refreshToken()
   495  	if err != nil {
   496  		return nil, err
   497  	}
   498  
   499  	prefixes := make([]BlobPrefixOutput, 0)
   500  	items := make([]BlobItemOutput, 0)
   501  
   502  	var blobItems []azblob.BlobItem
   503  	var nextMarker *string
   504  
   505  	options := azblob.ListBlobsSegmentOptions{
   506  		Prefix:     nilStr(param.Prefix),
   507  		MaxResults: int32(nilUint32(param.MaxKeys)),
   508  		Details: azblob.BlobListingDetails{
   509  			// the blobfuse (and wasb) convention uses
   510  			// an empty blob with "hdi_isfolder" metadata
   511  			// set to represent a folder, so we include
   512  			// metadata in the listing to discover that and
   513  			// convert the result back to what we expect
   514  			// (which is a "dir/" blob)
   515  			// https://github.com/Azure/azure-storage-fuse/issues/222
   516  			// https://blogs.msdn.microsoft.com/mostlytrue/2014/04/22/wasb-back-stories-masquerading-a-key-value-store/
   517  			Metadata: true,
   518  		},
   519  	}
   520  
   521  	if param.Delimiter != nil {
   522  		resp, err := c.ListBlobsHierarchySegment(context.TODO(),
   523  			azblob.Marker{
   524  				param.ContinuationToken,
   525  			},
   526  			nilStr(param.Delimiter),
   527  			options)
   528  		if err != nil {
   529  			return nil, mapAZBError(err)
   530  		}
   531  
   532  		for i := range resp.Segment.BlobPrefixes {
   533  			p := resp.Segment.BlobPrefixes[i]
   534  			prefixes = append(prefixes, BlobPrefixOutput{Prefix: &p.Name})
   535  		}
   536  
   537  		if b.config.Endpoint == AzuriteEndpoint &&
   538  			// XXX in Azurite this is not sorted
   539  			!sort.IsSorted(sortBlobPrefixOutput(prefixes)) {
   540  			sort.Sort(sortBlobPrefixOutput(prefixes))
   541  		}
   542  
   543  		blobItems = resp.Segment.BlobItems
   544  		nextMarker = resp.NextMarker.Val
   545  	} else {
   546  		resp, err := c.ListBlobsFlatSegment(context.TODO(),
   547  			azblob.Marker{
   548  				param.ContinuationToken,
   549  			},
   550  			options)
   551  		if err != nil {
   552  			return nil, mapAZBError(err)
   553  		}
   554  
   555  		blobItems = resp.Segment.BlobItems
   556  		nextMarker = resp.NextMarker.Val
   557  
   558  		// XXX Azurite does not return flat listings in
   559  		// sorted order; the converted items are re-sorted
   560  		// below (via sortItems) once they have been
   561  		// built
   562  	}
   563  
   564  	if len(blobItems) == 1 && len(blobItems[0].Name) <= len(options.Prefix) && strings.HasSuffix(options.Prefix, "/") {
   565  		// There is only one result and it does not carry the requested prefix. This can
   566  		// happen if we list under /some/path/ and the result is just /some/path, which
   567  		// means the prefix we are listing is itself a blob. Return an empty response so
   568  		// that goofys does not treat this prefix as a directory.
   569  		// NOTE: this undesired behaviour happens only on azblob when hierarchical
   570  		// namespaces are enabled.
   571  		return &ListBlobsOutput{}, nil
   572  	}
   573  	sortItems := b.config.Endpoint == AzuriteEndpoint // Azurite listings need re-sorting
   574  
   575  	for idx := range blobItems {
   576  		i := &blobItems[idx]
   577  		p := &i.Properties
   578  
   579  		if i.Metadata[AzureDirBlobMetadataKey] != "" {
   580  			i.Name = i.Name + "/"
   581  
   582  			if param.Delimiter != nil {
   583  				// do we already have such a prefix?
   584  				n := len(prefixes)
   585  				if idx := sort.Search(n, func(idx int) bool {
   586  					return *prefixes[idx].Prefix >= i.Name
   587  				}); idx >= n || *prefixes[idx].Prefix != i.Name {
   588  					if idx >= n {
   589  						prefixes = append(prefixes, BlobPrefixOutput{
   590  							Prefix: &i.Name,
   591  						})
   592  					} else {
   593  						prefixes = append(prefixes, BlobPrefixOutput{})
   594  						copy(prefixes[idx+1:], prefixes[idx:])
   595  						prefixes[idx].Prefix = &i.Name
   596  					}
   597  				}
   598  				continue
   599  			} else {
   600  				sortItems = true
   601  			}
   602  		}
   603  
   604  		items = append(items, BlobItemOutput{
   605  			Key:          &i.Name,
   606  			ETag:         PString(string(p.Etag)),
   607  			LastModified: PTime(p.LastModified),
   608  			Size:         uint64(*p.ContentLength),
   609  			StorageClass: PString(string(p.AccessTier)),
   610  		})
   611  	}
   612  
   613  	if strings.HasSuffix(options.Prefix, "/") {
   614  		// because Azure doesn't use explicit dir/ blobs, the directory itself would
   615  		// not show up in the listing, so we make another request to fill that in
   616  		dirBlob, err := b.HeadBlob(&HeadBlobInput{options.Prefix})
   617  		if err == nil {
   618  			*dirBlob.Key += "/"
   619  			items = append(items, dirBlob.BlobItemOutput)
   620  			sortItems = true
   621  		} else if err == fuse.ENOENT {
   622  			err = nil
   623  		} else {
   624  			return nil, err
   625  		}
   626  	}
   627  
   628  	// items are supposed to be alphabetical, but a directory marker blob (or an
   629  	// unsorted Azurite listing) may have changed the ordering. XXX re-sort for
   630  	// now, but we could probably insert in the right position instead
   631  	if sortItems {
   632  		sort.Sort(sortBlobItemOutput(items))
   633  	}
   634  
   635  	if nextMarker != nil && *nextMarker == "" {
   636  		nextMarker = nil
   637  	}
   638  
   639  	return &ListBlobsOutput{
   640  		Prefixes:              prefixes,
   641  		Items:                 items,
   642  		NextContinuationToken: nextMarker,
   643  		IsTruncated:           nextMarker != nil,
   644  	}, nil
   645  }
   646  
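        // DeleteBlob deletes a blob (including its snapshots); a trailing "/" in the
        // key is stripped so that directory marker blobs can be removed as well.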
   647  func (b *AZBlob) DeleteBlob(param *DeleteBlobInput) (*DeleteBlobOutput, error) {
   648  	c, err := b.refreshToken()
   649  	if err != nil {
   650  		return nil, err
   651  	}
   652  
   653  	if strings.HasSuffix(param.Key, "/") {
   654  		return b.DeleteBlob(&DeleteBlobInput{Key: param.Key[:len(param.Key)-1]})
   655  	}
   656  
   657  	blob := c.NewBlobURL(param.Key)
   658  	_, err = blob.Delete(context.TODO(), azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
   659  	if err != nil {
   660  		return nil, mapAZBError(err)
   661  	}
   662  	return &DeleteBlobOutput{}, nil
   663  }
   664  
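        // DeleteBlobs deletes the given keys concurrently, throttled by SmallActionsGate.
        // The first non-ENOENT error stops the submission of further deletes.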
   665  func (b *AZBlob) DeleteBlobs(param *DeleteBlobsInput) (ret *DeleteBlobsOutput, deleteError error) {
   666  	var wg sync.WaitGroup
   667  	defer func() {
   668  		wg.Wait()
   669  		if deleteError != nil {
   670  			ret = nil
   671  		} else {
   672  			ret = &DeleteBlobsOutput{}
   673  		}
   674  	}()
   675  
   676  	for _, i := range param.Items {
   677  		SmallActionsGate.Take(1, true)
   678  		wg.Add(1)
   679  
   680  		go func(key string) {
   681  			defer func() {
   682  				SmallActionsGate.Return(1)
   683  				wg.Done()
   684  			}()
   685  
   686  			_, err := b.DeleteBlob(&DeleteBlobInput{key})
   687  			if err != nil {
   688  				err = mapAZBError(err)
   689  				if err != fuse.ENOENT {
   690  					deleteError = err
   691  				}
   692  			}
   693  		}(i)
   694  
   695  		if deleteError != nil {
   696  			return
   697  		}
   698  	}
   699  
   700  	return
   701  }
   702  
   703  func (b *AZBlob) RenameBlob(param *RenameBlobInput) (*RenameBlobOutput, error) {
   704  	return nil, syscall.ENOTSUP
   705  }
   706  
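        // CopyBlob performs a server-side copy and, if the copy is still pending,
        // polls the destination's properties until it completes.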
   707  func (b *AZBlob) CopyBlob(param *CopyBlobInput) (*CopyBlobOutput, error) {
   708  	if strings.HasSuffix(param.Source, "/") && strings.HasSuffix(param.Destination, "/") {
   709  		param.Source = param.Source[:len(param.Source)-1]
   710  		param.Destination = param.Destination[:len(param.Destination)-1]
   711  		return b.CopyBlob(param)
   712  	}
   713  
   714  	c, err := b.refreshToken()
   715  	if err != nil {
   716  		return nil, err
   717  	}
   718  
   719  	src := c.NewBlobURL(param.Source)
   720  	dest := c.NewBlobURL(param.Destination)
   721  	resp, err := dest.StartCopyFromURL(context.TODO(), src.URL(), nilMetadata(param.Metadata),
   722  		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
   723  	if err != nil {
   724  		return nil, mapAZBError(err)
   725  	}
   726  
   727  	if resp.CopyStatus() == azblob.CopyStatusPending {
   728  		time.Sleep(50 * time.Millisecond)
   729  
   730  		var copy *azblob.BlobGetPropertiesResponse
   731  		for copy, err = dest.GetProperties(context.TODO(), azblob.BlobAccessConditions{}); err == nil; copy, err = dest.GetProperties(context.TODO(), azblob.BlobAccessConditions{}) {
   732  			// if there's a new copy, we can only assume the last one was done
   733  			if copy.CopyStatus() != azblob.CopyStatusPending || copy.CopyID() != resp.CopyID() {
   734  				break
   735  			}
        			// wait between polls instead of busy-looping while the copy is pending
        			time.Sleep(50 * time.Millisecond)
   736  		}
   737  		if err != nil {
   738  			return nil, mapAZBError(err)
   739  		}
   740  	}
   741  
   742  	return &CopyBlobOutput{}, nil
   743  }
   744  
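        // GetBlob downloads a byte range of a blob, optionally conditional on IfMatch,
        // and returns the body as a retrying reader along with the blob's metadata.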
   745  func (b *AZBlob) GetBlob(param *GetBlobInput) (*GetBlobOutput, error) {
   746  	c, err := b.refreshToken()
   747  	if err != nil {
   748  		return nil, err
   749  	}
   750  
   751  	blob := c.NewBlobURL(param.Key)
   752  	var ifMatch azblob.ETag
   753  	if param.IfMatch != nil {
   754  		ifMatch = azblob.ETag(*param.IfMatch)
   755  	}
   756  
   757  	resp, err := blob.Download(context.TODO(),
   758  		int64(param.Start), int64(param.Count),
   759  		azblob.BlobAccessConditions{
   760  			ModifiedAccessConditions: azblob.ModifiedAccessConditions{
   761  				IfMatch: ifMatch,
   762  			},
   763  		}, false)
   764  	if err != nil {
   765  		return nil, mapAZBError(err)
   766  	}
   767  
   768  	metadata := pMetadata(resp.NewMetadata())
   769  	delete(metadata, AzureDirBlobMetadataKey)
   770  
   771  	return &GetBlobOutput{
   772  		HeadBlobOutput: HeadBlobOutput{
   773  			BlobItemOutput: BlobItemOutput{
   774  				Key:          &param.Key,
   775  				ETag:         PString(string(resp.ETag())),
   776  				LastModified: PTime(resp.LastModified()),
   777  				Size:         uint64(resp.ContentLength()),
   778  			},
   779  			ContentType: PString(resp.ContentType()),
   780  			Metadata:    metadata,
   781  		},
   782  		Body: resp.Body(azblob.RetryReaderOptions{}),
   783  	}, nil
   784  }
   785  
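        // PutBlob uploads a block blob in one shot. A directory ("dir/") put is stored
        // as an empty blob named "dir" with the "hdi_isfolder" metadata set to "true".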
   786  func (b *AZBlob) PutBlob(param *PutBlobInput) (*PutBlobOutput, error) {
   787  	c, err := b.refreshToken()
   788  	if err != nil {
   789  		return nil, err
   790  	}
   791  
   792  	if param.DirBlob && strings.HasSuffix(param.Key, "/") {
   793  		// turn this into an empty blob with "hdi_isfolder" metadata
   794  		param.Key = param.Key[:len(param.Key)-1]
   795  		if param.Metadata != nil {
   796  			param.Metadata[AzureDirBlobMetadataKey] = PString("true")
   797  		} else {
   798  			param.Metadata = map[string]*string{
   799  				AzureDirBlobMetadataKey: PString("true"),
   800  			}
   801  		}
   802  		return b.PutBlob(param)
   803  	}
   804  
   805  	body := param.Body
   806  	if body == nil {
   807  		body = bytes.NewReader([]byte(""))
   808  	}
   809  
   810  	blob := c.NewBlobURL(param.Key).ToBlockBlobURL()
   811  	resp, err := blob.Upload(context.TODO(),
   812  		body,
   813  		azblob.BlobHTTPHeaders{
   814  			ContentType: nilStr(param.ContentType),
   815  		},
   816  		nilMetadata(param.Metadata), azblob.BlobAccessConditions{})
   817  	if err != nil {
   818  		return nil, mapAZBError(err)
   819  	}
   820  
   821  	return &PutBlobOutput{
   822  		ETag:         PString(string(resp.ETag())),
   823  		LastModified: PTime(resp.LastModified()),
   824  	}, nil
   825  }
   826  
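        // MultipartBlobBegin prepares a block-blob upload. Block IDs are derived from
        // a fresh UUID plus the zero-padded part number; no server-side call is needed.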
   827  func (b *AZBlob) MultipartBlobBegin(param *MultipartBlobBeginInput) (*MultipartBlobCommitInput, error) {
   828  	// we can have up to 50K parts, so %05d should be sufficient
   829  	uploadId := uuid.New().String() + "::%05d"
   830  
   831  	// Azure block blobs need no explicit "initiate upload" call; staging blocks is enough
   832  	return &MultipartBlobCommitInput{
   833  		Key:      &param.Key,
   834  		Metadata: param.Metadata,
   835  		UploadId: &uploadId,
   836  		Parts:    make([]*string, 50000), // at most 50K parts
   837  	}, nil
   838  }
   839  
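        // MultipartBlobAdd stages one block, using the base64-encoded
        // "<uuid>::<zero-padded part number>" block ID generated by MultipartBlobBegin.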
   840  func (b *AZBlob) MultipartBlobAdd(param *MultipartBlobAddInput) (*MultipartBlobAddOutput, error) {
   841  	c, err := b.refreshToken()
   842  	if err != nil {
   843  		return nil, err
   844  	}
   845  
   846  	blob := c.NewBlockBlobURL(*param.Commit.Key)
   847  	blockId := fmt.Sprintf(*param.Commit.UploadId, param.PartNumber)
   848  	base64BlockId := base64.StdEncoding.EncodeToString([]byte(blockId))
   849  
   850  	atomic.AddUint32(&param.Commit.NumParts, 1)
   851  
   852  	_, err = blob.StageBlock(context.TODO(), base64BlockId, param.Body,
   853  		azblob.LeaseAccessConditions{}, nil)
   854  	if err != nil {
   855  		return nil, mapAZBError(err)
   856  	}
   857  
   858  	param.Commit.Parts[param.PartNumber-1] = &base64BlockId
   859  
   860  	return &MultipartBlobAddOutput{}, nil
   861  }
   862  
   863  func (b *AZBlob) MultipartBlobAbort(param *MultipartBlobCommitInput) (*MultipartBlobAbortOutput, error) {
   864  	// no-op, server will garbage collect them
   865  	return &MultipartBlobAbortOutput{}, nil
   866  }
   867  
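        // MultipartBlobCommit commits the staged block list in part order, which
        // materializes the final blob.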
   868  func (b *AZBlob) MultipartBlobCommit(param *MultipartBlobCommitInput) (*MultipartBlobCommitOutput, error) {
   869  	c, err := b.refreshToken()
   870  	if err != nil {
   871  		return nil, err
   872  	}
   873  
   874  	blob := c.NewBlockBlobURL(*param.Key)
   875  	parts := make([]string, param.NumParts)
   876  
   877  	for i := uint32(0); i < param.NumParts; i++ {
   878  		parts[i] = *param.Parts[i]
   879  	}
   880  
   881  	resp, err := blob.CommitBlockList(context.TODO(), parts,
   882  		azblob.BlobHTTPHeaders{}, nilMetadata(param.Metadata),
   883  		azblob.BlobAccessConditions{})
   884  	if err != nil {
   885  		return nil, mapAZBError(err)
   886  	}
   887  
   888  	return &MultipartBlobCommitOutput{
   889  		ETag:         PString(string(resp.ETag())),
   890  		LastModified: PTime(resp.LastModified()),
   891  	}, nil
   892  }
   893  
   894  func (b *AZBlob) MultipartExpire(param *MultipartExpireInput) (*MultipartExpireOutput, error) {
   895  	return nil, syscall.ENOTSUP
   896  }
   897  
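        // RemoveBucket deletes the container backing this AZBlob instance.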
   898  func (b *AZBlob) RemoveBucket(param *RemoveBucketInput) (*RemoveBucketOutput, error) {
   899  	c, err := b.refreshToken()
   900  	if err != nil {
   901  		return nil, err
   902  	}
   903  
   904  	_, err = c.Delete(context.TODO(), azblob.ContainerAccessConditions{})
   905  	if err != nil {
   906  		return nil, mapAZBError(err)
   907  	}
   908  	return &RemoveBucketOutput{}, nil
   909  }
   910  
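        // MakeBucket creates the container backing this AZBlob instance, with no public access.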
   911  func (b *AZBlob) MakeBucket(param *MakeBucketInput) (*MakeBucketOutput, error) {
   912  	c, err := b.refreshToken()
   913  	if err != nil {
   914  		return nil, err
   915  	}
   916  
   917  	_, err = c.Create(context.TODO(), nil, azblob.PublicAccessNone)
   918  	if err != nil {
   919  		return nil, mapAZBError(err)
   920  	}
   921  	return &MakeBucketOutput{}, nil
   922  }