storj.io/minio@v0.0.0-20230509071714-0cbc90f649b1/cmd/gateway/azure/gateway-azure.go

     1  /*
     2   * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc.
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *
     8   *     http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   * Unless required by applicable law or agreed to in writing, software
    11   * distributed under the License is distributed on an "AS IS" BASIS,
    12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   * See the License for the specific language governing permissions and
    14   * limitations under the License.
    15   */
    16  
    17  package azure
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"crypto/rand"
    23  	"crypto/sha256"
    24  	"encoding/base64"
    25  	"encoding/hex"
    26  	"encoding/json"
    27  	"errors"
    28  	"fmt"
    29  	"io"
    30  	"io/ioutil"
    31  	"net/http"
    32  	"net/url"
    33  	"path"
    34  	"sort"
    35  	"strconv"
    36  	"strings"
    37  	"time"
    38  
    39  	"github.com/Azure/azure-pipeline-go/pipeline"
    40  	"github.com/Azure/azure-storage-blob-go/azblob"
    41  	humanize "github.com/dustin/go-humanize"
    42  	"github.com/minio/cli"
    43  	miniogopolicy "github.com/minio/minio-go/v7/pkg/policy"
    44  
    45  	minio "storj.io/minio/cmd"
    46  	"storj.io/minio/cmd/logger"
    47  	"storj.io/minio/pkg/auth"
    48  	"storj.io/minio/pkg/bucket/policy"
    49  	"storj.io/minio/pkg/bucket/policy/condition"
    50  	"storj.io/minio/pkg/env"
    51  	"storj.io/minio/pkg/madmin"
    52  )
    53  
    54  const (
    55  	azureDefaultUploadChunkSizeMB = 25
    56  	azureDownloadRetryAttempts    = 5
    57  	azureS3MinPartSize            = 5 * humanize.MiByte
    58  	metadataObjectNameTemplate    = minio.GatewayMinioSysTmp + "multipart/v1/%s.%x/azure.json"
    59  	azureMarkerPrefix             = "{minio}"
    60  	metadataPartNamePrefix        = minio.GatewayMinioSysTmp + "multipart/v1/%s.%x"
    61  	maxPartsCount                 = 10000
    62  )
    63  
    64  var (
    65  	azureUploadChunkSize   int
    66  	azureUploadConcurrency int
    67  )
    68  
    69  func init() {
    70  	const azureGatewayTemplate = `NAME:
    71    {{.HelpName}} - {{.Usage}}
    72  
    73  USAGE:
    74    {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT]
    75  {{if .VisibleFlags}}
    76  FLAGS:
    77    {{range .VisibleFlags}}{{.}}
    78    {{end}}{{end}}
    79  ENDPOINT:
    80    Azure server endpoint. Default ENDPOINT is https://core.windows.net
    81  
    82  EXAMPLES:
    83    1. Start minio gateway server for Azure Blob Storage backend on custom endpoint.
    84       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}azureaccountname
    85       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}azureaccountkey
    86       {{.Prompt}} {{.HelpName}} https://azureaccountname.blob.custom.azure.endpoint
    87  
    88    2. Start minio gateway server for Azure Blob Storage backend with edge caching enabled.
    89       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}azureaccountname
    90       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}azureaccountkey
    91       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
    92       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
    93       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
    94       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
    95       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
    96       {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
    97       {{.Prompt}} {{.HelpName}}
    98  
    99  `
   100  
   101  	minio.RegisterGatewayCommand(cli.Command{
   102  		Name:               minio.AzureBackendGateway,
   103  		Usage:              "Microsoft Azure Blob Storage",
   104  		Action:             azureGatewayMain,
   105  		CustomHelpTemplate: azureGatewayTemplate,
   106  		HideHelpCommand:    true,
   107  	})
   108  }
   109  
   110  // Returns true if marker was returned by Azure, i.e. prefixed with
   111  // {minio}.
   112  func isAzureMarker(marker string) bool {
   113  	return strings.HasPrefix(marker, azureMarkerPrefix)
   114  }
   115  
   116  // Handler for 'minio gateway azure' command line.
   117  func azureGatewayMain(ctx *cli.Context) {
   118  	// Validate gateway arguments.
   119  	host := ctx.Args().First()
   120  
   121  	serverAddr := ctx.GlobalString("address")
   122  	if serverAddr == "" || serverAddr == ":"+minio.GlobalMinioDefaultPort {
   123  		serverAddr = ctx.String("address")
   124  	}
   125  	// Validate gateway arguments.
   126  	logger.FatalIf(minio.ValidateGatewayArguments(serverAddr, host), "Invalid argument")
   127  
   128  	minio.StartGateway(ctx, &Azure{host})
   129  }
   130  
   131  // Azure implements Gateway.
   132  type Azure struct {
   133  	host string
   134  }
   135  
   136  // Name implements Gateway interface.
   137  func (g *Azure) Name() string {
   138  	return minio.AzureBackendGateway
   139  }
   140  
   141  // NewGatewayLayer initializes the azure blob storage client and returns azureObjects.
   142  func (g *Azure) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
   143  	var err error
   144  
   145  	// Override credentials from the Azure storage environment variables if specified
   146  	if acc, key := env.Get("AZURE_STORAGE_ACCOUNT", creds.AccessKey), env.Get("AZURE_STORAGE_KEY", creds.SecretKey); acc != "" && key != "" {
   147  		creds, err = auth.CreateCredentials(acc, key)
   148  		if err != nil {
   149  			return nil, err
   150  		}
   151  	}
   152  
   153  	endpointURL, err := parseStorageEndpoint(g.host, creds.AccessKey)
   154  	if err != nil {
   155  		return nil, err
   156  	}
   157  
   158  	azureUploadChunkSize, err = env.GetInt("MINIO_AZURE_CHUNK_SIZE_MB", azureDefaultUploadChunkSizeMB)
   159  	if err != nil {
   160  		return nil, err
   161  	}
   162  	azureUploadChunkSize *= humanize.MiByte
   163  	if azureUploadChunkSize <= 0 || azureUploadChunkSize > 100*humanize.MiByte {
   164  		return nil, fmt.Errorf("MINIO_AZURE_CHUNK_SIZE_MB should be an integer value between 1 and 100")
   165  	}
   166  
   167  	azureUploadConcurrency, err = env.GetInt("MINIO_AZURE_UPLOAD_CONCURRENCY", 4)
   168  	if err != nil {
   169  		return nil, err
   170  	}
   171  
   172  	credential, err := azblob.NewSharedKeyCredential(creds.AccessKey, creds.SecretKey)
   173  	if err != nil {
   174  		if _, ok := err.(base64.CorruptInputError); ok {
   175  			return &azureObjects{}, errors.New("invalid Azure credentials")
   176  		}
   177  		return &azureObjects{}, err
   178  	}
   179  
   180  	metrics := minio.NewMetrics()
   181  
   182  	t := &minio.MetricsTransport{
   183  		Transport: minio.NewGatewayHTTPTransport(),
   184  		Metrics:   metrics,
   185  	}
   186  
   187  	httpClient := &http.Client{Transport: t}
   188  	userAgent := fmt.Sprintf("APN/1.0 MinIO/1.0 MinIO/%s", minio.Version)
   189  
   190  	pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{
   191  		Retry: azblob.RetryOptions{
   192  			// The Azure SDK recommends setting a timeout of 60 seconds per MB of data,
   193  			// so we calculate here the timeout for the configured upload chunk size.
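        			// For example, the default 25 MiB chunk size yields a TryTimeout
        			// of 25 * 60s = 25 minutes per attempt.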
   194  			TryTimeout: time.Duration(azureUploadChunkSize/humanize.MiByte) * 60 * time.Second,
   195  		},
   196  		HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
   197  			return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
   198  				request.Header.Set("User-Agent", userAgent)
   199  				resp, err := httpClient.Do(request.WithContext(ctx))
   200  				return pipeline.NewHTTPResponse(resp), err
   201  			}
   202  		}),
   203  	})
   204  
   205  	client := azblob.NewServiceURL(*endpointURL, pipeline)
   206  
   207  	return &azureObjects{
   208  		endpoint:   endpointURL,
   209  		httpClient: httpClient,
   210  		client:     client,
   211  		metrics:    metrics,
   212  	}, nil
   213  }
   214  
   215  func parseStorageEndpoint(host string, accountName string) (*url.URL, error) {
   216  	var endpoint string
   217  
   218  	// Load the endpoint url if supplied by the user.
   219  	if host != "" {
   220  		host, secure, err := minio.ParseGatewayEndpoint(host)
   221  		if err != nil {
   222  			return nil, err
   223  		}
   224  
   225  		var protocol string
   226  		if secure {
   227  			protocol = "https"
   228  		} else {
   229  			protocol = "http"
   230  		}
   231  
   232  		// for containerized storage deployments like Azurite or IoT Edge Storage,
   233  		// account resolution isn't handled via a hostname prefix like
   234  		// `http://${account}.host/${path}` but instead via a route prefix like
   235  		// `http://host/${account}/${path}`, so we adjust for that here
   236  		if !strings.HasPrefix(host, fmt.Sprintf("%s.", accountName)) {
   237  			host = fmt.Sprintf("%s/%s", host, accountName)
   238  		}
   239  
   240  		endpoint = fmt.Sprintf("%s://%s", protocol, host)
   241  	} else {
   242  		endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName)
   243  	}
   244  
   245  	return url.Parse(endpoint)
   246  }
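        // Illustrative results of the resolution above (hosts are hypothetical;
        // the scheme for a custom endpoint comes from minio.ParseGatewayEndpoint):
        //
        //	parseStorageEndpoint("", "acct")                      -> https://acct.blob.core.windows.net
        //	parseStorageEndpoint("acct.blob.local:10000", "acct") -> <scheme>://acct.blob.local:10000
        //	parseStorageEndpoint("localhost:10000", "acct")       -> <scheme>://localhost:10000/acct (e.g. Azurite)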
   247  
   248  // Production - Azure gateway is production ready.
   249  func (g *Azure) Production() bool {
   250  	return true
   251  }
   252  
   253  // s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY
   254  // object into Azure data structures - BlobMetadata and
   255  // BlobProperties.
   256  //
   257  // BlobMetadata contains user defined key-value pairs and each key is
   258  // automatically prefixed with `X-Ms-Meta-` by the Azure SDK. S3
   259  // user-metadata is translated to Azure metadata by removing the
   260  // `X-Amz-Meta-` prefix.
   261  //
   262  // BlobProperties contains commonly set metadata for objects such as
   263  // Content-Encoding, etc. Such metadata that is accepted by S3 is
   264  // copied into BlobProperties.
   265  //
   266  // Header names are canonicalized as in http.Header.
   267  func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string) (azblob.Metadata, azblob.BlobHTTPHeaders, error) {
   268  	for k := range s3Metadata {
   269  		if strings.Contains(k, "--") {
   270  			return azblob.Metadata{}, azblob.BlobHTTPHeaders{}, minio.UnsupportedMetadata{}
   271  		}
   272  	}
   273  
   274  	// The encoding technique used for each key is as follows:
   275  	// Each '-' is converted to '_'
   276  	// Each '_' is converted to '__'
   277  	// With this basic assumption, here are some of the expected
   278  	// translations for these keys:
   279  	// i: 'x-S3cmd_attrs' -> o: 'x_s3cmd__attrs' (mixed)
   280  	// i: 'x__test__value' -> o: 'x____test____value' (double '_')
   281  	encodeKey := func(key string) string {
   282  		tokens := strings.Split(key, "_")
   283  		for i := range tokens {
   284  			tokens[i] = strings.Replace(tokens[i], "-", "_", -1)
   285  		}
   286  		return strings.Join(tokens, "__")
   287  	}
   288  	var blobMeta azblob.Metadata = make(map[string]string)
   289  	var err error
   290  	var props azblob.BlobHTTPHeaders
   291  	for k, v := range s3Metadata {
   292  		k = http.CanonicalHeaderKey(k)
   293  		switch {
   294  		case strings.HasPrefix(k, "X-Amz-Meta-"):
   295  			// Strip header prefix, to let Azure SDK
   296  			// handle it for storage.
   297  			k = strings.Replace(k, "X-Amz-Meta-", "", 1)
   298  			blobMeta[encodeKey(k)] = v
   299  		// All cases below, extract common metadata that is
   300  		// accepted by S3 into BlobProperties for setting on
   301  		// Azure - see
   302  		// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
   303  		case k == "Cache-Control":
   304  			props.CacheControl = v
   305  		case k == "Content-Disposition":
   306  			props.ContentDisposition = v
   307  		case k == "Content-Encoding":
   308  			props.ContentEncoding = v
   309  		case k == "Content-Md5":
   310  			props.ContentMD5, err = base64.StdEncoding.DecodeString(v)
   311  		case k == "Content-Type":
   312  			props.ContentType = v
   313  		case k == "Content-Language":
   314  			props.ContentLanguage = v
   315  		}
   316  	}
   317  	return blobMeta, props, err
   318  }
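        // Illustrative round trip through s3MetaToAzureProperties and
        // azurePropertiesToS3Meta below (keys are canonicalized as in http.Header,
        // and Azure lower-cases stored metadata keys on retrieval):
        //
        //	PUT: "X-Amz-Meta-X-S3cmd_attrs: v" -> encodeKey("X-S3cmd_attrs") -> blob metadata key "X_S3cmd__attrs"
        //	GET: "x_s3cmd__attrs" -> decodeKey -> "x-s3cmd_attrs" -> "X-Amz-Meta-X-S3cmd_attrs: v"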
   319  
   320  const (
   321  	partMetaVersionV1 = "1"
   322  )
   323  
   324  // partMetadataV1 struct holds the part specific metadata for
   325  // multipart operations.
   326  type partMetadataV1 struct {
   327  	Version  string   `json:"version"`
   328  	Size     int64    `json:"Size"`
   329  	BlockIDs []string `json:"blockIDs"`
   330  	ETag     string   `json:"etag"`
   331  }
   332  
   333  // Returns the initialized part metadata struct
   334  func newPartMetaV1(uploadID string, partID int) *partMetadataV1 {
   335  	p := &partMetadataV1{}
   336  	p.Version = partMetaVersionV1
   337  	return p
   338  }
   339  
   340  func s3StorageClassToAzureTier(sc string) azblob.AccessTierType {
   341  	switch sc {
   342  	case "REDUCED_REDUNDANCY":
   343  		return azblob.AccessTierCool
   344  	case "STANDARD":
   345  		return azblob.AccessTierHot
   346  	}
   347  	return azblob.AccessTierHot
   348  }
   349  
   350  func azureTierToS3StorageClass(tierType string) string {
   351  	switch azblob.AccessTierType(tierType) {
   352  	case azblob.AccessTierCool:
   353  		return "REDUCED_REDUNDANCY"
   354  	case azblob.AccessTierHot:
   355  		return "STANDARD"
   356  	default:
   357  		return "STANDARD"
   358  	}
   360  }
   361  
   362  // azurePropertiesToS3Meta converts Azure metadata/properties to S3
   363  // metadata. It is the reverse of s3MetaToAzureProperties. Azure's
   364  // `.GetMetadata()` lower-cases all header keys, so this is taken into
   365  // account by this function.
   366  func azurePropertiesToS3Meta(meta azblob.Metadata, props azblob.BlobHTTPHeaders, contentLength int64) map[string]string {
   367  	// The decoding technique used for each key is as follows:
   368  	// Each '_' is converted to '-'
   369  	// Each '__' is converted to '_'
   370  	// With this basic assumption, here are some of the expected
   371  	// translations for these keys:
   372  	// i: 'x_s3cmd__attrs' -> o: 'x-s3cmd_attrs' (mixed)
   373  	// i: 'x____test____value' -> o: 'x__test__value' (double '_')
   374  	decodeKey := func(key string) string {
   375  		tokens := strings.Split(key, "__")
   376  		for i := range tokens {
   377  			tokens[i] = strings.Replace(tokens[i], "_", "-", -1)
   378  		}
   379  		return strings.Join(tokens, "_")
   380  	}
   381  
   382  	s3Metadata := make(map[string]string)
   383  	for k, v := range meta {
   384  		// k's `x-ms-meta-` prefix is already stripped by
   385  		// Azure SDK, so we add the AMZ prefix.
   386  		k = "X-Amz-Meta-" + decodeKey(k)
   387  		k = http.CanonicalHeaderKey(k)
   388  		s3Metadata[k] = v
   389  	}
   390  
   391  	// Add each property from BlobProperties that is supported by
   392  	// S3 PUT/COPY common metadata.
   393  	if props.CacheControl != "" {
   394  		s3Metadata["Cache-Control"] = props.CacheControl
   395  	}
   396  	if props.ContentDisposition != "" {
   397  		s3Metadata["Content-Disposition"] = props.ContentDisposition
   398  	}
   399  	if props.ContentEncoding != "" {
   400  		s3Metadata["Content-Encoding"] = props.ContentEncoding
   401  	}
   402  	if contentLength != 0 {
   403  		s3Metadata["Content-Length"] = fmt.Sprintf("%d", contentLength)
   404  	}
   405  	if len(props.ContentMD5) != 0 {
   406  		s3Metadata["Content-MD5"] = base64.StdEncoding.EncodeToString(props.ContentMD5)
   407  	}
   408  	if props.ContentType != "" {
   409  		s3Metadata["Content-Type"] = props.ContentType
   410  	}
   411  	if props.ContentLanguage != "" {
   412  		s3Metadata["Content-Language"] = props.ContentLanguage
   413  	}
   414  	return s3Metadata
   415  }
   416  
   417  // azureObjects - Implements Object layer for Azure blob storage.
   418  type azureObjects struct {
   419  	minio.GatewayUnsupported
   420  	endpoint   *url.URL
   421  	httpClient *http.Client
   422  	metrics    *minio.BackendMetrics
   423  	client     azblob.ServiceURL // Azure sdk client
   424  }
   425  
   426  // Convert azure errors to minio object layer errors.
   427  func azureToObjectError(err error, params ...string) error {
   428  	if err == nil {
   429  		return nil
   430  	}
   431  
   432  	bucket := ""
   433  	object := ""
   434  	if len(params) >= 1 {
   435  		bucket = params[0]
   436  	}
   437  	if len(params) == 2 {
   438  		object = params[1]
   439  	}
   440  
   441  	azureErr, ok := err.(azblob.StorageError)
   442  	if !ok {
   443  		// We don't interpret non-Azure errors, as Azure errors
   444  		// carry a StatusCode that helps us convert them to object errors.
   445  		return err
   446  	}
   447  
   448  	serviceCode := string(azureErr.ServiceCode())
   449  	statusCode := azureErr.Response().StatusCode
   450  
   451  	return azureCodesToObjectError(err, serviceCode, statusCode, bucket, object)
   452  }
   453  
   454  func azureCodesToObjectError(err error, serviceCode string, statusCode int, bucket string, object string) error {
   455  	switch serviceCode {
   456  	case "ContainerNotFound", "ContainerBeingDeleted":
   457  		err = minio.BucketNotFound{Bucket: bucket}
   458  	case "ContainerAlreadyExists":
   459  		err = minio.BucketExists{Bucket: bucket}
   460  	case "InvalidResourceName":
   461  		err = minio.BucketNameInvalid{Bucket: bucket}
   462  	case "RequestBodyTooLarge":
   463  		err = minio.PartTooBig{}
   464  	case "InvalidMetadata":
   465  		err = minio.UnsupportedMetadata{}
   466  	case "BlobAccessTierNotSupportedForAccountType":
   467  		err = minio.NotImplemented{}
   468  	case "OutOfRangeInput":
   469  		err = minio.ObjectNameInvalid{
   470  			Bucket: bucket,
   471  			Object: object,
   472  		}
   473  	default:
   474  		switch statusCode {
   475  		case http.StatusNotFound:
   476  			if object != "" {
   477  				err = minio.ObjectNotFound{
   478  					Bucket: bucket,
   479  					Object: object,
   480  				}
   481  			} else {
   482  				err = minio.BucketNotFound{Bucket: bucket}
   483  			}
   484  		case http.StatusBadRequest:
   485  			err = minio.BucketNameInvalid{Bucket: bucket}
   486  		}
   487  	}
   488  	return err
   489  }
   490  
   491  // getAzureUploadID - returns a new upload ID, a hex-encoded 8-byte random value.
   492  // The 8-byte restriction is needed because the Azure block id has a length
   493  // restriction of up to 8 bytes.
   494  func getAzureUploadID() (string, error) {
   495  	var id [8]byte
   496  
   497  	n, err := io.ReadFull(rand.Reader, id[:])
   498  	if err != nil {
   499  		return "", err
   500  	}
   501  	if n != len(id) {
   502  		return "", fmt.Errorf("Unexpected random data size. Expected: %d, read: %d", len(id), n)
   503  	}
   504  
   505  	return hex.EncodeToString(id[:]), nil
   506  }
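        // For example, a generated upload ID looks like "0a1b2c3d4e5f6a7b" (a
        // hypothetical value): 8 random bytes hex-encoded into the 16 characters
        // that checkAzureUploadID expects.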
   507  
   508  // checkAzureUploadID - returns an error if the given string is not a valid upload ID.
   509  func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
   510  	if len(uploadID) != 16 {
   511  		return minio.MalformedUploadID{
   512  			UploadID: uploadID,
   513  		}
   514  	}
   515  
   516  	if _, err = hex.DecodeString(uploadID); err != nil {
   517  		return minio.MalformedUploadID{
   518  			UploadID: uploadID,
   519  		}
   520  	}
   521  
   522  	return nil
   523  }
   524  
   525  // parses partID from part metadata file name
   526  func parseAzurePart(metaPartFileName, prefix string) (partID int, err error) {
   527  	partStr := strings.TrimPrefix(metaPartFileName, prefix+minio.SlashSeparator)
   528  	if partID, err = strconv.Atoi(partStr); err != nil || partID <= 0 {
   529  		err = fmt.Errorf("invalid part number in part metadata file name '%s'", partStr)
   530  		return
   531  	}
   532  	return
   533  }
   534  
   535  // GetMetrics returns this gateway's metrics
   536  func (a *azureObjects) GetMetrics(ctx context.Context) (*minio.BackendMetrics, error) {
   537  	return a.metrics, nil
   538  }
   539  
   540  // Shutdown - save any gateway metadata to disk
   541  // if necessary and reload upon next restart.
   542  func (a *azureObjects) Shutdown(ctx context.Context) error {
   543  	return nil
   544  }
   545  
   546  // StorageInfo - Not relevant to Azure backend.
   547  func (a *azureObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
   548  	si.Backend.Type = madmin.Gateway
   549  	host := a.endpoint.Host
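        	// If the endpoint omits the port, use the scheme as a named port
        	// (e.g. "host:https"); dialers resolve service names in the port
        	// position via net.LookupPort.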
   550  	if a.endpoint.Port() == "" {
   551  		host = a.endpoint.Host + ":" + a.endpoint.Scheme
   552  	}
   553  	si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, host)
   554  	return si, nil
   555  }
   556  
   557  // MakeBucketWithLocation - Create a new container on azure backend.
   558  func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.BucketOptions) error {
   559  	// Filter out unsupported features in Azure and return immediately with NotImplemented error
   560  	if opts.LockEnabled || opts.VersioningEnabled || strings.Contains(bucket, ".") {
   561  		return minio.NotImplemented{}
   562  	}
   563  
   564  	// Verify if bucket (container-name) is valid.
   565  	// IsValidBucketName has same restrictions as container names mentioned
   566  	// in azure documentation, so we will simply use the same function here.
   567  	// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
   568  	if !minio.IsValidBucketName(bucket) {
   569  		return minio.BucketNameInvalid{Bucket: bucket}
   570  	}
   571  
   572  	containerURL := a.client.NewContainerURL(bucket)
   573  	_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
   574  	return azureToObjectError(err, bucket)
   575  }
   576  
   577  // GetBucketInfo - Get bucket metadata.
   578  func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
   579  	// Azure does not have an equivalent call, hence use
   580  	// ListContainers with prefix
   581  
   582  	marker := azblob.Marker{}
   583  
   584  	for marker.NotDone() {
   585  		resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{
   586  			Prefix: bucket,
   587  		})
   588  
   589  		if err != nil {
   590  			return bi, azureToObjectError(err, bucket)
   591  		}
   592  
   593  		for _, container := range resp.ContainerItems {
   594  			if container.Name == bucket {
   595  				t := container.Properties.LastModified
   596  				return minio.BucketInfo{
   597  					Name:    bucket,
   598  					Created: t,
   599  				}, nil
   600  			} // else continue
   601  		}
   602  
   603  		marker = resp.NextMarker
   604  	}
   605  	return bi, minio.BucketNotFound{Bucket: bucket}
   606  }
   607  
   608  // ListBuckets - Lists all azure containers, uses Azure equivalent `ServiceURL.ListContainersSegment`.
   609  func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
   610  	marker := azblob.Marker{}
   611  
   612  	for marker.NotDone() {
   613  		resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
   614  
   615  		if err != nil {
   616  			return nil, azureToObjectError(err)
   617  		}
   618  
   619  		for _, container := range resp.ContainerItems {
   620  			t := container.Properties.LastModified
   621  			buckets = append(buckets, minio.BucketInfo{
   622  				Name:    container.Name,
   623  				Created: t,
   624  			})
   625  		}
   626  
   627  		marker = resp.NextMarker
   628  	}
   629  	return buckets, nil
   630  }
   631  
   632  // DeleteBucket - delete a container on azure, uses Azure equivalent `ContainerURL.Delete`.
   633  func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
   634  	if !forceDelete {
   635  		// Check if the container is empty before deleting it.
   636  		result, err := a.ListObjects(ctx, bucket, "", "", "", 1)
   637  		if err != nil {
   638  			return azureToObjectError(err, bucket)
   639  		}
   640  		if len(result.Objects) > 0 {
   641  			return minio.BucketNotEmpty{Bucket: bucket}
   642  		}
   643  	}
   644  
   645  	containerURL := a.client.NewContainerURL(bucket)
   646  	_, err := containerURL.Delete(ctx, azblob.ContainerAccessConditions{})
   647  	return azureToObjectError(err, bucket)
   648  }
   649  
   650  // ListObjects - lists all blobs on azure within a container filtered by prefix
   651  // and marker, uses Azure equivalent `ContainerURL.ListBlobsHierarchySegment`.
   652  // To accommodate S3-compatible applications using
   653  // ListObjectsV1 to use object keys as markers to control the
   654  // listing of objects, we use the following encoding scheme to
   655  // distinguish between Azure continuation tokens and application
   656  // supplied markers.
   657  //
   658  // - NextMarker in ListObjectsV1 response is constructed by
   659  //   prefixing "{minio}" to the Azure continuation token,
   660  //   e.g., "{minio}CgRvYmoz"
   661  //
   662  // - Application supplied markers are used as-is to list
   663  //   object keys that appear after it in the lexicographical order.
   664  func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) {
   665  	var objects []minio.ObjectInfo
   666  	var prefixes []string
   667  
   668  	azureListMarker := azblob.Marker{}
   669  	if isAzureMarker(marker) {
   670  		// If the application is using an Azure continuation token we should
   671  		// strip the azureMarkerPrefix we added in the previous list response.
   672  		azureMarker := strings.TrimPrefix(marker, azureMarkerPrefix)
   673  		azureListMarker.Val = &azureMarker
   674  	}
   675  
   676  	containerURL := a.client.NewContainerURL(bucket)
   677  	for len(objects) == 0 && len(prefixes) == 0 {
   678  		resp, err := containerURL.ListBlobsHierarchySegment(ctx, azureListMarker, delimiter, azblob.ListBlobsSegmentOptions{
   679  			Prefix:     prefix,
   680  			MaxResults: int32(maxKeys),
   681  		})
   682  		if err != nil {
   683  			return result, azureToObjectError(err, bucket, prefix)
   684  		}
   685  
   686  		for _, blob := range resp.Segment.BlobItems {
   687  			if delimiter == "" && strings.HasPrefix(blob.Name, minio.GatewayMinioSysTmp) {
   688  				// We filter out minio.GatewayMinioSysTmp entries in the recursive listing.
   689  				continue
   690  			}
   691  			if !isAzureMarker(marker) && blob.Name <= marker {
   692  				// If the application used ListObjectsV1 style marker then we
   693  				// skip all the entries till we reach the marker.
   694  				continue
   695  			}
   696  			// Populate correct ETag's if possible, this code primarily exists
   697  			// because AWS S3 indicates that
   698  			//
   699  			// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
   700  			//
   701  			// Objects created by the PUT Object, POST Object, or Copy operation,
   702  			// or through the AWS Management Console, and are encrypted by SSE-S3
   703  			// or plaintext, have ETags that are an MD5 digest of their object data.
   704  			//
   705  			// Some applications depend on this behavior refer https://github.com/minio/minio/issues/6550
   706  			// So we handle it here and make this consistent.
   707  			etag := minio.ToS3ETag(string(blob.Properties.Etag))
   708  			switch {
   709  			case len(blob.Properties.ContentMD5) != 0:
   710  				etag = hex.EncodeToString(blob.Properties.ContentMD5)
   711  			case blob.Metadata["md5sum"] != "":
   712  				etag = blob.Metadata["md5sum"]
   713  				delete(blob.Metadata, "md5sum")
   714  			}
   715  
   716  			objects = append(objects, minio.ObjectInfo{
   717  				Bucket:          bucket,
   718  				Name:            blob.Name,
   719  				ModTime:         blob.Properties.LastModified,
   720  				Size:            *blob.Properties.ContentLength,
   721  				ETag:            etag,
   722  				ContentType:     *blob.Properties.ContentType,
   723  				ContentEncoding: *blob.Properties.ContentEncoding,
   724  				UserDefined:     blob.Metadata,
   725  			})
   726  		}
   727  
   728  		for _, blobPrefix := range resp.Segment.BlobPrefixes {
   729  			if blobPrefix.Name == minio.GatewayMinioSysTmp {
   730  				// We don't do strings.HasPrefix(blob.Name, minio.GatewayMinioSysTmp) here so that
   731  				// we can use tools like mc to inspect the contents of minio.sys.tmp/
   732  				// It is OK to allow listing of minio.sys.tmp/ in non-recursive mode as it aids in debugging.
   733  				continue
   734  			}
   735  			if !isAzureMarker(marker) && blobPrefix.Name <= marker {
   736  				// If the application used ListObjectsV1 style marker then we
   737  				// skip all the entries till we reach the marker.
   738  				continue
   739  			}
   740  			prefixes = append(prefixes, blobPrefix.Name)
   741  		}
   742  
   743  		azureListMarker = resp.NextMarker
   744  		if !azureListMarker.NotDone() {
   745  			// Reached end of listing.
   746  			break
   747  		}
   748  	}
   749  
   750  	result.Objects = objects
   751  	result.Prefixes = prefixes
   752  	if azureListMarker.NotDone() {
   753  		// We add the {minio} prefix so that we know in the subsequent request that this
   754  		// marker is an Azure continuation token and not a ListObjectsV1 marker.
   755  		result.NextMarker = azureMarkerPrefix + *azureListMarker.Val
   756  		result.IsTruncated = true
   757  	}
   758  	return result, nil
   759  }
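        // Illustrative marker handling across two ListObjects calls (the token
        // value is hypothetical):
        //
        //	1st response: NextMarker = "{minio}CgRvYmoz" (wrapped Azure token)
        //	2nd request:  marker = "{minio}CgRvYmoz" -> isAzureMarker is true, so
        //	              "CgRvYmoz" is passed to Azure as the continuation token.
        //	A client-supplied marker such as "photos/2021" carries no prefix and is
        //	instead used to skip entries lexicographically.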
   760  
   761  // ListObjectsV2 - list all blobs in Azure bucket filtered by prefix
   762  func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) {
   763  	marker := continuationToken
   764  	if marker == "" {
   765  		marker = startAfter
   766  	}
   767  
   768  	var resultV1 minio.ListObjectsInfo
   769  	resultV1, err = a.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
   770  	if err != nil {
   771  		return result, err
   772  	}
   773  
   774  	result.Objects = resultV1.Objects
   775  	result.Prefixes = resultV1.Prefixes
   776  	result.ContinuationToken = continuationToken
   777  	result.NextContinuationToken = resultV1.NextMarker
   778  	result.IsTruncated = (resultV1.NextMarker != "")
   779  	return result, nil
   780  }
   781  
   782  // GetObjectNInfo - returns object info and locked object ReadCloser
   783  func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
   784  	var objInfo minio.ObjectInfo
   785  	objInfo, err = a.GetObjectInfo(ctx, bucket, object, opts)
   786  	if err != nil {
   787  		return nil, err
   788  	}
   789  
   790  	var startOffset, length int64
   791  	startOffset, length, err = rs.GetOffsetLength(objInfo.Size)
   792  	if err != nil {
   793  		return nil, err
   794  	}
   795  
   796  	if startOffset != 0 || length != objInfo.Size {
   797  		delete(objInfo.UserDefined, "Content-MD5")
   798  	}
   799  
   800  	pr, pw := io.Pipe()
   801  	go func() {
   802  		err := a.getObject(ctx, bucket, object, startOffset, length, pw, objInfo.InnerETag, opts)
   803  		pw.CloseWithError(err)
   804  	}()
   805  	// Setup cleanup function to cause the above go-routine to
   806  	// exit in case of partial read
   807  	pipeCloser := func() { pr.Close() }
   808  	return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser)
   809  }
   810  
   811  // GetObject - reads an object from azure. Supports additional
   812  // parameters like offset and length which are synonymous with
   813  // HTTP Range requests.
   814  //
   815  // startOffset indicates the starting read location of the object.
   816  // length indicates the number of bytes to be read, starting at startOffset.
   817  func (a *azureObjects) getObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
   818  	// startOffset cannot be negative.
   819  	if startOffset < 0 {
   820  		return azureToObjectError(minio.InvalidRange{}, bucket, object)
   821  	}
   822  
   823  	accessCond := azblob.BlobAccessConditions{}
   824  	if etag != "" {
   825  		accessCond.ModifiedAccessConditions.IfMatch = azblob.ETag(etag)
   826  	}
   827  
   828  	blobURL := a.client.NewContainerURL(bucket).NewBlobURL(object)
   829  	blob, err := blobURL.Download(ctx, startOffset, length, accessCond, false)
   830  	if err != nil {
   831  		return azureToObjectError(err, bucket, object)
   832  	}
   833  
   834  	rc := blob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts})
   835  
   836  	_, err = io.Copy(writer, rc)
   837  	rc.Close()
   838  	return err
   839  }
   840  
   841  // GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo,
   842  // uses Azure equivalent `BlobURL.GetProperties`.
   843  func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
   844  	blobURL := a.client.NewContainerURL(bucket).NewBlobURL(object)
   845  	blob, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
   846  	if err != nil {
   847  		return objInfo, azureToObjectError(err, bucket, object)
   848  	}
   849  
   850  	realETag := string(blob.ETag())
   851  
   852  	// Populate correct ETag's if possible, this code primarily exists
   853  	// because AWS S3 indicates that
   854  	//
   855  	// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
   856  	//
   857  	// Objects created by the PUT Object, POST Object, or Copy operation,
   858  	// or through the AWS Management Console, and are encrypted by SSE-S3
   859  	// or plaintext, have ETags that are an MD5 digest of their object data.
   860  	//
   861  	// Some applications depend on this behavior refer https://github.com/minio/minio/issues/6550
   862  	// So we handle it here and make this consistent.
   863  	etag := minio.ToS3ETag(realETag)
   864  	metadata := blob.NewMetadata()
   865  	contentMD5 := blob.ContentMD5()
   866  	switch {
   867  	case len(contentMD5) != 0:
   868  		etag = hex.EncodeToString(contentMD5)
   869  	case metadata["md5sum"] != "":
   870  		etag = metadata["md5sum"]
   871  		delete(metadata, "md5sum")
   872  	}
   873  
   874  	return minio.ObjectInfo{
   875  		Bucket:          bucket,
   876  		UserDefined:     azurePropertiesToS3Meta(metadata, blob.NewHTTPHeaders(), blob.ContentLength()),
   877  		ETag:            etag,
   878  		InnerETag:       realETag,
   879  		ModTime:         blob.LastModified(),
   880  		Name:            object,
   881  		Size:            blob.ContentLength(),
   882  		ContentType:     blob.ContentType(),
   883  		ContentEncoding: blob.ContentEncoding(),
   884  		StorageClass:    azureTierToS3StorageClass(blob.AccessTier()),
   885  	}, nil
   886  }
   887  
   888  // PutObject - Create a new blob with the incoming data,
   889  // uses Azure equivalent `UploadStreamToBlockBlob`.
   890  func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
   891  	data := r.Reader
   892  	if opts.UserDefined == nil {
   893  		opts.UserDefined = map[string]string{}
   894  	}
   895  
   896  	metadata, properties, err := s3MetaToAzureProperties(ctx, opts.UserDefined)
   897  	if err != nil {
   898  		return objInfo, azureToObjectError(err, bucket, object)
   899  	}
   900  
   901  	blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(object)
   902  
   903  	_, err = azblob.UploadStreamToBlockBlob(ctx, data, blobURL, azblob.UploadStreamToBlockBlobOptions{
   904  		MaxBuffers:      azureUploadConcurrency,
   905  		BlobHTTPHeaders: properties,
   906  		Metadata:        metadata,
   907  	})
   908  	if err != nil {
   909  		return objInfo, azureToObjectError(err, bucket, object)
   910  	}
   911  	// Query the blob's properties and metadata
   912  	get, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
   913  	if err != nil {
   914  		return objInfo, azureToObjectError(err, bucket, object)
   915  	}
   916  	// Update the blob's metadata with Content-MD5 after the upload
   917  	metadata = get.NewMetadata()
   918  	metadata["md5sum"] = r.MD5CurrentHexString()
   919  	_, err = blobURL.SetMetadata(ctx, metadata, azblob.BlobAccessConditions{})
   920  	if err != nil {
   921  		return objInfo, azureToObjectError(err, bucket, object)
   922  	}
   923  	return a.GetObjectInfo(ctx, bucket, object, opts)
   924  }
   925  
   926  // CopyObject - Copies a blob from source container to destination container.
   927  // Uses Azure equivalent `BlobURL.StartCopyFromURL`.
   928  func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
   929  	if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
   930  		return minio.ObjectInfo{}, minio.PreConditionFailed{}
   931  	}
   932  	srcBlob := a.client.NewContainerURL(srcBucket).NewBlobURL(srcObject)
   933  	srcBlobURL := srcBlob.URL()
   934  
   935  	srcProps, err := srcBlob.GetProperties(ctx, azblob.BlobAccessConditions{})
   936  	if err != nil {
   937  		return objInfo, azureToObjectError(err, srcBucket, srcObject)
   938  	}
   939  	destBlob := a.client.NewContainerURL(destBucket).NewBlobURL(destObject)
   940  
   941  	azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined)
   942  	if err != nil {
   943  		return objInfo, azureToObjectError(err, srcBucket, srcObject)
   944  	}
   945  	props.ContentMD5 = srcProps.ContentMD5()
   946  	azureMeta["md5sum"] = srcInfo.ETag
   947  	res, err := destBlob.StartCopyFromURL(ctx, srcBlobURL, azureMeta, azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{})
   948  	if err != nil {
   949  		return objInfo, azureToObjectError(err, srcBucket, srcObject)
   950  	}
   951  	// StartCopyFromURL is an asynchronous operation so need to poll for completion,
   952  	// see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob#remarks.
   953  	copyStatus := res.CopyStatus()
   954  	for copyStatus != azblob.CopyStatusSuccess {
   955  		destProps, err := destBlob.GetProperties(ctx, azblob.BlobAccessConditions{})
   956  		if err != nil {
   957  			return objInfo, azureToObjectError(err, srcBucket, srcObject)
   958  		}
   959  		copyStatus = destProps.CopyStatus()
   960  	}
   961  
   962  	// Azure will copy metadata from the source object when an empty metadata map is provided.
   963  	// To handle the case where the source object should be copied without its metadata,
   964  	// the metadata must be removed from the dest. object after the copy completes
   965  	if len(azureMeta) == 0 {
   966  		_, err = destBlob.SetMetadata(ctx, azureMeta, azblob.BlobAccessConditions{})
   967  		if err != nil {
   968  			return objInfo, azureToObjectError(err, srcBucket, srcObject)
   969  		}
   970  	}
   971  
   972  	_, err = destBlob.SetHTTPHeaders(ctx, props, azblob.BlobAccessConditions{})
   973  	if err != nil {
   974  		return objInfo, azureToObjectError(err, srcBucket, srcObject)
   975  	}
   976  
   977  	if _, ok := srcInfo.UserDefined["x-amz-storage-class"]; ok {
   978  		_, err = destBlob.SetTier(ctx, s3StorageClassToAzureTier(srcInfo.UserDefined["x-amz-storage-class"]),
   979  			azblob.LeaseAccessConditions{})
   980  		if err != nil {
   981  			return objInfo, azureToObjectError(err, srcBucket, srcObject)
   982  		}
   983  	}
   984  
   985  	return a.GetObjectInfo(ctx, destBucket, destObject, dstOpts)
   986  }
   987  
   988  // DeleteObject - Deletes a blob on azure container, uses Azure
   989  // equivalent `BlobURL.Delete`.
   990  func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
   991  	blob := a.client.NewContainerURL(bucket).NewBlobURL(object)
   992  	_, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
   993  	if err != nil {
   994  		err = azureToObjectError(err, bucket, object)
   995  		if !errors.Is(err, minio.ObjectNotFound{Bucket: bucket, Object: object}) {
   996  			return minio.ObjectInfo{}, err
   997  		}
   998  	}
   999  	return minio.ObjectInfo{
  1000  		Bucket: bucket,
  1001  		Name:   object,
  1002  	}, nil
  1003  }
  1004  
  1005  func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) {
  1006  	errs := make([]error, len(objects))
  1007  	dobjects := make([]minio.DeletedObject, len(objects))
  1008  	for idx, object := range objects {
  1009  		_, errs[idx] = a.DeleteObject(ctx, bucket, object.ObjectName, opts)
  1010  		dobjects[idx] = minio.DeletedObject{
  1011  			ObjectName: object.ObjectName,
  1012  		}
  1013  	}
  1014  	return dobjects, errs
  1015  }
  1016  
  1017  // ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result.
  1018  func (a *azureObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result minio.ListMultipartsInfo, err error) {
  1019  	// It's decided not to support List Multipart Uploads, hence returning empty result.
  1020  	return result, nil
  1021  }
  1022  
  1023  type azureMultipartMetadata struct {
  1024  	Name     string            `json:"name"`
  1025  	Metadata map[string]string `json:"metadata"`
  1026  }
  1027  
  1028  func getAzureMetadataObjectName(objectName, uploadID string) string {
  1029  	return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName)))
  1030  }
  1031  
  1032  // gets the name of part metadata file for multipart upload operations
  1033  func getAzureMetadataPartName(objectName, uploadID string, partID int) string {
  1034  	partMetaPrefix := getAzureMetadataPartPrefix(uploadID, objectName)
  1035  	return path.Join(partMetaPrefix, fmt.Sprintf("%d", partID))
  1036  }
  1037  
  1038  // gets the prefix of part metadata file
  1039  func getAzureMetadataPartPrefix(uploadID, objectName string) string {
  1040  	return fmt.Sprintf(metadataPartNamePrefix, uploadID, sha256.Sum256([]byte(objectName)))
  1041  }
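        // For illustration, with uploadID "0a1b2c3d4e5f6a7b" (hypothetical) and
        // sha256(objectName) abbreviated to "ab12…", the helpers above produce:
        //
        //	getAzureMetadataObjectName  -> minio.sys.tmp/multipart/v1/0a1b2c3d4e5f6a7b.ab12…/azure.json
        //	getAzureMetadataPartName(3) -> minio.sys.tmp/multipart/v1/0a1b2c3d4e5f6a7b.ab12…/3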
  1042  
  1043  func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, objectName, uploadID string) (err error) {
  1044  	blobURL := a.client.NewContainerURL(bucketName).NewBlobURL(
  1045  		getAzureMetadataObjectName(objectName, uploadID))
  1046  	_, err = blobURL.GetProperties(ctx, azblob.BlobAccessConditions{})
  1047  	err = azureToObjectError(err, bucketName, objectName)
  1048  	oerr := minio.ObjectNotFound{
  1049  		Bucket: bucketName,
  1050  		Object: objectName,
  1051  	}
  1052  	if err == oerr {
  1053  		err = minio.InvalidUploadID{
  1054  			UploadID: uploadID,
  1055  		}
  1056  	}
  1057  	return err
  1058  }
  1059  
  1060  // NewMultipartUpload - Use Azure equivalent `BlobURL.Upload`.
  1061  func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (uploadID string, err error) {
  1062  	uploadID, err = getAzureUploadID()
  1063  	if err != nil {
  1064  		logger.LogIf(ctx, err)
  1065  		return "", err
  1066  	}
  1067  	metadataObject := getAzureMetadataObjectName(object, uploadID)
  1068  
  1069  	var jsonData []byte
  1070  	if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: opts.UserDefined}); err != nil {
  1071  		logger.LogIf(ctx, err)
  1072  		return "", err
  1073  	}
  1074  
  1075  	blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(metadataObject)
  1076  	_, err = blobURL.Upload(ctx, bytes.NewReader(jsonData), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
  1077  	if err != nil {
  1078  		return "", azureToObjectError(err, bucket, metadataObject)
  1079  	}
  1080  
  1081  	return uploadID, nil
  1082  }
  1083  
  1084  func (a *azureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, uploadID string, partID int,
  1085  	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (info minio.PartInfo, err error) {
  1086  	return a.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
  1087  }
  1088  
  1089  // PutObjectPart - Use Azure equivalent `BlobURL.StageBlock`.
  1090  func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (info minio.PartInfo, err error) {
  1091  	data := r.Reader
  1092  	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
  1093  		return info, err
  1094  	}
  1095  
  1096  	if err = checkAzureUploadID(ctx, uploadID); err != nil {
  1097  		return info, err
  1098  	}
  1099  
  1100  	partMetaV1 := newPartMetaV1(uploadID, partID)
  1101  	subPartSize, subPartNumber := int64(azureUploadChunkSize), 1
  1102  	for remainingSize := data.Size(); remainingSize > 0; remainingSize -= subPartSize {
  1103  		if remainingSize < subPartSize {
  1104  			subPartSize = remainingSize
  1105  		}
  1106  
  1107  		id := base64.StdEncoding.EncodeToString([]byte(minio.MustGetUUID()))
  1108  		blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(object)
  1109  		body, err := ioutil.ReadAll(io.LimitReader(data, subPartSize))
  1110  		if err != nil {
  1111  			return info, azureToObjectError(err, bucket, object)
  1112  		}
  1113  		_, err = blobURL.StageBlock(ctx, id, bytes.NewReader(body), azblob.LeaseAccessConditions{}, nil)
  1114  		if err != nil {
  1115  			return info, azureToObjectError(err, bucket, object)
  1116  		}
  1117  		partMetaV1.BlockIDs = append(partMetaV1.BlockIDs, id)
  1118  		subPartNumber++
  1119  	}
  1120  
  1121  	partMetaV1.ETag = r.MD5CurrentHexString()
  1122  	partMetaV1.Size = data.Size()
  1123  
  1124  	// maintain per part md5sum in a temporary part metadata file until upload
  1125  	// is finalized.
  1126  	metadataObject := getAzureMetadataPartName(object, uploadID, partID)
  1127  	var jsonData []byte
  1128  	if jsonData, err = json.Marshal(partMetaV1); err != nil {
  1129  		logger.LogIf(ctx, err)
  1130  		return info, err
  1131  	}
  1132  
  1133  	blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(metadataObject)
  1134  	_, err = blobURL.Upload(ctx, bytes.NewReader(jsonData), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{})
  1135  	if err != nil {
  1136  		return info, azureToObjectError(err, bucket, metadataObject)
  1137  	}
  1138  
  1139  	info.PartNumber = partID
  1140  	info.ETag = partMetaV1.ETag
  1141  	info.LastModified = minio.UTCNow()
  1142  	info.Size = data.Size()
  1143  	return info, nil
  1144  }
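        // Illustration of the chunking above: with the default 25 MiB chunk size,
        // a 60 MiB part is staged as three blocks of 25, 25 and 10 MiB, each under
        // a random base64-encoded block ID; the IDs are recorded in the part
        // metadata file and committed later by CompleteMultipartUpload.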
  1145  
  1146  // GetMultipartInfo returns multipart info of the uploadID of the object
  1147  func (a *azureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) {
  1148  	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
  1149  		return result, err
  1150  	}
  1151  
  1152  	result.Bucket = bucket
  1153  	result.Object = object
  1154  	result.UploadID = uploadID
  1155  	return result, nil
  1156  }
  1157  
  1158  // ListObjectParts - Use Azure equivalent `ContainerURL.ListBlobsHierarchySegment`.
  1159  func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (result minio.ListPartsInfo, err error) {
  1160  	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
  1161  		return result, err
  1162  	}
  1163  
  1164  	result.Bucket = bucket
  1165  	result.Object = object
  1166  	result.UploadID = uploadID
  1167  	result.MaxParts = maxParts
  1168  
  1169  	azureListMarker := ""
  1170  	marker := azblob.Marker{Val: &azureListMarker}
  1171  
  1172  	var parts []minio.PartInfo
  1173  	var delimiter string
  1174  	maxKeys := maxPartsCount
  1175  	if partNumberMarker == 0 {
  1176  		maxKeys = maxParts
  1177  	}
  1178  	prefix := getAzureMetadataPartPrefix(uploadID, object)
  1179  	containerURL := a.client.NewContainerURL(bucket)
  1180  	resp, err := containerURL.ListBlobsHierarchySegment(ctx, marker, delimiter, azblob.ListBlobsSegmentOptions{
  1181  		Prefix:     prefix,
  1182  		MaxResults: int32(maxKeys),
  1183  	})
  1184  	if err != nil {
  1185  		return result, azureToObjectError(err, bucket, prefix)
  1186  	}
  1187  
  1188  	for _, blob := range resp.Segment.BlobItems {
  1189  		if delimiter == "" && !strings.HasPrefix(blob.Name, minio.GatewayMinioSysTmp) {
  1190  			// We filter out non minio.GatewayMinioSysTmp entries in the recursive listing.
  1191  			continue
  1192  		}
  1193  		// filter temporary metadata file for blob
  1194  		if strings.HasSuffix(blob.Name, "azure.json") {
  1195  			continue
  1196  		}
  1197  		if !isAzureMarker(*marker.Val) && blob.Name <= *marker.Val {
  1198  			// If the application used ListObjectsV1 style marker then we
  1199  			// skip all the entries till we reach the marker.
  1200  			continue
  1201  		}
  1202  		partNumber, err := parseAzurePart(blob.Name, prefix)
  1203  		if err != nil {
  1204  			return result, azureToObjectError(err, bucket, object)
  1205  		}
  1206  		var metadata partMetadataV1
  1207  		blobURL := containerURL.NewBlobURL(blob.Name)
  1208  		blob, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
  1209  		if err != nil {
  1210  			return result, azureToObjectError(err, bucket, object)
  1211  		}
  1212  		metadataReader := blob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts})
  1213  		if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
  1214  			logger.LogIf(ctx, err)
  1215  			return result, azureToObjectError(err, bucket, object)
  1216  		}
  1217  		parts = append(parts, minio.PartInfo{
  1218  			PartNumber: partNumber,
  1219  			Size:       metadata.Size,
  1220  			ETag:       metadata.ETag,
  1221  		})
  1222  	}
  1223  	sort.Slice(parts, func(i int, j int) bool {
  1224  		return parts[i].PartNumber < parts[j].PartNumber
  1225  	})
  1226  	partsCount := 0
  1227  	i := 0
  1228  	if partNumberMarker != 0 {
  1229  		// If the marker was set, skip the entries till the marker.
  1230  		for _, part := range parts {
  1231  			i++
  1232  			if part.PartNumber == partNumberMarker {
  1233  				break
  1234  			}
  1235  		}
  1236  	}
  1237  	for partsCount < maxParts && i < len(parts) {
  1238  		result.Parts = append(result.Parts, parts[i])
  1239  		i++
  1240  		partsCount++
  1241  	}
  1242  
  1243  	if i < len(parts) {
  1244  		result.IsTruncated = true
  1245  		if partsCount != 0 {
  1246  			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
  1247  		}
  1248  	}
  1249  	result.PartNumberMarker = partNumberMarker
  1250  	return result, nil
  1251  }
  1252  
  1253  // AbortMultipartUpload - There is no corresponding API in Azure to abort an
  1254  // incomplete upload; uncommitted blocks get deleted by Azure after one week.
  1255  // Here we delete the gateway's part and upload metadata blobs instead.
  1256  func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (err error) {
  1257  	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
  1258  		return err
  1259  	}
  1260  	var partNumberMarker int
  1261  	for {
  1262  		lpi, err := a.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxPartsCount, minio.ObjectOptions{})
  1263  		if err != nil {
  1264  			break
  1265  		}
  1266  		for _, part := range lpi.Parts {
  1267  			pblob := a.client.NewContainerURL(bucket).NewBlobURL(
  1268  				getAzureMetadataPartName(object, uploadID, part.PartNumber))
  1269  			pblob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
  1270  		}
  1271  		partNumberMarker = lpi.NextPartNumberMarker
  1272  		if !lpi.IsTruncated {
  1273  			break
  1274  		}
  1275  	}
  1276  
  1277  	blobURL := a.client.NewContainerURL(bucket).NewBlobURL(
  1278  		getAzureMetadataObjectName(object, uploadID))
  1279  	_, err = blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
  1280  	return err
  1281  }
  1282  
  1283  // CompleteMultipartUpload - Use Azure equivalent `BlobURL.CommitBlockList`.
  1284  func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
  1285  	metadataObject := getAzureMetadataObjectName(object, uploadID)
  1286  	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
  1287  		return objInfo, err
  1288  	}
  1289  
  1290  	if err = checkAzureUploadID(ctx, uploadID); err != nil {
  1291  		return objInfo, err
  1292  	}
  1293  
  1294  	blobURL := a.client.NewContainerURL(bucket).NewBlobURL(metadataObject)
  1295  	blob, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
  1296  	if err != nil {
  1297  		return objInfo, azureToObjectError(err, bucket, metadataObject)
  1298  	}
  1299  
  1300  	var metadata azureMultipartMetadata
  1301  	metadataReader := blob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts})
  1302  	if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
  1303  		logger.LogIf(ctx, err)
  1304  		return objInfo, azureToObjectError(err, bucket, metadataObject)
  1305  	}
  1306  
  1307  	objBlob := a.client.NewContainerURL(bucket).NewBlockBlobURL(object)
  1308  
  1309  	var allBlocks []string
  1310  	for i, part := range uploadedParts {
  1311  		var partMetadata partMetadataV1
  1312  		partMetadataObject := getAzureMetadataPartName(object, uploadID, part.PartNumber)
  1313  		pblobURL := a.client.NewContainerURL(bucket).NewBlobURL(partMetadataObject)
  1314  		pblob, err := pblobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
  1315  		if err != nil {
  1316  			return objInfo, azureToObjectError(err, bucket, partMetadataObject)
  1317  		}
  1318  
  1319  		partMetadataReader := pblob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts})
  1320  		if err = json.NewDecoder(partMetadataReader).Decode(&partMetadata); err != nil {
  1321  			logger.LogIf(ctx, err)
  1322  			return objInfo, azureToObjectError(err, bucket, partMetadataObject)
  1323  		}
  1324  
  1325  		if partMetadata.ETag != part.ETag {
  1326  			return objInfo, minio.InvalidPart{}
  1327  		}
  1328  		allBlocks = append(allBlocks, partMetadata.BlockIDs...)
  1329  		if i < (len(uploadedParts)-1) && partMetadata.Size < azureS3MinPartSize {
  1330  			return objInfo, minio.PartTooSmall{
  1331  				PartNumber: uploadedParts[i].PartNumber,
  1332  				PartSize:   partMetadata.Size,
  1333  				PartETag:   uploadedParts[i].ETag,
  1334  			}
  1335  		}
  1336  	}
  1337  
  1338  	objMetadata, objProperties, err := s3MetaToAzureProperties(ctx, metadata.Metadata)
  1339  	if err != nil {
  1340  		return objInfo, azureToObjectError(err, bucket, object)
  1341  	}
  1342  	objMetadata["md5sum"] = minio.ComputeCompleteMultipartMD5(uploadedParts)
  1343  
  1344  	_, err = objBlob.CommitBlockList(ctx, allBlocks, objProperties, objMetadata, azblob.BlobAccessConditions{})
  1345  	if err != nil {
  1346  		return objInfo, azureToObjectError(err, bucket, object)
  1347  	}
  1348  	var partNumberMarker int
  1349  	for {
  1350  		lpi, err := a.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxPartsCount, minio.ObjectOptions{})
  1351  		if err != nil {
  1352  			break
  1353  		}
  1354  		for _, part := range lpi.Parts {
  1355  			pblob := a.client.NewContainerURL(bucket).NewBlobURL(
  1356  				getAzureMetadataPartName(object, uploadID, part.PartNumber))
  1357  			pblob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
  1358  		}
  1359  		partNumberMarker = lpi.NextPartNumberMarker
  1360  		if !lpi.IsTruncated {
  1361  			break
  1362  		}
  1363  	}
  1364  
  1365  	_, derr := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{})
  1366  	logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID)
  1367  	logger.LogIf(ctx, derr)
  1368  
  1369  	return a.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{})
  1370  }
  1371  
  1372  // SetBucketPolicy - Azure supports three types of container policies:
  1373  // azblob.PublicAccessContainer - readonly in minio terminology
  1374  // azblob.PublicAccessBlob - readonly without listing in minio terminology
  1375  // azblob.PublicAccessNone - none in minio terminology
  1376  // As the common denominator for minio and azure is readonly and none, we support
  1377  // these two policies at the bucket level.
  1378  func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
  1379  	policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy)
  1380  	if err != nil {
  1381  		// This should not happen.
  1382  		logger.LogIf(ctx, err)
  1383  		return azureToObjectError(err, bucket)
  1384  	}
  1385  
  1386  	var policies []minio.BucketAccessPolicy
  1387  	for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") {
  1388  		policies = append(policies, minio.BucketAccessPolicy{
  1389  			Prefix: prefix,
  1390  			Policy: policy,
  1391  		})
  1392  	}
  1393  	prefix := bucket + "/*" // For all objects inside the bucket.
  1394  	if len(policies) != 1 {
  1395  		return minio.NotImplemented{}
  1396  	}
  1397  	if policies[0].Prefix != prefix {
  1398  		return minio.NotImplemented{}
  1399  	}
  1400  	if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly {
  1401  		return minio.NotImplemented{}
  1402  	}
  1403  	perm := azblob.PublicAccessContainer
  1404  	container := a.client.NewContainerURL(bucket)
  1405  	_, err = container.SetAccessPolicy(ctx, perm, nil, azblob.ContainerAccessConditions{})
  1406  	return azureToObjectError(err, bucket)
  1407  }
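        // For illustration, the only policy accepted above is the canonical
        // read-only policy on the whole bucket ("mybucket" is a placeholder),
        // roughly:
        //
        //	{
        //	  "Effect":    "Allow",
        //	  "Principal": {"AWS": ["*"]},
        //	  "Action":    ["s3:GetBucketLocation", "s3:ListBucket", "s3:GetObject"],
        //	  "Resource":  ["arn:aws:s3:::mybucket", "arn:aws:s3:::mybucket/*"]
        //	}
        //
        // which maps to azblob.PublicAccessContainer; anything else returns
        // minio.NotImplemented{}.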
  1408  
  1409  // GetBucketPolicy - Get the container ACL and convert it to a canonical *policy.Policy
  1410  func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
  1411  	container := a.client.NewContainerURL(bucket)
  1412  	perm, err := container.GetAccessPolicy(ctx, azblob.LeaseAccessConditions{})
  1413  	if err != nil {
  1414  		return nil, azureToObjectError(err, bucket)
  1415  	}
  1416  
  1417  	permAccessType := perm.BlobPublicAccess()
  1418  
  1419  	if permAccessType == azblob.PublicAccessNone {
  1420  		return nil, minio.BucketPolicyNotFound{Bucket: bucket}
  1421  	} else if permAccessType != azblob.PublicAccessContainer {
  1422  		return nil, azureToObjectError(minio.NotImplemented{})
  1423  	}
  1424  
  1425  	return &policy.Policy{
  1426  		Version: policy.DefaultVersion,
  1427  		Statements: []policy.Statement{
  1428  			policy.NewStatement(
  1429  				policy.Allow,
  1430  				policy.NewPrincipal("*"),
  1431  				policy.NewActionSet(
  1432  					policy.GetBucketLocationAction,
  1433  					policy.ListBucketAction,
  1434  					policy.GetObjectAction,
  1435  				),
  1436  				policy.NewResourceSet(
  1437  					policy.NewResource(bucket, ""),
  1438  					policy.NewResource(bucket, "*"),
  1439  				),
  1440  				condition.NewFunctions(),
  1441  			),
  1442  		},
  1443  	}, nil
  1444  }
  1445  
  1446  // DeleteBucketPolicy - Set the container ACL to "private"
  1447  func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
  1448  	perm := azblob.PublicAccessNone
  1449  	containerURL := a.client.NewContainerURL(bucket)
  1450  	_, err := containerURL.SetAccessPolicy(ctx, perm, nil, azblob.ContainerAccessConditions{})
  1451  	return azureToObjectError(err)
  1452  }
  1453  
  1454  // IsCompressionSupported returns whether compression is applicable for this layer.
  1455  func (a *azureObjects) IsCompressionSupported() bool {
  1456  	return false
  1457  }