github.com/artpar/rclone@v1.67.3/backend/azurefiles/azurefiles.go

//go:build !plan9 && !js

// Package azurefiles provides an interface to Microsoft Azure Files
package azurefiles

/*
   TODO

   This uses LastWriteTime which seems to work. The API response also
   has LastModified - needs investigation

   Needs pacer to have retries

   HTTP headers need to be passed

   Could support Metadata

   FIXME write mime type

   See FIXME markers

   Optional interfaces for Object
   - ID

*/

import (
	"bytes"
	"context"
	"crypto/md5"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"path"
	"strings"
	"sync"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/directory"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/file"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/fileerror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/service"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azfile/share"
	"github.com/artpar/rclone/fs"
	"github.com/artpar/rclone/fs/config"
	"github.com/artpar/rclone/fs/config/configmap"
	"github.com/artpar/rclone/fs/config/configstruct"
	"github.com/artpar/rclone/fs/config/obscure"
	"github.com/artpar/rclone/fs/fshttp"
	"github.com/artpar/rclone/fs/hash"
	"github.com/artpar/rclone/lib/encoder"
	"github.com/artpar/rclone/lib/env"
	"github.com/artpar/rclone/lib/readers"
)

const (
	maxFileSize           = 4 * fs.Tebi
	defaultChunkSize      = 4 * fs.Mebi
	storageDefaultBaseURL = "file.core.windows.net"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "azurefiles",
		Description: "Microsoft Azure Files",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "account",
			Help: `Azure Storage Account Name.

Set this to the Azure Storage Account Name in use.

Leave blank to use SAS URL or connection string, otherwise it needs to be set.

If this is blank and if env_auth is set it will be read from the
environment variable ` + "`AZURE_STORAGE_ACCOUNT_NAME`" + ` if possible.
`,
			Sensitive: true,
		}, {
			Name: "share_name",
			Help: `Azure Files Share Name.

This is required and is the name of the share to access.
`,
		}, {
			Name: "env_auth",
			Help: `Read credentials from runtime (environment variables, CLI or MSI).

See the [authentication docs](/azurefiles#authentication) for full info.`,
			Default: false,
		}, {
			Name: "key",
			Help: `Storage Account Shared Key.

Leave blank to use SAS URL or connection string.`,
			Sensitive: true,
		}, {
			Name: "sas_url",
			Help: `SAS URL.

Leave blank if using account/key or connection string.`,
			Sensitive: true,
		}, {
			Name:      "connection_string",
			Help:      `Azure Files Connection String.`,
			Sensitive: true,
		}, {
			Name: "tenant",
			Help: `ID of the service principal's tenant. Also called its directory ID.

Set this if using
- Service principal with client secret
- Service principal with certificate
- User with username and password
`,
			Sensitive: true,
		}, {
			Name: "client_id",
			Help: `The ID of the client in use.

Set this if using
- Service principal with client secret
- Service principal with certificate
- User with username and password
`,
			Sensitive: true,
		}, {
			Name: "client_secret",
			Help: `One of the service principal's client secrets.

Set this if using
- Service principal with client secret
`,
			Sensitive: true,
		}, {
			Name: "client_certificate_path",
			Help: `Path to a PEM or PKCS12 certificate file including the private key.

Set this if using
- Service principal with certificate
`,
		}, {
			Name: "client_certificate_password",
			Help: `Password for the certificate file (optional).

Optionally set this if using
- Service principal with certificate

And the certificate has a password.
`,
			IsPassword: true,
		}, {
			Name: "client_send_certificate_chain",
			Help: `Send the certificate chain when using certificate auth.

Specifies whether an authentication request will include an x5c header
to support subject name / issuer based authentication. When set to
true, authentication requests include the x5c header.

Optionally set this if using
- Service principal with certificate
`,
			Default:  false,
			Advanced: true,
		}, {
			Name: "username",
			Help: `User name (usually an email address).

Set this if using
- User with username and password
`,
			Advanced:  true,
			Sensitive: true,
		}, {
			Name: "password",
			Help: `The user's password.

Set this if using
- User with username and password
`,
			IsPassword: true,
			Advanced:   true,
		}, {
			Name: "service_principal_file",
			Help: `Path to file containing credentials for use with a service principal.

Leave blank normally. Needed only if you want to use a service principal instead of interactive login.

    $ az ad sp create-for-rbac --name "<name>" \
      --role "Storage Files Data Owner" \
      --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
      > azure-principal.json

See ["Create an Azure service principal"](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli) and ["Assign an Azure role for access to files data"](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) pages for more details.

**NB** this section needs updating for Azure Files - pull requests appreciated!

It may be more convenient to put the credentials directly into the
rclone config file under the ` + "`client_id`, `tenant` and `client_secret`" + `
keys instead of setting ` + "`service_principal_file`" + `.
`,
			Advanced: true,
		}, {
			Name: "use_msi",
			Help: `Use a managed service identity to authenticate (only works in Azure).

When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
to authenticate to Azure Storage instead of a SAS token or account key.

If the VM(SS) on which this program is running has a system-assigned identity, it will
be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
the user-assigned identity will be used by default. If the resource has multiple user-assigned
identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
msi_client_id, or msi_mi_res_id parameters.`,
			Default:  false,
			Advanced: true,
		}, {
			Name:      "msi_object_id",
			Help:      "Object ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_mi_res_id specified.",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:      "msi_client_id",
			Help:      "Client ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_object_id or msi_mi_res_id specified.",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:      "msi_mi_res_id",
			Help:      "Azure resource ID of the user-assigned MSI to use, if any.\n\nLeave blank if msi_client_id or msi_object_id specified.",
			Advanced:  true,
			Sensitive: true,
		}, {
			Name:     "endpoint",
			Help:     "Endpoint for the service.\n\nLeave blank normally.",
			Advanced: true,
		}, {
			Name: "chunk_size",
			Help: `Upload chunk size.

Note that this is stored in memory and there may be up to
"--transfers" * "--azurefiles-upload-concurrency" chunks stored at once
in memory.`,
			Default:  defaultChunkSize,
			Advanced: true,
		}, {
			Name: "upload_concurrency",
			Help: `Concurrency for multipart uploads.

This is the number of chunks of the same file that are uploaded
concurrently.

If you are uploading small numbers of large files over high-speed
links and these uploads do not fully utilize your bandwidth, then
increasing this may help to speed up the transfers.

Note that chunks are stored in memory and there may be up to
"--transfers" * "--azurefiles-upload-concurrency" chunks stored at once
in memory.`,
			Default:  16,
			Advanced: true,
		}, {
			Name: "max_stream_size",
			Help: strings.ReplaceAll(`Max size for streamed files.

Azure files needs to know in advance how big the file will be. When
rclone doesn't know it uses this value instead.

This will be used when rclone is streaming data, the most common uses are:

- Uploading files with |--vfs-cache-mode off| with |rclone mount|
- Using |rclone rcat|
- Copying files with unknown length

You will need this much free space in the share as the file will be this size temporarily.
`, "|", "`"),
			Default:  10 * fs.Gibi,
			Advanced: true,
		}, {
			Name:     config.ConfigEncoding,
			Help:     config.ConfigEncodingHelp,
			Advanced: true,
			Default: (encoder.EncodeDoubleQuote |
				encoder.EncodeBackSlash |
				encoder.EncodeSlash |
				encoder.EncodeColon |
				encoder.EncodePipe |
				encoder.EncodeLtGt |
				encoder.EncodeAsterisk |
				encoder.EncodeQuestion |
				encoder.EncodeInvalidUtf8 |
				encoder.EncodeCtl | encoder.EncodeDel |
				encoder.EncodeDot | encoder.EncodeRightPeriod),
		}},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Account                    string               `config:"account"`
	ShareName                  string               `config:"share_name"`
	EnvAuth                    bool                 `config:"env_auth"`
	Key                        string               `config:"key"`
	SASURL                     string               `config:"sas_url"`
	ConnectionString           string               `config:"connection_string"`
	Tenant                     string               `config:"tenant"`
	ClientID                   string               `config:"client_id"`
	ClientSecret               string               `config:"client_secret"`
	ClientCertificatePath      string               `config:"client_certificate_path"`
	ClientCertificatePassword  string               `config:"client_certificate_password"`
	ClientSendCertificateChain bool                 `config:"client_send_certificate_chain"`
	Username                   string               `config:"username"`
	Password                   string               `config:"password"`
	ServicePrincipalFile       string               `config:"service_principal_file"`
	UseMSI                     bool                 `config:"use_msi"`
	MSIObjectID                string               `config:"msi_object_id"`
	MSIClientID                string               `config:"msi_client_id"`
	MSIResourceID              string               `config:"msi_mi_res_id"`
	Endpoint                   string               `config:"endpoint"`
	ChunkSize                  fs.SizeSuffix        `config:"chunk_size"`
	MaxStreamSize              fs.SizeSuffix        `config:"max_stream_size"`
	UploadConcurrency          int                  `config:"upload_concurrency"`
	Enc                        encoder.MultiEncoder `config:"encoding"`
}
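
// Editor's note: the example below is an illustrative sketch, not part of the
// original backend. It shows how the options above can be supplied
// programmatically through a configmap.Mapper instead of an rclone.conf
// section such as [myfiles] with type = azurefiles. All values are
// hypothetical placeholders.
func exampleNewFs(ctx context.Context) (fs.Fs, error) {
	m := configmap.Simple{
		"account":    "mystorageaccount", // hypothetical account name
		"share_name": "myshare",          // hypothetical share name
		"key":        "c3RvcmFnZSBrZXk=", // hypothetical base64 shared key
	}
	// "dir/in/share" is the root within the share the returned Fs operates on.
	return NewFs(ctx, "myfiles", "dir/in/share", m)
}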

// Fs represents a root directory inside a share. The root directory can be ""
type Fs struct {
	name        string            // name of this remote
	root        string            // the path we are working on if any
	opt         Options           // parsed config options
	features    *fs.Features      // optional features
	shareClient *share.Client     // a client for the share itself
	svc         *directory.Client // a client for the root directory of the share
}

// Object describes an Azure File Share File
type Object struct {
	fs          *Fs       // what this object is part of
	remote      string    // The remote path
	size        int64     // Size of the object
	md5         []byte    // MD5 hash if known
	modTime     time.Time // The modified time of the object if known
	contentType string    // content type if known
}

// Wrap the http.Transport to satisfy the Transporter interface
type transporter struct {
	http.RoundTripper
}

// Make a new transporter
func newTransporter(ctx context.Context) transporter {
	return transporter{
		RoundTripper: fshttp.NewTransport(ctx),
	}
}

// Do sends the HTTP request and returns the HTTP response or error.
func (tr transporter) Do(req *http.Request) (*http.Response, error) {
	return tr.RoundTripper.RoundTrip(req)
}

type servicePrincipalCredentials struct {
	AppID    string `json:"appId"`
	Password string `json:"password"`
	Tenant   string `json:"tenant"`
}

// parseServicePrincipalCredentials unmarshals a service principal credentials JSON file as generated by az cli.
func parseServicePrincipalCredentials(ctx context.Context, credentialsData []byte) (*servicePrincipalCredentials, error) {
	var spCredentials servicePrincipalCredentials
	if err := json.Unmarshal(credentialsData, &spCredentials); err != nil {
		return nil, fmt.Errorf("error parsing credentials from JSON file: %w", err)
	}
	// TODO: support certificate credentials
	// Validate all fields present
	if spCredentials.AppID == "" || spCredentials.Password == "" || spCredentials.Tenant == "" {
		return nil, errors.New("missing fields in credentials file")
	}
	return &spCredentials, nil
}
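
// Editor's sketch (not in the original source): loading the az-cli generated
// JSON referenced in the service_principal_file help above and parsing it with
// parseServicePrincipalCredentials. The file path is a hypothetical placeholder.
func exampleLoadServicePrincipal(ctx context.Context) (*servicePrincipalCredentials, error) {
	data, err := os.ReadFile("/etc/rclone/azure-principal.json") // hypothetical path
	if err != nil {
		return nil, fmt.Errorf("error reading credentials file: %w", err)
	}
	return parseServicePrincipalCredentials(ctx, data)
}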

// Factored out from NewFs so that it can be tested with opt *Options and without m configmap.Mapper
func newFsFromOptions(ctx context.Context, name, root string, opt *Options) (fs.Fs, error) {
	// Client options specifying our own transport
	policyClientOptions := policy.ClientOptions{
		Transport: newTransporter(ctx),
	}
	clientOpt := service.ClientOptions{
		ClientOptions: policyClientOptions,
	}

	// Here we authenticate by setting exactly one of cred, sharedKeyCred or client
	var (
		cred          azcore.TokenCredential
		sharedKeyCred *service.SharedKeyCredential
		client        *service.Client
		err           error
	)
	switch {
	case opt.EnvAuth:
		// Read account from environment if needed
		if opt.Account == "" {
			opt.Account, _ = os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
		}
		// Read credentials from the environment
		options := azidentity.DefaultAzureCredentialOptions{
			ClientOptions: policyClientOptions,
		}
		cred, err = azidentity.NewDefaultAzureCredential(&options)
		if err != nil {
			return nil, fmt.Errorf("create azure environment credential failed: %w", err)
		}
	case opt.Account != "" && opt.Key != "":
		sharedKeyCred, err = service.NewSharedKeyCredential(opt.Account, opt.Key)
		if err != nil {
			return nil, fmt.Errorf("create new shared key credential failed: %w", err)
		}
	case opt.SASURL != "":
		client, err = service.NewClientWithNoCredential(opt.SASURL, &clientOpt)
		if err != nil {
			return nil, fmt.Errorf("unable to create SAS URL client: %w", err)
		}
	case opt.ConnectionString != "":
		client, err = service.NewClientFromConnectionString(opt.ConnectionString, &clientOpt)
		if err != nil {
			return nil, fmt.Errorf("unable to create connection string client: %w", err)
		}
	case opt.ClientID != "" && opt.Tenant != "" && opt.ClientSecret != "":
		// Service principal with client secret
		options := azidentity.ClientSecretCredentialOptions{
			ClientOptions: policyClientOptions,
		}
		cred, err = azidentity.NewClientSecretCredential(opt.Tenant, opt.ClientID, opt.ClientSecret, &options)
		if err != nil {
			return nil, fmt.Errorf("error creating a client secret credential: %w", err)
		}
	case opt.ClientID != "" && opt.Tenant != "" && opt.ClientCertificatePath != "":
		// Service principal with certificate
		//
		// Read the certificate
		data, err := os.ReadFile(env.ShellExpand(opt.ClientCertificatePath))
		if err != nil {
			return nil, fmt.Errorf("error reading client certificate file: %w", err)
		}
		// NewClientCertificateCredential requires at least one *x509.Certificate, and a
		// crypto.PrivateKey.
		//
		// ParseCertificates returns these given certificate data in PEM or PKCS12 format.
		// It handles common scenarios but has limitations, for example it doesn't load PEM
		// encrypted private keys.
		var password []byte
		if opt.ClientCertificatePassword != "" {
			pw, err := obscure.Reveal(opt.ClientCertificatePassword)
			if err != nil {
				return nil, fmt.Errorf("certificate password decode failed - did you obscure it?: %w", err)
			}
			password = []byte(pw)
		}
		certs, key, err := azidentity.ParseCertificates(data, password)
		if err != nil {
			return nil, fmt.Errorf("failed to parse client certificate file: %w", err)
		}
		options := azidentity.ClientCertificateCredentialOptions{
			ClientOptions:        policyClientOptions,
			SendCertificateChain: opt.ClientSendCertificateChain,
		}
		cred, err = azidentity.NewClientCertificateCredential(
			opt.Tenant, opt.ClientID, certs, key, &options,
		)
		if err != nil {
			return nil, fmt.Errorf("create azure service principal with client certificate credential failed: %w", err)
		}
	case opt.ClientID != "" && opt.Tenant != "" && opt.Username != "" && opt.Password != "":
		// User with username and password
		options := azidentity.UsernamePasswordCredentialOptions{
			ClientOptions: policyClientOptions,
		}
		password, err := obscure.Reveal(opt.Password)
		if err != nil {
			return nil, fmt.Errorf("user password decode failed - did you obscure it?: %w", err)
		}
		cred, err = azidentity.NewUsernamePasswordCredential(
			opt.Tenant, opt.ClientID, opt.Username, password, &options,
		)
		if err != nil {
			return nil, fmt.Errorf("authenticate user with password failed: %w", err)
		}
	case opt.ServicePrincipalFile != "":
		// Loading service principal credentials from file.
		loadedCreds, err := os.ReadFile(env.ShellExpand(opt.ServicePrincipalFile))
		if err != nil {
			return nil, fmt.Errorf("error opening service principal credentials file: %w", err)
		}
		parsedCreds, err := parseServicePrincipalCredentials(ctx, loadedCreds)
		if err != nil {
			return nil, fmt.Errorf("error parsing service principal credentials file: %w", err)
		}
		options := azidentity.ClientSecretCredentialOptions{
			ClientOptions: policyClientOptions,
		}
		cred, err = azidentity.NewClientSecretCredential(parsedCreds.Tenant, parsedCreds.AppID, parsedCreds.Password, &options)
		if err != nil {
			return nil, fmt.Errorf("error creating a client secret credential: %w", err)
		}
	case opt.UseMSI:
		// Using a managed identity. At most one of the user-assigned IDs may be
		// set; if none is set, the system-assigned (or sole user-assigned)
		// identity is used. (To do: better validation.)
		var b2i = map[bool]int{false: 0, true: 1}
		set := b2i[opt.MSIClientID != ""] + b2i[opt.MSIObjectID != ""] + b2i[opt.MSIResourceID != ""]
		if set > 1 {
			return nil, errors.New("more than one user-assigned identity ID is set")
		}
		var options azidentity.ManagedIdentityCredentialOptions
		switch {
		case opt.MSIClientID != "":
			options.ID = azidentity.ClientID(opt.MSIClientID)
		case opt.MSIObjectID != "":
			// FIXME this doesn't appear to be in the new SDK?
			return nil, errors.New("MSI object ID is currently unsupported")
		case opt.MSIResourceID != "":
			options.ID = azidentity.ResourceID(opt.MSIResourceID)
		}
		cred, err = azidentity.NewManagedIdentityCredential(&options)
		if err != nil {
			return nil, fmt.Errorf("failed to acquire MSI token: %w", err)
		}
	default:
		return nil, errors.New("no authentication method configured")
	}

	// Make the client if not already created
	if client == nil {
		// Work out what the endpoint is if it is still unset
		if opt.Endpoint == "" {
			if opt.Account == "" {
				return nil, errors.New("account must be set: can't make service URL")
			}
			u, err := url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, storageDefaultBaseURL))
			if err != nil {
				return nil, fmt.Errorf("failed to make azure storage URL from account: %w", err)
			}
			opt.Endpoint = u.String()
		}
		if sharedKeyCred != nil {
			// Shared key cred
			client, err = service.NewClientWithSharedKeyCredential(opt.Endpoint, sharedKeyCred, &clientOpt)
			if err != nil {
				return nil, fmt.Errorf("create client with shared key failed: %w", err)
			}
		} else if cred != nil {
			// Azidentity cred
			client, err = service.NewClient(opt.Endpoint, cred, &clientOpt)
			if err != nil {
				return nil, fmt.Errorf("create client failed: %w", err)
			}
		}
	}
	if client == nil {
		return nil, errors.New("internal error: auth failed to make credentials or client")
	}

	shareClient := client.NewShareClient(opt.ShareName)
	svc := shareClient.NewRootDirectoryClient()
	f := &Fs{
		shareClient: shareClient,
		svc:         svc,
		name:        name,
		root:        root,
		opt:         *opt,
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
		PartialUploads:          true, // files are visible as they are being uploaded
		CaseInsensitive:         true,
		SlowHash:                true, // calling Hash() generally takes an extra transaction
		ReadMimeType:            true,
		WriteMimeType:           true,
	}).Fill(ctx, f)

	// Check whether a file exists at this location
	_, propsErr := f.fileClient("").GetProperties(ctx, nil)
	if propsErr == nil {
		f.root = path.Dir(root)
		return f, fs.ErrorIsFile
	}

	return f, nil
}

// NewFs constructs an Fs from the root
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
	opt := new(Options)
	err := configstruct.Set(m, opt)
	if err != nil {
		return nil, err
	}
	return newFsFromOptions(ctx, name, root, opt)
}

// ------------------------------------------------------------

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
	return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
	return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
	return fmt.Sprintf("azurefiles root '%s'", f.root)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision returns the precision of this Fs
//
// One second. The Files REST API returns times in RFC1123 format, which
// has a precision of one second.
// Source: https://learn.microsoft.com/en-us/rest/api/storageservices/representation-of-date-time-values-in-headers
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// Hashes returns the supported hash sets.
//
// MD5: since it is listed as a header in the response for file properties
// Source: https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-properties
func (f *Fs) Hashes() hash.Set {
	return hash.NewHashSet(hash.MD5)
}

// Encode remote and turn it into an absolute path in the share
func (f *Fs) absPath(remote string) string {
	return f.opt.Enc.FromStandardPath(path.Join(f.root, remote))
}
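
// Editor's note: as an illustration of the above, with root "dir" the call
// absPath("sub/file?.txt") joins the path to give "dir/sub/file?.txt", and the
// encoder registered in init then substitutes characters Azure Files cannot
// store (such as '?') with safe equivalents before the path is sent to the
// service.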

// Make a directory client from the dir
func (f *Fs) dirClient(dir string) *directory.Client {
	return f.svc.NewSubdirectoryClient(f.absPath(dir))
}

// Make a file client from the remote
func (f *Fs) fileClient(remote string) *file.Client {
	return f.svc.NewFileClient(f.absPath(remote))
}

// NewObject finds the Object at remote.  If it can't be found
// it returns the error fs.ErrorObjectNotFound.
//
// Does not return ErrorIsDir when a directory exists instead of a file,
// since the documentation for [rclone.fs.Fs.NewObject] requires no extra
// work to determine whether it is a directory.
//
// This initiates a network request and returns an error if the object is not found.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
	resp, err := f.fileClient(remote).GetProperties(ctx, nil)
	if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
		return nil, fs.ErrorObjectNotFound
	} else if err != nil {
		return nil, fmt.Errorf("unable to find object remote %q: %w", remote, err)
	}

	o := &Object{
		fs:     f,
		remote: remote,
	}
	o.setMetadata(&resp)
	return o, nil
}

// Make a directory using the absolute path from the root of the share
//
// This recursively creates parent directories all the way to the root
// of the share.
func (f *Fs) absMkdir(ctx context.Context, absPath string) error {
	if absPath == "" {
		return nil
	}
	dirClient := f.svc.NewSubdirectoryClient(absPath)

	// now := time.Now()
	// smbProps := &file.SMBProperties{
	// 	LastWriteTime: &now,
	// }
	// dirCreateOptions := &directory.CreateOptions{
	// 	FileSMBProperties: smbProps,
	// }

	_, createDirErr := dirClient.Create(ctx, nil)
	if fileerror.HasCode(createDirErr, fileerror.ParentNotFound) {
		parentDir := path.Dir(absPath)
		if parentDir == absPath {
			return errors.New("internal error: infinite recursion since parent and remote are equal")
		}
		makeParentErr := f.absMkdir(ctx, parentDir)
		if makeParentErr != nil {
			return fmt.Errorf("could not make parent of %q: %w", absPath, makeParentErr)
		}
		return f.absMkdir(ctx, absPath)
	} else if fileerror.HasCode(createDirErr, fileerror.ResourceAlreadyExists) {
		return nil
	} else if createDirErr != nil {
		return fmt.Errorf("unable to MkDir: %w", createDirErr)
	}
	return nil
}
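
// Editor's note, a worked example of the recursion above: absMkdir("a/b/c") on
// an empty share first fails with ParentNotFound, recurses to create "a", then
// "a/b", then retries and creates "a/b/c". A directory that already exists at
// any level is treated as success via the ResourceAlreadyExists branch.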

// Mkdir creates nested directories
func (f *Fs) Mkdir(ctx context.Context, remote string) error {
	return f.absMkdir(ctx, f.absPath(remote))
}

// Make the parent directory of remote
func (f *Fs) mkParentDir(ctx context.Context, remote string) error {
	// Can't make the parent of root
	if remote == "" {
		return nil
	}
	return f.Mkdir(ctx, path.Dir(remote))
}

// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
	dirClient := f.dirClient(dir)
	_, err := dirClient.Delete(ctx, nil)
	if err != nil {
		if fileerror.HasCode(err, fileerror.DirectoryNotEmpty) {
			return fs.ErrorDirectoryNotEmpty
		} else if fileerror.HasCode(err, fileerror.ResourceNotFound) {
			return fs.ErrorDirNotFound
		}
		return fmt.Errorf("could not rmdir dir %q: %w", dir, err)
	}
	return nil
}

// Put the object
//
// Copies the reader in to the new object. This new object is returned.
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction (named o to avoid shadowing the fs package)
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(ctx, in, src, options...)
}

// List the objects and directories in dir into entries. The entries can be
// returned in any order but should be for a complete directory.
//
// dir should be "" to list the root, and should not have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
func (f *Fs) List(ctx context.Context, dir string) (fs.DirEntries, error) {
	var entries fs.DirEntries
	subDirClient := f.dirClient(dir)

	// Checking whether directory exists
	_, err := subDirClient.GetProperties(ctx, nil)
	if fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
		return entries, fs.ErrorDirNotFound
	} else if err != nil {
		return entries, err
	}

	var opt = &directory.ListFilesAndDirectoriesOptions{
		Include: directory.ListFilesInclude{
			Timestamps: true,
		},
	}
	pager := subDirClient.NewListFilesAndDirectoriesPager(opt)
	for pager.More() {
		resp, err := pager.NextPage(ctx)
		if err != nil {
			return entries, err
		}
		for _, directory := range resp.Segment.Directories {
			// Name          *string `xml:"Name"`
			// Attributes    *string `xml:"Attributes"`
			// ID            *string `xml:"FileId"`
			// PermissionKey *string `xml:"PermissionKey"`
			// Properties.ContentLength  *int64       `xml:"Content-Length"`
			// Properties.ChangeTime     *time.Time   `xml:"ChangeTime"`
			// Properties.CreationTime   *time.Time   `xml:"CreationTime"`
			// Properties.ETag           *azcore.ETag `xml:"Etag"`
			// Properties.LastAccessTime *time.Time   `xml:"LastAccessTime"`
			// Properties.LastModified   *time.Time   `xml:"Last-Modified"`
			// Properties.LastWriteTime  *time.Time   `xml:"LastWriteTime"`
			var modTime time.Time
			if directory.Properties.LastWriteTime != nil {
				modTime = *directory.Properties.LastWriteTime
			}
			leaf := f.opt.Enc.ToStandardPath(*directory.Name)
			entry := fs.NewDir(path.Join(dir, leaf), modTime)
			if directory.ID != nil {
				entry.SetID(*directory.ID)
			}
			if directory.Properties.ContentLength != nil {
				entry.SetSize(*directory.Properties.ContentLength)
			}
			entries = append(entries, entry)
		}
		for _, file := range resp.Segment.Files {
			leaf := f.opt.Enc.ToStandardPath(*file.Name)
			entry := &Object{
				fs:     f,
				remote: path.Join(dir, leaf),
			}
			if file.Properties.ContentLength != nil {
				entry.size = *file.Properties.ContentLength
			}
			if file.Properties.LastWriteTime != nil {
				entry.modTime = *file.Properties.LastWriteTime
			}
			entries = append(entries, entry)
		}
	}
	return entries, nil
}
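
// Editor's sketch (not in the original source): listing the root of the share
// and printing entry names, roughly what `rclone lsf remote:` does with the
// List method above. Illustrative only.
func exampleList(ctx context.Context, f *Fs) error {
	entries, err := f.List(ctx, "")
	if err != nil {
		return err
	}
	for _, entry := range entries {
		fmt.Println(entry.Remote())
	}
	return nil
}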

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Size of object in bytes
func (o *Object) Size() int64 {
	return o.size
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// fileClient makes a specialized client for this object
func (o *Object) fileClient() *file.Client {
	return o.fs.fileClient(o.remote)
}

// set the metadata from file.GetPropertiesResponse
func (o *Object) setMetadata(resp *file.GetPropertiesResponse) {
	if resp.ContentLength != nil {
		o.size = *resp.ContentLength
	}
	o.md5 = resp.ContentMD5
	if resp.FileLastWriteTime != nil {
		o.modTime = *resp.FileLastWriteTime
	}
	if resp.ContentType != nil {
		o.contentType = *resp.ContentType
	}
}

// getMetadata gets the metadata if it hasn't already been fetched
func (o *Object) getMetadata(ctx context.Context) error {
	resp, err := o.fileClient().GetProperties(ctx, nil)
	if err != nil {
		return fmt.Errorf("failed to fetch properties: %w", err)
	}
	o.setMetadata(&resp)
	return nil
}

// Hash returns the MD5 of an object returning a lowercase hex string
//
// May make a network request because the [fs.List] method does not
// return MD5 hashes for DirEntry
func (o *Object) Hash(ctx context.Context, ty hash.Type) (string, error) {
	if ty != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	if len(o.md5) == 0 {
		err := o.getMetadata(ctx)
		if err != nil {
			return "", err
		}
	}
	return hex.EncodeToString(o.md5), nil
}
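
// Editor's sketch (not in the original source): verifying a full download
// against the stored MD5 by combining Hash above with Open below.
// Illustrative only.
func exampleVerify(ctx context.Context, o *Object) (bool, error) {
	want, err := o.Hash(ctx, hash.MD5)
	if err != nil {
		return false, err
	}
	rc, err := o.Open(ctx)
	if err != nil {
		return false, err
	}
	defer func() { _ = rc.Close() }()
	h := md5.New()
	if _, err := io.Copy(h, rc); err != nil {
		return false, err
	}
	return hex.EncodeToString(h.Sum(nil)) == want, nil
}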

// MimeType returns the content type of the Object if
// known, or "" if not
func (o *Object) MimeType(ctx context.Context) string {
	if o.contentType == "" {
		err := o.getMetadata(ctx)
		if err != nil {
			fs.Errorf(o, "Failed to fetch Content-Type: %v", err)
		}
	}
	return o.contentType
}

// Storable returns a boolean showing whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// ModTime returns the modification time of the object
//
// Returns time.Now() if not present
func (o *Object) ModTime(ctx context.Context) time.Time {
	if o.modTime.IsZero() {
		return time.Now()
	}
	return o.modTime
}

// SetModTime sets the modification time
func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
	opt := file.SetHTTPHeadersOptions{
		SMBProperties: &file.SMBProperties{
			LastWriteTime: &t,
		},
	}
	_, err := o.fileClient().SetHTTPHeaders(ctx, &opt)
	if err != nil {
		return fmt.Errorf("unable to set modTime: %w", err)
	}
	o.modTime = t
	return nil
}

// Remove an object
func (o *Object) Remove(ctx context.Context) error {
	if _, err := o.fileClient().Delete(ctx, nil); err != nil {
		return fmt.Errorf("unable to delete remote %q: %w", o.remote, err)
	}
	return nil
}

// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	// Offset and Count for range download
	var offset int64
	var count int64
	fs.FixRangeOption(options, o.size)
	for _, option := range options {
		switch x := option.(type) {
		case *fs.RangeOption:
			offset, count = x.Decode(o.size)
			if count < 0 {
				count = o.size - offset
			}
		case *fs.SeekOption:
			offset = x.Offset
		default:
			if option.Mandatory() {
				fs.Logf(o, "Unsupported mandatory option: %v", option)
			}
		}
	}
	opt := file.DownloadStreamOptions{
		Range: file.HTTPRange{
			Offset: offset,
			Count:  count,
		},
	}
	resp, err := o.fileClient().DownloadStream(ctx, &opt)
	if err != nil {
		return nil, fmt.Errorf("could not open remote %q: %w", o.remote, err)
	}
	return resp.Body, nil
}
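
// Editor's sketch (not in the original source): reading the first KiB of an
// object through the RangeOption handling above. fs.RangeOption's End is
// inclusive, so 0-1023 requests 1024 bytes.
func exampleRangedRead(ctx context.Context, o fs.Object) ([]byte, error) {
	rc, err := o.Open(ctx, &fs.RangeOption{Start: 0, End: 1023})
	if err != nil {
		return nil, err
	}
	defer func() { _ = rc.Close() }()
	return io.ReadAll(rc)
}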

// Returns a pointer to t - useful for returning pointers to constants
func ptr[T any](t T) *T {
	return &t
}

var warnStreamUpload sync.Once

// Update the object with the contents of the io.Reader, modTime, size and MD5 hash
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	var (
		size           = src.Size()
		sizeUnknown    = false
		hashUnknown    = true
		fc             = o.fileClient()
		isNewlyCreated = o.modTime.IsZero()
		counter        *readers.CountingReader
		md5Hash        []byte
		hasher         = md5.New()
	)

	if size > int64(maxFileSize) {
		return fmt.Errorf("update: max supported file size is %vB. provided size is %vB", maxFileSize, fs.SizeSuffix(size))
	} else if size < 0 {
		size = int64(o.fs.opt.MaxStreamSize)
		sizeUnknown = true
		warnStreamUpload.Do(func() {
			fs.Logf(o.fs, "Streaming uploads will have maximum file size of %v - adjust with --azurefiles-max-stream-size", o.fs.opt.MaxStreamSize)
		})
	}

	if isNewlyCreated {
		// Make parent directory
		if mkDirErr := o.fs.mkParentDir(ctx, src.Remote()); mkDirErr != nil {
			return fmt.Errorf("update: unable to make parent directories: %w", mkDirErr)
		}
		// Create the file at the size given
		if _, createErr := fc.Create(ctx, size, nil); createErr != nil {
			return fmt.Errorf("update: unable to create file: %w", createErr)
		}
	} else {
		// Resize the file if needed
		if size != o.Size() {
			if _, resizeErr := fc.Resize(ctx, size, nil); resizeErr != nil {
				return fmt.Errorf("update: unable to resize while trying to update: %w", resizeErr)
			}
		}
	}

	// Measure the size if it is unknown
	if sizeUnknown {
		counter = readers.NewCountingReader(in)
		in = counter
	}

	// Check we have a source MD5 hash...
	if hashStr, err := src.Hash(ctx, hash.MD5); err == nil && hashStr != "" {
		md5Hash, err = hex.DecodeString(hashStr)
		if err == nil {
			hashUnknown = false
		} else {
			fs.Errorf(o, "internal error: decoding hex encoded md5 %q: %v", hashStr, err)
		}
	}

	// ...if not calculate one
	if hashUnknown {
		in = io.TeeReader(in, hasher)
	}

	// Upload the file
	opt := file.UploadStreamOptions{
		ChunkSize:   int64(o.fs.opt.ChunkSize),
		Concurrency: o.fs.opt.UploadConcurrency,
	}
	if err := fc.UploadStream(ctx, in, &opt); err != nil {
		// Remove partially uploaded file on error
		if isNewlyCreated {
			if _, delErr := fc.Delete(ctx, nil); delErr != nil {
				fs.Errorf(o, "failed to delete partially uploaded file: %v", delErr)
			}
		}
		return fmt.Errorf("update: failed to upload stream: %w", err)
	}

	if sizeUnknown {
		// Read the uploaded size - the file will be truncated to that size
		// when the properties are set below
		size = int64(counter.BytesRead())
	}
	if hashUnknown {
		md5Hash = hasher.Sum(nil)
	}

	// Update the properties
	modTime := src.ModTime(ctx)
	contentType := fs.MimeType(ctx, src)
	httpHeaders := file.HTTPHeaders{
		ContentMD5:  md5Hash,
		ContentType: &contentType,
	}
	// Apply upload options (also allows one to overwrite content-type)
	for _, option := range options {
		key, value := option.Header()
		lowerKey := strings.ToLower(key)
		switch lowerKey {
		case "cache-control":
			httpHeaders.CacheControl = &value
		case "content-disposition":
			httpHeaders.ContentDisposition = &value
		case "content-encoding":
			httpHeaders.ContentEncoding = &value
		case "content-language":
			httpHeaders.ContentLanguage = &value
		case "content-type":
			httpHeaders.ContentType = &value
		}
	}
	_, err = fc.SetHTTPHeaders(ctx, &file.SetHTTPHeadersOptions{
		FileContentLength: &size,
		SMBProperties: &file.SMBProperties{
			LastWriteTime: &modTime,
		},
		HTTPHeaders: &httpHeaders,
	})
	if err != nil {
		return fmt.Errorf("update: failed to set properties: %w", err)
	}

	// Make sure Object is in sync
	o.size = size
	o.md5 = md5Hash
	o.modTime = modTime
	o.contentType = contentType
	return nil
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't move - not same remote type")
		return nil, fs.ErrorCantMove
	}
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("Move: mkParentDir failed: %w", err)
	}
	opt := file.RenameOptions{
		IgnoreReadOnly:  ptr(true),
		ReplaceIfExists: ptr(true),
	}
	dstAbsPath := f.absPath(remote)
	fc := srcObj.fileClient()
	_, err = fc.Rename(ctx, dstAbsPath, &opt)
	if err != nil {
		return nil, fmt.Errorf("Move: Rename failed: %w", err)
	}
	dstObj, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("Move: NewObject failed: %w", err)
	}
	return dstObj, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
	dstFs := f
	srcFs, ok := src.(*Fs)
	if !ok {
		fs.Debugf(srcFs, "Can't move directory - not same remote type")
		return fs.ErrorCantDirMove
	}

	_, err := dstFs.dirClient(dstRemote).GetProperties(ctx, nil)
	if err == nil {
		return fs.ErrorDirExists
	}
	if !fileerror.HasCode(err, fileerror.ParentNotFound, fileerror.ResourceNotFound) {
		return fmt.Errorf("DirMove: failed to get status of destination directory: %w", err)
	}

	err = dstFs.mkParentDir(ctx, dstRemote)
	if err != nil {
		return fmt.Errorf("DirMove: mkParentDir failed: %w", err)
	}

	opt := directory.RenameOptions{
		IgnoreReadOnly:  ptr(false),
		ReplaceIfExists: ptr(false),
	}
	dstAbsPath := dstFs.absPath(dstRemote)
	dirClient := srcFs.dirClient(srcRemote)
	_, err = dirClient.Rename(ctx, dstAbsPath, &opt)
	if err != nil {
		if fileerror.HasCode(err, fileerror.ResourceAlreadyExists) {
			return fs.ErrorDirExists
		}
		return fmt.Errorf("DirMove: Rename failed: %w", err)
	}
	return nil
}

// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given.
//
// It returns the destination Object and a possible error.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("Copy: mkParentDir failed: %w", err)
	}
	opt := file.StartCopyFromURLOptions{
		CopyFileSMBInfo: &file.CopyFileSMBInfo{
			Attributes:         file.SourceCopyFileAttributes{},
			ChangeTime:         file.SourceCopyFileChangeTime{},
			CreationTime:       file.SourceCopyFileCreationTime{},
			LastWriteTime:      file.SourceCopyFileLastWriteTime{},
			PermissionCopyMode: ptr(file.PermissionCopyModeTypeSource),
			IgnoreReadOnly:     ptr(true),
		},
	}
	srcURL := srcObj.fileClient().URL()
	fc := f.fileClient(remote)
	_, err = fc.StartCopyFromURL(ctx, srcURL, &opt)
	if err != nil {
		return nil, fmt.Errorf("Copy failed: %w", err)
	}
	dstObj, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("Copy: NewObject failed: %w", err)
	}
	return dstObj, nil
}

// Implementation of WriterAt
type writerAt struct {
	ctx  context.Context
	f    *Fs
	fc   *file.Client
	mu   sync.Mutex // protects variables below
	size int64
}

// Adaptor to add a Close method to bytes.Reader
type bytesReaderCloser struct {
	*bytes.Reader
}

// Close the bytesReaderCloser
func (bytesReaderCloser) Close() error {
	return nil
}

// WriteAt writes len(p) bytes from p to the underlying data stream
// at offset off. It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// WriteAt must return a non-nil error if it returns n < len(p).
//
// If WriteAt is writing to a destination with a seek offset,
// WriteAt should not affect nor be affected by the underlying
// seek offset.
//
// Clients of WriteAt can execute parallel WriteAt calls on the same
// destination if the ranges do not overlap.
//
// Implementations must not retain p.
func (w *writerAt) WriteAt(p []byte, off int64) (n int, err error) {
	endOffset := off + int64(len(p))
	w.mu.Lock()
	if w.size < endOffset {
		_, err = w.fc.Resize(w.ctx, endOffset, nil)
		if err != nil {
			w.mu.Unlock()
			return 0, fmt.Errorf("WriteAt: failed to resize file: %w", err)
		}
		w.size = endOffset
	}
	w.mu.Unlock()

	in := bytesReaderCloser{bytes.NewReader(p)}
	_, err = w.fc.UploadRange(w.ctx, off, in, nil)
	if err != nil {
		return 0, err
	}
	return len(p), nil
}

// Close the writer
func (w *writerAt) Close() error {
	// FIXME should we be doing something here?
	return nil
}

// OpenWriterAt opens with a handle for random access writes
//
// Pass in the remote desired and the size if known.
//
// It truncates any existing object
func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) {
	err := f.mkParentDir(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("OpenWriterAt: failed to create parent directory: %w", err)
	}
	fc := f.fileClient(remote)
	if size < 0 {
		size = 0
	}
	_, err = fc.Create(ctx, size, nil)
	if err != nil {
		return nil, fmt.Errorf("OpenWriterAt: unable to create file: %w", err)
	}
	w := &writerAt{
		ctx:  ctx,
		f:    f,
		fc:   fc,
		size: size,
	}
	return w, nil
}
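
// Editor's sketch (not in the original source): two goroutines writing
// non-overlapping ranges through the WriterAt returned by OpenWriterAt, as
// permitted by the WriteAt contract above. The path and sizes are illustrative.
func exampleParallelWrite(ctx context.Context, f *Fs) error {
	w, err := f.OpenWriterAt(ctx, "big.bin", 8)
	if err != nil {
		return err
	}
	var (
		wg       sync.WaitGroup
		mu       sync.Mutex
		firstErr error
	)
	write := func(p []byte, off int64) {
		defer wg.Done()
		if _, err := w.WriteAt(p, off); err != nil {
			mu.Lock()
			if firstErr == nil {
				firstErr = err
			}
			mu.Unlock()
		}
	}
	wg.Add(2)
	go write([]byte("abcd"), 0)
	go write([]byte("efgh"), 4)
	wg.Wait()
	if err := w.Close(); err != nil && firstErr == nil {
		firstErr = err
	}
	return firstErr
}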

// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
	stats, err := f.shareClient.GetStatistics(ctx, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to read share statistics: %w", err)
	}
	usage := &fs.Usage{
		Used: stats.ShareUsageBytes, // bytes in use
	}
	return usage, nil
}

// Check the interfaces are satisfied
var (
	_ fs.Fs             = &Fs{}
	_ fs.PutStreamer    = &Fs{}
	_ fs.Abouter        = &Fs{}
	_ fs.Mover          = &Fs{}
	_ fs.DirMover       = &Fs{}
	_ fs.Copier         = &Fs{}
	_ fs.OpenWriterAter = &Fs{}
	_ fs.Object         = &Object{}
	_ fs.MimeTyper      = &Object{}
)