github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/webdav/chunking.go

package webdav

/*
	chunked update for Nextcloud
	see https://docs.nextcloud.com/server/20/developer_manual/client_apis/WebDAV/chunking.html
*/
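
// The upload happens in three steps, mirroring the functions below: an
// upload directory is created with MKCOL under BASE/dav/uploads/USER/, each
// chunk is PUT into it named after its byte range, and the server assembles
// the chunks when the special ".file" resource is MOVEd to the destination.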

import (
	"context"
	"crypto/md5"
	"encoding/hex"
	"errors"
	"fmt"
	"io"
	"net/http"
	"path"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/readers"
	"github.com/rclone/rclone/lib/rest"
)

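// shouldRetryChunkMerge wraps shouldRetry for the final MOVE that assembles
// the chunks: a 404, which Nextcloud can return while the merge is under
// way, is retried, while a 423 LOCKED aborts with an explanatory error.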
func (f *Fs) shouldRetryChunkMerge(ctx context.Context, resp *http.Response, err error) (bool, error) {
	// Not found. Can be returned by Nextcloud when merging chunks of an upload.
	if resp != nil && resp.StatusCode == 404 {
		return true, err
	}

	// 423 LOCKED
	if resp != nil && resp.StatusCode == 423 {
		return false, fmt.Errorf("merging the uploaded chunks failed with 423 LOCKED. This usually happens when the chunk merging is still in progress on Nextcloud, but it may also indicate a failed transfer: %w", err)
	}

	return f.shouldRetry(ctx, resp, err)
}

// setUploadChunkSize sets the chunk size for testing
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
	old, f.opt.ChunkSize = f.opt.ChunkSize, cs
	return
}

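// getChunksUploadDir returns the name of the temporary upload directory for
// this object, derived from an MD5 hash of its remote path.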
func (o *Object) getChunksUploadDir() (string, error) {
	hasher := md5.New()
	_, err := hasher.Write([]byte(o.filePath()))
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't hash URL: %w", err)
	}
	uploadDir := "rclone-chunked-upload-" + hex.EncodeToString(hasher.Sum(nil))
	return uploadDir, nil
}

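// getChunksUploadURL derives the chunked upload URL (BASE/dav/uploads/USER/)
// from the endpoint URL, which must be of the /dav/files/USER form.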
func (f *Fs) getChunksUploadURL() (string, error) {
	submatch := nextCloudURLRegex.FindStringSubmatch(f.endpointURL)
	if submatch == nil {
		return "", errors.New("the remote url looks incorrect. Note that Nextcloud chunked uploads require you to use the /dav/files/USER endpoint instead of /webdav. Please check 'rclone config show remotename' to verify that the url field ends in /dav/files/USERNAME")
	}

	baseURL, user := submatch[1], submatch[2]
	chunksUploadURL := fmt.Sprintf("%s/dav/uploads/%s/", baseURL, user)

	return chunksUploadURL, nil
}

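// shouldUseChunkedUpload reports whether src should be uploaded in chunks,
// i.e. chunking is available and enabled and src is larger than one chunk.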
func (o *Object) shouldUseChunkedUpload(src fs.ObjectInfo) bool {
	return o.fs.canChunk && o.fs.opt.ChunkSize > 0 && src.Size() > int64(o.fs.opt.ChunkSize)
}

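// updateChunked uploads in0 using the Nextcloud chunked upload protocol: it
// creates the upload directory, uploads the chunks into it and finally asks
// the server to merge them into the destination object.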
func (o *Object) updateChunked(ctx context.Context, in0 io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
	var uploadDir string

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#starting-a-chunked-upload
	uploadDir, err = o.createChunksUploadDirectory(ctx)
	if err != nil {
		return err
	}

	partObj := &Object{
		fs: o.fs,
	}

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#uploading-chunks
	err = o.uploadChunks(ctx, in0, src.Size(), partObj, uploadDir, options)
	if err != nil {
		return err
	}

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html#assembling-the-chunks
	err = o.mergeChunks(ctx, uploadDir, options, src)
	if err != nil {
		return err
	}

	return nil
}

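// uploadChunks reads size bytes from in0 and PUTs them sequentially into
// uploadDir as chunks named after their byte range.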
func (o *Object) uploadChunks(ctx context.Context, in0 io.Reader, size int64, partObj *Object, uploadDir string, options []fs.OpenOption) error {
	chunkSize := int64(partObj.fs.opt.ChunkSize)

	// TODO: upload chunks in parallel for faster transfer speeds
	for offset := int64(0); offset < size; offset += chunkSize {
		if err := ctx.Err(); err != nil {
			return err
		}

		contentLength := chunkSize

		// Last chunk may be smaller
		if size-offset < contentLength {
			contentLength = size - offset
		}

		endOffset := offset + contentLength - 1

		partObj.remote = fmt.Sprintf("%s/%015d-%015d", uploadDir, offset, endOffset)
		// Enable low-level HTTP/2 retries.
		// 2022-04-28 15:59:06 ERROR : stuff/video.avi: Failed to copy: uploading chunk failed: Put "https://censored.com/remote.php/dav/uploads/Admin/rclone-chunked-upload-censored/000006113198080-000006123683840": http2: Transport: cannot retry err [http2: Transport received Server's graceful shutdown GOAWAY] after Request.Body was written; define Request.GetBody to avoid this error

		buf := make([]byte, chunkSize)
		in := readers.NewRepeatableLimitReaderBuffer(in0, buf, chunkSize)

		getBody := func() (io.ReadCloser, error) {
			// RepeatableReader plays well with accounting, so rewinding doesn't make the progress reporting buggy
			if _, err := in.Seek(0, io.SeekStart); err != nil {
				return nil, err
			}

			return io.NopCloser(in), nil
		}

		err := partObj.updateSimple(ctx, in, getBody, partObj.remote, contentLength, "application/x-www-form-urlencoded", nil, o.fs.chunksUploadURL, options...)
		if err != nil {
			return fmt.Errorf("uploading chunk failed: %w", err)
		}
	}
	return nil
}

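// createChunksUploadDirectory purges any upload directory left over from a
// previous attempt and then creates a fresh one with MKCOL, returning its name.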
func (o *Object) createChunksUploadDirectory(ctx context.Context) (string, error) {
	uploadDir, err := o.getChunksUploadDir()
	if err != nil {
		return uploadDir, err
	}

	err = o.purgeUploadedChunks(ctx, uploadDir)
	if err != nil {
		return "", fmt.Errorf("chunked upload couldn't purge upload directory: %w", err)
	}

	opts := rest.Opts{
		Method:     "MKCOL",
		Path:       uploadDir + "/",
		NoResponse: true,
		RootURL:    o.fs.chunksUploadURL,
	}
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		resp, err := o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetry(ctx, resp, err)
	})
	if err != nil {
		return "", fmt.Errorf("making upload directory failed: %w", err)
	}
	return uploadDir, err
}

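// mergeChunks asks the server to assemble the uploaded chunks by MOVEing the
// special .file resource in uploadDir to the object's destination URL.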
func (o *Object) mergeChunks(ctx context.Context, uploadDir string, options []fs.OpenOption, src fs.ObjectInfo) error {
	var resp *http.Response

	// see https://docs.nextcloud.com/server/24/developer_manual/client_apis/WebDAV/chunking.html?highlight=chunk#assembling-the-chunks
	opts := rest.Opts{
		Method:     "MOVE",
		Path:       path.Join(uploadDir, ".file"),
		NoResponse: true,
		Options:    options,
		RootURL:    o.fs.chunksUploadURL,
	}
	destinationURL, err := rest.URLJoin(o.fs.endpoint, o.filePath())
	if err != nil {
		return fmt.Errorf("finalize chunked upload couldn't join URL: %w", err)
	}
	opts.ExtraHeaders = o.extraHeaders(ctx, src)
	opts.ExtraHeaders["Destination"] = destinationURL.String()
	err = o.fs.pacer.Call(func() (bool, error) {
		resp, err = o.fs.srv.Call(ctx, &opts)
		return o.fs.shouldRetryChunkMerge(ctx, resp, err)
	})
	if err != nil {
		return fmt.Errorf("finalize chunked upload failed, destinationURL: \"%s\": %w", destinationURL, err)
	}
	return err
}

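// purgeUploadedChunks deletes uploadDir if it exists, treating a 404
// response as "nothing to clean up".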
func (o *Object) purgeUploadedChunks(ctx context.Context, uploadDir string) error {
	// clean the upload directory if it exists (this means that a previous try didn't clean up properly).
	opts := rest.Opts{
		Method:     "DELETE",
		Path:       uploadDir + "/",
		NoResponse: true,
		RootURL:    o.fs.chunksUploadURL,
	}

	err := o.fs.pacer.Call(func() (bool, error) {
		resp, err := o.fs.srv.CallXML(ctx, &opts, nil, nil)

		// directory doesn't exist, no need to purge
		if resp != nil && resp.StatusCode == http.StatusNotFound {
			return false, nil
		}

		return o.fs.shouldRetry(ctx, resp, err)
	})

	return err
}