github.com/rclone/rclone@v1.66.1-0.20240517100346-7b89735ae726/backend/compress/compress.go (about) 1 // Package compress provides wrappers for Fs and Object which implement compression. 2 package compress 3 4 import ( 5 "bufio" 6 "bytes" 7 "context" 8 "crypto/md5" 9 "encoding/base64" 10 "encoding/binary" 11 "encoding/hex" 12 "encoding/json" 13 "errors" 14 "fmt" 15 "io" 16 "os" 17 "path" 18 "regexp" 19 "strings" 20 "time" 21 22 "github.com/buengese/sgzip" 23 "github.com/gabriel-vasile/mimetype" 24 25 "github.com/rclone/rclone/fs" 26 "github.com/rclone/rclone/fs/accounting" 27 "github.com/rclone/rclone/fs/chunkedreader" 28 "github.com/rclone/rclone/fs/config/configmap" 29 "github.com/rclone/rclone/fs/config/configstruct" 30 "github.com/rclone/rclone/fs/fspath" 31 "github.com/rclone/rclone/fs/hash" 32 "github.com/rclone/rclone/fs/log" 33 "github.com/rclone/rclone/fs/object" 34 "github.com/rclone/rclone/fs/operations" 35 ) 36 37 // Globals 38 const ( 39 initialChunkSize = 262144 // Initial and max sizes of chunks when reading parts of the file. Currently 40 maxChunkSize = 8388608 // at 256 KiB and 8 MiB. 41 42 bufferSize = 8388608 43 heuristicBytes = 1048576 44 minCompressionRatio = 1.1 45 46 gzFileExt = ".gz" 47 metaFileExt = ".json" 48 uncompressedFileExt = ".bin" 49 ) 50 51 // Compression modes 52 const ( 53 Uncompressed = 0 54 Gzip = 2 55 ) 56 57 var nameRegexp = regexp.MustCompile(`^(.+?)\.([A-Za-z0-9-_]{11})$`) 58 59 // Register with Fs 60 func init() { 61 // Build compression mode options. 62 compressionModeOptions := []fs.OptionExample{ 63 { // Default compression mode options { 64 Value: "gzip", 65 Help: "Standard gzip compression with fastest parameters.", 66 }, 67 } 68 69 // Register our remote 70 fs.Register(&fs.RegInfo{ 71 Name: "compress", 72 Description: "Compress a remote", 73 NewFs: NewFs, 74 MetadataInfo: &fs.MetadataInfo{ 75 Help: `Any metadata supported by the underlying remote is read and written.`, 76 }, 77 Options: []fs.Option{{ 78 Name: "remote", 79 Help: "Remote to compress.", 80 Required: true, 81 }, { 82 Name: "mode", 83 Help: "Compression mode.", 84 Default: "gzip", 85 Examples: compressionModeOptions, 86 }, { 87 Name: "level", 88 Help: `GZIP compression level (-2 to 9). 89 90 Generally -1 (default, equivalent to 5) is recommended. 91 Levels 1 to 9 increase compression at the cost of speed. Going past 6 92 generally offers very little return. 93 94 Level -2 uses Huffman encoding only. Only use if you know what you 95 are doing. 96 Level 0 turns off compression.`, 97 Default: sgzip.DefaultCompression, 98 Advanced: true, 99 }, { 100 Name: "ram_cache_limit", 101 Help: `Some remotes don't allow the upload of files with unknown size. 102 In this case the compressed file will need to be cached to determine 103 it's size. 104 105 Files smaller than this limit will be cached in RAM, files larger than 106 this limit will be cached on disk.`, 107 Default: fs.SizeSuffix(20 * 1024 * 1024), 108 Advanced: true, 109 }}, 110 }) 111 } 112 113 // Options defines the configuration for this backend 114 type Options struct { 115 Remote string `config:"remote"` 116 CompressionMode string `config:"mode"` 117 CompressionLevel int `config:"level"` 118 RAMCacheLimit fs.SizeSuffix `config:"ram_cache_limit"` 119 } 120 121 /*** FILESYSTEM FUNCTIONS ***/ 122 123 // Fs represents a wrapped fs.Fs 124 type Fs struct { 125 fs.Fs 126 wrapper fs.Fs 127 name string 128 root string 129 opt Options 130 mode int // compression mode id 131 features *fs.Features // optional features 132 } 133 134 // NewFs constructs an Fs from the path, container:path 135 func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) { 136 // Parse config into Options struct 137 opt := new(Options) 138 err := configstruct.Set(m, opt) 139 if err != nil { 140 return nil, err 141 } 142 143 remote := opt.Remote 144 if strings.HasPrefix(remote, name+":") { 145 return nil, errors.New("can't point press remote at itself - check the value of the remote setting") 146 } 147 148 wInfo, wName, wPath, wConfig, err := fs.ConfigFs(remote) 149 if err != nil { 150 return nil, fmt.Errorf("failed to parse remote %q to wrap: %w", remote, err) 151 } 152 153 // Strip trailing slashes if they exist in rpath 154 rpath = strings.TrimRight(rpath, "\\/") 155 156 // First, check for a file 157 // If a metadata file was found, return an error. Otherwise, check for a directory 158 remotePath := fspath.JoinRootPath(wPath, makeMetadataName(rpath)) 159 wrappedFs, err := wInfo.NewFs(ctx, wName, remotePath, wConfig) 160 if err != fs.ErrorIsFile { 161 remotePath = fspath.JoinRootPath(wPath, rpath) 162 wrappedFs, err = wInfo.NewFs(ctx, wName, remotePath, wConfig) 163 } 164 if err != nil && err != fs.ErrorIsFile { 165 return nil, fmt.Errorf("failed to make remote %s:%q to wrap: %w", wName, remotePath, err) 166 } 167 168 // Create the wrapping fs 169 f := &Fs{ 170 Fs: wrappedFs, 171 name: name, 172 root: rpath, 173 opt: *opt, 174 mode: compressionModeFromName(opt.CompressionMode), 175 } 176 // Correct root if definitely pointing to a file 177 if err == fs.ErrorIsFile { 178 f.root = path.Dir(f.root) 179 if f.root == "." || f.root == "/" { 180 f.root = "" 181 } 182 } 183 // the features here are ones we could support, and they are 184 // ANDed with the ones from wrappedFs 185 f.features = (&fs.Features{ 186 CaseInsensitive: true, 187 DuplicateFiles: false, 188 ReadMimeType: false, 189 WriteMimeType: false, 190 GetTier: true, 191 SetTier: true, 192 BucketBased: true, 193 CanHaveEmptyDirectories: true, 194 ReadMetadata: true, 195 WriteMetadata: true, 196 UserMetadata: true, 197 ReadDirMetadata: true, 198 WriteDirMetadata: true, 199 WriteDirSetModTime: true, 200 UserDirMetadata: true, 201 DirModTimeUpdatesOnWrite: true, 202 PartialUploads: true, 203 }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) 204 // We support reading MIME types no matter the wrapped fs 205 f.features.ReadMimeType = true 206 // We can only support putstream if we have serverside copy or move 207 if !operations.CanServerSideMove(wrappedFs) { 208 f.features.Disable("PutStream") 209 } 210 211 return f, err 212 } 213 214 func compressionModeFromName(name string) int { 215 switch name { 216 case "gzip": 217 return Gzip 218 default: 219 return Uncompressed 220 } 221 } 222 223 // Converts an int64 to base64 224 func int64ToBase64(number int64) string { 225 intBytes := make([]byte, 8) 226 binary.LittleEndian.PutUint64(intBytes, uint64(number)) 227 return base64.RawURLEncoding.EncodeToString(intBytes) 228 } 229 230 // Converts base64 to int64 231 func base64ToInt64(str string) (int64, error) { 232 intBytes, err := base64.RawURLEncoding.DecodeString(str) 233 if err != nil { 234 return 0, err 235 } 236 return int64(binary.LittleEndian.Uint64(intBytes)), nil 237 } 238 239 // Processes a file name for a compressed file. Returns the original file name, the extension, and the size of the original file. 240 // Returns -2 for the original size if the file is uncompressed. 241 func processFileName(compressedFileName string) (origFileName string, extension string, origSize int64, err error) { 242 // Separate the filename and size from the extension 243 extensionPos := strings.LastIndex(compressedFileName, ".") 244 if extensionPos == -1 { 245 return "", "", 0, errors.New("file name has no extension") 246 } 247 extension = compressedFileName[extensionPos:] 248 nameWithSize := compressedFileName[:extensionPos] 249 if extension == uncompressedFileExt { 250 return nameWithSize, extension, -2, nil 251 } 252 match := nameRegexp.FindStringSubmatch(nameWithSize) 253 if match == nil || len(match) != 3 { 254 return "", "", 0, errors.New("invalid filename") 255 } 256 size, err := base64ToInt64(match[2]) 257 if err != nil { 258 return "", "", 0, errors.New("could not decode size") 259 } 260 return match[1], gzFileExt, size, nil 261 } 262 263 // Generates the file name for a metadata file 264 func makeMetadataName(remote string) (newRemote string) { 265 return remote + metaFileExt 266 } 267 268 // Checks whether a file is a metadata file 269 func isMetadataFile(filename string) bool { 270 return strings.HasSuffix(filename, metaFileExt) 271 } 272 273 // Checks whether a file is a metadata file and returns the original 274 // file name and a flag indicating whether it was a metadata file or 275 // not. 276 func unwrapMetadataFile(filename string) (string, bool) { 277 if !isMetadataFile(filename) { 278 return "", false 279 } 280 return filename[:len(filename)-len(metaFileExt)], true 281 } 282 283 // makeDataName generates the file name for a data file with specified compression mode 284 func makeDataName(remote string, size int64, mode int) (newRemote string) { 285 if mode != Uncompressed { 286 newRemote = remote + "." + int64ToBase64(size) + gzFileExt 287 } else { 288 newRemote = remote + uncompressedFileExt 289 } 290 return newRemote 291 } 292 293 // dataName generates the file name for data file 294 func (f *Fs) dataName(remote string, size int64, compressed bool) (name string) { 295 if !compressed { 296 return makeDataName(remote, size, Uncompressed) 297 } 298 return makeDataName(remote, size, f.mode) 299 } 300 301 // addData parses an object and adds it to the DirEntries 302 func (f *Fs) addData(entries *fs.DirEntries, o fs.Object) { 303 origFileName, _, size, err := processFileName(o.Remote()) 304 if err != nil { 305 fs.Errorf(o, "Error on parsing file name: %v", err) 306 return 307 } 308 if size == -2 { // File is uncompressed 309 size = o.Size() 310 } 311 metaName := makeMetadataName(origFileName) 312 *entries = append(*entries, f.newObjectSizeAndNameOnly(o, metaName, size)) 313 } 314 315 // addDir adds a dir to the dir entries 316 func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) { 317 *entries = append(*entries, f.newDir(dir)) 318 } 319 320 // newDir returns a dir 321 func (f *Fs) newDir(dir fs.Directory) fs.Directory { 322 return dir // We're using the same dir 323 } 324 325 // processEntries parses the file names and adds metadata to the dir entries 326 func (f *Fs) processEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) { 327 newEntries = entries[:0] // in place filter 328 for _, entry := range entries { 329 switch x := entry.(type) { 330 case fs.Object: 331 if !isMetadataFile(x.Remote()) { 332 f.addData(&newEntries, x) // Only care about data files for now; metadata files are redundant. 333 } 334 case fs.Directory: 335 f.addDir(&newEntries, x) 336 default: 337 return nil, fmt.Errorf("unknown object type %T", entry) 338 } 339 } 340 return newEntries, nil 341 } 342 343 // List the objects and directories in dir into entries. The 344 // entries can be returned in any order but should be for a 345 // complete directory. 346 // 347 // dir should be "" to list the root, and should not have 348 // trailing slashes. 349 // 350 // This should return ErrDirNotFound if the directory isn't 351 // found. 352 // List entries and process them 353 func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { 354 entries, err = f.Fs.List(ctx, dir) 355 if err != nil { 356 return nil, err 357 } 358 return f.processEntries(entries) 359 } 360 361 // ListR lists the objects and directories of the Fs starting 362 // from dir recursively into out. 363 // 364 // dir should be "" to start from the root, and should not 365 // have trailing slashes. 366 // 367 // This should return ErrDirNotFound if the directory isn't 368 // found. 369 // 370 // It should call callback for each tranche of entries read. 371 // These need not be returned in any particular order. If 372 // callback returns an error then the listing will stop 373 // immediately. 374 // 375 // Don't implement this unless you have a more efficient way 376 // of listing recursively that doing a directory traversal. 377 func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { 378 return f.Fs.Features().ListR(ctx, dir, func(entries fs.DirEntries) error { 379 newEntries, err := f.processEntries(entries) 380 if err != nil { 381 return err 382 } 383 return callback(newEntries) 384 }) 385 } 386 387 // NewObject finds the Object at remote. 388 func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { 389 // Read metadata from metadata object 390 mo, err := f.Fs.NewObject(ctx, makeMetadataName(remote)) 391 if err != nil { 392 return nil, err 393 } 394 meta, err := readMetadata(ctx, mo) 395 if err != nil { 396 return nil, fmt.Errorf("error decoding metadata: %w", err) 397 } 398 // Create our Object 399 o, err := f.Fs.NewObject(ctx, makeDataName(remote, meta.CompressionMetadata.Size, meta.Mode)) 400 if err != nil { 401 return nil, err 402 } 403 return f.newObject(o, mo, meta), nil 404 } 405 406 // checkCompressAndType checks if an object is compressible and determines it's mime type 407 // returns a multireader with the bytes that were read to determine mime type 408 func checkCompressAndType(in io.Reader) (newReader io.Reader, compressible bool, mimeType string, err error) { 409 in, wrap := accounting.UnWrap(in) 410 buf := make([]byte, heuristicBytes) 411 n, err := in.Read(buf) 412 buf = buf[:n] 413 if err != nil && err != io.EOF { 414 return nil, false, "", err 415 } 416 mime := mimetype.Detect(buf) 417 compressible, err = isCompressible(bytes.NewReader(buf)) 418 if err != nil { 419 return nil, false, "", err 420 } 421 in = io.MultiReader(bytes.NewReader(buf), in) 422 return wrap(in), compressible, mime.String(), nil 423 } 424 425 // isCompressible checks the compression ratio of the provided data and returns true if the ratio exceeds 426 // the configured threshold 427 func isCompressible(r io.Reader) (bool, error) { 428 var b bytes.Buffer 429 w, err := sgzip.NewWriterLevel(&b, sgzip.DefaultCompression) 430 if err != nil { 431 return false, err 432 } 433 n, err := io.Copy(w, r) 434 if err != nil { 435 return false, err 436 } 437 err = w.Close() 438 if err != nil { 439 return false, err 440 } 441 ratio := float64(n) / float64(b.Len()) 442 return ratio > minCompressionRatio, nil 443 } 444 445 // verifyObjectHash verifies the Objects hash 446 func (f *Fs) verifyObjectHash(ctx context.Context, o fs.Object, hasher *hash.MultiHasher, ht hash.Type) error { 447 srcHash := hasher.Sums()[ht] 448 dstHash, err := o.Hash(ctx, ht) 449 if err != nil { 450 return fmt.Errorf("failed to read destination hash: %w", err) 451 } 452 if srcHash != "" && dstHash != "" && srcHash != dstHash { 453 // remove object 454 err = o.Remove(ctx) 455 if err != nil { 456 fs.Errorf(o, "Failed to remove corrupted object: %v", err) 457 } 458 return fmt.Errorf("corrupted on transfer: %v compressed hashes differ src(%s) %q vs dst(%s) %q", ht, f.Fs, srcHash, o.Fs(), dstHash) 459 } 460 return nil 461 } 462 463 type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) 464 465 type compressionResult struct { 466 err error 467 meta sgzip.GzipMetadata 468 } 469 470 // replicating some of operations.Rcat functionality because we want to support remotes without streaming 471 // support and of course cannot know the size of a compressed file before compressing it. 472 func (f *Fs) rcat(ctx context.Context, dstFileName string, in io.ReadCloser, modTime time.Time, options []fs.OpenOption) (o fs.Object, err error) { 473 474 // cache small files in memory and do normal upload 475 buf := make([]byte, f.opt.RAMCacheLimit) 476 if n, err := io.ReadFull(in, buf); err == io.EOF || err == io.ErrUnexpectedEOF { 477 src := object.NewStaticObjectInfo(dstFileName, modTime, int64(len(buf[:n])), false, nil, f.Fs) 478 return f.Fs.Put(ctx, bytes.NewBuffer(buf[:n]), src, options...) 479 } 480 481 // Need to include what we already read 482 in = &ReadCloserWrapper{ 483 Reader: io.MultiReader(bytes.NewReader(buf), in), 484 Closer: in, 485 } 486 487 canStream := f.Fs.Features().PutStream != nil 488 if canStream { 489 src := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, f.Fs) 490 return f.Fs.Features().PutStream(ctx, in, src, options...) 491 } 492 493 fs.Debugf(f, "Target remote doesn't support streaming uploads, creating temporary local file") 494 tempFile, err := os.CreateTemp("", "rclone-press-") 495 defer func() { 496 // these errors should be relatively uncritical and the upload should've succeeded so it's okay-ish 497 // to ignore them 498 _ = tempFile.Close() 499 _ = os.Remove(tempFile.Name()) 500 }() 501 if err != nil { 502 return nil, fmt.Errorf("failed to create temporary local FS to spool file: %w", err) 503 } 504 if _, err = io.Copy(tempFile, in); err != nil { 505 return nil, fmt.Errorf("failed to write temporary local file: %w", err) 506 } 507 if _, err = tempFile.Seek(0, 0); err != nil { 508 return nil, err 509 } 510 finfo, err := tempFile.Stat() 511 if err != nil { 512 return nil, err 513 } 514 return f.Fs.Put(ctx, tempFile, object.NewStaticObjectInfo(dstFileName, modTime, finfo.Size(), false, nil, f.Fs)) 515 } 516 517 // Put a compressed version of a file. Returns a wrappable object and metadata. 518 func (f *Fs) putCompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) { 519 // Unwrap reader accounting 520 in, wrap := accounting.UnWrap(in) 521 522 // Add the metadata hasher 523 metaHasher := md5.New() 524 in = io.TeeReader(in, metaHasher) 525 526 // Compress the file 527 pipeReader, pipeWriter := io.Pipe() 528 results := make(chan compressionResult) 529 go func() { 530 gz, err := sgzip.NewWriterLevel(pipeWriter, f.opt.CompressionLevel) 531 if err != nil { 532 results <- compressionResult{err: err, meta: sgzip.GzipMetadata{}} 533 return 534 } 535 _, err = io.Copy(gz, in) 536 gzErr := gz.Close() 537 if gzErr != nil { 538 fs.Errorf(nil, "Failed to close compress: %v", gzErr) 539 if err == nil { 540 err = gzErr 541 } 542 } 543 closeErr := pipeWriter.Close() 544 if closeErr != nil { 545 fs.Errorf(nil, "Failed to close pipe: %v", closeErr) 546 if err == nil { 547 err = closeErr 548 } 549 } 550 results <- compressionResult{err: err, meta: gz.MetaData()} 551 }() 552 wrappedIn := wrap(bufio.NewReaderSize(pipeReader, bufferSize)) // Probably no longer needed as sgzip has it's own buffering 553 554 // Find a hash the destination supports to compute a hash of 555 // the compressed data. 556 ht := f.Fs.Hashes().GetOne() 557 var hasher *hash.MultiHasher 558 var err error 559 if ht != hash.None { 560 // unwrap the accounting again 561 wrappedIn, wrap = accounting.UnWrap(wrappedIn) 562 hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) 563 if err != nil { 564 return nil, nil, err 565 } 566 // add the hasher and re-wrap the accounting 567 wrappedIn = io.TeeReader(wrappedIn, hasher) 568 wrappedIn = wrap(wrappedIn) 569 } 570 571 // Transfer the data 572 o, err := f.rcat(ctx, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx), options) 573 //o, err := operations.Rcat(ctx, f.Fs, makeDataName(src.Remote(), src.Size(), f.mode), io.NopCloser(wrappedIn), src.ModTime(ctx)) 574 if err != nil { 575 if o != nil { 576 removeErr := o.Remove(ctx) 577 if removeErr != nil { 578 fs.Errorf(o, "Failed to remove partially transferred object: %v", err) 579 } 580 } 581 return nil, nil, err 582 } 583 // Check whether we got an error during compression 584 result := <-results 585 err = result.err 586 if err != nil { 587 if o != nil { 588 removeErr := o.Remove(ctx) 589 if removeErr != nil { 590 fs.Errorf(o, "Failed to remove partially compressed object: %v", err) 591 } 592 } 593 return nil, nil, err 594 } 595 596 // Generate metadata 597 meta := newMetadata(result.meta.Size, f.mode, result.meta, hex.EncodeToString(metaHasher.Sum(nil)), mimeType) 598 599 // Check the hashes of the compressed data if we were comparing them 600 if ht != hash.None && hasher != nil { 601 err = f.verifyObjectHash(ctx, o, hasher, ht) 602 if err != nil { 603 return nil, nil, err 604 } 605 } 606 607 return o, meta, nil 608 } 609 610 // Put an uncompressed version of a file. Returns a wrappable object and metadata. 611 func (f *Fs) putUncompress(ctx context.Context, in io.Reader, src fs.ObjectInfo, put putFn, options []fs.OpenOption, mimeType string) (fs.Object, *ObjectMetadata, error) { 612 // Unwrap the accounting, add our metadata hasher, then wrap it back on 613 in, wrap := accounting.UnWrap(in) 614 615 hs := hash.NewHashSet(hash.MD5) 616 ht := f.Fs.Hashes().GetOne() 617 if !hs.Contains(ht) { 618 hs.Add(ht) 619 } 620 metaHasher, err := hash.NewMultiHasherTypes(hs) 621 if err != nil { 622 return nil, nil, err 623 } 624 in = io.TeeReader(in, metaHasher) 625 wrappedIn := wrap(in) 626 627 // Put the object 628 o, err := put(ctx, wrappedIn, f.wrapInfo(src, makeDataName(src.Remote(), src.Size(), Uncompressed), src.Size()), options...) 629 if err != nil { 630 if o != nil { 631 removeErr := o.Remove(ctx) 632 if removeErr != nil { 633 fs.Errorf(o, "Failed to remove partially transferred object: %v", err) 634 } 635 } 636 return nil, nil, err 637 } 638 // Check the hashes of the compressed data if we were comparing them 639 if ht != hash.None { 640 err := f.verifyObjectHash(ctx, o, metaHasher, ht) 641 if err != nil { 642 return nil, nil, err 643 } 644 } 645 646 // Return our object and metadata 647 sum, err := metaHasher.Sum(hash.MD5) 648 if err != nil { 649 return nil, nil, err 650 } 651 return o, newMetadata(o.Size(), Uncompressed, sgzip.GzipMetadata{}, hex.EncodeToString(sum), mimeType), nil 652 } 653 654 // This function will write a metadata struct to a metadata Object for an src. Returns a wrappable metadata object. 655 func (f *Fs) putMetadata(ctx context.Context, meta *ObjectMetadata, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (mo fs.Object, err error) { 656 // Generate the metadata contents 657 data, err := json.Marshal(meta) 658 if err != nil { 659 return nil, err 660 } 661 metaReader := bytes.NewReader(data) 662 663 // Put the data 664 mo, err = put(ctx, metaReader, f.wrapInfo(src, makeMetadataName(src.Remote()), int64(len(data))), options...) 665 if err != nil { 666 if mo != nil { 667 removeErr := mo.Remove(ctx) 668 if removeErr != nil { 669 fs.Errorf(mo, "Failed to remove partially transferred object: %v", err) 670 } 671 } 672 return nil, err 673 } 674 675 return mo, nil 676 } 677 678 // This function will put both the data and metadata for an Object. 679 // putData is the function used for data, while putMeta is the function used for metadata. 680 // The putData function will only be used when the object is not compressible if the 681 // data is compressible this parameter will be ignored. 682 func (f *Fs) putWithCustomFunctions(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, 683 putData putFn, putMeta putFn, compressible bool, mimeType string) (*Object, error) { 684 // Put file then metadata 685 var dataObject fs.Object 686 var meta *ObjectMetadata 687 var err error 688 if compressible { 689 dataObject, meta, err = f.putCompress(ctx, in, src, options, mimeType) 690 } else { 691 dataObject, meta, err = f.putUncompress(ctx, in, src, putData, options, mimeType) 692 } 693 if err != nil { 694 return nil, err 695 } 696 697 mo, err := f.putMetadata(ctx, meta, src, options, putMeta) 698 699 // meta data upload may fail. in this case we try to remove the original object 700 if err != nil { 701 removeError := dataObject.Remove(ctx) 702 if removeError != nil { 703 return nil, removeError 704 } 705 return nil, err 706 } 707 return f.newObject(dataObject, mo, meta), nil 708 } 709 710 // Put in to the remote path with the modTime given of the given size 711 // 712 // May create the object even if it returns an error - if so 713 // will return the object and the error, otherwise will return 714 // nil and the error 715 func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 716 // If there's already an existent objects we need to make sure to explicitly update it to make sure we don't leave 717 // orphaned data. Alternatively we could also deleted (which would simpler) but has the disadvantage that it 718 // destroys all server-side versioning. 719 o, err := f.NewObject(ctx, src.Remote()) 720 if err == fs.ErrorObjectNotFound { 721 // Get our file compressibility 722 in, compressible, mimeType, err := checkCompressAndType(in) 723 if err != nil { 724 return nil, err 725 } 726 return f.putWithCustomFunctions(ctx, in, src, options, f.Fs.Put, f.Fs.Put, compressible, mimeType) 727 } 728 if err != nil { 729 return nil, err 730 } 731 return o, o.Update(ctx, in, src, options...) 732 } 733 734 // PutStream uploads to the remote path with the modTime given of indeterminate size 735 func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 736 oldObj, err := f.NewObject(ctx, src.Remote()) 737 if err != nil && err != fs.ErrorObjectNotFound { 738 return nil, err 739 } 740 found := err == nil 741 742 in, compressible, mimeType, err := checkCompressAndType(in) 743 if err != nil { 744 return nil, err 745 } 746 newObj, err := f.putWithCustomFunctions(ctx, in, src, options, f.Fs.Features().PutStream, f.Fs.Put, compressible, mimeType) 747 if err != nil { 748 return nil, err 749 } 750 751 // Our transfer is now complete. We have to make sure to remove the old object because our new object will 752 // have a different name except when both the old and the new object where uncompressed. 753 if found && (oldObj.(*Object).meta.Mode != Uncompressed || compressible) { 754 err = oldObj.(*Object).Object.Remove(ctx) 755 if err != nil { 756 return nil, fmt.Errorf("couldn't remove original object: %w", err) 757 } 758 } 759 760 // If our new object is compressed we have to rename it with the correct size. 761 // Uncompressed objects don't store the size in the name so we they'll already have the correct name. 762 if compressible { 763 wrapObj, err := operations.Move(ctx, f.Fs, nil, f.dataName(src.Remote(), newObj.size, compressible), newObj.Object) 764 if err != nil { 765 return nil, fmt.Errorf("couldn't rename streamed object: %w", err) 766 } 767 newObj.Object = wrapObj 768 } 769 return newObj, nil 770 } 771 772 // Temporarily disabled. There might be a way to implement this correctly but with the current handling metadata duplicate objects 773 // will break stuff. Right no I can't think of a way to make this work. 774 775 // PutUnchecked uploads the object 776 // 777 // This will create a duplicate if we upload a new file without 778 // checking to see if there is one already - use Put() for that. 779 780 // Hashes returns the supported hash sets. 781 func (f *Fs) Hashes() hash.Set { 782 return hash.Set(hash.MD5) 783 } 784 785 // Mkdir makes the directory (container, bucket) 786 // 787 // Shouldn't return an error if it already exists 788 func (f *Fs) Mkdir(ctx context.Context, dir string) error { 789 return f.Fs.Mkdir(ctx, dir) 790 } 791 792 // MkdirMetadata makes the root directory of the Fs object 793 func (f *Fs) MkdirMetadata(ctx context.Context, dir string, metadata fs.Metadata) (fs.Directory, error) { 794 if do := f.Fs.Features().MkdirMetadata; do != nil { 795 return do(ctx, dir, metadata) 796 } 797 return nil, fs.ErrorNotImplemented 798 } 799 800 // Rmdir removes the directory (container, bucket) if empty 801 // 802 // Return an error if it doesn't exist or isn't empty 803 func (f *Fs) Rmdir(ctx context.Context, dir string) error { 804 return f.Fs.Rmdir(ctx, dir) 805 } 806 807 // Purge all files in the root and the root directory 808 // 809 // Implement this if you have a way of deleting all the files 810 // quicker than just running Remove() on the result of List() 811 // 812 // Return an error if it doesn't exist 813 func (f *Fs) Purge(ctx context.Context, dir string) error { 814 do := f.Fs.Features().Purge 815 if do == nil { 816 return fs.ErrorCantPurge 817 } 818 return do(ctx, dir) 819 } 820 821 // Copy src to this remote using server side copy operations. 822 // 823 // This is stored with the remote path given. 824 // 825 // It returns the destination Object and a possible error. 826 // 827 // Will only be called if src.Fs().Name() == f.Name() 828 // 829 // If it isn't possible then return fs.ErrorCantCopy 830 func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 831 do := f.Fs.Features().Copy 832 if do == nil { 833 return nil, fs.ErrorCantCopy 834 } 835 o, ok := src.(*Object) 836 if !ok { 837 return nil, fs.ErrorCantCopy 838 } 839 // We might be trying to overwrite a file with a newer version but due to size difference the name 840 // is different. Therefore we have to remove the old file first (if it exists). 841 dstFile, err := f.NewObject(ctx, remote) 842 if err != nil && err != fs.ErrorObjectNotFound { 843 return nil, err 844 } 845 if err == nil { 846 err := dstFile.Remove(ctx) 847 if err != nil { 848 return nil, err 849 } 850 } 851 852 // Copy over metadata 853 err = o.loadMetadataIfNotLoaded(ctx) 854 if err != nil { 855 return nil, err 856 } 857 newFilename := makeMetadataName(remote) 858 moResult, err := do(ctx, o.mo, newFilename) 859 if err != nil { 860 return nil, err 861 } 862 // Copy over data 863 newFilename = makeDataName(remote, src.Size(), o.meta.Mode) 864 oResult, err := do(ctx, o.Object, newFilename) 865 if err != nil { 866 return nil, err 867 } 868 return f.newObject(oResult, moResult, o.meta), nil 869 } 870 871 // Move src to this remote using server side move operations. 872 // 873 // This is stored with the remote path given. 874 // 875 // It returns the destination Object and a possible error. 876 // 877 // Will only be called if src.Fs().Name() == f.Name() 878 // 879 // If it isn't possible then return fs.ErrorCantMove 880 func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { 881 do := f.Fs.Features().Move 882 if do == nil { 883 return nil, fs.ErrorCantMove 884 } 885 o, ok := src.(*Object) 886 if !ok { 887 return nil, fs.ErrorCantMove 888 } 889 // We might be trying to overwrite a file with a newer version but due to size difference the name 890 // is different. Therefore we have to remove the old file first (if it exists). 891 dstFile, err := f.NewObject(ctx, remote) 892 if err != nil && err != fs.ErrorObjectNotFound { 893 return nil, err 894 } 895 if err == nil { 896 err := dstFile.Remove(ctx) 897 if err != nil { 898 return nil, err 899 } 900 } 901 902 // Move metadata 903 err = o.loadMetadataIfNotLoaded(ctx) 904 if err != nil { 905 return nil, err 906 } 907 newFilename := makeMetadataName(remote) 908 moResult, err := do(ctx, o.mo, newFilename) 909 if err != nil { 910 return nil, err 911 } 912 913 // Move data 914 newFilename = makeDataName(remote, src.Size(), o.meta.Mode) 915 oResult, err := do(ctx, o.Object, newFilename) 916 if err != nil { 917 return nil, err 918 } 919 return f.newObject(oResult, moResult, o.meta), nil 920 } 921 922 // DirMove moves src, srcRemote to this remote at dstRemote 923 // using server side move operations. 924 // 925 // Will only be called if src.Fs().Name() == f.Name() 926 // 927 // If it isn't possible then return fs.ErrorCantDirMove 928 // 929 // If destination exists then return fs.ErrorDirExists 930 func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { 931 do := f.Fs.Features().DirMove 932 if do == nil { 933 return fs.ErrorCantDirMove 934 } 935 srcFs, ok := src.(*Fs) 936 if !ok { 937 fs.Debugf(srcFs, "Can't move directory - not same remote type") 938 return fs.ErrorCantDirMove 939 } 940 return do(ctx, srcFs.Fs, srcRemote, dstRemote) 941 } 942 943 // DirSetModTime sets the directory modtime for dir 944 func (f *Fs) DirSetModTime(ctx context.Context, dir string, modTime time.Time) error { 945 if do := f.Fs.Features().DirSetModTime; do != nil { 946 return do(ctx, dir, modTime) 947 } 948 return fs.ErrorNotImplemented 949 } 950 951 // CleanUp the trash in the Fs 952 // 953 // Implement this if you have a way of emptying the trash or 954 // otherwise cleaning up old versions of files. 955 func (f *Fs) CleanUp(ctx context.Context) error { 956 do := f.Fs.Features().CleanUp 957 if do == nil { 958 return errors.New("not supported by underlying remote") 959 } 960 return do(ctx) 961 } 962 963 // About gets quota information from the Fs 964 func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { 965 do := f.Fs.Features().About 966 if do == nil { 967 return nil, errors.New("not supported by underlying remote") 968 } 969 return do(ctx) 970 } 971 972 // UnWrap returns the Fs that this Fs is wrapping 973 func (f *Fs) UnWrap() fs.Fs { 974 return f.Fs 975 } 976 977 // WrapFs returns the Fs that is wrapping this Fs 978 func (f *Fs) WrapFs() fs.Fs { 979 return f.wrapper 980 } 981 982 // SetWrapper sets the Fs that is wrapping this Fs 983 func (f *Fs) SetWrapper(wrapper fs.Fs) { 984 f.wrapper = wrapper 985 } 986 987 // MergeDirs merges the contents of all the directories passed 988 // in into the first one and rmdirs the other directories. 989 func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { 990 do := f.Fs.Features().MergeDirs 991 if do == nil { 992 return errors.New("MergeDirs not supported") 993 } 994 out := make([]fs.Directory, len(dirs)) 995 for i, dir := range dirs { 996 out[i] = fs.NewDirCopy(ctx, dir).SetRemote(dir.Remote()) 997 } 998 return do(ctx, out) 999 } 1000 1001 // DirCacheFlush resets the directory cache - used in testing 1002 // as an optional interface 1003 func (f *Fs) DirCacheFlush() { 1004 do := f.Fs.Features().DirCacheFlush 1005 if do != nil { 1006 do() 1007 } 1008 } 1009 1010 // ChangeNotify calls the passed function with a path 1011 // that has had changes. If the implementation 1012 // uses polling, it should adhere to the given interval. 1013 func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { 1014 do := f.Fs.Features().ChangeNotify 1015 if do == nil { 1016 return 1017 } 1018 wrappedNotifyFunc := func(path string, entryType fs.EntryType) { 1019 fs.Logf(f, "path %q entryType %d", path, entryType) 1020 var ( 1021 wrappedPath string 1022 isMetadataFile bool 1023 ) 1024 switch entryType { 1025 case fs.EntryDirectory: 1026 wrappedPath = path 1027 case fs.EntryObject: 1028 // Note: All we really need to do to monitor the object is to check whether the metadata changed, 1029 // as the metadata contains the hash. This will work unless there's a hash collision and the sizes stay the same. 1030 wrappedPath, isMetadataFile = unwrapMetadataFile(path) 1031 if !isMetadataFile { 1032 return 1033 } 1034 default: 1035 fs.Errorf(path, "press ChangeNotify: ignoring unknown EntryType %d", entryType) 1036 return 1037 } 1038 notifyFunc(wrappedPath, entryType) 1039 } 1040 do(ctx, wrappedNotifyFunc, pollIntervalChan) 1041 } 1042 1043 // PublicLink generates a public link to the remote path (usually readable by anyone) 1044 func (f *Fs) PublicLink(ctx context.Context, remote string, duration fs.Duration, unlink bool) (string, error) { 1045 do := f.Fs.Features().PublicLink 1046 if do == nil { 1047 return "", errors.New("can't PublicLink: not supported by underlying remote") 1048 } 1049 o, err := f.NewObject(ctx, remote) 1050 if err != nil { 1051 // assume it is a directory 1052 return do(ctx, remote, duration, unlink) 1053 } 1054 return do(ctx, o.(*Object).Object.Remote(), duration, unlink) 1055 } 1056 1057 /*** OBJECT FUNCTIONS ***/ 1058 1059 // ObjectMetadata describes the metadata for an Object. 1060 type ObjectMetadata struct { 1061 Mode int // Compression mode of the file. 1062 Size int64 // Size of the object. 1063 MD5 string // MD5 hash of the file. 1064 MimeType string // Mime type of the file 1065 CompressionMetadata sgzip.GzipMetadata 1066 } 1067 1068 // Object with external metadata 1069 type Object struct { 1070 fs.Object // Wraps around data object for this object 1071 f *Fs // Filesystem object is in 1072 mo fs.Object // Metadata object for this object 1073 moName string // Metadata file name for this object 1074 size int64 // Size of this object 1075 meta *ObjectMetadata // Metadata struct for this object (nil if not loaded) 1076 } 1077 1078 // This function generates a metadata object 1079 func newMetadata(size int64, mode int, cmeta sgzip.GzipMetadata, md5 string, mimeType string) *ObjectMetadata { 1080 meta := new(ObjectMetadata) 1081 meta.Size = size 1082 meta.Mode = mode 1083 meta.CompressionMetadata = cmeta 1084 meta.MD5 = md5 1085 meta.MimeType = mimeType 1086 return meta 1087 } 1088 1089 // This function will read the metadata from a metadata object. 1090 func readMetadata(ctx context.Context, mo fs.Object) (meta *ObjectMetadata, err error) { 1091 // Open our meradata object 1092 rc, err := mo.Open(ctx) 1093 if err != nil { 1094 return nil, err 1095 } 1096 defer fs.CheckClose(rc, &err) 1097 jr := json.NewDecoder(rc) 1098 meta = new(ObjectMetadata) 1099 if err = jr.Decode(meta); err != nil { 1100 return nil, err 1101 } 1102 return meta, nil 1103 } 1104 1105 // Remove removes this object 1106 func (o *Object) Remove(ctx context.Context) error { 1107 err := o.loadMetadataObjectIfNotLoaded(ctx) 1108 if err != nil { 1109 return err 1110 } 1111 err = o.mo.Remove(ctx) 1112 objErr := o.Object.Remove(ctx) 1113 if err != nil { 1114 return err 1115 } 1116 return objErr 1117 } 1118 1119 // ReadCloserWrapper combines a Reader and a Closer to a ReadCloser 1120 type ReadCloserWrapper struct { 1121 io.Reader 1122 io.Closer 1123 } 1124 1125 // Update in to the object with the modTime given of the given size 1126 func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { 1127 err = o.loadMetadataIfNotLoaded(ctx) // Loads metadata object too 1128 if err != nil { 1129 return err 1130 } 1131 // Function that updates metadata object 1132 updateMeta := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 1133 return o.mo, o.mo.Update(ctx, in, src, options...) 1134 } 1135 1136 in, compressible, mimeType, err := checkCompressAndType(in) 1137 if err != nil { 1138 return err 1139 } 1140 1141 // Since we are storing the filesize in the name the new object may have different name than the old 1142 // We'll make sure to delete the old object in this case 1143 var newObject *Object 1144 origName := o.Remote() 1145 if o.meta.Mode != Uncompressed || compressible { 1146 newObject, err = o.f.putWithCustomFunctions(ctx, in, o.f.wrapInfo(src, origName, src.Size()), options, o.f.Fs.Put, updateMeta, compressible, mimeType) 1147 if err != nil { 1148 return err 1149 } 1150 if newObject.Object.Remote() != o.Object.Remote() { 1151 if removeErr := o.Object.Remove(ctx); removeErr != nil { 1152 return removeErr 1153 } 1154 } 1155 } else { 1156 // We can only support update when BOTH the old and the new object are uncompressed because only then 1157 // the filesize will be known beforehand and name will stay the same 1158 update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { 1159 return o.Object, o.Object.Update(ctx, in, src, options...) 1160 } 1161 // If we are, just update the object and metadata 1162 newObject, err = o.f.putWithCustomFunctions(ctx, in, src, options, update, updateMeta, compressible, mimeType) 1163 if err != nil { 1164 return err 1165 } 1166 } 1167 // Update object metadata and return 1168 o.Object = newObject.Object 1169 o.meta = newObject.meta 1170 o.size = newObject.size 1171 return nil 1172 } 1173 1174 // This will initialize the variables of a new press Object. The metadata object, mo, and metadata struct, meta, must be specified. 1175 func (f *Fs) newObject(o fs.Object, mo fs.Object, meta *ObjectMetadata) *Object { 1176 if o == nil { 1177 log.Trace(nil, "newObject(%#v, %#v, %#v) called with nil o", o, mo, meta) 1178 } 1179 return &Object{ 1180 Object: o, 1181 f: f, 1182 mo: mo, 1183 moName: mo.Remote(), 1184 size: meta.Size, 1185 meta: meta, 1186 } 1187 } 1188 1189 // This initializes the variables of a press Object with only the size. The metadata will be loaded later on demand. 1190 func (f *Fs) newObjectSizeAndNameOnly(o fs.Object, moName string, size int64) *Object { 1191 if o == nil { 1192 log.Trace(nil, "newObjectSizeAndNameOnly(%#v, %#v, %#v) called with nil o", o, moName, size) 1193 } 1194 return &Object{ 1195 Object: o, 1196 f: f, 1197 mo: nil, 1198 moName: moName, 1199 size: size, 1200 meta: nil, 1201 } 1202 } 1203 1204 // Shutdown the backend, closing any background tasks and any 1205 // cached connections. 1206 func (f *Fs) Shutdown(ctx context.Context) error { 1207 do := f.Fs.Features().Shutdown 1208 if do == nil { 1209 return nil 1210 } 1211 return do(ctx) 1212 } 1213 1214 // This loads the metadata of a press Object if it's not loaded yet 1215 func (o *Object) loadMetadataIfNotLoaded(ctx context.Context) (err error) { 1216 err = o.loadMetadataObjectIfNotLoaded(ctx) 1217 if err != nil { 1218 return err 1219 } 1220 if o.meta == nil { 1221 o.meta, err = readMetadata(ctx, o.mo) 1222 } 1223 return err 1224 } 1225 1226 // This loads the metadata object of a press Object if it's not loaded yet 1227 func (o *Object) loadMetadataObjectIfNotLoaded(ctx context.Context) (err error) { 1228 if o.mo == nil { 1229 o.mo, err = o.f.Fs.NewObject(ctx, o.moName) 1230 } 1231 return err 1232 } 1233 1234 // Fs returns read only access to the Fs that this object is part of 1235 func (o *Object) Fs() fs.Info { 1236 return o.f 1237 } 1238 1239 // Return a string version 1240 func (o *Object) String() string { 1241 if o == nil { 1242 return "<nil>" 1243 } 1244 return o.Remote() 1245 } 1246 1247 // Remote returns the remote path 1248 func (o *Object) Remote() string { 1249 origFileName, _, _, err := processFileName(o.Object.Remote()) 1250 if err != nil { 1251 fs.Errorf(o.f, "Could not get remote path for: %s", o.Object.Remote()) 1252 return o.Object.Remote() 1253 } 1254 return origFileName 1255 } 1256 1257 // Size returns the size of the file 1258 func (o *Object) Size() int64 { 1259 if o.meta == nil { 1260 return o.size 1261 } 1262 return o.meta.Size 1263 } 1264 1265 // MimeType returns the MIME type of the file 1266 func (o *Object) MimeType(ctx context.Context) string { 1267 err := o.loadMetadataIfNotLoaded(ctx) 1268 if err != nil { 1269 return "error/error" 1270 } 1271 return o.meta.MimeType 1272 } 1273 1274 // Metadata returns metadata for an object 1275 // 1276 // It should return nil if there is no Metadata 1277 func (o *Object) Metadata(ctx context.Context) (fs.Metadata, error) { 1278 err := o.loadMetadataIfNotLoaded(ctx) 1279 if err != nil { 1280 return nil, err 1281 } 1282 do, ok := o.mo.(fs.Metadataer) 1283 if !ok { 1284 return nil, nil 1285 } 1286 return do.Metadata(ctx) 1287 } 1288 1289 // SetMetadata sets metadata for an Object 1290 // 1291 // It should return fs.ErrorNotImplemented if it can't set metadata 1292 func (o *Object) SetMetadata(ctx context.Context, metadata fs.Metadata) error { 1293 do, ok := o.Object.(fs.SetMetadataer) 1294 if !ok { 1295 return fs.ErrorNotImplemented 1296 } 1297 return do.SetMetadata(ctx, metadata) 1298 } 1299 1300 // Hash returns the selected checksum of the file 1301 // If no checksum is available it returns "" 1302 func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { 1303 if ht != hash.MD5 { 1304 return "", hash.ErrUnsupported 1305 } 1306 err := o.loadMetadataIfNotLoaded(ctx) 1307 if err != nil { 1308 return "", err 1309 } 1310 return o.meta.MD5, nil 1311 } 1312 1313 // SetTier performs changing storage tier of the Object if 1314 // multiple storage classes supported 1315 func (o *Object) SetTier(tier string) error { 1316 do, ok := o.Object.(fs.SetTierer) 1317 mdo, mok := o.mo.(fs.SetTierer) 1318 if !(ok && mok) { 1319 return errors.New("press: underlying remote does not support SetTier") 1320 } 1321 if err := mdo.SetTier(tier); err != nil { 1322 return err 1323 } 1324 return do.SetTier(tier) 1325 } 1326 1327 // GetTier returns storage tier or class of the Object 1328 func (o *Object) GetTier() string { 1329 do, ok := o.mo.(fs.GetTierer) 1330 if !ok { 1331 return "" 1332 } 1333 return do.GetTier() 1334 } 1335 1336 // UnWrap returns the wrapped Object 1337 func (o *Object) UnWrap() fs.Object { 1338 return o.Object 1339 } 1340 1341 // Open opens the file for read. Call Close() on the returned io.ReadCloser. Note that this call requires quite a bit of overhead. 1342 func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { 1343 err = o.loadMetadataIfNotLoaded(ctx) 1344 if err != nil { 1345 return nil, err 1346 } 1347 // If we're uncompressed, just pass this to the underlying object 1348 if o.meta.Mode == Uncompressed { 1349 return o.Object.Open(ctx, options...) 1350 } 1351 // Get offset and limit from OpenOptions, pass the rest to the underlying remote 1352 var openOptions = []fs.OpenOption{&fs.SeekOption{Offset: 0}} 1353 var offset, limit int64 = 0, -1 1354 for _, option := range options { 1355 switch x := option.(type) { 1356 case *fs.SeekOption: 1357 offset = x.Offset 1358 case *fs.RangeOption: 1359 offset, limit = x.Decode(o.Size()) 1360 default: 1361 openOptions = append(openOptions, option) 1362 } 1363 } 1364 // Get a chunkedreader for the wrapped object 1365 chunkedReader := chunkedreader.New(ctx, o.Object, initialChunkSize, maxChunkSize) 1366 // Get file handle 1367 var file io.Reader 1368 if offset != 0 { 1369 file, err = sgzip.NewReaderAt(chunkedReader, &o.meta.CompressionMetadata, offset) 1370 } else { 1371 file, err = sgzip.NewReader(chunkedReader) 1372 } 1373 if err != nil { 1374 return nil, err 1375 } 1376 1377 var fileReader io.Reader 1378 if limit != -1 { 1379 fileReader = io.LimitReader(file, limit) 1380 } else { 1381 fileReader = file 1382 } 1383 // Return a ReadCloser 1384 return ReadCloserWrapper{Reader: fileReader, Closer: chunkedReader}, nil 1385 } 1386 1387 // ObjectInfo describes a wrapped fs.ObjectInfo for being the source 1388 type ObjectInfo struct { 1389 src fs.ObjectInfo 1390 fs *Fs 1391 remote string 1392 size int64 1393 } 1394 1395 func (f *Fs) wrapInfo(src fs.ObjectInfo, newRemote string, size int64) *ObjectInfo { 1396 return &ObjectInfo{ 1397 src: src, 1398 fs: f, 1399 remote: newRemote, 1400 size: size, 1401 } 1402 } 1403 1404 // Fs returns read only access to the Fs that this object is part of 1405 func (o *ObjectInfo) Fs() fs.Info { 1406 if o.fs == nil { 1407 panic("stub ObjectInfo") 1408 } 1409 return o.fs 1410 } 1411 1412 // String returns string representation 1413 func (o *ObjectInfo) String() string { 1414 return o.src.String() 1415 } 1416 1417 // Storable returns whether object is storable 1418 func (o *ObjectInfo) Storable() bool { 1419 return o.src.Storable() 1420 } 1421 1422 // Remote returns the remote path 1423 func (o *ObjectInfo) Remote() string { 1424 if o.remote != "" { 1425 return o.remote 1426 } 1427 return o.src.Remote() 1428 } 1429 1430 // Size returns the size of the file 1431 func (o *ObjectInfo) Size() int64 { 1432 return o.size 1433 } 1434 1435 // ModTime returns the modification time 1436 func (o *ObjectInfo) ModTime(ctx context.Context) time.Time { 1437 return o.src.ModTime(ctx) 1438 } 1439 1440 // Hash returns the selected checksum of the file 1441 // If no checksum is available it returns "" 1442 func (o *ObjectInfo) Hash(ctx context.Context, ht hash.Type) (string, error) { 1443 return "", nil // cannot know the checksum 1444 } 1445 1446 // ID returns the ID of the Object if known, or "" if not 1447 func (o *ObjectInfo) ID() string { 1448 do, ok := o.src.(fs.IDer) 1449 if !ok { 1450 return "" 1451 } 1452 return do.ID() 1453 } 1454 1455 // MimeType returns the content type of the Object if 1456 // known, or "" if not 1457 func (o *ObjectInfo) MimeType(ctx context.Context) string { 1458 do, ok := o.src.(fs.MimeTyper) 1459 if !ok { 1460 return "" 1461 } 1462 return do.MimeType(ctx) 1463 } 1464 1465 // UnWrap returns the Object that this Object is wrapping or 1466 // nil if it isn't wrapping anything 1467 func (o *ObjectInfo) UnWrap() fs.Object { 1468 return fs.UnWrapObjectInfo(o.src) 1469 } 1470 1471 // Metadata returns metadata for an object 1472 // 1473 // It should return nil if there is no Metadata 1474 func (o *ObjectInfo) Metadata(ctx context.Context) (fs.Metadata, error) { 1475 do, ok := o.src.(fs.Metadataer) 1476 if !ok { 1477 return nil, nil 1478 } 1479 return do.Metadata(ctx) 1480 } 1481 1482 // GetTier returns storage tier or class of the Object 1483 func (o *ObjectInfo) GetTier() string { 1484 do, ok := o.src.(fs.GetTierer) 1485 if !ok { 1486 return "" 1487 } 1488 return do.GetTier() 1489 } 1490 1491 // ID returns the ID of the Object if known, or "" if not 1492 func (o *Object) ID() string { 1493 do, ok := o.Object.(fs.IDer) 1494 if !ok { 1495 return "" 1496 } 1497 return do.ID() 1498 } 1499 1500 // Name of the remote (as passed into NewFs) 1501 func (f *Fs) Name() string { 1502 return f.name 1503 } 1504 1505 // Root of the remote (as passed into NewFs) 1506 func (f *Fs) Root() string { 1507 return f.root 1508 } 1509 1510 // Features returns the optional features of this Fs 1511 func (f *Fs) Features() *fs.Features { 1512 return f.features 1513 } 1514 1515 // Return a string version 1516 func (f *Fs) String() string { 1517 return fmt.Sprintf("Compressed: %s:%s", f.name, f.root) 1518 } 1519 1520 // Precision returns the precision of this Fs 1521 func (f *Fs) Precision() time.Duration { 1522 return f.Fs.Precision() 1523 } 1524 1525 // Check the interfaces are satisfied 1526 var ( 1527 _ fs.Fs = (*Fs)(nil) 1528 _ fs.Purger = (*Fs)(nil) 1529 _ fs.Copier = (*Fs)(nil) 1530 _ fs.Mover = (*Fs)(nil) 1531 _ fs.DirMover = (*Fs)(nil) 1532 _ fs.DirSetModTimer = (*Fs)(nil) 1533 _ fs.MkdirMetadataer = (*Fs)(nil) 1534 _ fs.PutStreamer = (*Fs)(nil) 1535 _ fs.CleanUpper = (*Fs)(nil) 1536 _ fs.UnWrapper = (*Fs)(nil) 1537 _ fs.ListRer = (*Fs)(nil) 1538 _ fs.Abouter = (*Fs)(nil) 1539 _ fs.Wrapper = (*Fs)(nil) 1540 _ fs.MergeDirser = (*Fs)(nil) 1541 _ fs.DirCacheFlusher = (*Fs)(nil) 1542 _ fs.ChangeNotifier = (*Fs)(nil) 1543 _ fs.PublicLinker = (*Fs)(nil) 1544 _ fs.Shutdowner = (*Fs)(nil) 1545 _ fs.FullObjectInfo = (*ObjectInfo)(nil) 1546 _ fs.FullObject = (*Object)(nil) 1547 )