github.com/10XDev/rclone@v1.52.3-0.20200626220027-16af9ab76b2a/backend/tardigrade/fs.go

// +build go1.13,!plan9

// Package tardigrade provides an interface to Tardigrade decentralized object storage.
package tardigrade

import (
	"context"
	"fmt"
	"io"
	"log"
	"path"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/config/configmap"
	"github.com/rclone/rclone/fs/config/configstruct"
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/lib/bucket"
	"golang.org/x/text/unicode/norm"

	"storj.io/uplink"
)

const (
	existingProvider = "existing"
	newProvider      = "new"
)

var satMap = map[string]string{
	"us-central-1.tardigrade.io":  "12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
	"europe-west-1.tardigrade.io": "12L9ZFwhzVpuEKMUNUqkaTLGzwY9G24tbiigLiXpmZWKwmcNDDs@europe-west-1.tardigrade.io:7777",
	"asia-east-1.tardigrade.io":   "121RTSDpyNZVcEU84Ticf2L1ntiuUimbWgfATz21tuvgk3vzoA6@asia-east-1.tardigrade.io:7777",
}

// Register with Fs
func init() {
	fs.Register(&fs.RegInfo{
		Name:        "tardigrade",
		Description: "Tardigrade Decentralized Cloud Storage",
		NewFs:       NewFs,
		Config: func(name string, configMapper configmap.Mapper) {
			provider, _ := configMapper.Get(fs.ConfigProvider)

			config.FileDeleteKey(name, fs.ConfigProvider)

			if provider == newProvider {
				satelliteString, _ := configMapper.Get("satellite_address")
				apiKey, _ := configMapper.Get("api_key")
				passphrase, _ := configMapper.Get("passphrase")

				// satelliteString always contains the default and passphrase can be empty
				if apiKey == "" {
					return
				}

				satellite, found := satMap[satelliteString]
				if !found {
					satellite = satelliteString
				}

				access, err := uplink.RequestAccessWithPassphrase(context.TODO(), satellite, apiKey, passphrase)
				if err != nil {
					log.Fatalf("Couldn't create access grant: %v", err)
				}

				serializedAccess, err := access.Serialize()
				if err != nil {
					log.Fatalf("Couldn't serialize access grant: %v", err)
				}
				configMapper.Set("satellite_address", satellite)
				configMapper.Set("access_grant", serializedAccess)
			} else if provider == existingProvider {
				config.FileDeleteKey(name, "satellite_address")
				config.FileDeleteKey(name, "api_key")
				config.FileDeleteKey(name, "passphrase")
			} else {
				log.Fatalf("Invalid provider type: %s", provider)
			}
		},
		Options: []fs.Option{
			{
				Name:     fs.ConfigProvider,
				Help:     "Choose an authentication method.",
				Required: true,
				Default:  existingProvider,
				Examples: []fs.OptionExample{{
					Value: "existing",
					Help:  "Use an existing access grant.",
				}, {
					Value: newProvider,
					Help:  "Create a new access grant from satellite address, API key, and passphrase.",
				},
				}},
			{
				Name:     "access_grant",
				Help:     "Access Grant.",
				Required: false,
				Provider: "existing",
			},
			{
				Name:     "satellite_address",
				Help:     "Satellite Address. Custom satellite address should match the format: `<nodeid>@<address>:<port>`.",
				Required: false,
				Provider: newProvider,
				Default:  "us-central-1.tardigrade.io",
				Examples: []fs.OptionExample{{
					Value: "us-central-1.tardigrade.io",
					Help:  "US Central 1",
				}, {
					Value: "europe-west-1.tardigrade.io",
					Help:  "Europe West 1",
				}, {
					Value: "asia-east-1.tardigrade.io",
					Help:  "Asia East 1",
				},
				},
			},
			{
				Name:     "api_key",
				Help:     "API Key.",
				Required: false,
				Provider: newProvider,
			},
			{
				Name:     "passphrase",
				Help:     "Encryption Passphrase. To access existing objects enter the passphrase used for uploading.",
				Required: false,
				Provider: newProvider,
			},
		},
	})
}

// Options defines the configuration for this backend
type Options struct {
	Access string `config:"access_grant"`

	SatelliteAddress string `config:"satellite_address"`
	APIKey           string `config:"api_key"`
	Passphrase       string `config:"passphrase"`
}

// Fs represents a remote to Tardigrade
type Fs struct {
	name string // the name of the remote
	root string // root of the filesystem

	opts     Options      // parsed options
	features *fs.Features // optional features

	access *uplink.Access // parsed scope

	project *uplink.Project // project client
}

// Check the interfaces are satisfied.
var (
	_ fs.Fs          = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.PutStreamer = &Fs{}
)

// NewFs creates a filesystem backed by Tardigrade.
func NewFs(name, root string, m configmap.Mapper) (_ fs.Fs, err error) {
	ctx := context.Background()

	// Setup filesystem and connection to Tardigrade
	root = norm.NFC.String(root)
	root = strings.Trim(root, "/")

	f := &Fs{
		name: name,
		root: root,
	}

	// Parse config into Options struct
	err = configstruct.Set(m, &f.opts)
	if err != nil {
		return nil, err
	}

	// Parse access
	var access *uplink.Access

	if f.opts.Access != "" {
		access, err = uplink.ParseAccess(f.opts.Access)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}
	}

	if access == nil && f.opts.SatelliteAddress != "" && f.opts.APIKey != "" && f.opts.Passphrase != "" {
		access, err = uplink.RequestAccessWithPassphrase(ctx, f.opts.SatelliteAddress, f.opts.APIKey, f.opts.Passphrase)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}

		serializedAccess, err := access.Serialize()
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}

		err = config.SetValueAndSave(f.name, "access_grant", serializedAccess)
		if err != nil {
			return nil, errors.Wrap(err, "tardigrade: access")
		}
	}

	if access == nil {
		return nil, errors.New("access not found")
	}

	f.access = access

	f.features = (&fs.Features{
		BucketBased:       true,
		BucketBasedRootOK: true,
	}).Fill(f)

	project, err := f.connect(ctx)
	if err != nil {
		return nil, err
	}
	f.project = project

	// Root validation needs to check the following: If a bucket path is
	// specified and exists, then the object must be a directory.
	//
	// NOTE: At this point this must return the filesystem object we've
	// created so far even if there is an error.
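	//
	// For example, a root of "bucket/existing-object" is reset to "bucket"
	// below and NewFs returns (f, fs.ErrorIsFile), which is how rclone
	// detects a file-typed root.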
235 if root != "" { 236 bucketName, bucketPath := bucket.Split(root) 237 238 if bucketName != "" && bucketPath != "" { 239 _, err = project.StatBucket(ctx, bucketName) 240 if err != nil { 241 return f, errors.Wrap(err, "tardigrade: bucket") 242 } 243 244 object, err := project.StatObject(ctx, bucketName, bucketPath) 245 if err == nil { 246 if !object.IsPrefix { 247 // If the root is actually a file we 248 // need to return the *parent* 249 // directory of the root instead and an 250 // error that the original root 251 // requested is a file. 252 newRoot := path.Dir(f.root) 253 if newRoot == "." { 254 newRoot = "" 255 } 256 f.root = newRoot 257 258 return f, fs.ErrorIsFile 259 } 260 } 261 } 262 } 263 264 return f, nil 265 } 266 267 // connect opens a connection to Tardigrade. 268 func (f *Fs) connect(ctx context.Context) (project *uplink.Project, err error) { 269 fs.Debugf(f, "connecting...") 270 defer fs.Debugf(f, "connected: %+v", err) 271 272 cfg := uplink.Config{ 273 UserAgent: "rclone", 274 } 275 276 project, err = cfg.OpenProject(ctx, f.access) 277 if err != nil { 278 return nil, errors.Wrap(err, "tardigrade: project") 279 } 280 281 return 282 } 283 284 // absolute computes the absolute bucket name and path from the filesystem root 285 // and the relative path provided. 286 func (f *Fs) absolute(relative string) (bucketName, bucketPath string) { 287 bn, bp := bucket.Split(path.Join(f.root, relative)) 288 289 // NOTE: Technically libuplink does not care about the encoding. It is 290 // happy to work with them as opaque byte sequences. However, rclone 291 // has a test that requires two paths with the same normalized form 292 // (but different un-normalized forms) to point to the same file. This 293 // means we have to normalize before we interact with libuplink. 294 return norm.NFC.String(bn), norm.NFC.String(bp) 295 } 296 297 // Name of the remote (as passed into NewFs) 298 func (f *Fs) Name() string { 299 return f.name 300 } 301 302 // Root of the remote (as passed into NewFs) 303 func (f *Fs) Root() string { 304 return f.root 305 } 306 307 // String returns a description of the FS 308 func (f *Fs) String() string { 309 return fmt.Sprintf("FS sj://%s", f.root) 310 } 311 312 // Precision of the ModTimes in this Fs 313 func (f *Fs) Precision() time.Duration { 314 return time.Nanosecond 315 } 316 317 // Hashes returns the supported hash types of the filesystem. 318 func (f *Fs) Hashes() hash.Set { 319 return hash.NewHashSet() 320 } 321 322 // Features returns the optional features of this Fs 323 func (f *Fs) Features() *fs.Features { 324 return f.features 325 } 326 327 // List the objects and directories in relative into entries. The entries can 328 // be returned in any order but should be for a complete directory. 329 // 330 // relative should be "" to list the root, and should not have trailing 331 // slashes. 332 // 333 // This should return fs.ErrDirNotFound if the directory isn't found. 
func (f *Fs) List(ctx context.Context, relative string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "ls ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	defer func() {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			err = fs.ErrorDirNotFound
		}
	}()

	if bucketName == "" {
		if bucketPath != "" {
			return nil, fs.ErrorListBucketRequired
		}

		return f.listBuckets(ctx)
	}

	return f.listObjects(ctx, relative, bucketName, bucketPath)
}

func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "BKT ls")

	buckets := f.project.ListBuckets(ctx, nil)

	for buckets.Next() {
		bucket := buckets.Item()

		entries = append(entries, fs.NewDir(bucket.Name, bucket.Created))
	}

	return entries, buckets.Err()
}

// newDirEntry creates a directory entry from an uplink object.
//
// NOTE: Getting the exact behavior required by rclone is somewhat tricky. The
// path manipulation here is necessary to cover all the different ways the
// filesystem and object could be initialized and combined.
func (f *Fs) newDirEntry(relative, prefix string, object *uplink.Object) fs.DirEntry {
	if object.IsPrefix {
		//                         . The entry must include the relative path as its prefix. Depending on
		//                         | what is being listed and how the filesystem root was initialized the
		//                         | relative path may be empty (and so we use path joining here to ensure
		//                         | we don't end up with an empty path segment).
		//                         |
		//                         |                    . Remove the prefix used during listing.
		//                         |                    |
		//                         |                    |           . Remove the trailing slash.
		//                         |                    |           |
		//                         v                    v           v
		return fs.NewDir(path.Join(relative, object.Key[len(prefix):len(object.Key)-1]), object.System.Created)
	}

	return newObjectFromUplink(f, relative, object)
}

func (f *Fs) listObjects(ctx context.Context, relative, bucketName, bucketPath string) (entries fs.DirEntries, err error) {
	fs.Debugf(f, "OBJ ls ./%s (%q, %q)", relative, bucketName, bucketPath)

	opts := &uplink.ListObjectsOptions{
		Prefix: newPrefix(bucketPath),

		System: true,
		Custom: true,
	}
	fs.Debugf(f, "opts %+v", opts)

	objects := f.project.ListObjects(ctx, bucketName, opts)

	for objects.Next() {
		entries = append(entries, f.newDirEntry(relative, opts.Prefix, objects.Item()))
	}

	err = objects.Err()
	if err != nil {
		return nil, err
	}

	return entries, nil
}

// ListR lists the objects and directories of the Fs starting from dir
// recursively into out.
//
// relative should be "" to start from the root, and should not have trailing
// slashes.
//
// This should return ErrDirNotFound if the directory isn't found.
//
// It should call callback for each tranche of entries read. These need not be
// returned in any particular order. If callback returns an error then the
// listing will stop immediately.
//
// Don't implement this unless you have a more efficient way of listing
// recursively than doing a directory traversal.
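//
// Here libuplink can list a whole bucket recursively in a single server-side
// listing (see Recursive: true below), so ListR avoids the per-directory
// traversal described above.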
func (f *Fs) ListR(ctx context.Context, relative string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "ls -R ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	defer func() {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			err = fs.ErrorDirNotFound
		}
	}()

	if bucketName == "" {
		if bucketPath != "" {
			return fs.ErrorListBucketRequired
		}

		return f.listBucketsR(ctx, callback)
	}

	return f.listObjectsR(ctx, relative, bucketName, bucketPath, callback)
}

func (f *Fs) listBucketsR(ctx context.Context, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "BKT ls -R")

	buckets := f.project.ListBuckets(ctx, nil)

	for buckets.Next() {
		bucket := buckets.Item()

		err = f.listObjectsR(ctx, bucket.Name, bucket.Name, "", callback)
		if err != nil {
			return err
		}
	}

	return buckets.Err()
}

func (f *Fs) listObjectsR(ctx context.Context, relative, bucketName, bucketPath string, callback fs.ListRCallback) (err error) {
	fs.Debugf(f, "OBJ ls -R ./%s (%q, %q)", relative, bucketName, bucketPath)

	opts := &uplink.ListObjectsOptions{
		Prefix:    newPrefix(bucketPath),
		Recursive: true,

		System: true,
		Custom: true,
	}

	objects := f.project.ListObjects(ctx, bucketName, opts)

	for objects.Next() {
		object := objects.Item()

		err = callback(fs.DirEntries{f.newDirEntry(relative, opts.Prefix, object)})
		if err != nil {
			return err
		}
	}

	err = objects.Err()
	if err != nil {
		return err
	}

	return nil
}

// NewObject finds the Object at relative. If it can't be found it returns the
// error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, relative string) (_ fs.Object, err error) {
	fs.Debugf(f, "stat ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	object, err := f.project.StatObject(ctx, bucketName, bucketPath)
	if err != nil {
		fs.Debugf(f, "err: %+v", err)

		if errors.Is(err, uplink.ErrObjectNotFound) {
			return nil, fs.ErrorObjectNotFound
		}
		return nil, err
	}

	return newObjectFromUplink(f, relative, object), nil
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside an Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should
// either return an error or upload it properly (rather than e.g. calling
// panic).
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
	fs.Debugf(f, "cp input ./%s # %+v %d", src.Remote(), options, src.Size())

	// Reject options we don't support.
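	// (A mandatory option is one the caller requires to be honoured; since
	// this backend understands none of them, failing here is safer than
	// silently ignoring the request.)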
	for _, option := range options {
		if option.Mandatory() {
			fs.Errorf(f, "Unsupported mandatory option: %v", option)

			return nil, errors.New("unsupported mandatory option")
		}
	}

	bucketName, bucketPath := f.absolute(src.Remote())

	upload, err := f.project.UploadObject(ctx, bucketName, bucketPath, nil)
	if err != nil {
		return nil, err
	}
	defer func() {
		if err != nil {
			aerr := upload.Abort()
			if aerr != nil {
				fs.Errorf(f, "cp input ./%s %+v: %+v", src.Remote(), options, aerr)
			}
		}
	}()

	err = upload.SetCustomMetadata(ctx, uplink.CustomMetadata{
		"rclone:mtime": src.ModTime(ctx).Format(time.RFC3339Nano),
	})
	if err != nil {
		return nil, err
	}

	_, err = io.Copy(upload, in)
	if err != nil {
		err = fserrors.RetryError(err)
		fs.Errorf(f, "cp input ./%s %+v: %+v\n", src.Remote(), options, err)

		return nil, err
	}

	err = upload.Commit()
	if err != nil {
		if errors.Is(err, uplink.ErrBucketNotFound) {
			// Rclone assumes the backend will create the bucket if it doesn't exist yet.
			// Here we create the bucket and return a retry error so rclone retries the upload.
			_, err = f.project.EnsureBucket(ctx, bucketName)
			if err != nil {
				return nil, err
			}
			err = fserrors.RetryError(errors.New("bucket was not available, now created, the upload must be retried"))
		}
		return nil, err
	}

	return newObjectFromUplink(f, "", upload.Info()), nil
}

// PutStream uploads to the remote path with the modTime given of indeterminate
// size.
//
// May create the object even if it returns an error - if so will return the
// object and the error, otherwise will return nil and the error.
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (_ fs.Object, err error) {
	return f.Put(ctx, in, src, options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, relative string) (err error) {
	fs.Debugf(f, "mkdir -p ./%s", relative)

	bucketName, _ := f.absolute(relative)

	_, err = f.project.EnsureBucket(ctx, bucketName)

	return err
}

// Rmdir removes the directory (container, bucket)
//
// NOTE: Despite code documentation to the contrary, this method should not
// return an error if the directory does not exist.
func (f *Fs) Rmdir(ctx context.Context, relative string) (err error) {
	fs.Debugf(f, "rmdir ./%s", relative)

	bucketName, bucketPath := f.absolute(relative)

	if bucketPath != "" {
		// If we can successfully stat it, then it is an object (and not a prefix).
		_, err := f.project.StatObject(ctx, bucketName, bucketPath)
		if err != nil {
			if errors.Is(err, uplink.ErrObjectNotFound) {
				// At this point we know it is not an object,
				// but we don't know if it is a prefix for one.
				//
				// We check this by doing a listing and if we
				// get any results back, then we know this is a
				// valid prefix (which implies the directory is
				// not empty).
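				//
				// A single Next() call is enough: any item
				// returned proves the prefix is non-empty.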
632 opts := &uplink.ListObjectsOptions{ 633 Prefix: newPrefix(bucketPath), 634 635 System: true, 636 Custom: true, 637 } 638 639 objects := f.project.ListObjects(ctx, bucketName, opts) 640 641 if objects.Next() { 642 return fs.ErrorDirectoryNotEmpty 643 } 644 645 return objects.Err() 646 } 647 648 return err 649 } 650 651 return fs.ErrorIsFile 652 } 653 654 _, err = f.project.DeleteBucket(ctx, bucketName) 655 if err != nil { 656 if errors.Is(err, uplink.ErrBucketNotFound) { 657 return fs.ErrorDirNotFound 658 } 659 660 if errors.Is(err, uplink.ErrBucketNotEmpty) { 661 return fs.ErrorDirectoryNotEmpty 662 } 663 664 return err 665 } 666 667 return nil 668 } 669 670 // newPrefix returns a new prefix for listing conforming to the libuplink 671 // requirements. In particular, libuplink requires a trailing slash for 672 // listings, but rclone does not always provide one. Further, depending on how 673 // the path was initially path normalization may have removed it (e.g. a 674 // trailing slash from the CLI is removed before it ever gets to the backend 675 // code). 676 func newPrefix(prefix string) string { 677 if prefix == "" { 678 return prefix 679 } 680 681 if prefix[len(prefix)-1] == '/' { 682 return prefix 683 } 684 685 return prefix + "/" 686 }