github.com/attic-labs/noms@v0.0.0-20210827224422-e5fa29d95e8b/samples/go/nomsfs/nomsfs.go

// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

// +build darwin linux

package main

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"math"
	"os"
	"os/signal"
	"path"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/attic-labs/noms/go/config"
	"github.com/attic-labs/noms/go/d"
	"github.com/attic-labs/noms/go/datas"
	"github.com/attic-labs/noms/go/hash"
	"github.com/attic-labs/noms/go/nomdl"
	"github.com/attic-labs/noms/go/types"

	"github.com/hanwen/go-fuse/fuse"
	"github.com/hanwen/go-fuse/fuse/nodefs"
	"github.com/hanwen/go-fuse/fuse/pathfs"
)

// NomsFS
//
// This is an implementation of a FUSE filesystem on top of Noms. The hierarchy is arranged with the following basic types:
//
// Filesystem {
//	root Inode
// }
//
// Inode {
//	attr Attr {
//		ctime Number
//		gid Number
//		mtime Number
//		mode Number
//		uid Number
//		xattr Map<String, Blob>
//	}
//	contents File | Symlink | Directory
// }
//
// File {
//	data Ref<Blob>
// }
//
// Symlink {
//	targetPath String
// }
//
// Directory {
//	entries Map<String, Inode>
// }
//
// While we don't currently support hard links, this could be achieved by storing a list of parents rather than a single parent *and* processing all metadata every time we mount a dataset to identify commonalities. Hard links are stupid anyways.
//
// XXX TODO: If the head gets out of sync it actually shouldn't be a problem to resync and try a transaction again (though we may need to redo some of the error checking that FUSE has done for us). The place where it may be problematic is around writes to open files, where we rely on the in-core parent map. If we get out of sync we will need to invalidate that map in part or whole. We can try to be smart about it, but in certain circumstances (e.g. if an open file has moved) there's not much we can do other than return fuse.EBADF (or something). We can add a valid bit to the nNode structure so that open files will be able to tell.
//
// XXX TODO: The map of nodes really only needs entries that pertain to open files and their paths. That structure should likely be refcounted; when an nNode's count goes to 0 it can be removed from the map. This would also help with re-syncing, since fixing up the smaller map would be faster.

type nomsFile struct {
	nodefs.File

	fs   *nomsFS
	node *nNode
}

type nomsFS struct {
	pathfs.FileSystem

	mdLock *sync.Mutex // protects filesystem metadata

	db   datas.Database
	ds   datas.Dataset
	head types.Struct

	// This map lets us find the name of a file and its parent given an inode. This lets us splice changes back into the hierarchy upon modification.
	nodes map[hash.Hash]*nNode
}
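
// The sketch below is illustrative only and is not called anywhere; the helper name and
// the assumption that `name` denotes a plain file directly under the root directory are
// hypothetical. It shows how the Filesystem/Inode/Directory/File shapes described above
// are traversed to read a file's bytes, using only accessors already used elsewhere in
// this file.
func readRootFileExample(db datas.Database, head types.Struct, name string) ([]byte, bool) {
	root := head.Get("root").(types.Struct)                                   // Filesystem.root is an Inode
	entries := root.Get("contents").(types.Struct).Get("entries").(types.Map) // Directory.entries
	v, ok := entries.MaybeGet(types.String(name))
	if !ok {
		return nil, false
	}
	file := v.(types.Struct).Get("contents").(types.Struct) // assumes a File, not a Directory or Symlink
	blob := file.Get("data").(types.Ref).TargetValue(db).(types.Blob)
	data := make([]byte, blob.Len())
	blob.ReadAt(data, 0)
	return data, true
}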

// This represents a node in the filesystem hierarchy. The key will match the hash for the inode unless there is cached, yet-to-be-flushed data.
type nNode struct {
	nLock  *sync.Mutex
	parent *nNode
	name   string
	key    hash.Hash
	inode  types.Struct
}

type mount func(fs pathfs.FileSystem)

var fsType, inodeType, attrType, directoryType, fileType, symlinkType *types.Type

func init() {
	inodeType = nomdl.MustParseType(`Struct Inode {
		attr: Struct Attr {
			ctime: Number,
			gid: Number,
			mode: Number,
			mtime: Number,
			uid: Number,
			xattr: Map<String, Blob>,
		},
		contents: Struct Symlink {
			targetPath: String,
		} | Struct File {
			data: Ref<Blob>,
		} | Struct Directory {
			entries: Map<String, Cycle<Inode>>,
		},
	}`)

	// Root around for some useful types.
	attrType, _ = inodeType.Desc.(types.StructDesc).Field("attr")
	contentsType, _ := inodeType.Desc.(types.StructDesc).Field("contents")
	for _, elemType := range contentsType.Desc.(types.CompoundDesc).ElemTypes {
		switch elemType.Desc.(types.StructDesc).Name {
		case "Directory":
			directoryType = elemType
		case "File":
			fileType = elemType
		case "Symlink":
			symlinkType = elemType
		}
	}

	fsType = types.MakeStructType("Filesystem", types.StructField{
		Name: "root",
		Type: inodeType,
	})
}

func start(dataset string, mount mount) {
	cfg := config.NewResolver()
	db, ds, err := cfg.GetDataset(dataset)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Could not create dataset: %s\n", err)
		return
	}

	hv, ok := ds.MaybeHeadValue()
	if ok {
		if !types.IsValueSubtypeOf(hv, fsType) {
			fmt.Fprintf(os.Stderr, "Invalid dataset head: expected type '%s' but found type '%s'\n", fsType.Desc.(types.StructDesc).Name, types.TypeOf(hv).Desc.(types.StructDesc).Name)
			return
		}
	} else {
		rootAttr := makeAttr(db, 0777) // create the root directory with maximally permissive permissions
		rootDir := types.NewStruct("Directory", types.StructData{
			"entries": types.NewMap(db),
		})
		rootInode := types.NewStruct("Inode", types.StructData{
			"attr":     rootAttr,
			"contents": rootDir,
		})
		hv = types.NewStruct("Filesystem", types.StructData{
			"root": rootInode,
		})
	}

	mount(&nomsFS{
		FileSystem: pathfs.NewDefaultFileSystem(),
		db:         db,
		ds:         ds,
		head:       hv.(types.Struct),
		mdLock:     &sync.Mutex{},
		nodes:      make(map[hash.Hash]*nNode),
	})
}

var debug bool

func main() {
	flag.BoolVar(&debug, "d", false, "debug")
	flag.Parse()
	if len(flag.Args()) < 2 {
		fmt.Fprintf(os.Stderr, "Usage: %s dataset mount_point\n", path.Base(os.Args[0]))
		return
	}

	start(flag.Arg(0), func(fs pathfs.FileSystem) {
		nfs := pathfs.NewPathNodeFs(fs, nil)

		server, _, err := nodefs.MountRoot(flag.Arg(1), nfs.Root(), &nodefs.Options{Debug: debug})
		if err != nil {
			fmt.Println("Mount failed; attempting unmount")
			syscall.Unmount(flag.Arg(1), 0)
			server, _, err = nodefs.MountRoot(flag.Arg(1), nfs.Root(), &nodefs.Options{Debug: debug})
		}
		if err != nil {
			fmt.Fprintf(os.Stderr, "Mount failed: %s\n", err)
			return
		}

		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGINT)
		go func() {
			<-sig
			fmt.Println("unmounting...")
			server.Unmount()
			// Ignore any subsequent ^C
			signal.Reset(syscall.SIGINT)
		}()

		fmt.Println("running...")
		server.Serve()
		fmt.Println("done.")
	})
}
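
// Example invocation (paths are hypothetical; the dataset argument is a noms dataset
// spec of the form <database>::<dataset>):
//
//	nomsfs /path/to/noms-db::fs /mnt/nomsfs
//
// Ctrl-C unmounts the filesystem and exits.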

func (fs *nomsFS) StatFs(path string) *fuse.StatfsOut {
	// We'll pretend this is a 4PB device that could hold a billion files, a truthful hyperbole.
	return &fuse.StatfsOut{
		Bsize:  4096,
		Blocks: 1 << 40,
		Bfree:  1 << 40,
		Bavail: 1 << 40,
		Files:  1 << 30,
		Ffree:  1 << 30,
	}
}

func (fs *nomsFS) OpenDir(path string, context *fuse.Context) ([]fuse.DirEntry, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return nil, code
	}

	inode := np.inode

	if nodeType(inode) != "Directory" {
		return nil, fuse.ENOTDIR
	}

	entries := inode.Get("contents").(types.Struct).Get("entries").(types.Map)

	c := make([]fuse.DirEntry, 0, entries.Len())
	entries.IterAll(func(k, v types.Value) {
		c = append(c, fuse.DirEntry{
			Name: string(k.(types.String)),
		})
	})

	return c, fuse.OK
}

func (fs *nomsFS) Open(path string, flags uint32, context *fuse.Context) (nodefs.File, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return nil, code
	}

	nfile := nomsFile{
		File: nodefs.NewDefaultFile(),

		fs:   fs,
		node: np,
	}

	return nfile, fuse.OK
}

func (fs *nomsFS) Truncate(path string, size uint64, context *fuse.Context) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return code
	}

	np.nLock.Lock()
	defer np.nLock.Unlock()

	inode := np.inode
	attr := inode.Get("attr").(types.Struct)
	file := inode.Get("contents").(types.Struct)
	ref := file.Get("data").(types.Ref)
	blob := ref.TargetValue(fs.db).(types.Blob)

	// Note: this assumes size <= blob.Len(); truncating to a larger size would make the splice length underflow.
	blob = blob.Edit().Splice(size, blob.Len()-size, nil).Blob()
	ref = fs.db.WriteValue(blob)
	file = file.Set("data", ref)

	inode = inode.Set("contents", file).Set("attr", updateMtime(attr))
	fs.updateNode(np, inode)
	fs.splice(np)
	fs.commit()

	return fuse.OK
}

func (fs *nomsFS) Create(path string, flags uint32, mode uint32, context *fuse.Context) (nodefs.File, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.createCommon(path, mode, func() types.Value {
		blob := types.NewEmptyBlob(fs.db)
		return types.NewStruct("File", types.StructData{
			"data": fs.ds.Database().WriteValue(blob),
		})
	})
	if code != fuse.OK {
		return nil, code
	}

	nfile := nomsFile{
		File: nodefs.NewDefaultFile(),

		fs:   fs,
		node: np,
	}
	return nfile, fuse.OK
}

func (fs *nomsFS) Mkdir(path string, mode uint32, context *fuse.Context) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	_, code := fs.createCommon(path, mode, func() types.Value {
		return types.NewStruct("Directory", types.StructData{
			"entries": types.NewMap(fs.db),
		})
	})

	return code
}

func (fs *nomsFS) Symlink(targetPath string, path string, context *fuse.Context) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	_, code := fs.createCommon(path, 0755, func() types.Value {
		return types.NewStruct("Symlink", types.StructData{
			"targetPath": types.String(targetPath),
		})
	})

	return code
}

func (fs *nomsFS) createCommon(path string, mode uint32, createContents func() types.Value) (*nNode, fuse.Status) {
	components := strings.Split(path, "/")

	fname := components[len(components)-1]
	components = components[:len(components)-1]

	// Grab the spot in the hierarchy where the new node will go.
	parent, code := fs.getPathComponents(components)
	if code != fuse.OK {
		return nil, code
	}

	if nodeType(parent.inode) != "Directory" {
		return nil, fuse.ENOTDIR
	}

	// Create the new node.
	inode := types.NewStruct("Inode", types.StructData{
		"attr":     makeAttr(fs.db, mode),
		"contents": createContents(),
	})

	np := fs.getNode(inode, fname, parent)

	// Insert the new node into the hierarchy.
	fs.splice(np)
	fs.commit()

	return np, fuse.OK
}

func (fs *nomsFS) Readlink(path string, context *fuse.Context) (string, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return "", code
	}

	inode := np.inode
	d.Chk.Equal(nodeType(inode), "Symlink")
	link := inode.Get("contents")

	return string(link.(types.Struct).Get("targetPath").(types.String)), fuse.OK
}

func (fs *nomsFS) Unlink(path string, context *fuse.Context) fuse.Status {
	// Since we don't support hard links we don't need to worry about checking the link count.
	return fs.removeCommon(path, func(inode types.Value) {
		d.Chk.NotEqual(nodeType(inode), "Directory")
	})
}

func (fs *nomsFS) Rmdir(path string, context *fuse.Context) (code fuse.Status) {
	return fs.removeCommon(path, func(inode types.Value) {
		d.Chk.Equal(nodeType(inode), "Directory")
	})
}

func (fs *nomsFS) removeCommon(path string, typeCheck func(inode types.Value)) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return code
	}

	typeCheck(np.inode)

	parent := np.parent

	dir := parent.inode.Get("contents").(types.Struct)
	entries := dir.Get("entries").(types.Map)

	entries = entries.Edit().Remove(types.String(np.name)).Map()
	dir = dir.Set("entries", entries)

	fs.deleteNode(np)

	fs.updateNode(parent, parent.inode.Set("contents", dir))
	fs.splice(parent)
	fs.commit()

	return fuse.OK
}

func (nfile nomsFile) Read(dest []byte, off int64) (fuse.ReadResult, fuse.Status) {
	nfile.node.nLock.Lock()
	defer nfile.node.nLock.Unlock()

	file := nfile.node.inode.Get("contents")

	d.Chk.Equal(nodeType(nfile.node.inode), "File")

	ref := file.(types.Struct).Get("data").(types.Ref)
	blob := ref.TargetValue(nfile.fs.db).(types.Blob)

	n, err := blob.ReadAt(dest, off)
	if err == nil || err == io.EOF && uint64(off)+uint64(n) == blob.Len() {
		// Blob.ReadAt returns io.EOF if this read reaches the end of the blob.
		return fuse.ReadResultData(dest[:n]), fuse.OK
	}

	return fuse.ReadResultData(dest[:n]), fuse.EIO
}

func (nfile nomsFile) Write(data []byte, off int64) (uint32, fuse.Status) {
	nfile.node.nLock.Lock()
	defer nfile.node.nLock.Unlock()

	inode := nfile.node.inode
	d.Chk.Equal(nodeType(inode), "File")

	attr := inode.Get("attr").(types.Struct)
	file := inode.Get("contents").(types.Struct)
	ref := file.Get("data").(types.Ref)
	blob := ref.TargetValue(nfile.fs.db).(types.Blob)

	// Overwrite up to len(data) existing bytes starting at off; anything beyond the current end of the blob is appended.
	ll := uint64(blob.Len())
	oo := uint64(off)
	d.PanicIfFalse(ll >= oo)
	del := uint64(len(data))
	if ll-oo < del {
		del = ll - oo
	}

	blob = blob.Edit().Splice(uint64(off), del, data).Blob()
	ref = nfile.fs.db.WriteValue(blob)
	file = file.Set("data", ref)

	nfile.fs.bufferNode(nfile.node, inode.Set("contents", file).Set("attr", updateMtime(attr)))

	return uint32(len(data)), fuse.OK
}

func (nfile nomsFile) Flush() fuse.Status {
	nfile.fs.mdLock.Lock()
	nfile.node.nLock.Lock()
	defer nfile.fs.mdLock.Unlock()
	defer nfile.node.nLock.Unlock()

	np := nfile.fs.nodes[nfile.node.key]
	if np == nfile.node {
		nfile.fs.commitNode(nfile.node)
		nfile.fs.splice(nfile.node)
		nfile.fs.commit()
	}

	return fuse.OK
}

func makeAttr(vrw types.ValueReadWriter, mode uint32) types.Struct {
	now := time.Now()
	ctime := types.Number(float64(now.Unix()) + float64(now.Nanosecond())/1000000000)
	mtime := ctime

	user := fuse.CurrentOwner()
	gid := types.Number(float64(user.Gid))
	uid := types.Number(float64(user.Uid))

	return types.NewStruct("Attr", types.StructData{
		"ctime": ctime,
		"gid":   gid,
		"mode":  types.Number(mode),
		"mtime": mtime,
		"uid":   uid,
		"xattr": types.NewMap(vrw),
	})
}

func updateMtime(attr types.Struct) types.Struct {
	now := time.Now()
	mtime := types.Number(float64(now.Unix()) + float64(now.Nanosecond())/1000000000)

	return attr.Set("mtime", mtime)
}

func nodeType(inode types.Value) string {
	return types.TypeOf(inode.(types.Struct).Get("contents")).Desc.(types.StructDesc).Name
}

func (fs *nomsFS) getNode(inode types.Struct, name string, parent *nNode) *nNode {
	// The parent has to be a directory.
	if parent != nil {
		d.Chk.Equal("Directory", nodeType(parent.inode))
	}

	np, ok := fs.nodes[inode.Hash()]
	if ok {
		d.Chk.Equal(np.parent, parent)
		d.Chk.Equal(np.name, name)
	} else {
		np = &nNode{
			nLock:  &sync.Mutex{},
			parent: parent,
			name:   name,
			key:    inode.Hash(),
			inode:  inode,
		}
		fs.nodes[np.key] = np
	}
	return np
}

func (fs *nomsFS) updateNode(np *nNode, inode types.Struct) {
	delete(fs.nodes, np.key)
	np.inode = inode
	np.key = inode.Hash()
	fs.nodes[np.key] = np
}

func (fs *nomsFS) bufferNode(np *nNode, inode types.Struct) {
	np.inode = inode
}

func (fs *nomsFS) commitNode(np *nNode) {
	fs.updateNode(np, np.inode)
}

func (fs *nomsFS) deleteNode(np *nNode) {
	delete(fs.nodes, np.inode.Hash())
}
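
// Noms values are immutable, so editing an inode yields a new value with a new hash. The
// enclosing directory's entries map must be rewritten to point at the new inode, which
// changes that directory's hash, and so on up to the Filesystem root ("path copying");
// splice below performs that walk and commit then writes the new head. An illustrative
// single-level sketch of the same idea (hypothetical names, not used by the code):
//
//	dir := parentInode.Get("contents").(types.Struct)
//	entries := dir.Get("entries").(types.Map).Edit().Set(types.String(name), newInode).Map()
//	parentInode = parentInode.Set("contents", dir.Set("entries", entries)) // parent's hash changes too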

// Rewrite the hierarchy starting from np and walking back to the root.
func (fs *nomsFS) splice(np *nNode) {
	for np.parent != nil {
		dir := np.parent.inode.Get("contents").(types.Struct)
		entries := dir.Get("entries").(types.Map)

		entries = entries.Edit().Set(types.String(np.name), np.inode).Map()
		dir = dir.Set("entries", entries)

		fs.updateNode(np.parent, np.parent.inode.Set("contents", dir))

		np = np.parent
	}

	fs.head = fs.head.Set("root", np.inode)
}

func (fs *nomsFS) commit() {
	ds, ee := fs.db.CommitValue(fs.ds, fs.head)
	if ee != nil {
		panic("Unexpected changes to dataset (is it mounted in multiple locations?)")
	}
	fs.ds = ds
}

func (fs *nomsFS) getPath(path string) (*nNode, fuse.Status) {
	if path == "" {
		return fs.getPathComponents([]string{})
	}
	return fs.getPathComponents(strings.Split(path, "/"))
}

func (fs *nomsFS) getPathComponents(components []string) (*nNode, fuse.Status) {
	inode := fs.head.Get("root").(types.Struct)
	np := fs.getNode(inode, "", nil)

	for _, component := range components {
		d.Chk.NotEqual(component, "")

		contents := inode.Get("contents")
		if types.TypeOf(contents).Desc.(types.StructDesc).Name != "Directory" {
			return nil, fuse.ENOTDIR
		}

		v, ok := contents.(types.Struct).Get("entries").(types.Map).
			MaybeGet(types.String(component))
		if !ok {
			return nil, fuse.ENOENT
		}
		inode = v.(types.Struct)
		np = fs.getNode(inode, component, np)
	}

	return np, fuse.OK
}

func (fs *nomsFS) Rename(oldPath string, newPath string, context *fuse.Context) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	// We find the node, the new parent, and the node at the shared point in the hierarchy so that we can minimize repeated work when splicing the hierarchy back together below.
	np, nparent, nshared, fname, code := fs.getPaths(oldPath, newPath)
	if code != fuse.OK {
		return code
	}

	// Remove the node from the old spot in the hierarchy.
	oparent := np.parent

	dir := oparent.inode.Get("contents").(types.Struct)
	entries := dir.Get("entries").(types.Map)

	entries = entries.Edit().Remove(types.String(np.name)).Map()
	dir = dir.Set("entries", entries)

	fs.updateNode(oparent, oparent.inode.Set("contents", dir))

	// Insert it into the new spot in the hierarchy.
	np.parent = nparent
	np.name = fname
	fs.splices(oparent, np, nshared)

	fs.commit()

	return fuse.OK
}

func (fs *nomsFS) getPaths(oldPath string, newPath string) (oldNode *nNode, newParent *nNode, sharedNode *nNode, newName string, code fuse.Status) {
	ocomp := strings.Split(oldPath, "/")
	ncomp := strings.Split(newPath, "/")
	newName = ncomp[len(ncomp)-1]
	ncomp = ncomp[:len(ncomp)-1]

	inode := fs.head.Get("root").(types.Struct)
	sharedNode = fs.getNode(inode, "", nil)

	var i int
	var component string
	for i, component = range ocomp {
		if i >= len(ncomp) || component != ncomp[i] {
			break
		}

		contents := inode.Get("contents")
		if types.TypeOf(contents).Desc.(types.StructDesc).Name != "Directory" {
			return nil, nil, nil, "", fuse.ENOTDIR
		}

		v, ok := contents.(types.Struct).Get("entries").(types.Map).
			MaybeGet(types.String(component))
		if !ok {
			return nil, nil, nil, "", fuse.ENOENT
		}

		inode = v.(types.Struct)
		sharedNode = fs.getNode(inode, component, sharedNode)
	}

	pinode := inode
	oldNode = sharedNode
	for _, component := range ocomp[i:] {
		contents := inode.Get("contents")
		if types.TypeOf(contents).Desc.(types.StructDesc).Name != "Directory" {
			return nil, nil, nil, "", fuse.ENOTDIR
		}

		v, ok := contents.(types.Struct).Get("entries").(types.Map).
			MaybeGet(types.String(component))
		if !ok {
			return nil, nil, nil, "", fuse.ENOENT
		}

		inode = v.(types.Struct)
		oldNode = fs.getNode(inode, component, oldNode)
	}

	inode = pinode
	newParent = sharedNode
	for _, component := range ncomp[i:] {
		contents := inode.Get("contents")
		// TODO: Expose name on struct value
		if types.TypeOf(contents).Desc.(types.StructDesc).Name != "Directory" {
			return nil, nil, nil, "", fuse.ENOTDIR
		}

		v, ok := contents.(types.Struct).Get("entries").(types.Map).
			MaybeGet(types.String(component))
		if !ok {
			return nil, nil, nil, "", fuse.ENOENT
		}

		inode = v.(types.Struct)
		newParent = fs.getNode(inode, component, newParent)
	}

	code = fuse.OK
	return
}

func (fs *nomsFS) splices(np1, np2, npShared *nNode) {
	// Splice each until we get to the shared parent directory.
	for _, np := range []*nNode{np1, np2} {
		for np != npShared {
			dir := np.parent.inode.Get("contents").(types.Struct)
			entries := dir.Get("entries").(types.Map)

			entries = entries.Edit().Set(types.String(np.name), np.inode).Map()
			dir = dir.Set("entries", entries)

			fs.updateNode(np.parent, np.parent.inode.Set("contents", dir))

			np = np.parent
		}
	}

	// Splice the shared parent.
	fs.splice(npShared)
}

func (fs *nomsFS) GetAttr(path string, context *fuse.Context) (*fuse.Attr, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return nil, code
	}

	inode := np.inode
	attr := inode.Get("attr").(types.Struct)
	contents := inode.Get("contents").(types.Struct)

	mode := uint32(float64(attr.Get("mode").(types.Number)))
	ctime := float64(attr.Get("ctime").(types.Number))
	gid := float64(attr.Get("gid").(types.Number))
	mtime := float64(attr.Get("mtime").(types.Number))
	uid := float64(attr.Get("uid").(types.Number))

	at := &fuse.Attr{
		Mode:      mode,
		Mtime:     uint64(mtime),
		Mtimensec: uint32((mtime - math.Floor(mtime)) * 1000000000), // sub-second remainder, in nanoseconds
		Ctime:     uint64(ctime),
		Ctimensec: uint32((ctime - math.Floor(ctime)) * 1000000000), // sub-second remainder, in nanoseconds
	}

	at.Owner.Gid = uint32(gid)
	at.Owner.Uid = uint32(uid)

	switch types.TypeOf(contents).Desc.(types.StructDesc).Name {
	case "File":
		blob := contents.Get("data").(types.Ref).TargetValue(fs.db).(types.Blob)
		at.Mode |= fuse.S_IFREG
		at.Size = blob.Len()
	case "Directory":
		at.Mode |= fuse.S_IFDIR
		at.Size = contents.Get("entries").(types.Map).Len()
	case "Symlink":
		at.Mode |= fuse.S_IFLNK
	}

	return at, fuse.OK
}

func (fs *nomsFS) Chown(path string, uid uint32, gid uint32, context *fuse.Context) fuse.Status {
	return fs.setAttr(path, func(attr types.Struct) types.Struct {
		return attr.Set("uid", types.Number(uid)).Set("gid", types.Number(gid))
	})
}

func (fs *nomsFS) Utimens(path string, atime *time.Time, mtime *time.Time, context *fuse.Context) fuse.Status {
	if mtime == nil {
		return fuse.OK
	}
	return fs.setAttr(path, func(attr types.Struct) types.Struct {
		return attr.Set("mtime", types.Number(float64(mtime.Unix())+float64(mtime.Nanosecond())/1000000000))
	})
}

func (fs *nomsFS) Chmod(path string, mode uint32, context *fuse.Context) fuse.Status {
	return fs.setAttr(path, func(attr types.Struct) types.Struct {
		return attr.Set("mode", types.Number(mode))
	})
}

func (fs *nomsFS) setAttr(path string, updateAttr func(attr types.Struct) types.Struct) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return code
	}

	inode := np.inode
	attr := inode.Get("attr").(types.Struct)
	attr = updateAttr(attr)
	inode = inode.Set("attr", attr)

	fs.updateNode(np, inode)
	fs.splice(np)
	fs.commit()

	return fuse.OK
}

func (fs *nomsFS) GetXAttr(path string, attribute string, context *fuse.Context) ([]byte, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return nil, code
	}

	xattr := np.inode.Get("attr").(types.Struct).Get("xattr").(types.Map)

	v, found := xattr.MaybeGet(types.String(attribute))
	if !found {
		if runtime.GOOS == "darwin" {
			return nil, fuse.Status(93) // syscall.ENOATTR
		}
		return nil, fuse.ENODATA
	}

	blob := v.(types.Blob)
	data := make([]byte, blob.Len())
	blob.ReadAt(data, 0)

	return data, fuse.OK
}

func (fs *nomsFS) ListXAttr(path string, context *fuse.Context) ([]string, fuse.Status) {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return nil, code
	}

	xattr := np.inode.Get("attr").(types.Struct).Get("xattr").(types.Map)

	keys := make([]string, 0, xattr.Len())
	xattr.IterAll(func(key, value types.Value) {
		keys = append(keys, string(key.(types.String)))
	})

	return keys, fuse.OK
}

func (fs *nomsFS) RemoveXAttr(path string, key string, context *fuse.Context) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return code
	}

	inode := np.inode
	attr := np.inode.Get("attr").(types.Struct)
	xattr := attr.Get("xattr").(types.Map)

	xattr = xattr.Edit().Remove(types.String(key)).Map()
	attr = attr.Set("xattr", xattr)
	inode = inode.Set("attr", attr)

	fs.updateNode(np, inode)
	fs.splice(np)
	fs.commit()

	return fuse.OK
}

func (fs *nomsFS) SetXAttr(path string, key string, data []byte, flags int, context *fuse.Context) fuse.Status {
	fs.mdLock.Lock()
	defer fs.mdLock.Unlock()
	np, code := fs.getPath(path)
	if code != fuse.OK {
		return code
	}

	inode := np.inode
	attr := np.inode.Get("attr").(types.Struct)
	xattr := attr.Get("xattr").(types.Map)
	blob := types.NewBlob(fs.db, bytes.NewReader(data))

	xattr = xattr.Edit().Set(types.String(key), blob).Map()
	attr = attr.Set("xattr", xattr)
	inode = inode.Set("attr", attr)

	fs.updateNode(np, inode)
	fs.splice(np)
	fs.commit()

	return fuse.OK
}