// Copyright (c) 2015-2021, NVIDIA CORPORATION.
// SPDX-License-Identifier: Apache-2.0

package main

import (
	"container/list"
	"fmt"
	"log"
	"os"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/swiftstack/fission"
	"github.com/swiftstack/sortedmap"

	"github.com/swiftstack/ProxyFS/fs"
	"github.com/swiftstack/ProxyFS/inode"
	"github.com/swiftstack/ProxyFS/jrpcfs"
)

const (
	// initOutFlagsMaskReadDirPlusDisabled is the set of FUSE INIT response
	// flags advertised to the kernel when ReadDirPlus is disabled.
	initOutFlagsMaskReadDirPlusDisabled = uint32(0) |
		fission.InitFlagsAsyncRead |
		fission.InitFlagsFileOps |
		fission.InitFlagsAtomicOTrunc |
		fission.InitFlagsBigWrites |
		fission.InitFlagsAutoInvalData |
		fission.InitFlagsParallelDirops |
		fission.InitFlagsMaxPages |
		fission.InitFlagsExplicitInvalData

	// initOutFlagsMaskReadDirPlusEnabled is the superset of the above that
	// additionally enables the ReadDirPlus operation.
	initOutFlagsMaskReadDirPlusEnabled = initOutFlagsMaskReadDirPlusDisabled |
		fission.InitFlagsDoReadDirPlus |
		fission.InitFlagsReaddirplusAuto

	// pfsagentFuseSubtype is the FUSE filesystem subtype reported for this mount.
	pfsagentFuseSubtype = "PFSAgent"
)

// performMountFUSE constructs the fission volume from the agent's
// configuration (with &globals serving as the fission.Callbacks receiver)
// and mounts it; any mount failure is fatal.
func performMountFUSE() {
	var (
		err error
	)

	globals.fissionVolume = fission.NewVolume(
		globals.config.FUSEVolumeName,     // volumeName string
		globals.config.FUSEMountPointPath, // mountpointDirPath string
		pfsagentFuseSubtype,               // fuseSubtype string
		globals.config.FUSEMaxWrite,       // initOutMaxWrite uint32
		&globals,                          // callbacks fission.Callbacks
		newLogger(),                       // logger *log.Logger
		globals.fissionErrChan)            // errChan chan error

	err = globals.fissionVolume.DoMount()
	if nil != err {
		log.Fatalf("fissionVolume.DoMount() failed: %v", err)
	}
}

// performUnmountFUSE unmounts the previously mounted fission volume;
// any unmount failure is fatal.
func performUnmountFUSE() {
	var (
		err error
	)

	err = globals.fissionVolume.DoUnmount()
	if nil != err {
		log.Fatalf("fissionVolume.DoUnmount() failed: %v", err)
	}
}

// convertErrToErrno extracts a syscall.Errno embedded in err's text
// (servers encode it as "errno: <n>"); if no errno parses out of the
// message, defaultErrno is returned instead.
func convertErrToErrno(err error, defaultErrno syscall.Errno) (errno syscall.Errno) {
	var (
		convertErr    error
		possibleErrno syscall.Errno
	)

	_,
		convertErr = fmt.Sscanf(err.Error(), "errno: %v", &possibleErrno)
	if nil == convertErr {
		errno = possibleErrno
	} else {
		errno = defaultErrno
	}

	return
}

// fixAttrSizes derives Blocks/BlkSize for regular files from Size using the
// configured AttrBlockSize (rounding Blocks up); for non-regular files all
// three size-related fields are zeroed.
func fixAttrSizes(attr *fission.Attr) {
	if syscall.S_IFREG == (attr.Mode & syscall.S_IFMT) {
		// Round Size up to a whole number of AttrBlockSize units.
		attr.Blocks = attr.Size + (globals.config.AttrBlockSize - 1)
		attr.Blocks /= globals.config.AttrBlockSize
		attr.BlkSize = uint32(globals.config.AttrBlockSize)
	} else {
		attr.Size = 0
		attr.Blocks = 0
		attr.BlkSize = 0
	}
}

// nsToUnixTime splits a nanosecond timestamp into whole seconds plus the
// remaining nanoseconds.
func nsToUnixTime(ns uint64) (sec uint64, nsec uint32) {
	sec = ns / 1e9
	nsec = uint32(ns - (sec * 1e9))
	return
}

// unixTimeToNs combines seconds and nanoseconds into a single nanosecond
// timestamp (inverse of nsToUnixTime).
func unixTimeToNs(sec uint64, nsec uint32) (ns uint64) {
	ns = (sec * 1e9) + uint64(nsec)
	return
}

// ensureCachedStatPopulatedWhileLocked fetches the inode's stat via
// RpcGetStat and caches it in fileInode.cachedStat if not already cached.
// The caller must already hold the inode lock.
func (fileInode *fileInodeStruct) ensureCachedStatPopulatedWhileLocked() (err error) {
	var (
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
	)

	if nil == fileInode.cachedStat {
		getStatRequest = &jrpcfs.GetStatRequest{
			InodeHandle: jrpcfs.InodeHandle{
				MountID:     globals.mountID,
				InodeNumber: int64(fileInode.InodeNumber),
			},
		}

		getStatReply = &jrpcfs.StatStruct{}

		err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
		if nil != err {
			return
		}

		fileInode.cachedStat = getStatReply
	}

	err = nil
	return
}

// DoLookup handles FUSE LOOKUP: it resolves lookupIn.Name within directory
// inHeader.NodeID via RpcLookupPlus and returns the child's attributes,
// reconciling them against any locally cached stat.
func (dummy *globalsStruct) DoLookup(inHeader *fission.InHeader, lookupIn *fission.LookupIn) (lookupOut *fission.LookupOut, errno syscall.Errno) {
	var (
		aTimeNSec         uint32
		aTimeSec          uint64
		cTimeNSec         uint32
		cTimeSec          uint64
		err               error
		fileInode         *fileInodeStruct
		lookupPlusReply   *jrpcfs.LookupPlusReply
		lookupPlusRequest *jrpcfs.LookupPlusRequest
		mTimeNSec         uint32
		mTimeSec          uint64
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoLookup_calls, 1)

	lookupPlusRequest = &jrpcfs.LookupPlusRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(lookupIn.Name[:]),
	}

	lookupPlusReply = &jrpcfs.LookupPlusReply{}

	err = globals.retryRPCClient.Send("RpcLookupPlus", lookupPlusRequest, lookupPlusReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.ENOENT)
		return
	}

	// If we happen to already hold an Exclusive Lease on the target inode,
	// keep the reply and local cachedStat consistent with one another.
	fileInode = lockInodeIfExclusiveLeaseGranted(inode.InodeNumber(lookupPlusReply.InodeNumber))
	if nil != fileInode {
		if nil == fileInode.cachedStat {
			// Might as well cache the Stat portion of lookupPlusReply in fileInode.cachedStat
			//
			// Note: This is a convenient, and unlikely, optimization that won't be done in
			//       the SharedLease case... but that is also an unlikely case.

			fileInode.cachedStat = &jrpcfs.StatStruct{
				CTimeNs:         lookupPlusReply.CTimeNs,
				CRTimeNs:        lookupPlusReply.CRTimeNs,
				MTimeNs:         lookupPlusReply.MTimeNs,
				ATimeNs:         lookupPlusReply.ATimeNs,
				Size:            lookupPlusReply.Size,
				NumLinks:        lookupPlusReply.NumLinks,
				StatInodeNumber: lookupPlusReply.StatInodeNumber,
				FileMode:        lookupPlusReply.FileMode,
				UserID:          lookupPlusReply.UserID,
				GroupID:         lookupPlusReply.GroupID,
			}
		} else {
			// Update lookupPlusReply from fileInode.cachedStat

			lookupPlusReply.CTimeNs = fileInode.cachedStat.CTimeNs
			lookupPlusReply.CRTimeNs = fileInode.cachedStat.CRTimeNs
			lookupPlusReply.MTimeNs = fileInode.cachedStat.MTimeNs
			lookupPlusReply.ATimeNs = fileInode.cachedStat.ATimeNs
			lookupPlusReply.Size = fileInode.cachedStat.Size
			lookupPlusReply.NumLinks = fileInode.cachedStat.NumLinks
			lookupPlusReply.StatInodeNumber = fileInode.cachedStat.StatInodeNumber
			lookupPlusReply.FileMode = fileInode.cachedStat.FileMode
			lookupPlusReply.UserID = fileInode.cachedStat.UserID
			lookupPlusReply.GroupID = fileInode.cachedStat.GroupID
		}

		fileInode.unlock(false)
	}

	aTimeSec, aTimeNSec = nsToUnixTime(lookupPlusReply.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(lookupPlusReply.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(lookupPlusReply.CTimeNs)

	lookupOut = &fission.LookupOut{
		EntryOut: fission.EntryOut{
			NodeID:         uint64(lookupPlusReply.InodeNumber),
			Generation:     0,
			EntryValidSec:  globals.entryValidSec,
			AttrValidSec:   globals.attrValidSec,
			EntryValidNSec: globals.entryValidNSec,
			AttrValidNSec:  globals.attrValidNSec,
			Attr: fission.Attr{
				Ino:       uint64(lookupPlusReply.InodeNumber),
				Size:      lookupPlusReply.Size,
				Blocks:    0, // fixAttrSizes() will compute this
				ATimeSec:  aTimeSec,
				MTimeSec:  mTimeSec,
				CTimeSec:  cTimeSec,
				ATimeNSec: aTimeNSec,
				MTimeNSec: mTimeNSec,
				CTimeNSec: cTimeNSec,
				Mode:      lookupPlusReply.FileMode,
				NLink:     uint32(lookupPlusReply.NumLinks),
				UID:       lookupPlusReply.UserID,
				GID:       lookupPlusReply.GroupID,
				RDev:      0,
				BlkSize:   0, // fixAttrSizes() will set this
				Padding:   0,
			},
		},
	}

	fixAttrSizes(&lookupOut.EntryOut.Attr)

	errno = 0
	return
}

// DoForget handles FUSE FORGET; only the call metric is recorded — no
// per-inode reference counting is performed here.
func (dummy *globalsStruct) DoForget(inHeader *fission.InHeader, forgetIn *fission.ForgetIn) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoForget_calls, 1)
	return
}

// DoGetAttr handles FUSE GETATTR: it returns the inode's attributes from
// fileInode.cachedStat, populating that cache via RpcGetStat under a
// Shared Lease if necessary.
func (dummy *globalsStruct) DoGetAttr(inHeader *fission.InHeader, getAttrIn *fission.GetAttrIn) (getAttrOut *fission.GetAttrOut, errno syscall.Errno) {
	var (
		aTimeNSec      uint32
		aTimeSec       uint64
		cTimeNSec      uint32
		cTimeSec       uint64
		err            error
		fileInode      *fileInodeStruct
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
		mTimeNSec      uint32
		mTimeSec       uint64
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoGetAttr_calls, 1)

	fileInode = lockInodeWithSharedLease(inode.InodeNumber(inHeader.NodeID))

	if nil == fileInode.cachedStat {
		getStatRequest = &jrpcfs.GetStatRequest{
			InodeHandle: jrpcfs.InodeHandle{
				MountID:     globals.mountID,
				InodeNumber: int64(inHeader.NodeID),
			},
		}

		getStatReply = &jrpcfs.StatStruct{}

		err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
		if nil != err {
			fileInode.unlock(true)
			errno = convertErrToErrno(err, syscall.EIO)
			return
		}

		fileInode.cachedStat = getStatReply
	}

	aTimeSec, aTimeNSec = nsToUnixTime(fileInode.cachedStat.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(fileInode.cachedStat.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(fileInode.cachedStat.CTimeNs)

	getAttrOut = &fission.GetAttrOut{
		AttrValidSec:  globals.attrValidSec,
		AttrValidNSec: globals.attrValidNSec,
		Dummy:         0,
		Attr: fission.Attr{
			Ino:       inHeader.NodeID,
			Size:      fileInode.cachedStat.Size,
			Blocks:    0, // fixAttrSizes() will compute this
			ATimeSec:  aTimeSec,
			MTimeSec:  mTimeSec,
			CTimeSec:  cTimeSec,
			ATimeNSec: aTimeNSec,
			MTimeNSec: mTimeNSec,
			CTimeNSec: cTimeNSec,
			Mode:      fileInode.cachedStat.FileMode,
			NLink:     uint32(fileInode.cachedStat.NumLinks),
			UID:       fileInode.cachedStat.UserID,
			GID:       fileInode.cachedStat.GroupID,
			RDev:      0,
			BlkSize:   0, // fixAttrSizes() will set this
			Padding:   0,
		},
	}

	fileInode.unlock(false)

	fixAttrSizes(&getAttrOut.Attr)

	errno = 0
	return
}

// setMode issues RpcChmod to change the permission bits (mode is masked to
// os.ModePerm) of the indicated inode; returns 0 on success.
func setMode(nodeID uint64, mode uint32) (errno syscall.Errno) {
	var (
		chmodReply   *jrpcfs.Reply
		chmodRequest *jrpcfs.ChmodRequest
		err          error
	)

	chmodRequest = &jrpcfs.ChmodRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(nodeID),
		},
		FileMode: mode & uint32(os.ModePerm),
	}

	chmodReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcChmod", chmodRequest, chmodReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// setUIDAndOrGID issues RpcChown to change owner and/or group of the
// indicated inode; an ID whose setting* flag is false is sent as -1,
// telling the server to leave it unchanged.
func
setUIDAndOrGID(nodeID uint64, settingUID bool, uid uint32, settingGID bool, gid uint32) (errno syscall.Errno) {
	var (
		chownReply   *jrpcfs.Reply
		chownRequest *jrpcfs.ChownRequest
		err          error
	)

	chownRequest = &jrpcfs.ChownRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(nodeID),
		},
	}

	// -1 signals "leave this ID as-is" to the server.
	if settingUID {
		chownRequest.UserID = int32(uid)
	} else {
		chownRequest.UserID = -1
	}

	if settingGID {
		chownRequest.GroupID = int32(gid)
	} else {
		chownRequest.GroupID = -1
	}

	chownReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcChown", chownRequest, chownReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// setSize resizes the file to size: the cached stat and all extent maps
// (including those of in-flight chunked PUTs) are pruned locally, then
// RpcResize is issued to the server. Caller must hold the inode lock.
func (fileInode *fileInodeStruct) setSize(nodeID uint64, size uint64) (errno syscall.Errno) {
	var (
		chunkedPutContext        *chunkedPutContextStruct
		chunkedPutContextElement *list.Element
		err                      error
		ok                       bool
		resizeReply              *jrpcfs.Reply
		resizeRequest            *jrpcfs.ResizeRequest
	)

	fileInode.cachedStat.Size = size

	pruneExtentMap(fileInode.extentMap, size)

	// Also prune the extent map of every pending chunked PUT so no stale
	// extent beyond the new EOF survives.
	chunkedPutContextElement = fileInode.chunkedPutList.Front()

	for nil != chunkedPutContextElement {
		chunkedPutContext, ok = chunkedPutContextElement.Value.(*chunkedPutContextStruct)
		if !ok {
			logFatalf("chunkedPutContextElement.Value.(*chunkedPutContextStruct) returned !ok")
		}

		pruneExtentMap(chunkedPutContext.extentMap, size)

		chunkedPutContextElement = chunkedPutContextElement.Next()
	}

	resizeRequest = &jrpcfs.ResizeRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(nodeID),
		},
		NewSize: size,
	}

	resizeReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcResize", resizeRequest, resizeReply)
	if nil != err {
		errno =
			convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// setMTimeAndOrATime issues RpcSetTime to update mtime and/or atime of the
// indicated inode. When settingMTimeNow/settingATimeNow is true, the current
// wall-clock time is used instead of the supplied sec/nsec values.
func setMTimeAndOrATime(nodeID uint64, settingMTime bool, settingMTimeNow bool, mTimeSec uint64, mTimeNSec uint32, settingATime bool, settingATimeNow bool, aTimeSec uint64, aTimeNSec uint32) (errno syscall.Errno) {
	var (
		err            error
		setTimeReply   *jrpcfs.Reply
		setTimeRequest *jrpcfs.SetTimeRequest
		timeNow        time.Time
	)

	setTimeRequest = &jrpcfs.SetTimeRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(nodeID),
		},
		StatStruct: jrpcfs.StatStruct{
			MTimeNs: uint64(0), // Updated below if settingMTime
			ATimeNs: uint64(0), // Updated below if settingATime
		},
	}

	timeNow = time.Now()

	if settingMTime {
		if settingMTimeNow {
			setTimeRequest.MTimeNs = uint64(timeNow.UnixNano())
		} else {
			setTimeRequest.MTimeNs = unixTimeToNs(mTimeSec, mTimeNSec)
		}
	}
	if settingATime {
		if settingATimeNow {
			setTimeRequest.ATimeNs = uint64(timeNow.UnixNano())
		} else {
			setTimeRequest.ATimeNs = unixTimeToNs(aTimeSec, aTimeNSec)
		}
	}

	setTimeReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcSetTime", setTimeRequest, setTimeReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// DoSetAttr handles FUSE SETATTR: it decodes setAttrIn.Valid into the
// individual mode/owner/size/time operations, performs each under an
// Exclusive Lease, then re-fetches the stat to build the reply.
func (dummy *globalsStruct) DoSetAttr(inHeader *fission.InHeader, setAttrIn *fission.SetAttrIn) (setAttrOut *fission.SetAttrOut, errno syscall.Errno) {
	var (
		aTimeNSec              uint32
		aTimeSec               uint64
		cTimeNSec              uint32
		cTimeSec               uint64
		err                    error
		fileInode              *fileInodeStruct
		getStatReply           *jrpcfs.StatStruct
		getStatRequest         *jrpcfs.GetStatRequest
		mTimeNSec              uint32
		mTimeSec               uint64
		settingATime           bool
		settingATimeNow        bool
		settingGID             bool
		settingMode            bool
		settingMTime           bool
		settingMTimeAndOrATime bool
		settingMTimeNow        bool
		settingSize bool
		settingUID  bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoSetAttr_calls, 1)

	// Reject any Valid bits beyond the set this handler knows about.
	if setAttrIn.Valid != (setAttrIn.Valid & (fission.SetAttrInValidMode | fission.SetAttrInValidUID | fission.SetAttrInValidGID | fission.SetAttrInValidSize | fission.SetAttrInValidATime | fission.SetAttrInValidMTime | fission.SetAttrInValidFH | fission.SetAttrInValidATimeNow | fission.SetAttrInValidMTimeNow | fission.SetAttrInValidLockOwner)) {
		errno = syscall.ENOSYS
		return
	}

	settingMode = (0 != (setAttrIn.Valid & fission.SetAttrInValidMode))

	settingUID = (0 != (setAttrIn.Valid & fission.SetAttrInValidUID))
	settingGID = (0 != (setAttrIn.Valid & fission.SetAttrInValidGID))

	settingSize = (0 != (setAttrIn.Valid & fission.SetAttrInValidSize))

	// A time is "being set" if either an explicit value or the *Now flag is present.
	settingMTime = (0 != (setAttrIn.Valid & fission.SetAttrInValidMTime)) || (0 != (setAttrIn.Valid & fission.SetAttrInValidMTimeNow))
	settingATime = (0 != (setAttrIn.Valid & fission.SetAttrInValidATime)) || (0 != (setAttrIn.Valid & fission.SetAttrInValidATimeNow))

	settingMTimeNow = settingMTime && (0 != (setAttrIn.Valid & fission.SetAttrInValidMTimeNow))
	settingATimeNow = settingATime && (0 != (setAttrIn.Valid & fission.SetAttrInValidATimeNow))

	settingMTimeAndOrATime = settingATime || settingMTime

	// TODO: Verify we can accept but ignore fission.SetAttrInValidFH in setAttrIn.Valid
	// TODO: Verify we can accept but ignore fission.SetAttrInValidLockOwner in setAttrIn.Valid

	// Now perform requested setAttrIn.Mode operations

	fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(inHeader.NodeID))

	fileInode.doFlushIfNecessary()

	if settingMode {
		errno = setMode(inHeader.NodeID, setAttrIn.Mode)
		if 0 != errno {
			fileInode.unlock(true)
			return
		}
	}

	if settingUID || settingGID {
		errno = setUIDAndOrGID(inHeader.NodeID, settingUID, setAttrIn.UID, settingGID, setAttrIn.GID)
		if 0 != errno {
			fileInode.unlock(true)
			return
		}
	}

	if settingSize {
		errno = fileInode.setSize(inHeader.NodeID, setAttrIn.Size)
		if 0 != errno {
			fileInode.unlock(true)
			return
		}
	}

	if settingMTimeAndOrATime {
		errno = setMTimeAndOrATime(inHeader.NodeID, settingMTime, settingMTimeNow, setAttrIn.MTimeSec, setAttrIn.MTimeNSec, settingATime, settingATimeNow, setAttrIn.ATimeSec, setAttrIn.ATimeNSec)
		if 0 != errno {
			fileInode.unlock(true)
			return
		}
	}

	// Re-fetch the stat so the reply (and cachedStat) reflect all updates above.
	getStatRequest = &jrpcfs.GetStatRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
	}

	getStatReply = &jrpcfs.StatStruct{}

	err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
	if nil != err {
		fileInode.unlock(true)
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	fileInode.cachedStat = getStatReply

	aTimeSec, aTimeNSec = nsToUnixTime(fileInode.cachedStat.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(fileInode.cachedStat.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(fileInode.cachedStat.CTimeNs)

	setAttrOut = &fission.SetAttrOut{
		AttrValidSec:  globals.attrValidSec,
		AttrValidNSec: globals.attrValidNSec,
		Dummy:         0,
		Attr: fission.Attr{
			Ino:       inHeader.NodeID,
			Size:      fileInode.cachedStat.Size,
			Blocks:    0, // fixAttrSizes() will compute this
			ATimeSec:  aTimeSec,
			MTimeSec:  mTimeSec,
			CTimeSec:  cTimeSec,
			ATimeNSec: aTimeNSec,
			MTimeNSec: mTimeNSec,
			CTimeNSec: cTimeNSec,
			Mode:      fileInode.cachedStat.FileMode,
			NLink:     uint32(fileInode.cachedStat.NumLinks),
			UID:       fileInode.cachedStat.UserID,
			GID:       fileInode.cachedStat.GroupID,
			RDev:      0,
			BlkSize:   0, // fixAttrSizes() will set this
			Padding:   0,
		},
	}

	fileInode.unlock(false)

	fixAttrSizes(&setAttrOut.Attr)

	errno = 0
	return
}

// DoReadLink handles FUSE READLINK: it returns the symlink target obtained
// via RpcReadSymlink.
func (dummy *globalsStruct) DoReadLink(inHeader *fission.InHeader) (readLinkOut *fission.ReadLinkOut, errno syscall.Errno) {
	var (
		err                error
		readSymlinkReply   *jrpcfs.ReadSymlinkReply
		readSymlinkRequest *jrpcfs.ReadSymlinkRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoReadLink_calls, 1)

	readSymlinkRequest = &jrpcfs.ReadSymlinkRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
	}

	readSymlinkReply = &jrpcfs.ReadSymlinkReply{}

	err = globals.retryRPCClient.Send("RpcReadSymlink", readSymlinkRequest, readSymlinkReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	readLinkOut = &fission.ReadLinkOut{
		Data: []byte(readSymlinkReply.Target),
	}

	errno = 0
	return
}

// DoSymLink handles FUSE SYMLINK: it creates the symlink via RpcSymlink,
// then stats the new inode to populate the reply's attributes.
func (dummy *globalsStruct) DoSymLink(inHeader *fission.InHeader, symLinkIn *fission.SymLinkIn) (symLinkOut *fission.SymLinkOut, errno syscall.Errno) {
	var (
		aTimeNSec      uint32
		aTimeSec       uint64
		cTimeNSec      uint32
		cTimeSec       uint64
		err            error
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
		mTimeNSec      uint32
		mTimeSec       uint64
		symlinkReply   *jrpcfs.InodeReply
		symlinkRequest *jrpcfs.SymlinkRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoSymLink_calls, 1)

	symlinkRequest = &jrpcfs.SymlinkRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(symLinkIn.Name[:]),
		Target:   string(symLinkIn.Data[:]),
		UserID:   int32(inHeader.UID),
		GroupID:  int32(inHeader.GID),
	}

	symlinkReply = &jrpcfs.InodeReply{}

	err = globals.retryRPCClient.Send("RpcSymlink", symlinkRequest, symlinkReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	getStatRequest = &jrpcfs.GetStatRequest{
		InodeHandle:
		jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: symlinkReply.InodeNumber,
		},
	}

	getStatReply = &jrpcfs.StatStruct{}

	err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	aTimeSec, aTimeNSec = nsToUnixTime(getStatReply.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(getStatReply.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(getStatReply.CTimeNs)

	symLinkOut = &fission.SymLinkOut{
		EntryOut: fission.EntryOut{
			NodeID:         uint64(symlinkReply.InodeNumber),
			Generation:     0,
			EntryValidSec:  globals.entryValidSec,
			AttrValidSec:   globals.attrValidSec,
			EntryValidNSec: globals.entryValidNSec,
			AttrValidNSec:  globals.attrValidNSec,
			Attr: fission.Attr{
				Ino:       uint64(symlinkReply.InodeNumber),
				Size:      0,
				Blocks:    0,
				ATimeSec:  aTimeSec,
				MTimeSec:  mTimeSec,
				CTimeSec:  cTimeSec,
				ATimeNSec: aTimeNSec,
				MTimeNSec: mTimeNSec,
				CTimeNSec: cTimeNSec,
				Mode:      getStatReply.FileMode,
				NLink:     uint32(getStatReply.NumLinks),
				UID:       getStatReply.UserID,
				GID:       getStatReply.GroupID,
				RDev:      0,
				BlkSize:   0,
				Padding:   0,
			},
		},
	}

	errno = 0
	return
}

// DoMkNod handles FUSE MKNOD; device/special nodes are not supported, so
// ENOSYS is always returned (after recording the call metric).
func (dummy *globalsStruct) DoMkNod(inHeader *fission.InHeader, mkNodIn *fission.MkNodIn) (mkNodOut *fission.MkNodOut, errno syscall.Errno) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoMkNod_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoMkDir handles FUSE MKDIR: it creates the directory via RpcMkdir
// (mode masked to os.ModePerm), then stats the new inode for the reply.
func (dummy *globalsStruct) DoMkDir(inHeader *fission.InHeader, mkDirIn *fission.MkDirIn) (mkDirOut *fission.MkDirOut, errno syscall.Errno) {
	var (
		aTimeNSec      uint32
		aTimeSec       uint64
		cTimeNSec      uint32
		cTimeSec       uint64
		err            error
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
		mkdirReply     *jrpcfs.InodeReply
		mkdirRequest   *jrpcfs.MkdirRequest
		mTimeNSec      uint32
		mTimeSec       uint64
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoMkDir_calls, 1)

	mkdirRequest = &jrpcfs.MkdirRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(mkDirIn.Name[:]),
		UserID:   int32(inHeader.UID),
		GroupID:  int32(inHeader.GID),
		FileMode: mkDirIn.Mode & uint32(os.ModePerm),
	}

	mkdirReply = &jrpcfs.InodeReply{}

	err = globals.retryRPCClient.Send("RpcMkdir", mkdirRequest, mkdirReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	getStatRequest = &jrpcfs.GetStatRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: mkdirReply.InodeNumber,
		},
	}

	getStatReply = &jrpcfs.StatStruct{}

	err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	aTimeSec, aTimeNSec = nsToUnixTime(getStatReply.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(getStatReply.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(getStatReply.CTimeNs)

	mkDirOut = &fission.MkDirOut{
		EntryOut: fission.EntryOut{
			NodeID:         uint64(mkdirReply.InodeNumber),
			Generation:     0,
			EntryValidSec:  globals.entryValidSec,
			AttrValidSec:   globals.attrValidSec,
			EntryValidNSec: globals.entryValidNSec,
			AttrValidNSec:  globals.attrValidNSec,
			Attr: fission.Attr{
				Ino:       uint64(mkdirReply.InodeNumber),
				Size:      0,
				Blocks:    0,
				ATimeSec:  aTimeSec,
				MTimeSec:  mTimeSec,
				CTimeSec:  cTimeSec,
				ATimeNSec: aTimeNSec,
				MTimeNSec: mTimeNSec,
				CTimeNSec: cTimeNSec,
				Mode:      getStatReply.FileMode,
				NLink:     uint32(getStatReply.NumLinks),
				UID:       getStatReply.UserID,
				GID:       getStatReply.GroupID,
				RDev:      0,
				BlkSize:   0,
				Padding:   0,
			},
		},
	}

	errno = 0
	return
}

// DoUnlink handles FUSE UNLINK: it looks up the target, takes an Exclusive
// Lease, re-verifies the lookup (the name may have moved before the lease
// was granted), flushes pending writes, then issues RpcUnlink.
func (dummy *globalsStruct) DoUnlink(inHeader *fission.InHeader, unlinkIn *fission.UnlinkIn) (errno syscall.Errno) {
	var (
		err           error
		fileInode     *fileInodeStruct
		lookupReply   *jrpcfs.InodeReply
		lookupRequest *jrpcfs.LookupRequest
		unlinkReply   *jrpcfs.Reply
		unlinkRequest *jrpcfs.UnlinkRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoUnlink_calls, 1)

	lookupRequest = &jrpcfs.LookupRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(unlinkIn.Name[:]),
	}

	lookupReply = &jrpcfs.InodeReply{}

	err = globals.retryRPCClient.Send("RpcLookup", lookupRequest, lookupReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.ENOENT)
		return
	}

	fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(lookupReply.InodeNumber))

	// Make sure potentially file inode didn't move before we were able to ExclusiveLease it

	lookupReply = &jrpcfs.InodeReply{}

	err = globals.retryRPCClient.Send("RpcLookup", lookupRequest, lookupReply)
	if (nil != err) || (fileInode.InodeNumber != inode.InodeNumber(lookupReply.InodeNumber)) {
		fileInode.unlock(true)
		errno = convertErrToErrno(err, syscall.ENOENT)
		return
	}

	fileInode.doFlushIfNecessary()

	unlinkRequest = &jrpcfs.UnlinkRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(unlinkIn.Name[:]),
	}

	unlinkReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcUnlink", unlinkRequest, unlinkReply)
	if nil != err {
		fileInode.unlock(true)
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	fileInode.unlock(false)

	errno = 0
	return
}

// DoRmDir handles FUSE RMDIR: it removes the named directory via RpcRmdir.
func (dummy *globalsStruct) DoRmDir(inHeader *fission.InHeader, rmDirIn *fission.RmDirIn) (errno syscall.Errno) {
	var (
		err error
		unlinkReply   *jrpcfs.Reply
		unlinkRequest *jrpcfs.UnlinkRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoRmDir_calls, 1)

	unlinkRequest = &jrpcfs.UnlinkRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(rmDirIn.Name[:]),
	}

	unlinkReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcRmdir", unlinkRequest, unlinkReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// DoRename handles FUSE RENAME: if the destination name already exists, its
// inode is Exclusive-Leased and flushed first (re-verifying the lookup in
// case the name moved before the lease was granted); RpcMove then performs
// the rename, and any inode displaced by the move is flushed and destroyed.
func (dummy *globalsStruct) DoRename(inHeader *fission.InHeader, renameIn *fission.RenameIn) (errno syscall.Errno) {
	var (
		destroyReply   *jrpcfs.Reply
		destroyRequest *jrpcfs.DestroyRequest
		err            error
		fileInode      *fileInodeStruct
		lookupReply    *jrpcfs.InodeReply
		lookupRequest  *jrpcfs.LookupRequest
		moveReply      *jrpcfs.MoveReply
		moveRequest    *jrpcfs.MoveRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoRename_calls, 1)

	lookupRequest = &jrpcfs.LookupRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(renameIn.NewDir),
		},
		Basename: string(renameIn.NewName[:]),
	}

	lookupReply = &jrpcfs.InodeReply{}

	err = globals.retryRPCClient.Send("RpcLookup", lookupRequest, lookupReply)
	if nil == err {
		fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(lookupReply.InodeNumber))

		// Make sure potentially file inode didn't move before we were able to ExclusiveLease it

		lookupReply = &jrpcfs.InodeReply{}

		err = globals.retryRPCClient.Send("RpcLookup", lookupRequest, lookupReply)
		if (nil != err) || (fileInode.InodeNumber != inode.InodeNumber(lookupReply.InodeNumber)) {
			fileInode.unlock(true)
			fileInode = nil
		} else {
			fileInode.doFlushIfNecessary()
		}
	} else {
		fileInode = nil
	}

	moveRequest =
&jrpcfs.MoveRequest{ 980 MountID: globals.mountID, 981 SrcDirInodeNumber: int64(inHeader.NodeID), 982 SrcBasename: string(renameIn.OldName[:]), 983 DstDirInodeNumber: int64(renameIn.NewDir), 984 DstBasename: string(renameIn.NewName[:]), 985 } 986 987 moveReply = &jrpcfs.MoveReply{} 988 989 err = globals.retryRPCClient.Send("RpcMove", moveRequest, moveReply) 990 if nil == err { 991 errno = 0 992 } else { 993 errno = convertErrToErrno(err, syscall.EIO) 994 } 995 996 if nil != fileInode { 997 fileInode.unlock(false) 998 } 999 1000 if 0 != moveReply.ToDestroyInodeNumber { 1001 fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(moveReply.ToDestroyInodeNumber)) 1002 1003 fileInode.doFlushIfNecessary() 1004 1005 destroyRequest = &jrpcfs.DestroyRequest{ 1006 InodeHandle: jrpcfs.InodeHandle{ 1007 MountID: globals.mountID, 1008 InodeNumber: moveReply.ToDestroyInodeNumber, 1009 }, 1010 } 1011 1012 destroyReply = &jrpcfs.Reply{} 1013 1014 err = globals.retryRPCClient.Send("RpcDestroy", destroyRequest, destroyReply) 1015 if nil != err { 1016 logWarnf("RpcDestroy(InodeHandle: %#v) failed: err", err) 1017 } 1018 1019 fileInode.unlock(false) 1020 } 1021 1022 return 1023 } 1024 1025 func (dummy *globalsStruct) DoLink(inHeader *fission.InHeader, linkIn *fission.LinkIn) (linkOut *fission.LinkOut, errno syscall.Errno) { 1026 var ( 1027 aTimeNSec uint32 1028 aTimeSec uint64 1029 cTimeNSec uint32 1030 cTimeSec uint64 1031 err error 1032 fileInode *fileInodeStruct 1033 getStatReply *jrpcfs.StatStruct 1034 getStatRequest *jrpcfs.GetStatRequest 1035 linkReply *jrpcfs.Reply 1036 linkRequest *jrpcfs.LinkRequest 1037 mTimeNSec uint32 1038 mTimeSec uint64 1039 ) 1040 1041 _ = atomic.AddUint64(&globals.metrics.FUSE_DoLink_calls, 1) 1042 1043 fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(linkIn.OldNodeID)) 1044 1045 fileInode.doFlushIfNecessary() 1046 1047 linkRequest = &jrpcfs.LinkRequest{ 1048 InodeHandle: jrpcfs.InodeHandle{ 1049 MountID: globals.mountID, 1050 InodeNumber: 
				int64(inHeader.NodeID),
		},
		Basename:          string(linkIn.Name[:]),
		TargetInodeNumber: int64(linkIn.OldNodeID),
	}

	linkReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcLink", linkRequest, linkReply)
	if nil != err {
		fileInode.unlock(true)
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	// Re-stat the link target so the reply (and cachedStat) reflect the new NLink.
	getStatRequest = &jrpcfs.GetStatRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(linkIn.OldNodeID),
		},
	}

	getStatReply = &jrpcfs.StatStruct{}

	err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
	if nil != err {
		fileInode.unlock(true)
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	fileInode.cachedStat = getStatReply

	aTimeSec, aTimeNSec = nsToUnixTime(fileInode.cachedStat.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(fileInode.cachedStat.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(fileInode.cachedStat.CTimeNs)

	linkOut = &fission.LinkOut{
		EntryOut: fission.EntryOut{
			NodeID:         linkIn.OldNodeID,
			Generation:     0,
			EntryValidSec:  globals.entryValidSec,
			AttrValidSec:   globals.attrValidSec,
			EntryValidNSec: globals.entryValidNSec,
			AttrValidNSec:  globals.attrValidNSec,
			Attr: fission.Attr{
				Ino:       linkIn.OldNodeID,
				Size:      fileInode.cachedStat.Size,
				Blocks:    0, // fixAttrSizes() will compute this
				ATimeSec:  aTimeSec,
				MTimeSec:  mTimeSec,
				CTimeSec:  cTimeSec,
				ATimeNSec: aTimeNSec,
				MTimeNSec: mTimeNSec,
				CTimeNSec: cTimeNSec,
				Mode:      fileInode.cachedStat.FileMode,
				NLink:     uint32(fileInode.cachedStat.NumLinks),
				UID:       fileInode.cachedStat.UserID,
				GID:       fileInode.cachedStat.GroupID,
				RDev:      0,
				BlkSize:   0, // fixAttrSizes() will set this
				Padding:   0,
			},
		},
	}

	fileInode.unlock(false)

	fixAttrSizes(&linkOut.EntryOut.Attr)

	errno = 0
	return
}

// DoOpen handles FUSE OPEN: it verifies the inode is a regular file
// (stat'ing it if not cached), allocates a new file handle registered in the
// global FH maps, and — when O_TRUNC was requested — flushes under an
// Exclusive Lease and truncates the file to zero length.
func (dummy *globalsStruct) DoOpen(inHeader *fission.InHeader, openIn *fission.OpenIn) (openOut *fission.OpenOut, errno syscall.Errno) {
	var (
		err            error
		fhSet          fhSetType
		fileInode      *fileInodeStruct
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
		ok             bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoOpen_calls, 1)

	// O_TRUNC requires an Exclusive Lease (we will modify the file);
	// a plain open only needs a Shared Lease.
	if 0 != (openIn.Flags & fission.FOpenRequestTRUNC) {
		fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(inHeader.NodeID))
		fileInode.doFlushIfNecessary()
	} else {
		fileInode = lockInodeWithSharedLease(inode.InodeNumber(inHeader.NodeID))
	}

	if nil == fileInode.cachedStat {
		getStatRequest = &jrpcfs.GetStatRequest{
			InodeHandle: jrpcfs.InodeHandle{
				MountID:     globals.mountID,
				InodeNumber: int64(inHeader.NodeID),
			},
		}

		getStatReply = &jrpcfs.StatStruct{}

		err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
		if nil != err {
			fileInode.unlock(true)
			errno = convertErrToErrno(err, syscall.EIO)
			return
		}

		fileInode.cachedStat = getStatReply
	}

	// Only regular files may be opened through this path.
	if syscall.S_IFREG != (fileInode.cachedStat.FileMode & syscall.S_IFMT) {
		fileInode.unlock(true)
		errno = syscall.EINVAL
		return
	}

	globals.Lock()

	globals.lastFH++

	globals.fhToInodeNumberMap[globals.lastFH] = inHeader.NodeID

	fhSet, ok = globals.inodeNumberToFHMap[inHeader.NodeID]
	if !ok {
		fhSet = make(fhSetType)
	}
	fhSet[globals.lastFH] = struct{}{}
	globals.inodeNumberToFHMap[inHeader.NodeID] = fhSet

	openOut = &fission.OpenOut{
		FH:        globals.lastFH,
		OpenFlags: 0,
		Padding:   0,
	}

	globals.Unlock()

	if 0 != (openIn.Flags & fission.FOpenRequestTRUNC) {
		errno = fileInode.setSize(inHeader.NodeID, 0)
		if 0 != errno {
fileInode.unlock(true) 1194 return 1195 } 1196 } 1197 1198 fileInode.unlock(false) 1199 1200 errno = 0 1201 return 1202 } 1203 1204 func (dummy *globalsStruct) DoRead(inHeader *fission.InHeader, readIn *fission.ReadIn) (readOut *fission.ReadOut, errno syscall.Errno) { 1205 var ( 1206 curObjectOffset uint64 1207 err error 1208 fhInodeNumber uint64 1209 fileInode *fileInodeStruct 1210 getStatReply *jrpcfs.StatStruct 1211 getStatRequest *jrpcfs.GetStatRequest 1212 logSegmentCacheElement *logSegmentCacheElementStruct 1213 logSegmentCacheElementBufEndingPosition uint64 1214 logSegmentCacheElementBufRemainingLen uint64 1215 logSegmentCacheElementBufSelectedLen uint64 1216 logSegmentCacheElementBufStartingPosition uint64 1217 ok bool 1218 readPlan []interface{} 1219 readPlanSpan uint64 1220 readPlanStepAsInterface interface{} 1221 readPlanStepAsMultiObjectExtentStruct *multiObjectExtentStruct 1222 readPlanStepAsSingleObjectExtentWithLink *singleObjectExtentWithLinkStruct 1223 readPlanStepRemainingLength uint64 1224 ) 1225 1226 _ = atomic.AddUint64(&globals.metrics.FUSE_DoRead_calls, 1) 1227 globals.stats.FUSEDoReadBytes.Add(uint64(readIn.Size)) 1228 1229 fileInode = lockInodeWithSharedLease(inode.InodeNumber(inHeader.NodeID)) 1230 1231 if nil == fileInode.cachedStat { 1232 getStatRequest = &jrpcfs.GetStatRequest{ 1233 InodeHandle: jrpcfs.InodeHandle{ 1234 MountID: globals.mountID, 1235 InodeNumber: int64(inHeader.NodeID), 1236 }, 1237 } 1238 1239 getStatReply = &jrpcfs.StatStruct{} 1240 1241 err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply) 1242 if nil != err { 1243 fileInode.unlock(true) 1244 errno = convertErrToErrno(err, syscall.EIO) 1245 return 1246 } 1247 1248 fileInode.cachedStat = getStatReply 1249 } 1250 1251 globals.Lock() 1252 1253 fhInodeNumber, ok = globals.fhToInodeNumberMap[readIn.FH] 1254 if !ok { 1255 logFatalf("DoRead(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, readIn.FH) 1256 } 1257 if fhInodeNumber != 
inHeader.NodeID { 1258 logFatalf("DoRead(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, readIn.FH, fhInodeNumber) 1259 } 1260 1261 globals.Unlock() 1262 1263 // fileInode = referenceFileInode(inode.InodeNumber(inHeader.NodeID)) 1264 if nil == fileInode { 1265 logFatalf("DoRead(NodeID=%v,FH=%v) called for non-FileInode", inHeader.NodeID, readIn.FH) 1266 } 1267 // defer fileInode.dereference() 1268 1269 err = fileInode.ensureCachedStatPopulatedWhileLocked() 1270 if nil != err { 1271 fileInode.unlock(true) 1272 errno = convertErrToErrno(err, syscall.EIO) 1273 return 1274 } 1275 1276 // grantedLock = fileInode.getSharedLock() 1277 // defer grantedLock.release() 1278 1279 err = fileInode.populateExtentMap(uint64(readIn.Offset), uint64(readIn.Size)) 1280 if nil != err { 1281 fileInode.unlock(true) 1282 errno = convertErrToErrno(err, syscall.EIO) 1283 return 1284 } 1285 1286 readPlan, readPlanSpan = fileInode.getReadPlan(uint64(readIn.Offset), uint64(readIn.Size)) 1287 1288 if (nil == readPlan) || (0 == readPlanSpan) { 1289 readOut = &fission.ReadOut{ 1290 Data: make([]byte, 0), 1291 } 1292 } else { 1293 readOut = &fission.ReadOut{ 1294 Data: make([]byte, 0, readPlanSpan), 1295 } 1296 1297 for _, readPlanStepAsInterface = range readPlan { 1298 switch readPlanStepAsInterface.(type) { 1299 case *multiObjectExtentStruct: 1300 readPlanStepAsMultiObjectExtentStruct = readPlanStepAsInterface.(*multiObjectExtentStruct) 1301 1302 if "" == readPlanStepAsMultiObjectExtentStruct.objectName { 1303 // Zero-fill for readPlanStep.length 1304 1305 readOut.Data = append(readOut.Data, make([]byte, readPlanStepAsMultiObjectExtentStruct.length)...) 1306 } else { 1307 // Fetch LogSegment data... 
possibly crossing LogSegmentCacheLine boundaries 1308 1309 curObjectOffset = readPlanStepAsMultiObjectExtentStruct.objectOffset 1310 readPlanStepRemainingLength = readPlanStepAsMultiObjectExtentStruct.length 1311 1312 for readPlanStepRemainingLength > 0 { 1313 logSegmentCacheElement = fetchLogSegmentCacheLine(readPlanStepAsMultiObjectExtentStruct.containerName, readPlanStepAsMultiObjectExtentStruct.objectName, curObjectOffset) 1314 1315 if logSegmentCacheElementStateGetFailed == logSegmentCacheElement.state { 1316 fileInode.unlock(true) 1317 errno = syscall.EIO 1318 return 1319 } 1320 1321 logSegmentCacheElementBufStartingPosition = curObjectOffset - logSegmentCacheElement.startingOffset 1322 logSegmentCacheElementBufRemainingLen = uint64(len(logSegmentCacheElement.buf)) - logSegmentCacheElementBufStartingPosition 1323 1324 if logSegmentCacheElementBufRemainingLen <= readPlanStepRemainingLength { 1325 logSegmentCacheElementBufSelectedLen = logSegmentCacheElementBufRemainingLen 1326 } else { 1327 logSegmentCacheElementBufSelectedLen = readPlanStepRemainingLength 1328 } 1329 1330 logSegmentCacheElementBufEndingPosition = logSegmentCacheElementBufStartingPosition + logSegmentCacheElementBufSelectedLen 1331 1332 readOut.Data = append(readOut.Data, logSegmentCacheElement.buf[logSegmentCacheElementBufStartingPosition:logSegmentCacheElementBufEndingPosition]...) 1333 1334 curObjectOffset += logSegmentCacheElementBufSelectedLen 1335 readPlanStepRemainingLength -= logSegmentCacheElementBufSelectedLen 1336 } 1337 } 1338 case *singleObjectExtentWithLinkStruct: 1339 readPlanStepAsSingleObjectExtentWithLink = readPlanStepAsInterface.(*singleObjectExtentWithLinkStruct) 1340 1341 if nil == readPlanStepAsSingleObjectExtentWithLink.chunkedPutContext { 1342 // Zero-fill for readPlanStep.length 1343 1344 readOut.Data = append(readOut.Data, make([]byte, readPlanStepAsSingleObjectExtentWithLink.length)...) 1345 } else { 1346 // Fetch LogSegment data... 
					// ...from readPlanStepAsSingleObjectExtentWithLink.chunkedPutContextStruct

					_ = atomic.AddUint64(&globals.metrics.LogSegmentPUTReadHits, 1)

					readOut.Data = append(readOut.Data, readPlanStepAsSingleObjectExtentWithLink.chunkedPutContext.buf[readPlanStepAsSingleObjectExtentWithLink.objectOffset:readPlanStepAsSingleObjectExtentWithLink.objectOffset+readPlanStepAsSingleObjectExtentWithLink.length]...)
				}
			default:
				logFatalf("getReadPlan() returned an invalid readPlanStep: %v", readPlanStepAsInterface)
			}
		}
	}

	fileInode.unlock(true)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoRead_bytes, uint64(len(readOut.Data)))

	errno = 0
	return
}

// DoWrite handles the FUSE WRITE request. It takes an exclusive lease on the
// inode, validates the supplied FH, appends the written bytes to an open
// chunkedPutContext (opening a new one — and registering the inode on the
// dirty list — when none is open), extends the cached file size if the write
// grows the file, and triggers a flush once the context buffer reaches
// MaxFlushSize.
func (dummy *globalsStruct) DoWrite(inHeader *fission.InHeader, writeIn *fission.WriteIn) (writeOut *fission.WriteOut, errno syscall.Errno) {
	var (
		chunkedPutContext        *chunkedPutContextStruct
		chunkedPutContextElement *list.Element
		err                      error
		fhInodeNumber            uint64
		fileInode                *fileInodeStruct
		ok                       bool
		singleObjectExtent       *singleObjectExtentStruct
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoWrite_calls, 1)

	fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(inHeader.NodeID))

	globals.Lock()

	// Sanity-check that the FH is known and owned by this inode
	fhInodeNumber, ok = globals.fhToInodeNumberMap[writeIn.FH]
	if !ok {
		logFatalf("DoWrite(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, writeIn.FH)
	}
	if fhInodeNumber != inHeader.NodeID {
		logFatalf("DoWrite(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, writeIn.FH, fhInodeNumber)
	}

	globals.Unlock()

	// fileInode = referenceFileInode(inode.InodeNumber(inHeader.NodeID))
	if nil == fileInode {
		logFatalf("DoWrite(NodeID=%v,FH=%v) called for non-FileInode", inHeader.NodeID, writeIn.FH)
	}

	err = fileInode.ensureCachedStatPopulatedWhileLocked()
	if nil != err {
		// NOTE(review): unlike the corresponding error path in DoRead, this
		// return does not call fileInode.unlock() — confirm whether the
		// exclusive lease is intentionally left held here or this is a leak.
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	// Grab quota to start a fresh chunkedPutContext before calling fileInode.getExclusiveLock()
	// since, in the pathologic case where only fileInode has any outstanding chunkedPutContext's,
	// they can only be complete()'d while holding fileInode.getExclusiveLock() and we would
	// deadlock.

	_ = <-globals.fileInodeDirtyLogSegmentChan

	// grantedLock = fileInode.getExclusiveLock()

	if 0 == fileInode.chunkedPutList.Len() {
		// No chunkedPutContext is present (so none can be open), so open one

		_ = atomic.AddUint64(&globals.metrics.LogSegmentPUTs, 1)

		chunkedPutContext = &chunkedPutContextStruct{
			buf:            make([]byte, 0),
			fileInode:      fileInode,
			state:          chunkedPutContextStateOpen,
			sendChan:       make(chan struct{}, 1),
			wakeChan:       make(chan struct{}, 1),
			inRead:         false,
			flushRequested: false,
		}

		chunkedPutContext.extentMap = sortedmap.NewLLRBTree(sortedmap.CompareUint64, chunkedPutContext)
		chunkedPutContext.chunkedPutListElement = fileInode.chunkedPutList.PushBack(chunkedPutContext)

		// fileInode.reference()

		pruneFileInodeDirtyListIfNecessary()

		globals.Lock()
		fileInode.dirtyListElement = globals.fileInodeDirtyList.PushBack(fileInode)
		globals.Unlock()

		go chunkedPutContext.sendDaemon()
	} else {
		// Inode is already dirty — keep it at the LRU end of the dirty list
		globals.Lock()
		globals.fileInodeDirtyList.MoveToBack(fileInode.dirtyListElement)
		globals.Unlock()

		chunkedPutContextElement = fileInode.chunkedPutList.Back()
		chunkedPutContext = chunkedPutContextElement.Value.(*chunkedPutContextStruct)

		if chunkedPutContextStateOpen == chunkedPutContext.state {
			// Use this most recent (and open) chunkedPutContext... so we can give back our chunkedPutContext quota

			globals.fileInodeDirtyLogSegmentChan <- struct{}{}
		} else {
			// Most recent chunkedPutContext is closed, so open a new one

			_ = atomic.AddUint64(&globals.metrics.LogSegmentPUTs, 1)

			chunkedPutContext = &chunkedPutContextStruct{
				buf:            make([]byte, 0),
				fileInode:      fileInode,
				state:          chunkedPutContextStateOpen,
				sendChan:       make(chan struct{}, 1),
				wakeChan:       make(chan struct{}, 1),
				inRead:         false,
				flushRequested: false,
			}

			chunkedPutContext.extentMap = sortedmap.NewLLRBTree(sortedmap.CompareUint64, chunkedPutContext)
			chunkedPutContext.chunkedPutListElement = fileInode.chunkedPutList.PushBack(chunkedPutContext)

			// fileInode.reference()

			go chunkedPutContext.sendDaemon()
		}
	}

	// Record where this write lands in the (open) chunked PUT buffer
	singleObjectExtent = &singleObjectExtentStruct{
		fileOffset:   uint64(writeIn.Offset),
		objectOffset: uint64(len(chunkedPutContext.buf)),
		length:       uint64(len(writeIn.Data)),
	}

	chunkedPutContext.mergeSingleObjectExtent(singleObjectExtent)

	// Extend the cached file size if this write grows the file
	if (singleObjectExtent.fileOffset + singleObjectExtent.length) > fileInode.cachedStat.Size {
		fileInode.cachedStat.Size = singleObjectExtent.fileOffset + singleObjectExtent.length
	}

	chunkedPutContext.buf = append(chunkedPutContext.buf, writeIn.Data...)

	select {
	case chunkedPutContext.sendChan <- struct{}{}:
		// We just notified sendDaemon()
	default:
		// We didn't need to notify sendDaemon()
	}

	if uint64(len(chunkedPutContext.buf)) >= globals.config.MaxFlushSize {
		// Time to do a Flush
		chunkedPutContext.state = chunkedPutContextStateClosing
		close(chunkedPutContext.sendChan)
	}

	// grantedLock.release()

	// fileInode.dereference()

	fileInode.unlock(false)

	writeOut = &fission.WriteOut{
		Size:    uint32(len(writeIn.Data)),
		Padding: 0,
	}

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoWrite_bytes, uint64(writeOut.Size))
	globals.stats.FUSEDoWriteBytes.Add(uint64(writeOut.Size))

	errno = 0
	return
}

// DoStatFS handles the FUSE STATFS request by forwarding it to the ProxyFS
// server via RpcStatVFS and mapping the reply into the kernel's KStatFS shape.
func (dummy *globalsStruct) DoStatFS(inHeader *fission.InHeader) (statFSOut *fission.StatFSOut, errno syscall.Errno) {
	var (
		err            error
		statVFSRequest *jrpcfs.StatVFSRequest
		statVFSReply   *jrpcfs.StatVFS
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoStatFS_calls, 1)

	statVFSRequest = &jrpcfs.StatVFSRequest{
		MountID: globals.mountID,
	}

	statVFSReply = &jrpcfs.StatVFS{}

	err = globals.retryRPCClient.Send("RpcStatVFS", statVFSRequest, statVFSReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	statFSOut = &fission.StatFSOut{
		KStatFS: fission.KStatFS{
			Blocks:  statVFSReply.TotalBlocks,
			BFree:   statVFSReply.FreeBlocks,
			BAvail:  statVFSReply.AvailBlocks,
			Files:   statVFSReply.TotalInodes,
			FFree:   statVFSReply.FreeInodes,
			BSize:   uint32(statVFSReply.BlockSize),
			NameLen: uint32(statVFSReply.MaxFilenameLen),
			FRSize:  uint32(statVFSReply.FragmentSize),
			Padding: 0,
			Spare:   [6]uint32{0, 0, 0, 0, 0, 0},
		},
	}

	errno = 0
	return
}

// DoRelease handles the FUSE RELEASE request for a file handle: it validates
// the FH against the globals FH maps and removes it from both maps.
func (dummy *globalsStruct) DoRelease(inHeader
	*fission.InHeader, releaseIn *fission.ReleaseIn) (errno syscall.Errno) {
	var (
		fhInodeNumber uint64
		fhSet         fhSetType
		ok            bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoRelease_calls, 1)

	globals.Lock()

	// Sanity-check that the FH is known and owned by this inode
	fhInodeNumber, ok = globals.fhToInodeNumberMap[releaseIn.FH]
	if !ok {
		logFatalf("DoRelease(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, releaseIn.FH)
	}
	if fhInodeNumber != inHeader.NodeID {
		logFatalf("DoRelease(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, releaseIn.FH, fhInodeNumber)
	}

	delete(globals.fhToInodeNumberMap, releaseIn.FH)

	fhSet, ok = globals.inodeNumberToFHMap[inHeader.NodeID]
	if !ok {
		logFatalf("DoRelease(NodeID=%v,FH=%v) called for unknown NodeID", inHeader.NodeID, releaseIn.FH)
	}

	_, ok = fhSet[releaseIn.FH]
	if !ok {
		logFatalf("DoRelease(NodeID=%v,FH=%v) called for FH missing from fhSet: %v", inHeader.NodeID, releaseIn.FH, fhSet)
	}

	delete(fhSet, releaseIn.FH)

	// Drop the per-inode set entirely once its last FH is released
	if 0 == len(fhSet) {
		delete(globals.inodeNumberToFHMap, inHeader.NodeID)
	} else {
		globals.inodeNumberToFHMap[inHeader.NodeID] = fhSet
	}

	globals.Unlock()

	errno = 0
	return
}

// DoFSync handles the FUSE FSYNC request: after validating the FH, it takes an
// exclusive lease on the inode and flushes any pending chunked PUT data.
func (dummy *globalsStruct) DoFSync(inHeader *fission.InHeader, fSyncIn *fission.FSyncIn) (errno syscall.Errno) {
	var (
		fhInodeNumber uint64
		fileInode     *fileInodeStruct
		ok            bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoFSync_calls, 1)

	globals.Lock()

	// Sanity-check that the FH is known and owned by this inode
	fhInodeNumber, ok = globals.fhToInodeNumberMap[fSyncIn.FH]
	if !ok {
		logFatalf("DoFSync(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, fSyncIn.FH)
	}
	if fhInodeNumber != inHeader.NodeID {
		logFatalf("DoFSync(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, fSyncIn.FH, fhInodeNumber)
	}

	globals.Unlock()

	fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(inHeader.NodeID))
	// fileInode = referenceFileInode(inode.InodeNumber(inHeader.NodeID))
	if nil == fileInode {
		logFatalf("DoFSync(NodeID=%v,FH=%v) called for non-FileInode", inHeader.NodeID, fSyncIn.FH)
	}

	fileInode.doFlushIfNecessary()

	// fileInode.dereference()
	fileInode.unlock(false)

	errno = 0
	return
}

// DoSetXAttr handles the FUSE SETXATTR request by forwarding it to the ProxyFS
// server via RpcSetXAttr. Returns ENOSYS when XAttr support is disabled.
func (dummy *globalsStruct) DoSetXAttr(inHeader *fission.InHeader, setXAttrIn *fission.SetXAttrIn) (errno syscall.Errno) {
	var (
		err             error
		setXAttrReply   *jrpcfs.Reply
		setXAttrRequest *jrpcfs.SetXAttrRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoSetXAttr_calls, 1)

	if !globals.config.XAttrEnabled {
		errno = syscall.ENOSYS
		return
	}

	setXAttrRequest = &jrpcfs.SetXAttrRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		AttrName:  string(setXAttrIn.Name[:]),
		AttrValue: setXAttrIn.Data[:],
		AttrFlags: fs.SetXAttrCreateOrReplace,
	}

	setXAttrReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcSetXAttr", setXAttrRequest, setXAttrReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoSetXAttr_bytes, uint64(len(setXAttrIn.Data)))

	errno = 0
	return
}

// DoGetXAttr handles the FUSE GETXATTR request via RpcGetXAttr. A zero
// getXAttrIn.Size is the kernel's size-probe: only the value length is
// returned. ERANGE is returned when the value exceeds the caller's buffer.
func (dummy *globalsStruct) DoGetXAttr(inHeader *fission.InHeader, getXAttrIn *fission.GetXAttrIn) (getXAttrOut *fission.GetXAttrOut, errno syscall.Errno) {
	var (
		err             error
		getXAttrReply   *jrpcfs.GetXAttrReply
		getXAttrRequest *jrpcfs.GetXAttrRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoGetXAttr_calls, 1)

	if !globals.config.XAttrEnabled {
		errno = syscall.ENOSYS
		return
	}

	getXAttrRequest = &jrpcfs.GetXAttrRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		AttrName: string(getXAttrIn.Name[:]),
	}

	getXAttrReply = &jrpcfs.GetXAttrReply{}

	err = globals.retryRPCClient.Send("RpcGetXAttr", getXAttrRequest, getXAttrReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	// Size probe: report the value length without returning any data
	if 0 == getXAttrIn.Size {
		getXAttrOut = &fission.GetXAttrOut{
			Size:    uint32(len(getXAttrReply.AttrValue)),
			Padding: 0,
			Data:    make([]byte, 0),
		}
		errno = 0
		return
	}

	if uint32(len(getXAttrReply.AttrValue)) > getXAttrIn.Size {
		errno = syscall.ERANGE
		return
	}

	getXAttrOut = &fission.GetXAttrOut{
		Size:    uint32(len(getXAttrReply.AttrValue)),
		Padding: 0,
		Data:    getXAttrReply.AttrValue,
	}

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoGetXAttr_bytes, uint64(getXAttrOut.Size))

	errno = 0
	return
}

// DoListXAttr handles the FUSE LISTXATTR request via RpcListXAttr. totalSize
// counts each name plus its NUL terminator; a zero listXAttrIn.Size is the
// kernel's size-probe and returns only that total.
func (dummy *globalsStruct) DoListXAttr(inHeader *fission.InHeader, listXAttrIn *fission.ListXAttrIn) (listXAttrOut *fission.ListXAttrOut, errno syscall.Errno) {
	var (
		err              error
		listXAttrReply   *jrpcfs.ListXAttrReply
		listXAttrRequest *jrpcfs.ListXAttrRequest
		totalSize        uint32
		xAttrIndex       int
		xAttrName        string
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoListXAttr_calls, 1)

	if !globals.config.XAttrEnabled {
		errno = syscall.ENOSYS
		return
	}

	listXAttrRequest = &jrpcfs.ListXAttrRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
	}

	listXAttrReply = &jrpcfs.ListXAttrReply{}

	err = globals.retryRPCClient.Send("RpcListXAttr", listXAttrRequest, listXAttrReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	totalSize = 0

	// Each name is reported to the kernel with a trailing NUL byte
	for _, xAttrName = range listXAttrReply.AttrNames {
		totalSize += uint32(len(xAttrName) + 1)
	}

	// Size probe: report the total length without returning any names
	if 0 == listXAttrIn.Size {
		listXAttrOut = &fission.ListXAttrOut{
			Size:    totalSize,
			Padding: 0,
			Name:    make([][]byte, 0),
		}
		errno = 0
		return
	}

	listXAttrOut = &fission.ListXAttrOut{
		Size:    totalSize, // unnecessary
		Padding: 0,
		Name:    make([][]byte, len(listXAttrReply.AttrNames)),
	}

	for xAttrIndex, xAttrName = range listXAttrReply.AttrNames {
		listXAttrOut.Name[xAttrIndex] = []byte(xAttrName)
	}

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoListXAttr_names, uint64(len(listXAttrOut.Name)))

	errno = 0
	return
}

// DoRemoveXAttr handles the FUSE REMOVEXATTR request via RpcRemoveXAttr.
// Returns ENOSYS when XAttr support is disabled.
func (dummy *globalsStruct) DoRemoveXAttr(inHeader *fission.InHeader, removeXAttrIn *fission.RemoveXAttrIn) (errno syscall.Errno) {
	var (
		err                error
		removeXAttrReply   *jrpcfs.Reply
		removeXAttrRequest *jrpcfs.RemoveXAttrRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoRemoveXAttr_calls, 1)

	if !globals.config.XAttrEnabled {
		errno = syscall.ENOSYS
		return
	}

	removeXAttrRequest = &jrpcfs.RemoveXAttrRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		AttrName: string(removeXAttrIn.Name[:]),
	}

	removeXAttrReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcRemoveXAttr", removeXAttrRequest, removeXAttrReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// DoFlush handles the FUSE FLUSH request. It only validates the FH; no data is
// pushed here (flushing happens via DoFSync / the chunked PUT machinery).
func (dummy *globalsStruct) DoFlush(inHeader *fission.InHeader, flushIn *fission.FlushIn) (errno syscall.Errno) {
	var (
		fhInodeNumber uint64
		ok            bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoFlush_calls, 1)

	globals.Lock()
	// Sanity-check that the FH is known and owned by this inode
	fhInodeNumber, ok = globals.fhToInodeNumberMap[flushIn.FH]
	if !ok {
		logFatalf("DoFlush(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, flushIn.FH)
	}
	if fhInodeNumber != inHeader.NodeID {
		logFatalf("DoFlush(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, flushIn.FH, fhInodeNumber)
	}

	globals.Unlock()

	errno = 0
	return
}

// DoInit handles the FUSE INIT handshake, echoing the kernel's version and
// read-ahead values and selecting the flag mask according to whether
// READDIRPLUS support is enabled in the configuration.
func (dummy *globalsStruct) DoInit(inHeader *fission.InHeader, initIn *fission.InitIn) (initOut *fission.InitOut, errno syscall.Errno) {
	var (
		initOutFlags uint32
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoInit_calls, 1)

	if globals.config.ReadDirPlusEnabled {
		initOutFlags = initOutFlagsMaskReadDirPlusEnabled
	} else {
		initOutFlags = initOutFlagsMaskReadDirPlusDisabled
	}

	initOut = &fission.InitOut{
		Major:                initIn.Major,
		Minor:                initIn.Minor,
		MaxReadAhead:         initIn.MaxReadAhead,
		Flags:                initOutFlags,
		MaxBackground:        globals.config.FUSEMaxBackground,
		CongestionThreshhold: globals.config.FUSECongestionThreshhold,
		MaxWrite:             globals.config.FUSEMaxWrite,
	}

	errno = 0
	return
}

// DoOpenDir handles the FUSE OPENDIR request: it stats the inode via
// RpcGetStat, rejects non-directories with ENOTDIR, and allocates a new
// directory file handle in the globals FH maps.
func (dummy *globalsStruct) DoOpenDir(inHeader *fission.InHeader, openDirIn *fission.OpenDirIn) (openDirOut *fission.OpenDirOut, errno syscall.Errno) {
	var (
		err            error
		fhSet          fhSetType
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
		ok             bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoOpenDir_calls, 1)

	getStatRequest = &jrpcfs.GetStatRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
	}

	getStatReply = &jrpcfs.StatStruct{}

	err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	if syscall.S_IFDIR != (getStatReply.FileMode & syscall.S_IFMT) {
		errno = syscall.ENOTDIR
		return
	}

	globals.Lock()

	globals.lastFH++

	globals.fhToInodeNumberMap[globals.lastFH] = inHeader.NodeID

	fhSet, ok = globals.inodeNumberToFHMap[inHeader.NodeID]
	if !ok {
		fhSet = make(fhSetType)
	}
	fhSet[globals.lastFH] = struct{}{}
	globals.inodeNumberToFHMap[inHeader.NodeID] = fhSet

	openDirOut = &fission.OpenDirOut{
		FH:        globals.lastFH,
		OpenFlags: 0,
		Padding:   0,
	}

	globals.Unlock()

	errno = 0
	return
}

// DoReadDir handles the FUSE READDIR request. It validates the FH, fetches up
// to maxEntries directory entries from the server via RpcReaddirByLoc (resuming
// from readDirIn.Offset), and packs as many aligned DirEnt records as fit in
// the kernel-supplied buffer size.
func (dummy *globalsStruct) DoReadDir(inHeader *fission.InHeader, readDirIn *fission.ReadDirIn) (readDirOut *fission.ReadDirOut, errno syscall.Errno) {
	var (
		curSize              uint32
		dirEntIndex          uint64
		dirEntNameLenAligned uint32
		dirEnt               fission.DirEnt
		dirEntSize           uint32
		dirEntry             *jrpcfs.DirEntry
		err                  error
		fhInodeNumber        uint64
		maxEntries           uint64
		numEntries           uint64
		ok                   bool
		readdirByLocRequest  *jrpcfs.ReaddirByLocRequest
		readdirReply         *jrpcfs.ReaddirReply
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoReadDir_calls, 1)

	globals.Lock()

	// Sanity-check that the FH is known and owned by this inode
	fhInodeNumber, ok = globals.fhToInodeNumberMap[readDirIn.FH]
	if !ok {
		logFatalf("DoReadDir(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, readDirIn.FH)
	}
	if fhInodeNumber != inHeader.NodeID {
		logFatalf("DoReadDir(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, readDirIn.FH, fhInodeNumber)
	}

	globals.Unlock()

	// Upper bound on entries: assume the minimum possible entry size
	// (fixed portion plus a 1-byte name), rounding up
	maxEntries = (uint64(readDirIn.Size) + fission.DirEntFixedPortionSize + 1 - 1) / (fission.DirEntFixedPortionSize + 1)

	readdirByLocRequest = &jrpcfs.ReaddirByLocRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		MaxEntries:         maxEntries,
		PrevDirEntLocation: int64(readDirIn.Offset) - 1,
	}

	readdirReply = &jrpcfs.ReaddirReply{}

	err = globals.retryRPCClient.Send("RpcReaddirByLoc", readdirByLocRequest, readdirReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	numEntries = uint64(len(readdirReply.DirEnts))

	readDirOut = &fission.ReadDirOut{
		DirEnt: make([]fission.DirEnt, 0, numEntries),
	}

	curSize = 0

	for dirEntIndex = 0; dirEntIndex < numEntries; dirEntIndex++ {
		dirEntry = &readdirReply.DirEnts[dirEntIndex]

		// Names are padded up to the FUSE DirEnt alignment boundary
		dirEntNameLenAligned = (uint32(len(dirEntry.Basename)) + (fission.DirEntAlignment - 1)) & ^uint32(fission.DirEntAlignment-1)
		dirEntSize = fission.DirEntFixedPortionSize + dirEntNameLenAligned

		// Stop once the next entry would overflow the kernel's buffer
		if (curSize + dirEntSize) > readDirIn.Size {
			break
		}

		dirEnt = fission.DirEnt{
			Ino:     uint64(dirEntry.InodeNumber),
			Off:     uint64(dirEntry.NextDirLocation),
			NameLen: uint32(len(dirEntry.Basename)), // unnecessary
			Type:    uint32(dirEntry.FileType),
			Name:    []byte(dirEntry.Basename),
		}

		readDirOut.DirEnt = append(readDirOut.DirEnt, dirEnt)

		curSize += dirEntSize
	}

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoReadDir_entries, uint64(len(readDirOut.DirEnt)))

	errno = 0
	return
}

// DoReleaseDir handles the FUSE RELEASEDIR request: it validates the FH
// against the globals FH maps and removes it from both maps.
func (dummy *globalsStruct) DoReleaseDir(inHeader *fission.InHeader, releaseDirIn *fission.ReleaseDirIn) (errno syscall.Errno) {
	var (
		fhInodeNumber uint64
		fhSet         fhSetType
		ok            bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoReleaseDir_calls, 1)

	globals.Lock()

	// Sanity-check that the FH is known and owned by this inode
	fhInodeNumber, ok = globals.fhToInodeNumberMap[releaseDirIn.FH]
	if !ok {
		logFatalf("DoReleaseDir(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, releaseDirIn.FH)
	}
	if fhInodeNumber != inHeader.NodeID {
logFatalf("DoReleaseDir(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, releaseDirIn.FH, fhInodeNumber) 2046 } 2047 2048 delete(globals.fhToInodeNumberMap, releaseDirIn.FH) 2049 2050 fhSet, ok = globals.inodeNumberToFHMap[inHeader.NodeID] 2051 if !ok { 2052 logFatalf("DoReleaseDir(NodeID=%v,FH=%v) called for unknown NodeID", inHeader.NodeID, releaseDirIn.FH) 2053 } 2054 2055 _, ok = fhSet[releaseDirIn.FH] 2056 if !ok { 2057 logFatalf("DoReleaseDir(NodeID=%v,FH=%v) called for FH missing from fhSet: %v", inHeader.NodeID, releaseDirIn.FH, fhSet) 2058 } 2059 2060 delete(fhSet, releaseDirIn.FH) 2061 2062 if 0 == len(fhSet) { 2063 delete(globals.inodeNumberToFHMap, inHeader.NodeID) 2064 } else { 2065 globals.inodeNumberToFHMap[inHeader.NodeID] = fhSet 2066 } 2067 2068 globals.Unlock() 2069 2070 errno = 0 2071 return 2072 } 2073 2074 func (dummy *globalsStruct) DoFSyncDir(inHeader *fission.InHeader, fSyncDirIn *fission.FSyncDirIn) (errno syscall.Errno) { 2075 var ( 2076 fhInodeNumber uint64 2077 ok bool 2078 ) 2079 2080 _ = atomic.AddUint64(&globals.metrics.FUSE_DoFSyncDir_calls, 1) 2081 2082 globals.Lock() 2083 2084 fhInodeNumber, ok = globals.fhToInodeNumberMap[fSyncDirIn.FH] 2085 if !ok { 2086 logFatalf("DoFSync(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, fSyncDirIn.FH) 2087 } 2088 if fhInodeNumber != inHeader.NodeID { 2089 logFatalf("DoFSync(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, fSyncDirIn.FH, fhInodeNumber) 2090 } 2091 2092 globals.Unlock() 2093 2094 errno = 0 2095 return 2096 } 2097 2098 func (dummy *globalsStruct) DoGetLK(inHeader *fission.InHeader, getLKIn *fission.GetLKIn) (getLKOut *fission.GetLKOut, errno syscall.Errno) { 2099 _ = atomic.AddUint64(&globals.metrics.FUSE_DoGetLK_calls, 1) 2100 errno = syscall.ENOSYS 2101 return 2102 } 2103 func (dummy *globalsStruct) DoSetLK(inHeader *fission.InHeader, setLKIn *fission.SetLKIn) (errno syscall.Errno) { 2104 _ = 
	atomic.AddUint64(&globals.metrics.FUSE_DoSetLK_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoSetLKW is not supported; blocking POSIX lock requests are rejected with ENOSYS.
func (dummy *globalsStruct) DoSetLKW(inHeader *fission.InHeader, setLKWIn *fission.SetLKWIn) (errno syscall.Errno) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoSetLKW_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoAccess handles the FUSE ACCESS request by forwarding the caller's
// UID/GID and requested access mask to the ProxyFS server via RpcAccess.
func (dummy *globalsStruct) DoAccess(inHeader *fission.InHeader, accessIn *fission.AccessIn) (errno syscall.Errno) {
	var (
		err           error
		accessReply   *jrpcfs.Reply
		accessRequest *jrpcfs.AccessRequest
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoAccess_calls, 1)

	accessRequest = &jrpcfs.AccessRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		UserID:     int32(inHeader.UID),
		GroupID:    int32(inHeader.GID),
		AccessMode: accessIn.Mask,
	}

	accessReply = &jrpcfs.Reply{}

	err = globals.retryRPCClient.Send("RpcAccess", accessRequest, accessReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	errno = 0
	return
}

// DoCreate handles the FUSE CREATE request: it creates the file via RpcCreate
// (permission bits only — os.ModePerm masks out any type bits), re-stats the
// new inode via RpcGetStat to build the EntryOut attributes, allocates a file
// handle for it in the globals FH maps, and replies with FOpenResponseDirectIO
// set so the kernel bypasses its page cache for this file.
func (dummy *globalsStruct) DoCreate(inHeader *fission.InHeader, createIn *fission.CreateIn) (createOut *fission.CreateOut, errno syscall.Errno) {
	var (
		aTimeNSec      uint32
		aTimeSec       uint64
		cTimeNSec      uint32
		cTimeSec       uint64
		createReply    *jrpcfs.InodeReply
		createRequest  *jrpcfs.CreateRequest
		err            error
		fhSet          fhSetType
		getStatReply   *jrpcfs.StatStruct
		getStatRequest *jrpcfs.GetStatRequest
		mTimeNSec      uint32
		mTimeSec       uint64
		ok             bool
	)

	_ = atomic.AddUint64(&globals.metrics.FUSE_DoCreate_calls, 1)

	createRequest = &jrpcfs.CreateRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: int64(inHeader.NodeID),
		},
		Basename: string(createIn.Name[:]),
		UserID:   int32(inHeader.UID),
		GroupID:  int32(inHeader.GID),
		FileMode: createIn.Mode & uint32(os.ModePerm),
	}

	createReply = &jrpcfs.InodeReply{}

	err = globals.retryRPCClient.Send("RpcCreate", createRequest, createReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	// Stat the newly created inode to fill in the EntryOut attributes
	getStatRequest = &jrpcfs.GetStatRequest{
		InodeHandle: jrpcfs.InodeHandle{
			MountID:     globals.mountID,
			InodeNumber: createReply.InodeNumber,
		},
	}

	getStatReply = &jrpcfs.StatStruct{}

	err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
	if nil != err {
		errno = convertErrToErrno(err, syscall.EIO)
		return
	}

	aTimeSec, aTimeNSec = nsToUnixTime(getStatReply.ATimeNs)
	mTimeSec, mTimeNSec = nsToUnixTime(getStatReply.MTimeNs)
	cTimeSec, cTimeNSec = nsToUnixTime(getStatReply.CTimeNs)

	globals.Lock()

	globals.lastFH++

	globals.fhToInodeNumberMap[globals.lastFH] = uint64(createReply.InodeNumber)

	fhSet, ok = globals.inodeNumberToFHMap[uint64(createReply.InodeNumber)]
	if !ok {
		fhSet = make(fhSetType)
	}
	fhSet[globals.lastFH] = struct{}{}
	globals.inodeNumberToFHMap[uint64(createReply.InodeNumber)] = fhSet

	createOut = &fission.CreateOut{
		EntryOut: fission.EntryOut{
			NodeID:         uint64(createReply.InodeNumber),
			Generation:     0,
			EntryValidSec:  globals.entryValidSec,
			AttrValidSec:   globals.attrValidSec,
			EntryValidNSec: globals.entryValidNSec,
			AttrValidNSec:  globals.attrValidNSec,
			Attr: fission.Attr{
				Ino:       uint64(createReply.InodeNumber),
				Size:      getStatReply.Size,
				Blocks:    0, // fixAttrSizes() will compute this
				ATimeSec:  aTimeSec,
				MTimeSec:  mTimeSec,
				CTimeSec:  cTimeSec,
				ATimeNSec: aTimeNSec,
				MTimeNSec: mTimeNSec,
				CTimeNSec: cTimeNSec,
				Mode:      getStatReply.FileMode,
				NLink:     uint32(getStatReply.NumLinks),
				UID:       getStatReply.UserID,
				GID:       getStatReply.GroupID,
				RDev:      0,
				BlkSize:   0, // fixAttrSizes() will set this
				Padding:   0,
			},
		},
		FH:        globals.lastFH,
		OpenFlags: fission.FOpenResponseDirectIO,
		Padding:   0,
	}

	globals.Unlock()

	fixAttrSizes(&createOut.Attr)

	errno = 0
	return
}

// DoInterrupt acknowledges FUSE INTERRUPT requests; no cancellation is performed.
func (dummy *globalsStruct) DoInterrupt(inHeader *fission.InHeader, interruptIn *fission.InterruptIn) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoInterrupt_calls, 1)
	return
}

// DoBMap is not supported; block-mapping queries are rejected with ENOSYS.
func (dummy *globalsStruct) DoBMap(inHeader *fission.InHeader, bMapIn *fission.BMapIn) (bMapOut *fission.BMapOut, errno syscall.Errno) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoBMap_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoDestroy is not supported; unmount teardown is handled elsewhere.
func (dummy *globalsStruct) DoDestroy(inHeader *fission.InHeader) (errno syscall.Errno) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoDestroy_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoPoll is not supported; poll requests are rejected with ENOSYS.
func (dummy *globalsStruct) DoPoll(inHeader *fission.InHeader, pollIn *fission.PollIn) (pollOut *fission.PollOut, errno syscall.Errno) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoPoll_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoBatchForget records metrics for batched FORGET requests; no per-inode
// reference counting is performed here.
func (dummy *globalsStruct) DoBatchForget(inHeader *fission.InHeader, batchForgetIn *fission.BatchForgetIn) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoBatchForget_calls, 1)
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoBatchForget_nodes, uint64(len(batchForgetIn.Forget)))
	return
}

// DoFAllocate is not supported; fallocate requests are rejected with ENOSYS.
func (dummy *globalsStruct) DoFAllocate(inHeader *fission.InHeader, fAllocateIn *fission.FAllocateIn) (errno syscall.Errno) {
	_ = atomic.AddUint64(&globals.metrics.FUSE_DoFAllocate_calls, 1)
	errno = syscall.ENOSYS
	return
}

// DoReadDirPlus handles the FUSE READDIRPLUS request (continues beyond this chunk).
func (dummy *globalsStruct) DoReadDirPlus(inHeader *fission.InHeader, readDirPlusIn *fission.ReadDirPlusIn) (readDirPlusOut
*fission.ReadDirPlusOut, errno syscall.Errno) { 2291 var ( 2292 aTimeNSec uint32 2293 aTimeSec uint64 2294 cTimeNSec uint32 2295 cTimeSec uint64 2296 curSize uint32 2297 dirEntIndex uint64 2298 dirEntNameLenAligned uint32 2299 dirEntPlus fission.DirEntPlus 2300 dirEntPlusSize uint32 2301 dirEntry *jrpcfs.DirEntry 2302 err error 2303 fhInodeNumber uint64 2304 mTimeNSec uint32 2305 mTimeSec uint64 2306 maxEntries uint64 2307 numEntries uint64 2308 ok bool 2309 readdirPlusByLocRequest *jrpcfs.ReaddirPlusByLocRequest 2310 readdirPlusReply *jrpcfs.ReaddirPlusReply 2311 statStruct *jrpcfs.StatStruct 2312 ) 2313 2314 _ = atomic.AddUint64(&globals.metrics.FUSE_DoReadDirPlus_calls, 1) 2315 2316 globals.Lock() 2317 2318 fhInodeNumber, ok = globals.fhToInodeNumberMap[readDirPlusIn.FH] 2319 if !ok { 2320 logFatalf("DoReadDirPlus(NodeID=%v,FH=%v) called for unknown FH", inHeader.NodeID, readDirPlusIn.FH) 2321 } 2322 if fhInodeNumber != inHeader.NodeID { 2323 logFatalf("DoReadDirPlus(NodeID=%v,FH=%v) called for FH associated with NodeID=%v", inHeader.NodeID, readDirPlusIn.FH, fhInodeNumber) 2324 } 2325 2326 globals.Unlock() 2327 2328 maxEntries = (uint64(readDirPlusIn.Size) + fission.DirEntPlusFixedPortionSize + 1 - 1) / (fission.DirEntPlusFixedPortionSize + 1) 2329 2330 readdirPlusByLocRequest = &jrpcfs.ReaddirPlusByLocRequest{ 2331 InodeHandle: jrpcfs.InodeHandle{ 2332 MountID: globals.mountID, 2333 InodeNumber: int64(inHeader.NodeID), 2334 }, 2335 MaxEntries: maxEntries, 2336 PrevDirEntLocation: int64(readDirPlusIn.Offset) - 1, 2337 } 2338 2339 readdirPlusReply = &jrpcfs.ReaddirPlusReply{} 2340 2341 err = globals.retryRPCClient.Send("RpcReaddirPlusByLoc", readdirPlusByLocRequest, readdirPlusReply) 2342 if nil != err { 2343 errno = convertErrToErrno(err, syscall.EIO) 2344 return 2345 } 2346 2347 numEntries = uint64(len(readdirPlusReply.DirEnts)) 2348 if numEntries != uint64(len(readdirPlusReply.StatEnts)) { 2349 logFatalf("DoReadDirPlus(NodeID=%v,FH=%v) fetched mismatched 
number of DirEnts (%v) & StatEnts (%v)", inHeader.NodeID, readDirPlusIn.FH, len(readdirPlusReply.DirEnts), len(readdirPlusReply.StatEnts)) 2350 } 2351 2352 readDirPlusOut = &fission.ReadDirPlusOut{ 2353 DirEntPlus: make([]fission.DirEntPlus, 0, numEntries), 2354 } 2355 2356 curSize = 0 2357 2358 for dirEntIndex = 0; dirEntIndex < numEntries; dirEntIndex++ { 2359 dirEntry = &readdirPlusReply.DirEnts[dirEntIndex] 2360 statStruct = &readdirPlusReply.StatEnts[dirEntIndex] 2361 2362 dirEntNameLenAligned = (uint32(len(dirEntry.Basename)) + (fission.DirEntAlignment - 1)) & ^uint32(fission.DirEntAlignment-1) 2363 dirEntPlusSize = fission.DirEntPlusFixedPortionSize + dirEntNameLenAligned 2364 2365 if (curSize + dirEntPlusSize) > readDirPlusIn.Size { 2366 break 2367 } 2368 2369 aTimeSec, aTimeNSec = nsToUnixTime(statStruct.ATimeNs) 2370 mTimeSec, mTimeNSec = nsToUnixTime(statStruct.MTimeNs) 2371 cTimeSec, cTimeNSec = nsToUnixTime(statStruct.CTimeNs) 2372 2373 dirEntPlus = fission.DirEntPlus{ 2374 EntryOut: fission.EntryOut{ 2375 NodeID: uint64(dirEntry.InodeNumber), 2376 Generation: 0, 2377 EntryValidSec: globals.entryValidSec, 2378 AttrValidSec: globals.attrValidSec, 2379 EntryValidNSec: globals.entryValidNSec, 2380 AttrValidNSec: globals.attrValidNSec, 2381 Attr: fission.Attr{ 2382 Ino: uint64(dirEntry.InodeNumber), 2383 Size: statStruct.Size, 2384 Blocks: 0, // fixAttrSizes() will compute this 2385 ATimeSec: aTimeSec, 2386 MTimeSec: mTimeSec, 2387 CTimeSec: cTimeSec, 2388 ATimeNSec: aTimeNSec, 2389 MTimeNSec: mTimeNSec, 2390 CTimeNSec: cTimeNSec, 2391 Mode: statStruct.FileMode, 2392 NLink: uint32(statStruct.NumLinks), 2393 UID: statStruct.UserID, 2394 GID: statStruct.GroupID, 2395 RDev: 0, 2396 BlkSize: 0, // fixAttrSizes() will set this 2397 Padding: 0, 2398 }, 2399 }, 2400 DirEnt: fission.DirEnt{ 2401 Ino: uint64(dirEntry.InodeNumber), 2402 Off: uint64(dirEntry.NextDirLocation), 2403 NameLen: uint32(len(dirEntry.Basename)), // unnecessary 2404 Type: 
uint32(dirEntry.FileType), 2405 Name: []byte(dirEntry.Basename), 2406 }, 2407 } 2408 2409 fixAttrSizes(&dirEntPlus.EntryOut.Attr) 2410 2411 readDirPlusOut.DirEntPlus = append(readDirPlusOut.DirEntPlus, dirEntPlus) 2412 2413 curSize += dirEntPlusSize 2414 } 2415 2416 _ = atomic.AddUint64(&globals.metrics.FUSE_DoReadDirPlus_entries, uint64(len(readDirPlusOut.DirEntPlus))) 2417 2418 errno = 0 2419 return 2420 } 2421 2422 func (dummy *globalsStruct) DoRename2(inHeader *fission.InHeader, rename2In *fission.Rename2In) (errno syscall.Errno) { 2423 var ( 2424 destroyReply *jrpcfs.Reply 2425 destroyRequest *jrpcfs.DestroyRequest 2426 err error 2427 fileInode *fileInodeStruct 2428 lookupReply *jrpcfs.InodeReply 2429 lookupRequest *jrpcfs.LookupRequest 2430 moveReply *jrpcfs.MoveReply 2431 moveRequest *jrpcfs.MoveRequest 2432 ) 2433 2434 _ = atomic.AddUint64(&globals.metrics.FUSE_DoRename2_calls, 1) 2435 2436 lookupRequest = &jrpcfs.LookupRequest{ 2437 InodeHandle: jrpcfs.InodeHandle{ 2438 MountID: globals.mountID, 2439 InodeNumber: int64(rename2In.NewDir), 2440 }, 2441 Basename: string(rename2In.NewName[:]), 2442 } 2443 2444 lookupReply = &jrpcfs.InodeReply{} 2445 2446 err = globals.retryRPCClient.Send("RpcLookup", lookupRequest, lookupReply) 2447 if nil == err { 2448 fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(lookupReply.InodeNumber)) 2449 2450 // Make sure potentially file inode didn't move before we were able to ExclusiveLease it 2451 2452 lookupReply = &jrpcfs.InodeReply{} 2453 2454 err = globals.retryRPCClient.Send("RpcLookup", lookupRequest, lookupReply) 2455 if (nil != err) || (fileInode.InodeNumber != inode.InodeNumber(lookupReply.InodeNumber)) { 2456 fileInode.unlock(true) 2457 fileInode = nil 2458 } else { 2459 fileInode.doFlushIfNecessary() 2460 } 2461 } else { 2462 fileInode = nil 2463 } 2464 2465 moveRequest = &jrpcfs.MoveRequest{ 2466 MountID: globals.mountID, 2467 SrcDirInodeNumber: int64(inHeader.NodeID), 2468 SrcBasename: 
string(rename2In.OldName[:]), 2469 DstDirInodeNumber: int64(rename2In.NewDir), 2470 DstBasename: string(rename2In.NewName[:]), 2471 } 2472 2473 moveReply = &jrpcfs.MoveReply{} 2474 2475 err = globals.retryRPCClient.Send("RpcMove", moveRequest, moveReply) 2476 if nil == err { 2477 errno = 0 2478 } else { 2479 errno = convertErrToErrno(err, syscall.EIO) 2480 } 2481 2482 if nil != fileInode { 2483 fileInode.unlock(false) 2484 } 2485 2486 if 0 != moveReply.ToDestroyInodeNumber { 2487 fileInode = lockInodeWithExclusiveLease(inode.InodeNumber(moveReply.ToDestroyInodeNumber)) 2488 2489 fileInode.doFlushIfNecessary() 2490 2491 destroyRequest = &jrpcfs.DestroyRequest{ 2492 InodeHandle: jrpcfs.InodeHandle{ 2493 MountID: globals.mountID, 2494 InodeNumber: moveReply.ToDestroyInodeNumber, 2495 }, 2496 } 2497 2498 destroyReply = &jrpcfs.Reply{} 2499 2500 err = globals.retryRPCClient.Send("RpcDestroy", destroyRequest, destroyReply) 2501 if nil != err { 2502 logWarnf("RpcDestroy(InodeHandle: %#v) failed: err", err) 2503 } 2504 2505 fileInode.unlock(false) 2506 } 2507 2508 return 2509 } 2510 2511 func (dummy *globalsStruct) DoLSeek(inHeader *fission.InHeader, lSeekIn *fission.LSeekIn) (lSeekOut *fission.LSeekOut, errno syscall.Errno) { 2512 _ = atomic.AddUint64(&globals.metrics.FUSE_DoLSeek_calls, 1) 2513 errno = syscall.ENOSYS 2514 return 2515 }