github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/fsimpl/gofer/lisafs_dentry.go

// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gofer

import (
	"fmt"
	"strings"

	"github.com/metacubex/gvisor/pkg/abi/linux"
	"github.com/metacubex/gvisor/pkg/atomicbitops"
	"github.com/metacubex/gvisor/pkg/context"
	"github.com/metacubex/gvisor/pkg/errors/linuxerr"
	"github.com/metacubex/gvisor/pkg/hostarch"
	"github.com/metacubex/gvisor/pkg/lisafs"
	"github.com/metacubex/gvisor/pkg/log"
	"github.com/metacubex/gvisor/pkg/sentry/kernel/auth"
	"github.com/metacubex/gvisor/pkg/sentry/socket/unix/transport"
	"github.com/metacubex/gvisor/pkg/sentry/vfs"
)

func (fs *filesystem) handleAnameLisafs(ctx context.Context, rootInode lisafs.Inode) (lisafs.Inode, error) {
	if fs.opts.aname == "/" {
		return rootInode, nil
	}

	// Walk to the attach point from root inode. aname is always absolute.
	rootFD := fs.client.NewFD(rootInode.ControlFD)
	status, inodes, err := rootFD.WalkMultiple(ctx, strings.Split(fs.opts.aname, "/")[1:])
	if err != nil {
		return lisafs.Inode{}, err
	}

	// Close all intermediate FDs to the attach point.
	rootFD.Close(ctx, false /* flush */)
	numInodes := len(inodes)
	for i := 0; i < numInodes-1; i++ {
		curFD := fs.client.NewFD(inodes[i].ControlFD)
		curFD.Close(ctx, false /* flush */)
	}

	switch status {
	case lisafs.WalkSuccess:
		return inodes[numInodes-1], nil
	default:
		if numInodes > 0 {
			last := fs.client.NewFD(inodes[numInodes-1].ControlFD)
			last.Close(ctx, false /* flush */)
		}
		log.Warningf("initClient failed because walk to attach point %q failed: lisafs.WalkStatus = %v", fs.opts.aname, status)
		return lisafs.Inode{}, linuxerr.ENOENT
	}
}

// lisafsDentry is a gofer dentry implementation. It represents a dentry backed
// by a lisafs connection.
//
// +stateify savable
type lisafsDentry struct {
	dentry

	// controlFD is used by lisafs to perform path based operations on this
	// dentry. controlFD is immutable.
	//
	// if !controlFD.Ok(), this dentry represents a synthetic file, i.e. a
	// file that does not exist on the remote filesystem. As of this writing, the
	// only files that can be synthetic are sockets, pipes, and directories.
	controlFD lisafs.ClientFD `state:"nosave"`

	// If this dentry represents a regular file or directory, readFDLisa is a
	// LISAFS FD used for reads by all regularFileFDs/directoryFDs representing
	// this dentry. readFDLisa is protected by dentry.handleMu.
	readFDLisa lisafs.ClientFD `state:"nosave"`

	// If this dentry represents a regular file, writeFDLisa is the LISAFS FD
	// used for writes by all regularFileFDs representing this dentry.
	// readFDLisa and writeFDLisa may or may not represent the same LISAFS FD.
	// Once either transitions from closed (Ok() == false) to open
	// (Ok() == true), it may be mutated with dentry.handleMu locked, but cannot
	// be closed until the dentry is destroyed. writeFDLisa is protected by
	// dentry.handleMu.
	writeFDLisa lisafs.ClientFD `state:"nosave"`
}
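
// Illustrative sketch, not part of the upstream file: the field comments above
// describe the lifecycle of readFDLisa/writeFDLisa. Once a field is Ok(), it
// is only ever replaced by another open FD (under d.handleMu) and never
// reverts to the closed state before destroy(). A hypothetical caller
// installing a first read FD would look roughly like:
//
//	d.handleMu.Lock()
//	if !d.readFDLisa.Ok() {
//		d.readFDLisa = newReadFD // newReadFD is a hypothetical lisafs.ClientFD
//	}
//	d.handleMu.Unlock()
//
// See updateHandles() below for how an already-open FD is swapped for a new
// one and the old FD closed before d.handleMu is released.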

// newLisafsDentry creates a new dentry representing the given file. The dentry
// initially has no references, but is not cached; it is the caller's
// responsibility to set the dentry's reference count and/or call
// dentry.checkCachingLocked() as appropriate.
// newLisafsDentry takes ownership of ino.
func (fs *filesystem) newLisafsDentry(ctx context.Context, ino *lisafs.Inode) (*dentry, error) {
	if ino.Stat.Mask&linux.STATX_TYPE == 0 {
		ctx.Warningf("can't create gofer.dentry without file type")
		fs.client.CloseFD(ctx, ino.ControlFD, false /* flush */)
		return nil, linuxerr.EIO
	}
	if ino.Stat.Mode&linux.FileTypeMask == linux.ModeRegular && ino.Stat.Mask&linux.STATX_SIZE == 0 {
		ctx.Warningf("can't create regular file gofer.dentry without file size")
		fs.client.CloseFD(ctx, ino.ControlFD, false /* flush */)
		return nil, linuxerr.EIO
	}

	inoKey := inoKeyFromStatx(&ino.Stat)
	d := &lisafsDentry{
		dentry: dentry{
			fs:        fs,
			inoKey:    inoKey,
			ino:       fs.inoFromKey(inoKey),
			mode:      atomicbitops.FromUint32(uint32(ino.Stat.Mode)),
			uid:       atomicbitops.FromUint32(uint32(fs.opts.dfltuid)),
			gid:       atomicbitops.FromUint32(uint32(fs.opts.dfltgid)),
			blockSize: atomicbitops.FromUint32(hostarch.PageSize),
			readFD:    atomicbitops.FromInt32(-1),
			writeFD:   atomicbitops.FromInt32(-1),
			mmapFD:    atomicbitops.FromInt32(-1),
		},
		controlFD: fs.client.NewFD(ino.ControlFD),
	}
	if ino.Stat.Mask&linux.STATX_UID != 0 {
		d.uid = atomicbitops.FromUint32(dentryUID(lisafs.UID(ino.Stat.UID)))
	}
	if ino.Stat.Mask&linux.STATX_GID != 0 {
		d.gid = atomicbitops.FromUint32(dentryGID(lisafs.GID(ino.Stat.GID)))
	}
	if ino.Stat.Mask&linux.STATX_SIZE != 0 {
		d.size = atomicbitops.FromUint64(ino.Stat.Size)
	}
	if ino.Stat.Blksize != 0 {
		d.blockSize = atomicbitops.FromUint32(ino.Stat.Blksize)
	}
	if ino.Stat.Mask&linux.STATX_ATIME != 0 {
		d.atime = atomicbitops.FromInt64(dentryTimestamp(ino.Stat.Atime))
	} else {
		d.atime = atomicbitops.FromInt64(fs.clock.Now().Nanoseconds())
	}
	if ino.Stat.Mask&linux.STATX_MTIME != 0 {
		d.mtime = atomicbitops.FromInt64(dentryTimestamp(ino.Stat.Mtime))
	} else {
		d.mtime = atomicbitops.FromInt64(fs.clock.Now().Nanoseconds())
	}
	if ino.Stat.Mask&linux.STATX_CTIME != 0 {
		d.ctime = atomicbitops.FromInt64(dentryTimestamp(ino.Stat.Ctime))
	} else {
		// Approximate ctime with mtime if ctime isn't available.
		d.ctime = atomicbitops.FromInt64(d.mtime.Load())
	}
	if ino.Stat.Mask&linux.STATX_BTIME != 0 {
		d.btime = atomicbitops.FromInt64(dentryTimestamp(ino.Stat.Btime))
	}
	if ino.Stat.Mask&linux.STATX_NLINK != 0 {
		d.nlink = atomicbitops.FromUint32(ino.Stat.Nlink)
	} else {
		if ino.Stat.Mode&linux.FileTypeMask == linux.ModeDirectory {
			d.nlink = atomicbitops.FromUint32(2)
		} else {
			d.nlink = atomicbitops.FromUint32(1)
		}
	}
	d.dentry.init(d)
	fs.syncMu.Lock()
	fs.syncableDentries.PushBack(&d.syncableListEntry)
	fs.syncMu.Unlock()
	return &d.dentry, nil
}
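
// Illustrative sketch, not part of the upstream file: newLisafsDentry returns
// a dentry with zero references that is not yet hooked into the caching
// machinery, so callers typically record it (here via appendDentry, as done
// elsewhere in this file) so that checkCachingLocked() runs once locks are
// dropped:
//
//	child, err := d.fs.newLisafsDentry(ctx, &childInode)
//	if err != nil {
//		return nil, err
//	}
//	*ds = appendDentry(*ds, child) // checkCachingLocked() is deferred to the caller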

func (d *lisafsDentry) openHandle(ctx context.Context, flags uint32) (handle, error) {
	openFD, hostFD, err := d.controlFD.OpenAt(ctx, flags)
	if err != nil {
		return noHandle, err
	}
	return handle{
		fdLisa: d.controlFD.Client().NewFD(openFD),
		fd:     int32(hostFD),
	}, nil
}

func (d *lisafsDentry) updateHandles(ctx context.Context, h handle, readable, writable bool) {
	// Switch to new LISAFS FDs. Note that the read, write and mmap host FDs are
	// updated separately.
	oldReadFD := lisafs.InvalidFDID
	if readable {
		oldReadFD = d.readFDLisa.ID()
		d.readFDLisa = h.fdLisa
	}
	oldWriteFD := lisafs.InvalidFDID
	if writable {
		oldWriteFD = d.writeFDLisa.ID()
		d.writeFDLisa = h.fdLisa
	}
	// NOTE(b/141991141): Close old FDs before making the new FDs visible (by
	// unlocking d.handleMu).
	if oldReadFD.Ok() {
		d.fs.client.CloseFD(ctx, oldReadFD, false /* flush */)
	}
	if oldWriteFD.Ok() && oldReadFD != oldWriteFD {
		d.fs.client.CloseFD(ctx, oldWriteFD, false /* flush */)
	}
}

// Precondition: d.metadataMu must be locked.
//
// +checklocks:d.metadataMu
func (d *lisafsDentry) updateMetadataLocked(ctx context.Context, h handle) error {
	handleMuRLocked := false
	if !h.fdLisa.Ok() {
		// Use open FDs in preference to the control FD. This may be significantly
		// more efficient in some implementations. Prefer a writable FD over a
		// readable one since some filesystem implementations may update a writable
		// FD's metadata after writes, without making metadata updates immediately
		// visible to read-only FDs representing the same file.
		d.handleMu.RLock()
		switch {
		case d.writeFDLisa.Ok():
			h.fdLisa = d.writeFDLisa
			handleMuRLocked = true
		case d.readFDLisa.Ok():
			h.fdLisa = d.readFDLisa
			handleMuRLocked = true
		default:
			h.fdLisa = d.controlFD
			d.handleMu.RUnlock()
		}
	}

	var stat linux.Statx
	err := h.fdLisa.StatTo(ctx, &stat)
	if handleMuRLocked {
		// handleMu must be released before updateMetadataFromStatxLocked().
		d.handleMu.RUnlock() // +checklocksforce: complex case.
	}
	if err != nil {
		return err
	}
	d.updateMetadataFromStatxLocked(&stat)
	return nil
}

func chmod(ctx context.Context, controlFD lisafs.ClientFD, mode uint16) error {
	setStat := linux.Statx{
		Mask: linux.STATX_MODE,
		Mode: mode,
	}
	_, failureErr, err := controlFD.SetStat(ctx, &setStat)
	if err != nil {
		return err
	}
	return failureErr
}

func (d *lisafsDentry) destroy(ctx context.Context) {
	if d.readFDLisa.Ok() && d.readFDLisa.ID() != d.writeFDLisa.ID() {
		d.readFDLisa.Close(ctx, false /* flush */)
	}
	if d.writeFDLisa.Ok() {
		d.writeFDLisa.Close(ctx, false /* flush */)
	}
	if d.controlFD.Ok() {
		// Close the control FD. Propagate the Close RPCs immediately to the
		// server if the dentry being destroyed is a deleted regular file, so that
		// the disk space is released on the remote filesystem right away. This
		// also flushes the read/write lisafs FDs closed above.
		flushClose := d.isDeleted() && d.isRegularFile()
		d.controlFD.Close(ctx, flushClose)
	}
}
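
// Illustrative note, not part of the upstream file: the flush argument used in
// destroy() above controls whether the queued Close RPCs are propagated to the
// server before returning, per the comment in destroy():
//
//	d.controlFD.Close(ctx, false) // queue the close; the server may act on it later
//	d.controlFD.Close(ctx, true)  // flush: queued closes are processed now,
//	                              // releasing remote disk space for deleted files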

func (d *lisafsDentry) getRemoteChild(ctx context.Context, name string) (*dentry, error) {
	childInode, err := d.controlFD.Walk(ctx, name)
	if err != nil {
		return nil, err
	}
	return d.fs.newLisafsDentry(ctx, &childInode)
}

// Preconditions:
//   - fs.renameMu must be locked.
//   - d.opMu must be locked.
//   - d.isDir().
//   - !rp.done() && rp.Component() is not "." or "..".
//
// Postcondition: The returned dentry is already cached appropriately.
func (d *lisafsDentry) getRemoteChildAndWalkPathLocked(ctx context.Context, rp resolvingPath, ds **[]*dentry) (*dentry, error) {
	// Collect as many path components as possible to walk.
	var namesArr [16]string // arbitrarily sized array to help avoid slice allocation.
	names := namesArr[:0]
	rp.getComponents(func(name string) bool {
		if name == "." {
			return true
		}
		if name == ".." {
			return false
		}
		names = append(names, name)
		return true
	})
	// Walk as much of the path as possible in 1 RPC.
	_, inodes, err := d.controlFD.WalkMultiple(ctx, names)
	if err != nil {
		return nil, err
	}
	if len(inodes) == 0 {
		// d.opMu is locked, so a new child could not have appeared concurrently.
		// It should be safe to mark this as a negative entry.
		d.childrenMu.Lock()
		defer d.childrenMu.Unlock()
		d.cacheNegativeLookupLocked(names[0])
		return nil, linuxerr.ENOENT
	}

	// Add the walked inodes into the dentry tree.
	startParent := &d.dentry
	curParent := startParent
	curParentLock := func() {
		if curParent != startParent {
			curParent.opMu.RLock()
		}
		curParent.childrenMu.Lock()
	}
	curParentUnlock := func() {
		curParent.childrenMu.Unlock()
		if curParent != startParent {
			curParent.opMu.RUnlock() // +checklocksforce: locked via curParentLock().
		}
	}
	var ret *dentry
	var dentryCreationErr error
	for i := range inodes {
		if dentryCreationErr != nil {
			d.fs.client.CloseFD(ctx, inodes[i].ControlFD, false /* flush */)
			continue
		}

		curParentLock()
		// Did we race with another walk + cache operation?
		child, ok := curParent.children[names[i]] // +checklocksforce: locked via curParentLock().
		if ok && child != nil {
			// We raced. Clean up the new inode and proceed with the cached child.
			d.fs.client.CloseFD(ctx, inodes[i].ControlFD, false /* flush */)
		} else {
			// Create and cache the new dentry.
			var err error
			child, err = d.fs.newLisafsDentry(ctx, &inodes[i])
			if err != nil {
				dentryCreationErr = err
				curParentUnlock()
				continue
			}
			curParent.cacheNewChildLocked(child, names[i]) // +checklocksforce: locked via curParentLock().
		}
		curParentUnlock()

		// For now, child has 0 references, so our caller should call
		// child.checkCachingLocked(). curParent gained a ref so we should also
		// call curParent.checkCachingLocked() so it can be removed from the cache
		// if needed. We only do that for the first iteration because all
		// subsequent parents would have already been added to ds.
		if i == 0 {
			*ds = appendDentry(*ds, curParent)
		}
		*ds = appendDentry(*ds, child)
		curParent = child
		if i == 0 {
			ret = child
		}
	}
	return ret, dentryCreationErr
}
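
// Illustrative example, not part of the upstream file: for a remaining path of
// "a/./b/../c", the getComponents callback above keeps "a", skips ".", keeps
// "b", and stops collecting at "..", so the single WalkMultiple RPC is issued
// as roughly
//
//	d.controlFD.WalkMultiple(ctx, []string{"a", "b"})
//
// A partial walk simply returns fewer inodes than names, and only the walked
// prefix is added to the dentry tree.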

func (d *lisafsDentry) newChildDentry(ctx context.Context, childIno *lisafs.Inode, childName string) (*dentry, error) {
	child, err := d.fs.newLisafsDentry(ctx, childIno)
	if err != nil {
		if err := d.controlFD.UnlinkAt(ctx, childName, 0 /* flags */); err != nil {
			log.Warningf("failed to clean up created child %s after newLisafsDentry() failed: %v", childName, err)
		}
	}
	return child, err
}

func (d *lisafsDentry) mknod(ctx context.Context, name string, creds *auth.Credentials, opts *vfs.MknodOptions) (*dentry, error) {
	if _, ok := opts.Endpoint.(transport.HostBoundEndpoint); !ok {
		childInode, err := d.controlFD.MknodAt(ctx, name, opts.Mode, lisafs.UID(creds.EffectiveKUID), lisafs.GID(creds.EffectiveKGID), opts.DevMinor, opts.DevMajor)
		if err != nil {
			return nil, err
		}
		return d.newChildDentry(ctx, &childInode, name)
	}

	// This mknod(2) is coming from unix bind(2), as opts.Endpoint is set.
	sockType := opts.Endpoint.(transport.Endpoint).Type()
	childInode, boundSocketFD, err := d.controlFD.BindAt(ctx, sockType, name, opts.Mode, lisafs.UID(creds.EffectiveKUID), lisafs.GID(creds.EffectiveKGID))
	if err != nil {
		return nil, err
	}
	hbep := opts.Endpoint.(transport.HostBoundEndpoint)
	if err := hbep.SetBoundSocketFD(ctx, boundSocketFD); err != nil {
		if err := d.controlFD.UnlinkAt(ctx, name, 0 /* flags */); err != nil {
			log.Warningf("failed to clean up socket which was created by BindAt RPC: %v", err)
		}
		d.fs.client.CloseFD(ctx, childInode.ControlFD, false /* flush */)
		return nil, err
	}
	child, err := d.newChildDentry(ctx, &childInode, name)
	if err != nil {
		hbep.ResetBoundSocketFD(ctx)
		return nil, err
	}
	// Set the endpoint on the newly created child dentry.
	child.endpoint = opts.Endpoint
	return child, nil
}

func (d *lisafsDentry) link(ctx context.Context, target *lisafsDentry, name string) (*dentry, error) {
	linkInode, err := d.controlFD.LinkAt(ctx, target.controlFD.ID(), name)
	if err != nil {
		return nil, err
	}
	// TODO(gvisor.dev/issue/6739): Hard linked dentries should share the same
	// inode fields.
	return d.newChildDentry(ctx, &linkInode, name)
}

func (d *lisafsDentry) mkdir(ctx context.Context, name string, mode linux.FileMode, uid auth.KUID, gid auth.KGID) (*dentry, error) {
	childDirInode, err := d.controlFD.MkdirAt(ctx, name, mode, lisafs.UID(uid), lisafs.GID(gid))
	if err != nil {
		return nil, err
	}
	return d.newChildDentry(ctx, &childDirInode, name)
}

func (d *lisafsDentry) symlink(ctx context.Context, name, target string, creds *auth.Credentials) (*dentry, error) {
	symlinkInode, err := d.controlFD.SymlinkAt(ctx, name, target, lisafs.UID(creds.EffectiveKUID), lisafs.GID(creds.EffectiveKGID))
	if err != nil {
		return nil, err
	}
	return d.newChildDentry(ctx, &symlinkInode, name)
}

func (d *lisafsDentry) openCreate(ctx context.Context, name string, flags uint32, mode linux.FileMode, uid auth.KUID, gid auth.KGID) (*dentry, handle, error) {
	ino, openFD, hostFD, err := d.controlFD.OpenCreateAt(ctx, name, flags, mode, lisafs.UID(uid), lisafs.GID(gid))
	if err != nil {
		return nil, noHandle, err
	}

	h := handle{
		fdLisa: d.fs.client.NewFD(openFD),
		fd:     int32(hostFD),
	}
	child, err := d.fs.newLisafsDentry(ctx, &ino)
	if err != nil {
		h.close(ctx)
		return nil, noHandle, err
	}
	return child, h, nil
}

// lisafsGetdentsCount is the number of bytes of dirents to read from the
// server in each Getdents RPC. This value is consistent with the VFS1 client.
const lisafsGetdentsCount = int32(64 * 1024)

// Preconditions:
//   - getDirents may not be called concurrently with another getDirents call.
func (d *lisafsDentry) getDirentsLocked(ctx context.Context, recordDirent func(name string, key inoKey, dType uint8)) error {
	// shouldSeek0 indicates whether the server should SEEK to 0 before reading
	// directory entries.
	shouldSeek0 := true
	for {
		count := lisafsGetdentsCount
		if shouldSeek0 {
			// See lisafs.Getdents64Req.Count.
			count = -count
			shouldSeek0 = false
		}
		dirents, err := d.readFDLisa.Getdents64(ctx, count)
		if err != nil {
			return err
		}
		if len(dirents) == 0 {
			return nil
		}
		for i := range dirents {
			name := string(dirents[i].Name)
			if name == "." || name == ".." {
				continue
			}
			recordDirent(name, inoKey{
				ino:      uint64(dirents[i].Ino),
				devMinor: uint32(dirents[i].DevMinor),
				devMajor: uint32(dirents[i].DevMajor),
			}, uint8(dirents[i].Type))
		}
	}
}

func flush(ctx context.Context, fd lisafs.ClientFD) error {
	if fd.Ok() {
		return fd.Flush(ctx)
	}
	return nil
}

func (d *lisafsDentry) statfs(ctx context.Context) (linux.Statfs, error) {
	var statFS lisafs.StatFS
	if err := d.controlFD.StatFSTo(ctx, &statFS); err != nil {
		return linux.Statfs{}, err
	}
	return linux.Statfs{
		BlockSize:       statFS.BlockSize,
		FragmentSize:    statFS.BlockSize,
		Blocks:          statFS.Blocks,
		BlocksFree:      statFS.BlocksFree,
		BlocksAvailable: statFS.BlocksAvailable,
		Files:           statFS.Files,
		FilesFree:       statFS.FilesFree,
		NameLength:      statFS.NameLength,
	}, nil
}

func (d *lisafsDentry) restoreFile(ctx context.Context, inode *lisafs.Inode, opts *vfs.CompleteRestoreOptions) error {
	d.controlFD = d.fs.client.NewFD(inode.ControlFD)

	// Gofers do not preserve inoKey across checkpoint/restore, so:
	//
	//   - We must assume that the remote filesystem did not change in a way that
	//     would invalidate dentries, since we can't revalidate dentries by
	//     checking inoKey.
	//
	//   - We need to associate the new inoKey with the existing d.ino.
	d.inoKey = inoKeyFromStatx(&inode.Stat)
	d.fs.inoMu.Lock()
	d.fs.inoByKey[d.inoKey] = d.ino
	d.fs.inoMu.Unlock()

	// Check metadata stability before updating metadata.
	d.metadataMu.Lock()
	defer d.metadataMu.Unlock()
	if d.isRegularFile() {
		if opts.ValidateFileSizes {
			if inode.Stat.Mask&linux.STATX_SIZE == 0 {
				return vfs.ErrCorruption{fmt.Errorf("gofer.dentry(%q).restoreFile: file size validation failed: file size not available", genericDebugPathname(&d.dentry))}
			}
			if d.size.RacyLoad() != inode.Stat.Size {
				return vfs.ErrCorruption{fmt.Errorf("gofer.dentry(%q).restoreFile: file size validation failed: size changed from %d to %d", genericDebugPathname(&d.dentry), d.size.Load(), inode.Stat.Size)}
			}
		}
		if opts.ValidateFileModificationTimestamps {
			if inode.Stat.Mask&linux.STATX_MTIME == 0 {
				return vfs.ErrCorruption{fmt.Errorf("gofer.dentry(%q).restoreFile: mtime validation failed: mtime not available", genericDebugPathname(&d.dentry))}
			}
			if want := dentryTimestamp(inode.Stat.Mtime); d.mtime.RacyLoad() != want {
				return vfs.ErrCorruption{fmt.Errorf("gofer.dentry(%q).restoreFile: mtime validation failed: mtime changed from %+v to %+v", genericDebugPathname(&d.dentry), linux.NsecToStatxTimestamp(d.mtime.RacyLoad()), linux.NsecToStatxTimestamp(want))}
			}
		}
	}
	if !d.cachedMetadataAuthoritative() {
		d.updateMetadataFromStatxLocked(&inode.Stat)
	}

	if rw, ok := d.fs.savedDentryRW[&d.dentry]; ok {
		if err := d.ensureSharedHandle(ctx, rw.read, rw.write, false /* trunc */); err != nil {
			return err
		}
	}

	return nil
}
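
// Illustrative note, not part of the upstream file: the re-keying in
// restoreFile() above is what keeps application-visible inode numbers stable
// across checkpoint/restore even though the gofer may report a brand new
// device/inode pair:
//
//	d.inoKey = inoKeyFromStatx(&inode.Stat) // new remote identity after restore
//	d.fs.inoByKey[d.inoKey] = d.ino         // ...mapped back to the pre-checkpoint ino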

// doRevalidationLisafs stats all dentries in `state`. It will update or
// invalidate dentries in the cache based on the result.
//
// Preconditions:
//   - fs.renameMu must be locked.
//   - InteropModeShared is in effect.
func doRevalidationLisafs(ctx context.Context, vfsObj *vfs.VirtualFilesystem, state *revalidateState, ds **[]*dentry) error {
	start := state.start.impl.(*lisafsDentry)

	// Populate state.names.
	state.names = state.names[:0] // For sanity.
	if state.refreshStart {
		state.names = append(state.names, "")
	}
	for _, d := range state.dentries {
		state.names = append(state.names, d.name)
	}

	// Lock metadata on all dentries *before* getting attributes for them.
	if state.refreshStart {
		start.metadataMu.Lock()
		defer start.metadataMu.Unlock()
	}
	for _, d := range state.dentries {
		d.metadataMu.Lock()
	}
	// lastUnlockedDentry keeps track of the dentries in state.dentries that have
	// already had their metadataMu unlocked. Avoid defer unlock in the loop
	// above to avoid heap allocation.
	lastUnlockedDentry := -1
	defer func() {
		// Advance to the first unevaluated dentry and unlock the remaining
		// dentries.
		for lastUnlockedDentry++; lastUnlockedDentry < len(state.dentries); lastUnlockedDentry++ {
			state.dentries[lastUnlockedDentry].metadataMu.Unlock()
		}
	}()

	// Make WalkStat RPC.
	stats, err := start.controlFD.WalkStat(ctx, state.names)
	if err != nil {
		return err
	}

	if state.refreshStart {
		if len(stats) > 0 {
			// First dentry is where the search is starting, just update attributes
			// since it cannot be replaced.
			start.updateMetadataFromStatxLocked(&stats[0]) // +checklocksforce: see above.
			stats = stats[1:]
		}
	}

	for i := 0; i < len(state.dentries); i++ {
		d := state.dentries[i]
		found := i < len(stats)
		// Advance lastUnlockedDentry. It is the responsibility of this for loop
		// block to unlock d.metadataMu.
		lastUnlockedDentry = i

		// Note that synthetic dentries will always fail this comparison check.
		if !found || d.inoKey != inoKeyFromStatx(&stats[i]) {
			d.metadataMu.Unlock()
			if !found && d.isSynthetic() {
				// We have a synthetic file, and no remote file has arisen to replace
				// it.
				return nil
			}
			// The file at this path has changed or no longer exists. Mark the
			// dentry invalidated.
			d.invalidate(ctx, vfsObj, ds)
			return nil
		}

		// The file at this path hasn't changed. Just update cached metadata.
		d.impl.(*lisafsDentry).updateMetadataFromStatxLocked(&stats[i]) // +checklocksforce: see above.
		d.metadataMu.Unlock()
	}
	return nil
}
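
// Illustrative example, not part of the upstream file: if revalidation starts
// at a dentry whose own attributes should be refreshed (state.refreshStart)
// and covers the child components "bar" and "baz", the WalkStat RPC above is
// issued as roughly
//
//	start.controlFD.WalkStat(ctx, []string{"", "bar", "baz"})
//
// where the leading empty name stats the starting FD itself. Its attributes
// are updated in place (the start dentry cannot be invalidated here), and the
// remaining stats are matched against state.dentries by inoKey: a mismatch or
// a missing stat invalidates the dentry, while a match just refreshes its
// cached metadata.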