github.com/rohankumardubey/proxyfs@v0.0.0-20210108201508-653efa9ab00e/pfsagentd/inode.go (about)

     1  package main
     2  
     3  import (
     4  	"container/list"
     5  
     6  	"github.com/swiftstack/ProxyFS/inode"
     7  	"github.com/swiftstack/ProxyFS/jrpcfs"
     8  )
     9  
    10  // References is a concept used to prevent two or more contexts from operating on
    11  // distinct instances of a FileInode. Consider the case where each instance
    12  // wants to lookup a previously unknown FileInode (i.e. by inode.InodeNumber).
    13  // Each would think it is unknown and promptly create distinct instances. To
    14  // prevent this, references to each FileInode instance will be strictly protected
    15  // by the sync.Mutex in globalsStruct. This necessitates that a "lookup" operation
    16  // be allowed to intrinsically create a fileInodeStruct. It also requires those
    17  // receiving a reference to a fileInodeStruct to eventually drop their reference
    18  // to it. Each of the globals.{unleased|sharedLease|exclusiveLease}FileInodeCacheLRU's
    19  // and the globals.fileInodeMap must, therefore, never "forget" a fileInodeStruct
    20  // for which a reference is still available.
    21  //
    22  // References occur in two cases:
    23  //
    24  //   A Shared or Exclusive Lock is being requested or held for a FileInode:
    25  //
     26  //     The lock requestor must first reference a FileInode before making
    27  //     the Shared or Exclusive Lock request. After releasing the Lock, they
    28  //     dereference it.
    29  //
    30  //   A FileInode has one or more in-flight LogSegment PUTs underway:
    31  //
    32  //     In this case, each LogSegment PUT is represented by a reference from the
    33  //     time it is initiated via a request to ProvisionObject() through the
    34  //     actual LogSegment PUT until the corresponding Wrote() request has completed.
    35  //
    36  // Note that the File Inode Cache (globals.fileInodeMap) typically swings into action
    37  // when initial references are made and when a last reference is released. It is,
    38  // however, possible for movements of fileInodeStructs among the globals.*CacheLRU's
    39  // to trigger evictions as well if, at the time, File Inode Cache limits are already
    40  // exceeded.
    41  
// referenceFileInode returns a referenced fileInodeStruct for inodeNumber.
// If an instance is already present in globals.fileInodeMap, its reference
// count is simply incremented. Otherwise, a GetStat RPC is issued to validate
// the inode and seed cachedStat; a nil return indicates either the RPC failed
// or the inode is not a regular file.
func referenceFileInode(inodeNumber inode.InodeNumber) (fileInode *fileInodeStruct) {
	var (
		delayedLeaseRequestList *list.List
		err                     error
		getStatReply            *jrpcfs.StatStruct
		getStatRequest          *jrpcfs.GetStatRequest
		ok                      bool
	)

	delayedLeaseRequestList = nil

	globals.Lock()

	fileInode, ok = globals.fileInodeMap[inodeNumber]

	if ok {
		fileInode.references++
	} else {
		getStatRequest = &jrpcfs.GetStatRequest{
			InodeHandle: jrpcfs.InodeHandle{
				MountID:     globals.mountID,
				InodeNumber: int64(inodeNumber),
			},
		}

		getStatReply = &jrpcfs.StatStruct{}

		// NOTE(review): the RPC is performed while holding the globals Lock so
		// that no competing caller can insert a duplicate instance meanwhile
		// (see the References discussion at the top of this file)
		err = globals.retryRPCClient.Send("RpcGetStat", getStatRequest, getStatReply)
		if (nil != err) || (inode.PosixModeFile != (inode.InodeMode(getStatReply.FileMode) & inode.PosixModeType)) {
			// Either the RPC failed or the inode is not a regular file
			globals.Unlock()
			fileInode = nil
			return
		}

		// Construct the (initially unleased) instance holding this caller's reference
		fileInode = &fileInodeStruct{
			InodeNumber:                  inodeNumber,
			cachedStat:                   getStatReply,
			lockWaiters:                  nil,
			references:                   1,
			leaseState:                   fileInodeLeaseStateNone,
			pendingLeaseInterrupt:        nil,
			sharedLockHolders:            list.New(),
			exclusiveLockHolder:          nil,
			TODODeprecatelockWaiters:     list.New(),
			leaseListElement:             nil,
			extentMap:                    nil,
			extentMapLenWhenUnreferenced: 0,
			chunkedPutList:               list.New(),
			flushInProgress:              false,
			chunkedPutFlushWaiterList:    list.New(),
			dirtyListElement:             nil,
		}

		fileInode.leaseListElement = globals.unleasedFileInodeCacheLRU.PushBack(fileInode)

		globals.fileInodeMap[inodeNumber] = fileInode

		// Insertion may have pushed the File Inode Cache beyond its limits
		delayedLeaseRequestList = honorInodeCacheLimits()
	}

	globals.Unlock()

	// Lease demotions/releases queued above must be issued outside the globals Lock
	if nil != delayedLeaseRequestList {
		performDelayedLeaseRequestList(delayedLeaseRequestList)
	}

	return
}
   110  
   111  func (fileInode *fileInodeStruct) reference() {
   112  	globals.Lock()
   113  
   114  	if 0 == fileInode.references {
   115  		logFatalf("*fileInodeStruct.reference() should not have been called with fileInode.references == 0")
   116  	}
   117  
   118  	fileInode.references++
   119  
   120  	globals.Unlock()
   121  }
   122  
// dereference drops one reference to fileInode. Upon dropping the last
// reference, the global cached ExtentMap entry accounting
// (globals.extentMapEntriesCached) is brought up to date and the File Inode
// Cache limits are (re-)enforced.
func (fileInode *fileInodeStruct) dereference() {
	var (
		delayedLeaseRequestList *list.List
		err                     error
		extentMapLen            int
	)

	delayedLeaseRequestList = nil

	globals.Lock()

	fileInode.references--

	if 0 == fileInode.references {
		if nil == fileInode.extentMap {
			// The ExtentMap has been dropped... remove its prior contribution
			globals.extentMapEntriesCached -= fileInode.extentMapLenWhenUnreferenced
			fileInode.extentMapLenWhenUnreferenced = 0
		} else {
			// Account for growth/shrinkage of the ExtentMap while referenced
			extentMapLen, err = fileInode.extentMap.Len()
			if nil != err {
				logFatalf("*fileInodeStruct.dereference() call to fileInode.extentMap.Len() failed: %v", err)
			}
			globals.extentMapEntriesCached += extentMapLen - fileInode.extentMapLenWhenUnreferenced
			fileInode.extentMapLenWhenUnreferenced = extentMapLen
		}

		delayedLeaseRequestList = honorInodeCacheLimits()
	}

	globals.Unlock()

	// Lease demotions/releases queued above must be issued outside the globals Lock
	if nil != delayedLeaseRequestList {
		performDelayedLeaseRequestList(delayedLeaseRequestList)
	}
}
   158  
// honorInodeCacheLimits enforces the SharedFileLimit and ExclusiveFileLimit confMap
// parameters. Since it is called while globals Lock is held, it simply assembles a
// list of fileInodeLeaseRequestStruct's to be issued once the globals Lock is released.
//
func honorInodeCacheLimits() (delayedLeaseRequestList *list.List) {
	var (
		delayedLeaseRequest          *fileInodeLeaseRequestStruct
		fileInode                    *fileInodeStruct
		fileInodeCacheLRUElement     *list.Element
		fileInodeCacheLimitToEnforce int
	)

	delayedLeaseRequestList = list.New()

	// Phase 1: queue Demote requests for least-recently-used ExclusiveLease
	// holders until the ExclusiveFileLimit is honored

	fileInodeCacheLimitToEnforce = int(globals.config.ExclusiveFileLimit)

	for globals.exclusiveLeaseFileInodeCacheLRU.Len() > fileInodeCacheLimitToEnforce {
		fileInodeCacheLRUElement = globals.exclusiveLeaseFileInodeCacheLRU.Front()
		fileInode = fileInodeCacheLRUElement.Value.(*fileInodeStruct)
		// A referenced or already-transitioning fileInode must not be demoted;
		// stop scanning entirely since the LRU yields no better candidate later
		if (0 < fileInode.references) || (fileInodeLeaseStateExclusiveGranted != fileInode.leaseState) {
			break
		}
		delayedLeaseRequest = &fileInodeLeaseRequestStruct{
			fileInode:   fileInode,
			requestType: fileInodeLeaseRequestDemote,
		}
		delayedLeaseRequest.delayedLeaseRequestListElement = delayedLeaseRequestList.PushBack(delayedLeaseRequest)
		fileInode.leaseState = fileInodeLeaseStateExclusiveDemoting
		globals.exclusiveLeaseFileInodeCacheLRU.Remove(fileInodeCacheLRUElement)
		fileInode.leaseListElement = globals.sharedLeaseFileInodeCacheLRU.PushBack(fileInode)
	}

	// Phase 2: queue Release requests for least-recently-used SharedLease
	// holders until the SharedFileLimit is honored; if the ExclusiveLease LRU
	// is still over its limit, tighten the Shared limit by the overage

	fileInodeCacheLimitToEnforce = int(globals.config.SharedFileLimit)

	if globals.exclusiveLeaseFileInodeCacheLRU.Len() > int(globals.config.ExclusiveFileLimit) {
		fileInodeCacheLimitToEnforce -= globals.exclusiveLeaseFileInodeCacheLRU.Len() - int(globals.config.ExclusiveFileLimit)
		if 0 > fileInodeCacheLimitToEnforce {
			fileInodeCacheLimitToEnforce = 0
		}
	}

	for globals.sharedLeaseFileInodeCacheLRU.Len() > fileInodeCacheLimitToEnforce {
		fileInodeCacheLRUElement = globals.sharedLeaseFileInodeCacheLRU.Front()
		fileInode = fileInodeCacheLRUElement.Value.(*fileInodeStruct)
		if (0 < fileInode.references) || (fileInodeLeaseStateSharedGranted != fileInode.leaseState) {
			break
		}
		delayedLeaseRequest = &fileInodeLeaseRequestStruct{
			fileInode:   fileInode,
			requestType: fileInodeLeaseRequestRelease,
		}
		delayedLeaseRequest.delayedLeaseRequestListElement = delayedLeaseRequestList.PushBack(delayedLeaseRequest)
		fileInode.leaseState = fileInodeLeaseStateSharedReleasing
		globals.sharedLeaseFileInodeCacheLRU.Remove(fileInodeCacheLRUElement)
		fileInode.leaseListElement = globals.unleasedFileInodeCacheLRU.PushBack(fileInode)
	}

	// Phase 3: evict unreferenced, unleased fileInodes while the unleased LRU
	// exceeds the combined Shared+Exclusive headroom or the cached ExtentMap
	// entry count exceeds ExtentMapEntryLimit

	fileInodeCacheLimitToEnforce = int(globals.config.ExclusiveFileLimit) - globals.exclusiveLeaseFileInodeCacheLRU.Len()
	fileInodeCacheLimitToEnforce += int(globals.config.SharedFileLimit) - globals.sharedLeaseFileInodeCacheLRU.Len()

	for (nil != globals.unleasedFileInodeCacheLRU.Front()) && ((globals.unleasedFileInodeCacheLRU.Len() > fileInodeCacheLimitToEnforce) || (globals.config.ExtentMapEntryLimit < uint64(globals.extentMapEntriesCached))) {
		fileInodeCacheLRUElement = globals.unleasedFileInodeCacheLRU.Front()
		fileInode = fileInodeCacheLRUElement.Value.(*fileInodeStruct)
		if 0 != fileInode.references {
			// TODO: While this shouldn't be the case, the current mock Lease/Lock solution may
			//       result in a referenced file being on the globals.unleasedFileInodeCacheLRU
			//       leading to a race condition where a new DoRead() comes in, can't find the
			//       "dirty" fileInode, and go off and fetch a stale copy from ProxyFS. Meanwhile,
			//       the reference by the ChunkedPUTContext for the "dirty" fileInode eventually
			//       finishes leading to proper updating of ProxyFS. Hence, a restart of PFSAgent
			//       or a subsequent fileInode cache flush for the referenced file will result in
			//       reading the correct/up-to-date contents of the file.
			break
		}
		globals.unleasedFileInodeCacheLRU.Remove(fileInodeCacheLRUElement)
		globals.extentMapEntriesCached -= fileInode.extentMapLenWhenUnreferenced
		delete(globals.fileInodeMap, fileInode.InodeNumber)
	}

	return
}
   240  
   241  // performDelayedLeaseRequestList is the companion to honorInodeCacheLimits and is
   242  // invoked once the globals Lock has been released.
   243  //
   244  func performDelayedLeaseRequestList(delayedLeaseRequestList *list.List) {
   245  	var (
   246  		delayedLeaseRequest            *fileInodeLeaseRequestStruct
   247  		delayedLeaseRequestListElement *list.Element
   248  	)
   249  
   250  	for 0 < delayedLeaseRequestList.Len() {
   251  		delayedLeaseRequestListElement = delayedLeaseRequestList.Front()
   252  		delayedLeaseRequestList.Remove(delayedLeaseRequestListElement)
   253  		delayedLeaseRequest = delayedLeaseRequestListElement.Value.(*fileInodeLeaseRequestStruct)
   254  		delayedLeaseRequest.Add(1)
   255  		globals.leaseRequestChan <- delayedLeaseRequest
   256  		delayedLeaseRequest.Wait()
   257  	}
   258  }
   259  
   260  // Locks come in two forms: Shared and Exclusive. If an Exclusive Lock has been requested
   261  // or granted, any subsequent Shared Lock must also block lest forward progress of an
   262  // Exclusive Lock requestor would not be guaranteed.
   263  //
   264  // One might imagine a desire to grab a Shared Lock and, later, determine that one actually
   265  // needs an Exclusive Lock. Alas, this is a recipe for deadlock if two such instances both
   266  // having obtained a Shared Lock attempting this promotion at about the same time. Neither
   267  // would be able to promote to Exclusive because the other is stuck continuing to hold its
   268  // Shared Lock.
   269  //
   270  // A better approach where such a promotion is possible is to do the reverse. Demoting an
    271  // Exclusive Lock to a Shared Lock has no such deadlock concern. Hence, if it is
   272  // possible one might ultimately need an Exclusive Lock, they should grab that first. If,
   273  // at some point, the potential for actually needing the Lock to remain Exclusive is gone
   274  // (but the Lock still needs to remain Shared), the Lock should then be demoted.
   275  //
   276  // Note, however, that it is expected Locks are actually held for very short intervals
   277  // (e.g. in the servicing of a FUSE upcall).
   278  
   279  // getSharedLock returns a granted Shared Lock.
   280  func (fileInode *fileInodeStruct) getSharedLock() (grantedLock *fileInodeLockRequestStruct) {
   281  	var (
   282  		leaseRequest *fileInodeLeaseRequestStruct
   283  	)
   284  
   285  	grantedLock = &fileInodeLockRequestStruct{
   286  		fileInode:      fileInode,
   287  		exclusive:      false,
   288  		holdersElement: nil,
   289  		waitersElement: nil,
   290  	}
   291  
   292  	// We must hold a SharedLease or an ExclusiveLease on fileInode to proceed
   293  
   294  	for {
   295  		globals.Lock()
   296  
   297  		if (fileInodeLeaseStateSharedGranted != fileInode.leaseState) && (fileInodeLeaseStateExclusiveGranted != fileInode.leaseState) {
   298  			// Request (at least) SharedLease and Wait... then Retry
   299  
   300  			globals.Unlock()
   301  
   302  			leaseRequest = &fileInodeLeaseRequestStruct{
   303  				fileInode:   fileInode,
   304  				requestType: fileInodeLeaseRequestShared,
   305  			}
   306  
   307  			leaseRequest.Add(1)
   308  
   309  			globals.leaseRequestChan <- leaseRequest
   310  
   311  			leaseRequest.Wait()
   312  
   313  			continue
   314  		}
   315  
   316  		if (nil != fileInode.exclusiveLockHolder) || (0 != fileInode.TODODeprecatelockWaiters.Len()) {
   317  			// Need to block awaiting a release() on a conflicting held or prior pending LockRequest
   318  			grantedLock.Add(1)
   319  			grantedLock.waitersElement = fileInode.TODODeprecatelockWaiters.PushBack(grantedLock)
   320  			globals.Unlock()
   321  			grantedLock.Wait()
   322  			return
   323  		}
   324  
   325  		// We can grant the grantedLock LockRequest
   326  
   327  		grantedLock.holdersElement = fileInode.sharedLockHolders.PushBack(grantedLock)
   328  
   329  		globals.Unlock()
   330  
   331  		return
   332  	}
   333  }
   334  
   335  // getExclusiveLock returns a granted Exclusive Lock.
   336  func (fileInode *fileInodeStruct) getExclusiveLock() (grantedLock *fileInodeLockRequestStruct) {
   337  	var (
   338  		leaseRequest *fileInodeLeaseRequestStruct
   339  	)
   340  
   341  	grantedLock = &fileInodeLockRequestStruct{
   342  		fileInode:      fileInode,
   343  		exclusive:      true,
   344  		holdersElement: nil,
   345  		waitersElement: nil,
   346  	}
   347  
   348  	// We must hold an ExclusiveLease on fileInode to proceed
   349  
   350  	for {
   351  		globals.Lock()
   352  
   353  		if fileInodeLeaseStateExclusiveGranted != fileInode.leaseState {
   354  			// Request ExclusiveLease and Wait... then Retry
   355  
   356  			globals.Unlock()
   357  
   358  			leaseRequest = &fileInodeLeaseRequestStruct{
   359  				fileInode:   fileInode,
   360  				requestType: fileInodeLeaseRequestExclusive,
   361  			}
   362  
   363  			leaseRequest.Add(1)
   364  
   365  			globals.leaseRequestChan <- leaseRequest
   366  
   367  			leaseRequest.Wait()
   368  
   369  			continue
   370  		}
   371  
   372  		if (nil != fileInode.exclusiveLockHolder) || (0 != fileInode.sharedLockHolders.Len()) {
   373  			// Need to block awaiting a release() on a conflicting held LockRequest
   374  			grantedLock.Add(1)
   375  			grantedLock.waitersElement = fileInode.TODODeprecatelockWaiters.PushBack(grantedLock)
   376  			globals.Unlock()
   377  			grantedLock.Wait()
   378  			return
   379  		}
   380  
   381  		// We can grant the grantedLock LockRequest
   382  
   383  		fileInode.exclusiveLockHolder = grantedLock
   384  
   385  		globals.Unlock()
   386  
   387  		return
   388  	}
   389  }
   390  
// release relinquishes a previously granted Shared or Exclusive Lock and
// grants as many pending LockRequests (queued on TODODeprecatelockWaiters)
// as the resulting state permits: releasing an ExclusiveLock may admit either
// one ExclusiveLock waiter or a run of SharedLock waiters; releasing the last
// SharedLock may admit one ExclusiveLock waiter (after first (re-)acquiring
// an ExclusiveLease).
func (grantedLock *fileInodeLockRequestStruct) release() {
	var (
		fileInode       *fileInodeStruct
		leaseRequest    *fileInodeLeaseRequestStruct
		nextLock        *fileInodeLockRequestStruct
		nextLockElement *list.Element
	)

	globals.Lock()

	fileInode = grantedLock.fileInode

	if grantedLock.exclusive {
		// ExclusiveLock released - see if one or more pending LockRequest's can now be granted

		fileInode.exclusiveLockHolder = nil

		nextLockElement = fileInode.TODODeprecatelockWaiters.Front()

		if nil != nextLockElement {
			nextLock = nextLockElement.Value.(*fileInodeLockRequestStruct)

			if nextLock.exclusive {
				// Grant nextLock as ExclusiveLock

				_ = fileInode.TODODeprecatelockWaiters.Remove(nextLock.waitersElement)
				nextLock.waitersElement = nil
				fileInode.exclusiveLockHolder = nextLock
				nextLock.Done()
			} else {
				// Grant nextLock, and any subsequent Lock's SharedLock
				//   until an ExclusiveLock Request is encountered (or no more TODODeprecatelockWaiters)

				for {
					_ = fileInode.TODODeprecatelockWaiters.Remove(nextLock.waitersElement)
					nextLock.waitersElement = nil
					nextLock.holdersElement = fileInode.sharedLockHolders.PushBack(nextLock)
					nextLock.Done()

					nextLockElement = fileInode.TODODeprecatelockWaiters.Front()
					if nil == nextLockElement {
						break
					}
					nextLock = nextLockElement.Value.(*fileInodeLockRequestStruct)
					if nextLock.exclusive {
						break
					}
				}
			}
		}

		globals.Unlock()

		return
	}

	// SharedLock released - see if one pending ExclusiveLock can now be granted

	_ = fileInode.sharedLockHolders.Remove(grantedLock.holdersElement)

	if 0 != fileInode.sharedLockHolders.Len() {
		// Other SharedLock holders remain... nothing to grant yet
		globals.Unlock()
		return
	}

	nextLockElement = fileInode.TODODeprecatelockWaiters.Front()

	if nil == nextLockElement {
		// No waiters... nothing to grant
		globals.Unlock()
		return
	}

	nextLock = nextLockElement.Value.(*fileInodeLockRequestStruct)

	// Since a subsequent SharedLock Request would have been immediately granted,
	//   we know this is an ExclusiveLock Request... so just grant it

	_ = fileInode.TODODeprecatelockWaiters.Remove(nextLock.waitersElement)
	nextLock.waitersElement = nil
	fileInode.exclusiveLockHolder = nextLock

	// But we cannot actually deliver the completion of the ExclusiveLock
	//   until after we are assured we hold an ExclusiveLease on fileInode

	// NOTE(review): the lease may be revoked again between the Wait() and the
	// re-acquired globals.Lock(), hence the loop rather than a single request
	for fileInodeLeaseStateExclusiveGranted != fileInode.leaseState {
		globals.Unlock()
		leaseRequest = &fileInodeLeaseRequestStruct{
			fileInode:   fileInode,
			requestType: fileInodeLeaseRequestExclusive,
		}
		leaseRequest.Add(1)
		globals.leaseRequestChan <- leaseRequest
		leaseRequest.Wait()
		globals.Lock()
	}

	// Finally, we can let the ExclusiveLock requester know they have it

	nextLock.Done()

	globals.Unlock()
}
   493  
   494  // Leases, like Locks, also come in two forms: Shared and Exclusive. The key difference
   495  // is that Leases are used to coordinate access among distinct pfsagentd instances. As such,
   496  // the overhead of obtaining Leases suggests a good default behavior would be to continue
   497  // holding a Lease even after all Locks requiring the Lease have themselves been released
   498  // in anticipation of a new Lock request arriving shortly. Indeed, the caching of a
   499  // FileInode's ExtentMap remains valid for the life of a Shared or Exclusive Lease and not
   500  // having to fetch a FileInode's ExtentMap each time a read operation is performed
   501  // provides yet another incentive to holding a Shared Lease for a much longer period of
   502  // time.
   503  //
   504  // Importantly, such caching must be coordinated with other instances that may also need
   505  // to cache. This is where Leases really shine. In order to grant a Shared Lock, this
   506  // instance must know that no other instance holds any Exclusive Locks. To do that, a
   507  // prerequisite for obtaining a Shared Lock is that this instance hold either a Shared
   508  // or Exclusive Lease. Similarly, in order to grant an Exclusive Lock, this instance must
   509  // know that no other instance holds any Shared or Exclusive Locks. To do that, a
   510  // prerequisite for obtaining an Exclusive Lock is that this instance hold an Exclusive
   511  // Lease.
   512  //
   513  // Due to write operations needing to be chained together into a smaller number of
   514  // LogSegment PUTs, it is typical for an Exclusive Lock to be released well before
   515  // such in-flight LogSegment PUTs have completed. And Exclusive Lease must be held,
   516  // not only for the life span of an Exclusive Lock, but also to include the life span
   517  // of any in-flight LogSegment PUTs.
   518  //
   519  // As with promotion of a Shared Lock to an Exclusive Lock being deadlock inducing, this
   520  // concern certainly applies for the promotion of a Shared Lease to an Exclusive Lease.
   521  // The work-around of just always requesting an Exclusive Lease "just in case" is not
    522  // as desirable when the duration of holding it is arbitrarily long. As such, Leases
   523  // will, in fact, support promotion with an important caveat that it might fail. Indeed,
   524  // it may very well occur that the Lease Manager has already issued a Revoke for a
   525  // previously granted Shared Lease. In this case, the instance requesting the promotion
   526  // will first have to go through the path of first releasing the Shared Lease it
   527  // currently holds before requesting the desired Exclusive Lease.
   528  //
   529  // Note that another instance may request a Shared or Exclusive Lease that is in conflict
   530  // with a Lease held by this instance. When this happens, a Lease Demotion (i.e. from
   531  // Exclusive to Shared) or Lease Release will be requested by ProxyFS. At such time, any
   532  // long-running state requiring the Lease being relinquished must itself be resolved
   533  // (e.g. by evicting any cached ExtentMap contents and/or flushing any in-flight LogSegment
   534  // PUTs). In addition, a loss of contact with ProxyFS (where all-instance Lease State is
   535  // managed) must be detected by both ends (i.e. each instance and ProxyFS). If such contact
   536  // is lost, each instance must in a timely manner force all Leases to be relinquished
   537  // perhaps abruptly (i.e. it may not be possible to complete the flushing of any in-flight
   538  // LogSegment PUTs). After a suitable interval, ProxyFS would then be able to reliably
   539  // consider the instance losing contact to have relinquished all held Leases.
   540  
   541  // leaseDaemon talks to the controlling ProxyFS daemon to negotiate FileInode Leases... and
   542  // keeps them alive. It also responds to revocation requests from the controlling ProxyFS
   543  // daemon by properly flushing and evicting the affected FileInode. Losing an ExclusiveLease
   544  // means that there can be no in-flight LogSegment PUTs. Losing a SharedLease means also
   545  // that there can be no cached ExtentMap extents/chunks.
   546  //
   547  // TODO: For now, leaseDaemon simply grants (and never revokes) leases as they arrive.
   548  //       Of course this means there can be no coherency between a pfsagentd instance
   549  //       and any other such instance... nor with any FileInode activity seen by the
   550  //       controlling ProxyFS (i.e. via jrpcclient, FUSE, or via pfs_middleware).
   551  //
   552  func leaseDaemon() {
   553  	var (
   554  		fileInode    *fileInodeStruct
   555  		leaseRequest *fileInodeLeaseRequestStruct
   556  	)
   557  
   558  	for {
   559  		leaseRequest = <-globals.leaseRequestChan
   560  
   561  		if fileInodeLeaseRequestShutdown == leaseRequest.requestType {
   562  			globals.Lock()
   563  
   564  			for _, fileInode = range globals.fileInodeMap {
   565  				fileInode.leaseState = fileInodeLeaseStateNone
   566  			}
   567  
   568  			globals.Unlock()
   569  
   570  			leaseRequest.Done()
   571  
   572  			return
   573  		}
   574  
   575  		fileInode = leaseRequest.fileInode
   576  
   577  		switch leaseRequest.requestType {
   578  		case fileInodeLeaseRequestShared:
   579  			globals.Lock()
   580  			switch fileInode.leaseState {
   581  			case fileInodeLeaseStateNone:
   582  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   583  			case fileInodeLeaseStateSharedRequested:
   584  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   585  			case fileInodeLeaseStateSharedGranted:
   586  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   587  			case fileInodeLeaseStateSharedReleasing:
   588  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   589  			case fileInodeLeaseStateSharedPromoting:
   590  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   591  			case fileInodeLeaseStateExclusiveRequested:
   592  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   593  			case fileInodeLeaseStateExclusiveGranted:
   594  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   595  			case fileInodeLeaseStateExclusiveDemoting:
   596  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   597  			case fileInodeLeaseStateExclusiveReleasing:
   598  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   599  			default:
   600  				logFatalf("fileInode 0x%016X current leaseState (%d) unrecognized", fileInode.InodeNumber)
   601  			}
   602  			globals.Unlock()
   603  		case fileInodeLeaseRequestExclusive:
   604  			globals.Lock()
   605  			switch fileInode.leaseState {
   606  			case fileInodeLeaseStateNone:
   607  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   608  			case fileInodeLeaseStateSharedRequested:
   609  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   610  			case fileInodeLeaseStateSharedGranted:
   611  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   612  			case fileInodeLeaseStateSharedReleasing:
   613  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   614  			case fileInodeLeaseStateSharedPromoting:
   615  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   616  			case fileInodeLeaseStateExclusiveRequested:
   617  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   618  			case fileInodeLeaseStateExclusiveGranted:
   619  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   620  			case fileInodeLeaseStateExclusiveDemoting:
   621  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   622  			case fileInodeLeaseStateExclusiveReleasing:
   623  				fileInode.leaseState = fileInodeLeaseStateExclusiveGranted
   624  			default:
   625  				logFatalf("fileInode 0x%016X current leaseState (%d) unrecognized", fileInode.InodeNumber)
   626  			}
   627  			globals.Unlock()
   628  		case fileInodeLeaseRequestDemote:
   629  			globals.Lock()
   630  			switch fileInode.leaseState {
   631  			case fileInodeLeaseStateNone:
   632  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   633  			case fileInodeLeaseStateSharedRequested:
   634  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   635  			case fileInodeLeaseStateSharedGranted:
   636  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   637  			case fileInodeLeaseStateSharedReleasing:
   638  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   639  			case fileInodeLeaseStateSharedPromoting:
   640  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   641  			case fileInodeLeaseStateExclusiveRequested:
   642  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   643  			case fileInodeLeaseStateExclusiveGranted:
   644  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   645  			case fileInodeLeaseStateExclusiveDemoting:
   646  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   647  			case fileInodeLeaseStateExclusiveReleasing:
   648  				fileInode.leaseState = fileInodeLeaseStateSharedGranted
   649  			default:
   650  				logFatalf("fileInode 0x%016X current leaseState (%d) unrecognized", fileInode.InodeNumber)
   651  			}
   652  			globals.Unlock()
   653  		case fileInodeLeaseRequestRelease:
   654  			globals.Lock()
   655  			switch fileInode.leaseState {
   656  			case fileInodeLeaseStateNone:
   657  				fileInode.leaseState = fileInodeLeaseStateNone
   658  			case fileInodeLeaseStateSharedRequested:
   659  				fileInode.leaseState = fileInodeLeaseStateNone
   660  			case fileInodeLeaseStateSharedGranted:
   661  				fileInode.leaseState = fileInodeLeaseStateNone
   662  			case fileInodeLeaseStateSharedReleasing:
   663  				fileInode.leaseState = fileInodeLeaseStateNone
   664  			case fileInodeLeaseStateSharedPromoting:
   665  				fileInode.leaseState = fileInodeLeaseStateNone
   666  			case fileInodeLeaseStateExclusiveRequested:
   667  				fileInode.leaseState = fileInodeLeaseStateNone
   668  			case fileInodeLeaseStateExclusiveGranted:
   669  				fileInode.leaseState = fileInodeLeaseStateNone
   670  			case fileInodeLeaseStateExclusiveDemoting:
   671  				fileInode.leaseState = fileInodeLeaseStateNone
   672  			case fileInodeLeaseStateExclusiveReleasing:
   673  				fileInode.leaseState = fileInodeLeaseStateNone
   674  			default:
   675  				logFatalf("fileInode 0x%016X current leaseState (%d) unrecognized", fileInode.InodeNumber)
   676  			}
   677  			globals.Unlock()
   678  		default:
   679  			logFatalf("leaseRequest.requestType (%d) unrecognized", leaseRequest.requestType)
   680  		}
   681  
   682  		leaseRequest.Done()
   683  	}
   684  }