github.com/swiftstack/ProxyFS@v0.0.0-20210203235616-4017c267d62f/dlm/llm_test.go

     1  // Copyright (c) 2015-2021, NVIDIA CORPORATION.
     2  // SPDX-License-Identifier: Apache-2.0
     3  
     4  package dlm
     5  
     6  import (
     7  	"flag"
     8  	"io/ioutil"
     9  	"os"
    10  	"strconv"
    11  	"testing"
    12  	"time"
    13  
    14  	"github.com/stretchr/testify/assert"
    15  
    16  	"github.com/swiftstack/ProxyFS/blunder"
    17  	"github.com/swiftstack/ProxyFS/conf"
    18  	"github.com/swiftstack/ProxyFS/logger"
    19  	"github.com/swiftstack/ProxyFS/trackedlock"
    20  	"github.com/swiftstack/ProxyFS/transitions"
    21  )
    22  
    23  // Test string for passing inode 1
    24  var s1 string = strconv.Itoa(1)
    25  
    26  // Mutex for protecting global variables
    27  var mutex trackedlock.Mutex
    28  
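        // testOpTyp enumerates the operations that the controlling test can ask a
        // worker goroutine (threadNode) to perform.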
    29  type testOpTyp int
    30  
    31  const (
    32  	nilTestOp testOpTyp = iota
    33  	readLock
    34  	writeLock
    35  	tryReadLock
    36  	tryWriteLock
    37  	unlock
    38  	stopThread
    39  )
    40  
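        // testReq is a single operation request sent to a worker goroutine over its
        // requestForThread channel; t lets the worker run assertions against the
        // calling test's *testing.T.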
    41  type testReq struct {
    42  	lockID string
    43  	typ    testOpTyp // Operation type - writeLock, etc
    44  	t      *testing.T
    45  }
    46  
    47  // Per thread structure storing channel information
    48  type threadInfo struct {
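        // startedNode signals that the goroutine is up; requestForThread delivers
        // testReq operations to it.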
    49  	startedNode      chan bool
    50  	requestForThread chan *testReq
    51  }
    52  
    53  var globalSyncPt chan testReq // Channel used to synchronize the test goroutines that simulate multiple threads
    54  
    55  var testConfMap conf.ConfMap
    56  
    57  // Largely stolen from fs/api_test.go
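        // testSetup creates and chdirs into a scratch directory, brings up the logger
        // (with trackedlock hold-time settings in its config), creates globalSyncPt,
        // and brings up transitions with a minimal FSGlobals configuration.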
    58  func testSetup() (err error) {
    59  	confStrings := []string{
    60  		"TrackedLock.LockHoldTimeLimit=2s",
    61  		"TrackedLock.LockCheckPeriod=1s",
    62  	}
    63  
    64  	testDir, err := ioutil.TempDir(os.TempDir(), "ProxyFS_test_ldlm_")
    65  	if nil != err {
    66  		return
    67  	}
    68  
    69  	err = os.Chdir(testDir)
    70  	if nil != err {
    71  		return
    72  	}
    73  
    74  	err = os.Mkdir("TestVolume", os.ModePerm)
        	if nil != err {
        		return
        	}
    75  
    76  	confMap, err := conf.MakeConfMapFromStrings(confStrings)
    77  	if err != nil {
    78  		return
    79  	}
    80  
    81  	err = logger.Up(confMap)
    82  	if nil != err {
    83  		return
    84  	}
    85  
    86  	// Setup channel used to synchronize multiple test thread operations
    87  	globalSyncPt = make(chan testReq)
    88  
    89  	testConfMapStrings := []string{
    90  		"Logging.LogFilePath=/dev/null",
    91  		"Cluster.WhoAmI=nobody",
    92  		"FSGlobals.VolumeGroupList=",
    93  		"FSGlobals.CheckpointHeaderConsensusAttempts=5",
    94  		"FSGlobals.MountRetryLimit=6",
    95  		"FSGlobals.MountRetryDelay=1s",
    96  		"FSGlobals.MountRetryExpBackoff=2",
    97  		"FSGlobals.LogCheckpointHeaderPosts=true",
    98  		"FSGlobals.TryLockBackoffMin=10ms",
    99  		"FSGlobals.TryLockBackoffMax=50ms",
   100  		"FSGlobals.TryLockSerializationThreshhold=5",
   101  		"FSGlobals.SymlinkMax=32",
   102  		"FSGlobals.CoalesceElementChunkSize=16",
   103  	}
   104  
   105  	testConfMap, err = conf.MakeConfMapFromStrings(testConfMapStrings)
   106  	if nil != err {
   107  		return
   108  	}
   109  
   110  	err = transitions.Up(testConfMap)
   111  	if nil != err {
   112  		logger.ErrorWithError(err, "transitions.Up() failed")
   113  		return
   114  	}
   115  
   116  	return
   117  }
   118  
   119  // Largely stolen from fs/api_test.go
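        // testTeardown brings down transitions and removes the scratch directory
        // created by testSetup.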
   120  func testTeardown() (err error) {
   121  	err = transitions.Down(testConfMap)
   122  	if nil != err {
   123  		logger.ErrorWithError(err, "transitions.Down() failed")
   124  		return
   125  	}
   126  
   127  	testDir, err := os.Getwd()
   128  	if nil != err {
   129  		return
   130  	}
   131  
   132  	err = os.Chdir("..")
   133  	if nil != err {
   134  		return
   135  	}
   136  
   137  	err = os.RemoveAll(testDir)
   138  	if nil != err {
   139  		return
   140  	}
   141  
   142  	return
   143  }
   144  
   145  // Largely stolen from fs/api_test.go
   146  func TestMain(m *testing.M) {
   147  	flag.Parse()
   148  
   149  	err := testSetup()
   150  	if nil != err {
   151  		logger.ErrorWithError(err)
   152  	}
   153  
   154  	testResults := m.Run()
   155  
   156  	err = testTeardown()
   157  	if nil != err {
   158  		logger.ErrorWithError(err, "testTeardown failed")
   159  	}
   160  
   161  	os.Exit(testResults)
   162  }
   163  
   164  // Test basic lock primitives
   165  func TestLockAPI(t *testing.T) {
   166  
   167  	// TODO - how to clean up lockMap here between test runs?
   168  	testSimpleLocks(t)
   169  	testTwoThreadsExclLocking(t)
   170  	testTwoThreadsSharedLocking(t)
   171  	testTwoThreadsAndExclToShared(t)
   172  	testTwoThreadsAndSharedToExcl(t)
   173  	test100ThreadsSharedLocking(t)
   174  	test100ThreadsExclLocking(t)
   175  }
   176  
   177  // Test basic WriteLock, ReadLock and Unlock
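        // GenerateCallerID(), RWLockStruct, IsLockHeld() and the WRITELOCK/READLOCK/
        // ANYLOCK constants come from package dlm itself; waitCountOwners() and
        // waitCountWaiters() are assumed to be test helpers defined elsewhere in this
        // package that block until the named lock has exactly the given number of
        // owners/waiters, which is what keeps the count checks below race-free.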
   178  func testSimpleLocks(t *testing.T) {
   179  	assert := assert.New(t)
   180  
   181  	myCookie := GenerateCallerID()
   182  	myRwLock := &RWLockStruct{LockID: s1, Notify: nil, LockCallerID: myCookie}
   183  
   184  	myRwLock.WriteLock()
   185  	waitCountOwners(s1, 1)
   186  	waitCountWaiters(s1, 0)
   187  	assert.Equal(IsLockHeld(s1, myCookie, WRITELOCK), true)
   188  	assert.Equal(IsLockHeld(s1, myCookie, ANYLOCK), true)
   189  	assert.Equal(IsLockHeld(s1, myCookie, READLOCK), false)
   190  
   191  	myRwLock.Unlock()
   192  	waitCountOwners(s1, 0)
   193  	waitCountWaiters(s1, 0)
   194  
   195  	myRwLock.ReadLock()
   196  	waitCountOwners(s1, 1)
   197  	waitCountWaiters(s1, 0)
   198  	assert.Equal(IsLockHeld(s1, myCookie, WRITELOCK), false)
   199  	assert.Equal(IsLockHeld(s1, myCookie, ANYLOCK), true)
   200  	assert.Equal(IsLockHeld(s1, myCookie, READLOCK), true)
   201  
   202  	myRwLock.Unlock()
   203  	waitCountOwners(s1, 0)
   204  	waitCountWaiters(s1, 0)
   205  
   206  	// Try locks
   207  	err := myRwLock.TryWriteLock()
   208  	assert.Nil(err, "TryWriteLock() should work if no lock owner.")
   209  	waitCountOwners(s1, 1)
   210  	waitCountWaiters(s1, 0)
   211  
   212  	myRwLock.Unlock()
   213  	waitCountOwners(s1, 0)
   214  	waitCountWaiters(s1, 0)
   215  
   216  	err = myRwLock.TryReadLock()
   217  	assert.Nil(err, "TryReadLock() should work if no lock owner.")
   218  	waitCountOwners(s1, 1)
   219  	waitCountWaiters(s1, 0)
   220  
   221  	myRwLock.Unlock()
   222  	waitCountOwners(s1, 0)
   223  	waitCountWaiters(s1, 0)
   224  	assert.Equal(IsLockHeld(s1, myCookie, WRITELOCK), false)
   225  	assert.Equal(IsLockHeld(s1, myCookie, ANYLOCK), false)
   226  	assert.Equal(IsLockHeld(s1, myCookie, READLOCK), false)
   227  }
   228  
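        // A hedged usage sketch of the basic exclusive-lock pattern exercised above.
        // It is compiled but not executed by `go test` (there is no Output comment),
        // and it uses only identifiers that already appear in this file.
        func ExampleRWLockStruct_WriteLock() {
        	callerID := GenerateCallerID()
        	lock := &RWLockStruct{LockID: s1, Notify: nil, LockCallerID: callerID}
        
        	// Acquire the lock exclusively, then release it.
        	err := lock.WriteLock()
        	if nil != err {
        		return
        	}
        	lock.Unlock()
        }
        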
   229  //
   230  // Code related to multiple test threads.
   231  //
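        // The controlling test starts threadCount worker goroutines (threadNode),
        // sends them testReq operations over per-thread channels, and then uses
        // waitCountOwners()/waitCountWaiters() plus the currentLockOwner map to
        // observe which goroutine holds the lock at any point in time.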
   232  
   233  // Thread which currently owns the lock.
   234  //
   235  // This is a map indexed by the lock ID (the inode number as a string).
   236  var currentLockOwner map[string]uint64
   237  
   238  // Map of threads and channels used for communication
   239  var threadMap map[uint64]*threadInfo
   240  
   241  // Set up thread structures based on the number of threads the test wants
   242  func setupThreadMap(threadCount uint64) {
   243  	threadMap = make(map[uint64]*threadInfo)
   244  	currentLockOwner = make(map[string]uint64)
   245  
   246  	for i := uint64(0); i < threadCount; i++ {
   247  		thread := &threadInfo{startedNode: make(chan bool), requestForThread: make(chan *testReq)}
   248  		threadMap[i] = thread
   249  	}
   250  }
   251  
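        // setupThreads builds the thread map and launches threadCount worker
        // goroutines, waiting for each one to signal that it has started.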
   252  func setupThreads(threadCount uint64) {
   253  	setupThreadMap(threadCount)
   254  
   255  	// Start threads and wait for them
   256  	for i := range threadMap {
   257  		go threadNode(i)
   258  		<-threadMap[i].startedNode
   259  	}
   260  }
   261  
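        // stopThreads tells every worker goroutine to exit.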
   262  func stopThreads(t *testing.T) {
   263  	for i := range threadMap {
   264  		sendRequestToThread(i, t, stopThread, s1)
   265  	}
   266  }
   267  
   268  // Test thread.  Just waits on channel and does operation requested.
   269  func threadNode(threadID uint64) {
   270  
   271  	// Tell control thread we are up and set channel to read.
   272  	threadMap[threadID].startedNode <- true
   273  	var request chan *testReq
   274  	request = threadMap[threadID].requestForThread
   275  
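        	// Locks created by this goroutine, keyed by lockID, so that a later
        	// unlock request can find the matching RWLockStruct.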
   276  	var myLockMap map[string]*RWLockStruct
   277  	myLockMap = make(map[string]*RWLockStruct)
   278  
   279  	// Get the cookie which tracks this goroutine
   280  	myCookie := GenerateCallerID()
   281  
   282  	// Wait for a command
   283  	for {
   284  		lockRequest := <-request
   285  		assert := assert.New(lockRequest.t)
   286  		assert.NotEqual("", lockRequest.lockID)
   287  
   288  		switch lockRequest.typ {
   289  		case stopThread:
   290  			return
   291  
   292  		case writeLock:
   293  			// Init lock structure and add to map
   295  			myRwLock := &RWLockStruct{LockID: lockRequest.lockID, Notify: nil, LockCallerID: myCookie}
   296  			myLockMap[lockRequest.lockID] = myRwLock
   297  
   298  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, WRITELOCK), false)
   299  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, ANYLOCK), false)
   300  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, READLOCK), false)
   301  
   302  			err := myRwLock.WriteLock()
   303  			assert.Nil(err, "No error from WriteLock().")
   304  
   305  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, WRITELOCK), true)
   306  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, ANYLOCK), true)
   307  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, READLOCK), false)
   308  
   309  			mutex.Lock()
   310  			currentLockOwner[lockRequest.lockID] = threadID
   311  			mutex.Unlock()
   312  
   313  		case readLock:
   314  			// Init lock structure and add to map
   315  			myRwLock := &RWLockStruct{LockID: lockRequest.lockID, Notify: nil, LockCallerID: myCookie}
   316  			myLockMap[lockRequest.lockID] = myRwLock
   317  
   318  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, WRITELOCK), false)
   319  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, ANYLOCK), false)
   320  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, READLOCK), false)
   321  
   322  			err := myRwLock.ReadLock()
   323  			assert.Nil(err, "No error from ReadLock().")
   324  
   325  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, WRITELOCK), false)
   326  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, ANYLOCK), true)
   327  			assert.Equal(IsLockHeld(lockRequest.lockID, myCookie, READLOCK), true)
   328  
   329  			mutex.Lock()
   330  			currentLockOwner[lockRequest.lockID] = threadID
   331  			mutex.Unlock()
   332  
   333  		case tryWriteLock:
   334  			// Init lock structure and add to map
   335  			myRwLock := &RWLockStruct{LockID: lockRequest.lockID, Notify: nil, LockCallerID: myCookie}
   336  			myLockMap[lockRequest.lockID] = myRwLock
   337  
   338  			err := myRwLock.TryWriteLock()
   339  			if err != nil {
   340  				assert.True(blunder.Is(err, blunder.TryAgainError))
   341  			} else { // the try succeeded; record this thread as the owner
   342  				mutex.Lock()
   343  				currentLockOwner[lockRequest.lockID] = threadID
   344  				mutex.Unlock()
   345  			}
   346  
   347  		case tryReadLock:
   348  			// Init lock structure and add to map
   349  			myRwLock := &RWLockStruct{LockID: lockRequest.lockID, Notify: nil, LockCallerID: myCookie}
   350  			myLockMap[lockRequest.lockID] = myRwLock
   351  
   352  			err := myRwLock.TryReadLock()
   353  			if err != nil {
   354  				assert.True(blunder.Is(err, blunder.TryAgainError))
   355  			} else { // the try succeeded; record this thread as the owner
   356  				mutex.Lock()
   357  				currentLockOwner[lockRequest.lockID] = threadID
   358  				mutex.Unlock()
   359  			}
   360  
   361  		case unlock:
   362  			// Lookup lock in map
   363  			myRwLock := myLockMap[lockRequest.lockID]
   364  			assert.NotNil(myRwLock)
   365  			assert.NotEqual("", myRwLock.LockID)
   366  			assert.Equal(IsLockHeld(myRwLock.LockID, myCookie, ANYLOCK), true)
   367  
   368  			err := myRwLock.Unlock()
   369  			assert.Nil(err, "No error from Unlock().")
   370  
   371  			delete(myLockMap, lockRequest.lockID)
   372  
   373  			// We do not clear currentLockOwner here since that would be a race condition with a thread
   374  			// which has just been granted the lock.  It is possible that it could grab the lock and set
   375  			// currentLockOwner before we can grab the mutex.
   376  		}
   377  	}
   378  }
   379  
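        // sendRequestToThread hands one operation to a worker goroutine over its
        // (unbuffered) request channel.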
   380  func sendRequestToThread(threadID uint64, t *testing.T, operation testOpTyp, lockID string) {
   381  	request := &testReq{typ: operation, lockID: lockID, t: t}
   382  	threadMap[threadID].requestForThread <- request
   383  
   384  	// We do not wait until the operation completes before returning.
   385  }
   386  
   387  // Test that two threads can grab a lock *exclusive* and the second thread
   388  // only gets lock after first one has done Unlock().
   389  func testTwoThreadsExclLocking(t *testing.T) {
   390  	var numThreads uint64 = 2
   391  
   392  	// Initialize worker threads
   393  	setupThreads(numThreads)
   394  
   395  	// Lock *exclusive* from thread 0 and wait until lock is owned.
   396  	sendRequestToThread(0, t, writeLock, s1)
   397  	waitCountOwners(s1, 1)
   398  
   399  	// Send *exclusive* from thread 1, this will block until thread 0 does unlock.
   400  	// We just wait until we see a thread waiting for the lock.
   401  	sendRequestToThread(1, t, writeLock, s1)
   402  	waitCountWaiters(s1, 1)
   403  
   404  	// Release lock from thread 0.  This should grant lock to thread 1.
   405  	sendRequestToThread(0, t, unlock, s1)
   406  
   407  	// Block until the lock is granted to thread 1.
   408  	waitCountWaiters(s1, 0)
   409  	waitCountOwners(s1, 1)
   410  
   411  	sendRequestToThread(1, t, unlock, s1)
   412  	waitCountWaiters(s1, 0)
   413  	waitCountOwners(s1, 0)
   414  
   415  	// Stop worker threads
   416  	stopThreads(t)
   417  }
   418  
   419  // Test that two threads can grab a lock shared.
   420  func testTwoThreadsSharedLocking(t *testing.T) {
   421  	var numThreads uint64 = 2
   422  
   423  	// Initialize worker threads
   424  	setupThreads(numThreads)
   425  
   426  	// Lock *shared* from thread 0 and wait until lock is owned.
   427  	sendRequestToThread(0, t, readLock, s1)
   428  	waitCountOwners(s1, 1)
   429  
   430  	// Send *shared* from thread 1.  This should be granted right away.
   431  	sendRequestToThread(1, t, readLock, s1)
   432  	waitCountOwners(s1, 2)
   433  	waitCountWaiters(s1, 0)
   434  
   435  	// Release lock from thread 0. Owners should just decrease by 1.
   436  	sendRequestToThread(0, t, unlock, s1)
   437  	waitCountOwners(s1, 1)
   438  	waitCountWaiters(s1, 0)
   439  
   440  	// Have thread 0 acquire the lock again
   441  	sendRequestToThread(0, t, readLock, s1)
   442  	waitCountOwners(s1, 2)
   443  	waitCountWaiters(s1, 0)
   444  
   445  	// Thread 1 releases the lock
   446  	sendRequestToThread(1, t, unlock, s1)
   447  	waitCountOwners(s1, 1)
   448  	waitCountWaiters(s1, 0)
   449  
   450  	// Thread 1 acquires the lock again
   451  	sendRequestToThread(1, t, readLock, s1)
   452  	waitCountOwners(s1, 2)
   453  	waitCountWaiters(s1, 0)
   454  
   455  	// both threads release their locks
   456  	sendRequestToThread(0, t, unlock, s1)
   457  	sendRequestToThread(1, t, unlock, s1)
   458  	waitCountOwners(s1, 0)
   459  	waitCountWaiters(s1, 0)
   460  
   461  	// Stop worker threads
   462  	stopThreads(t)
   463  }
   464  
   465  // Test that 100 threads can grab a lock shared.
   466  func test100ThreadsSharedLocking(t *testing.T) {
   467  	var numThreads uint64 = 100
   468  
   469  	// Initialize worker threads
   470  	setupThreads(numThreads)
   471  
   472  	var i uint64
   473  	for i = 0; i < numThreads; i++ {
   474  		sendRequestToThread(i, t, readLock, s1)
   475  		waitCountOwners(s1, (i + 1))
   476  		waitCountWaiters(s1, 0)
   477  	}
   478  
   479  	currentOwners := numThreads
   480  	for i = 0; i < numThreads; i++ {
   481  		sendRequestToThread(i, t, unlock, s1)
   482  		currentOwners--
   483  		waitCountOwners(s1, currentOwners)
   484  		waitCountWaiters(s1, 0)
   485  	}
   486  
   487  	// Stop worker threads
   488  	stopThreads(t)
   489  }
   490  
   491  // Force 100 threads to grab the lock exclusively.  The operation
   492  // should be serialized.
   493  func test100ThreadsExclLocking(t *testing.T) {
   494  	var numThreads uint64 = 100
   495  
   496  	// Initialize worker threads
   497  	setupThreads(numThreads)
   498  
   499  	var i uint64
   500  	for i = 0; i < numThreads; i++ {
   501  		sendRequestToThread(i, t, writeLock, s1)
   502  		waitCountOwners(s1, 1)
   503  		waitCountWaiters(s1, i)
   504  	}
   505  	waitCountWaiters(s1, numThreads-1)
   506  
   507  	// Any of the threads could get the lock once thread0 releases it.
   508  	//
   509  	// Therefore, we have to find the thread which currently owns the lock.
   510  	//
   511  	// We do this by having the thread that grabs the lock save its threadID
   512  	// in the global currentLockOwner map.
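        	//
        	// waiters tracks how many goroutines should still be blocked on the lock;
        	// prevOwner remembers the owner we last released so that we do not send a
        	// second unlock to the same goroutine before the next waiter has actually
        	// been granted the lock.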
   513  	waiters := numThreads - 1
   514  	var prevOwner int64 = -1
   515  	for i = 0; i < numThreads; i++ {
   516  
   517  	waitForLockAcquire:
   518  		mutex.Lock()
   519  		owner, ok := currentLockOwner[s1]
   520  		if !ok {
   521  			mutex.Unlock()
   522  			// Lock is not yet held, let us wait
   523  			time.Sleep(5 * time.Millisecond)
   524  			goto waitForLockAcquire
   525  		}
   526  		mutex.Unlock()
   527  
   528  		if int64(owner) == prevOwner {
   529  			// We ran before the next caller is able to grab the lock. Let us wait and retry again:
   530  			time.Sleep(5 * time.Millisecond)
   531  			goto waitForLockAcquire
   532  		}
   533  
   534  		prevOwner = int64(owner)
   535  		sendRequestToThread(owner, t, unlock, s1)
   536  
   537  		// Wait until next thread picks up lock
   538  		if waiters > 0 {
   539  			waiters--
   540  			waitCountWaiters(s1, waiters)
   541  		}
   542  	}
   543  	waitCountWaiters(s1, 0)
   544  	waitCountOwners(s1, 0)
   545  
   546  	// Stop worker threads
   547  	stopThreads(t)
   548  }
   549  
   550  // Test that if one thread grabs the lock exclusive and a second thread attempts
   551  // to grab shared, the shared lock request is granted after the first thread unlocks.
   552  func testTwoThreadsAndExclToShared(t *testing.T) {
   553  	var numThreads uint64 = 2
   554  
   555  	// Initialize worker threads
   556  	setupThreads(numThreads)
   557  
   558  	// Lock *exclusive* from thread 0 and wait until lock is owned.
   559  	sendRequestToThread(0, t, writeLock, s1)
   560  	waitCountOwners(s1, 1)
   561  
   562  	// Send *shared* from thread 1, this will block until thread 0 does unlock.
   563  	// We just wait until we see a thread waiting for the lock.
   564  	sendRequestToThread(1, t, readLock, s1)
   565  	waitCountWaiters(s1, 1)
   566  
   567  	// Release lock from thread 0.  This should grant lock to thread 1.
   568  	sendRequestToThread(0, t, unlock, s1)
   569  
   570  	// Block until the lock is granted to thread 1.
   571  	waitCountWaiters(s1, 0)
   572  	waitCountOwners(s1, 1)
   573  
   574  	sendRequestToThread(1, t, unlock, s1)
   575  	waitCountWaiters(s1, 0)
   576  	waitCountOwners(s1, 0)
   577  
   578  	// Stop worker threads
   579  	stopThreads(t)
   580  }
   581  
   582  // Test that if one thread grabs the lock shared and a second thread attempts
   583  // to grab exclusive, the exclusive lock request is granted after the first thread unlocks.
   584  func testTwoThreadsAndSharedToExcl(t *testing.T) {
   585  	var numThreads uint64 = 2
   586  
   587  	// Initialize worker threads
   588  	setupThreads(numThreads)
   589  
   590  	// Lock *shared* from thread 0 and wait until lock is owned.
   591  	sendRequestToThread(0, t, readLock, s1)
   592  	waitCountOwners(s1, 1)
   593  
   594  	// Send *exclusive* from thread 1, this will block until thread 0 does unlock.
   595  	// We just wait until we see a thread waiting for the lock.
   596  	sendRequestToThread(1, t, writeLock, s1)
   597  	waitCountWaiters(s1, 1)
   598  
   599  	// Release lock from thread 0.  This should grant lock to thread 1.
   600  	sendRequestToThread(0, t, unlock, s1)
   601  
   602  	// Block until the lock is granted to thread 1.
   603  	waitCountWaiters(s1, 0)
   604  	waitCountOwners(s1, 1)
   605  
   606  	sendRequestToThread(1, t, unlock, s1)
   607  	waitCountWaiters(s1, 0)
   608  	waitCountOwners(s1, 0)
   609  
   610  	// Stop worker threads
   611  	stopThreads(t)
   612  }
   613  
   614  // Test that if the lock is held *exclusive*, TryWriteLock() and TryReadLock() fail.
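        // It also checks that the try variants succeed once the lock is free, and
        // that TryWriteLock() fails while the lock is held shared. Note that this
        // test is not currently invoked from TestLockAPI.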
   615  func testTryFailsIfHeldExclusive(t *testing.T) {
   616  	var numThreads uint64 = 2
   617  
   618  	// Initialize worker threads
   619  	setupThreads(numThreads)
   620  
   621  	// Lock *exclusive* from thread 0 and wait until lock is owned.
   622  	sendRequestToThread(0, t, writeLock, s1)
   623  	waitCountOwners(s1, 1)
   624  	mutex.Lock()
   625  	lockOwner := currentLockOwner[s1]
   626  	mutex.Unlock()
   627  	assert := assert.New(t)
   628  	assert.Equal(lockOwner, uint64(0), "Lock should be owned by thread 0.")
   629  
   630  	// Try write lock which should fail and there should not be any waiters.
   631  	sendRequestToThread(1, t, tryWriteLock, s1)
   632  	waitCountOwners(s1, 1)
   633  	waitCountWaiters(s1, 0)
   634  	mutex.Lock()
   635  	lockOwner = currentLockOwner[s1]
   636  	mutex.Unlock()
   637  	assert.Equal(lockOwner, uint64(0), "Lock should be owned by thread 0.")
   638  
   639  	// Try read lock which should fail and there should not be any waiters.
   640  	sendRequestToThread(1, t, tryReadLock, s1)
   641  	waitCountOwners(s1, 1)
   642  	waitCountWaiters(s1, 0)
   643  	mutex.Lock()
   644  	lockOwner = currentLockOwner[s1]
   645  	mutex.Unlock()
   646  	assert.Equal(lockOwner, uint64(0), "Lock should be owned by thread 0.")
   647  
   648  	// Release lock from thread 0. Owners should now be 0.
   649  	sendRequestToThread(0, t, unlock, s1)
   650  	waitCountOwners(s1, 0)
   651  	waitCountWaiters(s1, 0)
   652  
   653  	// Now try write lock from thread 1. This should work.
   654  	sendRequestToThread(1, t, tryWriteLock, s1)
   655  	waitCountOwners(s1, 1)
   656  	waitCountWaiters(s1, 0)
   657  	mutex.Lock()
   658  	lockOwner = currentLockOwner[s1]
   659  	mutex.Unlock()
   660  	assert.Equal(lockOwner, uint64(1), "Lock should be owned by thread 1.")
   661  
   662  	// Unlock from thread 1 and try a read lock from thread 1.  This should work.
   663  	sendRequestToThread(1, t, unlock, s1)
   664  	waitCountOwners(s1, 0)
   665  	waitCountWaiters(s1, 0)
   666  
   667  	sendRequestToThread(1, t, tryReadLock, s1)
   668  	waitCountOwners(s1, 1)
   669  	waitCountWaiters(s1, 0)
   670  	mutex.Lock()
   671  	lockOwner = currentLockOwner[s1]
   672  	mutex.Unlock()
   673  	assert.Equal(lockOwner, uint64(1), "Lock should be owned by thread 1.")
   674  
   675  	// A try of a write lock from thread 0 should fail if the lock is held shared by thread 1.
   676  	sendRequestToThread(0, t, tryWriteLock, s1)
   677  	waitCountOwners(s1, 1)
   678  	waitCountWaiters(s1, 0)
   679  	mutex.Lock()
   680  	lockOwner = currentLockOwner[s1]
   681  	mutex.Unlock()
   682  	assert.Equal(lockOwner, uint64(1), "Lock should be owned by thread 1.")
   683  
   684  	// Release the lock
   685  	sendRequestToThread(1, t, unlock, s1)
   686  	waitCountWaiters(s1, 0)
   687  	waitCountOwners(s1, 0)
   688  
   689  	// Stop worker threads
   690  	stopThreads(t)
   691  }