gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/memory_test.go

package renter

import (
	"context"
	"fmt"
	"reflect"
	"sync"
	"testing"
	"time"

	"gitlab.com/NebulousLabs/fastrand"
	"gitlab.com/SkynetLabs/skyd/build"
	"gitlab.com/SkynetLabs/skyd/skymodules"
)

// TestMemoryManager checks that the memory management is working correctly.
func TestMemoryManager(t *testing.T) {
	// Mimic the default parameters.
	stopChan := make(chan struct{})
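	// newMemoryManager takes the total amount of memory (100) and the portion
	// reserved for priority requests (25), leaving 75 memory available to low
	// priority requests.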
	mm := newMemoryManager(100, 25, stopChan)

	// Low priority memory should have no issues requesting up to 75 memory.
	for i := 0; i < 75; i++ {
		if !mm.Request(context.Background(), 1, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
	}

	// Request 1 more memory. This should not be allowed to complete until
	// memory has been returned.
	memoryCompleted1 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 1, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted1)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.

	// Request some priority memory.
	for i := 0; i < 25; i++ {
		if !mm.Request(context.Background(), 1, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
	}

	// Request 27 priority memory. This will consume all of the priority memory,
	// plus two slots that could go to the non-priority request. Because this is
	// a priority request, it should be granted first, even if there is enough
	// non-priority memory for the non-priority request.
	memoryCompleted2 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 27, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted2)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.

	// Return 26 memory, which should not be enough for either open request to
	// complete. The request for 1 will remain blocked because it is not allowed
	// to complete while there is an open priority request. The priority request
	// will not complete because there is not enough memory available.
	mm.Return(26)

	// Check that neither memory request has completed.
	select {
	case <-memoryCompleted1:
		t.Error("memory request should not have completed")
	case <-memoryCompleted2:
		t.Error("memory request should not have completed")
	default:
	}

	// Return 1 more memory. This should clear the priority request but not the
	// normal request.
	mm.Return(1)
	select {
	case <-memoryCompleted1:
		t.Error("memory request should not have completed")
	case <-memoryCompleted2:
	}

	// All memory is in use again. Return 26 memory so that there is room for
	// the remaining low priority request for 1.
	mm.Return(26)
	<-memoryCompleted1

	// Try requesting a super large amount of memory on priority. This should
	// block all future requests until all memory has been returned.
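	// At this point 128 memory has been granted in total (75 + 25 + 27 + 1)
	// and 53 has been returned (26 + 1 + 26), so 75 memory is still in use and
	// only 25 is available.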
	memoryCompleted3 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 250, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted3)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	// Create a couple of future requests, both priority and non-priority.
	//
	// NOTE: We make the low priority requests first to ensure that the FIFO is
	// respecting priority.
	memoryCompleted6 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 1, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted6)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	memoryCompleted7 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 1, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted7)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	memoryCompleted4 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 30, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted4)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	memoryCompleted5 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 1, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted5)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.

	// Return 74 memory, bringing the mm to within 1 memory of being fully
	// returned. That is not quite enough to unblock the big request, and all
	// of the little requests should remain blocked as well.
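	// The priority fifo currently holds the requests for 250, 30, and 1 (in
	// that order), while the regular fifo holds the two low priority requests
	// for 1 each.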
	mm.Return(1)  // 1
	mm.Return(2)  // 3
	mm.Return(3)  // 6
	mm.Return(4)  // 10
	mm.Return(64) // 74

	// None of the memory requests should be able to complete.
	select {
	case <-memoryCompleted3:
		t.Error("memory should not complete")
	case <-memoryCompleted4:
		t.Error("memory should not complete")
	case <-memoryCompleted5:
		t.Error("memory should not complete")
	case <-memoryCompleted6:
		t.Error("memory should not complete")
	case <-memoryCompleted7:
		t.Error("memory should not complete")
	default:
	}

	// Return 1 more memory, this should unblock the big priority request.
	mm.Return(1)
	select {
	case <-memoryCompleted4:
		t.Error("memory should not complete")
	case <-memoryCompleted5:
		t.Error("memory should not complete")
	case <-memoryCompleted6:
		t.Error("memory should not complete")
	case <-memoryCompleted7:
		t.Error("memory should not complete")
	default:
	}

	// Return 150 memory, which means the large request is still holding the
	// full capacity of the mempool. None of the blocking threads should be
	// released. Because it is first in the fifo, nothing else should be
	// released either.
	mm.Return(1)  // 1
	mm.Return(2)  // 3
	mm.Return(3)  // 6
	mm.Return(4)  // 10
	mm.Return(65) // 75
	mm.Return(75) // 150
	select {
	case <-memoryCompleted4:
		t.Error("memory should not complete")
	case <-memoryCompleted5:
		t.Error("memory should not complete")
	case <-memoryCompleted6:
		t.Error("memory should not complete")
	case <-memoryCompleted7:
		t.Error("memory should not complete")
	default:
	}

	// Return 29 memory, which is not enough for the large request in the fifo
	// to be released.
	mm.Return(1)  // 1
	mm.Return(2)  // 3
	mm.Return(3)  // 6
	mm.Return(4)  // 10
	mm.Return(19) // 29
	select {
	case <-memoryCompleted4:
		t.Error("memory should not complete")
	case <-memoryCompleted5:
		t.Error("memory should not complete")
	case <-memoryCompleted6:
		t.Error("memory should not complete")
	case <-memoryCompleted7:
		t.Error("memory should not complete")
	default:
	}

	// Return 1 memory to release the large request.
	mm.Return(1)
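	// This brings the available memory to 30, exactly enough for the queued
	// priority request for 30.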
	<-memoryCompleted4

	// Return 27 memory, which should be enough to let both the priority item
	// through as well as the first small memory item through. Needs to be +2
	// because the priority item takes the +1 away.
	mm.Return(27)
	// Check for memoryCompleted5
	select {
	case <-memoryCompleted5:
	case <-memoryCompleted7:
		t.Error("memory should not complete")
	}
	// Check for memoryCompleted6
	select {
	case <-memoryCompleted6:
	case <-memoryCompleted7:
		t.Error("memory should not complete")
	}

	// Return one more memory to clear that final request.
	mm.Return(1)
	<-memoryCompleted7

	// Do a check to make sure that large non-priority requests do not block
	// priority requests.
	mm.Return(74) // There is still 1 memory unreturned.
	memoryCompleted8 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 250, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted8)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.

	// Do some priority requests.
	if !mm.Request(context.Background(), 10, memoryPriorityHigh) {
		t.Error("unable to get 10 memory")
	}
	if !mm.Request(context.Background(), 5, memoryPriorityHigh) {
		t.Error("unable to get 5 memory")
	}
	if !mm.Request(context.Background(), 20, memoryPriorityHigh) {
		t.Error("unable to get 20 memory")
	}
	// Clean up.
	mm.Return(36)
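	// Returning 36 (the 1 still outstanding plus the 10 + 5 + 20 from the
	// priority requests) brings the mm back to full capacity, which releases
	// the queued low priority request for 250.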
	<-memoryCompleted8
	mm.Return(250)
	if mm.available != mm.base {
		t.Error("test did not reset properly")
	}

	// Handle an edge case around awkwardly sized low priority memory requests.
	// The low priority request will go through.
	if !mm.Request(context.Background(), 85, memoryPriorityLow) {
		t.Error("could not get memory")
	}
	memoryCompleted9 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 20, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted9)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.

	// The high priority request should not have been granted even though there
	// is enough high priority memory available, because the low priority
	// request was large enough to eat into the high priority memory.
	select {
	case <-memoryCompleted9:
		t.Error("memory request should not have gone through")
	default:
	}
	mm.Return(5)
	// Now that a small amount of memory has been returned, the high priority
	// request should be able to complete.
	<-memoryCompleted9
	mm.Return(100)
	if mm.available != mm.base {
		t.Error("test did not reset properly")
	}

	// Test out the starvation detector. Request a continuous stream of high
	// priority memory that would otherwise starve out the low priority
	// requests. The starvation detector should make sure that the low priority
	// requests are eventually able to make progress.
	if !mm.Request(context.Background(), 100, memoryPriorityHigh) {
		t.Error("could not get memory through")
	}
	// Add 3 low priority requests each for 10 memory. All 3 should be unblocked
	// by the starvation detector at the same time.
	memoryCompleted10 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 10, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted10)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	memoryCompleted11 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 10, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted11)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	memoryCompleted12 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 10, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted12)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	// Add another low priority request, this should be unblocked by the
	// starvation detector much later than the previous 3.
	memoryCompleted13 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 30, memoryPriorityLow) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted13)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.

	// Add high priority requests and release previous high priority items.
	// These should all unblock as soon as memory is returned.
	for i := 0; i < 3; i++ {
		memoryCompletedL := make(chan struct{})
		go func() {
			if !mm.Request(context.Background(), 100, memoryPriorityHigh) {
				t.Error("unable to get memory")
			}
			close(memoryCompletedL)
		}()
		<-mm.staticBlocking // wait until the goroutine is in the fifo.
		mm.Return(100)
		<-memoryCompletedL
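		// The returned memory goes straight to the queued priority request, so
		// the low priority requests stay blocked.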
	}

	// Add a high priority request. The next time memory is returned, the first
	// set of low priority items should go through.
	memoryCompleted14 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 100, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted14)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	mm.Return(100)
	// First set of low priority requests should have gone through.
	<-memoryCompleted10
	<-memoryCompleted11
	<-memoryCompleted12
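	// The starvation detector handed the returned memory to the starved low
	// priority requests (3 * 10 = 30) ahead of the queued priority request,
	// which still needs the remaining 30 before it can complete.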
	// Second set should not have gone through.
	select {
	case <-memoryCompleted13:
		t.Error("memory should not have been released")
	default:
	}
	mm.Return(30)
	<-memoryCompleted14

	// Add high priority requests and release previous high priority items.
	// These should all unblock as soon as memory is returned.
	for i := 0; i < 3; i++ {
		memoryCompletedL := make(chan struct{})
		go func() {
			if !mm.Request(context.Background(), 100, memoryPriorityHigh) {
				t.Error("unable to get memory")
			}
			close(memoryCompletedL)
		}()
		<-mm.staticBlocking // wait until the goroutine is in the fifo.
		mm.Return(100)
		<-memoryCompletedL

		// Second set should not have gone through still.
		select {
		case <-memoryCompleted13:
			t.Error("memory should not have been released")
		default:
		}
	}
	memoryCompleted15 := make(chan struct{})
	go func() {
		if !mm.Request(context.Background(), 100, memoryPriorityHigh) {
			t.Error("unable to get memory")
		}
		close(memoryCompleted15)
	}()
	<-mm.staticBlocking // wait until the goroutine is in the fifo.
	mm.Return(100)
	// Second set of low priority requests should have gone through.
	<-memoryCompleted13
	mm.Return(30)
	<-memoryCompleted15
	mm.Return(100)
	if mm.available != mm.base {
		t.Error("test did not reset properly")
	}
}

// TestMemoryManagerConcurrent checks that the memory manager does not
// misbehave when many threads request and return memory at the same time.
func TestMemoryManagerConcurrent(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Mimic the default parameters.
	stopChan := make(chan struct{})
	mm := newMemoryManager(100, 25, stopChan)

	// Spin up a bunch of threads to all request and release memory at the same
	// time.
	doMemory := func() {
		for {
			// Check if the thread has been killed.
			select {
			case <-stopChan:
				return
			default:
			}

			// Randomly request some amount of memory. Sometimes there will be
			// overdrafts.
			memNeeded := uint64(fastrand.Intn(110) + 1)
			// Randomly set the priority of this memory.
			priority := false
			if fastrand.Intn(2) == 0 {
				priority = true
			}

			// Perform the request.
			if !mm.Request(context.Background(), memNeeded, priority) {
				select {
				case <-stopChan:
					return
				default:
					t.Error("request failed even though the mm hasn't been shut down")
				}
				return
			}

			// Sit on the memory for some random (low) number of microseconds.
			sleepTime := time.Microsecond * time.Duration(fastrand.Intn(1e3))
			time.Sleep(sleepTime)

			// Randomly decide whether to return all of the memory at once.
			if fastrand.Intn(2) == 0 {
				mm.Return(memNeeded)
				continue
			}
			// Return random smaller amounts of memory.
			for memNeeded > 0 {
				returnAmt := uint64(fastrand.Intn(int(memNeeded))) + 1
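				// returnAmt is always in [1, memNeeded], so the subtraction
				// below cannot underflow.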
				memNeeded -= returnAmt
				mm.Return(returnAmt)
			}
		}
	}

	// Spin up 20 threads to compete for memory.
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func() {
			doMemory()
			wg.Done()
		}()
	}

	// Sleep for 10 seconds to let the threads do their thing.
	time.Sleep(time.Second * 10)

	// Close out the memory and wait for all the threads to die.
	close(stopChan)
	wg.Wait()
}

// TestMemoryManagerStatus probes the response from callStatus
func TestMemoryManagerStatus(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create memory manager
	memoryDefault := repairMemoryDefault
	memoryPriorityDefault := memoryDefault / 4
	stopChan := make(chan struct{})
	mm := newMemoryManager(memoryDefault, memoryPriorityDefault, stopChan)

	// Check status
	ms := mm.callStatus()
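	// Before any requests are made, the regular pool reports the base minus
	// the priority reserve, while the priority pool can draw on the full base.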
	expectedStatus := skymodules.MemoryManagerStatus{
		Available: memoryDefault - memoryPriorityDefault,
		Base:      memoryDefault - memoryPriorityDefault,
		Requested: 0,

		PriorityAvailable: memoryDefault,
		PriorityBase:      memoryDefault,
		PriorityRequested: 0,
		PriorityReserve:   memoryPriorityDefault,
	}
	if !reflect.DeepEqual(ms, expectedStatus) {
		t.Log("Expected:", expectedStatus)
		t.Log("Status:", ms)
		t.Fatal("MemoryStatus not as expected")
	}

	// Request memory
	normalRequest := uint64(100)
	requested := mm.Request(context.Background(), normalRequest, memoryPriorityLow)
	if !requested {
		t.Error("Normal request should have succeeded")
	}
	priorityRequest := uint64(123)
	requested = mm.Request(context.Background(), priorityRequest, memoryPriorityHigh)
	if !requested {
		t.Error("Priority request should have succeeded")
	}

	// Check status
	ms = mm.callStatus()
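	// Both requests are drawn from the same underlying pool, so both Available
	// fields drop by normalRequest + priorityRequest.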
	expectedStatus = skymodules.MemoryManagerStatus{
		Available: memoryDefault - memoryPriorityDefault - normalRequest - priorityRequest,
		Base:      memoryDefault - memoryPriorityDefault,
		Requested: 0,

		PriorityAvailable: memoryDefault - normalRequest - priorityRequest,
		PriorityBase:      memoryDefault,
		PriorityRequested: 0,
		PriorityReserve:   memoryPriorityDefault,
	}
	if !reflect.DeepEqual(ms, expectedStatus) {
		t.Log("Expected:", expectedStatus)
		t.Log("Status:", ms)
		t.Fatal("MemoryStatus not as expected")
	}

	// Request remaining memory
	mm.mu.Lock()
	request := mm.available
	mm.mu.Unlock()
	requested = mm.Request(context.Background(), request, memoryPriorityHigh)
	if !requested {
		t.Error("Priority request should have succeeded")
	}

	// Check status
	ms = mm.callStatus()
	expectedStatus = skymodules.MemoryManagerStatus{
		Available: 0,
		Base:      memoryDefault - memoryPriorityDefault,
		Requested: 0,

		PriorityAvailable: 0,
		PriorityBase:      memoryDefault,
		PriorityRequested: 0,
		PriorityReserve:   memoryPriorityDefault,
	}
	if !reflect.DeepEqual(ms, expectedStatus) {
		t.Log("Expected:", expectedStatus)
		t.Log("Status:", ms)
		t.Fatal("MemoryStatus not as expected")
	}

	// Request enough memory to have a FIFO queue.
	//
	// These must happen in goroutines since they are blocking calls. We don't
	// care about the calls returning since the block is after the request is
	// added to the FIFO queue which is what the test is concerned with.
	go func() {
		_ = mm.Request(context.Background(), memoryDefault, memoryPriorityLow)
	}()
	go func() {
		_ = mm.Request(context.Background(), memoryDefault, memoryPriorityHigh)
	}()

	// Since the requests are being handled in goroutines, wait until each
	// request appears in the FIFO queue.
	err := build.Retry(100, 10*time.Millisecond, func() error {
		mm.mu.Lock()
		defer mm.mu.Unlock()
		if mm.fifo.Len() != 1 {
			return fmt.Errorf("FIFO queue should have 1 request but has %v", mm.fifo.Len())
		}
		if mm.priorityFifo.Len() != 1 {
			return fmt.Errorf("Priority FIFO queue should have 1 request but has %v", mm.priorityFifo.Len())
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Check Status
	ms = mm.callStatus()
	expectedStatus = skymodules.MemoryManagerStatus{
		Available: 0,
		Base:      memoryDefault - memoryPriorityDefault,
		Requested: memoryDefault,

		PriorityAvailable: 0,
		PriorityBase:      memoryDefault,
		PriorityRequested: memoryDefault,
		PriorityReserve:   memoryPriorityDefault,
	}
	if !reflect.DeepEqual(ms, expectedStatus) {
		t.Log("Expected:", expectedStatus)
		t.Log("Status:", ms)
		t.Fatal("MemoryStatus not as expected")
	}
}

// TestMemoryManagerRequestMemoryWithContext verifies the behaviour of the
// memory manager's Request method when it is given a context that can expire.
func TestMemoryManagerRequestMemoryWithContext(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create memory manager
	stopChan := make(chan struct{})
	mm := newMemoryManager(repairMemoryDefault, repairMemoryPriorityDefault, stopChan)

	// Get the total available memory
	mm.mu.Lock()
	available := mm.available
	mm.mu.Unlock()

	// Request all available memory
	requested := mm.Request(context.Background(), available, memoryPriorityHigh)
	if !requested {
		t.Fatal("Priority request should have succeeded")
	}

	// Validate that requesting more memory blocks
	doneChan := make(chan struct{})
	go func() {
		mm.Request(context.Background(), 1, memoryPriorityHigh)
		close(doneChan)
	}()
	select {
	case <-doneChan:
		t.Fatal("Priority request should have been blocking...")
	case <-time.After(time.Second):
	}

	// Validate the current status
	status := mm.callStatus()
	if status.PriorityAvailable != 0 || status.PriorityRequested != 1 {
		t.Fatal("unexpected")
	}

	// Return some memory, this should unblock the previously queued request
	mm.Return(1)
	select {
	case <-time.After(time.Second):
		t.Fatal("Request should have been unblocked now")
	case <-doneChan:
	}

	// Validate the current status
	status = mm.callStatus()
	if status.PriorityAvailable != 0 || status.PriorityRequested != 0 {
		t.Fatal("unexpected")
	}

	// Request some memory, this time pass a context that times out
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	requested = mm.Request(ctx, 10, memoryPriorityHigh)
	if requested {
		t.Fatal("Priority request should have timed out")
	}

	// Validate the current status
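	// The timed out request should have been removed from the queue, so no
	// memory should show up as requested.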
	status = mm.callStatus()
	if status.PriorityAvailable != 0 || status.PriorityRequested != 0 {
		t.Fatal("unexpected")
	}

	// Return all available memory
	mm.Return(available)
	status = mm.callStatus()
	if status.PriorityAvailable != available {
		t.Fatal("unexpected")
	}
}

// TestAddMemoryStatus is a unit test for adding up MemoryManagerStatus objects.
func TestAddMemoryStatus(t *testing.T) {
	mms := skymodules.MemoryManagerStatus{
		Available: 1,
		Base:      2,
		Requested: 3,

		PriorityAvailable: 4,
		PriorityBase:      5,
		PriorityRequested: 6,
		PriorityReserve:   7,
	}
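	// Adding the status to itself should double every field exactly.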
	total := mms.Add(mms)

	if total.Available != 2*mms.Available {
		t.Fatal("invalid")
	}
	if total.Base != 2*mms.Base {
		t.Fatal("invalid")
	}
	if total.Requested != 2*mms.Requested {
		t.Fatal("invalid")
	}
	if total.PriorityAvailable != 2*mms.PriorityAvailable {
		t.Fatal("invalid")
	}
	if total.PriorityBase != 2*mms.PriorityBase {
		t.Fatal("invalid")
	}
	if total.PriorityRequested != 2*mms.PriorityRequested {
		t.Fatal("invalid")
	}
	if total.PriorityReserve != 2*mms.PriorityReserve {
		t.Fatal("invalid")
	}
}