gitlab.com/SiaPrime/SiaPrime@v1.4.1/modules/renter/memory.go

package renter

// TODO: Move the memory manager to its own package.

// TODO: Add functions that allow a caller to increase or decrease the base
// memory for the memory manager.

import (
	"runtime"
	"runtime/debug"
	"sync"

	"gitlab.com/SiaPrime/SiaPrime/build"
)

// memoryManager can handle requests for memory and returns of memory. The
// memory manager is initialized with a base amount of memory and it will allow
// up to that much memory to be requested simultaneously. Beyond that, it will
// block on calls to 'Request' until enough memory has been returned to allow
// the request.
//
// If a request is made that exceeds the base memory, the memory manager will
// block until all memory is available, and then grant the request, blocking all
// future requests for memory until the memory is returned. This allows large
// requests to go through even if there is not enough base memory.
//
// The memoryManager counts how much memory has been returned to it since the
// most recent call to runtime.GC(). If memory is returned that puts the memory
// manager over the limit, the memory manager will call runtime.GC(). This helps
// to keep the amount of system memory consumed by siad under control.
type memoryManager struct {
	available    uint64           // memory that can currently be granted to new requests
	base         uint64           // total memory managed; 'available' never exceeds this
	fifo         []*memoryRequest // queue of normal requests waiting for memory
	memSinceGC   uint64           // memory returned since the manager last forced a GC
	mu           sync.Mutex
	priorityFifo []*memoryRequest // queue of priority requests, served before 'fifo'
	stop         <-chan struct{}  // closed on renter shutdown, releasing blocked requests
	underflow    uint64           // amount by which an oversized grant exceeded the base
}
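
// For example, with a base of 100 units, a request for 60 is granted
// immediately and 'available' drops to 40; a second concurrent request for 60
// joins the fifo and blocks until the first 60 units are returned. The units
// here are arbitrary and purely illustrative.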

// memoryRequest represents a single thread that is blocked while waiting for
// memory.
type memoryRequest struct {
	amount uint64
	done   chan struct{}
}

// try will try to get the amount of memory requested from the manager,
// returning true if the attempt is successful, and false if the attempt is
// not. In the event that the attempt is successful, the internal state of the
// memory manager will be updated to reflect the granted request.
func (mm *memoryManager) try(amount uint64) bool {
	if mm.available >= amount {
		// There is enough memory, decrement the memory and return.
		mm.available -= amount
		return true
	} else if mm.available == mm.base {
		// The amount of memory being requested is greater than the amount of
		// memory available, but no memory is currently in use. Set the amount
		// of memory available to zero and return.
		//
		// The effect is that all of the memory is allocated to this one
		// request, allowing the request to succeed even though there is
		// technically not enough total memory available for the request.
		mm.available = 0
		mm.underflow = amount - mm.base
		return true
	}
	return false
}
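
// For instance, with base = 100 and all 100 units available, a request for
// 150 takes the second branch: 'available' becomes 0 and 'underflow' becomes
// 50. The first 50 units later handed back through Return only pay down the
// underflow; only returns beyond that make memory available again. The
// numbers are arbitrary and purely illustrative.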

// Request is a blocking request for memory. The request will return when the
// memory has been acquired. If 'false' is returned, it means that the renter
// shut down before the memory could be allocated.
func (mm *memoryManager) Request(amount uint64, priority bool) bool {
	// Try to request the memory.
	mm.mu.Lock()
	if len(mm.fifo) == 0 && mm.try(amount) {
		mm.mu.Unlock()
		return true
	}

	// There is not enough memory available for this request, join the fifo.
	myRequest := &memoryRequest{
		amount: amount,
		done:   make(chan struct{}),
	}
	if priority {
		mm.priorityFifo = append(mm.priorityFifo, myRequest)
	} else {
		mm.fifo = append(mm.fifo, myRequest)
	}
	mm.mu.Unlock()

	// Block until memory is available or until shutdown. The thread that
	// closes the request's 'done' channel will also handle updating the
	// memoryManager variables.
	select {
	case <-myRequest.done:
		return true
	case <-mm.stop:
		return false
	}
}

// Return will return memory to the manager, waking any blocked threads which
// now have enough memory to proceed.
func (mm *memoryManager) Return(amount uint64) {
	mm.mu.Lock()
	defer mm.mu.Unlock()

	// Check if the garbage collector should be run now that memory has been
	// released. If the garbage collector does not run soon, this released
	// memory can build up and significantly increase the total amount of
	// memory that the renter consumes.
	mm.memSinceGC += amount
	if mm.memSinceGC > defaultMemory {
		runtime.GC()
		debug.FreeOSMemory()
		mm.memSinceGC = 0
	}

	// Add the remaining memory to the pool of available memory, clearing out
	// the underflow if needed.
	if mm.underflow > 0 && amount <= mm.underflow {
		// Not even enough memory has been returned to clear the underflow.
		// Reduce the underflow amount and return.
		mm.underflow -= amount
		return
	} else if mm.underflow > 0 && amount > mm.underflow {
		amount -= mm.underflow
		mm.underflow = 0
	}
	mm.available += amount

	// Sanity check - the amount of memory available should not exceed the base
	// unless the memory manager is being used incorrectly.
	if mm.available > mm.base {
		build.Critical("renter memory manager being used incorrectly, too much memory returned")
		mm.available = mm.base
	}

	// Release as many of the threads blocked in the priority fifo as possible.
	for len(mm.priorityFifo) > 0 {
		if !mm.try(mm.priorityFifo[0].amount) {
			// There is not enough memory to grant the next request, meaning no
			// future requests should be checked either.
			return
		}
		// There is enough memory to grant the next request. Unblock that
		// request and continue checking the next requests.
		close(mm.priorityFifo[0].done)
		mm.priorityFifo = mm.priorityFifo[1:]
	}

	// Release as many of the threads blocked in the fifo as possible.
	for len(mm.fifo) > 0 {
		if !mm.try(mm.fifo[0].amount) {
			// There is not enough memory to grant the next request, meaning no
			// future requests should be checked either.
			return
		}
		// There is enough memory to grant the next request. Unblock that
		// request and continue checking the next requests.
		close(mm.fifo[0].done)
		mm.fifo = mm.fifo[1:]
	}
}
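
// As a concrete illustration: if 'underflow' is 50 and 80 units are returned,
// the first 50 cancel the underflow and 'available' grows by the remaining
// 30; queued priority requests are then granted in order, followed by normal
// requests, for as long as try keeps succeeding. The numbers are arbitrary
// and purely illustrative.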

// newMemoryManager will create a memoryManager and return it.
func newMemoryManager(baseMemory uint64, stopChan <-chan struct{}) *memoryManager {
	return &memoryManager{
		available: baseMemory,
		base:      baseMemory,
		stop:      stopChan,
	}
}
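
// The function below is an illustrative sketch of the intended call pattern
// for the memory manager; the function name and the sizes used are arbitrary
// example values, not taken from the renter's actual call sites.
func exampleMemoryManagerUsage() {
	stop := make(chan struct{})
	mm := newMemoryManager(1<<20, stop) // 1 MiB of base memory

	// Block until 64 KiB can be granted, or until the stop channel is closed.
	if !mm.Request(1<<16, false) {
		return // shutdown occurred before the memory could be allocated
	}

	// ... perform the memory-heavy work here ...

	// Hand the memory back so that queued requests can be unblocked.
	mm.Return(1 << 16)

	// Closing the stop channel releases any threads still blocked in Request.
	close(stop)
}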