github.com/andy2046/gopie@v0.7.0/pkg/drf/drf.go

// Package drf implements Dominant Resource Fairness.
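//
// DRF repeatedly runs the task of the Node whose dominant share is lowest.
// A Node's dominant share is the maximum, over all resource types, of its
// allocated amount divided by the cluster's total capacity for that type.
// Exported methods synchronize on internal mutexes.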
package drf

import (
	"container/heap"
	"sync"
)

// New creates a DRF Cluster from the given total resource capacities and Nodes.
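//
// A minimal usage sketch, assuming hypothetical Typ values CPU and Memory
// (the concrete Typ values are defined elsewhere in this package):
//
//	n1 := NewNode(map[Typ]float64{CPU: 1, Memory: 4})
//	n2 := NewNode(map[Typ]float64{CPU: 3, Memory: 1})
//	cluster, err := New(map[Typ]float64{CPU: 9, Memory: 18}, n1, n2)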
func New(clusterResource map[Typ]float64, clusterNodes ...*Node) (DRF, error) {
	if len(clusterResource) == 0 {
		return EmptyDRF, ErrEmptyResource
	}
	if len(clusterNodes) == 0 {
		return EmptyDRF, ErrEmptyNodes
	}

	drf := DRF{
		clusterResource:  clusterResource,
		consumedResource: make(map[Typ]float64),
		nodes:            make(nodeQueue, len(clusterNodes)),
		mu:               &sync.RWMutex{},
	}
	for i, n := range clusterNodes {
		n.mu.Lock()
		n.index = i
		if n.allocated == nil {
			n.allocated = make(map[Typ]float64)
		}
		if n.demand == nil {
			n.demand = make(map[Typ]float64)
		}
		drf.nodes[i] = n
		n.mu.Unlock()
	}
	heap.Init(&drf.nodes)
	return drf, nil
}

// NewNode creates a Cluster Node, optionally with an initial demand.
func NewNode(demand ...map[Typ]float64) *Node {
	n := Node{
		allocated: make(map[Typ]float64),
	}
	if len(demand) > 0 {
		n.demand = demand[0]
	}
	if n.demand == nil {
		n.demand = make(map[Typ]float64)
	}
	return &n
}

// UpdateDemand adds delta to the Node's existing demand.
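// Negative values in delta shrink the demand. For example, with the
// hypothetical CPU value from above, a Node whose demand is {CPU: 1} ends
// up with demand {CPU: 2} after UpdateDemand(map[Typ]float64{CPU: 1}).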
func (n *Node) UpdateDemand(delta map[Typ]float64) {
	n.mu.Lock()
	defer n.mu.Unlock()
	for k, v := range delta {
		n.demand[k] += v
	}
}

// NextTask runs the next task on the Node with the lowest dominant share.
// It returns ErrResourceSaturated when the cluster cannot cover that
// Node's demand.
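//
// A typical loop keeps granting demands until the cluster saturates
// (a sketch, reusing the hypothetical cluster built in New):
//
//	for {
//		if err := cluster.NextTask(); err != nil {
//			break // e.g. ErrResourceSaturated
//		}
//	}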
func (drf DRF) NextTask() error {
	drf.mu.Lock()
	defer drf.mu.Unlock()
	if len(drf.nodes) == 0 {
		return ErrEmptyNodes
	}
	// The heap head is the Node with the lowest dominant share.
	n := drf.nodes[0]
	n.mu.Lock()
	defer n.mu.Unlock()
	if drf.checkIfResourceUsageOverLimit(n) {
		return ErrResourceSaturated
	}
	n.updateAllocated(n.demand)
	drf.updateConsumed(n.demand)
	drf.computeDShare(n)
	// Re-establish the heap order after the dominant share changed.
	heap.Fix(&drf.nodes, n.index)
	return nil
}

// UpdateResource adds delta to the Cluster Resource.
func (drf DRF) UpdateResource(delta map[Typ]float64) {
	drf.mu.Lock()
	defer drf.mu.Unlock()
	for k, v := range delta {
		drf.clusterResource[k] += v
	}
}

// AddNode adds a new Node to the DRF Cluster.
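// The Node starts fresh: its allocation and dominant share are reset, so a
// Node previously detached via RemoveNode can be added back safely.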
func (drf DRF) AddNode(n *Node) {
	drf.mu.Lock()
	defer drf.mu.Unlock()
	n.mu.Lock()
	defer n.mu.Unlock()
	n.allocated = make(map[Typ]float64)
	n.dShare = 0
	if n.demand == nil {
		// TODO: error out if demand is empty
		n.demand = make(map[Typ]float64)
	}
	// heap.Push (rather than drf.nodes.Push) keeps the heap invariant.
	heap.Push(&drf.nodes, n)
}

// RemoveNode removes the Node from the DRF Cluster and releases its
// allocated resources back to the cluster.
func (drf DRF) RemoveNode(n *Node) {
	drf.mu.Lock()
	defer drf.mu.Unlock()
	n.mu.Lock()
	defer n.mu.Unlock()
	heap.Remove(&drf.nodes, n.index)
	n.index = -1
	for k, v := range n.allocated {
		drf.consumedResource[k] -= v
	}
	n.allocated = nil
	n.dShare = 0
}

// Allocated returns a copy of the resources allocated to the Node.
func (n *Node) Allocated() map[Typ]float64 {
	n.mu.RLock()
	defer n.mu.RUnlock()
	allc := make(map[Typ]float64, len(n.allocated))
	for k, v := range n.allocated {
		allc[k] = v
	}
	return allc
}

// Consumed returns a copy of the resources consumed across the cluster.
func (drf DRF) Consumed() map[Typ]float64 {
	drf.mu.RLock()
	defer drf.mu.RUnlock()
	consm := make(map[Typ]float64, len(drf.consumedResource))
	for k, v := range drf.consumedResource {
		consm[k] = v
	}
	return consm
}

// Resource returns a copy of the total cluster resources.
func (drf DRF) Resource() map[Typ]float64 {
	drf.mu.RLock()
	defer drf.mu.RUnlock()
	cr := make(map[Typ]float64, len(drf.clusterResource))
	for k, v := range drf.clusterResource {
		cr[k] = v
	}
	return cr
}

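// checkIfResourceUsageOverLimit reports whether granting the Node's demand
// would push any resource type past the cluster's capacity. It is called
// with drf.mu and n.mu held.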
func (drf DRF) checkIfResourceUsageOverLimit(n *Node) bool {
	for k := range n.demand {
		if drf.consumedResource[k]+n.demand[k] > drf.clusterResource[k] {
			return true
		}
	}
	return false
}

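// computeDShare raises the Node's dominant share to the largest ratio of
// allocated to cluster capacity across resource types (it never lowers it).
// For example, a Node holding {cpu: 2, mem: 4} of a {cpu: 10, mem: 8}
// cluster has a dominant share of max(2/10, 4/8) = 0.5. It is called with
// drf.mu and n.mu held.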
func (drf DRF) computeDShare(n *Node) {
	temp := n.dShare
	for k := range n.allocated {
		if r, ok := drf.clusterResource[k]; ok && r > 0 {
			if n.allocated[k]/r > temp {
				temp = n.allocated[k] / r
			}
		}
	}
	n.dShare = temp
}

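// updateAllocated adds the demand to the Node's allocated resources.
// It is called with n.mu held.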
func (n *Node) updateAllocated(demand map[Typ]float64) {
	for k := range demand {
		n.allocated[k] += demand[k]
	}
}

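// updateConsumed adds the demand to the cluster's consumed resources.
// It is called with drf.mu held.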
func (drf DRF) updateConsumed(demand map[Typ]float64) {
	for k := range demand {
		drf.consumedResource[k] += demand[k]
	}
}