github.com/angenalZZZ/gofunc@v0.0.0-20210507121333-48ff1be3917b/net/load_balancing.go (about) 1 package net 2 3 import ( 4 "container/heap" 5 "sync" 6 "sync/atomic" 7 ) 8 9 // LoadBalancing represents the the type of load-balancing algorithm. 10 type LoadBalancing int 11 12 const ( 13 // RoundRobin assigns the next accepted connection to the event-loop by polling event-loop list. 14 RoundRobin LoadBalancing = iota 15 16 // LeastConnections assigns the next accepted connection to the event-loop that is 17 // serving the least number of active connections at the current time. 18 LeastConnections 19 20 // SourceAddrHash assignes the next accepted connection to the event-loop by hashing socket fd. 21 SourceAddrHash 22 ) 23 24 type ( 25 // loadBalancer is a interface which manipulates the event-loop set. 26 loadBalancer interface { 27 register(*eventloop) 28 next(int) *eventloop 29 iterate(func(int, *eventloop) bool) 30 len() int 31 calibrate(*eventloop, int32) 32 } 33 34 // roundRobinEventLoopSet with Round-Robin algorithm. 35 roundRobinEventLoopSet struct { 36 nextLoopIndex int 37 eventLoops []*eventloop 38 size int 39 } 40 41 // leastConnectionsEventLoopSet with Least-Connections algorithm. 42 leastConnectionsEventLoopSet struct { 43 sync.RWMutex 44 minHeap minEventLoopHeap 45 cachedRoot *eventloop 46 threshold int32 47 calibrateConnsThreshold int32 48 } 49 50 // sourceAddrHashEventLoopSet with Hash algorithm. 51 sourceAddrHashEventLoopSet struct { 52 eventLoops []*eventloop 53 size int 54 } 55 ) 56 57 // ==================================== Implementation of Round-Robin load-balancer ==================================== 58 59 func (set *roundRobinEventLoopSet) register(el *eventloop) { 60 el.idx = set.size 61 set.eventLoops = append(set.eventLoops, el) 62 set.size++ 63 } 64 65 // next returns the eligible event-loop based on Round-Robin algorithm. 
66 func (set *roundRobinEventLoopSet) next(_ int) (el *eventloop) { 67 el = set.eventLoops[set.nextLoopIndex] 68 if set.nextLoopIndex++; set.nextLoopIndex >= set.size { 69 set.nextLoopIndex = 0 70 } 71 return 72 } 73 74 func (set *roundRobinEventLoopSet) iterate(f func(int, *eventloop) bool) { 75 for i, el := range set.eventLoops { 76 if !f(i, el) { 77 break 78 } 79 } 80 } 81 82 func (set *roundRobinEventLoopSet) len() int { 83 return set.size 84 } 85 86 func (set *roundRobinEventLoopSet) calibrate(el *eventloop, delta int32) { 87 atomic.AddInt32(&el.connCount, delta) 88 } 89 90 // ================================= Implementation of Least-Connections load-balancer ================================= 91 92 // Leverage min-heap to optimize Least-Connections load-balancing. 93 type minEventLoopHeap []*eventloop 94 95 // Implement heap.Interface: Len, Less, Swap, Push, Pop. 96 func (h minEventLoopHeap) Len() int { 97 return len(h) 98 } 99 100 func (h minEventLoopHeap) Less(i, j int) bool { 101 //return (*h)[i].loadConnCount() < (*h)[j].loadConnCount() 102 return h[i].connCount < h[j].connCount 103 } 104 105 func (h minEventLoopHeap) Swap(i, j int) { 106 h[i], h[j] = h[j], h[i] 107 h[i].idx, h[j].idx = i, j 108 } 109 110 func (h *minEventLoopHeap) Push(x interface{}) { 111 el := x.(*eventloop) 112 el.idx = len(*h) 113 *h = append(*h, el) 114 } 115 116 func (h *minEventLoopHeap) Pop() interface{} { 117 old := *h 118 i := len(old) - 1 119 x := old[i] 120 old[i] = nil // avoid memory leak 121 x.idx = -1 // for safety 122 *h = old[:i] 123 return x 124 } 125 126 func (set *leastConnectionsEventLoopSet) register(el *eventloop) { 127 set.Lock() 128 heap.Push(&set.minHeap, el) 129 if el.idx == 0 { 130 set.cachedRoot = el 131 } 132 set.calibrateConnsThreshold = int32(set.minHeap.Len()) 133 set.Unlock() 134 } 135 136 // next returns the eligible event-loop by taking the root node from minimum heap based on Least-Connections algorithm. 
func (set *leastConnectionsEventLoopSet) next(_ int) (el *eventloop) {
	//set.RLock()
	//el = set.minHeap[0]
	//set.RUnlock()
	//return

	// In most cases, `next` method returns the cached event-loop immediately and it only reconstructs the minimum heap
	// every `calibrateConnsThreshold` times for reducing locks to global mutex.
	// NOTE(review): cachedRoot and calibrateConnsThreshold are read here without holding the
	// lock while register/this branch write them under Lock — presumably registration finishes
	// before next is ever invoked concurrently; confirm against the caller's lifecycle.
	if atomic.LoadInt32(&set.threshold) >= set.calibrateConnsThreshold {
		set.Lock()
		// Rebuild the heap so minHeap[0] reflects the counts accumulated by calibrate,
		// then refresh the cached root and reset the calibration counter.
		heap.Init(&set.minHeap)
		set.cachedRoot = set.minHeap[0]
		atomic.StoreInt32(&set.threshold, 0)
		set.Unlock()
	}
	return set.cachedRoot
}

// iterate walks the event-loop heap under the read lock, stopping early when f returns false.
// Note: visiting order is heap order, not registration order.
func (set *leastConnectionsEventLoopSet) iterate(f func(int, *eventloop) bool) {
	set.RLock()
	for i, el := range set.minHeap {
		if !f(i, el) {
			break
		}
	}
	set.RUnlock()
}

// len reports how many event-loops are registered in the set.
func (set *leastConnectionsEventLoopSet) len() (size int) {
	set.RLock()
	size = set.minHeap.Len()
	set.RUnlock()
	return
}

// calibrate adjusts el's active-connection counter by delta and bumps the
// calibration counter that eventually triggers the heap rebuild in next.
func (set *leastConnectionsEventLoopSet) calibrate(el *eventloop, delta int32) {
	//set.Lock()
	//el.connCount += delta
	//heap.Fix(&set.minHeap, el.idx)
	//set.Unlock()
	// The read lock only excludes the exclusive heap rebuild in next/register;
	// the counters themselves are updated atomically, so concurrent calibrate
	// calls may proceed in parallel.
	set.RLock()
	atomic.AddInt32(&el.connCount, delta)
	atomic.AddInt32(&set.threshold, 1)
	set.RUnlock()
}

// ======================================= Implementation of Hash load-balancer ========================================

// register appends the event-loop to the list and stamps it with its slot index.
func (set *sourceAddrHashEventLoopSet) register(el *eventloop) {
	el.idx = set.size
	set.eventLoops = append(set.eventLoops, el)
	set.size++
}

// next returns the eligible event-loop by taking the remainder of a given fd as the index of event-loop list.
192 func (set *sourceAddrHashEventLoopSet) next(hashCode int) *eventloop { 193 return set.eventLoops[hashCode%set.size] 194 } 195 196 func (set *sourceAddrHashEventLoopSet) iterate(f func(int, *eventloop) bool) { 197 for i, el := range set.eventLoops { 198 if !f(i, el) { 199 break 200 } 201 } 202 } 203 204 func (set *sourceAddrHashEventLoopSet) len() int { 205 return set.size 206 } 207 208 func (set *sourceAddrHashEventLoopSet) calibrate(el *eventloop, delta int32) { 209 atomic.AddInt32(&el.connCount, delta) 210 }