// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package drainer

import (
	"context"
	"sync"
	"time"
)

// DrainDeadlineNotifier allows batch notification of nodes that have reached
// their drain deadline.
type DrainDeadlineNotifier interface {
	// NextBatch returns the next batch of nodes that have reached their
	// deadline.
	NextBatch() <-chan []string

	// Remove removes the given node from being tracked for a deadline.
	Remove(nodeID string)

	// Watch marks the given node for being watched for its deadline.
	Watch(nodeID string, deadline time.Time)
}

// deadlineHeap implements the DrainDeadlineNotifier and supports coalescing
// several deadlines into a single emission.
//
// NOTE(review): despite the name, there is no heap structure here — tracked
// nodes live in a flat map that is scanned linearly to find the next
// deadline. That is O(n) per recalculation, which is fine for the node
// counts a drainer handles.
type deadlineHeap struct {
	// ctx stops the background watch goroutine when cancelled.
	ctx context.Context

	// coalesceWindow is how far past the earliest deadline we are willing
	// to wait so that nearby deadlines are emitted as one batch.
	coalesceWindow time.Duration

	// batch is the unbuffered channel on which deadlined node IDs are
	// delivered to the consumer of NextBatch.
	batch chan []string

	// nodes maps a tracked node ID to its drain deadline. A zero deadline
	// marks a force drain. Guarded by mu.
	nodes map[string]time.Time

	// trigger (capacity 1) wakes the watch loop to recompute its timer
	// after Watch or Remove mutates the node set.
	trigger chan struct{}

	// mu guards nodes.
	mu sync.Mutex
}

// NewDeadlineHeap returns a new deadline heap that coalesces for the given
// duration and will stop watching when the passed context is cancelled.
40 func NewDeadlineHeap(ctx context.Context, coalesceWindow time.Duration) *deadlineHeap { 41 d := &deadlineHeap{ 42 ctx: ctx, 43 coalesceWindow: coalesceWindow, 44 batch: make(chan []string), 45 nodes: make(map[string]time.Time, 64), 46 trigger: make(chan struct{}, 1), 47 } 48 49 go d.watch() 50 return d 51 } 52 53 func (d *deadlineHeap) watch() { 54 timer := time.NewTimer(0) 55 timer.Stop() 56 select { 57 case <-timer.C: 58 default: 59 } 60 defer timer.Stop() 61 62 var nextDeadline time.Time 63 for { 64 select { 65 case <-d.ctx.Done(): 66 return 67 case <-timer.C: 68 var batch []string 69 70 d.mu.Lock() 71 for nodeID, nodeDeadline := range d.nodes { 72 if !nodeDeadline.After(nextDeadline) { 73 batch = append(batch, nodeID) 74 delete(d.nodes, nodeID) 75 } 76 } 77 d.mu.Unlock() 78 79 if len(batch) > 0 { 80 // Send the batch 81 select { 82 case d.batch <- batch: 83 case <-d.ctx.Done(): 84 return 85 } 86 } 87 88 case <-d.trigger: 89 } 90 91 // Calculate the next deadline 92 deadline, ok := d.calculateNextDeadline() 93 if !ok { 94 continue 95 } 96 97 // If the deadline is zero, it is a force drain. Otherwise if the 98 // deadline is in the future, see if we already have a timer setup to 99 // handle it. If we don't create the timer. 100 if deadline.IsZero() || !deadline.Equal(nextDeadline) { 101 timer.Reset(time.Until(deadline)) 102 nextDeadline = deadline 103 } 104 } 105 } 106 107 // calculateNextDeadline returns the next deadline in which to scan for 108 // deadlined nodes. It applies the coalesce window. 
109 func (d *deadlineHeap) calculateNextDeadline() (time.Time, bool) { 110 d.mu.Lock() 111 defer d.mu.Unlock() 112 113 if len(d.nodes) == 0 { 114 return time.Time{}, false 115 } 116 117 // Calculate the new timer value 118 var deadline time.Time 119 for _, v := range d.nodes { 120 if deadline.IsZero() || v.Before(deadline) { 121 deadline = v 122 } 123 } 124 125 var maxWithinWindow time.Time 126 coalescedDeadline := deadline.Add(d.coalesceWindow) 127 for _, nodeDeadline := range d.nodes { 128 if nodeDeadline.Before(coalescedDeadline) { 129 if maxWithinWindow.IsZero() || nodeDeadline.After(maxWithinWindow) { 130 maxWithinWindow = nodeDeadline 131 } 132 } 133 } 134 135 return maxWithinWindow, true 136 } 137 138 // NextBatch returns the next batch of nodes to be drained. 139 func (d *deadlineHeap) NextBatch() <-chan []string { 140 return d.batch 141 } 142 143 func (d *deadlineHeap) Remove(nodeID string) { 144 d.mu.Lock() 145 defer d.mu.Unlock() 146 delete(d.nodes, nodeID) 147 148 select { 149 case d.trigger <- struct{}{}: 150 default: 151 } 152 } 153 154 func (d *deadlineHeap) Watch(nodeID string, deadline time.Time) { 155 d.mu.Lock() 156 defer d.mu.Unlock() 157 d.nodes[nodeID] = deadline 158 159 select { 160 case d.trigger <- struct{}{}: 161 default: 162 } 163 }