github.com/mutagen-io/mutagen@v0.18.0-rc1/pkg/state/tracker.go

package state

import (
	"context"
	"errors"
	"sync"
)

// ErrTrackingTerminated indicates that tracking was terminated before a polling
// operation saw any changes.
var ErrTrackingTerminated = errors.New("tracking terminated")

// pollResponse is used to respond to a polling request within Tracker.
type pollResponse struct {
	// index is the index at the time of the response.
	index uint64
	// terminated indicates whether or not tracking was terminated at the time
	// of the response.
	terminated bool
}

// pollRequest represents a polling request within Tracker.
type pollRequest struct {
	// previousIndex is the previous index for which state information was seen.
	previousIndex uint64
	// responses is used to respond to the polling request. It must be buffered.
	responses chan<- pollResponse
}

// Tracker provides index-based state tracking using a condition variable.
type Tracker struct {
	// change is the condition variable used to track changes. It is used to
	// signal state changes to index and terminated. It is also used to
	// serialize and signal changes to pollRequests.
	change *sync.Cond
	// index is the current state index.
	// NOTE: In theory, we should track and handle overflow on this index, but
	// given that an update period of 1 nanosecond would only cause an overflow
	// after about 584 years, the possibility isn't hugely concerning.
	//
	// Moreover, the "failure" mode in the case of overflow is that a poller who
	// waited an entire overflow period before an additional state change check,
	// and then managed to hit when the index was exactly the same as their last
	// check, would have to wait for an additional state change before detecting
	// an update. Given the vanishingly small likelihood of both conditions,
	// along with the minimal consequences, it's not worth hauling around a ton
	// of overflow handling code. We do perform a minimal amount of overflow
	// handling on this value, but that's just to maintain the meaning of 0 as a
	// previous state index in the unlikely event of an overflow.
	index uint64
	// terminated indicates whether or not tracking has been terminated.
	terminated bool
	// pollRequests is the set of current pollers.
	pollRequests map[*pollRequest]bool
	// trackDone is closed to signal that the tracking loop has exited.
	trackDone chan struct{}
}

// NewTracker creates a new tracker instance with a state index of 1.
func NewTracker() *Tracker {
	// Create the tracker.
	tracker := &Tracker{
		change:       sync.NewCond(&sync.Mutex{}),
		index:        1,
		pollRequests: make(map[*pollRequest]bool),
		trackDone:    make(chan struct{}),
	}

	// Start the tracking loop.
	go tracker.track()

	// Done.
	return tracker
}

// track is the tracking loop entry point. It serves as a bridge between the
// world of condition variables and the world of channels.
func (t *Tracker) track() {
	// Defer closure of the tracking loop termination channel.
	defer close(t.trackDone)

	// Acquire the state lock and defer its release.
	t.change.L.Lock()
	defer t.change.L.Unlock()

	// Loop until terminated.
	for {
		// Check for and handle termination.
		if t.terminated {
			response := pollResponse{t.index, true}
			for r := range t.pollRequests {
				r.responses <- response
				delete(t.pollRequests, r)
			}
			return
		}

		// Signal any completed polling requests.
		// TODO: It would be nice if we had a better data structure where
		// iteration wasn't O(n) in the number of registered poll requests. It
		// feels like we could leverage the fact that index is monotonically
		// increasing and maybe use a heap (ordered by requests' previous
		// indices) to reduce the iteration overhead here, but it's not
		// performance critical for now. Such a design might motivate better
		// overflow handling as well. In any case, given that we're no longer
		// using sync.Cond.Broadcast, we're already saving O(n) iteration in the
		// Go runtime, so this is a reasonable tradeoff.
		for r := range t.pollRequests {
			if r.previousIndex != t.index {
				r.responses <- pollResponse{t.index, false}
				delete(t.pollRequests, r)
			}
		}

		// Wait for a state change.
		t.change.Wait()
	}
}

// Terminate terminates tracking.
func (t *Tracker) Terminate() {
	// Acquire the state lock.
	t.change.L.Lock()

	// Mark tracking as terminated.
	t.terminated = true

	// Signal to the tracking loop that termination has occurred.
	t.change.Signal()

	// Release the state lock.
	t.change.L.Unlock()

	// Wait for the tracking loop to exit.
	<-t.trackDone
}

// NotifyOfChange increments the state index and notifies waiters.
func (t *Tracker) NotifyOfChange() {
	// Acquire the state lock and defer its release.
	t.change.L.Lock()
	defer t.change.L.Unlock()

	// If tracking has been terminated, then there's nothing that we need to do.
	if t.terminated {
		return
	}

	// Increment the state index. If we do overflow, then at least set the index
	// back to 1, because we want 0 to remain the sentinel value that returns an
	// immediate read of the current state index.
	t.index++
	if t.index == 0 {
		t.index = 1
	}

	// Signal the tracking loop.
	t.change.Signal()
}

// WaitForChange polls for a state index change from the specified previous
// index. It returns the new index at which the change was seen. If tracking is
// terminated before the polling operation completes, then the current state
// index is returned along with ErrTrackingTerminated. If the provided context
// is cancelled before the polling operation completes, then the current state
// index is returned along with context.Canceled. If a previous state index of 0
// is provided, then the current state index (which will always be greater than
// 0) is returned immediately.
func (t *Tracker) WaitForChange(ctx context.Context, previousIndex uint64) (uint64, error) {
	// If the previous index is 0, then an immediate read is being requested. In
	// that case we can just bypass the polling mechanism.
	if previousIndex == 0 {
		t.change.L.Lock()
		defer t.change.L.Unlock()
		if t.terminated {
			return t.index, ErrTrackingTerminated
		}
		return t.index, nil
	}

	// Acquire the state lock.
	t.change.L.Lock()

	// If tracking has already been terminated, then abort immediately because
	// polling won't function.
	if t.terminated {
		defer t.change.L.Unlock()
		return t.index, ErrTrackingTerminated
	}

	// Create and register the polling request.
	responses := make(chan pollResponse, 1)
	request := &pollRequest{previousIndex, responses}
	t.pollRequests[request] = true

	// Signal to the tracking loop that a new request has been registered.
	t.change.Signal()

	// Release the state lock.
	t.change.L.Unlock()

	// Wait for a state change or cancellation. If the request is cancelled,
	// then we'll deregister it ourselves (in which case there's no need to
	// notify the tracking loop). If the polling operation succeeds, then the
	// tracking loop will deregister the request.
	select {
	case <-ctx.Done():
		t.change.L.Lock()
		delete(t.pollRequests, request)
		defer t.change.L.Unlock()
		return t.index, context.Canceled
	case response := <-responses:
		if response.terminated {
			return response.index, ErrTrackingTerminated
		}
		return response.index, nil
	}
}
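
Below is a minimal usage sketch, not part of tracker.go, showing how a poller typically drives this API: an initial WaitForChange call with a previous index of 0 returns the current index immediately, subsequent calls block until NotifyOfChange advances the index, and Terminate unblocks any remaining pollers with ErrTrackingTerminated. The import path, the polling interval, and the printed output are illustrative assumptions, not code from the package.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/mutagen-io/mutagen/pkg/state"
)

func main() {
	// Create a tracker. Its initial state index is 1.
	tracker := state.NewTracker()

	// Start a poller that observes successive state indices. The first call
	// uses a previous index of 0 and thus returns immediately; later calls
	// block until the index changes or tracking is terminated.
	done := make(chan struct{})
	go func() {
		defer close(done)
		var previousIndex uint64
		for {
			index, err := tracker.WaitForChange(context.Background(), previousIndex)
			if err != nil {
				if errors.Is(err, state.ErrTrackingTerminated) {
					fmt.Println("tracking terminated")
				}
				return
			}
			fmt.Println("observed state index", index)
			previousIndex = index
		}
	}()

	// Simulate a few state changes, pausing briefly so that the poller has a
	// chance to observe each index (changes that land between polls coalesce).
	for i := 0; i < 3; i++ {
		tracker.NotifyOfChange()
		time.Sleep(10 * time.Millisecond)
	}

	// Terminate tracking and wait for the poller to exit.
	tracker.Terminate()
	<-done
}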