github.com/timstclair/heapster@v0.20.0-alpha1/Godeps/_workspace/src/k8s.io/kubernetes/pkg/watch/mux.go

/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
	"sync"

	"k8s.io/kubernetes/pkg/runtime"
)

// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
// channel is full.
type FullChannelBehavior int

const (
	WaitIfChannelFull FullChannelBehavior = iota
	DropIfChannelFull
)

// Buffer the incoming queue a little bit even though it should rarely ever accumulate
// anything, just in case a few events are received in such a short window that
// Broadcaster can't move them onto the watchers' queues fast enough.
const incomingQueueLength = 25

// Broadcaster distributes event notifications among any number of watchers. Every event
// is delivered to every watcher.
type Broadcaster struct {
	// TODO: see if this lock is needed now that new watchers go through
	// the incoming channel.
	lock sync.Mutex

	watchers     map[int64]*broadcasterWatcher
	nextWatcher  int64
	distributing sync.WaitGroup

	incoming chan Event

	// How large to make watcher's channel.
	watchQueueLength int
	// If one of the watch channels is full, don't wait for it to become empty.
	// Instead just deliver it to the watchers that do have space in their
	// channels and move on to the next event.
	// It's more fair to do this on a per-watcher basis than to do it on the
	// "incoming" channel, which would allow one slow watcher to prevent all
	// other watchers from getting new events.
	fullChannelBehavior FullChannelBehavior
}

// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
// It is guaranteed that events will be distributed in the order in which they occur,
// but the order in which a single event is distributed among all of the watchers is unspecified.
func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
	m := &Broadcaster{
		watchers:            map[int64]*broadcasterWatcher{},
		incoming:            make(chan Event, incomingQueueLength),
		watchQueueLength:    queueLength,
		fullChannelBehavior: fullChannelBehavior,
	}
	m.distributing.Add(1)
	go m.loop()
	return m
}

const internalRunFunctionMarker = "internal-do-function"

// A function type we can shoehorn into the queue.
type functionFakeRuntimeObject func()

func (functionFakeRuntimeObject) IsAnAPIObject() {}
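// Events carrying internalRunFunctionMarker never reach watchers: loop()
// (below) recognizes the marker and executes the functionFakeRuntimeObject
// payload inline on the distribution goroutine instead of distributing it.
// blockQueue relies on this to run a function serialized with event delivery.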
// Execute f, blocking the incoming queue (and waiting for it to drain first).
// The purpose of this terrible hack is so that watchers added after an event
// won't ever see that event, and will always see any event after they are
// added.
func (m *Broadcaster) blockQueue(f func()) {
	var wg sync.WaitGroup
	wg.Add(1)
	m.incoming <- Event{
		Type: internalRunFunctionMarker,
		Object: functionFakeRuntimeObject(func() {
			defer wg.Done()
			f()
		}),
	}
	wg.Wait()
}

// Watch adds a new watcher to the list and returns an Interface for it.
// Note: new watchers will only receive new events. They won't get an entire history
// of previous events.
func (m *Broadcaster) Watch() Interface {
	var w *broadcasterWatcher
	m.blockQueue(func() {
		m.lock.Lock()
		defer m.lock.Unlock()
		id := m.nextWatcher
		m.nextWatcher++
		w = &broadcasterWatcher{
			result:  make(chan Event, m.watchQueueLength),
			stopped: make(chan struct{}),
			id:      id,
			m:       m,
		}
		m.watchers[id] = w
	})
	return w
}

// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends
// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster.
// The returned watch will have a queue length that is at least large enough to accommodate
// all of the items in queuedEvents.
func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface {
	var w *broadcasterWatcher
	m.blockQueue(func() {
		m.lock.Lock()
		defer m.lock.Unlock()
		id := m.nextWatcher
		m.nextWatcher++
		length := m.watchQueueLength
		if n := len(queuedEvents) + 1; n > length {
			length = n
		}
		w = &broadcasterWatcher{
			result:  make(chan Event, length),
			stopped: make(chan struct{}),
			id:      id,
			m:       m,
		}
		m.watchers[id] = w
		for _, e := range queuedEvents {
			w.result <- e
		}
	})
	return w
}
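// Illustrative sketch (not part of the original file): replaying known initial
// state to a late watcher with WatchWithPrefix. objA and objB stand in for
// hypothetical runtime.Object values; Added is the EventType defined elsewhere
// in this package.
//
//	w := m.WatchWithPrefix([]Event{{Added, objA}, {Added, objB}})
//	// w.ResultChan() yields the two queued events first, then any events
//	// broadcast via Action after this call. Events sent before the call are
//	// never seen, because blockQueue drains the incoming queue before the
//	// watcher is registered.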
// stopWatching stops the given watcher and removes it from the list.
func (m *Broadcaster) stopWatching(id int64) {
	m.lock.Lock()
	defer m.lock.Unlock()
	w, ok := m.watchers[id]
	if !ok {
		// No need to do anything, it's already been removed from the list.
		return
	}
	delete(m.watchers, id)
	close(w.result)
}

// closeAll disconnects all watchers (presumably in response to a Shutdown call).
func (m *Broadcaster) closeAll() {
	m.lock.Lock()
	defer m.lock.Unlock()
	for _, w := range m.watchers {
		close(w.result)
	}
	// Delete everything from the map, since presence/absence in the map is used
	// by stopWatching to avoid double-closing the channel.
	m.watchers = map[int64]*broadcasterWatcher{}
}

// Action distributes the given event among all watchers.
func (m *Broadcaster) Action(action EventType, obj runtime.Object) {
	m.incoming <- Event{action, obj}
}

// Shutdown disconnects all watchers (but any queued events will still be distributed).
// You must not call Action or Watch* after calling Shutdown. This call blocks
// until all events have been distributed through the outbound channels. Note
// that since they can be buffered, this means that the watchers might not
// have received the data yet as it can remain sitting in the buffered
// channel.
func (m *Broadcaster) Shutdown() {
	close(m.incoming)
	m.distributing.Wait()
}

// loop receives from m.incoming and distributes to all watchers.
func (m *Broadcaster) loop() {
	// Deliberately not catching crashes here. Yes, bring down the process if there's a
	// bug in watch.Broadcaster.
	for {
		event, ok := <-m.incoming
		if !ok {
			break
		}
		if event.Type == internalRunFunctionMarker {
			event.Object.(functionFakeRuntimeObject)()
			continue
		}
		m.distribute(event)
	}
	m.closeAll()
	m.distributing.Done()
}

// distribute sends event to all watchers. Blocking.
func (m *Broadcaster) distribute(event Event) {
	m.lock.Lock()
	defer m.lock.Unlock()
	if m.fullChannelBehavior == DropIfChannelFull {
		for _, w := range m.watchers {
			select {
			case w.result <- event:
			case <-w.stopped:
			default: // Don't block if the event can't be queued.
			}
		}
	} else {
		for _, w := range m.watchers {
			select {
			case w.result <- event:
			case <-w.stopped:
			}
		}
	}
}

// broadcasterWatcher handles a single watcher of a broadcaster.
type broadcasterWatcher struct {
	result  chan Event
	stopped chan struct{}
	stop    sync.Once
	id      int64
	m       *Broadcaster
}

// ResultChan returns a channel to use for waiting on events.
func (mw *broadcasterWatcher) ResultChan() <-chan Event {
	return mw.result
}

// Stop stops watching and removes mw from its list.
func (mw *broadcasterWatcher) Stop() {
	mw.stop.Do(func() {
		close(mw.stopped)
		mw.m.stopWatching(mw.id)
	})
}
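// Illustrative sketch (not part of the original file): a typical
// create/watch/broadcast/shutdown sequence. The nil Object passed to Action is
// purely for brevity; real callers pass a runtime.Object. Added is the
// EventType defined elsewhere in this package.
//
//	b := NewBroadcaster(10, WaitIfChannelFull)
//	w := b.Watch()
//	done := make(chan struct{})
//	go func() {
//		defer close(done)
//		for ev := range w.ResultChan() {
//			_ = ev // handle the event; the channel closes after Shutdown
//		}
//	}()
//	b.Action(Added, nil)
//	b.Shutdown() // blocks until the event above has been handed to w's channel
//	<-done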