// Copyright 2017-2018 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package workloads

import (
	ctx "context"
	"sync"
	"time"

	"github.com/cilium/cilium/pkg/k8s"
	"github.com/cilium/cilium/pkg/lock"
	"github.com/cilium/cilium/pkg/logging/logfields"

	"github.com/sirupsen/logrus"
)

// eventType identifies the kind of workload lifecycle event being reported.
type eventType string

const (
	// EventTypeStart represents when a workload was started
	EventTypeStart eventType = "start"
	// EventTypeDelete represents when a workload was deleted
	EventTypeDelete eventType = "delete"

	// periodicSyncRate is the interval between iterations of the background
	// loop started by newWatcherState (queue reaping and, outside of
	// Kubernetes mode, runtime re-synchronization).
	periodicSyncRate = 5 * time.Minute

	// eventQueueBufferSize is the capacity of each per-workload event channel.
	eventQueueBufferSize = 100
)

// EventMessage is the structure used for the different workload events.
43 type EventMessage struct { 44 WorkloadID string 45 EventType eventType 46 } 47 48 // watcherState holds global close flag, per-container queues for events and 49 // ignore toggles 50 type watcherState struct { 51 lock.Mutex 52 53 events map[string]chan EventMessage 54 } 55 56 func newWatcherState() *watcherState { 57 ws := &watcherState{ 58 events: make(map[string]chan EventMessage), 59 } 60 61 go func(state *watcherState) { 62 for { 63 // Clean up empty event handling channels 64 state.reapEmpty() 65 66 // periodically synchronize containers managed by the 67 // local container runtime and checks if any of them 68 // need to be managed by Cilium. This is a fall back 69 // mechanism in case an event notification has been 70 // lost. 71 // 72 // This is only required when *NOT* running in 73 // Kubernetes mode as kubelet will keep containers and 74 // pods in sync and will make CNI ADD and CNI DEL calls 75 // as required. 76 if !k8s.IsEnabled() { 77 ws.syncWithRuntime() 78 } 79 80 time.Sleep(periodicSyncRate) 81 } 82 }(ws) 83 84 return ws 85 } 86 87 // enqueueByContainerID starts a handler for this container, if needed, and 88 // enqueues a copy of the event if it is non-nil. Passing in a nil event will 89 // only start the handler. These handlers can be reaped via 90 // watcherState.reapEmpty. 91 // This parallelism is desirable to respond to events faster; each event might 92 // require talking to an outside daemon (docker) and a single noisy container 93 // might starve others. 
94 func (ws *watcherState) enqueueByContainerID(containerID string, e *EventMessage) { 95 ws.Lock() 96 defer ws.Unlock() 97 98 if _, found := ws.events[containerID]; !found { 99 q := make(chan EventMessage, eventQueueBufferSize) 100 ws.events[containerID] = q 101 go Client().processEvents(q) 102 } 103 104 if e != nil { 105 ws.events[containerID] <- *e 106 } 107 } 108 109 // handlingContainerID returns whether there is a goroutine already consuming 110 // events for this id 111 func (ws *watcherState) handlingContainerID(id string) bool { 112 ws.Lock() 113 defer ws.Unlock() 114 115 _, handled := ws.events[id] 116 return handled 117 } 118 119 // reapEmpty deletes empty queues from the map. This also causes the handler 120 // goroutines to exit. It is expected to be called periodically to avoid the 121 // map growing over time. 122 func (ws *watcherState) reapEmpty() { 123 ws.Lock() 124 defer ws.Unlock() 125 126 for id, q := range ws.events { 127 if len(q) == 0 { 128 close(q) 129 delete(ws.events, id) 130 } 131 } 132 } 133 134 // syncWithRuntime is used by the daemon to synchronize changes between Docker and 135 // Cilium. This includes identities, labels, etc. 
136 func (ws *watcherState) syncWithRuntime() { 137 var wg sync.WaitGroup 138 139 timeoutCtx, cancel := ctx.WithTimeout(ctx.Background(), 10*time.Second) 140 defer cancel() 141 142 cList, err := Client().workloadIDsList(timeoutCtx) 143 if err != nil { 144 log.WithError(err).Error("Failed to retrieve the container list") 145 return 146 } 147 for _, contID := range cList { 148 if ignoredContainer(contID) { 149 continue 150 } 151 152 if alreadyHandled := ws.handlingContainerID(contID); !alreadyHandled { 153 log.WithFields(logrus.Fields{ 154 logfields.ContainerID: shortContainerID(contID), 155 }).Debug("Found unwatched container") 156 157 wg.Add(1) 158 go func(wg *sync.WaitGroup, id string) { 159 defer wg.Done() 160 Client().handleCreateWorkload(id, false) 161 }(&wg, contID) 162 } 163 } 164 165 // Wait for all spawned go routines handling container creations to exit 166 wg.Wait() 167 }