k8s.io/client-go@v0.31.1/examples/workqueue/main.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"flag"
	"fmt"
	"time"

	"k8s.io/klog/v2"

	v1 "k8s.io/api/core/v1"
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/workqueue"
)

// Controller demonstrates how to implement a controller with client-go.
type Controller struct {
	indexer  cache.Indexer
	queue    workqueue.TypedRateLimitingInterface[string]
	informer cache.Controller
}

// NewController creates a new Controller.
func NewController(queue workqueue.TypedRateLimitingInterface[string], indexer cache.Indexer, informer cache.Controller) *Controller {
	return &Controller{
		informer: informer,
		indexer:  indexer,
		queue:    queue,
	}
}

func (c *Controller) processNextItem() bool {
	// Wait until there is a new item in the working queue.
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	// Tell the queue that we are done with processing this key. This unblocks the key for other
	// workers and allows safe parallel processing, because two pods with the same key are never
	// processed in parallel.
	defer c.queue.Done(key)

	// Invoke the method containing the business logic.
	err := c.syncToStdout(key)
	// Handle the error if something went wrong during the execution of the business logic.
	c.handleErr(err, key)
	return true
}

// syncToStdout is the business logic of the controller. In this controller it simply prints
// information about the pod to stdout. If an error happens, it simply returns the error;
// the retry logic should not be part of the business logic.
func (c *Controller) syncToStdout(key string) error {
	obj, exists, err := c.indexer.GetByKey(key)
	if err != nil {
		klog.Errorf("Fetching object with key %s from store failed with %v", key, err)
		return err
	}

	if !exists {
		// Below we will warm up our cache with a Pod, so that we will see a delete for one pod.
		fmt.Printf("Pod %s does not exist anymore\n", key)
	} else {
		// Note that you also have to check the uid if you have a locally controlled resource,
		// which is dependent on the actual instance, to detect that a Pod was recreated with
		// the same name.
		fmt.Printf("Sync/Add/Update for Pod %s\n", obj.(*v1.Pod).GetName())
	}
	return nil
}
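
// A short summary of the workqueue contract used by this controller: Get
// hands out a key and marks it as in flight, Done releases it again,
// AddRateLimited re-enqueues it subject to the rate limiter, and Forget
// resets the per-key failure history. handleErr below wires these calls
// together into a bounded retry loop.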

// handleErr checks if an error happened and makes sure we will retry later.
func (c *Controller) handleErr(err error, key string) {
	if err == nil {
		// Forget about the AddRateLimited history of the key on every successful synchronization.
		// This ensures that future processing of updates for this key is not delayed because of
		// an outdated error history.
		c.queue.Forget(key)
		return
	}

	// This controller retries 5 times if something goes wrong. After that, it stops trying.
	if c.queue.NumRequeues(key) < 5 {
		klog.Infof("Error syncing pod %v: %v", key, err)

		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		c.queue.AddRateLimited(key)
		return
	}

	c.queue.Forget(key)
	// Report to an external entity that, even after several retries, we could not successfully
	// process this key.
	runtime.HandleError(err)
	klog.Infof("Dropping pod %q out of the queue: %v", key, err)
}

// Run begins watching and syncing.
func (c *Controller) Run(workers int, stopCh chan struct{}) {
	defer runtime.HandleCrash()

	// Let the workers stop when we are done.
	defer c.queue.ShutDown()
	klog.Info("Starting Pod controller")

	go c.informer.Run(stopCh)

	// Wait for all involved caches to be synced before processing items from the queue is started.
	if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
		runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}

	<-stopCh
	klog.Info("Stopping Pod controller")
}

// runWorker processes items until the queue is shut down.
func (c *Controller) runWorker() {
	for c.processNextItem() {
	}
}

func main() {
	var kubeconfig string
	var master string

	flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file")
	flag.StringVar(&master, "master", "", "master url")
	flag.Parse()

	// creates the connection
	config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig)
	if err != nil {
		klog.Fatal(err)
	}

	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		klog.Fatal(err)
	}

	// create the pod watcher
	podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything())

	// create the workqueue
	queue := workqueue.NewTypedRateLimitingQueue(workqueue.DefaultTypedControllerRateLimiter[string]())
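
	// A note on the rate limiter above: as of client-go v0.31,
	// DefaultTypedControllerRateLimiter combines a per-item exponential
	// backoff (starting at 5ms, capped at 1000s) with an overall token
	// bucket (10 qps, burst of 100), so a key that keeps failing is retried
	// with growing delays while the queue as a whole stays below a bounded
	// requeue rate.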

	// Bind the workqueue to a cache with the help of an informer. This way we make sure that
	// whenever the cache is updated, the pod key is added to the workqueue.
	// Note that when we finally process the item from the workqueue, we might see a newer version
	// of the Pod than the version which was responsible for triggering the update.
	indexer, informer := cache.NewIndexerInformer(podListWatcher, &v1.Pod{}, 0, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// MetaNamespaceKeyFunc produces keys of the form "namespace/name", e.g. "default/mypod".
			key, err := cache.MetaNamespaceKeyFunc(obj)
			if err == nil {
				queue.Add(key)
			}
		},
		UpdateFunc: func(old interface{}, new interface{}) {
			key, err := cache.MetaNamespaceKeyFunc(new)
			if err == nil {
				queue.Add(key)
			}
		},
		DeleteFunc: func(obj interface{}) {
			// IndexerInformer uses a delta queue, therefore for deletes we have to use this
			// key function, which also handles tombstone objects.
			key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
			if err == nil {
				queue.Add(key)
			}
		},
	}, cache.Indexers{})

	controller := NewController(queue, indexer, informer)

	// We can now warm up the cache for initial synchronization.
	// Let's suppose that we knew about a pod "mypod" on our last run, therefore add it to the cache.
	// If this pod is not there anymore, the controller will be notified about the removal after the
	// cache has synchronized.
	indexer.Add(&v1.Pod{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      "mypod",
			Namespace: v1.NamespaceDefault,
		},
	})

	// Now let's start the controller.
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(1, stop)

	// Wait forever.
	select {}
}
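
// Example invocation when running against a kubeconfig outside the cluster
// (the path below is illustrative):
//
//	go run main.go -kubeconfig=$HOME/.kube/config
//
// When running inside a cluster, rest.InClusterConfig (k8s.io/client-go/rest)
// can be used instead of clientcmd.BuildConfigFromFlags to pick up the pod's
// service account credentials.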