istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pkg/kube/informerfactory/factory.go (about)

     1  // Copyright Istio Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Package informerfactory provides a "factory" to generate informers. This allows users to create the
    16  // same informers in multiple different locations, while still using the same underlying resources.
    17  // Additionally, aggregate operations like Start, Shutdown, and Wait are available.
    18  // Kubernetes core has informer factories with very similar logic. However, this has a few problems that
    19  // spurred a fork:
    20  // * Factories are per package. That means we have ~6 distinct factories, which makes management a hassle.
    21  // * Across these, the factories are often inconsistent in functionality. Changes to these takes >4 months.
    22  // * Lack of functionality we want (see below).
    23  //
    24  // Added functionality:
    25  // * Single factory for any type, including dynamic informers, meta informers, typed informers, etc.
    26  // * Ability to create multiple informers of the same type but with different filters.
    27  // * Ability to run a single informer rather than all of them.
    28  package informerfactory
    29  
    30  import (
    31  	"fmt"
    32  	"runtime/debug"
    33  	"sync"
    34  
    35  	"k8s.io/apimachinery/pkg/runtime/schema"
    36  	"k8s.io/client-go/tools/cache"
    37  
    38  	"istio.io/istio/pilot/pkg/features"
    39  	"istio.io/istio/pkg/config/schema/gvr"
    40  	"istio.io/istio/pkg/kube/kubetypes"
    41  	"istio.io/istio/pkg/log"
    42  	"istio.io/istio/pkg/util/sets"
    43  )
    44  
// NewInformerFunc returns a SharedIndexInformer. The factory invokes it the
// first time a given informer key is requested, to build the underlying informer.
type NewInformerFunc func() cache.SharedIndexInformer
    47  
// StartableInformer couples a shared informer with the factory hook required to
// start it, so callers can run a single informer without starting the whole factory.
type StartableInformer struct {
	Informer cache.SharedIndexInformer
	// start is wired by the factory (see makeStartableInformer) and delegates to
	// informerFactory.startOne for the informer's key.
	start func(stopCh <-chan struct{})
}

// Start runs the informer in a factory-tracked goroutine until stopCh is closed.
// Starting the same informer again, or starting after the factory has begun
// shutting down, is a no-op (see informerFactory.startOne).
func (s StartableInformer) Start(stopCh <-chan struct{}) {
	s.start(stopCh)
}
    56  
// InformerFactory provides access to a shared informer factory
type InformerFactory interface {
	// Start initializes all requested informers. They are handled in goroutines
	// which run until the stop channel gets closed.
	Start(stopCh <-chan struct{})

	// InformerFor returns the SharedIndexInformer for the provided type.
	InformerFor(resource schema.GroupVersionResource, opts kubetypes.InformerOptions, newFunc NewInformerFunc) StartableInformer

	// WaitForCacheSync blocks until all started informers' caches were synced
	// or the stop channel gets closed.
	WaitForCacheSync(stopCh <-chan struct{}) bool

	// Shutdown marks a factory as shutting down. At that point no new
	// informers can be started anymore and Start will return without
	// doing anything.
	//
	// In addition, Shutdown blocks until all goroutines have terminated. For that
	// to happen, the close channel(s) that they were started with must be closed,
	// either before Shutdown gets called or while it is waiting.
	//
	// Shutdown may be called multiple times, even concurrently. All such calls will
	// block until all goroutines have terminated.
	Shutdown()
}
    82  
    83  // NewSharedInformerFactory constructs a new instance of informerFactory for all namespaces.
    84  func NewSharedInformerFactory() InformerFactory {
    85  	return &informerFactory{
    86  		informers:        map[informerKey]builtInformer{},
    87  		startedInformers: sets.New[informerKey](),
    88  	}
    89  }
    90  
// informerKey represents a unique informer. Two InformerFor calls that produce
// the same key share a single underlying informer; any field differing (selector,
// namespace, informer type) yields a distinct informer for the same GVR.
type informerKey struct {
	gvr           schema.GroupVersionResource
	labelSelector string
	fieldSelector string
	informerType  kubetypes.InformerType
	namespace     string
}
    99  
// builtInformer pairs a constructed informer with the ObjectTransform it was
// registered with, so later registrations for the same key can be checked for
// conflicting transforms (see checkInformerOverlap).
type builtInformer struct {
	informer        cache.SharedIndexInformer
	objectTransform func(obj any) (any, error)
}
   104  
// informerFactory is the default InformerFactory implementation. All maps and
// sets are guarded by lock; goroutine lifetimes are tracked by wg.
type informerFactory struct {
	lock      sync.Mutex
	informers map[informerKey]builtInformer
	// startedInformers is used for tracking which informers have been started.
	// This allows Start() to be called multiple times safely.
	startedInformers sets.Set[informerKey]

	// wg tracks how many goroutines were started.
	wg sync.WaitGroup
	// shuttingDown is true when Shutdown has been called. It may still be running
	// because it needs to wait for goroutines.
	shuttingDown bool
}

// Compile-time assertion that informerFactory satisfies InformerFactory.
var _ InformerFactory = &informerFactory{}
   120  
   121  func (f *informerFactory) InformerFor(resource schema.GroupVersionResource, opts kubetypes.InformerOptions, newFunc NewInformerFunc) StartableInformer {
   122  	f.lock.Lock()
   123  	defer f.lock.Unlock()
   124  
   125  	key := informerKey{
   126  		gvr:           resource,
   127  		labelSelector: opts.LabelSelector,
   128  		fieldSelector: opts.FieldSelector,
   129  		informerType:  opts.InformerType,
   130  		namespace:     opts.Namespace,
   131  	}
   132  	inf, exists := f.informers[key]
   133  	if exists {
   134  		checkInformerOverlap(inf, resource, opts)
   135  		return f.makeStartableInformer(inf.informer, key)
   136  	}
   137  
   138  	informer := newFunc()
   139  	f.informers[key] = builtInformer{
   140  		informer:        informer,
   141  		objectTransform: opts.ObjectTransform,
   142  	}
   143  
   144  	return f.makeStartableInformer(informer, key)
   145  }
   146  
   147  func allowedOverlap(resource schema.GroupVersionResource) bool {
   148  	// We register an optimized Pod watcher for standard flow, but for the experimental analysis feature we need the full pod,
   149  	// so we start another watch.
   150  	// We may want to reconsider this if the analysis feature becomes stable.
   151  	return features.EnableAnalysis && resource == gvr.Pod
   152  }
   153  
   154  func checkInformerOverlap(inf builtInformer, resource schema.GroupVersionResource, opts kubetypes.InformerOptions) {
   155  	if fmt.Sprintf("%p", inf.objectTransform) == fmt.Sprintf("%p", opts.ObjectTransform) {
   156  		return
   157  	}
   158  	l := log.Warnf
   159  	if features.EnableUnsafeAssertions && !allowedOverlap(resource) {
   160  		l = log.Fatalf
   161  	}
   162  	l("for type %v, registered conflicting ObjectTransform. Stack: %v", resource, string(debug.Stack()))
   163  }
   164  
   165  func (f *informerFactory) makeStartableInformer(informer cache.SharedIndexInformer, key informerKey) StartableInformer {
   166  	return StartableInformer{
   167  		Informer: informer,
   168  		start: func(stopCh <-chan struct{}) {
   169  			f.startOne(stopCh, key)
   170  		},
   171  	}
   172  }
   173  
   174  func (f *informerFactory) startOne(stopCh <-chan struct{}, informerType informerKey) {
   175  	f.lock.Lock()
   176  	defer f.lock.Unlock()
   177  
   178  	if f.shuttingDown {
   179  		return
   180  	}
   181  
   182  	informer, ff := f.informers[informerType]
   183  	if !ff {
   184  		panic(fmt.Sprintf("bug: informer key %+v not found", informerType))
   185  	}
   186  	if !f.startedInformers.Contains(informerType) {
   187  		f.wg.Add(1)
   188  		go func() {
   189  			defer f.wg.Done()
   190  			informer.informer.Run(stopCh)
   191  		}()
   192  		f.startedInformers.Insert(informerType)
   193  	}
   194  }
   195  
   196  // Start initializes all requested informers.
   197  func (f *informerFactory) Start(stopCh <-chan struct{}) {
   198  	f.lock.Lock()
   199  	defer f.lock.Unlock()
   200  
   201  	if f.shuttingDown {
   202  		return
   203  	}
   204  
   205  	for informerType, informer := range f.informers {
   206  		if !f.startedInformers.Contains(informerType) {
   207  			f.wg.Add(1)
   208  			// We need a new variable in each loop iteration,
   209  			// otherwise the goroutine would use the loop variable
   210  			// and that keeps changing.
   211  			informer := informer
   212  			go func() {
   213  				defer f.wg.Done()
   214  				informer.informer.Run(stopCh)
   215  			}()
   216  			f.startedInformers.Insert(informerType)
   217  		}
   218  	}
   219  }
   220  
   221  // WaitForCacheSync waits for all started informers' cache were synced.
   222  func (f *informerFactory) WaitForCacheSync(stopCh <-chan struct{}) bool {
   223  	informers := func() []cache.SharedIndexInformer {
   224  		f.lock.Lock()
   225  		defer f.lock.Unlock()
   226  		informers := make([]cache.SharedIndexInformer, 0, len(f.informers))
   227  		for informerKey, informer := range f.informers {
   228  			if f.startedInformers.Contains(informerKey) {
   229  				informers = append(informers, informer.informer)
   230  			}
   231  		}
   232  		return informers
   233  	}()
   234  
   235  	for _, informer := range informers {
   236  		if !cache.WaitForCacheSync(stopCh, informer.HasSynced) {
   237  			return false
   238  		}
   239  	}
   240  	return true
   241  }
   242  
   243  func (f *informerFactory) Shutdown() {
   244  	// Will return immediately if there is nothing to wait for.
   245  	defer f.wg.Wait()
   246  
   247  	f.lock.Lock()
   248  	defer f.lock.Unlock()
   249  	f.shuttingDown = true
   250  }