github.com/cilium/cilium@v1.16.2/pkg/clustermesh/endpointslicesync/node_informer.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package endpointslicesync

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"golang.org/x/exp/maps"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	listersv1 "k8s.io/client-go/listers/core/v1"
	cache "k8s.io/client-go/tools/cache"

	"github.com/cilium/cilium/pkg/lock"
)

// meshNodeInformer fakes one node per remote cluster in the Cilium mesh,
// named after the cluster; the meshPodInformer likewise uses the cluster name
// as the node name. This trick allows the EndpointSlice controller to set the
// correct topology on EndpointSlice objects.
type meshNodeInformer struct {
	dummyInformer

	handler cache.ResourceEventHandler
	nodes   map[string]*v1.Node
	mutex   lock.RWMutex
}

func newMeshNodeInformer(logger logrus.FieldLogger) *meshNodeInformer {
	return &meshNodeInformer{
		dummyInformer: dummyInformer{name: "meshNodeInformer", logger: logger},
		nodes:         map[string]*v1.Node{},
	}
}
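
// Illustrative sketch (hypothetical usage, not part of the upstream file):
// each remote cluster surfaces as a fake node whose name and
// topology.kubernetes.io/zone label are both the cluster name, which is what
// the EndpointSlice controller keys topology on.
//
//	informer := newMeshNodeInformer(logger)
//	informer.onClusterAdd("cluster1")
//	node, _ := informer.Get("cluster1")
//	_ = node.Labels[v1.LabelTopologyZone] // "cluster1"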

func createDummyNode(cluster string) *v1.Node {
	return &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:   cluster,
			Labels: map[string]string{v1.LabelTopologyZone: cluster},
		},
		Status: v1.NodeStatus{
			Phase: v1.NodeRunning,
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeReady, Status: v1.ConditionTrue},
			},
			// Allocate 1 CPU per "node"/cluster so that the topology manager
			// will set the topology. This could later be improved with a
			// meaningful value to support weighted traffic distribution with
			// kube-proxy enabled.
			Allocatable: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(1, resource.DecimalSI)},
		},
	}
}
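
// For context (an assumption about the consumer, not stated in this file):
// the EndpointSlice controller's topology cache distributes hints across
// zones in proportion to each zone's allocatable CPU, roughly
//
//	ratio(zone) = zoneCPU(zone) / totalCPU
//
// so giving every fake node exactly 1 CPU weights all clusters equally, which
// is enough for hints to be produced at all.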

func (i *meshNodeInformer) ListClusters() []string {
	i.mutex.RLock()
	defer i.mutex.RUnlock()

	return maps.Keys(i.nodes)
}

func (i *meshNodeInformer) List(selector labels.Selector) ([]*v1.Node, error) {
	if !selector.Empty() {
		reqs, _ := selector.Requirements()
		return nil, fmt.Errorf("meshNodeInformer supports only listing everything, got requirements: %s", reqs)
	}

	i.mutex.RLock()
	defer i.mutex.RUnlock()
	return maps.Values(i.nodes), nil
}
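
// Callers are thus expected to list with an empty selector, e.g. (sketch):
//
//	nodes, _ := informer.List(labels.Everything())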

func (i *meshNodeInformer) Get(name string) (*v1.Node, error) {
	i.mutex.RLock()
	defer i.mutex.RUnlock()
	if node, ok := i.nodes[name]; ok {
		return node, nil
	}
	return nil, newNotFoundError(fmt.Sprintf("node '%s' not found", name))
}

func (i *meshNodeInformer) onClusterAdd(cluster string) {
	i.mutex.Lock()
	node := createDummyNode(cluster)
	i.nodes[cluster] = node
	i.mutex.Unlock()

	if i.handler == nil {
		return
	}
	i.handler.OnAdd(node, false)
}

func (i *meshNodeInformer) onClusterDelete(cluster string) {
	i.mutex.Lock()
	// Capture the node before deleting the map entry so that the handler
	// receives the removed object rather than a nil lookup.
	node := i.nodes[cluster]
	delete(i.nodes, cluster)
	i.mutex.Unlock()

	if i.handler == nil {
		return
	}
	i.handler.OnDelete(node)
}
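
// Hypothetical wiring sketch (not part of this file): the clustermesh layer
// is assumed to call these hooks as remote clusters join and leave the mesh:
//
//	informer.onClusterAdd("cluster1")    // fake node added, handler notified
//	informer.onClusterDelete("cluster1") // fake node removed, handler notified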

// AddEventHandler registers the handler used to notify the EndpointSlice
// controller of fake node events. Only a single handler is supported; the
// informer itself is returned as the registration handle.
func (i *meshNodeInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) {
	i.handler = handler
	return i, nil
}

func (i *meshNodeInformer) HasSynced() bool {
	// The controller is launched only after the cluster mesh has fully
	// synced, so we always return true here.
	return true
}

// meshNodeInformer serves as both the informer and the lister by returning
// itself.
func (i *meshNodeInformer) Informer() cache.SharedIndexInformer {
	return i
}

func (i *meshNodeInformer) Lister() listersv1.NodeLister {
	return i
}