github.com/cilium/cilium@v1.16.2/operator/pkg/lbipam/lbipam.go

     1  // SPDX-License-Identifier: Apache-2.0
     2  // Copyright Authors of Cilium
     3  
     4  package lbipam
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"errors"
    10  	"fmt"
    11  	"math/big"
    12  	"net/netip"
    13  	"slices"
    14  	"strconv"
    15  	"strings"
    16  	"time"
    17  
    18  	"github.com/cilium/hive/cell"
    19  	"github.com/cilium/hive/job"
    20  	"github.com/sirupsen/logrus"
    21  	"go4.org/netipx"
    22  	meta "k8s.io/apimachinery/pkg/api/meta"
    23  	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    24  	"k8s.io/apimachinery/pkg/types"
    25  	"k8s.io/client-go/util/workqueue"
    26  
    27  	"github.com/cilium/cilium/pkg/annotation"
    28  	"github.com/cilium/cilium/pkg/ipalloc"
    29  	"github.com/cilium/cilium/pkg/k8s"
    30  	cilium_api_v2alpha1 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1"
    31  	"github.com/cilium/cilium/pkg/k8s/resource"
    32  	slim_core_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1"
    33  	slim_meta "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/api/meta"
    34  	slim_meta_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1"
    35  	client_typed_v1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/client/clientset/versioned/typed/core/v1"
    36  )
    37  
    38  const (
    39  	// The condition added to services to indicate if a request for IPs could be satisfied or not
    40  	ciliumSvcRequestSatisfiedCondition = "cilium.io/IPAMRequestSatisfied"
    41  
    42  	ciliumPoolIPsTotalCondition     = "cilium.io/IPsTotal"
    43  	ciliumPoolIPsAvailableCondition = "cilium.io/IPsAvailable"
    44  	ciliumPoolIPsUsedCondition      = "cilium.io/IPsUsed"
    45  	ciliumPoolConflict              = "cilium.io/PoolConflict"
    46  
    47  	ciliumSvcLBISKCNWildward = "*"
    48  
    49  	// The string used in the FieldManager field on update options
    50  	ciliumFieldManager = "cilium-operator-lb-ipam"
    51  
    52  	serviceNamespaceLabel = "io.kubernetes.service.namespace"
    53  	serviceNameLabel      = "io.kubernetes.service.name"
    54  )
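
// Illustrative sketch, not taken from upstream documentation: based on setSVCSatisfiedCondition below,
// the condition written to a LoadBalancer Service could look roughly like the following in
// `kubectl get svc -o yaml` output. The reason, message and generation values are examples only.
//
//	status:
//	  conditions:
//	  - type: cilium.io/IPAMRequestSatisfied
//	    status: "False"
//	    reason: out_of_ips
//	    message: All enabled CiliumLoadBalancerIPPools that match this service ran out of allocatable IPs
//	    observedGeneration: 3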
    55  
    56  var (
    57  	// eventsOpts are the options used with resource's Events()
    58  	eventsOpts = resource.WithRateLimiter(
    59  		// This rate limiter will retry in the following pattern
    60  		// 250ms, 500ms, 1s, 2s, 4s, 8s, 16s, 32s, .... max 5m
    61  		workqueue.NewItemExponentialFailureRateLimiter(250*time.Millisecond, 5*time.Minute),
    62  	)
    63  )
    64  
    65  type poolClient interface {
    66  	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta_v1.PatchOptions, subresources ...string) (result *cilium_api_v2alpha1.CiliumLoadBalancerIPPool, err error)
    67  }
    68  
    69  type lbIPAMParams struct {
    70  	logger logrus.FieldLogger
    71  
    72  	lbClasses   []string
    73  	ipv4Enabled bool
    74  	ipv6Enabled bool
    75  
    76  	poolClient poolClient
    77  	svcClient  client_typed_v1.ServicesGetter
    78  
    79  	poolResource resource.Resource[*cilium_api_v2alpha1.CiliumLoadBalancerIPPool]
    80  	svcResource  resource.Resource[*slim_core_v1.Service]
    81  
    82  	jobGroup job.Group
    83  
    84  	metrics *ipamMetrics
    85  }
    86  
    87  func newLBIPAM(params lbIPAMParams) *LBIPAM {
    88  	lbIPAM := &LBIPAM{
    89  		lbIPAMParams: params,
    90  		pools:        make(map[string]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool),
    91  		rangesStore:  newRangesStore(),
    92  		serviceStore: NewServiceStore(),
    93  	}
    94  	return lbIPAM
    95  }
    96  
    97  // LBIPAM is the load balancer IP address manager, a controller which allocates and assigns IP addresses
    98  // to LoadBalancer services from the configured set of CiliumLoadBalancerIPPools in the cluster.
    99  type LBIPAM struct {
   100  	lbIPAMParams
   101  
   102  	pools        map[string]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool
   103  	rangesStore  rangesStore
   104  	serviceStore serviceStore
   105  
   106  	// Only used during testing.
   107  	initDoneCallbacks []func()
   108  }
   109  
   110  func (ipam *LBIPAM) restart() {
   111  	ipam.logger.Info("Restarting LB IPAM")
   112  
   113  	// Reset all stored state
   114  	ipam.pools = make(map[string]*cilium_api_v2alpha1.CiliumLoadBalancerIPPool)
   115  	ipam.rangesStore = newRangesStore()
   116  	ipam.serviceStore = NewServiceStore()
   117  
   118  	// Re-start the main goroutine
   119  	ipam.jobGroup.Add(
   120  		job.OneShot("lbipam main", func(ctx context.Context, health cell.Health) error {
   121  			ipam.Run(ctx, health)
   122  			return nil
   123  		}),
   124  	)
   125  }
   126  
   127  func (ipam *LBIPAM) Run(ctx context.Context, health cell.Health) {
   128  	ctx, cancel := context.WithCancel(ctx)
   129  	defer cancel()
   130  
   131  	poolChan := ipam.poolResource.Events(ctx, eventsOpts)
   132  
   133  	ipam.logger.Info("LB-IPAM initializing")
   134  	svcChan := ipam.initialize(ctx, poolChan)
   135  
   136  	for _, cb := range ipam.initDoneCallbacks {
   137  		if cb != nil {
   138  			cb()
   139  		}
   140  	}
   141  
   142  	ipam.logger.Info("LB-IPAM done initializing")
   143  
   144  	for {
   145  		select {
   146  		case <-ctx.Done():
   147  			return
   148  
   149  		case event, ok := <-poolChan:
   150  			if !ok {
   151  				poolChan = nil
   152  				continue
   153  			}
   154  			ipam.handlePoolEvent(ctx, event)
   155  
   156  			// This controller must go back into a dormant state when the last pool has been removed
   157  			if len(ipam.pools) == 0 {
   158  				// Upon return, restart the controller, which will start in pre-init state
   159  				defer ipam.restart()
   160  				return
   161  			}
   162  
   163  		case event, ok := <-svcChan:
   164  			if !ok {
   165  				svcChan = nil
   166  				continue
   167  			}
   168  			ipam.handleServiceEvent(ctx, event)
   169  		}
   170  	}
   171  }
   172  
   173  func (ipam *LBIPAM) initialize(
   174  	ctx context.Context,
   175  	poolChan <-chan resource.Event[*cilium_api_v2alpha1.CiliumLoadBalancerIPPool],
   176  ) <-chan resource.Event[*slim_core_v1.Service] {
   177  	// Synchronize pools first as we need them before we can satisfy
   178  	// the services. This will also wait for the first pool to appear
   179  	// before we start processing the services, which will save us from
   180  	// unnecessary work when LB-IPAM is not used.
   181  	poolsSynced := false
   182  	for event := range poolChan {
   183  		if event.Kind == resource.Sync {
   184  			err := ipam.settleConflicts(ctx)
   185  			if err != nil {
   186  				ipam.logger.WithError(err).Error("Error while settling pool conflicts")
   187  				// Keep retrying the handling of the sync event until we succeed.
   188  				// During this time we may receive further updates and deletes.
   189  				event.Done(err)
   190  				continue
   191  			}
   192  			poolsSynced = true
   193  			event.Done(nil)
   194  		} else {
   195  			ipam.handlePoolEvent(ctx, event)
   196  		}
   197  
   198  		// Pools have been synchronized and we've got at least
   199  		// one pool, continue initialization.
   200  		if poolsSynced && len(ipam.pools) > 0 {
   201  			break
   202  		}
   203  	}
   204  
   205  	svcChan := ipam.svcResource.Events(ctx, eventsOpts)
   206  	for event := range svcChan {
   207  		if event.Kind == resource.Sync {
   208  			if err := ipam.satisfyServices(ctx); err != nil {
   209  				ipam.logger.WithError(err).Error("Error while satisfying services")
   210  				// Keep retrying the handling of the sync event until we succeed.
   211  				event.Done(err)
   212  				continue
   213  			}
   214  			if err := ipam.updateAllPoolCounts(ctx); err != nil {
   215  				ipam.logger.WithError(err).Error("Error while updating pool counts")
   216  				event.Done(err)
   217  				continue
   218  			}
   219  			event.Done(nil)
   220  			break
   221  		} else {
   222  			ipam.handleServiceEvent(ctx, event)
   223  		}
   224  	}
   225  
   226  	return svcChan
   227  }
   228  
   229  func (ipam *LBIPAM) handlePoolEvent(ctx context.Context, event resource.Event[*cilium_api_v2alpha1.CiliumLoadBalancerIPPool]) {
   230  	var err error
   231  	switch event.Kind {
   232  	case resource.Upsert:
   233  		err = ipam.poolOnUpsert(ctx, event.Key, event.Object)
   234  		if err != nil {
   235  			ipam.logger.WithError(err).Error("pool upsert failed")
   236  			err = fmt.Errorf("poolOnUpsert: %w", err)
   237  		}
   238  	case resource.Delete:
   239  		err = ipam.poolOnDelete(ctx, event.Key, event.Object)
   240  		if err != nil {
   241  			ipam.logger.WithError(err).Error("pool delete failed")
   242  			err = fmt.Errorf("poolOnDelete: %w", err)
   243  		}
   244  	}
   245  	event.Done(err)
   246  }
   247  
   248  func (ipam *LBIPAM) handleServiceEvent(ctx context.Context, event resource.Event[*slim_core_v1.Service]) {
   249  	var err error
   250  	switch event.Kind {
   251  	case resource.Upsert:
   252  		err = ipam.svcOnUpsert(ctx, event.Key, event.Object)
   253  		if err != nil {
   254  			ipam.logger.WithError(err).Error("service upsert failed")
   255  			err = fmt.Errorf("svcOnUpsert: %w", err)
   256  		}
   257  	case resource.Delete:
   258  		err = ipam.svcOnDelete(ctx, event.Key, event.Object)
   259  		if err != nil {
   260  			ipam.logger.WithError(err).Error("service delete failed")
   261  			err = fmt.Errorf("svcOnDelete: %w", err)
   262  		}
   263  	}
   264  	event.Done(err)
   265  }
   266  
   267  // RegisterOnReady registers a callback function which will be invoked when LBIPAM is done initializing.
   268  // Note: mainly used in the integration tests.
   269  func (ipam *LBIPAM) RegisterOnReady(cb func()) {
   270  	ipam.initDoneCallbacks = append(ipam.initDoneCallbacks, cb)
   271  }
   272  
   273  func (ipam *LBIPAM) poolOnUpsert(ctx context.Context, k resource.Key, pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
   274  	// Deep copy so we get a version whose status we are allowed to update
   275  	pool = pool.DeepCopy()
   276  
   277  	var err error
   278  	if _, exists := ipam.pools[pool.GetName()]; exists {
   279  		err = ipam.handlePoolModified(ctx, pool)
   280  		if err != nil {
   281  			return fmt.Errorf("handlePoolModified: %w", err)
   282  		}
   283  	} else {
   284  		err = ipam.handleNewPool(ctx, pool)
   285  		if err != nil {
   286  			return fmt.Errorf("handleNewPool: %w", err)
   287  		}
   288  	}
   289  	if err != nil {
   290  		return err
   291  	}
   292  
   293  	err = ipam.settleConflicts(ctx)
   294  	if err != nil {
   295  		return fmt.Errorf("settleConflicts: %w", err)
   296  	}
   297  
   298  	err = ipam.satisfyAndUpdateCounts(ctx)
   299  	if err != nil {
   300  		return fmt.Errorf("satisfyAndUpdateCounts: %w", err)
   301  	}
   302  
   303  	return nil
   304  }
   305  
   306  func (ipam *LBIPAM) poolOnDelete(ctx context.Context, k resource.Key, pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
   307  	err := ipam.handlePoolDeleted(ctx, pool)
   308  	if err != nil {
   309  		return fmt.Errorf("handlePoolDeleted: %w", err)
   310  	}
   311  
   312  	err = ipam.settleConflicts(ctx)
   313  	if err != nil {
   314  		return fmt.Errorf("settleConflicts: %w", err)
   315  	}
   316  
   317  	err = ipam.satisfyAndUpdateCounts(ctx)
   318  	if err != nil {
   319  		return fmt.Errorf("satisfyAndUpdateCounts: %w", err)
   320  	}
   321  
   322  	return nil
   323  }
   324  
   325  func (ipam *LBIPAM) svcOnUpsert(ctx context.Context, k resource.Key, svc *slim_core_v1.Service) error {
   326  	err := ipam.handleUpsertService(ctx, svc)
   327  	if err != nil {
   328  		return fmt.Errorf("handleUpsertService: %w", err)
   329  	}
   330  
   331  	err = ipam.satisfyAndUpdateCounts(ctx)
   332  	if err != nil {
   333  		return fmt.Errorf("satisfyAndUpdateCounts: %w", err)
   334  	}
   335  
   336  	return nil
   337  }
   338  
   339  func (ipam *LBIPAM) svcOnDelete(ctx context.Context, k resource.Key, svc *slim_core_v1.Service) error {
   340  	ipam.logger.Debugf("Deleted service '%s/%s'", svc.GetNamespace(), svc.GetName())
   341  
   342  	ipam.handleDeletedService(svc)
   343  
   344  	err := ipam.satisfyAndUpdateCounts(ctx)
   345  	if err != nil {
   346  		return fmt.Errorf("satisfyAndUpdateCounts: %w", err)
   347  	}
   348  
   349  	return nil
   350  }
   351  
   352  func (ipam *LBIPAM) satisfyAndUpdateCounts(ctx context.Context) error {
   353  	err := ipam.satisfyServices(ctx)
   354  	if err != nil {
   355  		return fmt.Errorf("satisfyServices: %w", err)
   356  	}
   357  
   358  	err = ipam.updateAllPoolCounts(ctx)
   359  	if err != nil {
   360  		return fmt.Errorf("updateAllPoolCounts: %w", err)
   361  	}
   362  
   363  	return nil
   364  }
   365  
   366  // handleUpsertService updates the service view in the service store. It removes any allocations and ingresses
   367  // that do not belong on the service and moves the service to the satisfied or unsatisfied service view store,
   368  // depending on whether the service's requests are satisfied.
   369  func (ipam *LBIPAM) handleUpsertService(ctx context.Context, svc *slim_core_v1.Service) error {
   370  	key := resource.NewKey(svc)
   371  
   372  	// Ignore services which are not meant for us
   373  	if !ipam.isResponsibleForSVC(svc) {
   374  		sv, found, _ := ipam.serviceStore.GetService(key)
   375  		if !found {
   376  			// We were not responsible for this service before, so nothing to do
   377  			return nil
   378  		}
   379  
   380  		// we were responsible before, but not anymore
   381  
   382  		// Release allocations and other references as if the service was deleted
   383  		if err := ipam.svcOnDelete(ctx, key, svc); err != nil {
   384  			return fmt.Errorf("svcOnDelete: %w", err)
   385  		}
   386  
   387  		// Remove all ingress IPs and conditions, cleaning up the service for reuse by another controller
   388  		sv.Status.LoadBalancer.Ingress = nil
   389  		slim_meta.RemoveStatusCondition(&sv.Status.Conditions, ciliumSvcRequestSatisfiedCondition)
   390  
   391  		err := ipam.patchSvcStatus(ctx, sv)
   392  		if err != nil {
   393  			return fmt.Errorf("patchSvcStatus: %w", err)
   394  		}
   395  
   396  		return nil
   397  	}
   398  
   399  	// We are responsible for this service.
   400  
   401  	sv := ipam.serviceViewFromService(key, svc)
   402  
   403  	// Remove any allocations that are no longer valid due to a change in the service spec
   404  	err := ipam.stripInvalidAllocations(sv)
   405  	if err != nil {
   406  		return fmt.Errorf("stripInvalidAllocations: %w", err)
   407  	}
   408  
   409  	// For each ingress, check if its IP has been allocated by us. If it isn't, check if we can allocate that IP.
   410  	// If we can't, strip the ingress from the service.
   411  	svModifiedStatus, err := ipam.stripOrImportIngresses(sv)
   412  	if err != nil {
   413  		return fmt.Errorf("stripOrImportIngresses: %w", err)
   414  	}
   415  
   416  	// Attempt to satisfy this service in particular now. We do this now instead of relying on
   417  	// ipam.satisfyServices to avoid updating the service twice in quick succession.
   418  	if !sv.isSatisfied() {
   419  		modified, err := ipam.satisfyService(sv)
   420  		if err != nil {
   421  			return fmt.Errorf("satisfyService: %w", err)
   422  		}
   423  		if modified {
   424  			svModifiedStatus = true
   425  		}
   426  	}
   427  
   428  	// If any of the steps above changed the service object, update the object.
   429  	if svModifiedStatus {
   430  		err := ipam.patchSvcStatus(ctx, sv)
   431  		if err != nil {
   432  			return fmt.Errorf("patchSvcStatus: %w", err)
   433  		}
   434  	}
   435  
   436  	ipam.serviceStore.Upsert(sv)
   437  
   438  	return nil
   439  }
   440  
   441  func (ipam *LBIPAM) serviceViewFromService(key resource.Key, svc *slim_core_v1.Service) *ServiceView {
   442  	sv, found, _ := ipam.serviceStore.GetService(key)
   443  	if !found {
   444  		sv = &ServiceView{
   445  			Key: key,
   446  		}
   447  	}
   448  
   449  	// Update the service view
   450  	sv.Generation = svc.Generation
   451  	sv.Labels = svcLabels(svc)
   452  	sv.RequestedFamilies.IPv4, sv.RequestedFamilies.IPv6 = ipam.serviceIPFamilyRequest(svc)
   453  	sv.RequestedIPs = getSVCRequestedIPs(ipam.logger, svc)
   454  	sv.SharingKey = getSVCSharingKey(ipam.logger, svc)
   455  	sv.SharingCrossNamespace = getSVCSharingCrossNamespace(ipam.logger, svc)
   456  	sv.ExternalTrafficPolicy = svc.Spec.ExternalTrafficPolicy
   457  	sv.Ports = make([]slim_core_v1.ServicePort, len(svc.Spec.Ports))
   458  	copy(sv.Ports, svc.Spec.Ports)
   459  	sv.Namespace = svc.Namespace
   460  	sv.Selector = make(map[string]string)
   461  	for k, v := range svc.Spec.Selector {
   462  		sv.Selector[k] = v
   463  	}
   464  	sv.Status = svc.Status.DeepCopy()
   465  
   466  	return sv
   467  }
   468  
   469  func (ipam *LBIPAM) stripInvalidAllocations(sv *ServiceView) error {
   470  	var errs error
   471  	// Remove bad allocations which are no longer valid
   472  	for allocIdx := len(sv.AllocatedIPs) - 1; allocIdx >= 0; allocIdx-- {
   473  		alloc := sv.AllocatedIPs[allocIdx]
   474  
   475  		releaseAllocIP := func() error {
   476  			ipam.logger.Debugf("removing allocation '%s' from '%s'", alloc.IP.String(), sv.Key.String())
   477  			sharingGroup, _ := alloc.Origin.alloc.Get(alloc.IP)
   478  
   479  			idx := slices.Index(sharingGroup, sv)
   480  			if idx != -1 {
   481  				sharingGroup = slices.Delete(sharingGroup, idx, idx+1)
   482  			}
   483  
   484  			if len(sharingGroup) == 0 {
   485  				alloc.Origin.alloc.Free(alloc.IP)
   486  				ipam.rangesStore.DeleteServiceViewIPForSharingKey(sv.SharingKey, &alloc)
   487  			} else {
   488  				alloc.Origin.alloc.Update(alloc.IP, sharingGroup)
   489  			}
   490  
   491  			sv.AllocatedIPs = slices.Delete(sv.AllocatedIPs, allocIdx, allocIdx+1)
   492  
   493  			return nil
   494  		}
   495  
   496  		// If origin pool no longer exists, remove allocation
   497  		pool, found := ipam.pools[alloc.Origin.originPool]
   498  		if !found {
   499  			errs = errors.Join(errs, releaseAllocIP())
   500  			continue
   501  		}
   502  
   503  		// If service no longer matches the pool selector, remove allocation
   504  		if pool.Spec.ServiceSelector != nil {
   505  			selector, err := slim_meta_v1.LabelSelectorAsSelector(pool.Spec.ServiceSelector)
   506  			if err != nil {
   507  				errs = errors.Join(errs, fmt.Errorf("Making selector from pool '%s' label selector: %w", pool.Name, err))
   508  				continue
   509  			}
   510  
   511  			if !selector.Matches(sv.Labels) {
   512  				errs = errors.Join(errs, releaseAllocIP())
   513  				continue
   514  			}
   515  		}
   516  
   517  		// For every AllocatedIP that is part of a sharing group, check whether this service is still compatible with the group.
   518  		// If this service is no longer compatible, we have to remove the IP from the sharing group and re-allocate.
   519  		if !ipam.checkSharingGroupCompatibility(sv) {
   520  			errs = errors.Join(errs, releaseAllocIP())
   521  			continue
   522  		}
   523  
   524  		// If the service is requesting specific IPs
   525  		if len(sv.RequestedIPs) > 0 {
   526  			found := false
   527  			for _, reqIP := range sv.RequestedIPs {
   528  				if reqIP.Compare(alloc.IP) == 0 {
   529  					found = true
   530  					break
   531  				}
   532  			}
   533  			// If allocated IP has not been requested, remove it
   534  			if !found {
   535  				errs = errors.Join(errs, releaseAllocIP())
   536  				continue
   537  			}
   538  		} else {
   539  			// No specific requests have been made, check if we have ingresses from un-requested families.
   540  
   541  			if isIPv6(alloc.IP) {
   542  				// Service has an IPv6 address, but its spec doesn't request it anymore, so take it away
   543  				if !sv.RequestedFamilies.IPv6 {
   544  					errs = errors.Join(errs, releaseAllocIP())
   545  					continue
   546  				}
   547  
   548  			} else {
   549  				// Service has an IPv4 address, but its spec doesn't request it anymore, so take it away
   550  				if !sv.RequestedFamilies.IPv4 {
   551  					errs = errors.Join(errs, releaseAllocIP())
   552  					continue
   553  				}
   554  			}
   555  		}
   556  	}
   557  
   558  	return errs
   559  }
   560  
   561  func (ipam *LBIPAM) checkSharingGroupCompatibility(sv *ServiceView) bool {
   562  	for _, allocIP := range sv.AllocatedIPs {
   563  		sharedViews, _ := allocIP.Origin.alloc.Get(allocIP.IP)
   564  		if len(sharedViews) == 1 {
   565  			// The allocation isn't shared, we can continue
   566  			continue
   567  		}
   568  
   569  		for _, sharedView := range sharedViews {
   570  			if sv != sharedView {
   571  				if c := sharedView.isCompatible(sv); !c {
   572  					return false
   573  				}
   574  			}
   575  		}
   576  	}
   577  
   578  	return true
   579  }
   580  
   581  func (ipam *LBIPAM) stripOrImportIngresses(sv *ServiceView) (statusModified bool, err error) {
   582  	var newIngresses []slim_core_v1.LoadBalancerIngress
   583  
   584  	// Only keep valid ingresses.
   585  	for _, ingress := range sv.Status.LoadBalancer.Ingress {
   586  		if ingress.IP == "" {
   587  			continue
   588  		}
   589  
   590  		ip, err := netip.ParseAddr(ingress.IP)
   591  		if err != nil {
   592  			continue
   593  		}
   594  
   595  		// Remove any ingress which is no longer allocated
   596  		var viewIP *ServiceViewIP
   597  		for i, vip := range sv.AllocatedIPs {
   598  			if vip.IP.Compare(ip) == 0 {
   599  				viewIP = &sv.AllocatedIPs[i]
   600  				break
   601  			}
   602  		}
   603  		if viewIP == nil {
   604  			// The ingress is not allocated by LB IPAM, check if we can "import it"
   605  
   606  			// If the service has requested IPs, the ingress must match one of them.
   607  			if len(sv.RequestedIPs) > 0 {
   608  				found := false
   609  				for _, reqIP := range sv.RequestedIPs {
   610  					if reqIP.Compare(ip) == 0 {
   611  						found = true
   612  						break
   613  					}
   614  				}
   615  				if !found {
   616  					// Don't keep ingress
   617  					continue
   618  				}
   619  			}
   620  
   621  			if isIPv6(ip) {
   622  				if !sv.RequestedFamilies.IPv6 {
   623  					continue
   624  				}
   625  			} else {
   626  				if !sv.RequestedFamilies.IPv4 {
   627  					continue
   628  				}
   629  			}
   630  
   631  			lbRange, _, err := ipam.findRangeOfIP(sv, ip)
   632  			if err != nil {
   633  				return statusModified, fmt.Errorf("findRangeOfIP: %w", err)
   634  			}
   635  			if lbRange == nil {
   636  				continue
   637  			}
   638  
   639  			serviceViews := []*ServiceView{sv}
   640  			err = lbRange.alloc.Alloc(ip, serviceViews)
   641  			if err != nil {
   642  				if errors.Is(err, ipalloc.ErrInUse) {
   643  					// The IP is already allocated, defer to regular allocation logic to determine
   644  					// if this service can share the allocation.
   645  					continue
   646  				}
   647  
   648  				return statusModified, fmt.Errorf("Error while attempting to allocate IP '%s': %w", ingress.IP, err)
   649  			}
   650  
   651  			sv.AllocatedIPs = append(sv.AllocatedIPs, ServiceViewIP{
   652  				IP:     ip,
   653  				Origin: lbRange,
   654  			})
   655  		}
   656  
   657  		newIngresses = append(newIngresses, ingress)
   658  	}
   659  
   660  	// Deduplicate ingress IPs (duplicate ingresses can have been created externally before we adopted the service)
   661  	newIngresses = slices.CompactFunc(newIngresses, func(a, b slim_core_v1.LoadBalancerIngress) bool {
   662  		return a.IP == b.IP
   663  	})
   664  
   665  	// Check if we have removed any ingresses
   666  	if len(sv.Status.LoadBalancer.Ingress) != len(newIngresses) {
   667  		statusModified = true
   668  	}
   669  
   670  	sv.Status.LoadBalancer.Ingress = newIngresses
   671  
   672  	return statusModified, nil
   673  }
   674  
   675  func getSVCRequestedIPs(log logrus.FieldLogger, svc *slim_core_v1.Service) []netip.Addr {
   676  	var ips []netip.Addr
   677  	if svc.Spec.LoadBalancerIP != "" {
   678  		ip, err := netip.ParseAddr(svc.Spec.LoadBalancerIP)
   679  		if err == nil {
   680  			ips = append(ips, ip)
   681  		} else {
   682  			log.WithError(err).Error("Unable to parse service.spec.LoadBalancerIP")
   683  		}
   684  	}
   685  
   686  	if value, _ := annotation.Get(svc, annotation.LBIPAMIPsKey, annotation.LBIPAMIPKeyAlias); value != "" {
   687  		for _, ipStr := range strings.Split(value, ",") {
   688  			ip, err := netip.ParseAddr(strings.TrimSpace(ipStr))
   689  			if err == nil {
   690  				ips = append(ips, ip)
   691  			} else {
   692  				log.WithError(err).Error("Unable to parse IP address from the LB IPAM IPs annotation")
   693  			}
   694  		}
   695  	}
   696  
   697  	return slices.CompactFunc(ips, func(a, b netip.Addr) bool {
   698  		return a.Compare(b) == 0
   699  	})
   700  }
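
// Illustrative example (assumption: the concrete annotation key is whatever annotation.LBIPAMIPsKey
// resolves to). The value is parsed as a comma-separated list, whitespace around each entry is
// trimmed and consecutive duplicates are collapsed, so an annotation such as
//
//	metadata:
//	  annotations:
//	    <annotation.LBIPAMIPsKey>: "192.0.2.10, 2001:db8::10"
//
// would request both 192.0.2.10 and 2001:db8::10, in addition to spec.loadBalancerIP if that is set.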
   701  
   702  func getSVCSharingKey(log logrus.FieldLogger, svc *slim_core_v1.Service) string {
   703  	if val, _ := annotation.Get(svc, annotation.LBIPAMSharingKey, annotation.LBIPAMSharingKeyAlias); val != "" {
   704  		return val
   705  	}
   706  	return ""
   707  }
   708  
   709  func getSVCSharingCrossNamespace(log logrus.FieldLogger, svc *slim_core_v1.Service) []string {
   710  	if val, _ := annotation.Get(svc, annotation.LBIPAMSharingAcrossNamespace, annotation.LBIPAMSharingAcrossNamespaceAlias); val != "" {
   711  		return strings.Split(val, ",")
   712  	}
   713  	return []string{}
   714  }
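
// Illustrative example (assumption: the concrete annotation keys are whatever annotation.LBIPAMSharingKey
// and annotation.LBIPAMSharingAcrossNamespace resolve to). The sharing key is a plain string, the
// cross-namespace value is a comma-separated namespace list, and the ciliumSvcLBISKCNWildward
// constant ("*") suggests a wildcard entry matching any namespace:
//
//	metadata:
//	  annotations:
//	    <annotation.LBIPAMSharingKey>: "shared-vip-1"
//	    <annotation.LBIPAMSharingAcrossNamespace>: "team-a,team-b"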
   715  
   716  func (ipam *LBIPAM) handleDeletedService(svc *slim_core_v1.Service) {
   717  	key := resource.NewKey(svc)
   718  	sv, found, _ := ipam.serviceStore.GetService(key)
   719  	if !found {
   720  		return
   721  	}
   722  
   723  	// Remove all allocations for this service
   724  	for _, alloc := range sv.AllocatedIPs {
   725  		// Even if a service doesn't have a sharing key, each allocation is a sharing group
   726  		sharingGroupIPs, found := alloc.Origin.alloc.Get(alloc.IP)
   727  		if !found {
   728  			continue
   729  		}
   730  
   731  		// Remove this service view from the sharing group
   732  		i := slices.Index(sharingGroupIPs, sv)
   733  		if i != -1 {
   734  			sharingGroupIPs = slices.Delete(sharingGroupIPs, i, i+1)
   735  		}
   736  
   737  		// If there are still service views in the group, update the allocation, otherwise free the IP
   738  		if len(sharingGroupIPs) > 0 {
   739  			alloc.Origin.alloc.Update(alloc.IP, sharingGroupIPs)
   740  		} else {
   741  			alloc.Origin.alloc.Free(alloc.IP)
   742  			// If the `ServiceView` has a sharing key, remove the IP from the `rangeStore` index
   743  			if sv.SharingKey != "" {
   744  				ipam.rangesStore.DeleteServiceViewIPForSharingKey(sv.SharingKey, &ServiceViewIP{
   745  					IP:     alloc.IP,
   746  					Origin: alloc.Origin,
   747  				})
   748  			}
   749  		}
   750  
   751  	}
   752  
   753  	ipam.serviceStore.Delete(key)
   754  }
   755  
   756  // satisfyServices attempts to satisfy all unsatisfied services by allocating and assigning IP addresses
   757  func (ipam *LBIPAM) satisfyServices(ctx context.Context) error {
   758  	for _, sv := range ipam.serviceStore.unsatisfied {
   759  		statusModified, err := ipam.satisfyService(sv)
   760  		if err != nil {
   761  			return fmt.Errorf("satisfyService: %w", err)
   762  		}
   763  
   764  		// If the service's status has been modified, update the service.
   765  		if statusModified {
   766  			err := ipam.patchSvcStatus(ctx, sv)
   767  			if err != nil {
   768  				return fmt.Errorf("patchSvcStatus: %w", err)
   769  			}
   770  		}
   771  
   772  		ipam.serviceStore.Upsert(sv)
   773  	}
   774  
   775  	return nil
   776  }
   777  
   778  func (ipam *LBIPAM) satisfyService(sv *ServiceView) (statusModified bool, err error) {
   779  	if len(sv.RequestedIPs) > 0 {
   780  		statusModified, err = ipam.satisfySpecificIPRequests(sv)
   781  		if err != nil {
   782  			return statusModified, fmt.Errorf("satisfySpecificIPRequests: %w", err)
   783  		}
   784  	} else {
   785  		statusModified, err = ipam.satisfyGenericIPRequests(sv)
   786  		if err != nil {
   787  			return statusModified, fmt.Errorf("satisfyGenericIPRequests: %w", err)
   788  		}
   789  	}
   790  
   791  	// Sync allocated IPs back to the service
   792  	for _, alloc := range sv.AllocatedIPs {
   793  		// If the allocated IP isn't found in the assigned list, assign it
   794  		if slices.IndexFunc(sv.Status.LoadBalancer.Ingress, func(in slim_core_v1.LoadBalancerIngress) bool {
   795  			addr, err := netip.ParseAddr(in.IP)
   796  			if err != nil {
   797  				return false
   798  			}
   799  
   800  			return addr.Compare(alloc.IP) == 0
   801  		}) == -1 {
   802  			// We allocated a new IP, add it to the ingress list
   803  			sv.Status.LoadBalancer.Ingress = append(sv.Status.LoadBalancer.Ingress, slim_core_v1.LoadBalancerIngress{
   804  				IP: alloc.IP.String(),
   805  			})
   806  			statusModified = true
   807  
   808  			// If the `ServiceView` has a sharing key, add the IP to the `rangeStore` index
   809  			if sv.SharingKey != "" {
   810  				ipam.rangesStore.AddServiceViewIPForSharingKey(sv.SharingKey, &alloc)
   811  			}
   812  		}
   813  	}
   814  
   815  	if sv.isSatisfied() {
   816  		if ipam.setSVCSatisfiedCondition(sv, true, "satisfied", "") {
   817  			statusModified = true
   818  		}
   819  	}
   820  
   821  	ipam.serviceStore.Upsert(sv)
   822  
   823  	return statusModified, err
   824  }
   825  
   826  func (ipam *LBIPAM) satisfySpecificIPRequests(sv *ServiceView) (statusModified bool, err error) {
   827  	// The service requests specific IPs
   828  	for _, reqIP := range sv.RequestedIPs {
   829  		// If the requested IP is already allocated to this service, skip it
   830  		if slices.IndexFunc(sv.AllocatedIPs, func(sv ServiceViewIP) bool {
   831  			return reqIP.Compare(sv.IP) == 0
   832  		}) != -1 {
   833  			continue
   834  		}
   835  
   836  		lbRange, foundPool, err := ipam.findRangeOfIP(sv, reqIP)
   837  		if err != nil {
   838  			return statusModified, fmt.Errorf("findRangeOfIP: %w", err)
   839  		}
   840  		if lbRange == nil {
   841  			msg := fmt.Sprintf("No pool exists with a CIDR containing '%s'", reqIP)
   842  			reason := "no_pool"
   843  			if foundPool {
   844  				msg = fmt.Sprintf("The pool with the CIDR containing '%s' doesn't select this service", reqIP)
   845  				reason = "pool_selector_mismatch"
   846  			}
   847  			if ipam.setSVCSatisfiedCondition(sv, false, reason, msg) {
   848  				statusModified = true
   849  			}
   850  
   851  			continue
   852  		}
   853  
   854  		if serviceViews, exists := lbRange.alloc.Get(reqIP); exists {
   855  			// The IP is already assigned to another service, if we have a sharing key we might be able to share it.
   856  			if sv.SharingKey == "" {
   857  				msg := fmt.Sprintf("The IP '%s' is already allocated to another service", reqIP)
   858  				reason := "already_allocated"
   859  				if ipam.setSVCSatisfiedCondition(sv, false, reason, msg) {
   860  					statusModified = true
   861  				}
   862  				continue
   863  			}
   864  
   865  			// Check if the ports and external traffic policy of the current service are compatible with the existing `ServiceViews`
   866  			// This also checks if the sharing key is the same
   867  			compatible := true
   868  			for _, serviceView := range serviceViews {
   869  				if !(serviceView.isCompatible(sv)) {
   870  					compatible = false
   871  					break
   872  				}
   873  			}
   874  			// If they are compatible, add this service view to the list and satisfy the IP
   875  			if !compatible {
   876  				// The IP was requested and a sharing key was provided, but the IP is already allocated to another service with a different sharing key.
   877  				msg := fmt.Sprintf("The IP '%s' is already allocated to another service with a different sharing key", reqIP)
   878  				reason := "already_allocated_different_sharing_key"
   879  				if ipam.setSVCSatisfiedCondition(sv, false, reason, msg) {
   880  					statusModified = true
   881  				}
   882  				continue
   883  			}
   884  			serviceViews = append(serviceViews, sv)
   885  			err = lbRange.alloc.Update(reqIP, serviceViews)
   886  			if err != nil {
   887  				ipam.logger.WithError(err).Errorf("Error while attempting to update IP '%s'", reqIP)
   888  				continue
   889  			}
   890  		} else {
   891  			ipam.logger.Debugf("Allocate '%s' for '%s'", reqIP.String(), sv.Key.String())
   892  			err = lbRange.alloc.Alloc(reqIP, []*ServiceView{sv})
   893  			if err != nil {
   894  				if errors.Is(err, ipalloc.ErrInUse) {
   895  					return statusModified, fmt.Errorf("ipalloc.Alloc: %w", err)
   896  				}
   897  
   898  				ipam.logger.WithError(err).Error("Unable to allocate IP")
   899  				continue
   900  			}
   901  		}
   902  
   903  		sv.AllocatedIPs = append(sv.AllocatedIPs, ServiceViewIP{
   904  			IP:     reqIP,
   905  			Origin: lbRange,
   906  		})
   907  	}
   908  
   909  	return statusModified, nil
   910  }
   911  
   912  func (ipam *LBIPAM) satisfyGenericIPRequests(sv *ServiceView) (statusModified bool, err error) {
   913  	hasIPv4 := false
   914  	hasIPv6 := false
   915  	for _, allocated := range sv.AllocatedIPs {
   916  		if isIPv6(allocated.IP) {
   917  			hasIPv6 = true
   918  		} else {
   919  			hasIPv4 = true
   920  		}
   921  	}
   922  
   923  	// Missing an IPv4 address, let's attempt to allocate an address
   924  	if sv.RequestedFamilies.IPv4 && !hasIPv4 {
   925  		statusModified, err = ipam.satisfyGenericIPv4Requests(sv)
   926  		if err != nil {
   927  			return statusModified, fmt.Errorf("satisfyGenericIPv4Requests: %w", err)
   928  		}
   929  	}
   930  
   931  	// Missing an IPv6 address, let's attempt to allocate an address
   932  	if sv.RequestedFamilies.IPv6 && !hasIPv6 {
   933  		statusModified, err = ipam.satisfyGenericIPv6Requests(sv)
   934  		if err != nil {
   935  			return statusModified, fmt.Errorf("satisfyGenericIPv6Requests: %w", err)
   936  		}
   937  	}
   938  
   939  	return statusModified, nil
   940  }
   941  
   942  func (ipam *LBIPAM) satisfyGenericIPv4Requests(sv *ServiceView) (statusModified bool, err error) {
   943  	if sv.SharingKey != "" {
   944  		// If the service has a sharing key, check if it exists in the `rangeStore` via the index.
   945  		sharingGroupIPs, _ := ipam.rangesStore.GetServiceViewIPsForSharingKey(sv.SharingKey)
   946  		// If it exists, we go to the `LBRange` and get the list of `ServiceViews`.
   947  		for _, sharingGroupIP := range sharingGroupIPs {
   948  			// We only want to allocate IPv4 addresses from the sharing key pool
   949  			if isIPv6(sharingGroupIP.IP) {
   950  				continue
   951  			}
   952  
   953  			serviceViews, _ := sharingGroupIP.Origin.alloc.Get(sharingGroupIP.IP)
   954  			if len(serviceViews) == 0 {
   955  				continue
   956  			}
   957  
   958  			// Check if the ports and external traffic policy of the current service are compatible with the existing `ServiceViews`
   959  			compatible := true
   960  			for _, serviceView := range serviceViews {
   961  				if !(serviceView.isCompatible(sv)) {
   962  					compatible = false
   963  					break
   964  				}
   965  			}
   966  
   967  			// If they are compatible, add this service view to the list and satisfy the IP
   968  			if compatible {
   969  				sv.AllocatedIPs = append(sv.AllocatedIPs, *sharingGroupIP)
   970  				serviceViews = append(serviceViews, sv)
   971  				sharingGroupIP.Origin.alloc.Update(sharingGroupIP.IP, serviceViews)
   972  				return statusModified, nil
   973  			}
   974  		}
   975  	}
   976  
   977  	// Unable to share an already allocated IP, so let's allocate a new one
   978  	newIP, lbRange, err := ipam.allocateIPAddress(sv, IPv4Family)
   979  	if err != nil && !errors.Is(err, ipalloc.ErrFull) {
   980  		return statusModified, fmt.Errorf("allocateIPAddress: %w", err)
   981  	}
   982  	if newIP.Compare(netip.Addr{}) != 0 {
   983  		sv.AllocatedIPs = append(sv.AllocatedIPs, ServiceViewIP{
   984  			IP:     newIP,
   985  			Origin: lbRange,
   986  		})
   987  	} else {
   988  		reason := "no_pool"
   989  		message := "There are no enabled CiliumLoadBalancerIPPools that match this service"
   990  		if errors.Is(err, ipalloc.ErrFull) {
   991  			reason = "out_of_ips"
   992  			message = "All enabled CiliumLoadBalancerIPPools that match this service ran out of allocatable IPs"
   993  		}
   994  
   995  		if ipam.setSVCSatisfiedCondition(sv, false, reason, message) {
   996  			statusModified = true
   997  		}
   998  	}
   999  
  1000  	return statusModified, nil
  1001  }
  1002  
  1003  func (ipam *LBIPAM) satisfyGenericIPv6Requests(sv *ServiceView) (statusModified bool, err error) {
  1004  	allocatedFromSharingKey := false
  1005  	if sv.SharingKey != "" {
  1006  		// If the service has a sharing key, check if it exists in the `rangeStore` via the index.
  1007  		serviceViewIPs, foundServiceViewIP := ipam.rangesStore.GetServiceViewIPsForSharingKey(sv.SharingKey)
  1008  		if foundServiceViewIP && len(serviceViewIPs) > 0 {
  1009  			// If it exists, we go to the `LBRange` and get the list of `ServiceViews`.
  1010  			for _, serviceViewIP := range serviceViewIPs {
  1011  				// We only want to allocate IPv6 addresses from the sharing key pool
  1012  				if !isIPv6(serviceViewIP.IP) {
  1013  					continue
  1014  				}
  1015  				lbRangePtr := serviceViewIP.Origin
  1016  				if lbRangePtr == nil {
  1017  					continue
  1018  				}
  1019  				lbRange := *lbRangePtr
  1020  				serviceViews, foundServiceViewsPtr := lbRange.alloc.Get(serviceViewIP.IP)
  1021  				if !foundServiceViewsPtr || len(serviceViews) == 0 {
  1022  					continue
  1023  				}
  1024  				// Check if the ports and external traffic policy of the current service are compatible with the existing `ServiceViews`
  1025  				compatible := true
  1026  				for _, serviceView := range serviceViews {
  1027  					if !(serviceView.isCompatible(sv)) {
  1028  						compatible = false
  1029  						break
  1030  					}
  1031  				}
  1032  				// If they are compatible, add this service view to the list and satisfy the IP
  1033  				if compatible {
  1034  					sv.AllocatedIPs = append(sv.AllocatedIPs, *serviceViewIP)
  1035  					serviceViews = append(serviceViews, sv)
  1036  					lbRange.alloc.Update(serviceViewIP.IP, serviceViews)
  1037  					allocatedFromSharingKey = true
  1038  					break
  1039  				}
  1040  			}
  1041  		}
  1042  	}
  1043  	if !allocatedFromSharingKey {
  1044  		newIP, lbRange, err := ipam.allocateIPAddress(sv, IPv6Family)
  1045  		if err != nil && !errors.Is(err, ipalloc.ErrFull) {
  1046  			return statusModified, fmt.Errorf("allocateIPAddress: %w", err)
  1047  		}
  1048  		if newIP.Compare(netip.Addr{}) != 0 {
  1049  			sv.AllocatedIPs = append(sv.AllocatedIPs, ServiceViewIP{
  1050  				IP:     newIP,
  1051  				Origin: lbRange,
  1052  			})
  1053  		} else {
  1054  			reason := "no_pool"
  1055  			message := "There are no enabled CiliumLoadBalancerIPPools that match this service"
  1056  			if errors.Is(err, ipalloc.ErrFull) {
  1057  				reason = "out_of_ips"
  1058  				message = "All enabled CiliumLoadBalancerIPPools that match this service ran out of allocatable IPs"
  1059  			}
  1060  
  1061  			if ipam.setSVCSatisfiedCondition(sv, false, reason, message) {
  1062  				statusModified = true
  1063  			}
  1064  		}
  1065  	}
  1066  
  1067  	return statusModified, nil
  1068  }
  1069  
  1070  func (ipam *LBIPAM) setSVCSatisfiedCondition(
  1071  	sv *ServiceView,
  1072  	satisfied bool,
  1073  	reason, message string,
  1074  ) (statusModified bool) {
  1075  	status := slim_meta_v1.ConditionFalse
  1076  	if satisfied {
  1077  		status = slim_meta_v1.ConditionTrue
  1078  	}
  1079  
  1080  	if cond := slim_meta.FindStatusCondition(sv.Status.Conditions, ciliumSvcRequestSatisfiedCondition); cond != nil &&
  1081  		cond.Status == status &&
  1082  		cond.ObservedGeneration == sv.Generation &&
  1083  		cond.Reason == reason &&
  1084  		cond.Message == message {
  1085  		return false
  1086  	}
  1087  
  1088  	slim_meta.SetStatusCondition(&sv.Status.Conditions, slim_meta_v1.Condition{
  1089  		Type:               ciliumSvcRequestSatisfiedCondition,
  1090  		Status:             status,
  1091  		ObservedGeneration: sv.Generation,
  1092  		LastTransitionTime: slim_meta_v1.Now(),
  1093  		Reason:             reason,
  1094  		Message:            message,
  1095  	})
  1096  	return true
  1097  }
  1098  
  1099  func (ipam *LBIPAM) findRangeOfIP(sv *ServiceView, ip netip.Addr) (lbRange *LBRange, foundPool bool, err error) {
  1100  	for _, r := range ipam.rangesStore.ranges {
  1101  		if r.Disabled() {
  1102  			continue
  1103  		}
  1104  
  1105  		from, to := r.alloc.Range()
  1106  		if ip.Compare(from) < 0 || ip.Compare(to) > 0 {
  1107  			continue
  1108  		}
  1109  
  1110  		pool, found := ipam.pools[r.originPool]
  1111  		if !found {
  1112  			continue
  1113  		}
  1114  
  1115  		foundPool = true
  1116  
  1117  		if pool.Spec.ServiceSelector != nil {
  1118  			selector, err := slim_meta_v1.LabelSelectorAsSelector(pool.Spec.ServiceSelector)
  1119  			if err != nil {
  1120  				return nil, false, fmt.Errorf("Making selector from pool '%s' label selector: %w", pool.Name, err)
  1121  			}
  1122  
  1123  			if !selector.Matches(sv.Labels) {
  1124  				continue
  1125  			}
  1126  		}
  1127  
  1128  		return r, false, nil
  1129  	}
  1130  
  1131  	return nil, foundPool, nil
  1132  }
  1133  
  1134  // isResponsibleForSVC checks if LB IPAM should allocate and assign IPs or some other controller
  1135  func (ipam *LBIPAM) isResponsibleForSVC(svc *slim_core_v1.Service) bool {
  1136  	// Ignore non-lb services
  1137  	if svc.Spec.Type != slim_core_v1.ServiceTypeLoadBalancer {
  1138  		return false
  1139  	}
  1140  
  1141  	// We will assume that we are the default LB; LB-IPAM shouldn't be enabled in clusters that don't support LBClasses
  1142  	// and have multiple LBs.
  1143  	if svc.Spec.LoadBalancerClass == nil {
  1144  		return true
  1145  	}
  1146  
  1147  	if !slices.Contains(ipam.lbClasses, *svc.Spec.LoadBalancerClass) {
  1148  		return false
  1149  	}
  1150  
  1151  	return true
  1152  }
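
// Minimal sketch of a Service this controller would consider itself responsible for. The class name
// below is hypothetical; in practice it must either be omitted or appear in the configured lbClasses:
//
//	apiVersion: v1
//	kind: Service
//	metadata:
//	  name: example
//	spec:
//	  type: LoadBalancer
//	  loadBalancerClass: example.io/my-lb-class # or omitted, in which case LB-IPAM assumes it is the default LB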
  1153  
  1154  type AddressFamily string
  1155  
  1156  const (
  1157  	IPv4Family AddressFamily = "IPv4"
  1158  	IPv6Family AddressFamily = "IPv6"
  1159  )
  1160  
  1161  func (ipam *LBIPAM) allocateIPAddress(
  1162  	sv *ServiceView,
  1163  	family AddressFamily,
  1164  ) (
  1165  	newIP netip.Addr,
  1166  	chosenRange *LBRange,
  1167  	err error,
  1168  ) {
  1169  	full := false
  1170  	for _, lbRange := range ipam.rangesStore.ranges {
  1171  		// If the range is disabled we can't allocate new IPs from it.
  1172  		if lbRange.Disabled() {
  1173  			continue
  1174  		}
  1175  
  1176  		// Skip this range if it doesn't match the requested address family
  1177  		if _, to := lbRange.alloc.Range(); isIPv6(to) {
  1178  			if family == IPv4Family {
  1179  				continue
  1180  			}
  1181  		} else {
  1182  			if family == IPv6Family {
  1183  				continue
  1184  			}
  1185  		}
  1186  
  1187  		pool, found := ipam.pools[lbRange.originPool]
  1188  		if !found {
  1189  			ipam.logger.WithField("pool-name", lbRange.originPool).
  1190  				Warnf("Bad state detected, store contains lbRange for pool '%s' but missing the pool", lbRange.originPool)
  1191  			continue
  1192  		}
  1193  
  1194  		// If there is no selector, all services match
  1195  		if pool.Spec.ServiceSelector != nil {
  1196  			selector, err := slim_meta_v1.LabelSelectorAsSelector(pool.Spec.ServiceSelector)
  1197  			if err != nil {
  1198  				return netip.Addr{}, nil, fmt.Errorf("Making selector from pool '%s' label selector: %w", pool.Name, err)
  1199  			}
  1200  
  1201  			if !selector.Matches(sv.Labels) {
  1202  				continue
  1203  			}
  1204  		}
  1205  
  1206  		// Attempt to allocate the next IP from this range.
  1207  		newIp, err := lbRange.alloc.AllocAny([]*ServiceView{sv})
  1208  		if err != nil {
  1209  			// If the range is full, mark it.
  1210  			if errors.Is(err, ipalloc.ErrFull) {
  1211  				full = true
  1212  				continue
  1213  			}
  1214  
  1215  			ipam.logger.WithError(err).Error("Allocate next IP from lb range")
  1216  			continue
  1217  		}
  1218  
  1219  		return newIp, lbRange, nil
  1220  	}
  1221  
  1222  	if full {
  1223  		return netip.Addr{}, nil, ipalloc.ErrFull
  1224  	}
  1225  
  1226  	return netip.Addr{}, nil, nil
  1227  }
  1228  
  1229  // serviceIPFamilyRequest checks which families of IP addresses are requested
  1230  func (ipam *LBIPAM) serviceIPFamilyRequest(svc *slim_core_v1.Service) (IPv4Requested, IPv6Requested bool) {
  1231  	if svc.Spec.IPFamilyPolicy != nil {
  1232  		switch *svc.Spec.IPFamilyPolicy {
  1233  		case slim_core_v1.IPFamilyPolicySingleStack:
  1234  			if len(svc.Spec.IPFamilies) > 0 {
  1235  				if svc.Spec.IPFamilies[0] == slim_core_v1.IPFamily(IPv4Family) {
  1236  					IPv4Requested = true
  1237  				} else {
  1238  					IPv6Requested = true
  1239  				}
  1240  			} else {
  1241  				if ipam.ipv4Enabled {
  1242  					IPv4Requested = true
  1243  				} else if ipam.ipv6Enabled {
  1244  					IPv6Requested = true
  1245  				}
  1246  			}
  1247  
  1248  		case slim_core_v1.IPFamilyPolicyPreferDualStack:
  1249  			if len(svc.Spec.IPFamilies) > 0 {
  1250  				for _, family := range svc.Spec.IPFamilies {
  1251  					if family == slim_core_v1.IPFamily(IPv4Family) {
  1252  						IPv4Requested = ipam.ipv4Enabled
  1253  					}
  1254  					if family == slim_core_v1.IPFamily(IPv6Family) {
  1255  						IPv6Requested = ipam.ipv6Enabled
  1256  					}
  1257  				}
  1258  			} else {
  1259  				// If no IPFamilies are specified
  1260  
  1261  				IPv4Requested = ipam.ipv4Enabled
  1262  				IPv6Requested = ipam.ipv6Enabled
  1263  			}
  1264  
  1265  		case slim_core_v1.IPFamilyPolicyRequireDualStack:
  1266  			IPv4Requested = ipam.ipv4Enabled
  1267  			IPv6Requested = ipam.ipv6Enabled
  1268  		}
  1269  	} else {
  1270  		if len(svc.Spec.IPFamilies) > 0 {
  1271  			if svc.Spec.IPFamilies[0] == slim_core_v1.IPFamily(IPv4Family) {
  1272  				IPv4Requested = true
  1273  			} else {
  1274  				IPv6Requested = true
  1275  			}
  1276  		} else {
  1277  			if ipam.ipv4Enabled {
  1278  				IPv4Requested = true
  1279  			} else if ipam.ipv6Enabled {
  1280  				IPv6Requested = true
  1281  			}
  1282  		}
  1283  	}
  1284  
  1285  	return IPv4Requested, IPv6Requested
  1286  }
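
// Summary of the mapping above, assuming both ipv4Enabled and ipv6Enabled are true (illustrative only):
//
//	ipFamilyPolicy      ipFamilies   -> requested families
//	SingleStack         [IPv6]       -> IPv6 only
//	SingleStack         (empty)      -> IPv4 only (first enabled family wins)
//	PreferDualStack     [IPv4]       -> IPv4 only
//	PreferDualStack     (empty)      -> IPv4 and IPv6
//	RequireDualStack    (any)        -> IPv4 and IPv6
//	(unset)             [IPv4]       -> IPv4 only
//	(unset)             (empty)      -> IPv4 only (first enabled family wins)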
  1287  
  1288  // Handle the addition of a new IPPool
  1289  func (ipam *LBIPAM) handleNewPool(ctx context.Context, pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
  1290  	// Sanity check that we do not yet know about this pool.
  1291  	if _, found := ipam.pools[pool.GetName()]; found {
  1292  		ipam.logger.WithField("pool-name", pool.GetName()).
  1293  			Warnf("LB IP Pool '%s' has been created, but an LB IP Pool with the same name already exists", pool.GetName())
  1294  		return nil
  1295  	}
  1296  
  1297  	ipam.pools[pool.GetName()] = pool
  1298  	for _, ipBlock := range pool.Spec.Blocks {
  1299  		from, to, fromCidr, err := ipRangeFromBlock(ipBlock)
  1300  		if err != nil {
  1301  			return fmt.Errorf("Error parsing ip block: %w", err)
  1302  		}
  1303  
  1304  		lbRange, err := NewLBRange(from, to, pool)
  1305  		if err != nil {
  1306  			return fmt.Errorf("Error making LB Range for '%s': %w", ipBlock.Cidr, err)
  1307  		}
  1308  
  1309  		// If AllowFirstLastIPs is no, mark the first and last IP as allocated upon range creation.
  1310  		if fromCidr && pool.Spec.AllowFirstLastIPs == cilium_api_v2alpha1.AllowFirstLastIPNo {
  1311  			from, to := lbRange.alloc.Range()
  1312  
  1313  			// If the first and last IPs are the same or adjacent, we would reserve the entire range.
  1314  			// Only reserve first and last IPs for ranges /30 or /126 and larger.
  1315  			if !(from.Compare(to) == 0 || from.Next().Compare(to) == 0) {
  1316  				lbRange.alloc.Alloc(from, nil)
  1317  				lbRange.alloc.Alloc(to, nil)
  1318  			}
  1319  		}
  1320  
  1321  		ipam.rangesStore.Add(lbRange)
  1322  	}
  1323  
  1324  	// Unmark new pools so they get a conflict: False condition set, otherwise kubectl will report a blank field.
  1325  	ipam.unmarkPool(ctx, pool)
  1326  
  1327  	return nil
  1328  }
  1329  
  1330  func ipRangeFromBlock(block cilium_api_v2alpha1.CiliumLoadBalancerIPPoolIPBlock) (to, from netip.Addr, fromCidr bool, err error) {
  1331  	if string(block.Cidr) != "" {
  1332  		prefix, err := netip.ParsePrefix(string(block.Cidr))
  1333  		if err != nil {
  1334  			return netip.Addr{}, netip.Addr{}, false, fmt.Errorf("Error parsing cidr '%s': %w", block.Cidr, err)
  1335  		}
  1336  
  1337  		to, from = rangeFromPrefix(prefix)
  1338  		return to, from, true, nil
  1339  	}
  1340  
  1341  	from, err = netip.ParseAddr(block.Start)
  1342  	if err != nil {
  1343  		return netip.Addr{}, netip.Addr{}, false, fmt.Errorf("error parsing start ip '%s': %w", block.Start, err)
  1344  	}
  1345  	if block.Stop == "" {
  1346  		return from, from, false, nil
  1347  	}
  1348  
  1349  	to, err = netip.ParseAddr(block.Stop)
  1350  	if err != nil {
  1351  		return netip.Addr{}, netip.Addr{}, false, fmt.Errorf("error parsing stop ip '%s': %w", block.Stop, err)
  1352  	}
  1353  
  1354  	return from, to, false, nil
  1355  }
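
// Illustrative pool block examples (the YAML field names are assumptions inferred from the Go struct
// fields Cidr, Start and Stop above):
//
//	blocks:
//	- cidr: "10.0.10.0/30"     # expands to the range 10.0.10.0 - 10.0.10.3, fromCidr=true
//	- start: "10.0.20.10"
//	  stop: "10.0.20.20"       # explicit range, fromCidr=false
//	- start: "10.0.30.5"       # no stop: single-address range 10.0.30.5 - 10.0.30.5
//
// For CIDR-derived ranges, handleNewPool and handlePoolModified additionally reserve the first and
// last address when AllowFirstLastIPs is AllowFirstLastIPNo, unless the block is a /31, /32, /127
// or /128, where doing so would consume the whole range.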
  1356  
  1357  func (ipam *LBIPAM) handlePoolModified(ctx context.Context, pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
  1358  	changedAllowFirstLastIPs := false
  1359  	if existingPool, ok := ipam.pools[pool.GetName()]; ok {
  1360  		changedAllowFirstLastIPs = (existingPool.Spec.AllowFirstLastIPs == cilium_api_v2alpha1.AllowFirstLastIPNo) !=
  1361  			(pool.Spec.AllowFirstLastIPs == cilium_api_v2alpha1.AllowFirstLastIPNo)
  1362  	}
  1363  
  1364  	ipam.pools[pool.GetName()] = pool
  1365  
  1366  	type rng struct {
  1367  		from, to netip.Addr
  1368  		fromCidr bool
  1369  	}
  1370  	var newRanges []rng
  1371  	for _, newBlock := range pool.Spec.Blocks {
  1372  		from, to, fromCidr, err := ipRangeFromBlock(newBlock)
  1373  		if err != nil {
  1374  			return fmt.Errorf("Error parsing ip block: %w", err)
  1375  		}
  1376  
  1377  		newRanges = append(newRanges, rng{
  1378  			from:     from,
  1379  			to:       to,
  1380  			fromCidr: fromCidr,
  1381  		})
  1382  	}
  1383  
  1384  	existingRanges, _ := ipam.rangesStore.GetRangesForPool(pool.GetName())
  1385  	existingRanges = slices.Clone(existingRanges)
  1386  
  1387  	// Remove existing ranges that no longer exist
  1388  	for _, extRange := range existingRanges {
  1389  		found := false
  1390  		fromCidr := false
  1391  		for _, newRange := range newRanges {
  1392  			if extRange.EqualCIDR(newRange.from, newRange.to) {
  1393  				found = true
  1394  				fromCidr = newRange.fromCidr
  1395  				break
  1396  			}
  1397  		}
  1398  
  1399  		if found {
  1400  			// If the AllowFirstLastIPs state changed
  1401  			if fromCidr && changedAllowFirstLastIPs {
  1402  				if pool.Spec.AllowFirstLastIPs != cilium_api_v2alpha1.AllowFirstLastIPNo {
  1403  					// If we are allowing first and last IPs again, free them for allocation
  1404  					from, to := extRange.alloc.Range()
  1405  
  1406  					if !(from.Compare(to) == 0 || from.Next().Compare(to) == 0) {
  1407  						extRange.alloc.Free(from)
  1408  						extRange.alloc.Free(to)
  1409  					}
  1410  				} else {
  1411  					// If we are disallowing first and last IPs, alloc the first and last IP if they are not already allocated.
  1412  					// Note: This will not revoke IPs that are already allocated to services.
  1413  					from, to := extRange.alloc.Range()
  1414  
  1415  					// If the first and last IPs are the same or adjacent, we would reserve the entire range.
  1416  					// Only reserve first and last IPs for ranges /30 or /126 and larger.
  1417  					if !(from.Compare(to) == 0 || from.Next().Compare(to) == 0) {
  1418  						extRange.alloc.Alloc(from, nil)
  1419  						extRange.alloc.Alloc(to, nil)
  1420  					}
  1421  				}
  1422  			}
  1423  
  1424  			continue
  1425  		}
  1426  
  1427  		// Remove allocations from services if the ranges no longer exist
  1428  		ipam.rangesStore.Delete(extRange)
  1429  		err := ipam.deleteRangeAllocations(ctx, extRange)
  1430  		if err != nil {
  1431  			return fmt.Errorf("deleteRangeAllocations: %w", err)
  1432  		}
  1433  	}
  1434  
  1435  	// Add new ranges that were added
  1436  	for _, newRange := range newRanges {
  1437  		found := false
  1438  		for _, extRange := range existingRanges {
  1439  			if extRange.EqualCIDR(newRange.from, newRange.to) {
  1440  				found = true
  1441  				break
  1442  			}
  1443  		}
  1444  
  1445  		if found {
  1446  			continue
  1447  		}
  1448  
  1449  		newLBRange, err := NewLBRange(newRange.from, newRange.to, pool)
  1450  		if err != nil {
  1451  			return fmt.Errorf("Error while making new LB range for range '%s - %s': %w", newRange.from, newRange.to, err)
  1452  		}
  1453  
  1454  		// If AllowFirstLastIPs is no, mark the first and last IP as allocated upon range creation.
  1455  		if newRange.fromCidr && pool.Spec.AllowFirstLastIPs == cilium_api_v2alpha1.AllowFirstLastIPNo {
  1456  			from, to := newLBRange.alloc.Range()
  1457  
  1458  			// If the first and last IPs are the same or adjacent, we would reserve the entire range.
  1459  			// Only reserve first and last IPs for ranges /30 or /126 and larger.
  1460  			if !(from.Compare(to) == 0 || from.Next().Compare(to) == 0) {
  1461  				newLBRange.alloc.Alloc(from, nil)
  1462  				newLBRange.alloc.Alloc(to, nil)
  1463  			}
  1464  		}
  1465  
  1466  		ipam.rangesStore.Add(newLBRange)
  1467  	}
  1468  
  1469  	existingRanges, _ = ipam.rangesStore.GetRangesForPool(pool.GetName())
  1470  	for _, extRange := range existingRanges {
  1471  		extRange.externallyDisabled = pool.Spec.Disabled
  1472  	}
  1473  
  1474  	// This is a heavy operation, but pool modification should happen rarely
  1475  	err := ipam.revalidateAllServices(ctx)
  1476  	if err != nil {
  1477  		return fmt.Errorf("revalidateAllServices: %w", err)
  1478  	}
  1479  
  1480  	return nil
  1481  }
  1482  
  1483  func (ipam *LBIPAM) revalidateAllServices(ctx context.Context) error {
  1484  	revalidate := func(sv *ServiceView) error {
  1485  		err := ipam.stripInvalidAllocations(sv)
  1486  		if err != nil {
  1487  			return fmt.Errorf("stripInvalidAllocations: %w", err)
  1488  		}
  1489  
  1490  		// For each ingress, check if its IP has been allocated by us. If it isn't, check if we can allocate that IP.
  1491  		// If we can't, strip the ingress from the service.
  1492  		svModifiedStatus, err := ipam.stripOrImportIngresses(sv)
  1493  		if err != nil {
  1494  			return fmt.Errorf("stripOrImportIngresses: %w", err)
  1495  		}
  1496  
  1497  		// Attempt to satisfy this service in particular now. We do this now instead of relying on
  1498  		// ipam.satisfyServices to avoid updating the service twice in quick succession.
  1499  		if !sv.isSatisfied() {
  1500  			modified, err := ipam.satisfyService(sv)
  1501  			if err != nil {
  1502  				return fmt.Errorf("satisfyService: %w", err)
  1503  			}
  1504  			if modified {
  1505  				svModifiedStatus = true
  1506  			}
  1507  		}
  1508  
  1509  		// If any of the steps above changed the service object, update the object.
  1510  		if svModifiedStatus {
  1511  			err := ipam.patchSvcStatus(ctx, sv)
  1512  			if err != nil {
  1513  				return fmt.Errorf("patchSvcStatus: %w", err)
  1514  			}
  1515  		}
  1516  
  1517  		ipam.serviceStore.Upsert(sv)
  1518  
  1519  		return nil
  1520  	}
  1521  	for _, sv := range ipam.serviceStore.unsatisfied {
  1522  		if err := revalidate(sv); err != nil {
  1523  			return fmt.Errorf("revalidate: %w", err)
  1524  		}
  1525  	}
  1526  
  1527  	for _, sv := range ipam.serviceStore.satisfied {
  1528  		if err := revalidate(sv); err != nil {
  1529  			return fmt.Errorf("revalidate: %w", err)
  1530  		}
  1531  	}
  1532  
  1533  	return nil
  1534  }
  1535  
  1536  func (ipam *LBIPAM) updateAllPoolCounts(ctx context.Context) error {
  1537  	ipam.logger.Debug("Updating pool counts")
  1538  	for _, pool := range ipam.pools {
  1539  		if ipam.updatePoolCounts(pool) {
  1540  			ipam.logger.Debugf("Pool counts of '%s' changed, patching", pool.Name)
  1541  			err := ipam.patchPoolStatus(ctx, pool)
  1542  			if err != nil {
  1543  				return fmt.Errorf("patchPoolStatus: %w", err)
  1544  			}
  1545  		}
  1546  	}
  1547  
  1548  	ipam.metrics.MatchingServices.Set(float64(len(ipam.serviceStore.satisfied) + len(ipam.serviceStore.unsatisfied)))
  1549  	ipam.metrics.UnsatisfiedServices.Set(float64(len(ipam.serviceStore.unsatisfied)))
  1550  
  1551  	return nil
  1552  }
  1553  
  1554  func (ipam *LBIPAM) updatePoolCounts(pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) (modifiedPoolStatus bool) {
  1555  	ranges, _ := ipam.rangesStore.GetRangesForPool(pool.GetName())
  1556  
  1557  	type IPCounts struct {
  1558  		// Total is the total number of allocatable IPs
  1559  		Total *big.Int
  1560  		// Available is the number of IPs which can still be allocated
  1561  		Available *big.Int
  1562  		// Used is the number of IPs that are currently allocated
  1563  		Used uint64
  1564  	}
  1565  
  1566  	totalCounts := IPCounts{
  1567  		Total:     big.NewInt(0),
  1568  		Available: big.NewInt(0),
  1569  	}
  1570  	for _, lbRange := range ranges {
  1571  		used, available := lbRange.alloc.Stats()
  1572  
  1573  		totalCounts.Total = totalCounts.Total.Add(totalCounts.Total, available)
  1574  
  1575  		// big.NewInt wants an int64, but we have a uint64; converting with int64(used) could overflow
  1576  		// into negative numbers. So shift down by 1 bit so the sign bit is always 0, then convert to a bigint.
  1577  		// Multiply by two once it is a bigint to reverse the bitshift, and add 1 if the dropped bit was 1.
  1578  		// This gives a lossless conversion.
  1579  		half := int64(used >> 1)
  1580  		bigUsed := big.NewInt(0).Mul(big.NewInt(half), big.NewInt(2))
  1581  		if used%2 == 1 {
  1582  			bigUsed.Add(bigUsed, big.NewInt(1))
  1583  		}
  1584  		totalCounts.Total = totalCounts.Total.Add(totalCounts.Total, bigUsed)
  1585  
  1586  		totalCounts.Available = totalCounts.Available.Add(totalCounts.Available, available)
  1587  		totalCounts.Used += used
  1588  	}
  1589  
  1590  	if ipam.setPoolCondition(pool, ciliumPoolIPsTotalCondition, meta_v1.ConditionUnknown, "noreason", totalCounts.Total.String()) ||
  1591  		ipam.setPoolCondition(pool, ciliumPoolIPsAvailableCondition, meta_v1.ConditionUnknown, "noreason", totalCounts.Available.String()) ||
  1592  		ipam.setPoolCondition(pool, ciliumPoolIPsUsedCondition, meta_v1.ConditionUnknown, "noreason", strconv.FormatUint(totalCounts.Used, 10)) {
  1593  		modifiedPoolStatus = true
  1594  	}
  1595  
  1596  	available, _ := new(big.Float).SetInt(totalCounts.Available).Float64()
  1597  	ipam.metrics.AvailableIPs.WithLabelValues(pool.Name).Set(available)
  1598  	ipam.metrics.UsedIPs.WithLabelValues(pool.Name).Set(float64(totalCounts.Used))
  1599  
  1600  	return modifiedPoolStatus
  1601  }
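
// A standalone sketch of the lossless uint64 -> *big.Int conversion used in
// updatePoolCounts above. uint64ToBigIntSketch is a hypothetical helper for
// illustration only; (*big.Int).SetUint64 would achieve the same result
// directly.
func uint64ToBigIntSketch(v uint64) *big.Int {
	// Drop the lowest bit so the value fits into an int64 without turning
	// negative, then undo the shift (and re-add the dropped bit) using
	// big.Int arithmetic.
	half := int64(v >> 1)
	out := new(big.Int).Mul(big.NewInt(half), big.NewInt(2))
	if v%2 == 1 {
		out.Add(out, big.NewInt(1))
	}
	return out
}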
  1602  
  1603  func (ipam *LBIPAM) setPoolCondition(
  1604  	pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool,
  1605  	condType string,
  1606  	status meta_v1.ConditionStatus,
  1607  	reason, message string,
  1608  ) (statusModified bool) {
  1609  	// Don't trigger an update if the condition is already applied
  1610  
  1611  	if cond := meta.FindStatusCondition(pool.Status.Conditions, condType); cond != nil &&
  1612  		cond.Status == status &&
  1613  		cond.ObservedGeneration == pool.Generation &&
  1614  		cond.Reason == reason &&
  1615  		cond.Message == message {
  1616  		return false
  1617  	}
  1618  
  1619  	meta.SetStatusCondition(&pool.Status.Conditions, meta_v1.Condition{
  1620  		Type:               condType,
  1621  		Status:             status,
  1622  		ObservedGeneration: pool.Generation,
  1623  		LastTransitionTime: meta_v1.Now(),
  1624  		Reason:             reason,
  1625  		Message:            message,
  1626  	})
  1627  	return true
  1628  }
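
// A brief usage sketch of the apimachinery condition helpers that
// setPoolCondition builds on; exampleConditionHelpers is a hypothetical
// function and the condition values are illustrative.
func exampleConditionHelpers() bool {
	conds := []meta_v1.Condition{}
	// SetStatusCondition appends the condition, or updates it in place if a
	// condition with the same Type already exists.
	meta.SetStatusCondition(&conds, meta_v1.Condition{
		Type:    ciliumPoolIPsUsedCondition,
		Status:  meta_v1.ConditionUnknown,
		Reason:  "noreason",
		Message: "0",
	})
	// FindStatusCondition returns nil when the condition type is absent, which
	// is why setPoolCondition treats a nil result as "needs update".
	return meta.FindStatusCondition(conds, ciliumPoolIPsTotalCondition) == nil
}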
  1629  
  1630  // deleteRangeAllocations removes, from all services, the IP allocations that originate from the given range.
  1631  func (ipam *LBIPAM) deleteRangeAllocations(ctx context.Context, delRange *LBRange) error {
  1632  	delAllocs := func(sv *ServiceView) error {
  1633  		svModified := false
  1634  		for i := len(sv.AllocatedIPs) - 1; i >= 0; i-- {
  1635  			alloc := sv.AllocatedIPs[i]
  1636  
  1637  			if alloc.Origin == delRange {
  1638  				sv.AllocatedIPs = slices.Delete(sv.AllocatedIPs, i, i+1)
  1639  				svModified = true
  1640  			}
  1641  		}
  1642  
  1643  		if !svModified {
  1644  			return nil
  1645  		}
  1646  
  1647  		// Check for each ingress whether its IP has been allocated by us. If it hasn't been, check if we can allocate that IP.
  1648  		// If we can't, strip the ingress from the service.
  1649  		svModifiedStatus, err := ipam.stripOrImportIngresses(sv)
  1650  		if err != nil {
  1651  			return fmt.Errorf("stripOrImportIngresses: %w", err)
  1652  		}
  1653  
  1654  		// Attempt to satisfy this service in particular now. We do this now instead of relying on
  1655  		// ipam.satisfyServices to avoid updating the service twice in quick succession.
  1656  		if !sv.isSatisfied() {
  1657  			statusModified, err := ipam.satisfyService(sv)
  1658  			if err != nil {
  1659  				return fmt.Errorf("satisfyService: %w", err)
  1660  			}
  1661  			if statusModified {
  1662  				svModifiedStatus = true
  1663  			}
  1664  		}
  1665  
  1666  		// If any of the steps above changed the service object, update the object.
  1667  		if svModifiedStatus {
  1668  			err := ipam.patchSvcStatus(ctx, sv)
  1669  			if err != nil {
  1670  				return fmt.Errorf("patchSvcStatus: %w", err)
  1671  			}
  1672  		}
  1673  
  1674  		ipam.serviceStore.Upsert(sv)
  1675  
  1676  		return nil
  1677  	}
  1678  	for _, sv := range ipam.serviceStore.unsatisfied {
  1679  		if err := delAllocs(sv); err != nil {
  1680  			return fmt.Errorf("delAllocs: %w", err)
  1681  		}
  1682  	}
  1683  	for _, sv := range ipam.serviceStore.satisfied {
  1684  		if err := delAllocs(sv); err != nil {
  1685  			return fmt.Errorf("delAllocs: %w", err)
  1686  		}
  1687  	}
  1688  
  1689  	return nil
  1690  }
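
// A minimal sketch of the reverse-iteration pattern used in delAllocs above:
// deleting elements while walking a slice front to back would skip the entry
// that shifts into the freed index, so the loop runs from the end instead.
// removeMatchingSketch is a hypothetical generic helper for illustration.
func removeMatchingSketch[T comparable](items []T, drop T) []T {
	for i := len(items) - 1; i >= 0; i-- {
		if items[i] == drop {
			items = slices.Delete(items, i, i+1)
		}
	}
	return items
}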
  1691  
  1692  func (ipam *LBIPAM) handlePoolDeleted(ctx context.Context, pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
  1693  	delete(ipam.pools, pool.GetName())
  1694  
  1695  	ipam.metrics.AvailableIPs.DeleteLabelValues(pool.Name)
  1696  	ipam.metrics.UsedIPs.DeleteLabelValues(pool.Name)
  1697  
  1698  	poolRanges, _ := ipam.rangesStore.GetRangesForPool(pool.GetName())
  1699  	for _, poolRange := range poolRanges {
  1700  		// Remove allocations from services now that the range no longer exists
  1701  		ipam.rangesStore.Delete(poolRange)
  1702  		err := ipam.deleteRangeAllocations(ctx, poolRange)
  1703  		if err != nil {
  1704  			return fmt.Errorf("deleteRangeAllocations: %w", err)
  1705  		}
  1706  	}
  1707  
  1708  	return nil
  1709  }
  1710  
  1711  func isPoolConflicting(pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) bool {
  1712  	return meta.IsStatusConditionTrue(pool.Status.Conditions, ciliumPoolConflict)
  1713  }
  1714  
  1715  // settleConflicts checks if there are any unresolved conflicts between the ranges of IP pools and resolves them.
  1716  // Secondly, it checks if any pools that are marked as conflicting have since been resolved.
  1717  // Any conflicts that are found are reflected in the IP pool's status.
  1718  func (ipam *LBIPAM) settleConflicts(ctx context.Context) error {
  1719  	ipam.logger.Debug("Settling pool conflicts")
  1720  
  1721  	// Mark any pools that conflict as conflicting
  1722  	for _, poolOuter := range ipam.pools {
  1723  		if isPoolConflicting(poolOuter) {
  1724  			continue
  1725  		}
  1726  
  1727  		outerRanges, _ := ipam.rangesStore.GetRangesForPool(poolOuter.GetName())
  1728  
  1729  		if conflicting, rangeA, rangeB := areRangesInternallyConflicting(outerRanges); conflicting {
  1730  			err := ipam.markPoolConflicting(ctx, poolOuter, poolOuter, rangeA, rangeB)
  1731  			if err != nil {
  1732  				return fmt.Errorf("markPoolConflicting: %w", err)
  1733  			}
  1734  			continue
  1735  		}
  1736  
  1737  		for _, poolInner := range ipam.pools {
  1738  			if poolOuter.GetName() == poolInner.GetName() {
  1739  				continue
  1740  			}
  1741  
  1742  			if isPoolConflicting(poolInner) {
  1743  				continue
  1744  			}
  1745  
  1746  			innerRanges, _ := ipam.rangesStore.GetRangesForPool(poolInner.GetName())
  1747  			if conflicting, outerRange, innerRange := areRangesConflicting(outerRanges, innerRanges); conflicting {
  1748  				// If two pools conflict, disable/mark the newer of the two pools
  1749  
  1750  				if poolOuter.CreationTimestamp.Before(&poolInner.CreationTimestamp) {
  1751  					err := ipam.markPoolConflicting(ctx, poolInner, poolOuter, innerRange, outerRange)
  1752  					if err != nil {
  1753  						return fmt.Errorf("markPoolConflicting: %w", err)
  1754  					}
  1755  					break
  1756  				}
  1757  
  1758  				err := ipam.markPoolConflicting(ctx, poolOuter, poolInner, outerRange, innerRange)
  1759  				if err != nil {
  1760  					return fmt.Errorf("markPoolConflicting: %w", err)
  1761  				}
  1762  				break
  1763  			}
  1764  		}
  1765  	}
  1766  
  1767  	// Un-mark pools that no longer conflict
  1768  	for _, poolOuter := range ipam.pools {
  1769  		if !isPoolConflicting(poolOuter) {
  1770  			continue
  1771  		}
  1772  
  1773  		outerRanges, _ := ipam.rangesStore.GetRangesForPool(poolOuter.GetName())
  1774  
  1775  		// If the pool is still internally conflicting, don't un-mark
  1776  		if conflicting, _, _ := areRangesInternallyConflicting(outerRanges); conflicting {
  1777  			continue
  1778  		}
  1779  
  1780  		poolConflict := false
  1781  		for _, poolInner := range ipam.pools {
  1782  			if poolOuter.GetName() == poolInner.GetName() {
  1783  				continue
  1784  			}
  1785  
  1786  			innerRanges, _ := ipam.rangesStore.GetRangesForPool(poolInner.GetName())
  1787  			if conflicting, _, _ := areRangesConflicting(outerRanges, innerRanges); conflicting {
  1788  				poolConflict = true
  1789  				break
  1790  			}
  1791  		}
  1792  
  1793  		// The outer pool, which is marked conflicting, no longer conflicts with any other pool
  1794  		if !poolConflict {
  1795  			err := ipam.unmarkPool(ctx, poolOuter)
  1796  			if err != nil {
  1797  				return fmt.Errorf("unmarkPool: %w", err)
  1798  			}
  1799  		}
  1800  	}
  1801  
  1802  	return nil
  1803  }
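
// markNewerOfTwo returns the name of the pool that settleConflicts would mark
// as conflicting: the one with the later creation timestamp. This is a
// hypothetical helper for illustration; in the real code the decision is made
// inline on the pools' ObjectMeta timestamps.
func markNewerOfTwo(nameA string, createdA meta_v1.Time, nameB string, createdB meta_v1.Time) string {
	if createdA.Before(&createdB) {
		// Pool A is older; the newer pool B gets marked.
		return nameB
	}
	return nameA
}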
  1804  
  1805  // markPoolConflicting marks the targetPool as "Conflicting" in its status and disables all of its ranges internally.
  1806  func (ipam *LBIPAM) markPoolConflicting(
  1807  	ctx context.Context,
  1808  	targetPool, collisionPool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool,
  1809  	targetRange, collisionRange *LBRange,
  1810  ) error {
  1811  	// If the target pool is already marked conflicting, then there is no need to re-add the condition
  1812  	if isPoolConflicting(targetPool) {
  1813  		return nil
  1814  	}
  1815  
  1816  	ipam.metrics.ConflictingPools.Inc()
  1817  
  1818  	ipam.logger.WithFields(logrus.Fields{
  1819  		"pool1-name":  targetPool.Name,
  1820  		"pool1-range": ipNetStr(targetRange),
  1821  		"pool2-name":  collisionPool.Name,
  1822  		"pool2-range": ipNetStr(collisionRange),
  1823  	}).Warnf("Pool '%s' conflicts since range '%s' overlaps range '%s' from IP Pool '%s'",
  1824  		targetPool.Name,
  1825  		ipNetStr(targetRange),
  1826  		ipNetStr(collisionRange),
  1827  		collisionPool.Name,
  1828  	)
  1829  
  1830  	conflictMessage := fmt.Sprintf(
  1831  		"Pool conflicts since range '%s' overlaps range '%s' from IP Pool '%s'",
  1832  		ipNetStr(targetRange),
  1833  		ipNetStr(collisionRange),
  1834  		collisionPool.Name,
  1835  	)
  1836  
  1837  	// Mark all ranges of the pool as internally disabled so we will not allocate from them.
  1838  	targetPoolRanges, _ := ipam.rangesStore.GetRangesForPool(targetPool.GetName())
  1839  	for _, poolRange := range targetPoolRanges {
  1840  		poolRange.internallyDisabled = true
  1841  	}
  1842  
  1843  	if ipam.setPoolCondition(targetPool, ciliumPoolConflict, meta_v1.ConditionTrue, "cidr_overlap", conflictMessage) {
  1844  		err := ipam.patchPoolStatus(ctx, targetPool)
  1845  		if err != nil {
  1846  			return fmt.Errorf("patchPoolStatus: %w", err)
  1847  		}
  1848  	}
  1849  
  1850  	return nil
  1851  }
  1852  
  1853  // unmarkPool removes the "Conflicting" status from the pool and removes the internally disabled flag from its ranges
  1854  func (ipam *LBIPAM) unmarkPool(ctx context.Context, targetPool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
  1855  	// Re-enable all ranges of the pool
  1856  	targetPoolRanges, _ := ipam.rangesStore.GetRangesForPool(targetPool.GetName())
  1857  	for _, poolRange := range targetPoolRanges {
  1858  		poolRange.internallyDisabled = false
  1859  	}
  1860  
  1861  	ipam.metrics.ConflictingPools.Dec()
  1862  
  1863  	if ipam.setPoolCondition(targetPool, ciliumPoolConflict, meta_v1.ConditionFalse, "resolved", "") {
  1864  		err := ipam.patchPoolStatus(ctx, targetPool)
  1865  		if err != nil {
  1866  			return fmt.Errorf("patchPoolStatus: %w", err)
  1867  		}
  1868  	}
  1869  
  1870  	return nil
  1871  }
  1872  
  1873  func (ipam *LBIPAM) patchSvcStatus(ctx context.Context, sv *ServiceView) error {
  1874  	replaceSvcStatus := []k8s.JSONPatch{
  1875  		{
  1876  			OP:    "replace",
  1877  			Path:  "/status",
  1878  			Value: sv.Status,
  1879  		},
  1880  	}
  1881  
  1882  	createStatusPatch, err := json.Marshal(replaceSvcStatus)
  1883  	if err != nil {
  1884  		return fmt.Errorf("json.Marshal(%v) failed: %w", replaceSvcStatus, err)
  1885  	}
  1886  
  1887  	_, err = ipam.svcClient.Services(sv.Key.Namespace).Patch(ctx, sv.Key.Name,
  1888  		types.JSONPatchType, createStatusPatch, meta_v1.PatchOptions{
  1889  			FieldManager: ciliumFieldManager,
  1890  		}, "status")
  1891  
  1892  	return err
  1893  }
  1894  
  1895  func (ipam *LBIPAM) patchPoolStatus(ctx context.Context, pool *cilium_api_v2alpha1.CiliumLoadBalancerIPPool) error {
  1896  	replacePoolStatus := []k8s.JSONPatch{
  1897  		{
  1898  			OP:    "replace",
  1899  			Path:  "/status",
  1900  			Value: pool.Status,
  1901  		},
  1902  	}
  1903  
  1904  	createStatusPatch, err := json.Marshal(replacePoolStatus)
  1905  	if err != nil {
  1906  		return fmt.Errorf("json.Marshal(%v) failed: %w", replacePoolStatus, err)
  1907  	}
  1908  
  1909  	_, err = ipam.poolClient.Patch(ctx, pool.Name,
  1910  		types.JSONPatchType, createStatusPatch, meta_v1.PatchOptions{
  1911  			FieldManager: ciliumFieldManager,
  1912  		}, "status")
  1913  
  1914  	return err
  1915  }
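
// examplePatchPayload sketches the wire format of the status patches built in
// patchSvcStatus and patchPoolStatus: a single JSON Patch "replace" operation
// on /status, i.e. [{"op":"replace","path":"/status","value":{...}}]. It is a
// hypothetical helper built from plain maps so the shape is visible without
// relying on the k8s.JSONPatch struct tags.
func examplePatchPayload(status any) ([]byte, error) {
	patch := []map[string]any{
		{"op": "replace", "path": "/status", "value": status},
	}
	return json.Marshal(patch)
}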
  1916  
  1917  func isIPv6(ip netip.Addr) bool {
  1918  	return ip.BitLen() == 128
  1919  }
  1920  
  1921  func rangeFromPrefix(prefix netip.Prefix) (netip.Addr, netip.Addr) {
  1922  	prefix = prefix.Masked()
  1923  	return prefix.Addr(), netipx.PrefixLastIP(prefix)
  1924  }
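
// A usage sketch for the two helpers above; the prefix is an arbitrary
// example value and exampleHelpers is a hypothetical function.
func exampleHelpers() (netip.Addr, netip.Addr, bool) {
	// For 10.0.0.0/30, rangeFromPrefix yields 10.0.0.0 and 10.0.0.3, and
	// isIPv6 reports false for both addresses.
	from, to := rangeFromPrefix(netip.MustParsePrefix("10.0.0.0/30"))
	return from, to, isIPv6(from)
}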