istio.io/istio@v0.0.0-20240520182934-d79c90f27776/pilot/pkg/xds/ads.go

// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package xds

import (
	"fmt"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
	uatomic "go.uber.org/atomic"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"

	"istio.io/api/label"
	"istio.io/istio/pilot/pkg/autoregistration"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/networking/util"
	labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
	v3 "istio.io/istio/pilot/pkg/xds/v3"
	"istio.io/istio/pkg/cluster"
	"istio.io/istio/pkg/config/schema/kind"
	"istio.io/istio/pkg/env"
	"istio.io/istio/pkg/util/sets"
	"istio.io/istio/pkg/xds"
)

var (
	log = xds.Log

	// Tracks connections; incremented on each new connection.
	connectionNumber = int64(0)
)

// Used only when running in Knative, to handle its load balancing behavior.
var firstRequest = uatomic.NewBool(true)

var knativeEnv = env.Register("K_REVISION", "",
	"Knative revision, set if running in Knative").Get()

// DiscoveryStream is a server interface for XDS.
type DiscoveryStream = xds.DiscoveryStream

// DeltaDiscoveryStream is a server interface for Delta XDS.
type DeltaDiscoveryStream = discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesServer

// DiscoveryClient is a client interface for XDS.
type DiscoveryClient = discovery.AggregatedDiscoveryService_StreamAggregatedResourcesClient

// DeltaDiscoveryClient is a client interface for Delta XDS.
type DeltaDiscoveryClient = discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesClient

type Connection struct {
	xds.Connection

	// Original node metadata, to avoid unmarshal/marshal.
	// This is included in internal events.
	node *core.Node

	// proxy is the client to which this connection is established.
	proxy *model.Proxy

	// deltaStream is used for Delta XDS. Only one of deltaStream or stream will be set
	deltaStream DeltaDiscoveryStream

	deltaReqChan chan *discovery.DeltaDiscoveryRequest

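	// s is the DiscoveryServer that owns this connection, and ids holds the
	// authenticated identities of the peer, as established during stream setup.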
	s   *DiscoveryServer
	ids []string
}

func (conn *Connection) XdsConnection() *xds.Connection {
	return &conn.Connection
}

func (conn *Connection) Proxy() *model.Proxy {
	return conn.proxy
}

// Event represents a config or registry event that results in a push.
type Event struct {
	// pushRequest is the PushRequest to use for the push.
	pushRequest *model.PushRequest

	// done is the function to call once a push is finished. This must be called, or future changes may be blocked.
	done func()
}

func newConnection(peerAddr string, stream DiscoveryStream) *Connection {
	return &Connection{
		Connection: xds.NewConnection(peerAddr, stream),
	}
}

func (conn *Connection) Initialize(node *core.Node) error {
	return conn.s.initConnection(node, conn, conn.ids)
}

func (conn *Connection) Close() {
	conn.s.closeConnection(conn)
}

func (conn *Connection) Watcher() xds.Watcher {
	return conn.proxy
}

func (conn *Connection) Process(req *discovery.DiscoveryRequest) error {
	return conn.s.processRequest(req, conn)
}

func (conn *Connection) Push(ev any) error {
	pushEv := ev.(*Event)
	err := conn.s.pushConnection(conn, pushEv)
	pushEv.done()
	return err
}

// processRequest handles one discovery request. It is currently called from the 'main' thread, which also
// handles 'push' requests and connection close. Because this code eventually calls into the 'push' path, it
// needs mutex protection; the original code avoided the mutexes by doing both 'push' and 'process request'
// in the same thread.
func (s *DiscoveryServer) processRequest(req *discovery.DiscoveryRequest, con *Connection) error {
	stype := v3.GetShortType(req.TypeUrl)
	log.Debugf("ADS:%s: REQ %s resources:%d nonce:%s version:%s ", stype,
		con.ID(), len(req.ResourceNames), req.ResponseNonce, req.VersionInfo)
	if req.TypeUrl == v3.HealthInfoType {
		s.handleWorkloadHealthcheck(con.proxy, req)
		return nil
	}

	// For now, don't let debug requests piggybacked on xDS start watchers.
	if strings.HasPrefix(req.TypeUrl, v3.DebugType) {
		return s.pushXds(con,
			&model.WatchedResource{TypeUrl: req.TypeUrl, ResourceNames: req.ResourceNames},
			&model.PushRequest{Full: true, Push: con.proxy.LastPushContext})
	}

	if s.StatusReporter != nil {
		s.StatusReporter.RegisterEvent(con.ID(), req.TypeUrl, req.ResponseNonce)
	}

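	// xds.ShouldRespond classifies the request. A rough, illustrative sketch of the
	// cases (the authoritative logic lives in pkg/xds):
	//
	//	ErrorDetail set                     -> NACK of a previous push: don't respond
	//	empty ResponseNonce                 -> initial request: respond
	//	nonce does not match the last sent  -> stale request: don't respond
	//	nonce matches, same resource names  -> ACK: don't respond
	//	nonce matches, names changed        -> subscription change: respond (delta carries the diff)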
	shouldRespond, delta := xds.ShouldRespond(con.proxy, con.ID(), req)
	if !shouldRespond {
		return nil
	}

	request := &model.PushRequest{
		Full:   true,
		Push:   con.proxy.LastPushContext,
		Reason: model.NewReasonStats(model.ProxyRequest),

		// The usage of LastPushTime (rather than time.Now()) is critical here for correctness; this time
		// is used by the XDS cache to determine if an entry is stale. If we use Now() with an old push context,
		// we may end up overriding active cache entries with stale ones.
		Start: con.proxy.LastPushTime,
		Delta: delta,
	}

	// The SidecarScope for the proxy may not have been updated based on this push context.
	// This can happen when `processRequest` comes after the push context has been updated (s.initPushContext)
	// but before the proxy's SidecarScope has been updated (s.computeProxyState -> SetSidecarScope), due to
	// optimizations that skip sidecar scope computation.
	if con.proxy.SidecarScope != nil && con.proxy.SidecarScope.Version != request.Push.PushVersion {
		s.computeProxyState(con.proxy, request)
	}
	return s.pushXds(con, con.proxy.GetWatchedResource(req.TypeUrl), request)
}

// StreamAggregatedResources implements the ADS interface.
func (s *DiscoveryServer) StreamAggregatedResources(stream DiscoveryStream) error {
	return s.Stream(stream)
}

func (s *DiscoveryServer) Stream(stream DiscoveryStream) error {
	if knativeEnv != "" && firstRequest.Load() {
		// In Knative, the first request is the "loading" request, during which
		// concurrency=1. Once that request is done, concurrency is enabled.
		// However, the XDS stream is long-lived, so the first request would block all others. As a
		// result, we should exit the first request immediately; clients will retry.
		firstRequest.Store(false)
		return status.Error(codes.Unavailable, "server warmup not complete; try again")
	}
	// Check if the server is ready to accept clients and process new requests.
	// Currently, ready means caches have been synced and hence can build
	// clusters correctly. Without this check, the InitContext() call below would
	// initialize with empty config, leading to reconnected Envoys losing
	// configuration. This is an additional safety check, on top of the
	// cachesSynced logic in the readiness probe, to handle cases of kube-proxy
	// iptables update latency.
	// See https://github.com/istio/istio/issues/25495.
	if !s.IsServerReady() {
		return status.Error(codes.Unavailable, "server is not ready to serve discovery information")
	}

	ctx := stream.Context()
	peerAddr := "0.0.0.0"
	if peerInfo, ok := peer.FromContext(ctx); ok {
		peerAddr = peerInfo.Addr.String()
	}

	if err := s.WaitForRequestLimit(stream.Context()); err != nil {
		log.Warnf("ADS: %q exceeded rate limit: %v", peerAddr, err)
		return status.Errorf(codes.ResourceExhausted, "request rate limit exceeded: %v", err)
	}

	ids, err := s.authenticate(ctx)
	if err != nil {
		return status.Error(codes.Unauthenticated, err.Error())
	}
	if ids != nil {
		log.Debugf("Authenticated XDS: %v with identity %v", peerAddr, ids)
	} else {
		log.Debugf("Unauthenticated XDS: %s", peerAddr)
	}

	// InitContext returns immediately if the context was already initialized.
	if err = s.globalPushContext().InitContext(s.Env, nil, nil); err != nil {
		// Error accessing the data - log and close; maybe a different pilot replica
		// will have more luck.
		log.Warnf("Error reading config %v", err)
		return status.Error(codes.Unavailable, "error reading config")
	}
	con := newConnection(peerAddr, stream)
	con.ids = ids
	con.s = s
	return xds.Stream(con)
}
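
// A minimal client-side sketch of opening the ADS stream that Stream serves
// (illustrative only; error handling is elided and pilotAddr/creds are assumed):
//
//	conn, _ := grpc.Dial(pilotAddr, grpc.WithTransportCredentials(creds))
//	ads := discovery.NewAggregatedDiscoveryServiceClient(conn)
//	stream, _ := ads.StreamAggregatedResources(context.Background())
//	_ = stream.Send(&discovery.DiscoveryRequest{
//		TypeUrl: v3.ClusterType,
//		Node:    &core.Node{Id: "sidecar~10.0.0.1~app-abc.default~default.svc.cluster.local"},
//	})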

// initConnection updates the node associated with the connection, after receiving a packet from Envoy,
// and also adds the connection to the tracking map.
func (s *DiscoveryServer) initConnection(node *core.Node, con *Connection, identities []string) error {
	// Setup the initial proxy metadata
	proxy, err := s.initProxyMetadata(node)
	if err != nil {
		return err
	}
	// Check if the proxy cluster has an alias configured; if so, use that as the cluster ID for this proxy.
	if alias, exists := s.ClusterAliases[proxy.Metadata.ClusterID]; exists {
		proxy.Metadata.ClusterID = alias
	}
	// To ensure the push context is monotonically increasing, set up LastPushContext before we addCon. This
	// way only new push contexts will be registered for this proxy.
	proxy.LastPushContext = s.globalPushContext()
	// First request, so initialize the connection ID and start tracking it.
	con.SetID(connectionID(proxy.ID))
	con.node = node
	con.proxy = proxy
	if proxy.IsZTunnel() && !features.EnableAmbient {
		return fmt.Errorf("ztunnel requires PILOT_ENABLE_AMBIENT=true")
	}

	// Authorize xds clients
	if err := s.authorize(con, identities); err != nil {
		return err
	}

	// Register the connection. This allows pushes to be triggered for the proxy. Note: the timing of
	// this and initializeProxy is important. While registering for pushes *after* initialization is complete
	// seems like a better choice, it introduces a race condition: if we complete initialization of a new push
	// context between initializeProxy and addCon, we would not get any pushes triggered for the new
	// push context, leaving the proxy with stale state until the next full push.
	s.addCon(con.ID(), con)
	// Register that initialization is complete. This signals to callers that it is safe to access the
	// proxy.
	defer con.MarkInitialized()

	// Complete full initialization of the proxy
	if err := s.initializeProxy(con); err != nil {
		s.closeConnection(con)
		return err
	}
	return nil
}

func (s *DiscoveryServer) closeConnection(con *Connection) {
	if con.ID() == "" {
		return
	}
	s.removeCon(con.ID())
	if s.StatusReporter != nil {
		s.StatusReporter.RegisterDisconnect(con.ID(), AllTrackingEventTypes)
	}
	s.WorkloadEntryController.OnDisconnect(con)
}

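// connectionID returns a unique ID for a connection by appending a process-wide,
// monotonically increasing counter to the node ID, e.g. (hypothetical node ID):
//
//	connectionID("sidecar~10.0.0.1~app-abc.default~default.svc.cluster.local")
//	// "sidecar~10.0.0.1~app-abc.default~default.svc.cluster.local-7"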
func connectionID(node string) string {
	id := atomic.AddInt64(&connectionNumber, 1)
	return node + "-" + strconv.FormatInt(id, 10)
}

// ResetConnectionNumberForTest resets the connection counter; only used in tests.
func ResetConnectionNumberForTest() {
	atomic.StoreInt64(&connectionNumber, 0)
}

// initProxyMetadata initializes just the basic metadata of a proxy. This is decoupled from
// initializeProxy such that we can perform authorization before attempting expensive computations to
// fully initialize the proxy.
func (s *DiscoveryServer) initProxyMetadata(node *core.Node) (*model.Proxy, error) {
	meta, err := model.ParseMetadata(node.Metadata)
	if err != nil {
		return nil, status.New(codes.InvalidArgument, err.Error()).Err()
	}
	proxy, err := model.ParseServiceNodeWithMetadata(node.Id, meta)
	if err != nil {
		return nil, status.New(codes.InvalidArgument, err.Error()).Err()
	}
	// Update the config namespace associated with this proxy
	proxy.ConfigNamespace = model.GetProxyConfigNamespace(proxy)
	proxy.XdsNode = node
	return proxy, nil
}

// setTopologyLabels sets the locality, cluster, and network labels.
// It must be called after `SetWorkloadLabels` and `SetServiceTargets`.
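// For example (hypothetical values), a proxy in cluster "c1" on a node in region "us-east1"
// and zone "us-east1-b" may end up with labels such as topology.kubernetes.io/region=us-east1,
// topology.kubernetes.io/zone=us-east1-b, and topology.istio.io/cluster=c1 merged into proxy.Labels.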
func setTopologyLabels(proxy *model.Proxy) {
	// This is a bit unintuitive, but pull the locality from Labels first. The service registries have the best access to
	// locality information, as they can read from various sources (Node on Kubernetes, for example). They will take this
	// information and add it to the labels. So while the proxy may not originally have these labels,
	// it will by the time we get here (as a result of calling this after SetWorkloadLabels).
	proxy.Locality = localityFromProxyLabels(proxy)
	if proxy.Locality == nil {
		// If there is no locality in the registry, then use the one sent as part of the discovery request.
		// This is not preferable as only the connected Pilot is aware of this proxy's location, but it
		// can still help provide some client-side Envoy context when load balancing based on location.
		proxy.Locality = &core.Locality{
			Region:  proxy.XdsNode.Locality.GetRegion(),
			Zone:    proxy.XdsNode.Locality.GetZone(),
			SubZone: proxy.XdsNode.Locality.GetSubZone(),
		}
	}
	// add topology labels to proxy labels
	proxy.Labels = labelutil.AugmentLabels(
		proxy.Labels,
		proxy.Metadata.ClusterID,
		util.LocalityToString(proxy.Locality),
		proxy.GetNodeName(),
		proxy.Metadata.Network,
	)
}

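// localityFromProxyLabels derives the proxy locality from its labels, if any are set.
// The override label (model.LocalityLabel) uses a slash-separated form that
// util.ConvertLocality parses, e.g. "us-east1/us-east1-b/subzone-1" (hypothetical values).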
func localityFromProxyLabels(proxy *model.Proxy) *core.Locality {
	region, f1 := proxy.Labels[labelutil.LabelTopologyRegion]
	zone, f2 := proxy.Labels[labelutil.LabelTopologyZone]
	subzone, f3 := proxy.Labels[label.TopologySubzone.Name]
	if !f1 && !f2 && !f3 {
		// If no labels are set, we didn't find the locality from the service registry. We do support a (mostly undocumented/internal)
		// label to override the locality, so respect that here as well.
		ls, f := proxy.Labels[model.LocalityLabel]
		if f {
			return util.ConvertLocality(ls)
		}
		return nil
	}
	return &core.Locality{
		Region:  region,
		Zone:    zone,
		SubZone: subzone,
	}
}

// initializeProxy completes the initialization of a proxy. It is expected to be called only after
// initProxyMetadata.
func (s *DiscoveryServer) initializeProxy(con *Connection) error {
	proxy := con.proxy
	// This should be done before we look for service instances, but after we load metadata.
	// TODO: fix check in kubecontroller; treat echo VMs like there isn't a pod.
	if err := s.WorkloadEntryController.OnConnect(con); err != nil {
		return err
	}
	s.computeProxyState(proxy, nil)
	// Discover the supported IP versions of the proxy so that appropriate config can be delivered.
	proxy.DiscoverIPMode()

	proxy.WatchedResources = map[string]*model.WatchedResource{}
	// Based on node metadata and version, we can associate a different generator.
	if proxy.Metadata.Generator != "" {
		proxy.XdsResourceGenerator = s.Generators[proxy.Metadata.Generator]
	}

	return nil
}

func (s *DiscoveryServer) computeProxyState(proxy *model.Proxy, request *model.PushRequest) {
	proxy.SetServiceTargets(s.Env.ServiceDiscovery)
	// Only recompute workload labels when:
	// 1. the stream is established and the proxy is initialized for the first time, or
	// 2. the proxy is updated.
	recomputeLabels := request == nil || request.IsProxyUpdate()
	if recomputeLabels {
		proxy.SetWorkloadLabels(s.Env)
		setTopologyLabels(proxy)
	}
	// Precompute the sidecar scope and merged gateways associated with this proxy.
	// This saves compute cycles in the networking code. Though it might sometimes be redundant, we still
	// have to compute this because as part of a config change, a new Sidecar could become
	// applicable to this proxy.
	var sidecar, gateway bool
	push := proxy.LastPushContext
	if request == nil {
		sidecar = true
		gateway = true
	} else {
		push = request.Push
		if len(request.ConfigsUpdated) == 0 {
			sidecar = true
			gateway = true
		}
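		// Determine which precomputation each changed config kind invalidates:
		// for example, a DestinationRule change requires recomputing only the
		// SidecarScope, a Gateway change only the merged gateways, and an
		// Ingress change both.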
		for conf := range request.ConfigsUpdated {
			switch conf.Kind {
			case kind.ServiceEntry, kind.DestinationRule, kind.VirtualService, kind.Sidecar, kind.HTTPRoute, kind.TCPRoute, kind.TLSRoute, kind.GRPCRoute:
				sidecar = true
			case kind.Gateway, kind.KubernetesGateway, kind.GatewayClass, kind.ReferenceGrant:
				gateway = true
			case kind.Ingress:
				sidecar = true
				gateway = true
			}
			if sidecar && gateway {
				break
			}
		}
	}
	// Compute the SidecarScope for both proxy types whenever it changes.
	if sidecar {
		proxy.SetSidecarScope(push)
	}
	// Only compute gateways for "router" type proxies.
	if gateway && proxy.Type == model.Router {
		proxy.SetGatewaysForProxy(push)
	}
	proxy.LastPushContext = push
	if request != nil {
		proxy.LastPushTime = request.Start
	}
}

// handleWorkloadHealthcheck processes the HealthInfoType URL.
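// A nil ErrorDetail on the request marks the workload healthy; otherwise the
// ErrorDetail message is propagated in the resulting health event.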
func (s *DiscoveryServer) handleWorkloadHealthcheck(proxy *model.Proxy, req *discovery.DiscoveryRequest) {
	if features.WorkloadEntryHealthChecks {
		event := autoregistration.HealthEvent{}
		event.Healthy = req.ErrorDetail == nil
		if !event.Healthy {
			event.Message = req.ErrorDetail.Message
		}
		s.WorkloadEntryController.QueueWorkloadEntryHealth(proxy, event)
	}
}

// DeltaAggregatedResources implements the Delta ADS interface, delegating to StreamDeltas.
//
// Generators may send only updates/adds, with deletes indicated by an empty spec.
// This works if both ends follow this model; for example, EDS and the API generator follow this
// pattern.
//
// The delta protocol changes the request, adding unsubscribe/subscribe instead of sending the full
// list of resources. On the response it adds 'removed resources' and sends changes for everything.
func (s *DiscoveryServer) DeltaAggregatedResources(stream discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error {
	return s.StreamDeltas(stream)
}

// pushConnection computes and sends the new configuration for a connection.
func (s *DiscoveryServer) pushConnection(con *Connection, pushEv *Event) error {
	pushRequest := pushEv.pushRequest

	if pushRequest.Full {
		// Update the proxy with current information.
		s.computeProxyState(con.proxy, pushRequest)
	}

	if !s.ProxyNeedsPush(con.proxy, pushRequest) {
		log.Debugf("Skipping push to %v, no updates required", con.ID())
		if pushRequest.Full {
			// Only report for full pushes; incremental pushes do not have a new version.
			reportAllEventsForProxyNoPush(con, s.StatusReporter, pushRequest.Push.LedgerVersion)
		}
		return nil
	}

	// Send pushes to all generators.
	// Each generator is responsible for determining if the push event requires a push.
	wrl := con.watchedResourcesByOrder()
	for _, w := range wrl {
		if err := s.pushXds(con, w, pushRequest); err != nil {
			return err
		}
	}
	if pushRequest.Full {
		// Report all events for unwatched resources. Watched resources will be reported in pushXds or on ack.
		reportEventsForUnWatched(con, s.StatusReporter, pushRequest.Push.LedgerVersion)
	}

	proxiesConvergeDelay.Record(time.Since(pushRequest.Start).Seconds())
	return nil
}

// PushOrder defines the order in which updates will be pushed. Any types not listed here will be
// pushed in random order after the types listed here.
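// This matches the make-before-break ordering recommended for xDS: clusters are
// pushed before the endpoints that reference them, and listeners before the
// routes they point to, so Envoy never holds a dangling reference.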
var PushOrder = []string{
	v3.ClusterType,
	v3.EndpointType,
	v3.ListenerType,
	v3.RouteType,
	v3.SecretType,
	v3.AddressType,
	v3.WorkloadType,
	v3.WorkloadAuthorizationType,
}

// KnownOrderedTypeUrls has typeUrls for which we know the order of push.
var KnownOrderedTypeUrls = sets.New(PushOrder...)

func (s *DiscoveryServer) adsClientCount() int {
	s.adsClientsMutex.RLock()
	defer s.adsClientsMutex.RUnlock()
	return len(s.adsClients)
}

func (s *DiscoveryServer) ProxyUpdate(clusterID cluster.ID, ip string) {
	var connection *Connection

	for _, v := range s.Clients() {
		if v.proxy.Metadata.ClusterID == clusterID && v.proxy.IPAddresses[0] == ip {
			connection = v
			break
		}
	}

	// It is possible that this Envoy is not connected to this pilot; it may be connected to another pilot instance.
	if connection == nil {
		return
	}
	if log.DebugEnabled() {
		currentlyPending := s.pushQueue.Pending()
		if currentlyPending != 0 {
			log.Debugf("Starting new push while %v were still pending", currentlyPending)
		}
	}

	s.pushQueue.Enqueue(connection, &model.PushRequest{
		Full:   true,
		Push:   s.globalPushContext(),
		Start:  time.Now(),
		Reason: model.NewReasonStats(model.ProxyUpdate),
	})
}

// AdsPushAll will send updates to all nodes, with a full push.
// Mainly used in the debug interface.
func AdsPushAll(s *DiscoveryServer) {
	s.AdsPushAll(&model.PushRequest{
		Full:   true,
		Push:   s.globalPushContext(),
		Reason: model.NewReasonStats(model.DebugTrigger),
	})
}

// AdsPushAll will send updates to all nodes, for a full config or incremental EDS.
func (s *DiscoveryServer) AdsPushAll(req *model.PushRequest) {
	if !req.Full {
		log.Infof("XDS: Incremental Pushing ConnectedEndpoints:%d Version:%s",
			s.adsClientCount(), req.Push.PushVersion)
	} else {
		totalService := len(req.Push.GetAllServices())
		log.Infof("XDS: Pushing Services:%d ConnectedEndpoints:%d Version:%s",
			totalService, s.adsClientCount(), req.Push.PushVersion)
		monServices.Record(float64(totalService))

		// Make sure the ConfigsUpdated map exists
		if req.ConfigsUpdated == nil {
			req.ConfigsUpdated = make(sets.Set[model.ConfigKey])
		}
	}

	s.StartPush(req)
}

// StartPush sends a signal to all connections, with a push event.
func (s *DiscoveryServer) StartPush(req *model.PushRequest) {
	// Push config changes, iterating over connected envoys.
	if log.DebugEnabled() {
		currentlyPending := s.pushQueue.Pending()
		if currentlyPending != 0 {
			log.Debugf("Starting new push while %v were still pending", currentlyPending)
		}
	}
	req.Start = time.Now()
	for _, p := range s.AllClients() {
		s.pushQueue.Enqueue(p, req)
	}
}

func (s *DiscoveryServer) addCon(conID string, con *Connection) {
	s.adsClientsMutex.Lock()
	defer s.adsClientsMutex.Unlock()
	s.adsClients[conID] = con
	recordXDSClients(con.proxy.Metadata.IstioVersion, 1)
}

func (s *DiscoveryServer) removeCon(conID string) {
	s.adsClientsMutex.Lock()
	defer s.adsClientsMutex.Unlock()

	if con, exist := s.adsClients[conID]; !exist {
		log.Errorf("ADS: Removing connection for non-existing node:%v.", conID)
		xds.TotalXDSInternalErrors.Increment()
	} else {
		delete(s.adsClients, conID)
		recordXDSClients(con.proxy.Metadata.IstioVersion, -1)
	}
}

// nolint
func (conn *Connection) NonceAcked(typeUrl string) string {
	wr := conn.proxy.GetWatchedResource(typeUrl)
	if wr != nil {
		return wr.NonceAcked
	}
	return ""
}

// nolint
func (conn *Connection) NonceSent(typeUrl string) string {
	wr := conn.proxy.GetWatchedResource(typeUrl)
	if wr != nil {
		return wr.NonceSent
	}
	return ""
}

func (conn *Connection) Clusters() []string {
	wr := conn.proxy.GetWatchedResource(v3.EndpointType)
	if wr != nil {
		return wr.ResourceNames
	}
	return []string{}
}

// watchedResourcesByOrder returns the list of watched resources for the proxy,
// ordered in accordance with the known push order.
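// For example (hypothetical), if the proxy watches RDS, CDS, and a custom type,
// the result is ordered CDS, RDS, and then the custom type.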
func (conn *Connection) watchedResourcesByOrder() []*model.WatchedResource {
	allWatched := conn.proxy.CloneWatchedResources()
	ordered := make([]*model.WatchedResource, 0, len(allWatched))
	// First add all known types, in order.
	for _, tp := range PushOrder {
		if allWatched[tp] != nil {
			ordered = append(ordered, allWatched[tp])
		}
	}
	// Then add any undeclared types.
	for tp, res := range allWatched {
		if !KnownOrderedTypeUrls.Contains(tp) {
			ordered = append(ordered, res)
		}
	}
	return ordered
}

// reportAllEventsForProxyNoPush reports all tracking events for a proxy without needing to push xds.
func reportAllEventsForProxyNoPush(con *Connection, statusReporter DistributionStatusCache, nonce string) {
	if statusReporter == nil {
		return
	}
	for distributionType := range AllTrackingEventTypes {
		statusReporter.RegisterEvent(con.ID(), distributionType, nonce)
	}
}

// reportEventsForUnWatched reports events for unwatched types after a push,
// e.g. there is no RDS if no route is configured for a gateway.
// nolint
func reportEventsForUnWatched(con *Connection, statusReporter DistributionStatusCache, nonce string) {
	if statusReporter == nil {
		return
	}

	// Report all events for types that are not being watched.
	unWatched := sets.NewWithLength[EventType](len(AllTrackingEventTypes))
	watchedTypes := con.proxy.GetWatchedResourceTypes()
	for typeUrl := range AllTrackingEventTypes {
		if _, exists := watchedTypes[typeUrl]; !exists {
			unWatched.Insert(typeUrl)
		}
	}
	for typeUrl := range unWatched {
		statusReporter.RegisterEvent(con.ID(), typeUrl, nonce)
	}
}