google.golang.org/grpc@v1.62.1/xds/internal/xdsclient/tests/cds_watchers_test.go

/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsclient_test

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/google/uuid"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/xds/e2e"
	xdstestutils "google.golang.org/grpc/xds/internal/testutils"
	"google.golang.org/grpc/xds/internal/xdsclient"
	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"

	v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)

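// noopClusterWatcher implements the xdsresource.ClusterWatcher interface and
// ignores all callback invocations. It is useful for tests that only need to
// register a watch and do not care about the received updates.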
type noopClusterWatcher struct{}

func (noopClusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) {}
func (noopClusterWatcher) OnError(err error)                                {}
func (noopClusterWatcher) OnResourceDoesNotExist()                          {}

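// clusterUpdateErrTuple wraps a received cluster update and an error, so that
// both can be pushed onto a single test channel.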
type clusterUpdateErrTuple struct {
	update xdsresource.ClusterUpdate
	err    error
}

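// clusterWatcher implements the xdsresource.ClusterWatcher interface and
// pushes received updates and errors onto a channel that tests can read from.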
type clusterWatcher struct {
	updateCh *testutils.Channel
}

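// newClusterWatcher returns a clusterWatcher backed by a new test channel.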
func newClusterWatcher() *clusterWatcher {
	return &clusterWatcher{updateCh: testutils.NewChannel()}
}

func (cw *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) {
	cw.updateCh.Send(clusterUpdateErrTuple{update: update.Resource})
}

func (cw *clusterWatcher) OnError(err error) {
	// When used with a go-control-plane management server that continuously
	// resends resources which are NACKed by the xDS client, using Replace()
	// here and in OnResourceDoesNotExist() simplifies tests, which then have
	// access to the most recently received error.
	cw.updateCh.Replace(clusterUpdateErrTuple{err: err})
}

func (cw *clusterWatcher) OnResourceDoesNotExist() {
	cw.updateCh.Replace(clusterUpdateErrTuple{err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Cluster not found in received response")})
}

// badClusterResource returns a cluster resource for the given name which
// contains a config_source_specifier for the `lrs_server` field which is not
// set to `self`, and hence is expected to be NACKed by the client.
func badClusterResource(clusterName, edsServiceName string, secLevel e2e.SecurityLevel) *v3clusterpb.Cluster {
	cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel)
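	// The client only accepts an `lrs_server` whose config source is set to
	// `self`; an `ads` config source here causes the resource to be NACKed.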
	cluster.LrsServer = &v3corepb.ConfigSource{ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{}}
	return cluster
}

// xdsClient is expected to produce an error containing this string when an
// update is received containing a cluster created using `badClusterResource`.
const wantClusterNACKErr = "unsupported config_source_specifier"

// verifyClusterUpdate waits for an update to be received on the provided update
// channel and verifies that it matches the expected update.
//
// Returns an error if no update is received before the context deadline expires
// or the received update does not match the expected one.
func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate clusterUpdateErrTuple) error {
	u, err := updateCh.Receive(ctx)
	if err != nil {
		return fmt.Errorf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	got := u.(clusterUpdateErrTuple)
	if wantUpdate.err != nil {
		if gotType, wantType := xdsresource.ErrType(got.err), xdsresource.ErrType(wantUpdate.err); gotType != wantType {
			return fmt.Errorf("received update with error type %v, want %v", gotType, wantType)
		}
	}
	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy")}
	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
		return fmt.Errorf("received unexpected diff in the cluster resource update: (-want, +got):\n%s", diff)
	}
	return nil
}

// verifyNoClusterUpdate verifies that no cluster update is received on the
// provided update channel, and returns an error if an update is received.
//
// A very short deadline is used while waiting for the update, as this function
// is intended to be used when an update is not expected.
func verifyNoClusterUpdate(ctx context.Context, updateCh *testutils.Channel) error {
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded {
		return fmt.Errorf("received unexpected ClusterUpdate when expecting none: %v", u)
	}
	return nil
}

// TestCDSWatch covers the case where a single watcher exists for a single
// cluster resource. The test verifies the following scenarios:
//  1. An update from the management server containing the resource being
//     watched should result in the invocation of the watch callback.
//  2. An update from the management server containing a resource *not* being
//     watched should not result in the invocation of the watch callback.
//  3. After the watch is cancelled, an update from the management server
//     containing the resource that was being watched should not result in the
//     invocation of the watch callback.
//
// The test is run for old and new style names.
func (s) TestCDSWatch(t *testing.T) {
	tests := []struct {
		desc                   string
		resourceName           string
		watchedResource        *v3clusterpb.Cluster // The resource being watched.
		updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update.
		notWatchedResource     *v3clusterpb.Cluster // A resource which is not being watched.
		wantUpdate             clusterUpdateErrTuple
	}{
		{
			desc:                   "old style resource",
			resourceName:           cdsName,
			watchedResource:        e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone),
			notWatchedResource:     e2e.DefaultCluster("unsubscribed-cds-resource", edsName, e2e.SecurityLevelNone),
			wantUpdate: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsName,
					EDSServiceName: edsName,
				},
			},
		},
		{
			desc:                   "new style resource",
			resourceName:           cdsNameNewStyle,
			watchedResource:        e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone),
			notWatchedResource:     e2e.DefaultCluster("unsubscribed-cds-resource", edsNameNewStyle, e2e.SecurityLevelNone),
			wantUpdate: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsNameNewStyle,
					EDSServiceName: edsNameNewStyle,
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

			// Create an xDS client with the above bootstrap contents.
			client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
			if err != nil {
				t.Fatalf("Failed to create xDS client: %v", err)
			}
			defer close()

			// Register a watch for a cluster resource and have the watch
			// callback push the received update on to a channel.
			cw := newClusterWatcher()
			cdsCancel := xdsresource.WatchCluster(client, test.resourceName, cw)

			// Configure the management server to return a single cluster
			// resource, corresponding to the one we registered a watch for.
			resources := e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.watchedResource},
				SkipValidation: true,
			}
			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}

			// Verify the contents of the received update.
			if err := verifyClusterUpdate(ctx, cw.updateCh, test.wantUpdate); err != nil {
				t.Fatal(err)
			}

			// Configure the management server to return an additional cluster
			// resource, one that we are not interested in.
			resources = e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.watchedResource, test.notWatchedResource},
				SkipValidation: true,
			}
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyNoClusterUpdate(ctx, cw.updateCh); err != nil {
				t.Fatal(err)
			}

			// Cancel the watch and update the resource corresponding to the original
			// watch.  Ensure that the cancelled watch callback is not invoked.
			cdsCancel()
			resources = e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.updatedWatchedResource, test.notWatchedResource},
				SkipValidation: true,
			}
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyNoClusterUpdate(ctx, cw.updateCh); err != nil {
				t.Fatal(err)
			}
		})
	}
}

// TestCDSWatch_TwoWatchesForSameResourceName covers the case where two watchers
// exist for a single cluster resource.  The test verifies the following
// scenarios:
//  1. An update from the management server containing the resource being
//     watched should result in the invocation of both watch callbacks.
//  2. After one of the watches is cancelled, a redundant update from the
//     management server should not result in the invocation of either of the
//     watch callbacks.
//  3. A new update from the management server containing the resource being
//     watched should result in the invocation of the un-cancelled watch
//     callback.
//
// The test is run for old and new style names.
func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
	tests := []struct {
		desc                   string
		resourceName           string
		watchedResource        *v3clusterpb.Cluster // The resource being watched.
		updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update.
		wantUpdateV1           clusterUpdateErrTuple
		wantUpdateV2           clusterUpdateErrTuple
	}{
		{
			desc:                   "old style resource",
			resourceName:           cdsName,
			watchedResource:        e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone),
			wantUpdateV1: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsName,
					EDSServiceName: edsName,
				},
			},
			wantUpdateV2: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsName,
					EDSServiceName: "new-eds-resource",
				},
			},
		},
		{
			desc:                   "new style resource",
			resourceName:           cdsNameNewStyle,
			watchedResource:        e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone),
			wantUpdateV1: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsNameNewStyle,
					EDSServiceName: edsNameNewStyle,
				},
			},
			wantUpdateV2: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsNameNewStyle,
					EDSServiceName: "new-eds-resource",
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
			defer cleanup()

			// Create an xDS client with the above bootstrap contents.
			client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
			if err != nil {
				t.Fatalf("Failed to create xDS client: %v", err)
			}
			defer close()

			// Register two watches for the same cluster resource and have the
			// callbacks push the received updates on to a channel.
			cw1 := newClusterWatcher()
			cdsCancel1 := xdsresource.WatchCluster(client, test.resourceName, cw1)
			defer cdsCancel1()
			cw2 := newClusterWatcher()
			cdsCancel2 := xdsresource.WatchCluster(client, test.resourceName, cw2)

			// Configure the management server to return a single cluster
			// resource, corresponding to the one we registered watches for.
			resources := e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.watchedResource},
				SkipValidation: true,
			}
			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}

			// Verify the contents of the received update.
			if err := verifyClusterUpdate(ctx, cw1.updateCh, test.wantUpdateV1); err != nil {
				t.Fatal(err)
			}
			if err := verifyClusterUpdate(ctx, cw2.updateCh, test.wantUpdateV1); err != nil {
				t.Fatal(err)
			}

			// Cancel the second watch and force the management server to push a
			// redundant update for the resource being watched. Neither of the
			// two watch callbacks should be invoked.
			cdsCancel2()
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyNoClusterUpdate(ctx, cw1.updateCh); err != nil {
				t.Fatal(err)
			}
			if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
				t.Fatal(err)
			}

			// Update to the resource being watched. The un-cancelled callback
			// should be invoked while the cancelled one should not be.
			resources = e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.updatedWatchedResource},
				SkipValidation: true,
			}
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyClusterUpdate(ctx, cw1.updateCh, test.wantUpdateV2); err != nil {
				t.Fatal(err)
			}
			if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
				t.Fatal(err)
			}
		})
	}
}

// TestCDSWatch_ThreeWatchesForDifferentResourceNames covers the case where
// three watchers (two watchers for one resource, and the third watcher for
// another resource) exist across two cluster resources (one with an old style
// name and one with a new style name).  The test verifies that an update from
// the management server containing both resources results in the invocation of
// all watch callbacks.
func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

	// Create an xDS client with the above bootstrap contents.
	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for the same cluster resource and have the
	// callbacks push the received updates on to a channel.
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, cdsName, cw1)
	defer cdsCancel1()
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, cdsName, cw2)
	defer cdsCancel2()

	// Register the third watch for a different cluster resource, and push the
	// received updates onto a channel.
	cw3 := newClusterWatcher()
	cdsCancel3 := xdsresource.WatchCluster(client, cdsNameNewStyle, cw3)
	defer cdsCancel3()

	// Configure the management server to return two cluster resources,
	// corresponding to the registered watches.
	resources := e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for all the watchers.
	wantUpdate12 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsName,
			EDSServiceName: edsName,
		},
	}
	wantUpdate3 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsNameNewStyle,
			EDSServiceName: edsNameNewStyle,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate12); err != nil {
		t.Fatal(err)
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate12); err != nil {
		t.Fatal(err)
	}
	if err := verifyClusterUpdate(ctx, cw3.updateCh, wantUpdate3); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_ResourceCaching covers the case where a watch is registered for
// a resource which is already present in the cache.  The test verifies that the
// watch callback is invoked with the contents from the cache, instead of a
// request being sent to the management server.
func (s) TestCDSWatch_ResourceCaching(t *testing.T) {
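	// Events used to track the requests seen by the management server: the
	// initial request, the ACK of the first response, and any request beyond
	// those two (receipt of which would mean the cache was not used).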
	firstRequestReceived := false
	firstAckReceived := grpcsync.NewEvent()
	secondRequestReceived := grpcsync.NewEvent()

	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{
		OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error {
			// The first request has an empty version string.
			if !firstRequestReceived && req.GetVersionInfo() == "" {
				firstRequestReceived = true
				return nil
			}
			// The first ack has a non-empty version string.
			if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" {
				firstAckReceived.Fire()
				return nil
			}
			// Any request after the first request and ack is unexpected.
			secondRequestReceived.Fire()
			return nil
		},
	})
	defer cleanup()

	// Create an xDS client with the above bootstrap contents.
	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register a watch for a cluster resource and have the watch
	// callback push the received update on to a channel.
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, cdsName, cw1)
	defer cdsCancel1()

	// Configure the management server to return a single cluster
	// resource, corresponding to the one we registered a watch for.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update.
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsName,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
	select {
	case <-ctx.Done():
		t.Fatal("timeout when waiting for receipt of ACK at the management server")
	case <-firstAckReceived.Done():
	}

	// Register another watch for the same resource. This should get the update
	// from the cache.
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, cdsName, cw2)
	defer cdsCancel2()
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
	// No request should get sent out as part of this watch.
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	select {
	case <-sCtx.Done():
	case <-secondRequestReceived.Done():
		t.Fatal("xdsClient sent out request instead of using update from cache")
	}
}

// TestCDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client
// does not receive a CDS response for the request that it sends. The test
// verifies that the watch callback is invoked with an error once the
// watchExpiryTimer fires.
func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)
	}
	defer mgmtServer.Stop()

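	// Create an xDS client with a watch expiry timeout that is short enough
	// for the test to trigger it quickly.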
	client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
		XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),
		NodeProto: &v3corepb.Node{},
	}, defaultTestWatchExpiryTimeout, time.Duration(0))
	if err != nil {
		t.Fatalf("failed to create xds client: %v", err)
	}
	defer close()

	// Register a watch for a resource. The watch callback is expected to be
	// invoked with an error after the watch expiry timer fires.
	cw := newClusterWatcher()
	cdsCancel := xdsresource.WatchCluster(client, cdsName, cw)
	defer cdsCancel()

	// Wait for the watch expiry timer to fire.
	<-time.After(defaultTestWatchExpiryTimeout)

	// Verify that an empty update with the expected error is received.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "")
	if err := verifyClusterUpdate(ctx, cw.updateCh, clusterUpdateErrTuple{err: wantErr}); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the
// client receives a valid CDS response for the request that it sends. The test
// verifies that the behavior associated with the expiry timer (i.e., callback
// invocation with error) does not take place.
func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
	if err != nil {
		t.Fatalf("Failed to spin up the xDS management server: %v", err)
	}
	defer mgmtServer.Stop()

	// Create an xDS client talking to the above management server.
	nodeID := uuid.New().String()
	client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
		XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),
		NodeProto: &v3corepb.Node{Id: nodeID},
	}, defaultTestWatchExpiryTimeout, time.Duration(0))
	if err != nil {
		t.Fatalf("failed to create xds client: %v", err)
	}
	defer close()

	// Register a watch for a cluster resource and have the watch
	// callback push the received update on to a channel.
	cw := newClusterWatcher()
	cdsCancel := xdsresource.WatchCluster(client, cdsName, cw)
	defer cdsCancel()

	// Configure the management server to return a single cluster resource,
	// corresponding to the one we registered a watch for.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update.
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsName,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}

	// Wait for the watch expiry timer to fire, and verify that the callback is
	// not invoked.
	<-time.After(defaultTestWatchExpiryTimeout)
	if err := verifyNoClusterUpdate(ctx, cw.updateCh); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_ResourceRemoved covers the case where two watchers exist for
// two different resources (one with an old style name and one with a new style
// name), and one of the watched resources is removed from the management
// server. The test verifies the following scenarios:
//  1. Removing a resource should trigger the watch callback associated with that
//     resource with a resource removed error. It should not trigger the watch
//     callback for an unrelated resource.
//  2. An update to the other resource should result in the invocation of the
//     watch callback associated with that resource. It should not result in the
//     invocation of the watch callback associated with the deleted resource.
func (s) TestCDSWatch_ResourceRemoved(t *testing.T) {
	t.Skip("Disabled; see https://github.com/grpc/grpc-go/issues/6781")
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

	// Create an xDS client with the above bootstrap contents.
	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for two cluster resources and have the
	// callbacks push the received updates on to a channel.
	resourceName1 := cdsName
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, resourceName1, cw1)
	defer cdsCancel1()
	resourceName2 := cdsNameNewStyle
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, resourceName2, cw2)
	defer cdsCancel2()

	// Configure the management server to return two cluster resources,
	// corresponding to the registered watches.
	resources := e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for both watchers.
	wantUpdate1 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName1,
			EDSServiceName: edsName,
		},
	}
	wantUpdate2 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName2,
			EDSServiceName: edsNameNewStyle,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate1); err != nil {
		t.Fatal(err)
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate2); err != nil {
		t.Fatal(err)
	}

	// Remove the first cluster resource on the management server.
	resources = e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// The first watcher should receive a resource removed error, while the
	// second watcher should not receive an update.
	if err := verifyClusterUpdate(ctx, cw1.updateCh, clusterUpdateErrTuple{err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "")}); err != nil {
		t.Fatal(err)
	}
	if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
		t.Fatal(err)
	}

	// Update the second cluster resource on the management server. The first
	// watcher should not receive an update, while the second watcher should.
	resources = e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, "new-eds-resource", e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}
	if err := verifyNoClusterUpdate(ctx, cw1.updateCh); err != nil {
		t.Fatal(err)
	}
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName2,
			EDSServiceName: "new-eds-resource",
		},
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_NACKError covers the case where an update from the management
// server is NACK'ed by the xdsclient. The test verifies that the error is
// propagated to the watcher.
func (s) TestCDSWatch_NACKError(t *testing.T) {
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

	// Create an xDS client with the above bootstrap contents.
	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register a watch for a cluster resource and have the watch
	// callback push the received update on to a channel.
	cw := newClusterWatcher()
	cdsCancel := xdsresource.WatchCluster(client, cdsName, cw)
	defer cdsCancel()

	// Configure the management server to return a single cluster resource
	// which is expected to be NACK'ed by the client.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{badClusterResource(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify that the expected error is propagated to the watcher.
	u, err := cw.updateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	gotErr := u.(clusterUpdateErrTuple).err
	if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) {
		t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr)
	}
}

// TestCDSWatch_PartialValid covers the case where a response from the
// management server contains both valid and invalid resources and is expected
// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding
// to the valid resource receive the update, while watchers corresponding to the
// invalid resource receive an error.
func (s) TestCDSWatch_PartialValid(t *testing.T) {
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

	// Create an xDS client with the above bootstrap contents.
	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for cluster resources. The first watch is expected
	// to receive an error because the received resource is NACK'ed. The second
	// watch is expected to get a good update.
	badResourceName := cdsName
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, badResourceName, cw1)
	defer cdsCancel1()
	goodResourceName := cdsNameNewStyle
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, goodResourceName, cw2)
	defer cdsCancel2()

	// Configure the management server with two cluster resources. One of these
	// is a bad resource causing the update to be NACKed.
	resources := e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			badClusterResource(badResourceName, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(goodResourceName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify that the expected error is propagated to the watcher which is
	// watching the bad resource.
	u, err := cw1.updateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	gotErr := u.(clusterUpdateErrTuple).err
	if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) {
		t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr)
	}

	// Verify that the watcher watching the good resource receives a good
	// update.
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    goodResourceName,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_PartialResponse covers the case where a response from the
// management server does not contain all requested resources. CDS responses are
// supposed to contain all requested resources, and the absence of one usually
// indicates that the management server does not know about it. In cases where
// the server has never responded with this resource before, the xDS client is
// expected to wait for the watch timeout to expire before concluding that the
// resource does not exist on the server.
func (s) TestCDSWatch_PartialResponse(t *testing.T) {
	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
	defer cleanup()

	// Create an xDS client with the above bootstrap contents.
	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for two cluster resources and have the
	// callbacks push the received updates on to a channel.
	resourceName1 := cdsName
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, resourceName1, cw1)
	defer cdsCancel1()
	resourceName2 := cdsNameNewStyle
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, resourceName2, cw2)
	defer cdsCancel2()

	// Configure the management server to return only one of the two cluster
	// resources, corresponding to the registered watches.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for the first watcher.
	wantUpdate1 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName1,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate1); err != nil {
		t.Fatal(err)
	}

	// Verify that the second watcher gets neither an update nor an error.
	if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
		t.Fatal(err)
	}

	// Configure the management server to return two cluster resources,
	// corresponding to the registered watches.
	resources = e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for the second watcher.
	wantUpdate2 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName2,
			EDSServiceName: edsNameNewStyle,
		},
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate2); err != nil {
		t.Fatal(err)
	}

	// Verify that the first watcher gets no update, as the first resource did
	// not change.
	if err := verifyNoClusterUpdate(ctx, cw1.updateCh); err != nil {
		t.Fatal(err)
	}
}