google.golang.org/grpc@v1.72.2/xds/internal/xdsclient/tests/cds_watchers_test.go

/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsclient_test

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/google/uuid"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/internal/pretty"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/internal/testutils/xds/e2e"
	"google.golang.org/grpc/internal/xds/bootstrap"
	"google.golang.org/grpc/xds/internal/xdsclient"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"

	v3clusterpb "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)

type noopClusterWatcher struct{}

func (noopClusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) {
	onDone()
}
func (noopClusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
	onDone()
}
func (noopClusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
	onDone()
}

type clusterUpdateErrTuple struct {
	update xdsresource.ClusterUpdate
	err    error
}

type clusterWatcher struct {
	updateCh *testutils.Channel
}

func newClusterWatcher() *clusterWatcher {
	return &clusterWatcher{updateCh: testutils.NewChannel()}
}

func (cw *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) {
	cw.updateCh.Send(clusterUpdateErrTuple{update: update.Resource})
	onDone()
}

func (cw *clusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) {
	// When used with a go-control-plane management server that continuously
	// resends resources which are NACKed by the xDS client, using a `Replace()`
	// here and in OnResourceDoesNotExist() simplifies tests which will have
	// access to the most recently received error.
	cw.updateCh.Replace(clusterUpdateErrTuple{err: err})
	onDone()
}

func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) {
	cw.updateCh.Replace(clusterUpdateErrTuple{err: xdsresource.NewError(xdsresource.ErrorTypeResourceNotFound, "Cluster not found in received response")})
	onDone()
}

// badClusterResource returns a cluster resource for the given name whose
// `lrs_server` field contains a config_source_specifier that is not set to
// `self`, and which is hence expected to be NACKed by the client.
func badClusterResource(clusterName, edsServiceName string, secLevel e2e.SecurityLevel) *v3clusterpb.Cluster {
	cluster := e2e.DefaultCluster(clusterName, edsServiceName, secLevel)
	cluster.LrsServer = &v3corepb.ConfigSource{ConfigSourceSpecifier: &v3corepb.ConfigSource_Ads{}}
	return cluster
}
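
// Note: the xDS client accepts an `lrs_server` field only when its
// config_source_specifier is `self` (i.e., a v3corepb.ConfigSource_Self
// specifier). The ADS specifier used above therefore fails CDS validation,
// producing a NACK whose error message contains the string in
// wantClusterNACKErr below.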

// xdsClient is expected to produce an error containing this string when an
// update is received containing a cluster created using `badClusterResource`.
const wantClusterNACKErr = "unsupported config_source_specifier"
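
// verifyClusterNACKError is a convenience sketch (a hypothetical helper, not
// part of the upstream test file) that factors out the NACK assertion
// performed inline by TestCDSWatch_NACKError and TestCDSWatch_PartialValid:
// it waits for an update on the given channel and verifies that the received
// error contains wantClusterNACKErr.
func verifyClusterNACKError(ctx context.Context, updateCh *testutils.Channel) error {
	u, err := updateCh.Receive(ctx)
	if err != nil {
		return fmt.Errorf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	if gotErr := u.(clusterUpdateErrTuple).err; gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) {
		return fmt.Errorf("update received with error: %v, want %q", gotErr, wantClusterNACKErr)
	}
	return nil
}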

// verifyClusterUpdate waits for an update to be received on the provided update
// channel and verifies that it matches the expected update.
//
// Returns an error if no update is received before the context deadline expires
// or the received update does not match the expected one.
func verifyClusterUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate clusterUpdateErrTuple) error {
	u, err := updateCh.Receive(ctx)
	if err != nil {
		return fmt.Errorf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	got := u.(clusterUpdateErrTuple)
	if wantUpdate.err != nil {
		if gotType, wantType := xdsresource.ErrType(got.err), xdsresource.ErrType(wantUpdate.err); gotType != wantType {
			return fmt.Errorf("received update with error type %v, want %v", gotType, wantType)
		}
	}
	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.ClusterUpdate{}, "Raw", "LBPolicy", "TelemetryLabels")}
	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
		return fmt.Errorf("received unexpected diff in the cluster resource update: (-want, +got):\n%s", diff)
	}
	return nil
}

// verifyNoClusterUpdate verifies that no cluster update is received on the
// provided update channel, and returns an error if an update is received.
//
// A very short deadline is used while waiting for the update, as this function
// is intended to be used when an update is not expected.
func verifyNoClusterUpdate(ctx context.Context, updateCh *testutils.Channel) error {
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded {
		return fmt.Errorf("received unexpected ClusterUpdate when expecting none: %s", pretty.ToJSON(u))
	}
	return nil
}
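
// Shared fixtures such as cdsName, edsName, cdsNameNewStyle, edsNameNewStyle,
// makeAuthorityName, makeNewStyleCDSName, makeNewStyleEDSName,
// defaultTestTimeout, defaultTestShortTimeout and
// defaultTestWatchExpiryTimeout are defined elsewhere in this test package
// and are not repeated here.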

// TestCDSWatch covers the case where a single watcher exists for a single
// cluster resource. The test verifies the following scenarios:
//  1. An update from the management server containing the resource being
//     watched should result in the invocation of the watch callback.
//  2. An update from the management server containing a resource *not* being
//     watched should not result in the invocation of the watch callback.
//  3. After the watch is cancelled, an update from the management server
//     containing the resource that was being watched should not result in the
//     invocation of the watch callback.
//
// The test is run for old and new style names.
func (s) TestCDSWatch(t *testing.T) {
	tests := []struct {
		desc                   string
		resourceName           string
		watchedResource        *v3clusterpb.Cluster // The resource being watched.
		updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update.
		notWatchedResource     *v3clusterpb.Cluster // A resource which is not being watched.
		wantUpdate             clusterUpdateErrTuple
	}{
		{
			desc:                   "old style resource",
			resourceName:           cdsName,
			watchedResource:        e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone),
			notWatchedResource:     e2e.DefaultCluster("unsubscribed-cds-resource", edsName, e2e.SecurityLevelNone),
			wantUpdate: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsName,
					EDSServiceName: edsName,
				},
			},
		},
		{
			desc:                   "new style resource",
			resourceName:           cdsNameNewStyle,
			watchedResource:        e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone),
			notWatchedResource:     e2e.DefaultCluster("unsubscribed-cds-resource", edsNameNewStyle, e2e.SecurityLevelNone),
			wantUpdate: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsNameNewStyle,
					EDSServiceName: edsNameNewStyle,
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

			nodeID := uuid.New().String()
			bc, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
				Servers: []byte(fmt.Sprintf(`[{
					"server_uri": %q,
					"channel_creds": [{"type": "insecure"}]
				}]`, mgmtServer.Address)),
				Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
				Authorities: map[string]json.RawMessage{
					// Xdstp resource names used in this test do not specify an
					// authority. These will end up looking up an entry with the
					// empty key in the authorities map. Having an entry with an
					// empty key and an empty configuration results in these
					// resources also using the top-level configuration.
					"": []byte(`{}`),
				},
			})
			if err != nil {
				t.Fatalf("Failed to create bootstrap configuration: %v", err)
			}

			// Create an xDS client with the above bootstrap contents.
			config, err := bootstrap.NewConfigFromContents(bc)
			if err != nil {
				t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
			}
			pool := xdsclient.NewPool(config)
			client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
				Name: t.Name(),
			})
			if err != nil {
				t.Fatalf("Failed to create xDS client: %v", err)
			}
			defer close()

			// Register a watch for a cluster resource and have the watch
			// callback push the received update on to a channel.
			cw := newClusterWatcher()
			cdsCancel := xdsresource.WatchCluster(client, test.resourceName, cw)

			// Configure the management server to return a single cluster
			// resource, corresponding to the one we registered a watch for.
			resources := e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.watchedResource},
				SkipValidation: true,
			}
			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}

			// Verify the contents of the received update.
			if err := verifyClusterUpdate(ctx, cw.updateCh, test.wantUpdate); err != nil {
				t.Fatal(err)
			}

			// Configure the management server to return an additional cluster
			// resource, one that we are not interested in.
			resources = e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.watchedResource, test.notWatchedResource},
				SkipValidation: true,
			}
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyNoClusterUpdate(ctx, cw.updateCh); err != nil {
				t.Fatal(err)
			}

			// Cancel the watch and update the resource corresponding to the original
			// watch.  Ensure that the cancelled watch callback is not invoked.
			cdsCancel()
			resources = e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.updatedWatchedResource, test.notWatchedResource},
				SkipValidation: true,
			}
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyNoClusterUpdate(ctx, cw.updateCh); err != nil {
				t.Fatal(err)
			}
		})
	}
}

// TestCDSWatch_TwoWatchesForSameResourceName covers the case where two watchers
// exist for a single cluster resource.  The test verifies the following
// scenarios:
//  1. An update from the management server containing the resource being
//     watched should result in the invocation of both watch callbacks.
//  2. After one of the watches is cancelled, a redundant update from the
//     management server should not result in the invocation of either of the
//     watch callbacks.
//  3. A new update from the management server containing the resource being
//     watched should result in the invocation of the un-cancelled watch
//     callback.
//
// The test is run for old and new style names.
func (s) TestCDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
	tests := []struct {
		desc                   string
		resourceName           string
		watchedResource        *v3clusterpb.Cluster // The resource being watched.
		updatedWatchedResource *v3clusterpb.Cluster // The watched resource after an update.
		wantUpdateV1           clusterUpdateErrTuple
		wantUpdateV2           clusterUpdateErrTuple
	}{
		{
			desc:                   "old style resource",
			resourceName:           cdsName,
			watchedResource:        e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsName, "new-eds-resource", e2e.SecurityLevelNone),
			wantUpdateV1: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsName,
					EDSServiceName: edsName,
				},
			},
			wantUpdateV2: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsName,
					EDSServiceName: "new-eds-resource",
				},
			},
		},
		{
			desc:                   "new style resource",
			resourceName:           cdsNameNewStyle,
			watchedResource:        e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone),
			updatedWatchedResource: e2e.DefaultCluster(cdsNameNewStyle, "new-eds-resource", e2e.SecurityLevelNone),
			wantUpdateV1: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsNameNewStyle,
					EDSServiceName: edsNameNewStyle,
				},
			},
			wantUpdateV2: clusterUpdateErrTuple{
				update: xdsresource.ClusterUpdate{
					ClusterName:    cdsNameNewStyle,
					EDSServiceName: "new-eds-resource",
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

			nodeID := uuid.New().String()
			bc, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
				Servers: []byte(fmt.Sprintf(`[{
					"server_uri": %q,
					"channel_creds": [{"type": "insecure"}]
				}]`, mgmtServer.Address)),
				Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
				Authorities: map[string]json.RawMessage{
					// Xdstp resource names used in this test do not specify an
					// authority. These will end up looking up an entry with the
					// empty key in the authorities map. Having an entry with an
					// empty key and an empty configuration results in these
					// resources also using the top-level configuration.
					"": []byte(`{}`),
				},
			})
			if err != nil {
				t.Fatalf("Failed to create bootstrap configuration: %v", err)
			}

			// Create an xDS client with the above bootstrap contents.
			config, err := bootstrap.NewConfigFromContents(bc)
			if err != nil {
				t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
			}
			pool := xdsclient.NewPool(config)
			client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
				Name: t.Name(),
			})
			if err != nil {
				t.Fatalf("Failed to create xDS client: %v", err)
			}
			defer close()

			// Register two watches for the same cluster resource and have the
			// callbacks push the received updates on to a channel.
			cw1 := newClusterWatcher()
			cdsCancel1 := xdsresource.WatchCluster(client, test.resourceName, cw1)
			defer cdsCancel1()
			cw2 := newClusterWatcher()
			cdsCancel2 := xdsresource.WatchCluster(client, test.resourceName, cw2)

			// Configure the management server to return a single cluster
			// resource, corresponding to the one we registered watches for.
			resources := e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.watchedResource},
				SkipValidation: true,
			}
			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
			defer cancel()
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}

			// Verify the contents of the received update.
			if err := verifyClusterUpdate(ctx, cw1.updateCh, test.wantUpdateV1); err != nil {
				t.Fatal(err)
			}
			if err := verifyClusterUpdate(ctx, cw2.updateCh, test.wantUpdateV1); err != nil {
				t.Fatal(err)
			}

			// Cancel the second watch and force the management server to push a
			// redundant update for the resource being watched. Neither of the
			// two watch callbacks should be invoked.
			cdsCancel2()
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyNoClusterUpdate(ctx, cw1.updateCh); err != nil {
				t.Fatal(err)
			}
			if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
				t.Fatal(err)
			}

			// Update to the resource being watched. The un-cancelled callback
			// should be invoked while the cancelled one should not be.
			resources = e2e.UpdateOptions{
				NodeID:         nodeID,
				Clusters:       []*v3clusterpb.Cluster{test.updatedWatchedResource},
				SkipValidation: true,
			}
			if err := mgmtServer.Update(ctx, resources); err != nil {
				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
			}
			if err := verifyClusterUpdate(ctx, cw1.updateCh, test.wantUpdateV2); err != nil {
				t.Fatal(err)
			}
			if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
				t.Fatal(err)
			}
		})
	}
}

// TestCDSWatch_ThreeWatchesForDifferentResourceNames covers the case where
// three watchers (two watchers for one resource, and the third watcher for
// another resource) exist across two cluster resources (one with an old style
// name and one with a new style name).  The test verifies that an update from
// the management server containing both resources results in the invocation of
// all watch callbacks.
func (s) TestCDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	nodeID := uuid.New().String()
	authority := makeAuthorityName(t.Name())
	bc, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
		Servers: []byte(fmt.Sprintf(`[{
			"server_uri": %q,
			"channel_creds": [{"type": "insecure"}]
		}]`, mgmtServer.Address)),
		Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
		Authorities: map[string]json.RawMessage{
			// Xdstp style resource names used in this test use a slash-removed
			// version of t.Name() as their authority, and the empty config
			// results in the top-level xds server configuration being used for
			// this authority.
			authority: []byte(`{}`),
		},
	})
	if err != nil {
		t.Fatalf("Failed to create bootstrap configuration: %v", err)
	}

	// Create an xDS client with the above bootstrap contents.
	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name: t.Name(),
	})
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for the same cluster resource and have the
	// callbacks push the received updates on to a channel.
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, cdsName, cw1)
	defer cdsCancel1()
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, cdsName, cw2)
	defer cdsCancel2()

	// Register the third watch for a different cluster resource, and push the
	// received updates onto a channel.
	cdsNameNewStyle := makeNewStyleCDSName(authority)
	cw3 := newClusterWatcher()
	cdsCancel3 := xdsresource.WatchCluster(client, cdsNameNewStyle, cw3)
	defer cdsCancel3()

	// Configure the management server to return two cluster resources,
	// corresponding to the registered watches.
	resources := e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(cdsNameNewStyle, edsNameNewStyle, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for all watchers.
	wantUpdate12 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsName,
			EDSServiceName: edsName,
		},
	}
	wantUpdate3 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsNameNewStyle,
			EDSServiceName: edsNameNewStyle,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate12); err != nil {
		t.Fatal(err)
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate12); err != nil {
		t.Fatal(err)
	}
	if err := verifyClusterUpdate(ctx, cw3.updateCh, wantUpdate3); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_ResourceCaching covers the case where a watch is registered for
// a resource which is already present in the cache.  The test verifies that the
// watch callback is invoked with the contents from the cache, instead of a
// request being sent to the management server.
func (s) TestCDSWatch_ResourceCaching(t *testing.T) {
	firstRequestReceived := false
	firstAckReceived := grpcsync.NewEvent()
	secondRequestReceived := grpcsync.NewEvent()

	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{
		OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error {
			// The first request has an empty version string.
			if !firstRequestReceived && req.GetVersionInfo() == "" {
				firstRequestReceived = true
				return nil
			}
			// The first ACK has a non-empty version string.
			if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" {
				firstAckReceived.Fire()
				return nil
			}
			// Any request after the first request and ACK is unexpected.
			secondRequestReceived.Fire()
			return nil
		},
	})

	nodeID := uuid.New().String()
	bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address)

	// Create an xDS client with the above bootstrap contents.
	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name: t.Name(),
	})
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register a watch for a cluster resource and have the watch
	// callback push the received update on to a channel.
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, cdsName, cw1)
	defer cdsCancel1()

	// Configure the management server to return a single cluster
	// resource, corresponding to the one we registered a watch for.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update.
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsName,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
	select {
	case <-ctx.Done():
		t.Fatal("timeout when waiting for receipt of ACK at the management server")
	case <-firstAckReceived.Done():
	}

	// Register another watch for the same resource. This should get the update
	// from the cache.
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, cdsName, cw2)
	defer cdsCancel2()
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
	// No request should get sent out as part of this watch.
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
	defer sCancel()
	select {
	case <-sCtx.Done():
	case <-secondRequestReceived.Done():
		t.Fatal("xdsClient sent out request instead of using update from cache")
	}
}

// TestCDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client
// does not receive a CDS response for the request that it sends. The test
// verifies that the watch callback is invoked with an error once the
// watchExpiryTimer fires.
func (s) TestCDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	nodeID := uuid.New().String()
	bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address)

	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name:               t.Name(),
		WatchExpiryTimeout: defaultTestWatchExpiryTimeout,
	})
	if err != nil {
		t.Fatalf("Failed to create an xDS client: %v", err)
	}
	defer close()

	// Register a watch for a resource. The watch callback is expected to be
	// invoked with an error after the watch expiry timer fires.
	cw := newClusterWatcher()
	cdsCancel := xdsresource.WatchCluster(client, cdsName, cw)
	defer cdsCancel()

	// Wait for the watch expiry timer to fire.
	<-time.After(defaultTestWatchExpiryTimeout)

	// Verify that an empty update with the expected error is received.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	wantErr := xdsresource.NewError(xdsresource.ErrorTypeResourceNotFound, "")
	if err := verifyClusterUpdate(ctx, cw.updateCh, clusterUpdateErrTuple{err: wantErr}); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the
// client receives a valid CDS response for the request that it sends. The test
// verifies that the behavior associated with the expiry timer (i.e., callback
// invocation with error) does not take place.
func (s) TestCDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	// Create an xDS client talking to the above management server.
	nodeID := uuid.New().String()
	bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address)

	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name:               t.Name(),
		WatchExpiryTimeout: defaultTestWatchExpiryTimeout,
	})
	if err != nil {
		t.Fatalf("Failed to create an xDS client: %v", err)
	}
	defer close()

	// Register a watch for a cluster resource and have the watch
	// callback push the received update on to a channel.
	cw := newClusterWatcher()
	cdsCancel := xdsresource.WatchCluster(client, cdsName, cw)
	defer cdsCancel()

	// Configure the management server to return a single cluster resource,
	// corresponding to the one we registered a watch for.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update.
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    cdsName,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}

	// Wait for the watch expiry timer to fire, and verify that the callback is
	// not invoked.
	<-time.After(defaultTestWatchExpiryTimeout)
	if err := verifyNoClusterUpdate(ctx, cw.updateCh); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_ResourceRemoved covers the case where two watchers exist for
// two different resources (one with an old style name and one with a new style
// name). One of these resources being watched is removed from the management
// server. The test verifies the following scenarios:
//  1. Removing a resource should trigger the watch callback associated with that
//     resource with a resource removed error. It should not trigger the watch
//     callback for an unrelated resource.
//  2. An update to the other resource should result in the invocation of the
//     watch callback associated with that resource.  It should not result in the
//     invocation of the watch callback associated with the deleted resource.
func (s) TestCDSWatch_ResourceRemoved(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	nodeID := uuid.New().String()
	authority := makeAuthorityName(t.Name())
	bc, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
		Servers: []byte(fmt.Sprintf(`[{
			"server_uri": %q,
			"channel_creds": [{"type": "insecure"}]
		}]`, mgmtServer.Address)),
		Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
		Authorities: map[string]json.RawMessage{
			// Xdstp style resource names used in this test use a slash-removed
			// version of t.Name() as their authority, and the empty config
			// results in the top-level xds server configuration being used for
			// this authority.
			authority: []byte(`{}`),
		},
	})
	if err != nil {
		t.Fatalf("Failed to create bootstrap configuration: %v", err)
	}

	// Create an xDS client with the above bootstrap contents.
	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name: t.Name(),
	})
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for two cluster resources and have the
	// callbacks push the received updates on to a channel.
	resourceName1 := cdsName
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, resourceName1, cw1)
	defer cdsCancel1()

	resourceName2 := makeNewStyleCDSName(authority)
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, resourceName2, cw2)
	defer cdsCancel2()

	// Configure the management server to return two cluster resources,
	// corresponding to the registered watches.
	edsNameNewStyle := makeNewStyleEDSName(authority)
	resources := e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for both watchers.
	wantUpdate1 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName1,
			EDSServiceName: edsName,
		},
	}
	wantUpdate2 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName2,
			EDSServiceName: edsNameNewStyle,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate1); err != nil {
		t.Fatal(err)
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate2); err != nil {
		t.Fatal(err)
	}

	// Remove the first cluster resource on the management server.
	resources = e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// The first watcher should receive a resource removed error, while the
	// second watcher should not receive an update.
	if err := verifyClusterUpdate(ctx, cw1.updateCh, clusterUpdateErrTuple{err: xdsresource.NewError(xdsresource.ErrorTypeResourceNotFound, "")}); err != nil {
		t.Fatal(err)
	}
	if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
		t.Fatal(err)
	}

	// Update the second cluster resource on the management server. The first
	// watcher should not receive an update, while the second watcher should.
	resources = e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName2, "new-eds-resource", e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}
	if err := verifyNoClusterUpdate(ctx, cw1.updateCh); err != nil {
		t.Fatal(err)
	}
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName2,
			EDSServiceName: "new-eds-resource",
		},
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_NACKError covers the case where an update from the management
// server is NACK'ed by the xdsclient. The test verifies that the error is
// propagated to the watcher.
func (s) TestCDSWatch_NACKError(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	nodeID := uuid.New().String()
	bc := e2e.DefaultBootstrapContents(t, nodeID, mgmtServer.Address)

	// Create an xDS client with the above bootstrap contents.
	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name: t.Name(),
	})
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register a watch for a cluster resource and have the watch
	// callback push the received update on to a channel.
	cw := newClusterWatcher()
	cdsCancel := xdsresource.WatchCluster(client, cdsName, cw)
	defer cdsCancel()

	// Configure the management server to return a single cluster resource
	// which is expected to be NACK'ed by the client.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{badClusterResource(cdsName, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify that the expected error is propagated to the watcher.
	u, err := cw.updateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	gotErr := u.(clusterUpdateErrTuple).err
	if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) {
		t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr)
	}
}

// TestCDSWatch_PartialValid covers the case where a response from the
// management server contains both valid and invalid resources and is expected
// to be NACK'ed by the xdsclient. The test verifies that watchers corresponding
// to the valid resource receive the update, while watchers corresponding to the
// invalid resource receive an error.
func (s) TestCDSWatch_PartialValid(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	nodeID := uuid.New().String()
	authority := makeAuthorityName(t.Name())
	bc, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
		Servers: []byte(fmt.Sprintf(`[{
			"server_uri": %q,
			"channel_creds": [{"type": "insecure"}]
		}]`, mgmtServer.Address)),
		Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
		Authorities: map[string]json.RawMessage{
			// Xdstp style resource names used in this test use a slash-removed
			// version of t.Name() as their authority, and the empty config
			// results in the top-level xds server configuration being used for
			// this authority.
			authority: []byte(`{}`),
		},
	})
	if err != nil {
		t.Fatalf("Failed to create bootstrap configuration: %v", err)
	}

	// Create an xDS client with the above bootstrap contents.
	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name: t.Name(),
	})
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for cluster resources. The first watch is expected
	// to receive an error because the received resource is NACK'ed. The second
	// watch is expected to get a good update.
	badResourceName := cdsName
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, badResourceName, cw1)
	defer cdsCancel1()
	goodResourceName := makeNewStyleCDSName(authority)
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, goodResourceName, cw2)
	defer cdsCancel2()

	// Configure the management server with two cluster resources. One of these
	// is a bad resource causing the update to be NACKed.
	resources := e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			badClusterResource(badResourceName, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(goodResourceName, edsName, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify that the expected error is propagated to the watcher which is
	// watching the bad resource.
	u, err := cw1.updateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timeout when waiting for a cluster resource from the management server: %v", err)
	}
	gotErr := u.(clusterUpdateErrTuple).err
	if gotErr == nil || !strings.Contains(gotErr.Error(), wantClusterNACKErr) {
		t.Fatalf("update received with error: %v, want %q", gotErr, wantClusterNACKErr)
	}

	// Verify that the watcher watching the good resource receives a good
	// update.
	wantUpdate := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    goodResourceName,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate); err != nil {
		t.Fatal(err)
	}
}

// TestCDSWatch_PartialResponse covers the case where a response from the
// management server does not contain all requested resources. CDS responses are
// supposed to contain all requested resources, and the absence of one usually
// indicates that the management server does not know about it. In cases where
// the server has never responded with this resource before, the xDS client is
// expected to wait for the watch timeout to expire before concluding that the
// resource does not exist on the server.
func (s) TestCDSWatch_PartialResponse(t *testing.T) {
	mgmtServer := e2e.StartManagementServer(t, e2e.ManagementServerOptions{})

	nodeID := uuid.New().String()
	authority := makeAuthorityName(t.Name())
	bc, err := bootstrap.NewContentsForTesting(bootstrap.ConfigOptionsForTesting{
		Servers: []byte(fmt.Sprintf(`[{
			"server_uri": %q,
			"channel_creds": [{"type": "insecure"}]
		}]`, mgmtServer.Address)),
		Node: []byte(fmt.Sprintf(`{"id": "%s"}`, nodeID)),
		Authorities: map[string]json.RawMessage{
			// Xdstp style resource names used in this test use a slash-removed
			// version of t.Name() as their authority, and the empty config
			// results in the top-level xds server configuration being used for
			// this authority.
			authority: []byte(`{}`),
		},
	})
	if err != nil {
		t.Fatalf("Failed to create bootstrap configuration: %v", err)
	}

	// Create an xDS client with the above bootstrap contents.
	config, err := bootstrap.NewConfigFromContents(bc)
	if err != nil {
		t.Fatalf("Failed to parse bootstrap contents: %s, %v", string(bc), err)
	}
	pool := xdsclient.NewPool(config)
	client, close, err := pool.NewClientForTesting(xdsclient.OptionsForTesting{
		Name: t.Name(),
	})
	if err != nil {
		t.Fatalf("Failed to create xDS client: %v", err)
	}
	defer close()

	// Register two watches for two cluster resources and have the
	// callbacks push the received updates on to a channel.
	resourceName1 := cdsName
	cw1 := newClusterWatcher()
	cdsCancel1 := xdsresource.WatchCluster(client, resourceName1, cw1)
	defer cdsCancel1()
	resourceName2 := makeNewStyleCDSName(authority)
	cw2 := newClusterWatcher()
	cdsCancel2 := xdsresource.WatchCluster(client, resourceName2, cw2)
	defer cdsCancel2()

	// Configure the management server to return only one of the two cluster
	// resources, corresponding to the registered watches.
	resources := e2e.UpdateOptions{
		NodeID:         nodeID,
		Clusters:       []*v3clusterpb.Cluster{e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone)},
		SkipValidation: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for the first watcher.
	wantUpdate1 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName1,
			EDSServiceName: edsName,
		},
	}
	if err := verifyClusterUpdate(ctx, cw1.updateCh, wantUpdate1); err != nil {
		t.Fatal(err)
	}

	// Verify that the second watcher does not get an update with an error.
	if err := verifyNoClusterUpdate(ctx, cw2.updateCh); err != nil {
		t.Fatal(err)
	}

	// Configure the management server to return two cluster resources,
	// corresponding to the registered watches.
	resources = e2e.UpdateOptions{
		NodeID: nodeID,
		Clusters: []*v3clusterpb.Cluster{
			e2e.DefaultCluster(resourceName1, edsName, e2e.SecurityLevelNone),
			e2e.DefaultCluster(resourceName2, edsNameNewStyle, e2e.SecurityLevelNone),
		},
		SkipValidation: true,
	}
	if err := mgmtServer.Update(ctx, resources); err != nil {
		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
	}

	// Verify the contents of the received update for the second watcher.
	wantUpdate2 := clusterUpdateErrTuple{
		update: xdsresource.ClusterUpdate{
			ClusterName:    resourceName2,
			EDSServiceName: edsNameNewStyle,
		},
	}
	if err := verifyClusterUpdate(ctx, cw2.updateCh, wantUpdate2); err != nil {
		t.Fatal(err)
	}

	// Verify that the first watcher gets no update, as the first resource did
	// not change.
	if err := verifyNoClusterUpdate(ctx, cw1.updateCh); err != nil {
		t.Fatal(err)
	}
}