google.golang.org/grpc@v1.62.1/xds/internal/xdsclient/tests/eds_watchers_test.go

     1  /*
     2   *
     3   * Copyright 2022 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package xdsclient_test
    20  
    21  import (
    22  	"context"
    23  	"fmt"
    24  	"strings"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/google/go-cmp/cmp"
    29  	"github.com/google/go-cmp/cmp/cmpopts"
    30  	"github.com/google/uuid"
    31  	"google.golang.org/grpc/internal/grpcsync"
    32  	"google.golang.org/grpc/internal/testutils"
    33  	"google.golang.org/grpc/internal/testutils/xds/e2e"
    34  	"google.golang.org/grpc/xds/internal"
    35  	xdstestutils "google.golang.org/grpc/xds/internal/testutils"
    36  	"google.golang.org/grpc/xds/internal/xdsclient"
    37  	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
    38  	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
    39  	"google.golang.org/protobuf/types/known/wrapperspb"
    40  
    41  	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
    42  	v3endpointpb "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
    43  	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
    44  )
    45  
    46  const (
    47  	edsHost1 = "1.foo.bar.com"
    48  	edsHost2 = "2.foo.bar.com"
    49  	edsHost3 = "3.foo.bar.com"
    50  	edsPort1 = 1
    51  	edsPort2 = 2
    52  	edsPort3 = 3
    53  )
    54  
    55  type noopEndpointsWatcher struct{}
    56  
    57  func (noopEndpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) {}
    58  func (noopEndpointsWatcher) OnError(err error)                                  {}
    59  func (noopEndpointsWatcher) OnResourceDoesNotExist()                            {}
    60  
    61  type endpointsUpdateErrTuple struct {
    62  	update xdsresource.EndpointsUpdate
    63  	err    error
    64  }
    65  
    66  type endpointsWatcher struct {
    67  	updateCh *testutils.Channel
    68  }
    69  
    70  func newEndpointsWatcher() *endpointsWatcher {
    71  	return &endpointsWatcher{updateCh: testutils.NewChannel()}
    72  }
    73  
    74  func (ew *endpointsWatcher) OnUpdate(update *xdsresource.EndpointsResourceData) {
    75  	ew.updateCh.Send(endpointsUpdateErrTuple{update: update.Resource})
    76  }
    77  
    78  func (ew *endpointsWatcher) OnError(err error) {
    79  	// When used with a go-control-plane management server that continuously
    80  	// resends resources which are NACKed by the xDS client, using `Replace()`
    81  	// here and in OnResourceDoesNotExist() simplifies tests, since they then
    82  	// have access to the most recently received error.
    83  	ew.updateCh.Replace(endpointsUpdateErrTuple{err: err})
    84  }
    85  
    86  func (ew *endpointsWatcher) OnResourceDoesNotExist() {
    87  	ew.updateCh.Replace(endpointsUpdateErrTuple{err: xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "Endpoints not found in received response")})
    88  }
    89  
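        // A typical use of the watcher above is sketched below. This is a minimal
        // sketch, assuming a test context ctx, an xDS client client and a
        // placeholder resource name, all set up as in the tests that follow. The
        // watch callback pushes updates onto ew.updateCh, from where the test
        // reads them:
        //
        //	ew := newEndpointsWatcher()
        //	edsCancel := xdsresource.WatchEndpoints(client, "some-eds-resource", ew)
        //	defer edsCancel()
        //	u, err := ew.updateCh.Receive(ctx) // blocks until an update or ctx expiry
        //	if err != nil {
        //		t.Fatalf("timeout when waiting for an endpoints update: %v", err)
        //	}
        //	got := u.(endpointsUpdateErrTuple)
        //	// Inspect got.update and got.err as appropriate for the test.
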
    90  // badEndpointsResource returns an endpoints resource for the given
    91  // edsServiceName which contains an endpoint with a load balancing weight of
    92  // `0`. This is expected to be NACK'ed by the xDS client.
    93  func badEndpointsResource(edsServiceName string, host string, ports []uint32) *v3endpointpb.ClusterLoadAssignment {
    94  	e := e2e.DefaultEndpoint(edsServiceName, host, ports)
    95  	e.Endpoints[0].LbEndpoints[0].LoadBalancingWeight = &wrapperspb.UInt32Value{Value: 0}
    96  	return e
    97  }
    98  
    99  // xdsClient is expected to produce an error containing this string when an
   100  // update is received containing an endpoints resource created using
   101  // `badEndpointsResource`.
   102  const wantEndpointsNACKErr = "EDS response contains an endpoint with zero weight"
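
        // Verifying the NACK typically looks like the sketch below (a minimal
        // sketch, assuming ctx and an endpointsWatcher ew registered for a
        // resource built with badEndpointsResource; TestEDSWatch_NACKError below
        // exercises this end to end):
        //
        //	u, err := ew.updateCh.Receive(ctx)
        //	if err != nil {
        //		t.Fatalf("timeout when waiting for an endpoints resource: %v", err)
        //	}
        //	gotErr := u.(endpointsUpdateErrTuple).err
        //	if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) {
        //		t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr)
        //	}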
   103  
   104  // verifyEndpointsUpdate waits for an update to be received on the provided
   105  // update channel and verifies that it matches the expected update.
   106  //
   107  // Returns an error if no update is received before the context deadline expires
   108  // or the received update does not match the expected one.
   109  func verifyEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel, wantUpdate endpointsUpdateErrTuple) error {
   110  	u, err := updateCh.Receive(ctx)
   111  	if err != nil {
   112  		return fmt.Errorf("timeout when waiting for an endpoints resource from the management server: %v", err)
   113  	}
   114  	got := u.(endpointsUpdateErrTuple)
   115  	if wantUpdate.err != nil {
   116  		if gotType, wantType := xdsresource.ErrType(got.err), xdsresource.ErrType(wantUpdate.err); gotType != wantType {
   117  			return fmt.Errorf("received update with error type %v, want %v", gotType, wantType)
   118  		}
   119  	}
   120  	cmpOpts := []cmp.Option{cmpopts.EquateEmpty(), cmpopts.IgnoreFields(xdsresource.EndpointsUpdate{}, "Raw")}
   121  	if diff := cmp.Diff(wantUpdate.update, got.update, cmpOpts...); diff != "" {
   122  		return fmt.Errorf("received unexpected diff in the endpoints resource update: (-want, +got):\n%s", diff)
   123  	}
   124  	return nil
   125  }
   126  
   127  // verifyNoEndpointsUpdate verifies that no endpoints update is received on the
   128  // provided update channel, and returns an error if an update is received.
   129  //
   130  // A very short deadline is used while waiting for the update, as this function
   131  // is intended to be used when an update is not expected.
   132  func verifyNoEndpointsUpdate(ctx context.Context, updateCh *testutils.Channel) error {
   133  	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
   134  	defer sCancel()
   135  	if u, err := updateCh.Receive(sCtx); err != context.DeadlineExceeded {
   136  		return fmt.Errorf("unexpected EndpointsUpdate: %v", u)
   137  	}
   138  	return nil
   139  }
   140  
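        // The two verification helpers above are commonly paired around a watch
        // cancellation, as sketched below (a minimal sketch, assuming ctx, client,
        // ew, edsCancel and wantUpdate as in the tests that follow):
        //
        //	if err := verifyEndpointsUpdate(ctx, ew.updateCh, wantUpdate); err != nil {
        //		t.Fatal(err)
        //	}
        //	edsCancel()
        //	// Push another update from the management server here; the cancelled
        //	// watcher must not be notified. The short deadline used inside
        //	// verifyNoEndpointsUpdate keeps this negative check from blocking.
        //	if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {
        //		t.Fatal(err)
        //	}
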
   141  // TestEDSWatch covers the case where a single endpoint exists for a single
   142  // endpoints resource. The test verifies the following scenarios:
   143  //  1. An update from the management server containing the resource being
   144  //     watched should result in the invocation of the watch callback.
   145  //  2. An update from the management server containing a resource *not* being
   146  //     watched should not result in the invocation of the watch callback.
   147  //  3. After the watch is cancelled, an update from the management server
   148  //     containing the resource that was being watched should not result in the
   149  //     invocation of the watch callback.
   150  //
   151  // The test is run for old and new style names.
   152  func (s) TestEDSWatch(t *testing.T) {
   153  	tests := []struct {
   154  		desc                   string
   155  		resourceName           string
   156  		watchedResource        *v3endpointpb.ClusterLoadAssignment // The resource being watched.
   157  		updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update.
   158  		notWatchedResource     *v3endpointpb.ClusterLoadAssignment // A resource which is not being watched.
   159  		wantUpdate             endpointsUpdateErrTuple
   160  	}{
   161  		{
   162  			desc:                   "old style resource",
   163  			resourceName:           edsName,
   164  			watchedResource:        e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}),
   165  			updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}),
   166  			notWatchedResource:     e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}),
   167  			wantUpdate: endpointsUpdateErrTuple{
   168  				update: xdsresource.EndpointsUpdate{
   169  					Localities: []xdsresource.Locality{
   170  						{
   171  							Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   172  							ID: internal.LocalityID{
   173  								Region:  "region-1",
   174  								Zone:    "zone-1",
   175  								SubZone: "subzone-1",
   176  							},
   177  							Priority: 0,
   178  							Weight:   1,
   179  						},
   180  					},
   181  				},
   182  			},
   183  		},
   184  		{
   185  			desc:                   "new style resource",
   186  			resourceName:           edsNameNewStyle,
   187  			watchedResource:        e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}),
   188  			updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}),
   189  			notWatchedResource:     e2e.DefaultEndpoint("unsubscribed-eds-resource", edsHost3, []uint32{edsPort3}),
   190  			wantUpdate: endpointsUpdateErrTuple{
   191  				update: xdsresource.EndpointsUpdate{
   192  					Localities: []xdsresource.Locality{
   193  						{
   194  							Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   195  							ID: internal.LocalityID{
   196  								Region:  "region-1",
   197  								Zone:    "zone-1",
   198  								SubZone: "subzone-1",
   199  							},
   200  							Priority: 0,
   201  							Weight:   1,
   202  						},
   203  					},
   204  				},
   205  			},
   206  		},
   207  	}
   208  
   209  	for _, test := range tests {
   210  		t.Run(test.desc, func(t *testing.T) {
   211  			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
   212  			defer cleanup()
   213  
   214  			// Create an xDS client with the above bootstrap contents.
   215  			client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
   216  			if err != nil {
   217  				t.Fatalf("Failed to create xDS client: %v", err)
   218  			}
   219  			defer close()
   220  
   221  			// Register a watch for an endpoint resource and have the watch
   222  			// callback push the received update on to a channel.
   223  			ew := newEndpointsWatcher()
   224  			edsCancel := xdsresource.WatchEndpoints(client, test.resourceName, ew)
   225  
   226  			// Configure the management server to return a single endpoint
   227  			// resource, corresponding to the one being watched.
   228  			resources := e2e.UpdateOptions{
   229  				NodeID:         nodeID,
   230  				Endpoints:      []*v3endpointpb.ClusterLoadAssignment{test.watchedResource},
   231  				SkipValidation: true,
   232  			}
   233  			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   234  			defer cancel()
   235  			if err := mgmtServer.Update(ctx, resources); err != nil {
   236  				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   237  			}
   238  
   239  			// Verify the contents of the received update.
   240  			if err := verifyEndpointsUpdate(ctx, ew.updateCh, test.wantUpdate); err != nil {
   241  				t.Fatal(err)
   242  			}
   243  
   244  			// Configure the management server to return an additional endpoint
   245  			// resource, one that we are not interested in.
   246  			resources = e2e.UpdateOptions{
   247  				NodeID:         nodeID,
   248  				Endpoints:      []*v3endpointpb.ClusterLoadAssignment{test.watchedResource, test.notWatchedResource},
   249  				SkipValidation: true,
   250  			}
   251  			if err := mgmtServer.Update(ctx, resources); err != nil {
   252  				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   253  			}
   254  			if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {
   255  				t.Fatal(err)
   256  			}
   257  
   258  			// Cancel the watch and update the resource corresponding to the original
   259  			// watch.  Ensure that the cancelled watch callback is not invoked.
   260  			edsCancel()
   261  			resources = e2e.UpdateOptions{
   262  				NodeID:         nodeID,
   263  				Endpoints:      []*v3endpointpb.ClusterLoadAssignment{test.updatedWatchedResource, test.notWatchedResource},
   264  				SkipValidation: true,
   265  			}
   266  			if err := mgmtServer.Update(ctx, resources); err != nil {
   267  				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   268  			}
   269  			if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {
   270  				t.Fatal(err)
   271  			}
   272  		})
   273  	}
   274  }
   275  
   276  // TestEDSWatch_TwoWatchesForSameResourceName covers the case where two watchers
   277  // exist for a single endpoint resource.  The test verifies the following
   278  // scenarios:
   279  //  1. An update from the management server containing the resource being
   280  //     watched should result in the invocation of both watch callbacks.
   281  //  2. After one of the watches is cancelled, a redundant update from the
   282  //     management server should not result in the invocation of either of the
   283  //     watch callbacks.
   284  //  3. An update from the management server containing the resource being
   285  //     watched should result in the invocation of the un-cancelled watch
   286  //     callback.
   287  //
   288  // The test is run for old and new style names.
   289  func (s) TestEDSWatch_TwoWatchesForSameResourceName(t *testing.T) {
   290  	tests := []struct {
   291  		desc                   string
   292  		resourceName           string
   293  		watchedResource        *v3endpointpb.ClusterLoadAssignment // The resource being watched.
   294  		updatedWatchedResource *v3endpointpb.ClusterLoadAssignment // The watched resource after an update.
   295  		wantUpdateV1           endpointsUpdateErrTuple
   296  		wantUpdateV2           endpointsUpdateErrTuple
   297  	}{
   298  		{
   299  			desc:                   "old style resource",
   300  			resourceName:           edsName,
   301  			watchedResource:        e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}),
   302  			updatedWatchedResource: e2e.DefaultEndpoint(edsName, edsHost2, []uint32{edsPort2}),
   303  			wantUpdateV1: endpointsUpdateErrTuple{
   304  				update: xdsresource.EndpointsUpdate{
   305  					Localities: []xdsresource.Locality{
   306  						{
   307  							Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   308  							ID: internal.LocalityID{
   309  								Region:  "region-1",
   310  								Zone:    "zone-1",
   311  								SubZone: "subzone-1",
   312  							},
   313  							Priority: 0,
   314  							Weight:   1,
   315  						},
   316  					},
   317  				},
   318  			},
   319  			wantUpdateV2: endpointsUpdateErrTuple{
   320  				update: xdsresource.EndpointsUpdate{
   321  					Localities: []xdsresource.Locality{
   322  						{
   323  							Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}},
   324  							ID: internal.LocalityID{
   325  								Region:  "region-1",
   326  								Zone:    "zone-1",
   327  								SubZone: "subzone-1",
   328  							},
   329  							Priority: 0,
   330  							Weight:   1,
   331  						},
   332  					},
   333  				},
   334  			},
   335  		},
   336  		{
   337  			desc:                   "new style resource",
   338  			resourceName:           edsNameNewStyle,
   339  			watchedResource:        e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}),
   340  			updatedWatchedResource: e2e.DefaultEndpoint(edsNameNewStyle, edsHost2, []uint32{edsPort2}),
   341  			wantUpdateV1: endpointsUpdateErrTuple{
   342  				update: xdsresource.EndpointsUpdate{
   343  					Localities: []xdsresource.Locality{
   344  						{
   345  							Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   346  							ID: internal.LocalityID{
   347  								Region:  "region-1",
   348  								Zone:    "zone-1",
   349  								SubZone: "subzone-1",
   350  							},
   351  							Priority: 0,
   352  							Weight:   1,
   353  						},
   354  					},
   355  				},
   356  			},
   357  			wantUpdateV2: endpointsUpdateErrTuple{
   358  				update: xdsresource.EndpointsUpdate{
   359  					Localities: []xdsresource.Locality{
   360  						{
   361  							Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost2, edsPort2), Weight: 1}},
   362  							ID: internal.LocalityID{
   363  								Region:  "region-1",
   364  								Zone:    "zone-1",
   365  								SubZone: "subzone-1",
   366  							},
   367  							Priority: 0,
   368  							Weight:   1,
   369  						},
   370  					},
   371  				},
   372  			},
   373  		},
   374  	}
   375  
   376  	for _, test := range tests {
   377  		t.Run(test.desc, func(t *testing.T) {
   378  			mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
   379  			defer cleanup()
   380  
   381  			// Create an xDS client with the above bootstrap contents.
   382  			client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
   383  			if err != nil {
   384  				t.Fatalf("Failed to create xDS client: %v", err)
   385  			}
   386  			defer close()
   387  
   388  			// Register two watches for the same endpoint resource and have the
   389  			// callbacks push the received updates on to a channel.
   390  			ew1 := newEndpointsWatcher()
   391  			edsCancel1 := xdsresource.WatchEndpoints(client, test.resourceName, ew1)
   392  			defer edsCancel1()
   393  			ew2 := newEndpointsWatcher()
   394  			edsCancel2 := xdsresource.WatchEndpoints(client, test.resourceName, ew2)
   395  
   396  			// Configure the management server to return a single endpoint
   397  			// resource, corresponding to the one being watched.
   398  			resources := e2e.UpdateOptions{
   399  				NodeID:         nodeID,
   400  				Endpoints:      []*v3endpointpb.ClusterLoadAssignment{test.watchedResource},
   401  				SkipValidation: true,
   402  			}
   403  			ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   404  			defer cancel()
   405  			if err := mgmtServer.Update(ctx, resources); err != nil {
   406  				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   407  			}
   408  
   409  			// Verify the contents of the received update.
   410  			if err := verifyEndpointsUpdate(ctx, ew1.updateCh, test.wantUpdateV1); err != nil {
   411  				t.Fatal(err)
   412  			}
   413  			if err := verifyEndpointsUpdate(ctx, ew2.updateCh, test.wantUpdateV1); err != nil {
   414  				t.Fatal(err)
   415  			}
   416  
   417  			// Cancel the second watch and force the management server to push a
   418  			// redundant update for the resource being watched. Neither of the
   419  			// two watch callbacks should be invoked.
   420  			edsCancel2()
   421  			if err := mgmtServer.Update(ctx, resources); err != nil {
   422  				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   423  			}
   424  			if err := verifyNoEndpointsUpdate(ctx, ew1.updateCh); err != nil {
   425  				t.Fatal(err)
   426  			}
   427  			if err := verifyNoEndpointsUpdate(ctx, ew2.updateCh); err != nil {
   428  				t.Fatal(err)
   429  			}
   430  
   431  			// Update the resource being watched. The un-cancelled callback
   432  			// should be invoked while the cancelled one should not be.
   433  			resources = e2e.UpdateOptions{
   434  				NodeID:         nodeID,
   435  				Endpoints:      []*v3endpointpb.ClusterLoadAssignment{test.updatedWatchedResource},
   436  				SkipValidation: true,
   437  			}
   438  			if err := mgmtServer.Update(ctx, resources); err != nil {
   439  				t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   440  			}
   441  			if err := verifyEndpointsUpdate(ctx, ew1.updateCh, test.wantUpdateV2); err != nil {
   442  				t.Fatal(err)
   443  			}
   444  			if err := verifyNoEndpointsUpdate(ctx, ew2.updateCh); err != nil {
   445  				t.Fatal(err)
   446  			}
   447  		})
   448  	}
   449  }
   450  
   451  // TestEDSWatch_ThreeWatchesForDifferentResourceNames covers the case where
   452  // three watchers (two watchers for one resource, and a third watcher for
   453  // another resource) exist across two endpoint resources.  The test verifies
   454  // that an update from the management server containing both resources results
   455  // in the invocation of all watch callbacks.
   456  //
   457  // The test is run with both old and new style names.
   458  func (s) TestEDSWatch_ThreeWatchesForDifferentResourceNames(t *testing.T) {
   459  	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
   460  	defer cleanup()
   461  
   462  	// Create an xDS client with the above bootstrap contents.
   463  	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
   464  	if err != nil {
   465  		t.Fatalf("Failed to create xDS client: %v", err)
   466  	}
   467  	defer close()
   468  
   469  	// Register two watches for the same endpoint resource and have the
   470  	// callbacks push the received updates on to a channel.
   471  	ew1 := newEndpointsWatcher()
   472  	edsCancel1 := xdsresource.WatchEndpoints(client, edsName, ew1)
   473  	defer edsCancel1()
   474  	ew2 := newEndpointsWatcher()
   475  	edsCancel2 := xdsresource.WatchEndpoints(client, edsName, ew2)
   476  	defer edsCancel2()
   477  
   478  	// Register the third watch for a different endpoint resource.
   479  	ew3 := newEndpointsWatcher()
   480  	edsCancel3 := xdsresource.WatchEndpoints(client, edsNameNewStyle, ew3)
   481  	defer edsCancel3()
   482  
   483  	// Configure the management server to return two endpoint resources,
   484  	// corresponding to the registered watches.
   485  	resources := e2e.UpdateOptions{
   486  		NodeID: nodeID,
   487  		Endpoints: []*v3endpointpb.ClusterLoadAssignment{
   488  			e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1}),
   489  			e2e.DefaultEndpoint(edsNameNewStyle, edsHost1, []uint32{edsPort1}),
   490  		},
   491  		SkipValidation: true,
   492  	}
   493  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   494  	defer cancel()
   495  	if err := mgmtServer.Update(ctx, resources); err != nil {
   496  		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   497  	}
   498  
   499  	// Verify the contents of the received update for all the watchers. The two
   500  	// resources returned differ only in the resource name. Therefore the
   501  	// expected update is the same for all the watchers.
   502  	wantUpdate := endpointsUpdateErrTuple{
   503  		update: xdsresource.EndpointsUpdate{
   504  			Localities: []xdsresource.Locality{
   505  				{
   506  					Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   507  					ID: internal.LocalityID{
   508  						Region:  "region-1",
   509  						Zone:    "zone-1",
   510  						SubZone: "subzone-1",
   511  					},
   512  					Priority: 0,
   513  					Weight:   1,
   514  				},
   515  			},
   516  		},
   517  	}
   518  	if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil {
   519  		t.Fatal(err)
   520  	}
   521  	if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil {
   522  		t.Fatal(err)
   523  	}
   524  	if err := verifyEndpointsUpdate(ctx, ew3.updateCh, wantUpdate); err != nil {
   525  		t.Fatal(err)
   526  	}
   527  }
   528  
   529  // TestEDSWatch_ResourceCaching covers the case where a watch is registered for
   530  // a resource which is already present in the cache.  The test verifies that the
   531  // watch callback is invoked with the contents from the cache, instead of a
   532  // request being sent to the management server.
   533  func (s) TestEDSWatch_ResourceCaching(t *testing.T) {
   534  	firstRequestReceived := false
   535  	firstAckReceived := grpcsync.NewEvent()
   536  	secondRequestReceived := grpcsync.NewEvent()
   537  
   538  	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{
   539  		OnStreamRequest: func(id int64, req *v3discoverypb.DiscoveryRequest) error {
   540  			// The first request has an empty version string.
   541  			if !firstRequestReceived && req.GetVersionInfo() == "" {
   542  				firstRequestReceived = true
   543  				return nil
   544  			}
   545  			// The first ack has a non-empty version string.
   546  			if !firstAckReceived.HasFired() && req.GetVersionInfo() != "" {
   547  				firstAckReceived.Fire()
   548  				return nil
   549  			}
   550  			// Any requests after the first request and ack are not expected.
   551  			secondRequestReceived.Fire()
   552  			return nil
   553  		},
   554  	})
   555  	defer cleanup()
   556  
   557  	// Create an xDS client with the above bootstrap contents.
   558  	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
   559  	if err != nil {
   560  		t.Fatalf("Failed to create xDS client: %v", err)
   561  	}
   562  	defer close()
   563  
   564  	// Register a watch for an endpoint resource and have the watch callback
   565  	// push the received update on to a channel.
   566  	ew1 := newEndpointsWatcher()
   567  	edsCancel1 := xdsresource.WatchEndpoints(client, edsName, ew1)
   568  	defer edsCancel1()
   569  
   570  	// Configure the management server to return a single endpoint resource,
   571  	// corresponding to the one we registered a watch for.
   572  	resources := e2e.UpdateOptions{
   573  		NodeID:         nodeID,
   574  		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})},
   575  		SkipValidation: true,
   576  	}
   577  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   578  	defer cancel()
   579  	if err := mgmtServer.Update(ctx, resources); err != nil {
   580  		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   581  	}
   582  
   583  	// Verify the contents of the received update.
   584  	wantUpdate := endpointsUpdateErrTuple{
   585  		update: xdsresource.EndpointsUpdate{
   586  			Localities: []xdsresource.Locality{
   587  				{
   588  					Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   589  					ID: internal.LocalityID{
   590  						Region:  "region-1",
   591  						Zone:    "zone-1",
   592  						SubZone: "subzone-1",
   593  					},
   594  					Priority: 0,
   595  					Weight:   1,
   596  				},
   597  			},
   598  		},
   599  	}
   600  	if err := verifyEndpointsUpdate(ctx, ew1.updateCh, wantUpdate); err != nil {
   601  		t.Fatal(err)
   602  	}
   603  	select {
   604  	case <-ctx.Done():
   605  		t.Fatal("timeout when waiting for receipt of ACK at the management server")
   606  	case <-firstAckReceived.Done():
   607  	}
   608  
   609  	// Register another watch for the same resource. This should get the update
   610  	// from the cache.
   611  	ew2 := newEndpointsWatcher()
   612  	edsCancel2 := xdsresource.WatchEndpoints(client, edsName, ew2)
   613  	defer edsCancel2()
   614  	if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil {
   615  		t.Fatal(err)
   616  	}
   617  
   618  	// No request should get sent out as part of this watch.
   619  	sCtx, sCancel := context.WithTimeout(ctx, defaultTestShortTimeout)
   620  	defer sCancel()
   621  	select {
   622  	case <-sCtx.Done():
   623  	case <-secondRequestReceived.Done():
   624  		t.Fatal("xdsClient sent out request instead of using update from cache")
   625  	}
   626  }
   627  
   628  // TestEDSWatch_ExpiryTimerFiresBeforeResponse tests the case where the client
   629  // does not receive an EDS response for the request that it sends. The test
   630  // verifies that the watch callback is invoked with an error once the
   631  // watchExpiryTimer fires.
   632  func (s) TestEDSWatch_ExpiryTimerFiresBeforeResponse(t *testing.T) {
   633  	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
   634  	if err != nil {
   635  		t.Fatalf("Failed to spin up the xDS management server: %v", err)
   636  	}
   637  	defer mgmtServer.Stop()
   638  
   639  	client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
   640  		XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),
   641  		NodeProto: &v3corepb.Node{},
   642  	}, defaultTestWatchExpiryTimeout, time.Duration(0))
   643  	if err != nil {
   644  		t.Fatalf("failed to create xds client: %v", err)
   645  	}
   646  	defer close()
   647  
   648  	// Register a watch for a resource which is expected to fail with an error
   649  	// after the watch expiry timer fires.
   650  	ew := newEndpointsWatcher()
   651  	edsCancel := xdsresource.WatchEndpoints(client, edsName, ew)
   652  	defer edsCancel()
   653  
   654  	// Wait for the watch expiry timer to fire.
   655  	<-time.After(defaultTestWatchExpiryTimeout)
   656  
   657  	// Verify that an empty update with the expected error is received.
   658  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   659  	defer cancel()
   660  	wantErr := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "")
   661  	if err := verifyEndpointsUpdate(ctx, ew.updateCh, endpointsUpdateErrTuple{err: wantErr}); err != nil {
   662  		t.Fatal(err)
   663  	}
   664  }
   665  
   666  // TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior tests the case where the
   667  // client receives a valid EDS response for the request that it sends. The test
   668  // verifies that the behavior associated with the expiry timer (i.e., callback
   669  // invocation with error) does not take place.
   670  func (s) TestEDSWatch_ValidResponseCancelsExpiryTimerBehavior(t *testing.T) {
   671  	mgmtServer, err := e2e.StartManagementServer(e2e.ManagementServerOptions{})
   672  	if err != nil {
   673  		t.Fatalf("Failed to spin up the xDS management server: %v", err)
   674  	}
   675  	defer mgmtServer.Stop()
   676  
   677  	// Create an xDS client talking to the above management server.
   678  	nodeID := uuid.New().String()
   679  	client, close, err := xdsclient.NewWithConfigForTesting(&bootstrap.Config{
   680  		XDSServer: xdstestutils.ServerConfigForAddress(t, mgmtServer.Address),
   681  		NodeProto: &v3corepb.Node{Id: nodeID},
   682  	}, defaultTestWatchExpiryTimeout, time.Duration(0))
   683  	if err != nil {
   684  		t.Fatalf("failed to create xds client: %v", err)
   685  	}
   686  	defer close()
   687  
   688  	// Register a watch for an endpoint resource and have the watch callback
   689  	// push the received update on to a channel.
   690  	ew := newEndpointsWatcher()
   691  	edsCancel := xdsresource.WatchEndpoints(client, edsName, ew)
   692  	defer edsCancel()
   693  
   694  	// Configure the management server to return a single endpoint resource,
   695  	// corresponding to the one we registered a watch for.
   696  	resources := e2e.UpdateOptions{
   697  		NodeID:         nodeID,
   698  		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{e2e.DefaultEndpoint(edsName, edsHost1, []uint32{edsPort1})},
   699  		SkipValidation: true,
   700  	}
   701  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   702  	defer cancel()
   703  	if err := mgmtServer.Update(ctx, resources); err != nil {
   704  		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   705  	}
   706  
   707  	// Verify the contents of the received update.
   708  	wantUpdate := endpointsUpdateErrTuple{
   709  		update: xdsresource.EndpointsUpdate{
   710  			Localities: []xdsresource.Locality{
   711  				{
   712  					Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   713  					ID: internal.LocalityID{
   714  						Region:  "region-1",
   715  						Zone:    "zone-1",
   716  						SubZone: "subzone-1",
   717  					},
   718  					Priority: 0,
   719  					Weight:   1,
   720  				},
   721  			},
   722  		},
   723  	}
   724  	if err := verifyEndpointsUpdate(ctx, ew.updateCh, wantUpdate); err != nil {
   725  		t.Fatal(err)
   726  	}
   727  
   728  	// Wait for the watch expiry timer to fire, and verify that the callback is
   729  	// not invoked.
   730  	<-time.After(defaultTestWatchExpiryTimeout)
   731  	if err := verifyNoEndpointsUpdate(ctx, ew.updateCh); err != nil {
   732  		t.Fatal(err)
   733  	}
   734  }
   735  
   736  // TestEDSWatch_NACKError covers the case where an update from the management
   737  // server is NACK'ed by the xdsclient. The test verifies that the error is
   738  // propagated to the watcher.
   739  func (s) TestEDSWatch_NACKError(t *testing.T) {
   740  	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
   741  	defer cleanup()
   742  
   743  	// Create an xDS client with the above bootstrap contents.
   744  	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
   745  	if err != nil {
   746  		t.Fatalf("Failed to create xDS client: %v", err)
   747  	}
   748  	defer close()
   749  
   750  	// Register a watch for an endpoints resource and have the watch callback
   751  	// push the received update on to a channel.
   752  	ew := newEndpointsWatcher()
   753  	edsCancel := xdsresource.WatchEndpoints(client, edsName, ew)
   754  	defer edsCancel()
   755  
   756  	// Configure the management server to return a single endpoints resource
   757  	// which is expected to be NACKed by the client.
   758  	resources := e2e.UpdateOptions{
   759  		NodeID:         nodeID,
   760  		Endpoints:      []*v3endpointpb.ClusterLoadAssignment{badEndpointsResource(edsName, edsHost1, []uint32{edsPort1})},
   761  		SkipValidation: true,
   762  	}
   763  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   764  	defer cancel()
   765  	if err := mgmtServer.Update(ctx, resources); err != nil {
   766  		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   767  	}
   768  
   769  	// Verify that the expected error is propagated to the watcher.
   770  	u, err := ew.updateCh.Receive(ctx)
   771  	if err != nil {
   772  		t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err)
   773  	}
   774  	gotErr := u.(endpointsUpdateErrTuple).err
   775  	if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) {
   776  		t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr)
   777  	}
   778  }
   779  
   780  // TestEDSWatch_PartialValid covers the case where a response from the
   781  // management server contains both valid and invalid resources and is expected
   782  // to be NACK'ed by the xdsclient. The test verifies that watchers corresponding
   783  // to the valid resource receive the update, while watchers corresponding to the
   784  // invalid resource receive an error.
   785  func (s) TestEDSWatch_PartialValid(t *testing.T) {
   786  	mgmtServer, nodeID, bootstrapContents, _, cleanup := e2e.SetupManagementServer(t, e2e.ManagementServerOptions{})
   787  	defer cleanup()
   788  
   789  	// Create an xDS client with the above bootstrap contents.
   790  	client, close, err := xdsclient.NewWithBootstrapContentsForTesting(bootstrapContents)
   791  	if err != nil {
   792  		t.Fatalf("Failed to create xDS client: %v", err)
   793  	}
   794  	defer close()
   795  
   796  	// Register two watches for two endpoint resources. The first watch is
   797  	// expected to receive an error because the received resource is NACKed.
   798  	// The second watch is expected to get a good update.
   799  	badResourceName := rdsName
   800  	ew1 := newEndpointsWatcher()
   801  	edsCancel1 := xdsresource.WatchEndpoints(client, badResourceName, ew1)
   802  	defer edsCancel1()
   803  	goodResourceName := ldsNameNewStyle
   804  	ew2 := newEndpointsWatcher()
   805  	edsCancel2 := xdsresource.WatchEndpoints(client, goodResourceName, ew2)
   806  	defer edsCancel2()
   807  
   808  	// Configure the management server to return two endpoints resources,
   809  	// corresponding to the registered watches.
   810  	resources := e2e.UpdateOptions{
   811  		NodeID: nodeID,
   812  		Endpoints: []*v3endpointpb.ClusterLoadAssignment{
   813  			badEndpointsResource(badResourceName, edsHost1, []uint32{edsPort1}),
   814  			e2e.DefaultEndpoint(goodResourceName, edsHost1, []uint32{edsPort1}),
   815  		},
   816  		SkipValidation: true,
   817  	}
   818  	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
   819  	defer cancel()
   820  	if err := mgmtServer.Update(ctx, resources); err != nil {
   821  		t.Fatalf("Failed to update management server with resources: %v, err: %v", resources, err)
   822  	}
   823  
   824  	// Verify that the expected error is propagated to the watcher which
   825  	// requested the bad resource.
   826  	u, err := ew1.updateCh.Receive(ctx)
   827  	if err != nil {
   828  		t.Fatalf("timeout when waiting for an endpoints resource from the management server: %v", err)
   829  	}
   830  	gotErr := u.(endpointsUpdateErrTuple).err
   831  	if gotErr == nil || !strings.Contains(gotErr.Error(), wantEndpointsNACKErr) {
   832  		t.Fatalf("update received with error: %v, want %q", gotErr, wantEndpointsNACKErr)
   833  	}
   834  
   835  	// Verify that the watcher watching the good resource receives an update.
   836  	wantUpdate := endpointsUpdateErrTuple{
   837  		update: xdsresource.EndpointsUpdate{
   838  			Localities: []xdsresource.Locality{
   839  				{
   840  					Endpoints: []xdsresource.Endpoint{{Address: fmt.Sprintf("%s:%d", edsHost1, edsPort1), Weight: 1}},
   841  					ID: internal.LocalityID{
   842  						Region:  "region-1",
   843  						Zone:    "zone-1",
   844  						SubZone: "subzone-1",
   845  					},
   846  					Priority: 0,
   847  					Weight:   1,
   848  				},
   849  			},
   850  		},
   851  	}
   852  	if err := verifyEndpointsUpdate(ctx, ew2.updateCh, wantUpdate); err != nil {
   853  		t.Fatal(err)
   854  	}
   855  }