github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/xds/internal/xdsclient/watchers_cluster_test.go

/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package xdsclient

import (
	"context"
	"fmt"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/hxx258456/ccgo/grpc/xds/internal/xdsclient/xdsresource"
)

// TestClusterWatch covers the cases:
// - an update is received after a watch()
// - an update for another resource name
// - an update is received after cancel()
func (s) TestClusterWatch(t *testing.T) {
	testWatch(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
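
// As a rough sketch of what the generic helper above exercises (hypothetical
// illustration; the watcher API shape is assumed from how this package's
// tests drive the client, with WatchCluster returning a cancel func):
//
//	cancelWatch := client.WatchCluster(testCDSName, func(u xdsresource.ClusterUpdate, err error) {
//		// Updates (or watch errors) for testCDSName land here; after
//		// cancelWatch() the callback must not be invoked again.
//	})
//	cancelWatch()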

// TestClusterTwoWatchSameResourceName covers the case where an update is
// received after two watch() calls for the same resource name.
func (s) TestClusterTwoWatchSameResourceName(t *testing.T) {
	testTwoWatchSameResourceName(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
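
// Sketch of the expected fan-out (assumed semantics; cb1/cb2 are hypothetical
// callbacks):
//
//	cancel1 := client.WatchCluster(testCDSName, cb1) // first watch triggers the CDS request
//	cancel2 := client.WatchCluster(testCDSName, cb2) // second watch shares the subscription
//	// A single cluster update for testCDSName should invoke both cb1 and cb2.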

// TestClusterThreeWatchDifferentResourceName covers the case where an update
// is received after three watch() calls for different resource names.
func (s) TestClusterThreeWatchDifferentResourceName(t *testing.T) {
	testThreeWatchDifferentResourceName(t, xdsresource.ClusterResource,
		xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1",
		xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2",
	)
}
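
// Sketch of the expected isolation between names (assumed registration
// pattern: the first two watchers on testCDSName+"1", the third on
// testCDSName+"2"); each watcher sees only the update keyed by its own name:
//
//	updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
//		testCDSName + "1": {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}},
//		testCDSName + "2": {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}},
//	}, xdsresource.UpdateMetadata{})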

// TestClusterWatchAfterCache covers the case where watch is called after the
// update is already in the cache.
func (s) TestClusterWatchAfterCache(t *testing.T) {
	testWatchAfterCache(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
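
// Sketch of the cache-serving behavior exercised above (assumed semantics):
//
//	// The first watcher receives the update from the management server and
//	// the client caches it. A later watcher for the same name is served from
//	// the cache immediately, without another CDS request:
//	cancel2 := client.WatchCluster(testCDSName, cb2) // cb2 fires with the cached update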

// TestClusterWatchExpiryTimer tests the case where the client does not receive
// a CDS response for the request that it sends out. We want the watch callback
// to be invoked with an error once the watchExpiryTimer fires.
func (s) TestClusterWatchExpiryTimer(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	client, _ := testClientSetup(t, true)
	clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName)

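	// No update is injected on purpose: the watch-expiry timer (shortened for
	// tests via testClientSetup's override flag, assumed) should fire and
	// deliver a non-nil error with a zero-value update.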
	u, err := clusterUpdateCh.Receive(ctx)
	if err != nil {
		t.Fatalf("timeout when waiting for cluster update: %v", err)
	}
	gotUpdate := u.(xdsresource.ClusterUpdateErrTuple)
	if gotUpdate.Err == nil || !cmp.Equal(gotUpdate.Update, xdsresource.ClusterUpdate{}) {
		t.Fatalf("unexpected clusterUpdate: (%v, %v), want: (ClusterUpdate{}, non-nil error)", gotUpdate.Update, gotUpdate.Err)
	}
}

// TestClusterWatchExpiryTimerStop tests the case where the client does receive
// a CDS response for the request that it sends out. We want no error even
// after the expiry timeout.
func (s) TestClusterWatchExpiryTimerStop(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	client, ctrlCh := testClientSetup(t, true)
	clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName)
	_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName)

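	// Inject a good update before the expiry timer fires; receiving it is
	// expected to stop the timer (assumed semantics under test).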
	wantUpdate := xdsresource.ClusterUpdate{ClusterName: testEDSName}
	updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
		testCDSName: {Update: wantUpdate},
	}, xdsresource.UpdateMetadata{})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, wantUpdate, nil); err != nil {
		t.Fatal(err)
	}

	// Wait for an error; it should never arrive since the timer was stopped.
	sCtx, sCancel := context.WithTimeout(ctx, defaultTestWatchExpiryTimeout)
	defer sCancel()
	if u, err := clusterUpdateCh.Receive(sCtx); err != context.DeadlineExceeded {
		t.Errorf("unexpected clusterUpdate: %v, %v, want channel recv timeout", u, err)
	}
}

// TestClusterResourceRemoved covers the cases:
// - an update is received after a watch()
// - another update is received, with one resource removed
//   - this should trigger callback with resource removed error
// - one more update without the removed resource
//   - the callback (above) shouldn't receive any update
func (s) TestClusterResourceRemoved(t *testing.T) {
	testResourceRemoved(t, xdsresource.ClusterResource,
		xdsresource.ClusterUpdate{ClusterName: testEDSName + "1"}, testCDSName+"1",
		xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}, testCDSName+"2",
	)
}
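
// Removal is signaled by omitting a name from a later update; a sketch of the
// second push the helper performs (assumed shape):
//
//	updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
//		testCDSName + "2": {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName + "2"}},
//	}, xdsresource.UpdateMetadata{})
//	// The watcher for testCDSName+"1" gets a resource-removed error; the
//	// watcher for testCDSName+"2" gets its update as usual.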

// TestClusterWatchNACKError covers the case where an update is NACK'ed; the
// watcher should also receive the error.
func (s) TestClusterWatchNACKError(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	client, ctrlCh := testClientSetup(t, false)
	clusterUpdateCh, _ := newWatch(t, client, xdsresource.ClusterResource, testCDSName)
	_, updateHandler := getControllerAndPubsub(ctx, t, client, ctrlCh, xdsresource.ClusterResource, testCDSName)

	wantError := fmt.Errorf("testing error")
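	// Inject a NACK'ed update: the error is attached both per-resource and in
	// the metadata's ErrState (assumed to mark the whole response as
	// NACK'ed), so the watcher sees it.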
	updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{testCDSName: {
		Err: wantError,
	}}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: wantError}})
	if err := verifyClusterUpdate(ctx, clusterUpdateCh, xdsresource.ClusterUpdate{}, wantError); err != nil {
		t.Fatal(err)
	}
}

// TestClusterWatchPartialValid covers the case where a response contains both
// valid and invalid resources. The response will be NACK'ed by the xdsclient,
// but watchers with valid resources should still receive the update, while
// those with invalid resources should receive an error.
func (s) TestClusterWatchPartialValid(t *testing.T) {
	testWatchPartialValid(t, xdsresource.ClusterResource, xdsresource.ClusterUpdate{ClusterName: testEDSName}, testCDSName)
}
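
// Sketch of the mixed push the helper injects (assumed shape; the
// "good"/"bad" suffixes are hypothetical):
//
//	updateHandler.NewClusters(map[string]xdsresource.ClusterUpdateErrTuple{
//		testCDSName + "good": {Update: xdsresource.ClusterUpdate{ClusterName: testEDSName}},
//		testCDSName + "bad":  {Err: errors.New("invalid cluster")},
//	}, xdsresource.UpdateMetadata{ErrState: &xdsresource.UpdateErrorMetadata{Err: errors.New("invalid cluster")}})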