google.golang.org/grpc@v1.74.2/balancer/ringhash/ringhash_test.go

/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package ringhash

import (
	"context"
	"fmt"
	"testing"
	"time"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/balancer/weight"
	"google.golang.org/grpc/internal/grpctest"
	iringhash "google.golang.org/grpc/internal/ringhash"
	"google.golang.org/grpc/internal/testutils"
	"google.golang.org/grpc/resolver"
)

const (
	defaultTestTimeout      = 10 * time.Second
	defaultTestShortTimeout = 10 * time.Millisecond

	testBackendAddrsCount = 12
)

var (
	testBackendAddrStrs []string
	testConfig          = &iringhash.LBConfig{MinRingSize: 1, MaxRingSize: 10}
)

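// init populates testBackendAddrStrs with distinct synthetic host:port
// strings ("0.0.0.0:0", "1.1.1.1:1", ...) used as backend addresses
// throughout these tests.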
func init() {
	for i := 0; i < testBackendAddrsCount; i++ {
		testBackendAddrStrs = append(testBackendAddrStrs, fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i))
	}
}

// setupTest creates the balancer and does an initial sanity check.
func setupTest(t *testing.T, endpoints []resolver.Endpoint) (*testutils.BalancerClientConn, balancer.Balancer, balancer.Picker) {
	t.Helper()
	cc := testutils.NewBalancerClientConn(t)
	builder := balancer.Get(Name)
	b := builder.Build(cc, balancer.BuildOptions{})
	if b == nil {
		t.Fatalf("builder.Build(%s) failed and returned nil", Name)
	}
	if err := b.UpdateClientConnState(balancer.ClientConnState{
		ResolverState:  resolver.State{Endpoints: endpoints},
		BalancerConfig: testConfig,
	}); err != nil {
		t.Fatalf("UpdateClientConnState returned err: %v", err)
	}

	// The leaf pickfirst balancers are created lazily, only when their
	// endpoint is picked or other endpoints are in TransientFailure. No
	// SubConns should be created immediately.
	select {
	case sc := <-cc.NewSubConnCh:
		t.Errorf("unexpected SubConn creation: %v", sc)
	case <-time.After(defaultTestShortTimeout):
	}

	// Should also have a picker, with all endpoints in Idle.
	p1 := <-cc.NewPickerCh

	ringHashPicker := p1.(*picker)
	if got, want := len(ringHashPicker.endpointStates), len(endpoints); got != want {
		t.Errorf("Number of child balancers = %d, want = %d", got, want)
	}
	for firstAddr, bs := range ringHashPicker.endpointStates {
		if got, want := bs.state.ConnectivityState, connectivity.Idle; got != want {
			t.Errorf("Child balancer connectivity state for address %q = %v, want = %v", firstAddr, got, want)
		}
	}
	return cc, b, p1
}
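
// The tests below derive request hashes from ring entry hashes (e.g.
// firstHash-1 or firstHash+1) to deterministically target a given entry.
// This works because a ring lookup selects the first entry whose hash is at
// or after the request hash, wrapping past the last entry. A minimal sketch
// of that rule (hypothetical helper, not the balancer's actual code):
//
//	func successorIndex(itemHashes []uint64, requestHash uint64) int {
//		for i, h := range itemHashes {
//			if h >= requestHash {
//				return i
//			}
//		}
//		return 0 // wrap around to the first entry
//	}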

type s struct {
	grpctest.Tester
}

func Test(t *testing.T) {
	grpctest.RunSubTests(t, s{})
}

// TestUpdateClientConnState_NewRingSize tests the scenario where the ringhash
// LB policy receives new configuration which specifies new values for the ring
// min and max sizes. The test verifies that a new ring is created and a new
// picker is sent to the ClientConn.
func (s) TestUpdateClientConnState_NewRingSize(t *testing.T) {
	origMinRingSize, origMaxRingSize := 1, 10 // Configured from `testConfig` in `setupTest`
	newMinRingSize, newMaxRingSize := 20, 100

	endpoints := []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}}}
	cc, b, p1 := setupTest(t, endpoints)
	ring1 := p1.(*picker).ring
	if ringSize := len(ring1.items); ringSize < origMinRingSize || ringSize > origMaxRingSize {
		t.Fatalf("Ring created with size %d, want between [%d, %d]", ringSize, origMinRingSize, origMaxRingSize)
	}

	if err := b.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: endpoints},
		BalancerConfig: &iringhash.LBConfig{
			MinRingSize: uint64(newMinRingSize),
			MaxRingSize: uint64(newMaxRingSize),
		},
	}); err != nil {
		t.Fatalf("UpdateClientConnState returned err: %v", err)
	}

	var ring2 *ring
	select {
	case <-time.After(defaultTestTimeout):
		t.Fatal("Timeout when waiting for a picker update after a configuration update")
	case p2 := <-cc.NewPickerCh:
		ring2 = p2.(*picker).ring
	}
	if ringSize := len(ring2.items); ringSize < newMinRingSize || ringSize > newMaxRingSize {
		t.Fatalf("Ring created with size %d, want between [%d, %d]", ringSize, newMinRingSize, newMaxRingSize)
	}
}
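
// For reference, the configuration exercised above corresponds to a service
// config fragment of roughly {"minRingSize": 20, "maxRingSize": 100} (see
// gRFC A42); the test builds the parsed iringhash.LBConfig directly instead
// of going through JSON parsing.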

func (s) TestOneEndpoint(t *testing.T) {
	wantAddr1 := resolver.Address{Addr: testBackendAddrStrs[0]}
	cc, _, p0 := setupTest(t, []resolver.Endpoint{{Addresses: []resolver.Address{wantAddr1}}})
	ring0 := p0.(*picker).ring

	firstHash := ring0.items[0].hash
	// firstHash-1 will pick the first (and only) SubConn from the ring.
	testHash := firstHash - 1
	// The first pick should be queued, and should trigger a connection to the
	// only Endpoint which has a single address.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := p0.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)}); err != balancer.ErrNoSubConnAvailable {
		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
	}
	var sc0 *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc0 = <-cc.NewSubConnCh:
	}
	if got, want := sc0.Addresses[0].Addr, wantAddr1.Addr; got != want {
		t.Fatalf("SubConn.Addresses = %v, want = %v", got, want)
	}
	select {
	case <-sc0.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc0)
	}

	// Send state updates to Ready.
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	if err := cc.WaitForConnectivityState(ctx, connectivity.Ready); err != nil {
		t.Fatal(err)
	}

	// Test pick with one backend.
	p1 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)})
		if gotSCSt.SubConn != sc0 {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0)
		}
	}
}
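
// Note on hashing: these tests attach the request hash to the pick's
// context with iringhash.SetXDSRequestHash. In a real client the hash is
// computed per RPC before it reaches this balancer (for example, by the xDS
// config selector applying the route's hash policies).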

// TestThreeSubConnsAffinity verifies that with 3 SubConns, RPCs with the same
// hash always pick the same SubConn, and that when the picked SubConn goes
// down, another one is picked.
func (s) TestThreeSubConnsAffinity(t *testing.T) {
	endpoints := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}},
	}
	remainingAddrs := map[string]bool{
		testBackendAddrStrs[0]: true,
		testBackendAddrStrs[1]: true,
		testBackendAddrStrs[2]: true,
	}
	cc, _, p0 := setupTest(t, endpoints)
	// This test doesn't update addresses, so this ring will be used by all the
	// pickers.
	ring := p0.(*picker).ring

	firstHash := ring.items[0].hash
	// firstHash+1 will pick the second endpoint from the ring.
	testHash := firstHash + 1
	// The first pick should be queued, and should trigger a connection to the
	// picked endpoint.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := p0.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)}); err != balancer.ErrNoSubConnAvailable {
		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
	}

	// The picked endpoint should be the second in the ring.
	var subConns [3]*testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case subConns[1] = <-cc.NewSubConnCh:
	}
	if got, want := subConns[1].Addresses[0].Addr, ring.items[1].hashKey; got != want {
		t.Fatalf("SubConn.Address = %v, want = %v", got, want)
	}
	select {
	case <-subConns[1].ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", subConns[1])
	}
	delete(remainingAddrs, ring.items[1].hashKey)

	// Turn down the subConn in use.
	subConns[1].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	subConns[1].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})

	// This should trigger a connection to a new endpoint.
	<-cc.NewPickerCh
	var sc *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc = <-cc.NewSubConnCh:
	}
	scAddr := sc.Addresses[0].Addr
	if _, ok := remainingAddrs[scAddr]; !ok {
		t.Fatalf("New SubConn created with previously used address: %q", scAddr)
	}
	delete(remainingAddrs, scAddr)
	select {
	case <-sc.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc)
	}
	if scAddr == ring.items[0].hashKey {
		subConns[0] = sc
	} else if scAddr == ring.items[2].hashKey {
		subConns[2] = sc
	}

	// Turning down the SubConn should cause creation of a connection to the
	// final endpoint.
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc = <-cc.NewSubConnCh:
	}
	scAddr = sc.Addresses[0].Addr
	if _, ok := remainingAddrs[scAddr]; !ok {
		t.Fatalf("New SubConn created with previously used address: %q", scAddr)
	}
	delete(remainingAddrs, scAddr)
	select {
	case <-sc.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc)
	}
	if scAddr == ring.items[0].hashKey {
		subConns[0] = sc
	} else if scAddr == ring.items[2].hashKey {
		subConns[2] = sc
	}
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})

	// All endpoints are in TransientFailure. Make the first endpoint in the
	// ring report Ready. All picks should go to this endpoint which is two
	// indexes away from the endpoint with the chosen hash.
	subConns[0].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
	subConns[0].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	subConns[0].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	if err := cc.WaitForConnectivityState(ctx, connectivity.Ready); err != nil {
		t.Fatalf("Context timed out while waiting for channel to report Ready.")
	}
	p1 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)})
		if gotSCSt.SubConn != subConns[0] {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, subConns[0])
		}
	}

	// Make the last endpoint in the ring report Ready. All picks should go to
	// this endpoint since it is one index away from the chosen hash.
	subConns[2].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
	subConns[2].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	subConns[2].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p2 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)})
		if gotSCSt.SubConn != subConns[2] {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, subConns[2])
		}
	}

	// Make the second endpoint in the ring report Ready. All picks should go to
	// this endpoint as it is the one with the chosen hash.
	subConns[1].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
	subConns[1].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	subConns[1].UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	p3 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p3.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)})
		if gotSCSt.SubConn != subConns[1] {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, subConns[1])
		}
	}
}
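
// The test above exercises the picker's ring walk: when the entry that owns
// the request hash cannot be used, subsequent entries are tried in ring
// order (wrapping around). A simplified sketch of that walk (illustrative
// only; the real picker also triggers connection attempts based on each
// endpoint's connectivity state):
//
//	for i := 0; i < len(ring.items); i++ {
//		e := ring.items[(startIdx+i)%len(ring.items)]
//		if usable(e) { // e.g. the endpoint is READY
//			return e
//		}
//	}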

// TestThreeBackendsAffinityMultiple verifies that with 3 SubConns, RPCs with
// the same hash always pick the same SubConn. It then uses a different hash to
// pick another backend, and verifies that the first hash still picks the first
// backend.
func (s) TestThreeBackendsAffinityMultiple(t *testing.T) {
	wantEndpoints := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}},
	}
	cc, _, p0 := setupTest(t, wantEndpoints)
	// This test doesn't update addresses, so this ring will be used by all the
	// pickers.
	ring0 := p0.(*picker).ring

	firstHash := ring0.items[0].hash
	// firstHash+1 will pick the second SubConn from the ring.
	testHash := firstHash + 1
	// The first pick should be queued, and should trigger a connection to the
	// picked endpoint.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := p0.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)}); err != balancer.ErrNoSubConnAvailable {
		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
	}
	// The picked SubConn should be the second in the ring.
	var sc0 *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc0 = <-cc.NewSubConnCh:
	}
	if got, want := sc0.Addresses[0].Addr, ring0.items[1].hashKey; got != want {
		t.Fatalf("SubConn.Address = %v, want = %v", got, want)
	}
	select {
	case <-sc0.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc0)
	}

	// Send state updates to Ready.
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	if err := cc.WaitForConnectivityState(ctx, connectivity.Ready); err != nil {
		t.Fatal(err)
	}

	// First hash should always pick sc0.
	p1 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p1.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)})
		if gotSCSt.SubConn != sc0 {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0)
		}
	}

	secondHash := ring0.items[1].hash
	// secondHash+1 will pick the third SubConn from the ring.
	testHash2 := secondHash + 1
	if _, err := p0.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash2)}); err != balancer.ErrNoSubConnAvailable {
		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
	}
	var sc1 *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc1 = <-cc.NewSubConnCh:
	}
	if got, want := sc1.Addresses[0].Addr, ring0.items[2].hashKey; got != want {
		t.Fatalf("SubConn.Address = %v, want = %v", got, want)
	}
	select {
	case <-sc1.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc1)
	}
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})

	// With the newly generated picker, testHash2 always picks sc1.
	p2 := <-cc.NewPickerCh
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash2)})
		if gotSCSt.SubConn != sc1 {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc1)
		}
	}
	// But the first hash still picks sc0.
	for i := 0; i < 5; i++ {
		gotSCSt, _ := p2.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, testHash)})
		if gotSCSt.SubConn != sc0 {
			t.Fatalf("picker.Pick, got %v, want SubConn=%v", gotSCSt, sc0)
		}
	}
}

// TestAddrWeightChange covers the following scenarios after setting up the
// balancer with 3 addresses [A, B, C]:
//   - updates balancer with [A, B, C]: a new Picker is sent, but it carries
//     the same ring as before.
//   - updates balancer with [A, B] (C removed): a new Picker is sent and the
//     ring is updated.
//   - updates balancer with [A, B], where B has a weight of 2: a new Picker
//     is sent, and the new ring contains the correct number of entries and
//     weights.
func (s) TestAddrWeightChange(t *testing.T) {
	endpoints := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}},
	}
	cc, b, p0 := setupTest(t, endpoints)
	ring0 := p0.(*picker).ring

	// Updating with the same addresses results in a new picker, but with the
	// same ring.
	if err := b.UpdateClientConnState(balancer.ClientConnState{
		ResolverState:  resolver.State{Endpoints: endpoints},
		BalancerConfig: testConfig,
	}); err != nil {
		t.Fatalf("UpdateClientConnState returned err: %v", err)
	}
	var p1 balancer.Picker
	select {
	case p1 = <-cc.NewPickerCh:
	case <-time.After(defaultTestTimeout):
		t.Fatalf("timeout waiting for picker after UpdateClientConn with same addresses")
	}
	ring1 := p1.(*picker).ring
	if ring1 != ring0 {
		t.Fatalf("new picker with same address has a different ring than before, want same")
	}

	// Delete an address; a new Picker should be sent.
	if err := b.UpdateClientConnState(balancer.ClientConnState{
		ResolverState:  resolver.State{Endpoints: endpoints[:2]},
		BalancerConfig: testConfig,
	}); err != nil {
		t.Fatalf("UpdateClientConnState returned err: %v", err)
	}
	var p2 balancer.Picker
	select {
	case p2 = <-cc.NewPickerCh:
	case <-time.After(defaultTestTimeout):
		t.Fatalf("timeout waiting for picker after UpdateClientConn with different addresses")
	}
	ring2 := p2.(*picker).ring
	if ring2 == ring0 {
		t.Fatalf("new picker after removing address has the same ring as before, want different")
	}

	// Another update with the same addresses, but different weight.
	if err := b.UpdateClientConnState(balancer.ClientConnState{
		ResolverState: resolver.State{Endpoints: []resolver.Endpoint{
			endpoints[0],
			weight.Set(endpoints[1], weight.EndpointInfo{Weight: 2}),
		}},
		BalancerConfig: testConfig,
	}); err != nil {
		t.Fatalf("UpdateClientConnState returned err: %v", err)
	}
	var p3 balancer.Picker
	select {
	case p3 = <-cc.NewPickerCh:
	case <-time.After(defaultTestTimeout):
		t.Fatalf("timeout waiting for picker after UpdateClientConn with different addresses")
	}
	if p3.(*picker).ring == ring2 {
		t.Fatalf("new picker after changing address weight has the same ring as before, want different")
	}
	// With the new update, the ring must contain 3 entries: one for
	// testBackendAddrStrs[0] with weight 1, and two for testBackendAddrStrs[1]
	// with weight 2.
	if len(p3.(*picker).ring.items) != 3 {
		t.Fatalf("new picker after changing address weight has %d entries, want 3", len(p3.(*picker).ring.items))
	}
	for _, i := range p3.(*picker).ring.items {
		if i.hashKey == testBackendAddrStrs[0] {
			if i.weight != 1 {
				t.Fatalf("new picker after changing address weight has weight %d for %v, want 1", i.weight, i.hashKey)
			}
		}
		if i.hashKey == testBackendAddrStrs[1] {
			if i.weight != 2 {
				t.Fatalf("new picker after changing address weight has weight %d for %v, want 2", i.weight, i.hashKey)
			}
		}
	}
}
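
// Endpoint weights scale how many entries an endpoint contributes to the
// ring. Here the normalized weights are 1/3 and 2/3, and 3 is the smallest
// size within [MinRingSize, MaxRingSize] at which the lightest endpoint
// still gets an entry, which matches the 3 entries asserted above. As a
// rough guide (an approximation, not the exact ring-building code):
//
//	entries(endpoint) ~ ringSize * weight(endpoint) / totalWeight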

// TestAutoConnectEndpointOnTransientFailure covers the situation when an
// endpoint fails. It verifies that a new endpoint is automatically tried
// (without a pick) when there is no endpoint already in Connecting state.
func (s) TestAutoConnectEndpointOnTransientFailure(t *testing.T) {
	wantEndpoints := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[0]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[1]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[2]}}},
		{Addresses: []resolver.Address{{Addr: testBackendAddrStrs[3]}}},
	}
	cc, _, p0 := setupTest(t, wantEndpoints)

	// ringhash won't tell SCs to connect until there is an RPC, so simulate
	// one now.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	ctx = iringhash.SetXDSRequestHash(ctx, 0)
	defer cancel()
	p0.Pick(balancer.PickInfo{Ctx: ctx})

	// With request hash 0, the picked SubConn should be the first entry in
	// the ring.
	var sc0 *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc0 = <-cc.NewSubConnCh:
	}
	select {
	case <-sc0.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc0)
	}

	// Turn the first subconn to transient failure. This should set the overall
	// connectivity state to CONNECTING.
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
	cc.WaitForConnectivityState(ctx, connectivity.Connecting)

	// This will trigger the second subconn to connect, since the only endpoint
	// attempted so far is in TransientFailure and no other endpoint is
	// connecting.
	var sc1 *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc1 = <-cc.NewSubConnCh:
	}
	select {
	case <-sc1.ConnectCh:
	case <-time.After(defaultTestShortTimeout):
		t.Fatalf("timeout waiting for Connect() from SubConn %v", sc1)
	}

	// Turn the second subconn to TF. This will set the overall state to TF.
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})
	sc1.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})
	cc.WaitForConnectivityState(ctx, connectivity.TransientFailure)

	// It will trigger the third subconn to connect.
	var sc2 *testutils.TestSubConn
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case sc2 = <-cc.NewSubConnCh:
	}
	select {
	case <-sc2.ConnectCh:
	case <-time.After(defaultTestShortTimeout):
		t.Fatalf("timeout waiting for Connect() from SubConn %v", sc2)
	}

	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})

	// Move the first SubConn back to CONNECTING: first make it READY, then
	// IDLE, and then trigger a pick so that it reconnects.
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	cc.WaitForConnectivityState(ctx, connectivity.Ready)
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Idle})
	// Since one endpoint is in TF and one in CONNECTING, the aggregated state
	// will be CONNECTING.
	cc.WaitForConnectivityState(ctx, connectivity.Connecting)
	p1 := <-cc.NewPickerCh
	p1.Pick(balancer.PickInfo{Ctx: ctx})
	select {
	case <-sc0.ConnectCh:
	case <-time.After(defaultTestTimeout):
		t.Errorf("timeout waiting for Connect() from SubConn %v", sc0)
	}
	sc0.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.Connecting})

	// This will not trigger any new SubConns to be created, because sc0 is
	// still attempting to connect, and we only need one SubConn to connect.
	sc2.UpdateState(balancer.SubConnState{ConnectivityState: connectivity.TransientFailure})

	select {
	case sc := <-cc.NewSubConnCh:
		t.Fatalf("unexpected SubConn creation: %v", sc)
	case <-sc0.ConnectCh:
		t.Fatalf("unexpected Connect() from SubConn %v", sc0)
	case <-sc1.ConnectCh:
		t.Fatalf("unexpected Connect() from SubConn %v", sc1)
	case <-sc2.ConnectCh:
		t.Fatalf("unexpected Connect() from SubConn %v", sc2)
	case <-time.After(defaultTestShortTimeout):
	}
}
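
// The sequence above relies on ringhash's recovery behavior: when no
// endpoint is in CONNECTING state and the aggregated state is not READY,
// the balancer proactively asks an endpoint to connect so the channel can
// exit TRANSIENT_FAILURE without waiting for another pick.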

func (s) TestAggregatedConnectivityState(t *testing.T) {
	tests := []struct {
		name           string
		endpointStates []connectivity.State
		want           connectivity.State
	}{
		{
			name:           "one ready",
			endpointStates: []connectivity.State{connectivity.Ready},
			want:           connectivity.Ready,
		},
		{
			name:           "one connecting",
			endpointStates: []connectivity.State{connectivity.Connecting},
			want:           connectivity.Connecting,
		},
		{
			name:           "one ready one transient failure",
			endpointStates: []connectivity.State{connectivity.Ready, connectivity.TransientFailure},
			want:           connectivity.Ready,
		},
		{
			name:           "one connecting one transient failure",
			endpointStates: []connectivity.State{connectivity.Connecting, connectivity.TransientFailure},
			want:           connectivity.Connecting,
		},
		{
			name:           "one connecting two transient failure",
			endpointStates: []connectivity.State{connectivity.Connecting, connectivity.TransientFailure, connectivity.TransientFailure},
			want:           connectivity.TransientFailure,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			bal := &ringhashBalancer{endpointStates: resolver.NewEndpointMap[*endpointState]()}
			for i, cs := range tt.endpointStates {
				es := &endpointState{
					state: balancer.State{ConnectivityState: cs},
				}
				ep := resolver.Endpoint{Addresses: []resolver.Address{{Addr: fmt.Sprintf("%d.%d.%d.%d:%d", i, i, i, i, i)}}}
				bal.endpointStates.Set(ep, es)
			}
			if got := bal.aggregatedStateLocked(); got != tt.want {
				t.Errorf("aggregatedStateLocked() = %v, want %v", got, tt.want)
			}
		})
	}
}
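
// Together these cases encode the ring hash aggregation rule (see gRFC
// A42): READY if any endpoint is READY; otherwise TRANSIENT_FAILURE once
// two or more endpoints are in TRANSIENT_FAILURE, even if another endpoint
// is still connecting; otherwise CONNECTING while an endpoint is attempting
// to connect.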

type testKeyType string

const testKey testKeyType = "grpc.lb.ringhash.testKey"

type testAttribute struct {
	content string
}

func setTestAttrAddr(addr resolver.Address, content string) resolver.Address {
	addr.BalancerAttributes = addr.BalancerAttributes.WithValue(testKey, testAttribute{content})
	return addr
}

func setTestAttrEndpoint(endpoint resolver.Endpoint, content string) resolver.Endpoint {
	endpoint.Attributes = endpoint.Attributes.WithValue(testKey, testAttribute{content})
	return endpoint
}

// TestAddrBalancerAttributesChange tests the case where the ringhash balancer
// receives a ClientConnState update with the same config and addresses as the
// previous update. Although the `BalancerAttributes` and endpoint attribute
// contents are the same, the pointers are different. This test verifies that
// subConns are not recreated in this scenario.
func (s) TestAddrBalancerAttributesChange(t *testing.T) {
	content := "test"
	addrs1 := []resolver.Address{setTestAttrAddr(resolver.Address{Addr: testBackendAddrStrs[0]}, content)}
	wantEndpoints1 := []resolver.Endpoint{
		setTestAttrEndpoint(resolver.Endpoint{Addresses: addrs1}, content),
	}
	cc, b, p0 := setupTest(t, wantEndpoints1)
	ring0 := p0.(*picker).ring

	firstHash := ring0.items[0].hash
	// The first pick should be queued, and should trigger a connection to the
	// only Endpoint which has a single address.
	ctx, cancel := context.WithTimeout(context.Background(), defaultTestTimeout)
	defer cancel()
	if _, err := p0.Pick(balancer.PickInfo{Ctx: iringhash.SetXDSRequestHash(ctx, firstHash)}); err != balancer.ErrNoSubConnAvailable {
		t.Fatalf("first pick returned err %v, want %v", err, balancer.ErrNoSubConnAvailable)
	}
	select {
	case <-ctx.Done():
		t.Fatalf("Timed out waiting for SubConn creation.")
	case <-cc.NewSubConnCh:
	}

	addrs2 := []resolver.Address{setTestAttrAddr(resolver.Address{Addr: testBackendAddrStrs[0]}, content)}
	wantEndpoints2 := []resolver.Endpoint{setTestAttrEndpoint(resolver.Endpoint{Addresses: addrs2}, content)}
	if err := b.UpdateClientConnState(balancer.ClientConnState{
		ResolverState:  resolver.State{Endpoints: wantEndpoints2},
		BalancerConfig: testConfig,
	}); err != nil {
		t.Fatalf("UpdateClientConnState returned err: %v", err)
	}
	select {
	case <-cc.NewSubConnCh:
		t.Fatal("new subConn created for an update with the same addresses")
	case <-time.After(defaultTestShortTimeout):
	}
}
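
// No new SubConn is expected above because endpoints are identified by
// their addresses when updates are deduplicated; attribute values (and
// their pointers) are not part of that identity, which is what this test
// verifies.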