gitee.com/zhaochuninhefei/gmgo@v0.0.31-0.20240209061119-069254a02979/grpc/xds/internal/test/e2e/e2e_test.go (about)

     1  /*
     2   *
     3   * Copyright 2021 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   */
    17  
    18  package e2e
    19  
    20  import (
    21  	"bytes"
    22  	"context"
    23  	"flag"
    24  	"fmt"
    25  	"os"
    26  	"strconv"
    27  	"testing"
    28  	"time"
    29  
    30  	v3clusterpb "gitee.com/zhaochuninhefei/gmgo/go-control-plane/envoy/config/cluster/v3"
    31  	v3routepb "gitee.com/zhaochuninhefei/gmgo/go-control-plane/envoy/config/route/v3"
    32  	channelzpb "gitee.com/zhaochuninhefei/gmgo/grpc/channelz/grpc_channelz_v1"
    33  	testpb "gitee.com/zhaochuninhefei/gmgo/grpc/interop/grpc_testing"
    34  	"gitee.com/zhaochuninhefei/gmgo/grpc/xds/internal/testutils/e2e"
    35  )
    36  
// Command-line flags locating the pre-built interop client and server
// binaries exercised by these end-to-end tests. When a binary is missing,
// setup skips the test instead of failing it.
var (
	clientPath = flag.String("client", "./binaries/client", "The interop client")
	serverPath = flag.String("server", "./binaries/server", "The interop server")
)
    41  
// testOpts configures a single end-to-end test run created by setup.
type testOpts struct {
	testName     string   // test name; also used to build the xds:/// dial target
	backendCount int      // number of interop servers to start; 0 means 1 (see setup)
	clientFlags  []string // extra command-line flags passed to the interop client
}
    47  
    48  func setup(t *testing.T, opts testOpts) (*controlPlane, *client, []*server) {
    49  	t.Helper()
    50  	if _, err := os.Stat(*clientPath); os.IsNotExist(err) {
    51  		t.Skip("skipped because client is not found")
    52  	}
    53  	if _, err := os.Stat(*serverPath); os.IsNotExist(err) {
    54  		t.Skip("skipped because server is not found")
    55  	}
    56  	backendCount := 1
    57  	if opts.backendCount != 0 {
    58  		backendCount = opts.backendCount
    59  	}
    60  
    61  	cp, err := newControlPlane()
    62  	if err != nil {
    63  		t.Fatalf("failed to start control-plane: %v", err)
    64  	}
    65  	t.Cleanup(cp.stop)
    66  
    67  	var clientLog bytes.Buffer
    68  	c, err := newClient(fmt.Sprintf("xds:///%s", opts.testName), *clientPath, cp.bootstrapContent, &clientLog, opts.clientFlags...)
    69  	if err != nil {
    70  		t.Fatalf("failed to start client: %v", err)
    71  	}
    72  	t.Cleanup(c.stop)
    73  
    74  	var serverLog bytes.Buffer
    75  	servers, err := newServers(opts.testName, *serverPath, cp.bootstrapContent, &serverLog, backendCount)
    76  	if err != nil {
    77  		t.Fatalf("failed to start server: %v", err)
    78  	}
    79  	t.Cleanup(func() {
    80  		for _, s := range servers {
    81  			s.stop()
    82  		}
    83  	})
    84  	t.Cleanup(func() {
    85  		// TODO: find a better way to print the log. They are long, and hide the failure.
    86  		t.Logf("\n----- client logs -----\n%v", clientLog.String())
    87  		t.Logf("\n----- server logs -----\n%v", serverLog.String())
    88  	})
    89  	return cp, c, servers
    90  }
    91  
    92  func TestPingPong(t *testing.T) {
    93  	const testName = "pingpong"
    94  	cp, c, _ := setup(t, testOpts{testName: testName})
    95  
    96  	resources := e2e.DefaultClientResources(e2e.ResourceParams{
    97  		DialTarget: testName,
    98  		NodeID:     cp.nodeID,
    99  		Host:       "localhost",
   100  		Port:       serverPort,
   101  		SecLevel:   e2e.SecurityLevelNone,
   102  	})
   103  
   104  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   105  	defer cancel()
   106  	if err := cp.server.Update(ctx, resources); err != nil {
   107  		t.Fatalf("failed to update control plane resources: %v", err)
   108  	}
   109  
   110  	st, err := c.clientStats(ctx)
   111  	if err != nil {
   112  		t.Fatalf("failed to get client stats: %v", err)
   113  	}
   114  	if st.NumFailures != 0 {
   115  		t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
   116  	}
   117  }
   118  
   119  // TestAffinity covers the affinity tests with ringhash policy.
   120  // - client is configured to use ringhash, with 3 backends
   121  // - all RPCs will hash a specific metadata header
   122  // - verify that
   123  //   - all RPCs with the same metadata value are sent to the same backend
   124  //   - only one backend is Ready
   125  // - send more RPCs with different metadata values until a new backend is picked, and verify that
   126  //   - only two backends are in Ready
   127  func TestAffinity(t *testing.T) {
   128  	const (
   129  		testName     = "affinity"
   130  		backendCount = 3
   131  		testMDKey    = "xds_md"
   132  		testMDValue  = "unary_yranu"
   133  	)
   134  	cp, c, servers := setup(t, testOpts{
   135  		testName:     testName,
   136  		backendCount: backendCount,
   137  		clientFlags:  []string{"--rpc=EmptyCall", fmt.Sprintf("--metadata=EmptyCall:%s:%s", testMDKey, testMDValue)},
   138  	})
   139  
   140  	resources := e2e.DefaultClientResources(e2e.ResourceParams{
   141  		DialTarget: testName,
   142  		NodeID:     cp.nodeID,
   143  		Host:       "localhost",
   144  		Port:       serverPort,
   145  		SecLevel:   e2e.SecurityLevelNone,
   146  	})
   147  
   148  	// Update EDS to multiple backends.
   149  	var ports []uint32
   150  	for _, s := range servers {
   151  		ports = append(ports, uint32(s.port))
   152  	}
   153  	edsMsg := resources.Endpoints[0]
   154  	resources.Endpoints[0] = e2e.DefaultEndpoint(
   155  		edsMsg.ClusterName,
   156  		"localhost",
   157  		ports,
   158  	)
   159  
   160  	// Update CDS lbpolicy to ringhash.
   161  	cdsMsg := resources.Clusters[0]
   162  	cdsMsg.LbPolicy = v3clusterpb.Cluster_RING_HASH
   163  
   164  	// Update RDS to hash the header.
   165  	rdsMsg := resources.Routes[0]
   166  	rdsMsg.VirtualHosts[0].Routes[0].Action = &v3routepb.Route_Route{Route: &v3routepb.RouteAction{
   167  		ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: cdsMsg.Name},
   168  		HashPolicy: []*v3routepb.RouteAction_HashPolicy{{
   169  			PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{
   170  				Header: &v3routepb.RouteAction_HashPolicy_Header{
   171  					HeaderName: testMDKey,
   172  				},
   173  			},
   174  		}},
   175  	}}
   176  
   177  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   178  	defer cancel()
   179  	if err := cp.server.Update(ctx, resources); err != nil {
   180  		t.Fatalf("failed to update control plane resources: %v", err)
   181  	}
   182  
   183  	// Note: We can skip CSDS check because there's no long delay as in TD.
   184  	//
   185  	// The client stats check doesn't race with the xds resource update because
   186  	// there's only one version of xds resource, updated at the beginning of the
   187  	// test. So there's no need to retry the stats call.
   188  	//
   189  	// In the future, we may add tests that update xds in the middle. Then we
   190  	// either need to retry clientStats(), or make a CSDS check before so the
   191  	// result is stable.
   192  
   193  	st, err := c.clientStats(ctx)
   194  	if err != nil {
   195  		t.Fatalf("failed to get client stats: %v", err)
   196  	}
   197  	if st.NumFailures != 0 {
   198  		t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
   199  	}
   200  	if len(st.RpcsByPeer) != 1 {
   201  		t.Fatalf("more than 1 backends got traffic: %v, want 1", st.RpcsByPeer)
   202  	}
   203  
   204  	// Call channelz to verify that only one subchannel is in state Ready.
   205  	scs, err := c.channelzSubChannels(ctx)
   206  	if err != nil {
   207  		t.Fatalf("failed to fetch channelz: %v", err)
   208  	}
   209  	verifySubConnStates(t, scs, map[channelzpb.ChannelConnectivityState_State]int{
   210  		channelzpb.ChannelConnectivityState_READY: 1,
   211  		channelzpb.ChannelConnectivityState_IDLE:  2,
   212  	})
   213  
   214  	// Send Unary call with different metadata value with integers starting from
   215  	// 0. Stop when a second peer is picked.
   216  	var (
   217  		diffPeerPicked bool
   218  		mdValue        int
   219  	)
   220  	for !diffPeerPicked {
   221  		if err := c.configRPCs(ctx, &testpb.ClientConfigureRequest{
   222  			Types: []testpb.ClientConfigureRequest_RpcType{
   223  				testpb.ClientConfigureRequest_EMPTY_CALL,
   224  				testpb.ClientConfigureRequest_UNARY_CALL,
   225  			},
   226  			Metadata: []*testpb.ClientConfigureRequest_Metadata{
   227  				{Type: testpb.ClientConfigureRequest_EMPTY_CALL, Key: testMDKey, Value: testMDValue},
   228  				{Type: testpb.ClientConfigureRequest_UNARY_CALL, Key: testMDKey, Value: strconv.Itoa(mdValue)},
   229  			},
   230  		}); err != nil {
   231  			t.Fatalf("failed to configure RPC: %v", err)
   232  		}
   233  
   234  		st, err := c.clientStats(ctx)
   235  		if err != nil {
   236  			t.Fatalf("failed to get client stats: %v", err)
   237  		}
   238  		if st.NumFailures != 0 {
   239  			t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
   240  		}
   241  		if len(st.RpcsByPeer) == 2 {
   242  			break
   243  		}
   244  
   245  		mdValue++
   246  	}
   247  
   248  	// Call channelz to verify that only one subchannel is in state Ready.
   249  	scs2, err := c.channelzSubChannels(ctx)
   250  	if err != nil {
   251  		t.Fatalf("failed to fetch channelz: %v", err)
   252  	}
   253  	verifySubConnStates(t, scs2, map[channelzpb.ChannelConnectivityState_State]int{
   254  		channelzpb.ChannelConnectivityState_READY: 2,
   255  		channelzpb.ChannelConnectivityState_IDLE:  1,
   256  	})
   257  }