gitee.com/ks-custle/core-gm@v0.0.0-20230922171213-b83bdd97b62c/grpc/xds/internal/test/e2e/e2e_test.go

/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package e2e

import (
	"bytes"
	"context"
	"flag"
	"fmt"
	"os"
	"strconv"
	"testing"
	"time"

	v3clusterpb "gitee.com/ks-custle/core-gm/go-control-plane/envoy/config/cluster/v3"
	v3routepb "gitee.com/ks-custle/core-gm/go-control-plane/envoy/config/route/v3"
	channelzpb "gitee.com/ks-custle/core-gm/grpc/channelz/grpc_channelz_v1"
	testpb "gitee.com/ks-custle/core-gm/grpc/interop/grpc_testing"
	"gitee.com/ks-custle/core-gm/grpc/xds/internal/testutils/e2e"
)
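
// These tests drive prebuilt xds interop client and server binaries against
// an in-process xDS management server (the testutils/e2e helpers imported
// above).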

var (
	clientPath = flag.String("client", "./binaries/client", "The interop client")
	serverPath = flag.String("server", "./binaries/server", "The interop server")
)
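
// The binary locations can be overridden on the command line, for example
// (paths are illustrative):
//
//	go test . -client=/path/to/interop/client -server=/path/to/interop/server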

// testOpts configures a single test case: testName doubles as the xds dial
// target, backendCount is the number of interop servers to start (defaults
// to 1), and clientFlags are extra flags passed to the interop client.
type testOpts struct {
	testName     string
	backendCount int
	clientFlags  []string
}

func setup(t *testing.T, opts testOpts) (*controlPlane, *client, []*server) {
	t.Helper()
	if _, err := os.Stat(*clientPath); os.IsNotExist(err) {
		t.Skipf("skipping: interop client binary not found at %q", *clientPath)
	}
	if _, err := os.Stat(*serverPath); os.IsNotExist(err) {
		t.Skipf("skipping: interop server binary not found at %q", *serverPath)
	}
	backendCount := 1
	if opts.backendCount != 0 {
		backendCount = opts.backendCount
	}

	cp, err := newControlPlane()
	if err != nil {
		t.Fatalf("failed to start control-plane: %v", err)
	}
	t.Cleanup(cp.stop)

	var clientLog bytes.Buffer
	c, err := newClient(fmt.Sprintf("xds:///%s", opts.testName), *clientPath, cp.bootstrapContent, &clientLog, opts.clientFlags...)
	if err != nil {
		t.Fatalf("failed to start client: %v", err)
	}
	t.Cleanup(c.stop)

	var serverLog bytes.Buffer
	servers, err := newServers(opts.testName, *serverPath, cp.bootstrapContent, &serverLog, backendCount)
	if err != nil {
		t.Fatalf("failed to start server: %v", err)
	}
	t.Cleanup(func() {
		for _, s := range servers {
			s.stop()
		}
	})
	t.Cleanup(func() {
		// TODO: find a better way to print the logs; they are long and can
		// hide the actual failure.
		t.Logf("\n----- client logs -----\n%v", clientLog.String())
		t.Logf("\n----- server logs -----\n%v", serverLog.String())
	})
	return cp, c, servers
}
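
// A test built on setup typically follows the shape below (a sketch; the
// resources pushed vary per test):
//
//	cp, c, _ := setup(t, testOpts{testName: "foo"})
//	resources := e2e.DefaultClientResources(e2e.ResourceParams{ /* ... */ })
//	if err := cp.server.Update(ctx, resources); err != nil { /* fail */ }
//	st, err := c.clientStats(ctx) // poll RPC outcomes from the interop client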

func TestPingPong(t *testing.T) {
	const testName = "pingpong"
	cp, c, _ := setup(t, testOpts{testName: testName})

	resources := e2e.DefaultClientResources(e2e.ResourceParams{
		DialTarget: testName,
		NodeID:     cp.nodeID,
		Host:       "localhost",
		Port:       serverPort,
		SecLevel:   e2e.SecurityLevelNone,
	})
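	// The resources above form a standard LDS->RDS->CDS->EDS chain that
	// points the xds:///pingpong target at the single backend on
	// localhost:serverPort.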

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := cp.server.Update(ctx, resources); err != nil {
		t.Fatalf("failed to update control plane resources: %v", err)
	}

	st, err := c.clientStats(ctx)
	if err != nil {
		t.Fatalf("failed to get client stats: %v", err)
	}
	if st.NumFailures != 0 {
		t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
	}
}

// TestAffinity covers the affinity tests with the ringhash policy.
// - the client is configured to use ringhash, with 3 backends
// - all RPCs hash a specific metadata header
// - verify that
//   - all RPCs with the same metadata value are sent to the same backend
//   - only one backend is Ready
// - send more RPCs with different metadata values until a new backend is
//   picked, and verify that
//   - only two backends are Ready
func TestAffinity(t *testing.T) {
	const (
		testName     = "affinity"
		backendCount = 3
		testMDKey    = "xds_md"
		testMDValue  = "unary_yranu"
	)
	cp, c, servers := setup(t, testOpts{
		testName:     testName,
		backendCount: backendCount,
		clientFlags:  []string{"--rpc=EmptyCall", fmt.Sprintf("--metadata=EmptyCall:%s:%s", testMDKey, testMDValue)},
	})

	resources := e2e.DefaultClientResources(e2e.ResourceParams{
		DialTarget: testName,
		NodeID:     cp.nodeID,
		Host:       "localhost",
		Port:       serverPort,
		SecLevel:   e2e.SecurityLevelNone,
	})

	// Update EDS to cover all the backends.
	var ports []uint32
	for _, s := range servers {
		ports = append(ports, uint32(s.port))
	}
	edsMsg := resources.Endpoints[0]
	resources.Endpoints[0] = e2e.DefaultEndpoint(
		edsMsg.ClusterName,
		"localhost",
		ports,
	)

	// Update the CDS lb policy to ringhash.
	cdsMsg := resources.Clusters[0]
	cdsMsg.LbPolicy = v3clusterpb.Cluster_RING_HASH

	// Update RDS to hash the test metadata header.
	rdsMsg := resources.Routes[0]
	rdsMsg.VirtualHosts[0].Routes[0].Action = &v3routepb.Route_Route{Route: &v3routepb.RouteAction{
		ClusterSpecifier: &v3routepb.RouteAction_Cluster{Cluster: cdsMsg.Name},
		HashPolicy: []*v3routepb.RouteAction_HashPolicy{{
			PolicySpecifier: &v3routepb.RouteAction_HashPolicy_Header_{
				Header: &v3routepb.RouteAction_HashPolicy_Header{
					HeaderName: testMDKey,
				},
			},
		}},
	}}
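
	// With RING_HASH and the header hash policy above, RPCs carrying the same
	// value for testMDKey hash to the same point on the ring and are therefore
	// routed to the same backend; ring hash connects to backends lazily, so
	// backends that are never picked stay IDLE.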

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := cp.server.Update(ctx, resources); err != nil {
		t.Fatalf("failed to update control plane resources: %v", err)
	}

	// Note: we can skip the CSDS check because there's no long delay as in TD.
	//
	// The client stats check doesn't race with the xds resource update because
	// there's only one version of the xds resources, updated at the beginning
	// of the test, so there's no need to retry the stats call.
	//
	// In the future, we may add tests that update xds in the middle. Then we
	// either need to retry clientStats(), or do a CSDS check first so the
	// result is stable.

	st, err := c.clientStats(ctx)
	if err != nil {
		t.Fatalf("failed to get client stats: %v", err)
	}
	if st.NumFailures != 0 {
		t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
	}
	if len(st.RpcsByPeer) != 1 {
		t.Fatalf("RPCs were sent to %d backends (%v), want exactly 1", len(st.RpcsByPeer), st.RpcsByPeer)
	}

	// Call channelz to verify that only one subchannel is READY; the two
	// backends that never receive traffic should remain IDLE.
	scs, err := c.channelzSubChannels(ctx)
	if err != nil {
		t.Fatalf("failed to fetch channelz: %v", err)
	}
	verifySubConnStates(t, scs, map[channelzpb.ChannelConnectivityState_State]int{
		channelzpb.ChannelConnectivityState_READY: 1,
		channelzpb.ChannelConnectivityState_IDLE:  2,
	})

	// Send unary calls with different metadata values (integers starting from
	// 0), and stop once a second peer is picked.
	for mdValue := 0; ; mdValue++ {
		if err := c.configRPCs(ctx, &testpb.ClientConfigureRequest{
			Types: []testpb.ClientConfigureRequest_RpcType{
				testpb.ClientConfigureRequest_EMPTY_CALL,
				testpb.ClientConfigureRequest_UNARY_CALL,
			},
			Metadata: []*testpb.ClientConfigureRequest_Metadata{
				{Type: testpb.ClientConfigureRequest_EMPTY_CALL, Key: testMDKey, Value: testMDValue},
				{Type: testpb.ClientConfigureRequest_UNARY_CALL, Key: testMDKey, Value: strconv.Itoa(mdValue)},
			},
		}); err != nil {
			t.Fatalf("failed to configure RPC: %v", err)
		}

		st, err := c.clientStats(ctx)
		if err != nil {
			t.Fatalf("failed to get client stats: %v", err)
		}
		if st.NumFailures != 0 {
			t.Fatalf("Got %v failures: %+v", st.NumFailures, st)
		}
		if len(st.RpcsByPeer) == 2 {
			break
		}
	}

	// Call channelz again to verify that now exactly two subchannels are
	// READY, with the third still IDLE.
	scs2, err := c.channelzSubChannels(ctx)
	if err != nil {
		t.Fatalf("failed to fetch channelz: %v", err)
	}
	verifySubConnStates(t, scs2, map[channelzpb.ChannelConnectivityState_State]int{
		channelzpb.ChannelConnectivityState_READY: 2,
		channelzpb.ChannelConnectivityState_IDLE:  1,
	})
}