github.com/lfch/etcd-io/tests/v3@v3.0.0-20221004140520-eac99acd3e9d/integration/clientv3/kv_test.go (about)

     1  // Copyright 2016 The etcd Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package clientv3test
    16  
    17  import (
    18  	"bytes"
    19  	"context"
    20  	"fmt"
    21  	"os"
    22  	"reflect"
    23  	"strconv"
    24  	"strings"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/lfch/etcd-io/api/v3/mvccpb"
    29  	"github.com/lfch/etcd-io/api/v3/v3rpc/rpctypes"
    30  	"github.com/lfch/etcd-io/api/v3/version"
    31  	clientv3 "github.com/lfch/etcd-io/client/v3"
    32  	integration2 "github.com/lfch/etcd-io/tests/v3/framework/integration"
    33  	"google.golang.org/grpc"
    34  	"google.golang.org/grpc/codes"
    35  	"google.golang.org/grpc/status"
    36  )
    37  
    38  func TestKVPutError(t *testing.T) {
    39  	integration2.BeforeTest(t)
    40  
    41  	var (
    42  		maxReqBytes = 1.5 * 1024 * 1024                                // hard coded max in v3_server.go
    43  		quota       = int64(int(maxReqBytes*1.2) + 8*os.Getpagesize()) // make sure we have enough overhead in backend quota. See discussion in #6486.
    44  	)
    45  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, QuotaBackendBytes: quota, ClientMaxCallSendMsgSize: 100 * 1024 * 1024})
    46  	defer clus.Terminate(t)
    47  
    48  	kv := clus.RandClient()
    49  	ctx := context.TODO()
    50  
    51  	_, err := kv.Put(ctx, "", "bar")
    52  	if err != rpctypes.ErrEmptyKey {
    53  		t.Fatalf("expected %v, got %v", rpctypes.ErrEmptyKey, err)
    54  	}
    55  
    56  	_, err = kv.Put(ctx, "key", strings.Repeat("a", int(maxReqBytes+100)))
    57  	if err != rpctypes.ErrRequestTooLarge {
    58  		t.Fatalf("expected %v, got %v", rpctypes.ErrRequestTooLarge, err)
    59  	}
    60  
    61  	_, err = kv.Put(ctx, "foo1", strings.Repeat("a", int(maxReqBytes-50)))
    62  	if err != nil { // below quota
    63  		t.Fatal(err)
    64  	}
    65  
    66  	time.Sleep(1 * time.Second) // give enough time for commit
    67  
    68  	_, err = kv.Put(ctx, "foo2", strings.Repeat("a", int(maxReqBytes-50)))
    69  	if err != rpctypes.ErrNoSpace { // over quota
    70  		t.Fatalf("expected %v, got %v", rpctypes.ErrNoSpace, err)
    71  	}
    72  }
    73  
    74  func TestKVPutWithLease(t *testing.T) {
    75  	integration2.BeforeTest(t)
    76  
    77  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
    78  	defer clus.Terminate(t)
    79  
    80  	lapi := clus.RandClient()
    81  
    82  	kv := clus.RandClient()
    83  	ctx := context.TODO()
    84  
    85  	lease, err := lapi.Grant(context.Background(), 10)
    86  	if err != nil {
    87  		t.Fatalf("failed to create lease %v", err)
    88  	}
    89  
    90  	key := "hello"
    91  	val := "world"
    92  	if _, err := kv.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil {
    93  		t.Fatalf("couldn't put %q (%v)", key, err)
    94  	}
    95  	resp, err := kv.Get(ctx, key)
    96  	if err != nil {
    97  		t.Fatalf("couldn't get key (%v)", err)
    98  	}
    99  	if len(resp.Kvs) != 1 {
   100  		t.Fatalf("expected 1 key, got %d", len(resp.Kvs))
   101  	}
   102  	if !bytes.Equal([]byte(val), resp.Kvs[0].Value) {
   103  		t.Errorf("val = %s, want %s", val, resp.Kvs[0].Value)
   104  	}
   105  	if lease.ID != clientv3.LeaseID(resp.Kvs[0].Lease) {
   106  		t.Errorf("val = %d, want %d", lease.ID, resp.Kvs[0].Lease)
   107  	}
   108  }
   109  
   110  // TestKVPutWithIgnoreValue ensures that Put with WithIgnoreValue does not clobber the old value.
   111  func TestKVPutWithIgnoreValue(t *testing.T) {
   112  	integration2.BeforeTest(t)
   113  
   114  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   115  	defer clus.Terminate(t)
   116  
   117  	kv := clus.RandClient()
   118  
   119  	_, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue())
   120  	if err != rpctypes.ErrKeyNotFound {
   121  		t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
   122  	}
   123  
   124  	if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
   125  		t.Fatal(err)
   126  	}
   127  
   128  	if _, err := kv.Put(context.TODO(), "foo", "", clientv3.WithIgnoreValue()); err != nil {
   129  		t.Fatal(err)
   130  	}
   131  	rr, rerr := kv.Get(context.TODO(), "foo")
   132  	if rerr != nil {
   133  		t.Fatal(rerr)
   134  	}
   135  	if len(rr.Kvs) != 1 {
   136  		t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs))
   137  	}
   138  	if !bytes.Equal(rr.Kvs[0].Value, []byte("bar")) {
   139  		t.Fatalf("value expected 'bar', got %q", rr.Kvs[0].Value)
   140  	}
   141  }
   142  
   143  // TestKVPutWithIgnoreLease ensures that Put with WithIgnoreLease does not affect the existing lease for the key.
   144  func TestKVPutWithIgnoreLease(t *testing.T) {
   145  	integration2.BeforeTest(t)
   146  
   147  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   148  	defer clus.Terminate(t)
   149  
   150  	kv := clus.RandClient()
   151  
   152  	lapi := clus.RandClient()
   153  
   154  	resp, err := lapi.Grant(context.Background(), 10)
   155  	if err != nil {
   156  		t.Errorf("failed to create lease %v", err)
   157  	}
   158  
   159  	if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithIgnoreLease()); err != rpctypes.ErrKeyNotFound {
   160  		t.Fatalf("err expected %v, got %v", rpctypes.ErrKeyNotFound, err)
   161  	}
   162  
   163  	if _, err := kv.Put(context.TODO(), "zoo", "bar", clientv3.WithLease(resp.ID)); err != nil {
   164  		t.Fatal(err)
   165  	}
   166  
   167  	if _, err := kv.Put(context.TODO(), "zoo", "bar1", clientv3.WithIgnoreLease()); err != nil {
   168  		t.Fatal(err)
   169  	}
   170  
   171  	rr, rerr := kv.Get(context.TODO(), "zoo")
   172  	if rerr != nil {
   173  		t.Fatal(rerr)
   174  	}
   175  	if len(rr.Kvs) != 1 {
   176  		t.Fatalf("len(rr.Kvs) expected 1, got %d", len(rr.Kvs))
   177  	}
   178  	if rr.Kvs[0].Lease != int64(resp.ID) {
   179  		t.Fatalf("lease expected %v, got %v", resp.ID, rr.Kvs[0].Lease)
   180  	}
   181  }
   182  
// TestKVPutWithRequireLeader verifies that a Put issued with
// WithRequireLeader fails fast with ErrNoLeader when quorum is lost,
// and that the failed request is still counted by the server metric.
func TestKVPutWithRequireLeader(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// Stop two of the three members so member[0] loses quorum.
	clus.Members[1].Stop(t)
	clus.Members[2].Stop(t)

	// wait for election timeout, then member[0] will not have a leader.
	var (
		electionTicks = 10
		tickDuration  = 10 * time.Millisecond
	)
	time.Sleep(time.Duration(3*electionTicks) * tickDuration)

	kv := clus.Client(0)
	// With WithRequireLeader the put must fail with ErrNoLeader instead
	// of blocking until a leader is elected.
	_, err := kv.Put(clientv3.WithRequireLeader(context.Background()), "foo", "bar")
	if err != rpctypes.ErrNoLeader {
		t.Fatal(err)
	}

	// The rejected request should still show up in the server-side
	// unary client-requests counter for this API version.
	cnt, err := clus.Members[0].Metric(
		"etcd_server_client_requests_total",
		`type="unary"`,
		fmt.Sprintf(`client_api_version="%v"`, version.APIVersion),
	)
	if err != nil {
		t.Fatal(err)
	}
	cv, err := strconv.ParseInt(cnt, 10, 32)
	if err != nil {
		t.Fatal(err)
	}
	if cv < 1 { // >1 when retried
		t.Fatalf("expected at least 1, got %q", cnt)
	}

	// clients may give timeout errors since the members are stopped; take
	// the clients so that terminating the cluster won't complain
	clus.Client(1).Close()
	clus.Client(2).Close()
	clus.TakeClient(1)
	clus.TakeClient(2)
}
   228  
   229  func TestKVRange(t *testing.T) {
   230  	integration2.BeforeTest(t)
   231  
   232  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
   233  	defer clus.Terminate(t)
   234  
   235  	kv := clus.RandClient()
   236  	ctx := context.TODO()
   237  
   238  	keySet := []string{"a", "b", "c", "c", "c", "foo", "foo/abc", "fop"}
   239  	for i, key := range keySet {
   240  		if _, err := kv.Put(ctx, key, ""); err != nil {
   241  			t.Fatalf("#%d: couldn't put %q (%v)", i, key, err)
   242  		}
   243  	}
   244  	resp, err := kv.Get(ctx, keySet[0])
   245  	if err != nil {
   246  		t.Fatalf("couldn't get key (%v)", err)
   247  	}
   248  	wheader := resp.Header
   249  
   250  	tests := []struct {
   251  		begin, end string
   252  		rev        int64
   253  		opts       []clientv3.OpOption
   254  
   255  		wantSet []*mvccpb.KeyValue
   256  	}{
   257  		// fetch entire keyspace using WithFromKey
   258  		{
   259  			"\x00", "",
   260  			0,
   261  			[]clientv3.OpOption{clientv3.WithFromKey(), clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend)},
   262  
   263  			[]*mvccpb.KeyValue{
   264  				{Key: []byte("a"), Value: nil, CreateRevision: 2, ModRevision: 2, Version: 1},
   265  				{Key: []byte("b"), Value: nil, CreateRevision: 3, ModRevision: 3, Version: 1},
   266  				{Key: []byte("c"), Value: nil, CreateRevision: 4, ModRevision: 6, Version: 3},
   267  				{Key: []byte("foo"), Value: nil, CreateRevision: 7, ModRevision: 7, Version: 1},
   268  				{Key: []byte("foo/abc"), Value: nil, CreateRevision: 8, ModRevision: 8, Version: 1},
   269  				{Key: []byte("fop"), Value: nil, CreateRevision: 9, ModRevision: 9, Version: 1},
   270  			},
   271  		},
   272  	}
   273  
   274  	for i, tt := range tests {
   275  		opts := []clientv3.OpOption{clientv3.WithRange(tt.end), clientv3.WithRev(tt.rev)}
   276  		opts = append(opts, tt.opts...)
   277  		resp, err := kv.Get(ctx, tt.begin, opts...)
   278  		if err != nil {
   279  			t.Fatalf("#%d: couldn't range (%v)", i, err)
   280  		}
   281  		if !reflect.DeepEqual(wheader, resp.Header) {
   282  			t.Fatalf("#%d: wheader expected %+v, got %+v", i, wheader, resp.Header)
   283  		}
   284  		if !reflect.DeepEqual(tt.wantSet, resp.Kvs) {
   285  			t.Fatalf("#%d: resp.Kvs expected %+v, got %+v", i, tt.wantSet, resp.Kvs)
   286  		}
   287  	}
   288  }
   289  
   290  func TestKVGetErrConnClosed(t *testing.T) {
   291  	integration2.BeforeTest(t)
   292  
   293  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   294  	defer clus.Terminate(t)
   295  
   296  	cli := clus.Client(0)
   297  
   298  	donec := make(chan struct{})
   299  	if err := cli.Close(); err != nil {
   300  		t.Fatal(err)
   301  	}
   302  	clus.TakeClient(0)
   303  
   304  	go func() {
   305  		defer close(donec)
   306  		_, err := cli.Get(context.TODO(), "foo")
   307  		if !clientv3.IsConnCanceled(err) {
   308  			t.Errorf("expected %v, got %v", context.Canceled, err)
   309  		}
   310  	}()
   311  
   312  	select {
   313  	case <-time.After(integration2.RequestWaitTimeout):
   314  		t.Fatal("kv.Get took too long")
   315  	case <-donec:
   316  	}
   317  }
   318  
   319  func TestKVNewAfterClose(t *testing.T) {
   320  	integration2.BeforeTest(t)
   321  
   322  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   323  	defer clus.Terminate(t)
   324  
   325  	cli := clus.Client(0)
   326  	clus.TakeClient(0)
   327  	if err := cli.Close(); err != nil {
   328  		t.Fatal(err)
   329  	}
   330  
   331  	donec := make(chan struct{})
   332  	go func() {
   333  		_, err := cli.Get(context.TODO(), "foo")
   334  		if !clientv3.IsConnCanceled(err) {
   335  			t.Errorf("expected %v, got %v", context.Canceled, err)
   336  		}
   337  		close(donec)
   338  	}()
   339  	select {
   340  	case <-time.After(integration2.RequestWaitTimeout):
   341  		t.Fatal("kv.Get took too long")
   342  	case <-donec:
   343  	}
   344  }
   345  
   346  func TestKVDeleteRange(t *testing.T) {
   347  	integration2.BeforeTest(t)
   348  
   349  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
   350  	defer clus.Terminate(t)
   351  
   352  	kv := clus.RandClient()
   353  	ctx := context.TODO()
   354  
   355  	tests := []struct {
   356  		key   string
   357  		opts  []clientv3.OpOption
   358  		wkeys []string
   359  	}{
   360  		// *
   361  		{
   362  			key:   "\x00",
   363  			opts:  []clientv3.OpOption{clientv3.WithFromKey()},
   364  			wkeys: nil,
   365  		},
   366  	}
   367  
   368  	for i, tt := range tests {
   369  		keySet := []string{"a", "b", "c", "c/abc", "d"}
   370  		for j, key := range keySet {
   371  			if _, err := kv.Put(ctx, key, ""); err != nil {
   372  				t.Fatalf("#%d: couldn't put %q (%v)", j, key, err)
   373  			}
   374  		}
   375  
   376  		_, err := kv.Delete(ctx, tt.key, tt.opts...)
   377  		if err != nil {
   378  			t.Fatalf("#%d: couldn't delete range (%v)", i, err)
   379  		}
   380  
   381  		resp, err := kv.Get(ctx, "a", clientv3.WithFromKey())
   382  		if err != nil {
   383  			t.Fatalf("#%d: couldn't get keys (%v)", i, err)
   384  		}
   385  		var keys []string
   386  		for _, kv := range resp.Kvs {
   387  			keys = append(keys, string(kv.Key))
   388  		}
   389  		if !reflect.DeepEqual(tt.wkeys, keys) {
   390  			t.Errorf("#%d: resp.Kvs got %v, expected %v", i, keys, tt.wkeys)
   391  		}
   392  	}
   393  }
   394  
   395  func TestKVCompactError(t *testing.T) {
   396  	integration2.BeforeTest(t)
   397  
   398  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   399  	defer clus.Terminate(t)
   400  
   401  	kv := clus.RandClient()
   402  	ctx := context.TODO()
   403  
   404  	for i := 0; i < 5; i++ {
   405  		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
   406  			t.Fatalf("couldn't put 'foo' (%v)", err)
   407  		}
   408  	}
   409  	_, err := kv.Compact(ctx, 6)
   410  	if err != nil {
   411  		t.Fatalf("couldn't compact 6 (%v)", err)
   412  	}
   413  
   414  	_, err = kv.Compact(ctx, 6)
   415  	if err != rpctypes.ErrCompacted {
   416  		t.Fatalf("expected %v, got %v", rpctypes.ErrCompacted, err)
   417  	}
   418  
   419  	_, err = kv.Compact(ctx, 100)
   420  	if err != rpctypes.ErrFutureRev {
   421  		t.Fatalf("expected %v, got %v", rpctypes.ErrFutureRev, err)
   422  	}
   423  }
   424  
// TestKVCompact verifies compaction behavior end to end: duplicate and
// future compaction errors, plus cancellation of a watcher whose start
// revision was compacted away.
func TestKVCompact(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	kv := clus.RandClient()
	ctx := context.TODO()

	// Write 10 revisions of "foo" so there is history to compact.
	for i := 0; i < 10; i++ {
		if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
			t.Fatalf("couldn't put 'foo' (%v)", err)
		}
	}

	_, err := kv.Compact(ctx, 7)
	if err != nil {
		t.Fatalf("couldn't compact kv space (%v)", err)
	}
	// Compacting an already-compacted revision must fail with ErrCompacted.
	_, err = kv.Compact(ctx, 7)
	if err == nil || err != rpctypes.ErrCompacted {
		t.Fatalf("error got %v, want %v", err, rpctypes.ErrCompacted)
	}

	wcli := clus.RandClient()
	// new watcher could precede receiving the compaction without quorum first
	wcli.Get(ctx, "quorum-get")

	// Watching from revision 3, older than the compaction point (7),
	// must deliver a canceled response reporting the compact revision.
	wchan := wcli.Watch(ctx, "foo", clientv3.WithRev(3))

	wr := <-wchan
	if wr.CompactRevision != 7 {
		t.Fatalf("wchan CompactRevision got %v, want 7", wr.CompactRevision)
	}
	if !wr.Canceled {
		t.Fatalf("expected canceled watcher on compacted revision, got %v", wr.Canceled)
	}
	if wr.Err() != rpctypes.ErrCompacted {
		t.Fatalf("watch response error expected %v, got %v", rpctypes.ErrCompacted, wr.Err())
	}
	// After cancellation the watch channel must be closed; the zero-value
	// response read from the closed channel carries no error.
	wr, ok := <-wchan
	if ok {
		t.Fatalf("wchan got %v, expected closed", wr)
	}
	if wr.Err() != nil {
		t.Fatalf("watch response error expected nil, got %v", wr.Err())
	}

	// Compacting beyond the current revision must fail with ErrFutureRev.
	_, err = kv.Compact(ctx, 1000)
	if err == nil || err != rpctypes.ErrFutureRev {
		t.Fatalf("error got %v, want %v", err, rpctypes.ErrFutureRev)
	}
}
   478  
   479  // TestKVGetRetry ensures get will retry on disconnect.
   480  func TestKVGetRetry(t *testing.T) {
   481  	integration2.BeforeTest(t)
   482  
   483  	clusterSize := 3
   484  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: clusterSize, UseBridge: true})
   485  	defer clus.Terminate(t)
   486  
   487  	// because killing leader and following election
   488  	// could give no other endpoints for client reconnection
   489  	fIdx := (clus.WaitLeader(t) + 1) % clusterSize
   490  
   491  	kv := clus.Client(fIdx)
   492  	ctx := context.TODO()
   493  
   494  	if _, err := kv.Put(ctx, "foo", "bar"); err != nil {
   495  		t.Fatal(err)
   496  	}
   497  
   498  	clus.Members[fIdx].Stop(t)
   499  
   500  	donec := make(chan struct{}, 1)
   501  	go func() {
   502  		// Get will fail, but reconnect will trigger
   503  		gresp, gerr := kv.Get(ctx, "foo")
   504  		if gerr != nil {
   505  			t.Error(gerr)
   506  		}
   507  		wkvs := []*mvccpb.KeyValue{
   508  			{
   509  				Key:            []byte("foo"),
   510  				Value:          []byte("bar"),
   511  				CreateRevision: 2,
   512  				ModRevision:    2,
   513  				Version:        1,
   514  			},
   515  		}
   516  		if !reflect.DeepEqual(gresp.Kvs, wkvs) {
   517  			t.Errorf("bad get: got %v, want %v", gresp.Kvs, wkvs)
   518  		}
   519  		donec <- struct{}{}
   520  	}()
   521  
   522  	time.Sleep(100 * time.Millisecond)
   523  	clus.Members[fIdx].Restart(t)
   524  	clus.Members[fIdx].WaitOK(t)
   525  
   526  	select {
   527  	case <-time.After(20 * time.Second):
   528  		t.Fatalf("timed out waiting for get")
   529  	case <-donec:
   530  	}
   531  }
   532  
   533  // TestKVPutFailGetRetry ensures a get will retry following a failed put.
   534  func TestKVPutFailGetRetry(t *testing.T) {
   535  	integration2.BeforeTest(t)
   536  
   537  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
   538  	defer clus.Terminate(t)
   539  
   540  	kv := clus.Client(0)
   541  	clus.Members[0].Stop(t)
   542  
   543  	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
   544  	defer cancel()
   545  	_, err := kv.Put(ctx, "foo", "bar")
   546  	if err == nil {
   547  		t.Fatalf("got success on disconnected put, wanted error")
   548  	}
   549  
   550  	donec := make(chan struct{}, 1)
   551  	go func() {
   552  		// Get will fail, but reconnect will trigger
   553  		gresp, gerr := kv.Get(context.TODO(), "foo")
   554  		if gerr != nil {
   555  			t.Error(gerr)
   556  		}
   557  		if len(gresp.Kvs) != 0 {
   558  			t.Errorf("bad get kvs: got %+v, want empty", gresp.Kvs)
   559  		}
   560  		donec <- struct{}{}
   561  	}()
   562  
   563  	time.Sleep(100 * time.Millisecond)
   564  	clus.Members[0].Restart(t)
   565  
   566  	select {
   567  	case <-time.After(20 * time.Second):
   568  		t.Fatalf("timed out waiting for get")
   569  	case <-donec:
   570  	}
   571  }
   572  
   573  // TestKVGetCancel tests that a context cancel on a Get terminates as expected.
   574  func TestKVGetCancel(t *testing.T) {
   575  	integration2.BeforeTest(t)
   576  
   577  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   578  	defer clus.Terminate(t)
   579  
   580  	oldconn := clus.Client(0).ActiveConnection()
   581  	kv := clus.Client(0)
   582  
   583  	ctx, cancel := context.WithCancel(context.TODO())
   584  	cancel()
   585  
   586  	resp, err := kv.Get(ctx, "abc")
   587  	if err == nil {
   588  		t.Fatalf("cancel on get response %v, expected context error", resp)
   589  	}
   590  	newconn := clus.Client(0).ActiveConnection()
   591  	if oldconn != newconn {
   592  		t.Fatalf("cancel on get broke client connection")
   593  	}
   594  }
   595  
   596  // TestKVGetStoppedServerAndClose ensures closing after a failed Get works.
   597  func TestKVGetStoppedServerAndClose(t *testing.T) {
   598  	integration2.BeforeTest(t)
   599  
   600  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   601  	defer clus.Terminate(t)
   602  
   603  	cli := clus.Client(0)
   604  	clus.Members[0].Stop(t)
   605  	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
   606  	// this Get fails and triggers an asynchronous connection retry
   607  	_, err := cli.Get(ctx, "abc")
   608  	cancel()
   609  	if err != nil && !(IsCanceled(err) || IsClientTimeout(err)) {
   610  		t.Fatal(err)
   611  	}
   612  }
   613  
   614  // TestKVPutStoppedServerAndClose ensures closing after a failed Put works.
   615  func TestKVPutStoppedServerAndClose(t *testing.T) {
   616  	integration2.BeforeTest(t)
   617  
   618  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   619  	defer clus.Terminate(t)
   620  
   621  	cli := clus.Client(0)
   622  	clus.Members[0].Stop(t)
   623  
   624  	ctx, cancel := context.WithTimeout(context.TODO(), time.Second)
   625  	// get retries on all errors.
   626  	// so here we use it to eat the potential broken pipe error for the next put.
   627  	// grpc client might see a broken pipe error when we issue the get request before
   628  	// grpc finds out the original connection is down due to the member shutdown.
   629  	_, err := cli.Get(ctx, "abc")
   630  	cancel()
   631  	if err != nil && !(IsCanceled(err) || IsClientTimeout(err)) {
   632  		t.Fatal(err)
   633  	}
   634  
   635  	ctx, cancel = context.WithTimeout(context.TODO(), time.Second)
   636  	// this Put fails and triggers an asynchronous connection retry
   637  	_, err = cli.Put(ctx, "abc", "123")
   638  	cancel()
   639  	if err != nil && !(IsCanceled(err) || IsClientTimeout(err) || IsUnavailable(err)) {
   640  		t.Fatal(err)
   641  	}
   642  }
   643  
   644  // TestKVPutAtMostOnce ensures that a Put will only occur at most once
   645  // in the presence of network errors.
   646  func TestKVPutAtMostOnce(t *testing.T) {
   647  	integration2.BeforeTest(t)
   648  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
   649  	defer clus.Terminate(t)
   650  
   651  	if _, err := clus.Client(0).Put(context.TODO(), "k", "1"); err != nil {
   652  		t.Fatal(err)
   653  	}
   654  
   655  	for i := 0; i < 10; i++ {
   656  		clus.Members[0].Bridge().DropConnections()
   657  		donec := make(chan struct{})
   658  		go func() {
   659  			defer close(donec)
   660  			for i := 0; i < 10; i++ {
   661  				clus.Members[0].Bridge().DropConnections()
   662  				time.Sleep(5 * time.Millisecond)
   663  			}
   664  		}()
   665  		_, err := clus.Client(0).Put(context.TODO(), "k", "v")
   666  		<-donec
   667  		if err != nil {
   668  			break
   669  		}
   670  	}
   671  
   672  	resp, err := clus.Client(0).Get(context.TODO(), "k")
   673  	if err != nil {
   674  		t.Fatal(err)
   675  	}
   676  	if resp.Kvs[0].Version > 11 {
   677  		t.Fatalf("expected version <= 10, got %+v", resp.Kvs[0])
   678  	}
   679  }
   680  
   681  // TestKVLargeRequests tests various client/server side request limits.
   682  func TestKVLargeRequests(t *testing.T) {
   683  	integration2.BeforeTest(t)
   684  	tests := []struct {
   685  		// make sure that "MaxCallSendMsgSize" < server-side default send/recv limit
   686  		maxRequestBytesServer  uint
   687  		maxCallSendBytesClient int
   688  		maxCallRecvBytesClient int
   689  
   690  		valueSize   int
   691  		expectError error
   692  	}{
   693  		{
   694  			maxRequestBytesServer:  256,
   695  			maxCallSendBytesClient: 0,
   696  			maxCallRecvBytesClient: 0,
   697  			valueSize:              1024,
   698  			expectError:            rpctypes.ErrRequestTooLarge,
   699  		},
   700  
   701  		// without proper client-side receive size limit
   702  		// "code = ResourceExhausted desc = grpc: received message larger than max (5242929 vs. 4194304)"
   703  		{
   704  
   705  			maxRequestBytesServer:  7*1024*1024 + 512*1024,
   706  			maxCallSendBytesClient: 7 * 1024 * 1024,
   707  			maxCallRecvBytesClient: 0,
   708  			valueSize:              5 * 1024 * 1024,
   709  			expectError:            nil,
   710  		},
   711  
   712  		{
   713  			maxRequestBytesServer:  10 * 1024 * 1024,
   714  			maxCallSendBytesClient: 100 * 1024 * 1024,
   715  			maxCallRecvBytesClient: 0,
   716  			valueSize:              10 * 1024 * 1024,
   717  			expectError:            rpctypes.ErrRequestTooLarge,
   718  		},
   719  		{
   720  			maxRequestBytesServer:  10 * 1024 * 1024,
   721  			maxCallSendBytesClient: 10 * 1024 * 1024,
   722  			maxCallRecvBytesClient: 0,
   723  			valueSize:              10 * 1024 * 1024,
   724  			expectError:            status.Errorf(codes.ResourceExhausted, "trying to send message larger than max "),
   725  		},
   726  		{
   727  			maxRequestBytesServer:  10 * 1024 * 1024,
   728  			maxCallSendBytesClient: 100 * 1024 * 1024,
   729  			maxCallRecvBytesClient: 0,
   730  			valueSize:              10*1024*1024 + 5,
   731  			expectError:            rpctypes.ErrRequestTooLarge,
   732  		},
   733  		{
   734  			maxRequestBytesServer:  10 * 1024 * 1024,
   735  			maxCallSendBytesClient: 10 * 1024 * 1024,
   736  			maxCallRecvBytesClient: 0,
   737  			valueSize:              10*1024*1024 + 5,
   738  			expectError:            status.Errorf(codes.ResourceExhausted, "trying to send message larger than max "),
   739  		},
   740  	}
   741  	for i, test := range tests {
   742  		clus := integration2.NewCluster(t,
   743  			&integration2.ClusterConfig{
   744  				Size:                     1,
   745  				MaxRequestBytes:          test.maxRequestBytesServer,
   746  				ClientMaxCallSendMsgSize: test.maxCallSendBytesClient,
   747  				ClientMaxCallRecvMsgSize: test.maxCallRecvBytesClient,
   748  			},
   749  		)
   750  		cli := clus.Client(0)
   751  		_, err := cli.Put(context.TODO(), "foo", strings.Repeat("a", test.valueSize))
   752  
   753  		if _, ok := err.(rpctypes.EtcdError); ok {
   754  			if err != test.expectError {
   755  				t.Errorf("#%d: expected %v, got %v", i, test.expectError, err)
   756  			}
   757  		} else if err != nil && !strings.HasPrefix(err.Error(), test.expectError.Error()) {
   758  			t.Errorf("#%d: expected error starting with '%s', got '%s'", i, test.expectError.Error(), err.Error())
   759  		}
   760  
   761  		// put request went through, now expects large response back
   762  		if err == nil {
   763  			_, err = cli.Get(context.TODO(), "foo")
   764  			if err != nil {
   765  				t.Errorf("#%d: get expected no error, got %v", i, err)
   766  			}
   767  		}
   768  
   769  		clus.Terminate(t)
   770  	}
   771  }
   772  
   773  // TestKVForLearner ensures learner member only accepts serializable read request.
// TestKVForLearner ensures learner member only accepts serializable read request.
func TestKVForLearner(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
	defer clus.Terminate(t)

	// we have to add and launch learner member after initial cluster was created, because
	// bootstrapping a cluster with learner member is not supported.
	clus.AddAndLaunchLearnerMember(t)

	learners, err := clus.GetLearnerMembers()
	if err != nil {
		t.Fatalf("failed to get the learner members in cluster: %v", err)
	}
	if len(learners) != 1 {
		t.Fatalf("added 1 learner to cluster, got %d", len(learners))
	}

	if len(clus.Members) != 4 {
		t.Fatalf("expecting 4 members in cluster after adding the learner member, got %d", len(clus.Members))
	}
	// note:
	// 1. clus.Members[3] is the newly added learner member, which was appended to clus.Members
	// 2. we are using member's grpcAddr instead of clientURLs as the endpoint for clientv3.Config,
	// because the implementation of integration test has diverged from embed/etcd.go.
	learnerEp := clus.Members[3].GRPCURL()
	cfg := clientv3.Config{
		Endpoints:   []string{learnerEp},
		DialTimeout: 5 * time.Second,
		DialOptions: []grpc.DialOption{grpc.WithBlock()},
	}
	// this client only has endpoint of the learner member
	cli, err := integration2.NewClient(t, cfg)
	if err != nil {
		t.Fatalf("failed to create clientv3: %v", err)
	}
	defer cli.Close()

	// wait until learner member is ready
	<-clus.Members[3].ReadyNotify()

	// Only the serializable Get is expected to succeed on a learner;
	// linearizable reads, writes, deletes, and txns must be rejected.
	tests := []struct {
		op   clientv3.Op
		wErr bool
	}{
		{
			op:   clientv3.OpGet("foo", clientv3.WithSerializable()),
			wErr: false,
		},
		{
			op:   clientv3.OpGet("foo"),
			wErr: true,
		},
		{
			op:   clientv3.OpPut("foo", "bar"),
			wErr: true,
		},
		{
			op:   clientv3.OpDelete("foo"),
			wErr: true,
		},
		{
			op:   clientv3.OpTxn([]clientv3.Cmp{clientv3.Compare(clientv3.CreateRevision("foo"), "=", 0)}, nil, nil),
			wErr: true,
		},
	}

	for idx, test := range tests {
		_, err := cli.Do(context.TODO(), test.op)
		if err != nil && !test.wErr {
			t.Errorf("%d: expect no error, got %v", idx, err)
		}
		if err == nil && test.wErr {
			t.Errorf("%d: expect error, got nil", idx)
		}
	}
}
   851  
   852  // TestBalancerSupportLearner verifies that balancer's retry and failover mechanism supports cluster with learner member
   853  func TestBalancerSupportLearner(t *testing.T) {
   854  	integration2.BeforeTest(t)
   855  
   856  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, DisableStrictReconfigCheck: true})
   857  	defer clus.Terminate(t)
   858  
   859  	// we have to add and launch learner member after initial cluster was created, because
   860  	// bootstrapping a cluster with learner member is not supported.
   861  	clus.AddAndLaunchLearnerMember(t)
   862  
   863  	learners, err := clus.GetLearnerMembers()
   864  	if err != nil {
   865  		t.Fatalf("failed to get the learner members in cluster: %v", err)
   866  	}
   867  	if len(learners) != 1 {
   868  		t.Fatalf("added 1 learner to cluster, got %d", len(learners))
   869  	}
   870  
   871  	// clus.Members[3] is the newly added learner member, which was appended to clus.Members
   872  	learnerEp := clus.Members[3].GRPCURL()
   873  	cfg := clientv3.Config{
   874  		Endpoints:   []string{learnerEp},
   875  		DialTimeout: 5 * time.Second,
   876  		DialOptions: []grpc.DialOption{grpc.WithBlock()},
   877  	}
   878  	cli, err := integration2.NewClient(t, cfg)
   879  	if err != nil {
   880  		t.Fatalf("failed to create clientv3: %v", err)
   881  	}
   882  	defer cli.Close()
   883  
   884  	// wait until learner member is ready
   885  	<-clus.Members[3].ReadyNotify()
   886  
   887  	if _, err := cli.Get(context.Background(), "foo"); err == nil {
   888  		t.Fatalf("expect Get request to learner to fail, got no error")
   889  	}
   890  	t.Logf("Expected: Read from learner error: %v", err)
   891  
   892  	eps := []string{learnerEp, clus.Members[0].GRPCURL()}
   893  	cli.SetEndpoints(eps...)
   894  	if _, err := cli.Get(context.Background(), "foo"); err != nil {
   895  		t.Errorf("expect no error (balancer should retry when request to learner fails), got error: %v", err)
   896  	}
   897  }