github.com/lfch/etcd-io/tests/v3@v3.0.0-20221004140520-eac99acd3e9d/integration/clientv3/watch_test.go

     1  // Copyright 2016 The etcd Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package clientv3test
    16  
    17  import (
    18  	"context"
    19  	"fmt"
    20  	"math/rand"
    21  	"reflect"
    22  	"sort"
    23  	"strconv"
    24  	"testing"
    25  	"time"
    26  
    27  	mvccpb "github.com/lfch/etcd-io/api/v3/mvccpb"
    28  	"github.com/lfch/etcd-io/api/v3/v3rpc/rpctypes"
    29  	"github.com/lfch/etcd-io/api/v3/version"
    30  	clientv3 "github.com/lfch/etcd-io/client/v3"
    31  	"github.com/lfch/etcd-io/server/v3/etcdserver/api/v3rpc"
    32  	integration2 "github.com/lfch/etcd-io/tests/v3/framework/integration"
    33  	"google.golang.org/grpc/metadata"
    34  )
    35  
    36  type watcherTest func(*testing.T, *watchctx)
    37  
    38  type watchctx struct {
    39  	clus          *integration2.Cluster
    40  	w             clientv3.Watcher
    41  	kv            clientv3.KV
    42  	wclientMember int
    43  	kvMember      int
    44  	ch            clientv3.WatchChan
    45  }
    46  
    47  func runWatchTest(t *testing.T, f watcherTest) {
    48  	integration2.BeforeTest(t)
    49  
    50  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
    51  	defer clus.Terminate(t)
    52  
    53  	wclientMember := rand.Intn(3)
    54  	w := clus.Client(wclientMember).Watcher
    55  	// select a different client for KV operations so puts succeed if
    56  	// a test knocks out the watcher client.
    57  	kvMember := rand.Intn(3)
    58  	for kvMember == wclientMember {
    59  		kvMember = rand.Intn(3)
    60  	}
    61  	kv := clus.Client(kvMember).KV
    62  
    63  	wctx := &watchctx{clus, w, kv, wclientMember, kvMember, nil}
    64  	f(t, wctx)
    65  }
    66  
    67  // TestWatchMultiWatcher modifies multiple keys and observes the changes.
    68  func TestWatchMultiWatcher(t *testing.T) {
    69  	runWatchTest(t, testWatchMultiWatcher)
    70  }
    71  
    72  func testWatchMultiWatcher(t *testing.T, wctx *watchctx) {
    73  	numKeyUpdates := 4
    74  	keys := []string{"foo", "bar", "baz"}
    75  
    76  	donec := make(chan struct{})
    77  	// wait for watcher shutdown
    78  	defer func() {
    79  		for i := 0; i < len(keys)+1; i++ {
    80  			<-donec
    81  		}
    82  	}()
    83  	readyc := make(chan struct{})
    84  	for _, k := range keys {
    85  		// key watcher
    86  		go func(key string) {
    87  			ch := wctx.w.Watch(context.TODO(), key)
    88  			if ch == nil {
    89  				t.Errorf("expected watcher channel, got nil")
    90  			}
    91  			readyc <- struct{}{}
    92  			for i := 0; i < numKeyUpdates; i++ {
    93  				resp, ok := <-ch
    94  				if !ok {
    95  					t.Errorf("watcher unexpectedly closed")
    96  				}
    97  				v := fmt.Sprintf("%s-%d", key, i)
    98  				gotv := string(resp.Events[0].Kv.Value)
    99  				if gotv != v {
   100  					t.Errorf("#%d: got %s, wanted %s", i, gotv, v)
   101  				}
   102  			}
   103  			donec <- struct{}{}
   104  		}(k)
   105  	}
   106  	// prefix watcher on "b" (bar and baz)
   107  	go func() {
   108  		prefixc := wctx.w.Watch(context.TODO(), "b", clientv3.WithPrefix())
   109  		if prefixc == nil {
   110  			t.Errorf("expected watcher channel, got nil")
   111  		}
   112  		readyc <- struct{}{}
   113  		var evs []*clientv3.Event
   114  		for i := 0; i < numKeyUpdates*2; i++ {
   115  			resp, ok := <-prefixc
   116  			if !ok {
   117  				t.Errorf("watcher unexpectedly closed")
   118  			}
   119  			evs = append(evs, resp.Events...)
   120  		}
   121  
   122  		// check response
   123  		var expected []string
   124  		bkeys := []string{"bar", "baz"}
   125  		for _, k := range bkeys {
   126  			for i := 0; i < numKeyUpdates; i++ {
   127  				expected = append(expected, fmt.Sprintf("%s-%d", k, i))
   128  			}
   129  		}
   130  		var got []string
   131  		for _, ev := range evs {
   132  			got = append(got, string(ev.Kv.Value))
   133  		}
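        		// expected is generated in lexicographic order (bar-* before baz-*), so sorting got is enough for the comparison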
   134  		sort.Strings(got)
   135  		if !reflect.DeepEqual(expected, got) {
   136  			t.Errorf("got %v, expected %v", got, expected)
   137  		}
   138  
   139  		// ensure no extra data
   140  		select {
   141  		case resp, ok := <-prefixc:
   142  			if !ok {
   143  				t.Errorf("watcher unexpectedly closed")
   144  			}
   145  			t.Errorf("unexpected event %+v", resp)
   146  		case <-time.After(time.Second):
   147  		}
   148  		donec <- struct{}{}
   149  	}()
   150  
   151  	// wait for the watchers to come up
   152  	for i := 0; i < len(keys)+1; i++ {
   153  		<-readyc
   154  	}
   155  	// generate events
   156  	ctx := context.TODO()
   157  	for i := 0; i < numKeyUpdates; i++ {
   158  		for _, k := range keys {
   159  			v := fmt.Sprintf("%s-%d", k, i)
   160  			if _, err := wctx.kv.Put(ctx, k, v); err != nil {
   161  				t.Fatal(err)
   162  			}
   163  		}
   164  	}
   165  }
   166  
   167  // TestWatchRange tests that watchers can be created over a key range.
   168  func TestWatchRange(t *testing.T) {
   169  	runWatchTest(t, testWatchRange)
   170  }
   171  
   172  func testWatchRange(t *testing.T, wctx *watchctx) {
   173  	if wctx.ch = wctx.w.Watch(context.TODO(), "a", clientv3.WithRange("c")); wctx.ch == nil {
   174  		t.Fatalf("expected non-nil channel")
   175  	}
   176  	putAndWatch(t, wctx, "a", "a")
   177  	putAndWatch(t, wctx, "b", "b")
   178  	putAndWatch(t, wctx, "bar", "bar")
   179  }
   180  
   181  // TestWatchReconnRequest tests the send failure path when requesting a watcher.
   182  func TestWatchReconnRequest(t *testing.T) {
   183  	runWatchTest(t, testWatchReconnRequest)
   184  }
   185  
   186  func testWatchReconnRequest(t *testing.T, wctx *watchctx) {
   187  	donec, stopc := make(chan struct{}), make(chan struct{}, 1)
   188  	go func() {
   189  		timer := time.After(2 * time.Second)
   190  		defer close(donec)
   191  		// take down watcher connection
   192  		for {
   193  			wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
   194  			select {
   195  			case <-timer:
   196  				// spinning on dropping connections may live-lock reconnection
   197  				return
   198  			case <-stopc:
   199  				return
   200  			default:
   201  			}
   202  		}
   203  	}()
   204  	// should reconnect when requesting watch
   205  	if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
   206  		t.Fatalf("expected non-nil channel")
   207  	}
   208  
   209  	// wait for disconnections to stop
   210  	stopc <- struct{}{}
   211  	<-donec
   212  
   213  	// spinning on dropping connections may trigger a leader election
   214  	// due to resource starvation; do a linearizable read (l-read) to ensure the cluster is stable
   215  	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
   216  	if _, err := wctx.kv.Get(ctx, "_"); err != nil {
   217  		t.Fatal(err)
   218  	}
   219  	cancel()
   220  
   221  	// ensure watcher works
   222  	putAndWatch(t, wctx, "a", "a")
   223  }
   224  
   225  // TestWatchReconnInit tests watcher resumes correctly if connection lost
   226  // before any data was sent.
   227  func TestWatchReconnInit(t *testing.T) {
   228  	runWatchTest(t, testWatchReconnInit)
   229  }
   230  
   231  func testWatchReconnInit(t *testing.T, wctx *watchctx) {
   232  	if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
   233  		t.Fatalf("expected non-nil channel")
   234  	}
   235  	wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
   236  	// watcher should recover
   237  	putAndWatch(t, wctx, "a", "a")
   238  }
   239  
   240  // TestWatchReconnRunning tests watcher resumes correctly if connection lost
   241  // after data was sent.
   242  func TestWatchReconnRunning(t *testing.T) {
   243  	runWatchTest(t, testWatchReconnRunning)
   244  }
   245  
   246  func testWatchReconnRunning(t *testing.T, wctx *watchctx) {
   247  	if wctx.ch = wctx.w.Watch(context.TODO(), "a"); wctx.ch == nil {
   248  		t.Fatalf("expected non-nil channel")
   249  	}
   250  	putAndWatch(t, wctx, "a", "a")
   251  	// take down watcher connection
   252  	wctx.clus.Members[wctx.wclientMember].Bridge().DropConnections()
   253  	// watcher should recover
   254  	putAndWatch(t, wctx, "a", "b")
   255  }
   256  
   257  // TestWatchCancelImmediate ensures a closed channel is returned
   258  // if the context is cancelled.
   259  func TestWatchCancelImmediate(t *testing.T) {
   260  	runWatchTest(t, testWatchCancelImmediate)
   261  }
   262  
   263  func testWatchCancelImmediate(t *testing.T, wctx *watchctx) {
   264  	ctx, cancel := context.WithCancel(context.Background())
   265  	cancel()
   266  	wch := wctx.w.Watch(ctx, "a")
   267  	select {
   268  	case wresp, ok := <-wch:
   269  		if ok {
   270  			t.Fatalf("read wch got %v; expected closed channel", wresp)
   271  		}
   272  	default:
   273  		t.Fatalf("closed watcher channel should not block")
   274  	}
   275  }
   276  
   277  // TestWatchCancelInit tests watcher closes correctly after no events.
   278  func TestWatchCancelInit(t *testing.T) {
   279  	runWatchTest(t, testWatchCancelInit)
   280  }
   281  
   282  func testWatchCancelInit(t *testing.T, wctx *watchctx) {
   283  	ctx, cancel := context.WithCancel(context.Background())
   284  	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
   285  		t.Fatalf("expected non-nil watcher channel")
   286  	}
   287  	cancel()
   288  	select {
   289  	case <-time.After(time.Second):
   290  		t.Fatalf("took too long to cancel")
   291  	case _, ok := <-wctx.ch:
   292  		if ok {
   293  			t.Fatalf("expected watcher channel to close")
   294  		}
   295  	}
   296  }
   297  
   298  // TestWatchCancelRunning tests watcher closes correctly after events.
   299  func TestWatchCancelRunning(t *testing.T) {
   300  	runWatchTest(t, testWatchCancelRunning)
   301  }
   302  
   303  func testWatchCancelRunning(t *testing.T, wctx *watchctx) {
   304  	ctx, cancel := context.WithCancel(context.Background())
   305  	if wctx.ch = wctx.w.Watch(ctx, "a"); wctx.ch == nil {
   306  		t.Fatalf("expected non-nil watcher channel")
   307  	}
   308  	if _, err := wctx.kv.Put(ctx, "a", "a"); err != nil {
   309  		t.Fatal(err)
   310  	}
   311  	cancel()
   312  	select {
   313  	case <-time.After(time.Second):
   314  		t.Fatalf("took too long to cancel")
   315  	case _, ok := <-wctx.ch:
   316  		if !ok {
   317  			// closed before getting put; OK
   318  			break
   319  		}
   320  		// got the PUT; should close next
   321  		select {
   322  		case <-time.After(time.Second):
   323  			t.Fatalf("took too long to close")
   324  		case v, ok2 := <-wctx.ch:
   325  			if ok2 {
   326  				t.Fatalf("expected watcher channel to close, got %v", v)
   327  			}
   328  		}
   329  	}
   330  }
   331  
   332  func putAndWatch(t *testing.T, wctx *watchctx, key, val string) {
   333  	if _, err := wctx.kv.Put(context.TODO(), key, val); err != nil {
   334  		t.Fatal(err)
   335  	}
   336  	select {
   337  	case <-time.After(5 * time.Second):
   338  		t.Fatalf("watch timed out")
   339  	case v, ok := <-wctx.ch:
   340  		if !ok {
   341  			t.Fatalf("unexpected watch close")
   342  		}
   343  		if string(v.Events[0].Kv.Value) != val {
   344  			t.Fatalf("bad value got %v, wanted %v", string(v.Events[0].Kv.Value), val)
   345  		}
   346  	}
   347  }
   348  
   349  func TestWatchResumeInitRev(t *testing.T) {
   350  	integration2.BeforeTest(t)
   351  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
   352  	defer clus.Terminate(t)
   353  
   354  	cli := clus.Client(0)
   355  	if _, err := cli.Put(context.TODO(), "b", "2"); err != nil {
   356  		t.Fatal(err)
   357  	}
   358  	if _, err := cli.Put(context.TODO(), "a", "3"); err != nil {
   359  		t.Fatal(err)
   360  	}
   361  	// if resume is broken, it'll pick up this key first instead of a=3
   362  	if _, err := cli.Put(context.TODO(), "a", "4"); err != nil {
   363  		t.Fatal(err)
   364  	}
   365  
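        	// the three puts above take the store to revision 4, which the created notification's header should report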
   366  	wch := clus.Client(0).Watch(context.Background(), "a", clientv3.WithRev(1), clientv3.WithCreatedNotify())
   367  	if resp, ok := <-wch; !ok || resp.Header.Revision != 4 {
   368  		t.Fatalf("got (%v, %v), expected create notification rev=4", resp, ok)
   369  	}
   370  	// pause wch
   371  	clus.Members[0].Bridge().DropConnections()
   372  	clus.Members[0].Bridge().PauseConnections()
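        	// dropping connections kills the current watch stream and pausing the bridge keeps it from reconnecting, so wch should stay blocked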
   373  
   374  	select {
   375  	case resp, ok := <-wch:
   376  		t.Skipf("wch should block, got (%+v, %v); drop not fast enough", resp, ok)
   377  	case <-time.After(100 * time.Millisecond):
   378  	}
   379  
   380  	// resume wch
   381  	clus.Members[0].Bridge().UnpauseConnections()
   382  
   383  	select {
   384  	case resp, ok := <-wch:
   385  		if !ok {
   386  			t.Fatal("unexpected watch close")
   387  		}
   388  		if len(resp.Events) == 0 {
   389  			t.Fatal("expected event on watch")
   390  		}
   391  		if string(resp.Events[0].Kv.Value) != "3" {
   392  			t.Fatalf("expected value=3, got event %+v", resp.Events[0])
   393  		}
   394  	case <-time.After(5 * time.Second):
   395  		t.Fatal("watch timed out")
   396  	}
   397  }
   398  
   399  // TestWatchResumeCompacted checks that the watcher gracefully closes when it
   400  // tries to resume to a revision that's been compacted out of the store.
   401  // Since the watcher's server restarts with stale data, the watcher will either
   402  // receive a compaction error or stay in sync and read all keys before the
   403  // compaction is finally applied.
   404  func TestWatchResumeCompacted(t *testing.T) {
   405  	integration2.BeforeTest(t)
   406  
   407  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3, UseBridge: true})
   408  	defer clus.Terminate(t)
   409  
   410  	// create a waiting watcher at rev 1
   411  	w := clus.Client(0)
   412  	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(1))
   413  	select {
   414  	case w := <-wch:
   415  		t.Errorf("unexpected message from wch %v", w)
   416  	default:
   417  	}
   418  	clus.Members[0].Stop(t)
   419  
   420  	clus.WaitLeader(t)
   421  
   422  	// put some data and compact away
   423  	numPuts := 5
   424  	kv := clus.Client(1)
   425  	for i := 0; i < numPuts; i++ {
   426  		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
   427  			t.Fatal(err)
   428  		}
   429  	}
   430  	if _, err := kv.Compact(context.TODO(), 3); err != nil {
   431  		t.Fatal(err)
   432  	}
   433  
   434  	clus.Members[0].Restart(t)
   435  
   436  	// since the watch's server isn't guaranteed to be synced with the cluster when
   437  	// the watch resumes, there is a window where the watch can stay synced and
   438  	// read off all events; if the watcher misses the window, it will go out of
   439  	// sync and get a compaction error.
   440  	wRev := int64(2)
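        	// the first put on "foo" lands at revision 2 (a fresh store starts at revision 1), so events are expected from there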
   441  	for int(wRev) <= numPuts+1 {
   442  		var wresp clientv3.WatchResponse
   443  		var ok bool
   444  		select {
   445  		case wresp, ok = <-wch:
   446  			if !ok {
   447  				t.Fatalf("expected wresp, but got closed channel")
   448  			}
   449  		case <-time.After(5 * time.Second):
   450  			t.Fatalf("compacted watch timed out")
   451  		}
   452  		for _, ev := range wresp.Events {
   453  			if ev.Kv.ModRevision != wRev {
   454  				t.Fatalf("expected modRev %v, got %+v", wRev, ev)
   455  			}
   456  			wRev++
   457  		}
   458  		if wresp.Err() == nil {
   459  			continue
   460  		}
   461  		if wresp.Err() != rpctypes.ErrCompacted {
   462  			t.Fatalf("wresp.Err() expected %v, got %+v", rpctypes.ErrCompacted, wresp.Err())
   463  		}
   464  		break
   465  	}
   466  	if int(wRev) > numPuts+1 {
   467  		// got data faster than the compaction
   468  		return
   469  	}
   470  	// received compaction error; ensure the channel closes
   471  	select {
   472  	case wresp, ok := <-wch:
   473  		if ok {
   474  			t.Fatalf("expected closed channel, but got %v", wresp)
   475  		}
   476  	case <-time.After(5 * time.Second):
   477  		t.Fatalf("timed out waiting for channel close")
   478  	}
   479  }
   480  
   481  // TestWatchCompactRevision ensures the CompactRevision error is given on a
   482  // compaction event ahead of a watcher.
   483  func TestWatchCompactRevision(t *testing.T) {
   484  	integration2.BeforeTest(t)
   485  
   486  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   487  	defer clus.Terminate(t)
   488  
   489  	// set some keys
   490  	kv := clus.RandClient()
   491  	for i := 0; i < 5; i++ {
   492  		if _, err := kv.Put(context.TODO(), "foo", "bar"); err != nil {
   493  			t.Fatal(err)
   494  		}
   495  	}
   496  
   497  	w := clus.RandClient()
   498  
   499  	if _, err := kv.Compact(context.TODO(), 4); err != nil {
   500  		t.Fatal(err)
   501  	}
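        	// compacting at revision 4 discards earlier history, so a watch starting at revision 2 must fail with ErrCompacted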
   502  	wch := w.Watch(context.Background(), "foo", clientv3.WithRev(2))
   503  
   504  	// get compacted error message
   505  	wresp, ok := <-wch
   506  	if !ok {
   507  		t.Fatalf("expected wresp, but got closed channel")
   508  	}
   509  	if wresp.Err() != rpctypes.ErrCompacted {
   510  		t.Fatalf("wresp.Err() expected %v, but got %v", rpctypes.ErrCompacted, wresp.Err())
   511  	}
   512  	if !wresp.Canceled {
   513  		t.Fatalf("wresp.Canceled expected true, got %+v", wresp)
   514  	}
   515  
   516  	// ensure the channel is closed
   517  	if wresp, ok = <-wch; ok {
   518  		t.Fatalf("expected closed channel, but got %v", wresp)
   519  	}
   520  }
   521  
   522  func TestWatchWithProgressNotify(t *testing.T)        { testWatchWithProgressNotify(t, true) }
   523  func TestWatchWithProgressNotifyNoEvent(t *testing.T) { testWatchWithProgressNotify(t, false) }
   524  
   525  func testWatchWithProgressNotify(t *testing.T, watchOnPut bool) {
   526  	integration2.BeforeTest(t)
   527  
   528  	// accelerate report interval so test terminates quickly
   529  	oldpi := v3rpc.GetProgressReportInterval()
   530  	// using atomics to avoid race warnings
   531  	v3rpc.SetProgressReportInterval(3 * time.Second)
   532  	pi := 3 * time.Second
   533  	defer func() { v3rpc.SetProgressReportInterval(oldpi) }()
   534  
   535  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
   536  	defer clus.Terminate(t)
   537  
   538  	wc := clus.RandClient()
   539  
   540  	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
   541  	if watchOnPut {
   542  		opts = append(opts, clientv3.WithPrefix())
   543  	}
   544  	rch := wc.Watch(context.Background(), "foo", opts...)
   545  
   546  	select {
   547  	case resp := <-rch: // wait for notification
   548  		if len(resp.Events) != 0 {
   549  			t.Fatalf("resp.Events expected none, got %+v", resp.Events)
   550  		}
   551  	case <-time.After(2 * pi):
   552  		t.Fatalf("watch response expected in %v, but timed out", pi)
   553  	}
   554  
   555  	kvc := clus.RandClient()
   556  	if _, err := kvc.Put(context.TODO(), "foox", "bar"); err != nil {
   557  		t.Fatal(err)
   558  	}
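        	// the single put above takes the store to revision 2, which the next watch response header should report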
   559  
   560  	select {
   561  	case resp := <-rch:
   562  		if resp.Header.Revision != 2 {
   563  			t.Fatalf("resp.Header.Revision expected 2, got %d", resp.Header.Revision)
   564  		}
   565  		if watchOnPut { // wait for put if watch on the put key
   566  			ev := []*clientv3.Event{{Type: clientv3.EventTypePut,
   567  				Kv: &mvccpb.KeyValue{Key: []byte("foox"), Value: []byte("bar"), CreateRevision: 2, ModRevision: 2, Version: 1}}}
   568  			if !reflect.DeepEqual(ev, resp.Events) {
   569  				t.Fatalf("expected %+v, got %+v", ev, resp.Events)
   570  			}
   571  		} else if len(resp.Events) != 0 { // wait for notification otherwise
   572  			t.Fatalf("expected no events, but got %+v", resp.Events)
   573  		}
   574  	case <-time.After(time.Duration(1.5 * float64(pi))):
   575  		t.Fatalf("watch response expected in %v, but timed out", pi)
   576  	}
   577  }
   578  
   579  func TestConfigurableWatchProgressNotifyInterval(t *testing.T) {
   580  	integration2.BeforeTest(t)
   581  
   582  	progressInterval := 200 * time.Millisecond
   583  	clus := integration2.NewCluster(t,
   584  		&integration2.ClusterConfig{
   585  			Size:                        3,
   586  			WatchProgressNotifyInterval: progressInterval,
   587  		})
   588  	defer clus.Terminate(t)
   589  
   590  	opts := []clientv3.OpOption{clientv3.WithProgressNotify()}
   591  	rch := clus.RandClient().Watch(context.Background(), "foo", opts...)
   592  
   593  	timeout := 1 * time.Second // we expect to receive a watch progress notify within 2 * progressInterval,
   594  	// but in a CPU-starved situation it may take longer, so use 1 second as the timeout.
   595  	select {
   596  	case resp := <-rch: // waiting for a watch progress notify response
   597  		if !resp.IsProgressNotify() {
   598  			t.Fatalf("expected resp.IsProgressNotify() == true")
   599  		}
   600  	case <-time.After(timeout):
   601  		t.Fatalf("timed out waiting for watch progress notify response in %v", timeout)
   602  	}
   603  }
   604  
   605  func TestWatchRequestProgress(t *testing.T) {
   606  	if integration2.ThroughProxy {
   607  		t.Skipf("grpc-proxy does not support WatchProgress yet")
   608  	}
   609  	testCases := []struct {
   610  		name     string
   611  		watchers []string
   612  	}{
   613  		{"0-watcher", []string{}},
   614  		{"1-watcher", []string{"/"}},
   615  		{"2-watcher", []string{"/", "/"}},
   616  	}
   617  
   618  	for _, c := range testCases {
   619  		t.Run(c.name, func(t *testing.T) {
   620  			integration2.BeforeTest(t)
   621  
   622  			watchTimeout := 3 * time.Second
   623  
   624  			clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
   625  			defer clus.Terminate(t)
   626  
   627  			wc := clus.RandClient()
   628  
   629  			var watchChans []clientv3.WatchChan
   630  
   631  			for _, prefix := range c.watchers {
   632  				watchChans = append(watchChans, wc.Watch(context.Background(), prefix, clientv3.WithPrefix()))
   633  			}
   634  
   635  			_, err := wc.Put(context.Background(), "/a", "1")
   636  			if err != nil {
   637  				t.Fatal(err)
   638  			}
   639  
   640  			for _, rch := range watchChans {
   641  				select {
   642  				case resp := <-rch: // wait for notification
   643  					if len(resp.Events) != 1 {
   644  						t.Fatalf("resp.Events expected 1, got %d", len(resp.Events))
   645  					}
   646  				case <-time.After(watchTimeout):
   647  					t.Fatalf("watch response expected in %v, but timed out", watchTimeout)
   648  				}
   649  			}
   650  
   651  			// put a value not being watched to increment revision
   652  			_, err = wc.Put(context.Background(), "x", "1")
   653  			if err != nil {
   654  				t.Fatal(err)
   655  			}
   656  
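        			// after the two puts the store is at revision 3; each watch channel's progress notify should report that revision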
   657  			err = wc.RequestProgress(context.Background())
   658  			if err != nil {
   659  				t.Fatal(err)
   660  			}
   661  
   662  			// verify all watch channels receive a progress notify
   663  			for _, rch := range watchChans {
   664  				select {
   665  				case resp := <-rch:
   666  					if !resp.IsProgressNotify() {
   667  						t.Fatalf("expected resp.IsProgressNotify() == true")
   668  					}
   669  					if resp.Header.Revision != 3 {
   670  						t.Fatalf("resp.Header.Revision expected 3, got %d", resp.Header.Revision)
   671  					}
   672  				case <-time.After(watchTimeout):
   673  					t.Fatalf("progress response expected in %v, but timed out", watchTimeout)
   674  				}
   675  			}
   676  		})
   677  	}
   678  }
   679  
   680  func TestWatchEventType(t *testing.T) {
   681  	integration2.BeforeTest(t)
   682  
   683  	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   684  	defer cluster.Terminate(t)
   685  
   686  	client := cluster.RandClient()
   687  	ctx := context.Background()
   688  	watchChan := client.Watch(ctx, "/", clientv3.WithPrefix())
   689  
   690  	if _, err := client.Put(ctx, "/toDelete", "foo"); err != nil {
   691  		t.Fatalf("Put failed: %v", err)
   692  	}
   693  	if _, err := client.Put(ctx, "/toDelete", "bar"); err != nil {
   694  		t.Fatalf("Put failed: %v", err)
   695  	}
   696  	if _, err := client.Delete(ctx, "/toDelete"); err != nil {
   697  		t.Fatalf("Delete failed: %v", err)
   698  	}
   699  	lcr, err := client.Lease.Grant(ctx, 1)
   700  	if err != nil {
   701  		t.Fatalf("lease create failed: %v", err)
   702  	}
   703  	if _, err := client.Put(ctx, "/toExpire", "foo", clientv3.WithLease(lcr.ID)); err != nil {
   704  		t.Fatalf("Put failed: %v", err)
   705  	}
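        	// when the 1s lease expires, the server deletes /toExpire, producing the final delete event expected below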
   706  
   707  	tests := []struct {
   708  		et       mvccpb.Event_EventType
   709  		isCreate bool
   710  		isModify bool
   711  	}{{
   712  		et:       clientv3.EventTypePut,
   713  		isCreate: true,
   714  	}, {
   715  		et:       clientv3.EventTypePut,
   716  		isModify: true,
   717  	}, {
   718  		et: clientv3.EventTypeDelete,
   719  	}, {
   720  		et:       clientv3.EventTypePut,
   721  		isCreate: true,
   722  	}, {
   723  		et: clientv3.EventTypeDelete,
   724  	}}
   725  
   726  	var res []*clientv3.Event
   727  
   728  	for {
   729  		select {
   730  		case wres := <-watchChan:
   731  			res = append(res, wres.Events...)
   732  		case <-time.After(10 * time.Second):
   733  			t.Fatalf("Should receive %d events and then break out of the loop", len(tests))
   734  		}
   735  		if len(res) == len(tests) {
   736  			break
   737  		}
   738  	}
   739  
   740  	for i, tt := range tests {
   741  		ev := res[i]
   742  		if tt.et != ev.Type {
   743  			t.Errorf("#%d: event type want=%s, got=%s", i, tt.et, ev.Type)
   744  		}
   745  		if tt.isCreate && !ev.IsCreate() {
   746  			t.Errorf("#%d: event should be CreateEvent", i)
   747  		}
   748  		if tt.isModify && !ev.IsModify() {
   749  			t.Errorf("#%d: event should be ModifyEvent", i)
   750  		}
   751  	}
   752  }
   753  
   754  func TestWatchErrConnClosed(t *testing.T) {
   755  	integration2.BeforeTest(t)
   756  
   757  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   758  	defer clus.Terminate(t)
   759  
   760  	cli := clus.Client(0)
   761  
   762  	donec := make(chan struct{})
   763  	go func() {
   764  		defer close(donec)
   765  		ch := cli.Watch(context.TODO(), "foo")
   766  
   767  		if wr := <-ch; !IsCanceled(wr.Err()) {
   768  			t.Errorf("expected context canceled, got %v", wr.Err())
   769  		}
   770  	}()
   771  
   772  	if err := cli.ActiveConnection().Close(); err != nil {
   773  		t.Fatal(err)
   774  	}
   775  	clus.TakeClient(0)
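        	// TakeClient hands ownership of the closed client to the test so cluster teardown does not try to close it again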
   776  
   777  	select {
   778  	case <-time.After(integration2.RequestWaitTimeout):
   779  		t.Fatal("wc.Watch took too long")
   780  	case <-donec:
   781  	}
   782  }
   783  
   784  func TestWatchAfterClose(t *testing.T) {
   785  	integration2.BeforeTest(t)
   786  
   787  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   788  	defer clus.Terminate(t)
   789  
   790  	cli := clus.Client(0)
   791  	clus.TakeClient(0)
   792  	if err := cli.Close(); err != nil {
   793  		t.Fatal(err)
   794  	}
   795  
   796  	donec := make(chan struct{})
   797  	go func() {
   798  		cli.Watch(context.TODO(), "foo")
   799  		if err := cli.Close(); err != nil && err != context.Canceled {
   800  			t.Errorf("expected %v, got %v", context.Canceled, err)
   801  		}
   802  		close(donec)
   803  	}()
   804  	select {
   805  	case <-time.After(integration2.RequestWaitTimeout):
   806  		t.Fatal("wc.Watch took too long")
   807  	case <-donec:
   808  	}
   809  }
   810  
   811  // TestWatchWithRequireLeader checks that the watch channel closes when there is no leader.
   812  func TestWatchWithRequireLeader(t *testing.T) {
   813  	integration2.BeforeTest(t)
   814  
   815  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
   816  	defer clus.Terminate(t)
   817  
   818  	// Put a key for the non-require-leader watch to read as an event.
   819  	// The watchers will be on member[0]; put the key through member[0] to
   820  	// ensure that it receives the update, so watching after killing quorum
   821  	// is guaranteed to have the key.
   822  	liveClient := clus.Client(0)
   823  	if _, err := liveClient.Put(context.TODO(), "foo", "bar"); err != nil {
   824  		t.Fatal(err)
   825  	}
   826  
   827  	clus.Members[1].Stop(t)
   828  	clus.Members[2].Stop(t)
   829  	clus.Client(1).Close()
   830  	clus.Client(2).Close()
   831  	clus.TakeClient(1)
   832  	clus.TakeClient(2)
   833  
   834  	// wait for election timeout, then member[0] will not have a leader.
   835  	tickDuration := 10 * time.Millisecond
   836  	// existing streams need three elections before they're torn down; wait for 5 election cycles
   837  	// so proxy tests receive a leader loss event on their existing watches before creating a new watch.
   838  	time.Sleep(time.Duration(5*clus.Members[0].ElectionTicks) * tickDuration)
   839  
   840  	chLeader := liveClient.Watch(clientv3.WithRequireLeader(context.TODO()), "foo", clientv3.WithRev(1))
   841  	chNoLeader := liveClient.Watch(context.TODO(), "foo", clientv3.WithRev(1))
   842  
   843  	select {
   844  	case resp, ok := <-chLeader:
   845  		if !ok {
   846  			t.Fatalf("expected %v watch channel, got closed channel", rpctypes.ErrNoLeader)
   847  		}
   848  		if resp.Err() != rpctypes.ErrNoLeader {
   849  			t.Fatalf("expected %v watch response error, got %+v", rpctypes.ErrNoLeader, resp)
   850  		}
   851  	case <-time.After(integration2.RequestWaitTimeout):
   852  		t.Fatal("watch without leader took too long to close")
   853  	}
   854  
   855  	select {
   856  	case resp, ok := <-chLeader:
   857  		if ok {
   858  			t.Fatalf("expected closed channel, got response %v", resp)
   859  		}
   860  	case <-time.After(integration2.RequestWaitTimeout):
   861  		t.Fatal("waited too long for channel to close")
   862  	}
   863  
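        	// the watch without WithRequireLeader is served locally by member[0], so it should still deliver the event despite the lost quorum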
   864  	if _, ok := <-chNoLeader; !ok {
   865  		t.Fatalf("expected response, got closed channel")
   866  	}
   867  
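        	// both watch streams above count as stream RPCs against member[0], so the metric below should be at least 2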
   868  	cnt, err := clus.Members[0].Metric(
   869  		"etcd_server_client_requests_total",
   870  		`type="stream"`,
   871  		fmt.Sprintf(`client_api_version="%v"`, version.APIVersion),
   872  	)
   873  	if err != nil {
   874  		t.Fatal(err)
   875  	}
   876  	cv, err := strconv.ParseInt(cnt, 10, 32)
   877  	if err != nil {
   878  		t.Fatal(err)
   879  	}
   880  	if cv < 2 { // >2 when retried
   881  		t.Fatalf("expected at least 2, got %q", cnt)
   882  	}
   883  }
   884  
   885  // TestWatchWithFilter checks that watch filtering works.
   886  func TestWatchWithFilter(t *testing.T) {
   887  	integration2.BeforeTest(t)
   888  
   889  	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   890  	defer cluster.Terminate(t)
   891  
   892  	client := cluster.RandClient()
   893  	ctx := context.Background()
   894  
   895  	wcNoPut := client.Watch(ctx, "a", clientv3.WithFilterPut())
   896  	wcNoDel := client.Watch(ctx, "a", clientv3.WithFilterDelete())
   897  
   898  	if _, err := client.Put(ctx, "a", "abc"); err != nil {
   899  		t.Fatal(err)
   900  	}
   901  	if _, err := client.Delete(ctx, "a"); err != nil {
   902  		t.Fatal(err)
   903  	}
   904  
   905  	npResp := <-wcNoPut
   906  	if len(npResp.Events) != 1 || npResp.Events[0].Type != clientv3.EventTypeDelete {
   907  		t.Fatalf("expected delete event, got %+v", npResp.Events)
   908  	}
   909  	ndResp := <-wcNoDel
   910  	if len(ndResp.Events) != 1 || ndResp.Events[0].Type != clientv3.EventTypePut {
   911  		t.Fatalf("expected put event, got %+v", ndResp.Events)
   912  	}
   913  
   914  	select {
   915  	case resp := <-wcNoPut:
   916  		t.Fatalf("unexpected event on filtered put (%+v)", resp)
   917  	case resp := <-wcNoDel:
   918  		t.Fatalf("unexpected event on filtered delete (%+v)", resp)
   919  	case <-time.After(100 * time.Millisecond):
   920  	}
   921  }
   922  
   923  // TestWatchWithCreatedNotification checks that WithCreatedNotify returns a
   924  // Created watch response.
   925  func TestWatchWithCreatedNotification(t *testing.T) {
   926  	integration2.BeforeTest(t)
   927  
   928  	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   929  	defer cluster.Terminate(t)
   930  
   931  	client := cluster.RandClient()
   932  
   933  	ctx := context.Background()
   934  
   935  	createC := client.Watch(ctx, "a", clientv3.WithCreatedNotify())
   936  
   937  	resp := <-createC
   938  
   939  	if !resp.Created {
   940  		t.Fatalf("expected created event, got %v", resp)
   941  	}
   942  }
   943  
   944  // TestWatchWithCreatedNotificationDropConn ensures that
   945  // a watcher with created notify does not post duplicate
   946  // created events after a disconnect.
   947  func TestWatchWithCreatedNotificationDropConn(t *testing.T) {
   948  	integration2.BeforeTest(t)
   949  
   950  	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
   951  	defer cluster.Terminate(t)
   952  
   953  	client := cluster.RandClient()
   954  
   955  	wch := client.Watch(context.Background(), "a", clientv3.WithCreatedNotify())
   956  
   957  	resp := <-wch
   958  
   959  	if !resp.Created {
   960  		t.Fatalf("expected created event, got %v", resp)
   961  	}
   962  
   963  	cluster.Members[0].Bridge().DropConnections()
   964  
   965  	// check watch channel doesn't post another watch response.
   966  	select {
   967  	case wresp := <-wch:
   968  		t.Fatalf("got unexpected watch response: %+v\n", wresp)
   969  	case <-time.After(time.Second):
   970  		// watcher may not reconnect by the time it hits the select,
   971  		// so it wouldn't have a chance to filter out the second create event
   972  	}
   973  }
   974  
   975  // TestWatchCancelOnServer ensures client watcher cancels propagate back to the server.
   976  func TestWatchCancelOnServer(t *testing.T) {
   977  	integration2.BeforeTest(t)
   978  
   979  	cluster := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
   980  	defer cluster.Terminate(t)
   981  
   982  	client := cluster.RandClient()
   983  	numWatches := 10
   984  
   985  	// The grpc proxy starts watches to detect leadership after the proxy server
   986  	// reports itself as started; to avoid racing on the proxy's internal watches, wait
   987  	// until require-leader watches get create responses to ensure the leadership
   988  	// watches have started.
   989  	for {
   990  		ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.TODO()))
   991  		ww := client.Watch(ctx, "a", clientv3.WithCreatedNotify())
   992  		wresp := <-ww
   993  		cancel()
   994  		if wresp.Err() == nil {
   995  			break
   996  		}
   997  	}
   998  
   999  	cancels := make([]context.CancelFunc, numWatches)
  1000  	for i := 0; i < numWatches; i++ {
  1001  		// force separate streams in client
  1002  		md := metadata.Pairs("some-key", fmt.Sprintf("%d", i))
  1003  		mctx := metadata.NewOutgoingContext(context.Background(), md)
  1004  		ctx, cancel := context.WithCancel(mctx)
  1005  		cancels[i] = cancel
  1006  		w := client.Watch(ctx, fmt.Sprintf("%d", i), clientv3.WithCreatedNotify())
  1007  		<-w
  1008  	}
  1009  
  1010  	// get max watches; proxy tests have leadership watches, so total may be >numWatches
  1011  	maxWatches, _ := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
  1012  
  1013  	// cancel all and wait for cancels to propagate to etcd server
  1014  	for i := 0; i < numWatches; i++ {
  1015  		cancels[i]()
  1016  	}
  1017  	time.Sleep(time.Second)
  1018  
  1019  	minWatches, err := cluster.Members[0].Metric("etcd_debugging_mvcc_watcher_total")
  1020  	if err != nil {
  1021  		t.Fatal(err)
  1022  	}
  1023  
  1024  	maxWatchV, minWatchV := 0, 0
  1025  	n, serr := fmt.Sscanf(maxWatches+" "+minWatches, "%d %d", &maxWatchV, &minWatchV)
  1026  	if n != 2 || serr != nil {
  1027  		t.Fatalf("expected n=2 and err=nil, got n=%d and err=%v", n, serr)
  1028  	}
  1029  
  1030  	if maxWatchV-minWatchV < numWatches {
  1031  		t.Fatalf("expected %d canceled watchers, got %d", numWatches, maxWatchV-minWatchV)
  1032  	}
  1033  }
  1034  
  1035  // TestWatchOverlapContextCancel stresses the watcher stream teardown path by
  1036  // creating/canceling watchers to ensure that new watchers are not taken down
  1037  // by a torn down watch stream. The sort of race that's being detected:
  1038  //  1. create w1 using a cancelable context, call it "ctx"
  1039  //  2. cancel ctx
  1040  //  3. watcher client begins tearing down watcher grpc stream since no more watchers
  1041  //  4. start creating watcher w2 using a new "ctx" (not canceled), attaches to old grpc stream
  1042  //  5. watcher client finishes tearing down stream on "ctx"
  1043  //  6. w2 comes back canceled
  1044  func TestWatchOverlapContextCancel(t *testing.T) {
  1045  	f := func(clus *integration2.Cluster) {}
  1046  	testWatchOverlapContextCancel(t, f)
  1047  }
  1048  
  1049  func TestWatchOverlapDropConnContextCancel(t *testing.T) {
  1050  	f := func(clus *integration2.Cluster) {
  1051  		clus.Members[0].Bridge().DropConnections()
  1052  	}
  1053  	testWatchOverlapContextCancel(t, f)
  1054  }
  1055  
  1056  func testWatchOverlapContextCancel(t *testing.T, f func(*integration2.Cluster)) {
  1057  	integration2.BeforeTest(t)
  1058  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
  1059  	defer clus.Terminate(t)
  1060  
  1061  	n := 100
  1062  	ctxs, ctxc := make([]context.Context, 5), make([]chan struct{}, 5)
  1063  	for i := range ctxs {
  1064  		// make unique stream
  1065  		md := metadata.Pairs("some-key", fmt.Sprintf("%d", i))
  1066  		ctxs[i] = metadata.NewOutgoingContext(context.Background(), md)
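        		// the clientv3 watcher keys its grpc streams by outgoing context metadata, so each distinct value gets its own stream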
  1067  		// limits the maximum number of outstanding watchers per stream
  1068  		ctxc[i] = make(chan struct{}, 2)
  1069  	}
  1070  
  1071  	// issue concurrent watches on "abc" with cancel
  1072  	cli := clus.RandClient()
  1073  	if _, err := cli.Put(context.TODO(), "abc", "def"); err != nil {
  1074  		t.Fatal(err)
  1075  	}
  1076  	ch := make(chan struct{}, n)
  1077  	tCtx, cancelFunc := context.WithCancel(context.Background())
  1078  	defer cancelFunc()
  1079  	for i := 0; i < n; i++ {
  1080  		go func() {
  1081  			defer func() { ch <- struct{}{} }()
  1082  			idx := rand.Intn(len(ctxs))
  1083  			ctx, cancel := context.WithCancel(ctxs[idx])
  1084  			ctxc[idx] <- struct{}{}
  1085  			wch := cli.Watch(ctx, "abc", clientv3.WithRev(1))
  1086  			select {
  1087  			case <-tCtx.Done():
  1088  				cancel()
  1089  				return
  1090  			default:
  1091  			}
  1092  			f(clus)
  1093  			select {
  1094  			case _, ok := <-wch:
  1095  				if !ok {
  1096  					t.Errorf("unexpected closed channel %p", wch)
  1097  				}
  1098  			// may take a second or two to reestablish a watcher because of
  1099  			// grpc backoff policies for disconnects
  1100  			case <-time.After(5 * time.Second):
  1101  				t.Errorf("timed out waiting for watch on %p", wch)
  1102  			}
  1103  			// randomize how cancel overlaps with watch creation
  1104  			if rand.Intn(2) == 0 {
  1105  				<-ctxc[idx]
  1106  				cancel()
  1107  			} else {
  1108  				cancel()
  1109  				<-ctxc[idx]
  1110  			}
  1111  		}()
  1112  	}
  1113  	// join on watches
  1114  	for i := 0; i < n; i++ {
  1115  		select {
  1116  		case <-ch:
  1117  		case <-time.After(5 * time.Second):
  1118  			t.Fatalf("timed out waiting for completed watch")
  1119  		}
  1120  	}
  1121  }
  1122  
  1123  // TestWatchCancelAndCloseClient ensures that canceling a watcher then immediately
  1124  // closing the client does not return a client closing error.
  1125  func TestWatchCancelAndCloseClient(t *testing.T) {
  1126  	integration2.BeforeTest(t)
  1127  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
  1128  	defer clus.Terminate(t)
  1129  	cli := clus.Client(0)
  1130  	ctx, cancel := context.WithCancel(context.Background())
  1131  	wch := cli.Watch(ctx, "abc")
  1132  	donec := make(chan struct{})
  1133  	go func() {
  1134  		defer close(donec)
  1135  		select {
  1136  		case wr, ok := <-wch:
  1137  			if ok {
  1138  				t.Errorf("expected closed watch after cancel(), got resp=%+v err=%v", wr, wr.Err())
  1139  			}
  1140  		case <-time.After(5 * time.Second):
  1141  			t.Error("timed out waiting for closed channel")
  1142  		}
  1143  	}()
  1144  	cancel()
  1145  	if err := cli.Close(); err != nil {
  1146  		t.Fatal(err)
  1147  	}
  1148  	<-donec
  1149  	clus.TakeClient(0)
  1150  }
  1151  
  1152  // TestWatchStressResumeClose establishes a bunch of watchers, disconnects
  1153  // to put them in resuming mode, cancels them so some resumes fail due to the
  1154  // cancel, then closes the watcher interface to ensure correct cleanup.
  1155  func TestWatchStressResumeClose(t *testing.T) {
  1156  	integration2.BeforeTest(t)
  1157  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1, UseBridge: true})
  1158  	defer clus.Terminate(t)
  1159  	cli := clus.Client(0)
  1160  
  1161  	ctx, cancel := context.WithCancel(context.Background())
  1162  	// add more watches than can be resumed before the cancel
  1163  	wchs := make([]clientv3.WatchChan, 2000)
  1164  	for i := range wchs {
  1165  		wchs[i] = cli.Watch(ctx, "abc")
  1166  	}
  1167  	clus.Members[0].Bridge().DropConnections()
  1168  	cancel()
  1169  	if err := cli.Close(); err != nil {
  1170  		t.Fatal(err)
  1171  	}
  1172  	clus.TakeClient(0)
  1173  }
  1174  
  1175  // TestWatchCancelDisconnected ensures canceling a watcher works when
  1176  // its grpc stream is disconnected / reconnecting.
  1177  func TestWatchCancelDisconnected(t *testing.T) {
  1178  	integration2.BeforeTest(t)
  1179  	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
  1180  	defer clus.Terminate(t)
  1181  	cli := clus.Client(0)
  1182  	ctx, cancel := context.WithCancel(context.Background())
  1183  	// create a watcher, then stop its server so the cancel happens while the stream is disconnected
  1184  	wch := cli.Watch(ctx, "abc")
  1185  	clus.Members[0].Stop(t)
  1186  	cancel()
  1187  	select {
  1188  	case <-wch:
  1189  	case <-time.After(time.Second):
  1190  		t.Fatal("took too long to cancel disconnected watcher")
  1191  	}
  1192  }
  1193  
  1194  // TestWatchClose ensures that closing the watcher does not return an error
  1195  func TestWatchClose(t *testing.T) {
  1196  	runWatchTest(t, testWatchClose)
  1197  }
  1198  
  1199  func testWatchClose(t *testing.T, wctx *watchctx) {
  1200  	ctx, cancel := context.WithCancel(context.Background())
  1201  	wch := wctx.w.Watch(ctx, "a")
  1202  	cancel()
  1203  	if wch == nil {
  1204  		t.Fatalf("expected watcher channel, got nil")
  1205  	}
  1206  	if wctx.w.Close() != nil {
  1207  		t.Fatalf("watch did not close successfully")
  1208  	}
  1209  	wresp, ok := <-wch
  1210  	if ok {
  1211  		t.Fatalf("read wch got %v; expected closed channel", wresp)
  1212  	}
  1213  }