go.etcd.io/etcd@v3.3.27+incompatible/mvcc/watchable_store_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

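// TestWatch tests that watching an existing key puts the new watcher
// in the synced watcher group.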
func TestWatch(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	w.Watch(testKey, nil, 0)

	if !s.synced.contains(string(testKey)) {
		// the key must have had an entry in synced
		t.Errorf("existence = false, want true")
	}
}

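// TestNewWatcherCancel tests that canceling a watcher removes it
// from the synced watcher group.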
func TestNewWatcherCancel(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()
	wt := w.Watch(testKey, nil, 0)

	if err := w.Cancel(wt); err != nil {
		t.Error(err)
	}

	if s.synced.contains(string(testKey)) {
		// the key should have been deleted
		t.Errorf("existence = true, want false")
	}
}

// TestCancelUnsynced tests that canceling watchers removes them from unsynced.
func TestCancelUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	// manually create watchableStore instead of newWatchableStore
	// because newWatchableStore automatically calls syncWatchers
	// to sync watchers in the unsynced map. We want to keep watchers
	// in unsynced so we can test that cancel removes them.
	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),

		// 'synced' is not populated in this test; it is initialized
		// only so the test does not crash from assigning to a nil map.
		synced: newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This bumps the store revision,
	// and later we set the watcher's startRev to 1
	// to force the watchers into unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	// create watcherN watch IDs to cancel
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// use startRev 1 to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	for _, idx := range watchIDs {
		if err := w.Cancel(idx); err != nil {
			t.Error(err)
		}
	}

	// After canceling, unsynced should be empty
	// because cancel removes watchers from unsynced.
	if size := s.unsynced.size(); size != 0 {
		t.Errorf("unsynced size = %d, want 0", size)
	}
}

// TestSyncWatchers populates the unsynced watcher map and tests that
// syncWatchers correctly sends events to the channels of unsynced watchers
// and moves these watchers to synced.
func TestSyncWatchers(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()

	s := &watchableStore{
		store:    NewStore(b, &lease.FakeLessor{}, nil),
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// arbitrary number of watchers
	watcherN := 100

	for i := 0; i < watcherN; i++ {
		// specify rev as 1 to keep watchers in unsynced
		w.Watch(testKey, nil, 1)
	}

	// Before running s.syncWatchers(), synced should be empty because we
	// manually populated unsynced only.
	sws := s.synced.watcherSetByKey(string(testKey))
	uws := s.unsynced.watcherSetByKey(string(testKey))

	if len(sws) != 0 {
		t.Fatalf("synced[string(testKey)] size = %d, want 0", len(sws))
	}
	// unsynced should not be empty because we manually populated unsynced only
	if len(uws) != watcherN {
		t.Errorf("unsynced size = %d, want %d", len(uws), watcherN)
	}

	// this should move all unsynced watchers to synced
	s.syncWatchers()

	sws = s.synced.watcherSetByKey(string(testKey))
	uws = s.unsynced.watcherSetByKey(string(testKey))

	// After running s.syncWatchers(), synced should not be empty because
	// syncWatchers populates synced in this test case.
	if len(sws) != watcherN {
		t.Errorf("synced[string(testKey)] size = %d, want %d", len(sws), watcherN)
	}

	// unsynced should be empty because syncWatchers is expected to move all
	// watchers from unsynced to synced in this test case.
	if len(uws) != 0 {
		t.Errorf("unsynced size = %d, want 0", len(uws))
	}

	for w := range sws {
		if w.minRev != s.Rev()+1 {
			t.Errorf("w.minRev = %d, want %d", w.minRev, s.Rev()+1)
		}
	}

	if len(w.(*watchStream).ch) != watcherN {
		t.Errorf("watched event size = %d, want %d", len(w.(*watchStream).ch), watcherN)
	}

	evs := (<-w.(*watchStream).ch).Events
	if len(evs) != 1 {
		t.Errorf("len(evs) got = %d, want = 1", len(evs))
	}
	if evs[0].Type != mvccpb.PUT {
		t.Errorf("got = %v, want = %v", evs[0].Type, mvccpb.PUT)
	}
	if !bytes.Equal(evs[0].Kv.Key, testKey) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Key, testKey)
	}
	if !bytes.Equal(evs[0].Kv.Value, testValue) {
		t.Errorf("got = %s, want = %s", evs[0].Kv.Value, testValue)
	}
}

// TestWatchCompacted tests a watcher that watches on a compacted revision.
func TestWatchCompacted(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()
	testKey := []byte("foo")
	testValue := []byte("bar")

	maxRev := 10
	compactRev := int64(5)
	for i := 0; i < maxRev; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}
	_, err := s.Compact(compactRev)
	if err != nil {
		t.Fatalf("failed to compact kv (%v)", err)
	}

	w := s.NewWatchStream()
	wt := w.Watch(testKey, nil, compactRev-1)

	select {
	case resp := <-w.Chan():
		if resp.WatchID != wt {
			t.Errorf("resp.WatchID = %x, want %x", resp.WatchID, wt)
		}
		if resp.CompactRevision == 0 {
			t.Errorf("resp.Compacted = %v, want %v", resp.CompactRevision, compactRev)
		}
	case <-time.After(1 * time.Second):
		t.Fatalf("failed to receive response (timeout)")
	}
}

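// TestWatchFutureRev tests that a watcher registered on a future revision
// receives its first event exactly at that revision.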
func TestWatchFutureRev(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	testValue := []byte("bar")

	w := s.NewWatchStream()
	wrev := int64(10)
	w.Watch(testKey, nil, wrev)

	for i := 0; i < 10; i++ {
		rev := s.Put(testKey, testValue, lease.NoLease)
		if rev >= wrev {
			break
		}
	}

	select {
	case resp := <-w.Chan():
		if resp.Revision != wrev {
			t.Fatalf("rev = %d, want %d", resp.Revision, wrev)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("failed to get events from the response")
		}
		if resp.Events[0].Kv.ModRevision != wrev {
			t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, wrev)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second.")
	}
}

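// TestWatchRestore tests that a watcher created before Restore still receives
// the events present in the restored backend. The delayed variant lets the
// sync watcher loop run at least once before the restore happens.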
func TestWatchRestore(t *testing.T) {
	test := func(delay time.Duration) func(t *testing.T) {
		return func(t *testing.T) {
			b, tmpPath := backend.NewDefaultTmpBackend()
			s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)
			defer cleanup(s, b, tmpPath)

			testKey := []byte("foo")
			testValue := []byte("bar")
			rev := s.Put(testKey, testValue, lease.NoLease)

			newBackend, newPath := backend.NewDefaultTmpBackend()
			newStore := newWatchableStore(newBackend, &lease.FakeLessor{}, nil, nil)
			defer cleanup(newStore, newBackend, newPath)

			w := newStore.NewWatchStream()
			w.Watch(testKey, nil, rev-1)

			time.Sleep(delay)

			newStore.Restore(b)
			select {
			case resp := <-w.Chan():
				if resp.Revision != rev {
					t.Fatalf("rev = %d, want %d", resp.Revision, rev)
				}
				if len(resp.Events) != 1 {
					t.Fatalf("failed to get events from the response")
				}
				if resp.Events[0].Kv.ModRevision != rev {
					t.Fatalf("kv.rev = %d, want %d", resp.Events[0].Kv.ModRevision, rev)
				}
			case <-time.After(time.Second):
				t.Fatal("failed to receive event in 1 second.")
			}
		}
	}

	t.Run("Normal", test(0))
	t.Run("RunSyncWatchLoopBeforeRestore", test(time.Millisecond*120)) // longer than default waitDuration
}

// TestWatchRestoreSyncedWatcher tests such a case that:
//   1. watcher is created with a future revision (startRev = current revision + 2)
//   2. watcher with a future revision is added to the "synced" watcher group
//   3. restore/overwrite storage with a snapshot of a higher last revision
//   4. restore operation moves "synced" to the "unsynced" watcher group
//   5. the watcher from step 1 is chosen for syncing, without panic
func TestWatchRestoreSyncedWatcher(t *testing.T) {
	b1, b1Path := backend.NewDefaultTmpBackend()
	s1 := newWatchableStore(b1, &lease.FakeLessor{}, nil, nil)
	defer cleanup(s1, b1, b1Path)

	b2, b2Path := backend.NewDefaultTmpBackend()
	s2 := newWatchableStore(b2, &lease.FakeLessor{}, nil, nil)
	defer cleanup(s2, b2, b2Path)

	testKey, testValue := []byte("foo"), []byte("bar")
	rev := s1.Put(testKey, testValue, lease.NoLease)
	startRev := rev + 2

	// create a watcher with a future revision
	// add to the "synced" watcher group (startRev > s.store.currentRev)
	w1 := s1.NewWatchStream()
	w1.Watch(testKey, nil, startRev)

	// make "s2" end up with a higher last revision
	s2.Put(testKey, testValue, lease.NoLease)
	s2.Put(testKey, testValue, lease.NoLease)

	// overwrite storage with higher revisions
	if err := s1.Restore(b2); err != nil {
		t.Fatal(err)
	}

	// wait for the next "syncWatchersLoop" iteration,
	// in which the unsynced watcher should be chosen
	time.Sleep(2 * time.Second)

	// trigger events for "startRev"
	s1.Put(testKey, testValue, lease.NoLease)

	select {
	case resp := <-w1.Chan():
		if resp.Revision != startRev {
			t.Fatalf("resp.Revision expect %d, got %d", startRev, resp.Revision)
		}
		if len(resp.Events) != 1 {
			t.Fatalf("len(resp.Events) expect 1, got %d", len(resp.Events))
		}
		if resp.Events[0].Kv.ModRevision != startRev {
			t.Fatalf("resp.Events[0].Kv.ModRevision expect %d, got %d", startRev, resp.Events[0].Kv.ModRevision)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive event in 1 second")
	}
}

// TestWatchBatchUnsynced tests batching on unsynced watchers
func TestWatchBatchUnsynced(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	oldMaxRevs := watchBatchMaxRevs
	defer func() {
		watchBatchMaxRevs = oldMaxRevs
		s.store.Close()
		os.Remove(tmpPath)
	}()
	batches := 3
	watchBatchMaxRevs = 4

	v := []byte("foo")
	for i := 0; i < watchBatchMaxRevs*batches; i++ {
		s.Put(v, v, lease.NoLease)
	}

	w := s.NewWatchStream()
	w.Watch(v, nil, 1)
	for i := 0; i < batches; i++ {
		if resp := <-w.Chan(); len(resp.Events) != watchBatchMaxRevs {
			t.Fatalf("len(events) = %d, want %d", len(resp.Events), watchBatchMaxRevs)
		}
	}

	s.store.revMu.Lock()
	defer s.store.revMu.Unlock()
	if size := s.synced.size(); size != 1 {
		t.Errorf("synced size = %d, want 1", size)
	}
}

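// TestNewMapwatcherToEventMap tests that newWatcherBatch maps events
// only to the watchers whose keys they match.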
func TestNewMapwatcherToEventMap(t *testing.T) {
	k0, k1, k2 := []byte("foo0"), []byte("foo1"), []byte("foo2")
	v0, v1, v2 := []byte("bar0"), []byte("bar1"), []byte("bar2")

	ws := []*watcher{{key: k0}, {key: k1}, {key: k2}}

	evs := []mvccpb.Event{
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k0, Value: v0},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k1, Value: v1},
		},
		{
			Type: mvccpb.PUT,
			Kv:   &mvccpb.KeyValue{Key: k2, Value: v2},
		},
	}

	tests := []struct {
		sync []*watcher
		evs  []mvccpb.Event

		wwe map[*watcher][]mvccpb.Event
	}{
		// no watcher in sync, some events should return empty wwe
		{
			nil,
			evs,
			map[*watcher][]mvccpb.Event{},
		},

		// one watcher in sync, one event that does not match the key of that
		// watcher should return empty wwe
		{
			[]*watcher{ws[2]},
			evs[:1],
			map[*watcher][]mvccpb.Event{},
		},

		// one watcher in sync, one event that matches the key of that
		// watcher should return wwe with that matching watcher
		{
			[]*watcher{ws[1]},
			evs[1:2],
			map[*watcher][]mvccpb.Event{
				ws[1]: evs[1:2],
			},
		},

		// two watchers in sync that watch two different keys, one event
		// that matches the key of only one of the watchers should return
		// wwe with the matching watcher
		{
			[]*watcher{ws[0], ws[2]},
			evs[2:],
			map[*watcher][]mvccpb.Event{
				ws[2]: evs[2:],
			},
		},

		// two watchers in sync that watch two different keys, two events
		// that match those keys should return wwe with both watchers
		{
			[]*watcher{ws[0], ws[1]},
			evs[:2],
			map[*watcher][]mvccpb.Event{
				ws[0]: evs[:1],
				ws[1]: evs[1:2],
			},
		},
	}

	for i, tt := range tests {
		wg := newWatcherGroup()
		for _, w := range tt.sync {
			wg.add(w)
		}

		gwe := newWatcherBatch(&wg, tt.evs)
		if len(gwe) != len(tt.wwe) {
			t.Errorf("#%d: len(gwe) got = %d, want = %d", i, len(gwe), len(tt.wwe))
		}
		// compare gwe and tt.wwe
		for w, eb := range gwe {
			if len(eb.evs) != len(tt.wwe[w]) {
				t.Errorf("#%d: len(eb.evs) got = %d, want = %d", i, len(eb.evs), len(tt.wwe[w]))
			}
			if !reflect.DeepEqual(eb.evs, tt.wwe[w]) {
				t.Errorf("#%d: reflect.DeepEqual events got = %v, want = true", i, false)
			}
		}
	}
}

// TestWatchVictims tests that watchable store delivers watch events
// when the watch channel is temporarily clogged with too many events.
func TestWatchVictims(t *testing.T) {
	oldChanBufLen, oldMaxWatchersPerSync := chanBufLen, maxWatchersPerSync

	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
		chanBufLen, maxWatchersPerSync = oldChanBufLen, oldMaxWatchersPerSync
	}()

	chanBufLen, maxWatchersPerSync = 1, 2
	numPuts := chanBufLen * 64
	testKey, testValue := []byte("foo"), []byte("bar")

	var wg sync.WaitGroup
	numWatches := maxWatchersPerSync * 128
	errc := make(chan error, numWatches)
	wg.Add(numWatches)
	for i := 0; i < numWatches; i++ {
		go func() {
			w := s.NewWatchStream()
			w.Watch(testKey, nil, 1)
			defer func() {
				w.Close()
				wg.Done()
			}()
			tc := time.After(10 * time.Second)
			evs, nextRev := 0, int64(2)
			for evs < numPuts {
				select {
				case <-tc:
					errc <- fmt.Errorf("time out")
					return
				case wr := <-w.Chan():
					evs += len(wr.Events)
					for _, ev := range wr.Events {
						if ev.Kv.ModRevision != nextRev {
							errc <- fmt.Errorf("expected rev=%d, got %d", nextRev, ev.Kv.ModRevision)
							return
						}
						nextRev++
					}
					time.Sleep(time.Millisecond)
				}
			}
			if evs != numPuts {
				errc <- fmt.Errorf("expected %d events, got %d", numPuts, evs)
				return
			}
			select {
			case <-w.Chan():
				errc <- fmt.Errorf("unexpected response")
			default:
			}
		}()
		time.Sleep(time.Millisecond)
	}

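	// Concurrent puts overflow the small watch channels (chanBufLen is 1),
	// pushing blocked watchers onto the victim list; every watcher must
	// still receive each revision, in order.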
	var wgPut sync.WaitGroup
	wgPut.Add(numPuts)
	for i := 0; i < numPuts; i++ {
		go func() {
			defer wgPut.Done()
			s.Put(testKey, testValue, lease.NoLease)
		}()
	}
	wgPut.Wait()

	wg.Wait()
	select {
	case err := <-errc:
		t.Fatal(err)
	default:
	}
}

// TestStressWatchCancelClose tests closing a watch stream while
// canceling its watches.
func TestStressWatchCancelClose(t *testing.T) {
	b, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(b, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	testKey, testValue := []byte("foo"), []byte("bar")
	var wg sync.WaitGroup
	readyc := make(chan struct{})
	wg.Add(100)
	for i := 0; i < 100; i++ {
		go func() {
			defer wg.Done()
			w := s.NewWatchStream()
			ids := make([]WatchID, 10)
			for i := range ids {
				ids[i] = w.Watch(testKey, nil, 0)
			}
			<-readyc
			wg.Add(1 + len(ids)/2)
			for i := range ids[:len(ids)/2] {
				go func(n int) {
					defer wg.Done()
					w.Cancel(ids[n])
				}(i)
			}
			go func() {
				defer wg.Done()
				w.Close()
			}()
		}()
	}

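	// Release all goroutines at once so Cancel and Close race with each other
	// and with the concurrent Puts below.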
	close(readyc)
	for i := 0; i < 100; i++ {
		s.Put(testKey, testValue, lease.NoLease)
	}

	wg.Wait()
}