github.com/xudingjun3131/etcd@v3.3.27+incompatible/mvcc/watchable_store_bench_test.go

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"math/rand"
	"os"
	"testing"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc/backend"
)

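// BenchmarkWatchableStorePut benchmarks single Put operations on a watchable
// store, without an explicit write transaction around each Put.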
func BenchmarkWatchableStorePut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(be, &lease.FakeLessor{}, nil, nil)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

// BenchmarkWatchableStoreTxnPut benchmarks the Put operation with an explicit
// transaction begin and end, where each transaction involves synchronization
// work such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
	var i fakeConsistentIndex
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := New(be, &lease.FakeLessor{}, nil, &i)
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		txn := s.Write()
		txn.Put(keys[i], vals[i], lease.NoLease)
		txn.End()
	}
}

// BenchmarkWatchableStoreWatchSyncPut benchmarks the case of
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchSyncPut(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{}, nil, nil)
	defer cleanup(s, be, tmpPath)

	k := []byte("testkey")
	v := []byte("testval")

	w := s.NewWatchStream()
	defer w.Close()
	watchIDs := make([]WatchID, b.N)
	for i := range watchIDs {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(k, nil, 1)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// trigger watchers
	s.Put(k, v, lease.NoLease)
	for range watchIDs {
		<-w.Chan()
	}
	select {
	case wc := <-w.Chan():
		b.Fatalf("unexpected data %v", wc)
	default:
	}
}

// BenchmarkWatchableStoreUnsyncedCancel benchmarks cancel performance for
// unsynced watchers in a WatchableStore. It creates k*N watchers to populate
// unsynced with a reasonably large number of watchers, then measures the time
// it takes to cancel N watchers out of the k*N. The performance is expected
// to differ depending on the implementation of the unsynced member.
// TODO: k is an arbitrary constant; we need to figure out what factor best
// simulates real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{}, nil)

	// Create the watchableStore manually instead of calling newWatchableStore,
	// because newWatchableStore starts the syncWatchersLoop goroutine, which
	// periodically moves watchers out of the unsynced map. We want the watchers
	// to stay in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// initialize 'synced' so the benchmark does not crash from assigning
		// to a nil map; 'synced' does not get populated in this benchmark.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we can set the watcher's startRev to 1,
	// forcing the watchers to be in unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	// Cancel N watchers in random order so the benchmark is not biased
	// towards ordered data structures such as slices.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}

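// BenchmarkWatchableStoreSyncedCancel benchmarks canceling synced watchers.
// It registers a large, fixed number of watchers with startRev 0 so that they
// stay in synced, then measures how long it takes to cancel them in random
// order.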
func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := newWatchableStore(be, &lease.FakeLessor{}, nil, nil)

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i] = w.Watch(testKey, nil, 0)
	}

	// Cancel the watchers in random order so the benchmark is not biased
	// towards ordered data structures such as slices.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}
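
// The two benchmarks below are an editorial sketch, not part of the original
// file: the doc comment of BenchmarkWatchableStoreUnsyncedCancel notes that
// k is an arbitrary constant, so this shows one way the unsynced-watcher
// factor could be swept with sub-benchmarks instead of being hard-coded.
// The chosen k values (2, 8, 32) are assumptions for illustration only; the
// setup otherwise mirrors BenchmarkWatchableStoreUnsyncedCancel above.
func benchmarkUnsyncedCancel(b *testing.B, k int) {
	be, tmpPath := backend.NewDefaultTmpBackend()
	s := NewStore(be, &lease.FakeLessor{}, nil)

	// as above, build the watchableStore by hand so syncWatchersLoop never
	// runs and the watchers stay in unsynced
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),
		synced:   newWatcherGroup(),
	}
	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	testKey := []byte("foo")
	s.Put(testKey, []byte("bar"), lease.NoLease)

	w := ws.NewWatchStream()

	watcherN := k * b.N
	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 startRev keeps the watchers in unsynced
		watchIDs[i] = w.Watch(testKey, nil, 1)
	}

	// cancel b.N randomly chosen watchers out of the k*b.N registered
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()
	for _, idx := range ix[:b.N] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}

func BenchmarkWatchableStoreUnsyncedCancelK(b *testing.B) {
	for _, tc := range []struct {
		name string
		k    int
	}{{"k=2", 2}, {"k=8", 8}, {"k=32", 32}} {
		b.Run(tc.name, func(b *testing.B) { benchmarkUnsyncedCancel(b, tc.k) })
	}
}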