github.com/lfch/etcd-io/tests/v3@v3.0.0-20221004140520-eac99acd3e9d/integration/clientv3/experimental/recipes/v3_lock_test.go

// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package recipes_test

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/lfch/etcd-io/api/v3/mvccpb"
	clientv3 "github.com/lfch/etcd-io/client/v3"
	"github.com/lfch/etcd-io/client/v3/concurrency"
	recipe "github.com/lfch/etcd-io/client/v3/experimental/recipes"
	integration2 "github.com/lfch/etcd-io/tests/v3/framework/integration"
)

func TestMutexLockSingleNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}

func TestMutexLockMultiNode(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}

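// testMutexLock starts `waiters` goroutines that each create a session with
// chooseClient and contend for the same mutex key, then drains the lock
// acquisitions one at a time, verifying that no follower acquires the lock
// before the current holder unlocks.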
func testMutexLock(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream lock acquisitions
	lockedC := make(chan *concurrency.Mutex)
	stopC := make(chan struct{})
	defer close(stopC)

	for i := 0; i < waiters; i++ {
		go func() {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				t.Error(err)
				return
			}
			m := concurrency.NewMutex(session, "test-mutex")
			if err := m.Lock(context.TODO()); err != nil {
				t.Errorf("could not wait on lock (%v)", err)
			}
			select {
			case lockedC <- m:
			case <-stopC:
			}
		}()
	}
	// unlock locked mutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case m := <-lockedC:
			// lock acquired with m; no other waiter should hold it concurrently
			select {
			case <-lockedC:
				t.Fatalf("lock %d followers did not wait", i)
			default:
			}
			if err := m.Unlock(context.TODO()); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		}
	}
}

func TestMutexTryLockSingleNode(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	t.Logf("3-node cluster created...")
	var clients []*clientv3.Client
	testMutexTryLock(t, 5, integration2.MakeSingleNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}

func TestMutexTryLockMultiNode(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	var clients []*clientv3.Client
	testMutexTryLock(t, 5, integration2.MakeMultiNodeClients(t, clus, &clients))
	integration2.CloseClients(t, clients)
}

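// testMutexTryLock starts `lockers` goroutines that all call TryLock on the
// same key: exactly one should acquire the lock and report on lockedC, while
// every other goroutine should fail immediately with concurrency.ErrLocked
// and report on notlockedC.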
func testMutexTryLock(t *testing.T, lockers int, chooseClient func() *clientv3.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	lockedC := make(chan *concurrency.Mutex)
	notlockedC := make(chan *concurrency.Mutex)

	for i := 0; i < lockers; i++ {
		go func(i int) {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				t.Error(err)
				return
			}
			m := concurrency.NewMutex(session, "test-mutex-try-lock")
			err = m.TryLock(ctx)
			if err == nil {
				select {
				case lockedC <- m:
				case <-ctx.Done():
					t.Errorf("Thread: %v, Context failed: %v", i, ctx.Err())
				}
			} else if err == concurrency.ErrLocked {
				select {
				case notlockedC <- m:
				case <-ctx.Done():
					t.Errorf("Thread: %v, Context failed: %v", i, ctx.Err())
				}
			} else {
				t.Errorf("Thread: %v; Unexpected Error %v", i, err)
			}
		}(i)
	}

	timerC := time.After(30 * time.Second)
	select {
	case <-lockedC:
		for i := 0; i < lockers-1; i++ {
			select {
			case <-lockedC:
				t.Fatalf("Multiple mutexes locked on the same key")
			case <-notlockedC:
			case <-timerC:
				t.Errorf("timed out waiting for lock")
			}
		}
	case <-timerC:
		t.Errorf("timed out waiting for lock (30s)")
	}
}

// TestMutexSessionRelock ensures that acquiring the same lock with the same
// session will not result in deadlock.
func TestMutexSessionRelock(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	session, err := concurrency.NewSession(clus.RandClient())
	if err != nil {
		t.Fatal(err)
	}

	m := concurrency.NewMutex(session, "test-mutex")
	if err := m.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}

	m2 := concurrency.NewMutex(session, "test-mutex")
	if err := m2.Lock(context.TODO()); err != nil {
		t.Fatal(err)
	}
}

// TestMutexWaitsOnCurrentHolder ensures a mutex is only acquired once all
// waiters older than the new owner are gone by testing the case where
// the waiter prior to the acquirer expires before the current holder.
func TestMutexWaitsOnCurrentHolder(t *testing.T) {
	integration2.BeforeTest(t)

	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	cctx := context.Background()

	cli := clus.Client(0)

	firstOwnerSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer firstOwnerSession.Close()
	firstOwnerMutex := concurrency.NewMutex(firstOwnerSession, "test-mutex")
	if err = firstOwnerMutex.Lock(cctx); err != nil {
		t.Fatal(err)
	}

	victimSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer victimSession.Close()
	victimDonec := make(chan struct{})
	go func() {
		defer close(victimDonec)
		concurrency.NewMutex(victimSession, "test-mutex").Lock(cctx)
	}()

	// ensure the mutexes associated with firstOwnerSession and victimSession
	// have registered their keys before the new owner joins the wait queue
	wch := cli.Watch(cctx, "test-mutex", clientv3.WithPrefix(), clientv3.WithRev(1))
	putCounts := 0
	for putCounts < 2 {
		select {
		case wrp := <-wch:
			putCounts += len(wrp.Events)
		case <-time.After(time.Second):
			t.Fatal("failed to receive watch response")
		}
	}
	if putCounts != 2 {
		t.Fatalf("expected 2 put events, but got %v", putCounts)
	}

	newOwnerSession, err := concurrency.NewSession(cli)
	if err != nil {
		t.Fatal(err)
	}
	defer newOwnerSession.Close()
	newOwnerDonec := make(chan struct{})
	go func() {
		defer close(newOwnerDonec)
		concurrency.NewMutex(newOwnerSession, "test-mutex").Lock(cctx)
	}()

	select {
	case wrp := <-wch:
		if len(wrp.Events) != 1 {
			t.Fatalf("expected one event, but got %v events", len(wrp.Events))
		}
		if e := wrp.Events[0]; e.Type != mvccpb.PUT {
			t.Fatalf("expected a put event on prefix test-mutex, but got event type %v", e.Type)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive a watch response")
	}

	// simulate losing the client that's next in line to acquire the lock
	victimSession.Close()

	// ensure the victim waiter's key has been deleted on the server side
	select {
	case wrp := <-wch:
		if len(wrp.Events) != 1 {
			t.Fatalf("expected one event, but got %v events", len(wrp.Events))
		}
		if e := wrp.Events[0]; e.Type != mvccpb.DELETE {
			t.Fatalf("expected a delete event on prefix test-mutex, but got event type %v", e.Type)
		}
	case <-time.After(time.Second):
		t.Fatal("failed to receive a watch response")
	}

	select {
	case <-newOwnerDonec:
		t.Fatal("new owner obtained lock before first owner unlocked")
	default:
	}

	if err := firstOwnerMutex.Unlock(cctx); err != nil {
		t.Fatal(err)
	}

	select {
	case <-newOwnerDonec:
	case <-time.After(time.Second):
		t.Fatal("new owner failed to obtain lock")
	}

	select {
	case <-victimDonec:
	case <-time.After(time.Second):
		t.Fatal("victim mutex failed to exit after first owner releases lock")
	}
}

func BenchmarkMutex4Waiters(b *testing.B) {
	integration2.BeforeTest(b)
	// XXX switch tests to use TB interface
	clus := integration2.NewCluster(nil, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(nil)
	for i := 0; i < b.N; i++ {
		testMutexLock(nil, 4, func() *clientv3.Client { return clus.RandClient() })
	}
}

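// TestRWMutexSingleNode exercises the RWMutex recipe with all waiters using
// the same cluster member (clus.Client(0)); TestRWMutexMultiNode below runs
// the same scenario with waiters spread across members via RandClient.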
func TestRWMutexSingleNode(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	testRWMutex(t, 5, func() *clientv3.Client { return clus.Client(0) })
}

func TestRWMutexMultiNode(t *testing.T) {
	integration2.BeforeTest(t)
	clus := integration2.NewCluster(t, &integration2.ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
}

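// testRWMutex starts `waiters` goroutines that each randomly take either the
// read or the write side of the same RWMutex, then drains the acquisitions,
// verifying that readers do not get through while a write lock is held and
// that writers do not get through while a read lock is held.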
func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
	// stream rwlock acquisitions
	rlockedC := make(chan *recipe.RWMutex, 1)
	wlockedC := make(chan *recipe.RWMutex, 1)
	for i := 0; i < waiters; i++ {
		go func() {
			session, err := concurrency.NewSession(chooseClient())
			if err != nil {
				t.Error(err)
				return
			}
			rwm := recipe.NewRWMutex(session, "test-rwmutex")
			if rand.Intn(2) == 0 {
				if err := rwm.RLock(); err != nil {
					t.Errorf("could not rlock (%v)", err)
				}
				rlockedC <- rwm
			} else {
				if err := rwm.Lock(); err != nil {
					t.Errorf("could not lock (%v)", err)
				}
				wlockedC <- rwm
			}
		}()
	}
	// unlock locked rwmutexes
	timerC := time.After(time.Duration(waiters) * time.Second)
	for i := 0; i < waiters; i++ {
		select {
		case <-timerC:
			t.Fatalf("timed out waiting for lock %d", i)
		case wl := <-wlockedC:
			// a write lock is held; no reader should hold the lock concurrently
			select {
			case <-rlockedC:
				t.Fatalf("rlock %d readers did not wait", i)
			default:
			}
			if err := wl.Unlock(); err != nil {
				t.Fatalf("could not release lock (%v)", err)
			}
		case rl := <-rlockedC:
			// a read lock is held; no writer should hold the lock concurrently
			select {
			case <-wlockedC:
				t.Fatalf("rlock %d writers did not wait", i)
			default:
			}
			if err := rl.RUnlock(); err != nil {
				t.Fatalf("could not release rlock (%v)", err)
			}
		}
	}
}