github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/pkg/etcd/client_test.go (about)

     1  // Copyright 2020 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package etcd
    15  
    16  import (
    17  	"context"
    18  	"sync/atomic"
    19  	"testing"
    20  	"time"
    21  
    22  	"github.com/benbjohnson/clock"
    23  	"github.com/pingcap/errors"
    24  	"github.com/stretchr/testify/require"
    25  	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
    26  	clientv3 "go.etcd.io/etcd/client/v3"
    27  )
    28  
// mockClient stubs the clientv3.KV interface for retry tests.
// getOK tracks whether Get has already been called so the mock can
// script a failure/success sequence; Put always fails (see below).
type mockClient struct {
	clientv3.KV
	getOK bool
}
    33  
    34  func (m *mockClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (resp *clientv3.GetResponse, err error) {
    35  	if m.getOK {
    36  		m.getOK = true
    37  		return nil, errors.New("mock error")
    38  	}
    39  	return &clientv3.GetResponse{}, nil
    40  }
    41  
    42  func (m *mockClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (resp *clientv3.PutResponse, err error) {
    43  	return nil, errors.New("mock error")
    44  }
    45  
    46  func (m *mockClient) Txn(ctx context.Context) clientv3.Txn {
    47  	return &mockTxn{ctx: ctx}
    48  }
    49  
// mockWatcher stubs clientv3.Watcher for the WatchWithChan tests. It counts
// Watch invocations (resetCount, i.e. how often the watch channel was reset),
// counts RequestProgress invocations (requestCount), records the start
// revision passed to the most recent Watch call (rev), and always returns
// watchCh from Watch. The counters are pointers so the value-receiver methods
// can mutate shared state observed by the test.
type mockWatcher struct {
	clientv3.Watcher
	watchCh      chan clientv3.WatchResponse
	resetCount   *int32
	requestCount *int32
	rev          *int64
}
    57  
    58  func (m mockWatcher) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
    59  	atomic.AddInt32(m.resetCount, 1)
    60  	op := &clientv3.Op{}
    61  	for _, opt := range opts {
    62  		opt(op)
    63  	}
    64  	atomic.StoreInt64(m.rev, op.Rev())
    65  	return m.watchCh
    66  }
    67  
// RequestProgress counts each progress request and always succeeds.
func (m mockWatcher) RequestProgress(ctx context.Context) error {
	atomic.AddInt32(m.requestCount, 1)
	return nil
}
    72  
    73  func TestRetry(t *testing.T) {
    74  	// here we need to change maxTries, which is not thread safe
    75  	// so we don't use t.Parallel() for this test
    76  
    77  	originValue := maxTries
    78  	// to speedup the test
    79  	maxTries = 2
    80  
    81  	cli := clientv3.NewCtxClient(context.TODO())
    82  	cli.KV = &mockClient{}
    83  	retrycli := Wrap(cli, nil)
    84  	get, err := retrycli.Get(context.TODO(), "")
    85  
    86  	require.NoError(t, err)
    87  	require.NotNil(t, get)
    88  
    89  	_, err = retrycli.Put(context.TODO(), "", "")
    90  	require.NotNil(t, err)
    91  	require.Containsf(t, errors.Cause(err).Error(), "mock error", "err:%v", err.Error())
    92  
    93  	ctx, cancel := context.WithCancel(context.Background())
    94  	defer cancel()
    95  
    96  	// Test Txn case
    97  	// case 0: normal
    98  	rsp, err := retrycli.Txn(ctx, nil, nil, nil)
    99  	require.NoError(t, err)
   100  	require.False(t, rsp.Succeeded)
   101  
   102  	// case 1: errors.ErrReachMaxTry
   103  	_, err = retrycli.Txn(ctx, txnEmptyCmps, nil, nil)
   104  	require.Regexp(t, ".*CDC:ErrReachMaxTry.*", err)
   105  
   106  	// case 2: errors.ErrReachMaxTry
   107  	_, err = retrycli.Txn(ctx, nil, txnEmptyOpsThen, nil)
   108  	require.Regexp(t, ".*CDC:ErrReachMaxTry.*", err)
   109  
   110  	// case 3: context.DeadlineExceeded
   111  	_, err = retrycli.Txn(ctx, txnEmptyCmps, txnEmptyOpsThen, nil)
   112  	require.Equal(t, context.DeadlineExceeded, err)
   113  
   114  	// other case: mock error
   115  	_, err = retrycli.Txn(ctx, txnEmptyCmps, txnEmptyOpsThen, TxnEmptyOpsElse)
   116  	require.Containsf(t, errors.Cause(err).Error(), "mock error", "err:%v", err.Error())
   117  
   118  	maxTries = originValue
   119  }
   120  
   121  func TestDelegateLease(t *testing.T) {
   122  	t.Parallel()
   123  
   124  	ctx := context.Background()
   125  	url, server, err := SetupEmbedEtcd(t.TempDir())
   126  	defer func() {
   127  		server.Close()
   128  	}()
   129  	require.Nil(t, err)
   130  	cli, err := clientv3.New(clientv3.Config{
   131  		Endpoints:   []string{url.String()},
   132  		DialTimeout: 3 * time.Second,
   133  	})
   134  	require.Nil(t, err)
   135  	defer cli.Close()
   136  
   137  	ttl := int64(10)
   138  	lease, err := cli.Grant(ctx, ttl)
   139  	require.Nil(t, err)
   140  
   141  	ttlResp, err := cli.TimeToLive(ctx, lease.ID)
   142  	require.Nil(t, err)
   143  	require.Equal(t, ttlResp.GrantedTTL, ttl)
   144  	require.Less(t, ttlResp.TTL, ttl)
   145  	require.Greater(t, ttlResp.TTL, int64(0))
   146  
   147  	_, err = cli.Revoke(ctx, lease.ID)
   148  	require.Nil(t, err)
   149  	ttlResp, err = cli.TimeToLive(ctx, lease.ID)
   150  	require.Nil(t, err)
   151  	require.Equal(t, ttlResp.TTL, int64(-1))
   152  }
   153  
// TestWatchChBlocked checks that no watch responses are lost when the inner
// etcd WatchCh stalls: after the mock clock advances past the watch-channel
// timeout, WatchWithChan must reset the watch (resetCount > 1), keep
// requesting progress (requestCount > 1), and still deliver every response
// exactly once and in order.
func TestWatchChBlocked(t *testing.T) {
	t.Parallel()

	cli := clientv3.NewCtxClient(context.TODO())
	resetCount := int32(0)
	requestCount := int32(0)
	rev := int64(0)
	watchCh := make(chan clientv3.WatchResponse, 1)
	watcher := mockWatcher{watchCh: watchCh, resetCount: &resetCount, requestCount: &requestCount, rev: &rev}
	cli.Watcher = watcher

	// Responses the mock watcher will feed in; CompactRevision is just a
	// convenient distinguishing field.
	sentRes := []clientv3.WatchResponse{
		{CompactRevision: 1},
		{CompactRevision: 2},
		{CompactRevision: 3},
		{CompactRevision: 4},
		{CompactRevision: 5},
		{CompactRevision: 6},
	}

	go func() {
		for _, r := range sentRes {
			watchCh <- r
		}
	}()

	// Mock clock lets the test trigger the timeout/reset path deterministically.
	mockClock := clock.NewMock()
	watchCli := Wrap(cli, nil)
	watchCli.clock = mockClock

	key := "testWatchChBlocked"
	outCh := make(chan clientv3.WatchResponse, 6)
	revision := int64(1)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()

	go func() {
		watchCli.WatchWithChan(ctx, outCh, key, "", clientv3.WithPrefix(), clientv3.WithRev(revision))
	}()
	receivedRes := make([]clientv3.WatchResponse, 0)
	// wait for WatchWithChan set up
	r := <-outCh
	receivedRes = append(receivedRes, r)
	// move time forward
	mockClock.Add(time.Second * 30)

	// Drain until all sent responses arrived, then cancel to end the watch.
	for r := range outCh {
		receivedRes = append(receivedRes, r)
		if len(receivedRes) == len(sentRes) {
			cancel()
		}
	}

	require.Equal(t, sentRes, receivedRes)
	// make sure watchCh has been reset since timeout
	require.True(t, atomic.LoadInt32(watcher.resetCount) > 1)
	// make sure RequestProgress has been call since timeout
	require.True(t, atomic.LoadInt32(watcher.requestCount) > 1)
	// make sure etcdRequestProgressDuration is less than etcdWatchChTimeoutDuration
	require.Less(t, etcdRequestProgressDuration, etcdWatchChTimeoutDuration)
}
   216  
// TestOutChBlocked checks that no watch responses are lost when the consumer
// side (outCh, capacity 1) is slow: even after the mock clock jumps past the
// timeout, WatchWithChan must deliver every response exactly once and in order.
func TestOutChBlocked(t *testing.T) {
	t.Parallel()

	cli := clientv3.NewCtxClient(context.TODO())
	resetCount := int32(0)
	requestCount := int32(0)
	rev := int64(0)
	watchCh := make(chan clientv3.WatchResponse, 1)
	watcher := mockWatcher{watchCh: watchCh, resetCount: &resetCount, requestCount: &requestCount, rev: &rev}
	cli.Watcher = watcher

	// Mock clock lets the test trigger the timeout path deterministically.
	mockClock := clock.NewMock()
	watchCli := Wrap(cli, nil)
	watchCli.clock = mockClock

	sentRes := []clientv3.WatchResponse{
		{CompactRevision: 1},
		{CompactRevision: 2},
		{CompactRevision: 3},
	}

	go func() {
		for _, r := range sentRes {
			watchCh <- r
		}
	}()

	key := "testOutChBlocked"
	outCh := make(chan clientv3.WatchResponse, 1)
	revision := int64(1)

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()
	go func() {
		watchCli.WatchWithChan(ctx, outCh, key, "", clientv3.WithPrefix(), clientv3.WithRev(revision))
	}()
	receivedRes := make([]clientv3.WatchResponse, 0)
	// wait for WatchWithChan set up
	r := <-outCh
	receivedRes = append(receivedRes, r)
	// move time forward
	mockClock.Add(time.Second * 30)

	// Drain until all sent responses arrived, then cancel to end the watch.
	for r := range outCh {
		receivedRes = append(receivedRes, r)
		if len(receivedRes) == len(sentRes) {
			cancel()
		}
	}

	require.Equal(t, sentRes, receivedRes)
}
   270  
// TestRevisionNotFallBack checks that when the watch channel is reset after a
// timeout, WatchWithChan re-watches from the revision it was originally asked
// for (or later) — it must not fall back to an older revision even if no
// response carrying a newer revision was received before the reset.
func TestRevisionNotFallBack(t *testing.T) {
	t.Parallel()

	cli := clientv3.NewCtxClient(context.TODO())
	resetCount := int32(0)
	requestCount := int32(0)
	rev := int64(0)
	watchCh := make(chan clientv3.WatchResponse, 1)
	watcher := mockWatcher{watchCh: watchCh, resetCount: &resetCount, requestCount: &requestCount, rev: &rev}
	cli.Watcher = watcher
	// Mock clock lets the test trigger the timeout/reset path deterministically.
	mockClock := clock.NewMock()
	watchCli := Wrap(cli, nil)
	watchCli.clock = mockClock

	key := "testRevisionNotFallBack"
	outCh := make(chan clientv3.WatchResponse, 1)
	// watch from revision = 2
	revision := int64(2)

	// The single response carries CompactRevision 1, older than the requested
	// revision, so it must not pull the re-watch revision backwards.
	sentRes := []clientv3.WatchResponse{
		{CompactRevision: 1},
	}

	go func() {
		for _, r := range sentRes {
			watchCh <- r
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()
	go func() {
		watchCli.WatchWithChan(ctx, outCh, key, "", clientv3.WithPrefix(), clientv3.WithRev(revision))
	}()
	// wait for WatchWithChan set up
	<-outCh
	// move time forward
	mockClock.Add(time.Second * 30)
	// make sure watchCh has been reset since timeout
	require.True(t, atomic.LoadInt32(watcher.resetCount) > 1)
	// make sure revision in WatchWitchChan does not fall back
	// even if there has not any response been received from WatchCh
	// while WatchCh was reset
	require.Equal(t, atomic.LoadInt64(watcher.rev), revision)
}
   316  
// mockTxn is a scripted clientv3.Txn. mode accumulates bit flags recording
// which of If (bit 0), Then (bit 1) and Else (bit 2) received a non-nil
// argument list; Commit maps the resulting mode to a canned response or error.
type mockTxn struct {
	ctx  context.Context
	mode int
}
   321  
   322  func (txn *mockTxn) If(cs ...clientv3.Cmp) clientv3.Txn {
   323  	if cs != nil {
   324  		txn.mode += 1
   325  	}
   326  	return txn
   327  }
   328  
   329  func (txn *mockTxn) Then(ops ...clientv3.Op) clientv3.Txn {
   330  	if ops != nil {
   331  		txn.mode += 1 << 1
   332  	}
   333  	return txn
   334  }
   335  
   336  func (txn *mockTxn) Else(ops ...clientv3.Op) clientv3.Txn {
   337  	if ops != nil {
   338  		txn.mode += 1 << 2
   339  	}
   340  	return txn
   341  }
   342  
   343  func (txn *mockTxn) Commit() (*clientv3.TxnResponse, error) {
   344  	switch txn.mode {
   345  	case 0:
   346  		return &clientv3.TxnResponse{}, nil
   347  	case 1:
   348  		return nil, rpctypes.ErrNoSpace
   349  	case 2:
   350  		return nil, rpctypes.ErrTimeoutDueToLeaderFail
   351  	case 3:
   352  		return nil, context.DeadlineExceeded
   353  	default:
   354  		return nil, errors.New("mock error")
   355  	}
   356  }