github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/pkg/upstream/upstream_test.go

// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package upstream

import (
	"context"
	"fmt"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/benbjohnson/clock"
	"github.com/pingcap/errors"
	"github.com/pingcap/tiflow/cdc/model"
	"github.com/pingcap/tiflow/pkg/etcd"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/require"
	"go.etcd.io/etcd/client/pkg/v3/logutil"
	clientv3 "go.etcd.io/etcd/client/v3"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func TestUpstreamShouldClose(t *testing.T) {
	t.Parallel()

	up := &Upstream{}
	up.isDefaultUpstream = false
	mockClock := clock.NewMock()
	// An upstream with a zero idle time is still in use and must not be closed.
	require.False(t, up.shouldClose())
	up.clock = mockClock
	// Idle for longer than maxIdleDuration: the upstream should be closed.
	up.idleTime = mockClock.Now().Add(-2 * maxIdleDuration)
	require.True(t, up.shouldClose())
	// The default upstream is never closed, regardless of how long it idles.
	up.isDefaultUpstream = true
	require.False(t, up.shouldClose())
}

func TestUpstreamError(t *testing.T) {
	t.Parallel()

	up := &Upstream{}
	err := errors.New("test")
	up.err.Store(err)
	require.Equal(t, err, up.Error())
	up.err.Store(nil)
	require.Nil(t, up.Error())
}

func TestUpstreamIsNormal(t *testing.T) {
	t.Parallel()

	up := &Upstream{}
	up.status = uninit
	require.False(t, up.IsNormal())
	up.status = normal
	require.True(t, up.IsNormal())
	// A stored error makes the upstream abnormal even when its status is normal.
	up.err.Store(errors.New("test"))
	require.False(t, up.IsNormal())
}

func TestTrySetIdleTime(t *testing.T) {
	t.Parallel()

	up := newUpstream(nil, nil)
	require.Equal(t, uninit, up.status)
	up.clock = clock.New()
	up.trySetIdleTime()
	require.False(t, up.idleTime.IsZero())
	// A second call must not overwrite the idle time already recorded.
	idleTime := up.idleTime
	up.trySetIdleTime()
	require.Equal(t, idleTime, up.idleTime)
	// Resetting clears the idle time and is idempotent.
	up.resetIdleTime()
	require.True(t, up.idleTime.IsZero())
	up.resetIdleTime()
	require.True(t, up.idleTime.IsZero())
}

func TestRegisterTopo(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	clientURL, etcdServer, err := etcd.SetupEmbedEtcd(t.TempDir())
	require.NoError(t, err)
	defer etcdServer.Close()

	logConfig := logutil.DefaultZapLoggerConfig
	logConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)

	rawEtcdCli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{clientURL.String()},
		Context:     ctx,
		LogConfig:   &logConfig,
		DialTimeout: 3 * time.Second,
	})
	require.NoError(t, err)
	defer rawEtcdCli.Close()
	etcdCli := etcd.Wrap(rawEtcdCli, make(map[string]prometheus.Counter))
	up := &Upstream{
		cancel:  func() {},
		etcdCli: etcdCli,
		wg:      &sync.WaitGroup{},
	}

	info := &model.CaptureInfo{
		AdvertiseAddr: "localhost:8300",
		Version:       "test.1.0",
	}
	err = up.registerTopologyInfo(ctx, CaptureTopologyCfg{
		CaptureInfo: info,
		GCServiceID: "clusterID",
		SessionTTL:  2,
	})
	require.NoError(t, err)
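	// The capture info registered above should now be readable from etcd
	// under the /topology/ticdc/<GCServiceID>/<AdvertiseAddr> key.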
	resp, err := etcdCli.Get(ctx, "/topology/ticdc/clusterID/localhost:8300")
	require.NoError(t, err)

	infoData, err := info.Marshal()
	require.NoError(t, err)
	require.Equal(t, infoData, resp.Kvs[0].Value)

	// Closing the upstream revokes the etcd session, so the topology key
	// should disappear once the lease expires.
	up.etcdCli = nil
	up.Close()
	require.Eventually(t, func() bool {
		resp, err := etcdCli.Get(ctx, "/topology/ticdc/clusterID/localhost:8300")
		require.NoError(t, err)
		return len(resp.Kvs) == 0
	}, time.Second*5, time.Millisecond*100)
}

func TestVerify(t *testing.T) {
	t.Parallel()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	clientURL, etcdServer, err := etcd.SetupEmbedEtcd(t.TempDir())
	require.NoError(t, err)
	defer etcdServer.Close()

	logConfig := logutil.DefaultZapLoggerConfig
	logConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)

	rawEtcdCli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{clientURL.String()},
		Context:     ctx,
		LogConfig:   &logConfig,
		DialTimeout: 3 * time.Second,
	})
	require.NoError(t, err)
	defer rawEtcdCli.Close()

	etcdCli := etcd.Wrap(rawEtcdCli, make(map[string]prometheus.Counter))
	up := &Upstream{
		cancel:  func() {},
		etcdCli: etcdCli,
		wg:      &sync.WaitGroup{},
	}

	// case 1: no tidb instance registered in the topology
	err = up.VerifyTiDBUser(ctx, "test", "")
	require.ErrorContains(t, err, "tidb instance not found in topology")

	// case 2: tidb instances are registered but not reachable
	tidbInstances := []*tidbInstance{
		{
			IP:   "127.0.0.1",
			Port: 40000,
		},
		{
			IP:   "127.0.0.1",
			Port: 40001,
		},
	}
	for _, tidb := range tidbInstances {
		infoKey := fmt.Sprintf("/topology/tidb/%s:%d/info", tidb.IP, tidb.Port)
		ttlKey := fmt.Sprintf("/topology/tidb/%s:%d/ttl", tidb.IP, tidb.Port)
		_, err = rawEtcdCli.Put(ctx, infoKey, "test")
		require.NoError(t, err)
		_, err = rawEtcdCli.Put(ctx, ttlKey, strconv.FormatInt(time.Now().UnixNano(), 10))
		require.NoError(t, err)
	}
	err = up.VerifyTiDBUser(ctx, "test", "")
	require.ErrorContains(t, err, "connection refused")
}