github.com/pingcap/ticdc@v0.0.0-20220526033649-485a10ef2652/cdc/processor_test.go

// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package cdc

import (
	"bytes"

	"github.com/pingcap/check"
	"github.com/pingcap/ticdc/cdc/model"
	"github.com/pingcap/ticdc/pkg/config"
	"github.com/pingcap/ticdc/pkg/util/testleak"
)

type processorSuite struct{}

var _ = check.Suite(&processorSuite{})

// TestWriteDebugInfo checks that the processor dumps its changefeed info and
// table states in a human-readable form.
func (s *processorSuite) TestWriteDebugInfo(c *check.C) {
	defer testleak.AfterTest(c)()
	p := &oldProcessor{
		changefeedID: "test",
		changefeed: model.ChangeFeedInfo{
			SinkURI: "blackhole://",
			Config:  config.GetDefaultReplicaConfig(),
		},
		tables: map[int64]*tableInfo{
			1: {
				id:         47,
				name:       "test.t1",
				resolvedTs: 100,
			},
		},
	}
	var buf bytes.Buffer
	p.writeDebugInfo(&buf)
	c.Assert(buf.String(), check.Matches, `changefeedID[\s\S]*info[\s\S]*tables[\s\S]*`)
}

/*
import (
	"context"
	"sort"
	"sync"
	"time"

	"github.com/pingcap/ticdc/cdc/entry"

	"github.com/pingcap/check"
	pd "github.com/pingcap/pd/client"
	"github.com/pingcap/ticdc/cdc/kv"
	"github.com/pingcap/ticdc/cdc/model"
	"github.com/pingcap/ticdc/cdc/roles/storage"
	"github.com/pingcap/ticdc/cdc/sink"
	"github.com/pingcap/ticdc/pkg/etcd"
)

type processorSuite struct{}

type mockTsRWriter struct {
	l                sync.Mutex
	globalResolvedTs uint64

	memInfo     *model.TaskStatus
	storageInfo *model.TaskStatus
}

var _ storage.ProcessorTsRWriter = &mockTsRWriter{}

// ReadGlobalResolvedTs implement ProcessorTsRWriter interface.
func (s *mockTsRWriter) ReadGlobalResolvedTs(ctx context.Context) (uint64, error) {
	s.l.Lock()
	defer s.l.Unlock()
	return s.globalResolvedTs, nil
}

func (s *mockTsRWriter) WritePosition(ctx context.Context, taskPosition *model.TaskPosition) error {
	return nil
}

// GetTaskStatus implement ProcessorTsRWriter interface.
func (s *mockTsRWriter) GetTaskStatus() *model.TaskStatus {
	return s.memInfo
}

// WriteInfoIntoStorage implement ProcessorTsRWriter interface.
func (s *mockTsRWriter) WriteInfoIntoStorage(ctx context.Context) error {
	s.storageInfo = s.memInfo.Clone()
	return nil
}

// UpdateInfo implement ProcessorTsRWriter interface.
func (s *mockTsRWriter) UpdateInfo(ctx context.Context) (bool, error) {
	s.memInfo = s.storageInfo
	s.storageInfo = s.memInfo.Clone()

	return true, nil
}

func (s *mockTsRWriter) SetGlobalResolvedTs(ts uint64) {
	s.l.Lock()
	defer s.l.Unlock()
	s.globalResolvedTs = ts
}

// mockMounter pretend to decode a RawTxn by returning a Txn of the same Ts
type mockMounter struct{}

func (m mockMounter) Mount(rawTxn model.RawTxn) (model.Txn, error) {
	return model.Txn{Ts: rawTxn.Ts}, nil
}

// mockSinker append all received Txns for validation
type mockSinker struct {
	sink.Sink
	synced []model.Txn
	mu     sync.Mutex
}

func (m *mockSinker) Emit(ctx context.Context, txns ...model.Txn) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.synced = append(m.synced, txns...)
	return nil
}

var _ = check.Suite(&processorSuite{})

type processorTestCase struct {
	rawTxnTs         [][]uint64
	globalResolvedTs []uint64
	expect           [][]uint64
}

func (p *processorSuite) TestProcessor(c *check.C) {
	defer testleak.AfterTest(c)()
	c.Skip("can't create mock puller")
	cases := &processorTestCase{
		rawTxnTs: [][]uint64{
			{1, 4, 7, 9, 12, 14, 16, 20},
			{2, 4, 8, 13, 24},
		},
		globalResolvedTs: []uint64{14, 15, 19},
		expect: [][]uint64{
			{1, 2, 4, 4, 7, 8, 9, 12, 13, 14},
			{},
			{16},
		},
	}
	runCase(c, cases)
}

func runCase(c *check.C, cases *processorTestCase) {
	origFSchema := fCreateSchema
	fCreateSchema = func(pdEndpoints []string) (*entry.Storage, error) {
		return nil, nil
	}
	origFNewPD := fNewPDCli
	fNewPDCli = func(pdAddrs []string, security pd.SecurityOption, opts ...pd.ClientOption) (pd.Client, error) {
		return nil, nil
	}
	origFNewTsRw := fNewTsRWriter
	fNewTsRWriter = func(cli kv.CDCEtcdClient, changefeedID, captureID string) (storage.ProcessorTsRWriter, error) {
		return &mockTsRWriter{}, nil
	}
	origFNewMounter := fNewMounter
	fNewMounter = func(schema *entry.Storage) mounter {
		return mockMounter{}
	}
	origFNewSink := fNewMySQLSink
	sinker := &mockSinker{}
	fNewMySQLSink = func(sinkURI string, infoGetter sink.TableInfoGetter, opts map[string]string) (sink.Sink, error) {
		return sinker, nil
	}
	defer func() {
		fCreateSchema = origFSchema
		fNewPDCli = origFNewPD
		fNewTsRWriter = origFNewTsRw
		fNewMounter = origFNewMounter
		fNewMySQLSink = origFNewSink
	}()

	dir := c.MkDir()
	etcdURL, etcd, err := etcd.SetupEmbedEtcd(dir)
	c.Assert(err, check.IsNil)
	defer etcd.Close()

	ctx, cancel := context.WithCancel(context.Background())
	p, err := NewProcessor(ctx, []string{etcdURL.String()}, model.ChangeFeedInfo{}, "", "", 0)
	c.Assert(err, check.IsNil)
	errCh := make(chan error, 1)
	p.Run(ctx, errCh)

	for i, rawTxnTs := range cases.rawTxnTs {
		p.addTable(ctx, int64(i), 0)

		table := p.tables[int64(i)]
		input := table.inputTxn

		go func(rawTxnTs []uint64) {
			for _, txnTs := range rawTxnTs {
				input <- model.RawTxn{CRTs: txnTs}
			}
		}(rawTxnTs)
	}

	for i, globalResolvedTs := range cases.globalResolvedTs {
		// hack to simulate owner to update global resolved ts
		p.getTsRwriter().(*mockTsRWriter).SetGlobalResolvedTs(globalResolvedTs)
		// waiting for processor push to resolvedTs
		for {
			sinker.mu.Lock()
			needBreak := len(sinker.synced) == len(cases.expect[i])
			sinker.mu.Unlock()
			if needBreak {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}

		sinker.mu.Lock()
		syncedTs := make([]uint64, 0, len(sinker.synced))
		for _, s := range sinker.synced {
			syncedTs = append(syncedTs, s.Ts)
		}
		sort.Slice(syncedTs, func(i, j int) bool {
			return syncedTs[i] < syncedTs[j]
		})
		c.Assert(syncedTs, check.DeepEquals, cases.expect[i])
		sinker.synced = sinker.synced[:0]
		sinker.mu.Unlock()
	}
	cancel()
}

func (p *processorSuite) TestDiffProcessTableInfos(c *check.C) {
	defer testleak.AfterTest(c)()
	infos := make([]*model.ProcessTableInfo, 0, 3)
	for i := uint64(0); i < uint64(3); i++ {
		infos = append(infos, &model.ProcessTableInfo{ID: i, StartTs: 10 * i})
	}
	var (
		emptyInfo = make([]*model.ProcessTableInfo, 0)
		cases     = []struct {
			oldInfo []*model.ProcessTableInfo
			newInfo []*model.ProcessTableInfo
			removed []*model.ProcessTableInfo
			added   []*model.ProcessTableInfo
		}{
			{emptyInfo, emptyInfo, nil, nil},
			{[]*model.ProcessTableInfo{infos[0]}, []*model.ProcessTableInfo{infos[0]}, nil, nil},
			{emptyInfo, []*model.ProcessTableInfo{infos[0]}, nil, []*model.ProcessTableInfo{infos[0]}},
			{[]*model.ProcessTableInfo{infos[0]}, emptyInfo, []*model.ProcessTableInfo{infos[0]}, nil},
			{[]*model.ProcessTableInfo{infos[0]}, []*model.ProcessTableInfo{infos[1]}, []*model.ProcessTableInfo{infos[0]}, []*model.ProcessTableInfo{infos[1]}},
			{[]*model.ProcessTableInfo{infos[1], infos[0]}, []*model.ProcessTableInfo{infos[2], infos[1]}, []*model.ProcessTableInfo{infos[0]}, []*model.ProcessTableInfo{infos[2]}},
			{[]*model.ProcessTableInfo{infos[1]}, []*model.ProcessTableInfo{infos[0]}, []*model.ProcessTableInfo{infos[1]}, []*model.ProcessTableInfo{infos[0]}},
		}
	)

	for _, tc := range cases {
		removed, added := diffProcessTableInfos(tc.oldInfo, tc.newInfo)
		c.Assert(removed, check.DeepEquals, tc.removed)
		c.Assert(added, check.DeepEquals, tc.added)
	}
}

type txnChannelSuite struct{}

var _ = check.Suite(&txnChannelSuite{})

func (s *txnChannelSuite) TestShouldForwardTxnsByTs(c *check.C) {
	defer testleak.AfterTest(c)()
	input := make(chan model.RawTxn, 5)
	var lastTs uint64
	callback := func(ts uint64) {
		lastTs = ts
	}
	tc := newTxnChannel(input, 5, callback)
	for _, ts := range []uint64{1, 2, 4, 6} {
		select {
		case input <- model.RawTxn{CRTs: ts}:
		case <-time.After(time.Second):
			c.Fatal("Timeout sending to input")
		}
	}
	close(input)

	output := make(chan model.RawTxn, 5)

	assertCorrectOutput := func(expected []uint64) {
		for _, ts := range expected {
			c.Logf("Checking %d", ts)
			select {
			case e := <-output:
				c.Assert(e.Ts, check.Equals, ts)
			case <-time.After(time.Second):
				c.Fatal("Timeout reading output")
			}
		}

		select {
		case <-output:
			c.Fatal("Output should be empty now")
		default:
		}
	}

	tc.Forward(context.Background(), 3, output)
	// Assert that all txns with ts not greater than 3 is sent to output
	assertCorrectOutput([]uint64{1, 2})
	tc.Forward(context.Background(), 10, output)
	// Assert that all txns with ts not greater than 10 is sent to output
	assertCorrectOutput([]uint64{4, 6})
	c.Assert(lastTs, check.Equals, uint64(6))
}

func (s *txnChannelSuite) TestShouldBeCancellable(c *check.C) {
	defer testleak.AfterTest(c)()
	input := make(chan model.RawTxn, 5)
	tc := newTxnChannel(input, 5, func(ts uint64) {})
	ctx, cancel := context.WithCancel(context.Background())
	stopped := make(chan struct{})
	go func() {
		tc.Forward(ctx, 1, make(chan model.RawTxn))
		close(stopped)
	}()
	cancel()
	select {
	case <-stopped:
	case <-time.After(time.Second):
		c.Fatal("Not stopped in time after cancelled")
	}
}
*/
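
// Note: the gocheck suites registered in this file (processorSuite, plus the
// txnChannelSuite inside the commented-out legacy tests) are driven by the
// standard `go test` runner through a package-level check.TestingT hook,
// which conventionally lives in a separate *_test.go bootstrap file in this
// package. A minimal sketch of such a hook (the function name and the
// "testing" import are illustrative assumptions, not part of this file):
//
//	import "testing"
//
//	func TestSuite(t *testing.T) {
//		check.TestingT(t)
//	}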