github.com/pingcap/tiflow@v0.0.0-20240520035814-5bf52d54e205/pkg/migrate/migrate_test.go (about) 1 // Copyright 2022 PingCAP, Inc. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // See the License for the specific language governing permissions and 12 // limitations under the License. 13 14 package migrate 15 16 import ( 17 "context" 18 "encoding/json" 19 "fmt" 20 "net/http" 21 "net/http/httptest" 22 "strconv" 23 "sync" 24 "testing" 25 "time" 26 27 "github.com/pingcap/errors" 28 "github.com/pingcap/tiflow/cdc/model" 29 "github.com/pingcap/tiflow/pkg/config" 30 "github.com/pingcap/tiflow/pkg/etcd" 31 "github.com/pingcap/tiflow/pkg/pdutil" 32 "github.com/pingcap/tiflow/pkg/security" 33 "github.com/pingcap/tiflow/pkg/txnutil/gc" 34 "github.com/pingcap/tiflow/pkg/util" 35 "github.com/stretchr/testify/require" 36 "github.com/tikv/client-go/v2/oracle" 37 pd "github.com/tikv/pd/client" 38 clientv3 "go.etcd.io/etcd/client/v3" 39 ) 40 41 const cycylicChangefeedInfo = `{"upstream-id":0,"sink-uri":"blackhole://","opts":{"a":"b"}, 42 "create-time":"0001-01-01T00:00:00Z","start-ts":1,"target-ts":2,"admin-job-type":0,"sort-engine": 43 "memory","sort-dir":"/tmp/","config":{"case-sensitive":true, 44 "force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"], 45 "ignore-txn-start-ts":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"", 46 "column-selectors":null,"schema-registry":""},"cyclic-replication":{"enable":true,"replica-id":0, 47 "filter-replica-ids":[12,3],"id-buckets":0,"sync-ddl":true}, 48 "consistent":{"level":"none","max-log-size":64,"flush-interval":1000,"storage":""}}, 49 "state":"","error":null,"sync-point-enabled":false,"sync-point-interval":0, 50 "creator-version":"v6.1.0"} 51 ` 52 53 func TestUnmarshal(t *testing.T) { 54 cf := &model.ChangeFeedInfo{} 55 err := json.Unmarshal([]byte(cycylicChangefeedInfo), cf) 56 require.Nil(t, err) 57 } 58 59 // 1. create an etcd server 60 // 2. put some old metadata to etcd cluster 61 // 3. use 3 goroutine to mock cdc nodes, one is owner, which will migrate data, 62 // the other two are non-owner nodes, which will wait for migrating done 63 // 3. migrate the data to new meta version 64 // 4. check the data is migrated correctly 65 func TestMigration(t *testing.T) { 66 s := &etcd.Tester{} 67 s.SetUpTest(t) 68 defer s.TearDownTest(t) 69 curl := s.ClientURL.String() 70 cli, err := clientv3.New(clientv3.Config{ 71 Endpoints: []string{curl}, 72 DialTimeout: 3 * time.Second, 73 }) 74 require.NoError(t, err) 75 defer cli.Close() 76 info1 := model.ChangeFeedInfo{ 77 SinkURI: "test1", 78 StartTs: 1, TargetTs: 100, State: model.StateNormal, 79 } 80 status1 := model.ChangeFeedStatus{CheckpointTs: 1} 81 info2 := model.ChangeFeedInfo{ 82 SinkURI: "test1", 83 StartTs: 2, TargetTs: 200, State: model.StateWarning, 84 } 85 status2 := model.ChangeFeedStatus{CheckpointTs: 2} 86 cfg := config.GetDefaultReplicaConfig() 87 cfg.CheckGCSafePoint = false 88 cfg.Sink = &config.SinkConfig{ 89 DispatchRules: []*config.DispatchRule{ 90 { 91 Matcher: []string{"a", "b", "c"}, 92 DispatcherRule: "", 93 PartitionRule: "rule", 94 TopicRule: "topic", 95 }, 96 }, 97 Protocol: util.AddressOf("aaa"), 98 ColumnSelectors: []*config.ColumnSelector{ 99 { 100 Matcher: []string{"a", "b", "c"}, 101 Columns: []string{"a", "b"}, 102 }, 103 }, 104 SchemaRegistry: util.AddressOf("bbb"), 105 TxnAtomicity: util.AddressOf(config.AtomicityLevel("aa")), 106 } 107 cfg.Consistent = &config.ConsistentConfig{ 108 Level: "1", 109 MaxLogSize: 99, 110 FlushIntervalInMs: 10, 111 Storage: "s3", 112 } 113 cfg.Filter = &config.FilterConfig{ 114 Rules: []string{"a", "b", "c"}, 115 IgnoreTxnStartTs: []uint64{1, 2, 3}, 116 } 117 info3 := model.ChangeFeedInfo{ 118 SinkURI: "test1", 119 StartTs: 3, TargetTs: 300, State: model.StateFailed, 120 Config: cfg, 121 } 122 status3 := model.ChangeFeedStatus{CheckpointTs: 3} 123 124 testCases := []struct { 125 id string 126 info model.ChangeFeedInfo 127 status model.ChangeFeedStatus 128 }{ 129 {"test1", info1, status1}, 130 {"test2", info2, status2}, 131 {"test3", info3, status3}, 132 } 133 const oldInfoKeyBase = "/tidb/cdc/changefeed/info/%s" 134 const oldStatusKeyBase = "/tidb/cdc/job/%s" 135 136 // 0 add v6.1.0 config with cyclic enabled 137 otherClusterData := "/tidb/cdc/newcluster/default/upstream/1" 138 _, err = cli.Put(context.Background(), otherClusterData, "{}") 139 require.NoError(t, err) 140 141 // 0 add v6.1.0 config with cyclic enabled 142 _, err = cli.Put(context.Background(), 143 fmt.Sprintf(oldInfoKeyBase, "cyclic-test"), cycylicChangefeedInfo) 144 require.NoError(t, err) 145 146 // 1.put old version meta data to etcd 147 for _, tc := range testCases { 148 iv, err := tc.info.Marshal() 149 require.NoError(t, err) 150 _, err = cli.Put(context.Background(), fmt.Sprintf(oldInfoKeyBase, tc.id), iv) 151 require.NoError(t, err) 152 sv, err := tc.status.Marshal() 153 require.NoError(t, err) 154 _, err = cli.Put(context.Background(), fmt.Sprintf(oldStatusKeyBase, tc.id), sv) 155 require.NoError(t, err) 156 } 157 // 2. check old version data in etcd is expected 158 for _, tc := range testCases { 159 infoResp, err := cli.Get(context.Background(), 160 fmt.Sprintf(oldInfoKeyBase, tc.id)) 161 require.NoError(t, err) 162 info := model.ChangeFeedInfo{} 163 err = info.Unmarshal(infoResp.Kvs[0].Value) 164 require.NoError(t, err) 165 require.Equal(t, tc.info, info) 166 statusResp, err := cli.Get(context.Background(), 167 fmt.Sprintf(oldStatusKeyBase, tc.id)) 168 require.NoError(t, err) 169 status := model.ChangeFeedStatus{} 170 err = status.Unmarshal(statusResp.Kvs[0].Value) 171 require.NoError(t, err) 172 require.Equal(t, tc.status, status) 173 } 174 175 // set timeout to make sure this test will be finished 176 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 177 defer cancel() 178 cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli, "default") 179 require.Nil(t, err) 180 181 m := NewMigrator(cdcCli, []string{}, config.GetGlobalServerConfig()) 182 migrator := m.(*migrator) 183 migrator.createPDClientFunc = func(ctx context.Context, 184 pdEndpoints []string, conf *security.Credential, 185 ) (pd.Client, error) { 186 mock := newMockPDClient(true) 187 mock.respData = "{}" 188 mock.clusterID = 1 189 return mock, nil 190 } 191 192 // 3. tow non-owner node wait for meta migrating done 193 wg := new(sync.WaitGroup) 194 wg.Add(1) 195 go func() { 196 defer wg.Done() 197 err := migrator.WaitMetaVersionMatched(ctx) 198 require.NoError(t, err) 199 }() 200 wg.Add(1) 201 go func() { 202 defer wg.Done() 203 err := migrator.WaitMetaVersionMatched(ctx) 204 require.NoError(t, err) 205 }() 206 207 wg.Add(1) 208 // 4.owner note migrates meta data 209 go func() { 210 defer wg.Done() 211 // 5. test ShouldMigrate works as expected 212 should, err := migrator.ShouldMigrate(ctx) 213 require.NoError(t, err) 214 if should { 215 // migrate 216 err = migrator.Migrate(ctx) 217 require.NoError(t, err) 218 } 219 }() 220 221 // 6. wait for migration done 222 wg.Wait() 223 224 // 7. check new version data in etcd is expected 225 for _, tc := range testCases { 226 infoResp, err := cli.Get(context.Background(), 227 fmt.Sprintf("%s%s/%s", etcd.DefaultClusterAndNamespacePrefix, 228 etcd.ChangefeedInfoKey, tc.id)) 229 require.NoError(t, err) 230 info := model.ChangeFeedInfo{} 231 err = info.Unmarshal(infoResp.Kvs[0].Value) 232 require.NoError(t, err) 233 require.Equal(t, uint64(1), info.UpstreamID) 234 tc.info.UpstreamID = info.UpstreamID 235 require.Equal(t, model.DefaultNamespace, info.Namespace) 236 require.Equal(t, tc.id, info.ID) 237 tc.info.Namespace = info.Namespace 238 tc.info.ID = info.ID 239 require.Equal(t, tc.info, info) 240 statusResp, err := cli.Get(context.Background(), 241 fmt.Sprintf("%s%s/%s", etcd.DefaultClusterAndNamespacePrefix, 242 etcd.ChangefeedStatusKey, tc.id)) 243 require.NoError(t, err) 244 status := model.ChangeFeedStatus{} 245 err = status.Unmarshal(statusResp.Kvs[0].Value) 246 require.NoError(t, err) 247 require.Equal(t, tc.status, status) 248 249 // old key is deleted 250 resp, err := cli.Get(context.Background(), 251 fmt.Sprintf(oldInfoKeyBase, tc.id)) 252 require.Nil(t, err) 253 require.Equal(t, int64(0), resp.Count) 254 resp, err = cli.Get(context.Background(), 255 fmt.Sprintf(oldStatusKeyBase, tc.id)) 256 require.Nil(t, err) 257 require.Equal(t, int64(0), resp.Count) 258 259 // backup key is added 260 resp, err = cli.Get(context.Background(), 261 etcd.MigrateBackupKey(0, fmt.Sprintf(oldInfoKeyBase, tc.id))) 262 require.Nil(t, err) 263 require.Equal(t, int64(1), resp.Count) 264 resp, err = cli.Get(context.Background(), 265 etcd.MigrateBackupKey(0, fmt.Sprintf(oldInfoKeyBase, tc.id))) 266 require.Nil(t, err) 267 require.Equal(t, int64(1), resp.Count) 268 } 269 // check cyclic 270 infoResp, err := cli.Get(context.Background(), 271 fmt.Sprintf("%s%s/%s", etcd.DefaultClusterAndNamespacePrefix, 272 etcd.ChangefeedInfoKey, "cyclic-test")) 273 require.NoError(t, err) 274 info := model.ChangeFeedInfo{} 275 err = info.Unmarshal(infoResp.Kvs[0].Value) 276 require.NoError(t, err) 277 require.Equal(t, uint64(1), info.UpstreamID) 278 require.Equal(t, model.DefaultNamespace, info.Namespace) 279 280 resp, err := cli.Get(context.Background(), otherClusterData) 281 require.Nil(t, err) 282 require.Equal(t, int64(1), resp.Count) 283 284 m.MarkMigrateDone() 285 require.True(t, m.IsMigrateDone()) 286 key := etcd.CDCKey{Tp: etcd.CDCKeyTypeMetaVersion, ClusterID: "default"} 287 resp, err = cli.Get(ctx, key.String()) 288 require.Nil(t, err) 289 v, err := strconv.ParseInt(string(resp.Kvs[0].Value), 10, 64) 290 require.Nil(t, err) 291 require.Equal(t, int64(1), v) 292 293 // migrate again 294 for i := 0; i < 10; i++ { 295 err = m.Migrate(ctx) 296 require.Nil(t, err) 297 } 298 299 _, err = cli.Put(ctx, key.String(), "2") 300 require.Nil(t, err) 301 require.Panics(t, func() { 302 _ = m.Migrate(ctx) 303 }) 304 resp, err = cli.Get(ctx, key.String()) 305 require.Nil(t, err) 306 v, err = strconv.ParseInt(string(resp.Kvs[0].Value), 10, 64) 307 require.Nil(t, err) 308 require.Equal(t, int64(2), v) 309 310 _, err = cli.Put(ctx, key.String(), "aaa") 311 require.Nil(t, err) 312 require.NotNil(t, m.Migrate(ctx)) 313 resp, err = cli.Get(ctx, key.String()) 314 require.Nil(t, err) 315 require.Equal(t, "aaa", string(resp.Kvs[0].Value)) 316 } 317 318 func TestNoOpMigrator(t *testing.T) { 319 noOp := &NoOpMigrator{} 320 require.True(t, noOp.IsMigrateDone()) 321 noOp.MarkMigrateDone() 322 require.Nil(t, noOp.Migrate(context.Background())) 323 ok, err := noOp.ShouldMigrate(context.Background()) 324 require.False(t, ok) 325 require.Nil(t, err) 326 require.Nil(t, noOp.WaitMetaVersionMatched(context.Background())) 327 } 328 329 func TestMigrationNonDefaultCluster(t *testing.T) { 330 s := &etcd.Tester{} 331 s.SetUpTest(t) 332 defer s.TearDownTest(t) 333 curl := s.ClientURL.String() 334 cli, err := clientv3.New(clientv3.Config{ 335 Endpoints: []string{curl}, 336 DialTimeout: 3 * time.Second, 337 }) 338 require.NoError(t, err) 339 defer cli.Close() 340 info1 := model.ChangeFeedInfo{ 341 SinkURI: "test1", 342 StartTs: 1, TargetTs: 100, State: model.StateNormal, 343 } 344 status1 := model.ChangeFeedStatus{CheckpointTs: 1} 345 info2 := model.ChangeFeedInfo{ 346 SinkURI: "test1", 347 StartTs: 2, TargetTs: 200, State: model.StateWarning, 348 } 349 status2 := model.ChangeFeedStatus{CheckpointTs: 2} 350 info3 := model.ChangeFeedInfo{ 351 SinkURI: "test1", 352 StartTs: 3, TargetTs: 300, State: model.StateFailed, 353 } 354 status3 := model.ChangeFeedStatus{CheckpointTs: 3} 355 356 testCases := []struct { 357 id string 358 info model.ChangeFeedInfo 359 status model.ChangeFeedStatus 360 }{ 361 {"test1", info1, status1}, 362 {"test2", info2, status2}, 363 {"test3", info3, status3}, 364 } 365 const oldInfoKeyBase = "/tidb/cdc/changefeed/info/%s" 366 const oldStatusKeyBase = "/tidb/cdc/job/%s" 367 368 // 1.put old version meta data to etcd 369 for _, tc := range testCases { 370 iv, err := tc.info.Marshal() 371 require.NoError(t, err) 372 _, err = cli.Put(context.Background(), fmt.Sprintf(oldInfoKeyBase, tc.id), iv) 373 require.NoError(t, err) 374 sv, err := tc.status.Marshal() 375 require.NoError(t, err) 376 _, err = cli.Put(context.Background(), fmt.Sprintf(oldStatusKeyBase, tc.id), sv) 377 require.NoError(t, err) 378 } 379 // 2. check old version data in etcd is expected 380 for _, tc := range testCases { 381 infoResp, err := cli.Get(context.Background(), 382 fmt.Sprintf(oldInfoKeyBase, tc.id)) 383 require.NoError(t, err) 384 info := model.ChangeFeedInfo{} 385 err = info.Unmarshal(infoResp.Kvs[0].Value) 386 require.NoError(t, err) 387 require.Equal(t, tc.info, info) 388 statusResp, err := cli.Get(context.Background(), 389 fmt.Sprintf(oldStatusKeyBase, tc.id)) 390 require.NoError(t, err) 391 status := model.ChangeFeedStatus{} 392 err = status.Unmarshal(statusResp.Kvs[0].Value) 393 require.NoError(t, err) 394 require.Equal(t, tc.status, status) 395 } 396 397 // set timeout to make sure this test will be finished 398 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) 399 defer cancel() 400 cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli, "nodefault") 401 require.Nil(t, err) 402 403 m := NewMigrator(cdcCli, []string{}, config.GetGlobalServerConfig()) 404 migrator := m.(*migrator) 405 migrator.createPDClientFunc = func(ctx context.Context, 406 pdEndpoints []string, conf *security.Credential, 407 ) (pd.Client, error) { 408 mock := gc.MockPDClient{ClusterID: 1} 409 return &mock, nil 410 } 411 // 3. tow non-owner node wait for meta migrating done 412 wg := new(sync.WaitGroup) 413 wg.Add(1) 414 go func() { 415 defer wg.Done() 416 err := migrator.WaitMetaVersionMatched(ctx) 417 require.NoError(t, err) 418 }() 419 wg.Add(1) 420 go func() { 421 defer wg.Done() 422 err := migrator.WaitMetaVersionMatched(ctx) 423 require.NoError(t, err) 424 }() 425 426 wg.Add(1) 427 // 4.owner note migrates meta data 428 go func() { 429 defer wg.Done() 430 // 5. test ShouldMigrate works as expected 431 should, err := migrator.ShouldMigrate(ctx) 432 require.NoError(t, err) 433 if should { 434 // migrate 435 err = migrator.Migrate(ctx) 436 require.NoError(t, err) 437 } 438 }() 439 440 // 6. wait for migration done 441 wg.Wait() 442 443 cfs, err := cdcCli.GetAllChangeFeedInfo(context.Background()) 444 require.Nil(t, err) 445 require.Equal(t, 0, len(cfs)) 446 m.MarkMigrateDone() 447 require.True(t, m.IsMigrateDone()) 448 } 449 450 type mockPDClient struct { 451 pd.Client 452 testServer *httptest.Server 453 url string 454 respData string 455 clusterID uint64 456 457 check func(serviceID string, ttl int64, safePoint uint64) (uint64, error) 458 } 459 460 func (m *mockPDClient) GetLeaderURL() string { 461 return m.url 462 } 463 464 func (m *mockPDClient) Close() {} 465 466 func (m *mockPDClient) UpdateServiceGCSafePoint(ctx context.Context, 467 serviceID string, ttl int64, safePoint uint64, 468 ) (uint64, error) { 469 return m.check(serviceID, ttl, safePoint) 470 } 471 472 // GetClusterID gets the cluster ID from PD. 473 func (m *mockPDClient) GetClusterID(ctx context.Context) uint64 { 474 return m.clusterID 475 } 476 477 // GetTS implements pd.Client.GetTS. 478 func (m *mockPDClient) GetTS(ctx context.Context) (int64, int64, error) { 479 return oracle.GetPhysical(time.Now()), 0, nil 480 } 481 482 //nolint:unparam 483 func newMockPDClient(normal bool) *mockPDClient { 484 mock := &mockPDClient{} 485 status := http.StatusOK 486 if !normal { 487 status = http.StatusNotFound 488 } 489 mock.testServer = httptest.NewServer(http.HandlerFunc( 490 func(w http.ResponseWriter, r *http.Request) { 491 w.WriteHeader(status) 492 _, _ = w.Write([]byte(mock.respData)) 493 }, 494 )) 495 mock.url = mock.testServer.URL 496 497 return mock 498 } 499 500 func TestMigrateGcServiceSafePoint(t *testing.T) { 501 ctx, cancel := context.WithCancel(context.Background()) 502 defer cancel() 503 m := &migrator{} 504 mockClient := newMockPDClient(true) 505 506 data := &pdutil.ListServiceGCSafepoint{ 507 ServiceGCSafepoints: []*pdutil.ServiceSafePoint{ 508 { 509 ServiceID: oldGcServiceID, 510 SafePoint: 10, 511 }, 512 { 513 ServiceID: "tidb", 514 SafePoint: 11, 515 }, 516 }, 517 GCSafePoint: 10, 518 } 519 buf, err := json.Marshal(data) 520 require.Nil(t, err) 521 mockClient.respData = string(buf) 522 var fParamters []struct { 523 serviceID string 524 ttl int64 525 safePoint uint64 526 } 527 ftimes := 0 528 mockClient.check = func(serviceID string, ttl int64, safePoint uint64) (uint64, error) { 529 ftimes++ 530 fParamters = append(fParamters, struct { 531 serviceID string 532 ttl int64 533 safePoint uint64 534 }{serviceID: serviceID, ttl: ttl, safePoint: safePoint}) 535 return 0, nil 536 } 537 err = m.migrateGcServiceSafePoint(ctx, mockClient, &security.Credential{}, "abcd", 10) 538 require.Nil(t, err) 539 require.Equal(t, 2, ftimes) 540 require.Equal(t, 2, len(fParamters)) 541 // set first then delete 542 require.Equal(t, "abcd", fParamters[0].serviceID) 543 require.Equal(t, oldGcServiceID, fParamters[1].serviceID) 544 require.Equal(t, int64(10), fParamters[0].ttl) 545 require.Equal(t, int64(0), fParamters[1].ttl) 546 require.Equal(t, uint64(10), fParamters[0].safePoint) 547 require.Equal(t, uint64(18446744073709551615), fParamters[1].safePoint) 548 mockClient.testServer.Close() 549 } 550 551 func TestRemoveOldGcServiceSafePointFailed(t *testing.T) { 552 ctx, cancel := context.WithCancel(context.Background()) 553 defer cancel() 554 mockClient := newMockPDClient(true) 555 556 m := &migrator{} 557 data := &pdutil.ListServiceGCSafepoint{ 558 ServiceGCSafepoints: []*pdutil.ServiceSafePoint{ 559 { 560 ServiceID: oldGcServiceID, 561 SafePoint: 10, 562 }, 563 }, 564 GCSafePoint: 10, 565 } 566 buf, err := json.Marshal(data) 567 require.Nil(t, err) 568 mockClient.respData = string(buf) 569 ftimes := 0 570 mockClient.check = func(serviceID string, ttl int64, safePoint uint64) (uint64, error) { 571 ftimes++ 572 if ftimes > 1 { 573 return 0, errors.New("test") 574 } 575 return 0, nil 576 } 577 err = m.migrateGcServiceSafePoint(ctx, mockClient, &security.Credential{}, "abcd", 10) 578 require.Nil(t, err) 579 require.Equal(t, 10, ftimes) 580 ftimes = 0 581 mockClient.check = func(serviceID string, ttl int64, safePoint uint64) (uint64, error) { 582 ftimes++ 583 return 0, errors.New("test") 584 } 585 err = m.migrateGcServiceSafePoint(ctx, mockClient, &security.Credential{}, "abcd", 10) 586 require.Equal(t, 9, ftimes) 587 require.NotNil(t, err) 588 mockClient.testServer.Close() 589 } 590 591 func TestListServiceSafePointFailed(t *testing.T) { 592 ctx, cancel := context.WithCancel(context.Background()) 593 defer cancel() 594 mockClient := newMockPDClient(true) 595 596 m := &migrator{} 597 mockClient.respData = "xxx" 598 err := m.migrateGcServiceSafePoint(ctx, mockClient, &security.Credential{}, "abcd", 10) 599 require.NotNil(t, err) 600 } 601 602 func TestNoServiceSafePoint(t *testing.T) { 603 ctx, cancel := context.WithCancel(context.Background()) 604 defer cancel() 605 mockClient := newMockPDClient(true) 606 607 m := &migrator{} 608 data := &pdutil.ListServiceGCSafepoint{ 609 ServiceGCSafepoints: []*pdutil.ServiceSafePoint{ 610 { 611 ServiceID: "cccc", 612 SafePoint: 10, 613 }, 614 }, 615 GCSafePoint: 10, 616 } 617 buf, err := json.Marshal(data) 618 require.Nil(t, err) 619 mockClient.respData = string(buf) 620 err = m.migrateGcServiceSafePoint(ctx, mockClient, &security.Credential{}, "abcd", 10) 621 require.Nil(t, err) 622 } 623 624 func TestMaskChangefeedData(t *testing.T) { 625 info := model.ChangeFeedInfo{ 626 SinkURI: "mysql://root:root@127.0.0.1:3306", 627 StartTs: 1, TargetTs: 100, State: model.StateNormal, 628 } 629 data, err := json.Marshal(&info) 630 require.Nil(t, err) 631 masked := maskChangefeedInfo(data) 632 maskedInfo := model.ChangeFeedInfo{} 633 err = json.Unmarshal([]byte(masked), &maskedInfo) 634 require.Nil(t, err) 635 require.Equal(t, "mysql://username:password@***", maskedInfo.SinkURI) 636 maskedInfo.SinkURI = "mysql://root:root@127.0.0.1:3306" 637 require.Equal(t, info, maskedInfo) 638 }