github.com/pingcap/ticdc@v0.0.0-20220526033649-485a10ef2652/tests/bank/case.go

     1  // Copyright 2020 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package main
    15  
    16  import (
    17  	"context"
    18  	"database/sql"
    19  	"fmt"
    20  	"math/rand"
    21  	"strings"
    22  	"sync/atomic"
    23  	"time"
    24  
    25  	_ "github.com/go-sql-driver/mysql" // MySQL driver
    26  	"github.com/pingcap/errors"
    27  	"github.com/pingcap/log"
    28  	"github.com/pingcap/ticdc/pkg/retry"
    29  	"go.uber.org/zap"
    30  	"golang.org/x/sync/errgroup"
    31  )
    32  
    33  // -- Create table
    34  // CREATE TABLE IF NOT EXISTS accounts%d (
    35  // 	id BIGINT PRIMARY KEY,
    36  // 	balance BIGINT NOT NULL,
    37  // 	startts BIGINT NOT NULL
    38  // )
    39  // CREATE TABLE IF NOT EXISTS accounts_seq%d (
    40  // 	id BIGINT PRIMARY KEY,
    41  // 	counter BIGINT NOT NULL,
    42  // 	sequence BIGINT NOT NULL,
    43  // 	startts BIGINT NOT NULL
    44  // )
    45  //
    46  // BEGIN
    47  // -- Add sequential update rows.
    48  // SELECT counter, sequence FROM accounts_seq%d WHERE id = %d FOR UPDATE
    49  // UPDATE accounts_seq%d SET
    50  //   counter = %d,
    51  //   sequence = %d,
    52  //   startts = @@tidb_current_ts
    53  // WHERE id IN (%d, %d)
    54  //
    55  // -- Transaction between accounts.
    56  // SELECT id, balance FROM accounts%d WHERE id IN (%d, %d) FOR UPDATE
    57  // UPDATE accounts%d SET
    58  //   balance = CASE id WHEN %d THEN %d WHEN %d THEN %d END,
    59  //   sequence = %d,
    60  //   startts = @@tidb_current_ts
    61  // WHERE id IN (%d, %d)
    62  // COMMIT
    63  //
     64  // -- Verify that the sum of balances stays the same.
    65  // SELECT SUM(balance) as total FROM accounts%d
     66  // -- Verify that no transaction is missing.
    67  // SELECT sequence FROM accounts_seq%d ORDER BY sequence
    68  
     69  // Each testcase is driven as follows:
    70  // testcase.cleanup
    71  // testcase.prepare
    72  // go { loop { testcase.workload } }
    73  // go { loop { testcase.verify } }
    74  
    75  const (
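         	// initBalance is the starting balance of every account; verification checks that the total stays at accounts*initBalance.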
    76  	initBalance = 1000
    77  )
    78  
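         // testcase defines one bank workload: prepare and cleanup manage the test table,
         // workload mutates it inside a caller-provided transaction, and verify checks the
         // table's invariants, optionally at a given tidb_snapshot timestamp.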
    79  type testcase interface {
    80  	prepare(ctx context.Context, db *sql.DB, accounts int, tableID int, concurrency int) error
    81  	workload(ctx context.Context, tx *sql.Tx, accounts int, tableID int) error
    82  	verify(ctx context.Context, db *sql.DB, accounts, tableID int, tag string, endTs string) error
    83  	cleanup(ctx context.Context, db *sql.DB, accounts, tableID int, force bool) bool
    84  }
    85  
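         // sequenceTest writes a monotonically increasing sequence so that lost or reordered changes show up as gaps.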
    86  type sequenceTest struct{}
    87  
    88  var _ testcase = &sequenceTest{}
    89  
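         // workload locks the sequence row, bumps its counter, and writes the next sequence
         // value to both the sequence row and one rotating account row.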
    90  func (*sequenceTest) workload(ctx context.Context, tx *sql.Tx, accounts int, tableID int) error {
    91  	const sequenceRowID = 0
    92  
    93  	getCounterSeq := fmt.Sprintf("SELECT counter, sequence FROM accounts_seq%d WHERE id = %d FOR UPDATE", tableID, sequenceRowID)
    94  
    95  	var counter, maxSeq int
    96  	row := tx.QueryRowContext(ctx, getCounterSeq)
    97  	if err := row.Scan(&counter, &maxSeq); err != nil {
    98  		return errors.Trace(err)
    99  	}
   100  
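         	// Pick the next account row to touch; skip the dedicated sequence row (id 0).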
   101  	next := counter % accounts
   102  	if next == sequenceRowID {
   103  		next++
   104  		counter++
   105  	}
   106  	counter++
   107  
   108  	addSeqCounter := fmt.Sprintf(`
   109  	UPDATE accounts_seq%d SET
   110    		counter = %d,
   111    		sequence = %d,
   112    		startts = @@tidb_current_ts
   113  	WHERE id IN (%d, %d)`, tableID, counter, maxSeq+1, sequenceRowID, next)
   114  
   115  	if _, err := tx.ExecContext(ctx, addSeqCounter); err != nil {
   116  		log.Error("sequenceTest workload exec failed", zap.Error(err))
   117  		return errors.Trace(err)
   118  	}
   119  	return nil
   120  }
   121  
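         // prepare creates accounts_seq%d if needed and seeds it with zeroed rows via prepareImpl.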
   122  func (s *sequenceTest) prepare(ctx context.Context, db *sql.DB, accounts, tableID, concurrency int) error {
   123  	createTable := fmt.Sprintf(`
   124  	CREATE TABLE IF NOT EXISTS accounts_seq%d (
   125  		id BIGINT PRIMARY KEY,
   126  		counter BIGINT NOT NULL,
   127  		sequence BIGINT NOT NULL,
   128  		startts BIGINT NOT NULL
   129  	)`, tableID)
   130  	batchInsertSQLF := func(batchSize, offset int) string {
   131  		args := make([]string, batchSize)
   132  		for j := 0; j < batchSize; j++ {
   133  			args[j] = fmt.Sprintf("(%d, 0, 0, 0)", offset+j)
   134  		}
   135  		return fmt.Sprintf("INSERT IGNORE INTO accounts_seq%d (id, counter, sequence, startts) VALUES %s", tableID, strings.Join(args, ","))
   136  	}
   137  
   138  	prepareImpl(ctx, s, createTable, batchInsertSQLF, db, accounts, tableID, concurrency)
   139  	return nil
   140  }
   141  
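         // verify reads accounts_seq%d at the given tidb_snapshot and checks that adjacent
         // sequence values either repeat or increase by one, i.e. no change went missing.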
   142  func (*sequenceTest) verify(ctx context.Context, db *sql.DB, accounts, tableID int, tag string, endTs string) error {
   143  	query := fmt.Sprintf("set @@tidb_snapshot='%s'", endTs)
   144  	if _, err := db.ExecContext(ctx, query); err != nil {
   145  		log.Error("sequenceTest set tidb_snapshot failed", zap.String("query", query), zap.Error(err))
   146  		return errors.Trace(err)
   147  	}
   148  
   149  	query = fmt.Sprintf("SELECT sequence FROM accounts_seq%d ORDER BY sequence", tableID)
   150  	rows, err := db.QueryContext(ctx, query)
   151  	if err != nil {
   152  		log.Warn("select sequence err", zap.String("query", query), zap.Error(err), zap.String("tag", tag))
   153  		return nil
   154  	}
   155  	defer rows.Close()
   156  
   157  	var curr, previous int
   158  	for rows.Next() {
   159  		if err = rows.Scan(&curr); err != nil {
   160  			log.Warn("select sequence err", zap.String("query", query), zap.Error(err), zap.String("tag", tag))
   161  			return nil
   162  		}
   163  
   164  		if previous != 0 && previous != curr && previous+1 != curr {
    165  			return errors.Errorf("missing sequence change in accounts_seq%d, current sequence=%d, previous sequence=%d", tableID, curr, previous)
   166  		}
   167  		previous = curr
   168  	}
   169  
   170  	log.Info("sequence verify pass", zap.String("tag", tag))
   171  
   172  	if _, err := db.ExecContext(ctx, "set @@tidb_snapshot=''"); err != nil {
    173  		log.Warn("sequenceTest reset tidb_snapshot failed", zap.Error(err))
   174  	}
   175  
   176  	return nil
   177  }
   178  
    179  // cleanup drops the table when it is missing or its data fails verification, and panics on errors such as a bad connection. It returns true if the table was dropped and needs to be prepared again.
   180  func (s *sequenceTest) cleanup(ctx context.Context, db *sql.DB, accounts, tableID int, force bool) bool {
   181  	return cleanupImpl(ctx, s, fmt.Sprintf("accounts_seq%d", tableID), db, accounts, tableID, force)
   182  }
   183  
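         // bankTest moves money between random accounts and relies on the invariant that the total balance never changes.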
   184  type bankTest struct{}
   185  
   186  var _ testcase = &bankTest{}
   187  
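         // workload picks two distinct accounts, locks their balances with SELECT ... FOR UPDATE,
         // and moves a random amount from one to the other inside the caller's transaction.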
   188  func (*bankTest) workload(ctx context.Context, tx *sql.Tx, accounts int, tableID int) error {
   189  	var (
   190  		from, fromBalance int
   191  		to, toBalance     int
   192  	)
   193  
   194  	for {
   195  		from, to = rand.Intn(accounts), rand.Intn(accounts)
   196  		if from != to {
   197  			break
   198  		}
   199  	}
   200  
   201  	sqlFormat := fmt.Sprintf("SELECT balance FROM accounts%d WHERE id = ? FOR UPDATE", tableID)
   202  	row := tx.QueryRowContext(ctx, sqlFormat, from)
   203  	if err := row.Scan(&fromBalance); err != nil {
   204  		return errors.Trace(err)
   205  	}
   206  	row = tx.QueryRowContext(ctx, sqlFormat, to)
   207  	if err := row.Scan(&toBalance); err != nil {
   208  		return errors.Trace(err)
   209  	}
   210  
   211  	amount := rand.Intn(fromBalance/2 + 1)
   212  	fromBalance -= amount
   213  	toBalance += amount
   214  
   215  	sqlFormat = fmt.Sprintf("UPDATE accounts%d SET balance = ? WHERE id = ?", tableID)
   216  	if _, err := tx.ExecContext(ctx, sqlFormat, fromBalance, from); err != nil {
   217  		return errors.Trace(err)
   218  	}
   219  	if _, err := tx.ExecContext(ctx, sqlFormat, toBalance, to); err != nil {
   220  		return errors.Trace(err)
   221  	}
   222  
   223  	return nil
   224  }
   225  
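         // prepare creates accounts%d if needed and seeds every account with initBalance via prepareImpl.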
   226  func (s *bankTest) prepare(ctx context.Context, db *sql.DB, accounts, tableID, concurrency int) error {
   227  	createTable := fmt.Sprintf(`
   228  	CREATE TABLE IF NOT EXISTS accounts%d (
   229  		id BIGINT PRIMARY KEY,
   230  		balance BIGINT NOT NULL,
   231  		startts BIGINT NOT NULL
   232  	)`, tableID)
   233  	batchInsertSQLF := func(batchSize, offset int) string {
   234  		args := make([]string, batchSize)
   235  		for j := 0; j < batchSize; j++ {
   236  			args[j] = fmt.Sprintf("(%d, %d, 0)", offset+j, initBalance)
   237  		}
   238  		return fmt.Sprintf("INSERT IGNORE INTO accounts%d (id, balance, startts) VALUES %s", tableID, strings.Join(args, ","))
   239  	}
   240  
   241  	prepareImpl(ctx, s, createTable, batchInsertSQLF, db, accounts, tableID, concurrency)
   242  	return nil
   243  }
   244  
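         // verify checks, at the given tidb_snapshot, that the total balance equals
         // accounts*initBalance and that the number of rows equals accounts.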
   245  func (*bankTest) verify(ctx context.Context, db *sql.DB, accounts, tableID int, tag string, endTs string) error {
   246  	var obtained, expect int
   247  
   248  	if _, err := db.ExecContext(ctx, fmt.Sprintf("set @@tidb_snapshot='%s'", endTs)); err != nil {
    249  		log.Error("bank set tidb_snapshot failed", zap.String("endTs", endTs), zap.Error(err))
   250  		return errors.Trace(err)
   251  	}
   252  
   253  	query := fmt.Sprintf("SELECT SUM(balance) as total FROM accounts%d", tableID)
   254  	if err := db.QueryRowContext(ctx, query).Scan(&obtained); err != nil {
   255  		log.Warn("query failed", zap.String("query", query), zap.Error(err), zap.String("tag", tag))
   256  		return errors.Trace(err)
   257  	}
   258  
   259  	expect = accounts * initBalance
   260  	if obtained != expect {
   261  		return errors.Errorf("verify balance failed, accounts%d expect %d, but got %d", tableID, expect, obtained)
   262  	}
   263  
   264  	query = fmt.Sprintf("SELECT COUNT(*) as count FROM accounts%d", tableID)
   265  	if err := db.QueryRowContext(ctx, query).Scan(&obtained); err != nil {
   266  		log.Warn("query failed", zap.String("query", query), zap.Error(err), zap.String("tag", tag))
   267  		return errors.Trace(err)
   268  	}
   269  	if obtained != accounts {
   270  		return errors.Errorf("verify count failed, accounts%d expected=%d, obtained=%d", tableID, accounts, obtained)
   271  	}
   272  
   273  	log.Info("bank verify pass", zap.String("tag", tag))
   274  
   275  	if _, err := db.ExecContext(ctx, "set @@tidb_snapshot=''"); err != nil {
    276  		log.Warn("bank reset tidb_snapshot failed", zap.Error(err))
   277  	}
   278  
   279  	return nil
   280  }
   281  
    282  // cleanup drops the table when it is missing or its data fails verification, and panics on errors such as a bad connection. It returns true if the table was dropped and needs to be prepared again.
   283  func (s *bankTest) cleanup(ctx context.Context, db *sql.DB, accounts, tableID int, force bool) bool {
   284  	return cleanupImpl(ctx, s, fmt.Sprintf("accounts%d", tableID), db, accounts, tableID, force)
   285  }
   286  
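         // prepareImpl re-creates and refills the test table, but only when cleanup reports that
         // the existing table was dropped; it seeds the rows using `concurrency` workers that
         // insert in batches of 100.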
   287  func prepareImpl(
   288  	ctx context.Context,
   289  	test testcase, createTable string, batchInsertSQLF func(batchSize, offset int) string,
   290  	db *sql.DB, accounts, tableID, concurrency int,
   291  ) {
   292  	isDropped := test.cleanup(ctx, db, accounts, tableID, false)
   293  	if !isDropped {
   294  		return
   295  	}
   296  
   297  	mustExec(ctx, db, createTable)
   298  
   299  	batchSize := 100
   300  	jobCount := accounts / batchSize
   301  	if accounts%batchSize != 0 {
   302  		jobCount++
   303  	}
   304  
   305  	insertF := func(query string) error {
   306  		_, err := db.ExecContext(ctx, query)
   307  		return err
   308  	}
   309  
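         	// Fan out the batch inserts: workers consume start offsets from ch until it is closed.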
   310  	g := new(errgroup.Group)
   311  	ch := make(chan int, jobCount)
   312  	for i := 0; i < concurrency; i++ {
   313  		g.Go(func() error {
   314  			for {
   315  				startIndex, ok := <-ch
   316  				if !ok {
   317  					return nil
   318  				}
   319  
   320  				size := batchSize
    321  				remained := accounts - startIndex // ids startIndex..accounts-1 are still to be inserted
   322  				if remained < size {
   323  					size = remained
   324  				}
   325  
   326  				batchInsertSQL := batchInsertSQLF(size, startIndex)
   327  				start := time.Now()
   328  				err := retry.Do(context.Background(), func() error {
   329  					return insertF(batchInsertSQL)
   330  				}, retry.WithBackoffBaseDelay(100), retry.WithBackoffMaxDelay(60*100), retry.WithMaxTries(5))
   331  				if err != nil {
   332  					log.Panic("exec batch insert failed", zap.String("query", batchInsertSQL), zap.Error(err))
   333  				}
    334  				log.Info(fmt.Sprintf("insert %d takes %s", size, time.Since(start)), zap.String("query", batchInsertSQL))
   335  			}
   336  		})
   337  	}
   338  
   339  	for i := 0; i < jobCount; i++ {
   340  		ch <- i * batchSize
   341  	}
   342  	close(ch)
   343  	_ = g.Wait()
   344  }
   345  
   346  func dropDB(ctx context.Context, db *sql.DB) {
   347  	log.Info("drop database")
    348  	mustExec(ctx, db, "DROP DATABASE IF EXISTS bank")
   349  }
   350  
   351  func dropTable(ctx context.Context, db *sql.DB, table string) {
   352  	log.Info("drop tables", zap.String("table", table))
   353  	mustExec(ctx, db, fmt.Sprintf("DROP TABLE IF EXISTS %s", table))
   354  }
   355  
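         // cleanupImpl drops tableName when force is set, when the table is missing, or when its
         // data no longer verifies; it returns true if the table has to be prepared again.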
   356  func cleanupImpl(ctx context.Context, test testcase, tableName string, db *sql.DB, accounts, tableID int, force bool) bool {
   357  	if force {
   358  		dropTable(ctx, db, tableName)
   359  		return true
   360  	}
   361  
   362  	if !isTableExist(ctx, db, tableName) {
   363  		dropTable(ctx, db, tableName)
   364  		return true
   365  	}
   366  
   367  	if err := test.verify(ctx, db, accounts, tableID, "tryDropDB", ""); err != nil {
   368  		dropTable(ctx, db, tableName)
   369  		return true
   370  	}
   371  
   372  	return false
   373  }
   374  
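         // mustExec executes the query with up to five retries and panics if it still fails.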
   375  func mustExec(ctx context.Context, db *sql.DB, query string) {
   376  	execF := func() error {
   377  		_, err := db.ExecContext(ctx, query)
   378  		return err
   379  	}
   380  
   381  	err := retry.Do(context.Background(), execF, retry.WithBackoffBaseDelay(100), retry.WithBackoffMaxDelay(60*100), retry.WithMaxTries(5))
   382  	if err != nil {
   383  		log.Panic("exec failed", zap.String("query", query), zap.Error(err))
   384  	}
   385  }
   386  
   387  func isTableExist(ctx context.Context, db *sql.DB, table string) bool {
    388  	// Scan returns sql.ErrNoRows when the table does not exist.
   389  	query := fmt.Sprintf("SHOW TABLES LIKE '%s'", table)
   390  	var t string
   391  	err := db.QueryRowContext(ctx, query).Scan(&t)
   392  	switch {
   393  	case err == sql.ErrNoRows:
   394  		return false
   395  	case err != nil:
   396  		log.Panic("query failed", zap.String("query", query), zap.Error(err))
   397  	}
   398  	return true
   399  }
   400  
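         // openDB opens a MySQL connection pool for the given DSN, pings it with a 5s timeout, and panics on failure.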
   401  func openDB(ctx context.Context, dsn string) *sql.DB {
   402  	db, err := sql.Open("mysql", dsn)
   403  	if err != nil {
   404  		log.Panic("open db failed", zap.String("dsn", dsn), zap.Error(err))
   405  	}
   406  	db.SetMaxOpenConns(10)
   407  	db.SetMaxIdleConns(10)
   408  	db.SetConnMaxLifetime(50 * time.Minute)
   409  	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
   410  	defer cancel()
   411  	if err = db.PingContext(ctx); err != nil {
   412  		log.Panic("ping db failed", zap.String("dsn", dsn), zap.Error(err))
   413  	}
   414  	log.Info("open db success", zap.String("dsn", dsn))
   415  	return db
   416  }
   417  
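         // run drives the bank test: it prepares the upstream tables (or only cleans them up when
         // cleanupOnly is set), then starts one workload goroutine and one verify goroutine per
         // table. finishmark tables created every `interval` transactions act as sync points:
         // once such a table appears downstream, both upstream and downstream data are verified.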
   418  func run(
   419  	ctx context.Context, upstream, downstream string, accounts, tables, concurrency int,
   420  	interval, testRound int64, cleanupOnly bool,
   421  ) {
   422  	ctx, cancel := context.WithTimeout(ctx, 30*time.Minute)
   423  	defer cancel()
   424  
   425  	upstreamDB := openDB(ctx, upstream)
   426  	defer upstreamDB.Close()
   427  
   428  	downstreamDB := openDB(ctx, downstream)
   429  	defer downstreamDB.Close()
   430  
   431  	tests := []testcase{&sequenceTest{}, &bankTest{}}
   432  
   433  	if cleanupOnly {
   434  		for tableID := 0; tableID < tables; tableID++ {
   435  			for i := range tests {
   436  				tests[i].cleanup(ctx, upstreamDB, accounts, tableID, true)
   437  				tests[i].cleanup(ctx, downstreamDB, accounts, tableID, true)
   438  			}
   439  		}
   440  
    441  		// A lot of DDL has been executed at the upstream, so just drop the whole database.
   442  		dropDB(ctx, upstreamDB)
   443  		dropDB(ctx, downstreamDB)
   444  		log.Info("cleanup done")
   445  		return
   446  	}
   447  
   448  	// prepare data for upstream db.
   449  	for _, test := range tests {
   450  		for tableID := 0; tableID < tables; tableID++ {
   451  			if err := test.prepare(ctx, upstreamDB, accounts, tableID, concurrency); err != nil {
   452  				log.Panic("prepare failed", zap.Error(err))
   453  			}
   454  		}
   455  	}
   456  
    457  	// DDL is a strong sync point in TiCDC. Once the finishmark table has been
    458  	// replicated to the downstream, all previous DDL and DML have been replicated too.
   459  	mustExec(ctx, upstreamDB, `CREATE TABLE IF NOT EXISTS finishmark (foo BIGINT PRIMARY KEY)`)
   460  	waitCtx, waitCancel := context.WithTimeout(ctx, 5*time.Minute)
   461  	endTs, err := getDownStreamSyncedEndTs(waitCtx, downstreamDB, "finishmark")
   462  	waitCancel()
   463  	if err != nil {
   464  		log.Panic("wait for table finishmark failed", zap.Error(err))
   465  	}
   466  	log.Info("all tables synced", zap.String("endTs", endTs))
   467  
   468  	var (
   469  		counts       int64 = 0
   470  		g                  = new(errgroup.Group)
   471  		tblChan            = make(chan string, tables)
   472  		doneCh             = make(chan struct{}, 1)
   473  		valid, tried int64 = 0, 0
   474  	)
   475  
   476  	for id := 0; id < tables; id++ {
   477  		tableID := id
   478  		// Workload
   479  		g.Go(func() error {
   480  			workload := func() error {
   481  				tx, err := upstreamDB.Begin()
   482  				if err != nil {
   483  					log.Error("upstream begin tx failed", zap.Error(err))
   484  					return errors.Trace(err)
   485  				}
   486  
   487  				for _, test := range tests {
   488  					if err := test.workload(context.Background(), tx, accounts, tableID); err != nil {
   489  						_ = tx.Rollback()
   490  						return errors.Trace(err)
   491  					}
   492  				}
   493  
   494  				if err := tx.Commit(); err != nil {
   495  					_ = tx.Rollback()
   496  					log.Error("upstream tx commit failed", zap.Error(err))
   497  					return errors.Trace(err)
   498  				}
   499  
   500  				curr := atomic.AddInt64(&counts, 1)
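         				// Every `interval` committed transactions, create a finishmark table whose replication marks a sync point for the verifier.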
   501  				if curr%interval == 0 {
   502  					tblName := fmt.Sprintf("finishmark%d", curr)
   503  					ddl := fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s (foo BIGINT PRIMARY KEY)", tblName)
   504  					mustExec(ctx, upstreamDB, ddl)
   505  					tblChan <- tblName
   506  
   507  					log.Info("upstream create table executed", zap.String("tblName", tblName))
   508  				}
   509  				return nil
   510  			}
   511  
   512  			for {
   513  				select {
   514  				case <-ctx.Done():
   515  					return ctx.Err()
   516  				default:
   517  					err := workload()
   518  					if err != nil && errors.Cause(err) != context.Canceled {
   519  						log.Warn("workload failed", zap.Error(err))
   520  					}
   521  
   522  					curr := atomic.LoadInt64(&counts)
   523  					if curr >= testRound {
   524  						log.Info("one upstream workload finished", zap.Int64("round", curr))
   525  						doneCh <- struct{}{}
   526  						return nil
   527  					}
   528  				}
   529  			}
   530  		})
   531  
   532  		// Verify
   533  		g.Go(func() error {
   534  			for {
   535  				select {
   536  				case <-ctx.Done():
   537  					return ctx.Err()
   538  				case tblName := <-tblChan:
   539  					log.Info("downstream start wait for table", zap.String("tblName", tblName))
   540  					waitCtx, waitCancel := context.WithTimeout(ctx, 5*time.Minute)
   541  					endTs, err := getDownStreamSyncedEndTs(waitCtx, downstreamDB, tblName)
   542  					waitCancel()
   543  					log.Info("ddl synced", zap.String("table", tblName))
   544  					if err != nil {
   545  						log.Fatal("[cdc-bank] get ddl end ts error", zap.Error(err))
   546  					}
   547  
   548  					atomic.AddInt64(&tried, 1)
   549  					log.Info("downstream sync success", zap.String("endTs", endTs))
   550  
   551  					if endTs == "" {
   552  						continue
   553  					}
   554  					atomic.AddInt64(&valid, 1)
   555  
   556  					for _, test := range tests {
   557  						verifyCtx, verifyCancel := context.WithTimeout(ctx, time.Second*20)
   558  						if err := test.verify(verifyCtx, upstreamDB, accounts, tableID, upstream, ""); err != nil {
   559  							log.Panic("upstream verify failed", zap.Error(err))
   560  						}
   561  						verifyCancel()
   562  
   563  						verifyCtx, verifyCancel = context.WithTimeout(ctx, time.Second*20)
   564  						if err := test.verify(verifyCtx, downstreamDB, accounts, tableID, downstream, endTs); err != nil {
   565  							log.Panic("downstream verify failed", zap.Error(err))
   566  						}
   567  						verifyCancel()
   568  					}
   569  				case <-doneCh:
   570  					log.Info("one downstream exit due to receive done")
   571  					return nil
   572  				}
   573  			}
   574  		})
   575  	}
   576  
   577  	_ = g.Wait()
   578  
   579  	if tried == 0 {
   580  		log.Warn("bank test finished, but tries is 0")
   581  	} else {
   582  		log.Info("bank test finished", zap.Int64("valid", valid), zap.Int64("tries", tried), zap.Float64("ratio", float64(valid)/float64(tried)))
   583  	}
   584  }
   585  
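         // dataRow holds one row of the "admin show ddl jobs" output.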
   586  type dataRow struct {
   587  	JobID       int64
   588  	DBName      string
   589  	TblName     string
   590  	JobType     string
   591  	SchemaState string
    592  	SchemaID    int64
   593  	TblID       int64
   594  	RowCount    int64
   595  	StartTime   string
   596  	EndTime     *string
   597  	State       string
   598  }
   599  
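         // getDownStreamSyncedEndTs polls the downstream every two seconds until the DDL job for
         // tableName shows up, and returns the job's end timestamp (empty if it has not finished).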
   600  func getDownStreamSyncedEndTs(ctx context.Context, db *sql.DB, tableName string) (result string, err error) {
   601  	for {
   602  		select {
   603  		case <-ctx.Done():
   604  			log.Error("get downstream sync end ts failed due to timeout", zap.String("table", tableName), zap.Error(ctx.Err()))
   605  			return "", ctx.Err()
   606  		case <-time.After(2 * time.Second):
   607  			result, ok := tryGetEndTs(db, tableName)
   608  			if ok {
   609  				return result, nil
   610  			}
   611  		}
   612  	}
   613  }
   614  
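         // tryGetEndTs queries "admin show ddl jobs" for tableName; ok is false if the job cannot
         // be read yet, and the returned end time is empty while the job is still running.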
   615  func tryGetEndTs(db *sql.DB, tableName string) (result string, ok bool) {
   616  	query := fmt.Sprintf("admin show ddl jobs where table_name = '%s'", tableName)
   617  	log.Info("try get end ts", zap.String("query", query))
   618  
   619  	var line dataRow
   620  	row := db.QueryRow(query)
    621  	if err := row.Scan(&line.JobID, &line.DBName, &line.TblName, &line.JobType, &line.SchemaState, &line.SchemaID,
   622  		&line.TblID, &line.RowCount, &line.StartTime, &line.EndTime, &line.State); err != nil {
   623  		if err != sql.ErrNoRows {
   624  			log.Info("rows scan failed", zap.Error(err))
   625  		}
   626  		return "", false
   627  	}
   628  
   629  	if line.EndTime == nil {
   630  		return "", true
   631  	}
   632  	return *line.EndTime, true
   633  }