github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/lightning/restore/restore_test.go

     1  // Copyright 2019 PingCAP, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // See the License for the specific language governing permissions and
    12  // limitations under the License.
    13  
    14  package restore
    15  
    16  import (
    17  	"context"
    18  	"fmt"
    19  	"net/http"
    20  	"net/http/httptest"
    21  	"os"
    22  	"path/filepath"
    23  	"sort"
    24  	"strings"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/DATA-DOG/go-sqlmock"
    29  	"github.com/docker/go-units"
    30  	"github.com/golang/mock/gomock"
    31  	"github.com/google/uuid"
    32  	. "github.com/pingcap/check"
    33  	"github.com/pingcap/errors"
    34  	"github.com/pingcap/failpoint"
    35  	"github.com/pingcap/kvproto/pkg/import_kvpb"
    36  	"github.com/pingcap/parser"
    37  	"github.com/pingcap/parser/ast"
    38  	"github.com/pingcap/parser/model"
    39  	"github.com/pingcap/parser/mysql"
    40  	"github.com/pingcap/parser/types"
    41  	filter "github.com/pingcap/tidb-tools/pkg/table-filter"
    42  	"github.com/pingcap/tidb/ddl"
    43  	tmock "github.com/pingcap/tidb/util/mock"
    44  
    45  	"github.com/pingcap/br/pkg/lightning/backend"
    46  	"github.com/pingcap/br/pkg/lightning/backend/importer"
    47  	"github.com/pingcap/br/pkg/lightning/backend/kv"
    48  	"github.com/pingcap/br/pkg/lightning/backend/noop"
    49  	"github.com/pingcap/br/pkg/lightning/backend/tidb"
    50  	"github.com/pingcap/br/pkg/lightning/checkpoints"
    51  	"github.com/pingcap/br/pkg/lightning/common"
    52  	"github.com/pingcap/br/pkg/lightning/config"
    53  	"github.com/pingcap/br/pkg/lightning/glue"
    54  	"github.com/pingcap/br/pkg/lightning/log"
    55  	"github.com/pingcap/br/pkg/lightning/metric"
    56  	"github.com/pingcap/br/pkg/lightning/mydump"
    57  	"github.com/pingcap/br/pkg/lightning/verification"
    58  	"github.com/pingcap/br/pkg/lightning/web"
    59  	"github.com/pingcap/br/pkg/lightning/worker"
    60  	"github.com/pingcap/br/pkg/mock"
    61  	"github.com/pingcap/br/pkg/storage"
    62  	"github.com/pingcap/br/pkg/version/build"
    63  )
    64  
    65  var _ = Suite(&restoreSuite{})
    66  
    67  type restoreSuite struct{}
    68  
    69  func (s *restoreSuite) TestNewTableRestore(c *C) {
    70  	testCases := []struct {
    71  		name       string
    72  		createStmt string
    73  	}{
    74  		{"t1", "CREATE TABLE `t1` (`c1` varchar(5) NOT NULL)"},
    75  		// {"t2", "CREATE TABLE `t2` (`c1` varchar(30000) NOT NULL)"}, // no longer able to create this kind of table.
    76  		{"t3", "CREATE TABLE `t3-a` (`c1-a` varchar(5) NOT NULL)"},
    77  	}
    78  
    79  	p := parser.New()
    80  	se := tmock.NewContext()
    81  
    82  	dbInfo := &checkpoints.TidbDBInfo{Name: "mockdb", Tables: map[string]*checkpoints.TidbTableInfo{}}
    83  	for i, tc := range testCases {
    84  		node, err := p.ParseOneStmt(tc.createStmt, "utf8mb4", "utf8mb4_bin")
    85  		c.Assert(err, IsNil)
    86  		tableInfo, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), int64(i+1))
    87  		c.Assert(err, IsNil)
    88  		tableInfo.State = model.StatePublic
    89  
    90  		dbInfo.Tables[tc.name] = &checkpoints.TidbTableInfo{
    91  			Name: tc.name,
    92  			Core: tableInfo,
    93  		}
    94  	}
    95  
    96  	for _, tc := range testCases {
    97  		tableInfo := dbInfo.Tables[tc.name]
    98  		tableName := common.UniqueTable("mockdb", tableInfo.Name)
    99  		tr, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil)
   100  		c.Assert(tr, NotNil)
   101  		c.Assert(err, IsNil)
   102  	}
   103  }
   104  
   105  func (s *restoreSuite) TestNewTableRestoreFailure(c *C) {
   106  	tableInfo := &checkpoints.TidbTableInfo{
   107  		Name: "failure",
   108  		Core: &model.TableInfo{},
   109  	}
   110  	dbInfo := &checkpoints.TidbDBInfo{Name: "mockdb", Tables: map[string]*checkpoints.TidbTableInfo{
   111  		"failure": tableInfo,
   112  	}}
   113  	tableName := common.UniqueTable("mockdb", "failure")
   114  
   115  	_, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &checkpoints.TableCheckpoint{}, nil)
   116  	c.Assert(err, ErrorMatches, `failed to tables\.TableFromMeta.*`)
   117  }
   118  
   119  func (s *restoreSuite) TestErrorSummaries(c *C) {
   120  	logger, buffer := log.MakeTestLogger()
   121  
   122  	es := makeErrorSummaries(logger)
   123  	es.record("first", errors.New("a1 error"), checkpoints.CheckpointStatusAnalyzed)
   124  	es.record("second", errors.New("b2 error"), checkpoints.CheckpointStatusAllWritten)
   125  	es.emitLog()
   126  
   127  	lines := buffer.Lines()
   128  	sort.Strings(lines[1:])
   129  	c.Assert(lines, DeepEquals, []string{
   130  		`{"$lvl":"ERROR","$msg":"tables failed to be imported","count":2}`,
   131  		`{"$lvl":"ERROR","$msg":"-","table":"first","status":"analyzed","error":"a1 error"}`,
   132  		`{"$lvl":"ERROR","$msg":"-","table":"second","status":"written","error":"b2 error"}`,
   133  	})
   134  }
   135  
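// TestVerifyCheckpoint checks that resuming from a saved task checkpoint fails when
// key settings (backend, importer address, data source dir, TiDB host/port, PD address,
// or the lightning version) differ from the values recorded in the checkpoint, and that
// re-initialization succeeds when CheckRequirements is disabled.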
   136  func (s *restoreSuite) TestVerifyCheckpoint(c *C) {
   137  	dir := c.MkDir()
   138  	cpdb := checkpoints.NewFileCheckpointsDB(filepath.Join(dir, "cp.pb"))
   139  	defer cpdb.Close()
   140  	ctx := context.Background()
   141  
   142  	actualReleaseVersion := build.ReleaseVersion
   143  	defer func() {
   144  		build.ReleaseVersion = actualReleaseVersion
   145  	}()
   146  
   147  	taskCp, err := cpdb.TaskCheckpoint(ctx)
   148  	c.Assert(err, IsNil)
   149  	c.Assert(taskCp, IsNil)
   150  
   151  	newCfg := func() *config.Config {
   152  		cfg := config.NewConfig()
   153  		cfg.Mydumper.SourceDir = "/data"
   154  		cfg.TaskID = 123
   155  		cfg.TiDB.Port = 4000
   156  		cfg.TiDB.PdAddr = "127.0.0.1:2379"
   157  		cfg.TikvImporter.Backend = config.BackendImporter
   158  		cfg.TikvImporter.Addr = "127.0.0.1:8287"
   159  		cfg.TikvImporter.SortedKVDir = "/tmp/sorted-kv"
   160  
   161  		return cfg
   162  	}
   163  
   164  	err = cpdb.Initialize(ctx, newCfg(), map[string]*checkpoints.TidbDBInfo{})
   165  	c.Assert(err, IsNil)
   166  
   167  	adjustFuncs := map[string]func(cfg *config.Config){
   168  		"tikv-importer.backend": func(cfg *config.Config) {
   169  			cfg.TikvImporter.Backend = config.BackendLocal
   170  		},
   171  		"tikv-importer.addr": func(cfg *config.Config) {
   172  			cfg.TikvImporter.Addr = "128.0.0.1:8287"
   173  		},
   174  		"mydumper.data-source-dir": func(cfg *config.Config) {
   175  			cfg.Mydumper.SourceDir = "/tmp/test"
   176  		},
   177  		"tidb.host": func(cfg *config.Config) {
   178  			cfg.TiDB.Host = "192.168.0.1"
   179  		},
   180  		"tidb.port": func(cfg *config.Config) {
   181  			cfg.TiDB.Port = 5000
   182  		},
   183  		"tidb.pd-addr": func(cfg *config.Config) {
   184  			cfg.TiDB.PdAddr = "127.0.0.1:3379"
   185  		},
   186  		"version": func(cfg *config.Config) {
   187  			build.ReleaseVersion = "some newer version"
   188  		},
   189  	}
   190  
    191  	// default mode (CheckRequirements enabled): each mismatch should return an error
   192  	taskCp, err = cpdb.TaskCheckpoint(ctx)
   193  	c.Assert(err, IsNil)
   194  	for conf, fn := range adjustFuncs {
   195  		cfg := newCfg()
   196  		fn(cfg)
   197  		err := verifyCheckpoint(cfg, taskCp)
   198  		if conf == "version" {
   199  			build.ReleaseVersion = actualReleaseVersion
   200  			c.Assert(err, ErrorMatches, "lightning version is 'some newer version', but checkpoint was created at '"+actualReleaseVersion+"'.*")
   201  		} else {
   202  			c.Assert(err, ErrorMatches, fmt.Sprintf("config '%s' value '.*' different from checkpoint value .*", conf))
   203  		}
   204  	}
   205  
   206  	for conf, fn := range adjustFuncs {
   207  		if conf == "tikv-importer.backend" {
   208  			continue
   209  		}
   210  		cfg := newCfg()
   211  		cfg.App.CheckRequirements = false
   212  		fn(cfg)
   213  		err := cpdb.Initialize(context.Background(), cfg, map[string]*checkpoints.TidbDBInfo{})
   214  		c.Assert(err, IsNil)
   215  	}
   216  }
   217  
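// TestDiskQuotaLock verifies the disk quota lock's writer preference: once a writer is
// waiting, TryRLock eventually starts failing, yet the write lock is only granted after
// every outstanding read lock has been released. The second half stress-tests these
// invariants with concurrent readers and writers using the shared counter.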
   218  func (s *restoreSuite) TestDiskQuotaLock(c *C) {
   219  	lock := newDiskQuotaLock()
   220  
   221  	lock.Lock()
   222  	c.Assert(lock.TryRLock(), IsFalse)
   223  	lock.Unlock()
   224  	c.Assert(lock.TryRLock(), IsTrue)
   225  	c.Assert(lock.TryRLock(), IsTrue)
   226  
   227  	rLocked := 2
   228  	lockHeld := make(chan struct{})
   229  	go func() {
   230  		lock.Lock()
   231  		lockHeld <- struct{}{}
   232  	}()
   233  	for lock.TryRLock() {
   234  		rLocked++
   235  		time.Sleep(time.Millisecond)
   236  	}
   237  	select {
   238  	case <-lockHeld:
   239  		c.Fatal("write lock is held before all read locks are released")
   240  	case <-time.NewTimer(10 * time.Millisecond).C:
   241  	}
   242  	for ; rLocked > 0; rLocked-- {
   243  		lock.RUnlock()
   244  	}
   245  	<-lockHeld
   246  	lock.Unlock()
   247  
   248  	done := make(chan struct{})
   249  	count := int32(0)
   250  	reader := func() {
   251  		for i := 0; i < 1000; i++ {
   252  			if lock.TryRLock() {
   253  				n := atomic.AddInt32(&count, 1)
   254  				if n < 1 || n >= 10000 {
   255  					lock.RUnlock()
   256  					panic(fmt.Sprintf("unexpected count(%d)", n))
   257  				}
   258  				for i := 0; i < 100; i++ {
   259  				}
   260  				atomic.AddInt32(&count, -1)
   261  				lock.RUnlock()
   262  			}
   263  			time.Sleep(time.Microsecond)
   264  		}
   265  		done <- struct{}{}
   266  	}
   267  	writer := func() {
   268  		for i := 0; i < 1000; i++ {
   269  			lock.Lock()
   270  			n := atomic.AddInt32(&count, 10000)
   271  			if n != 10000 {
    272  				lock.Unlock()
   273  				panic(fmt.Sprintf("unexpected count(%d)", n))
   274  			}
   275  			for i := 0; i < 100; i++ {
   276  			}
   277  			atomic.AddInt32(&count, -10000)
   278  			lock.Unlock()
   279  			time.Sleep(time.Microsecond)
   280  		}
   281  		done <- struct{}{}
   282  	}
   283  	for i := 0; i < 5; i++ {
   284  		go reader()
   285  	}
   286  	for i := 0; i < 2; i++ {
   287  		go writer()
   288  	}
   289  	for i := 0; i < 5; i++ {
   290  		go reader()
   291  	}
   292  	for i := 0; i < 12; i++ {
   293  		<-done
   294  	}
   295  }
   296  
   297  var _ = Suite(&tableRestoreSuite{})
   298  
   299  type tableRestoreSuiteBase struct {
   300  	tr  *TableRestore
   301  	cfg *config.Config
   302  
   303  	tableInfo *checkpoints.TidbTableInfo
   304  	dbInfo    *checkpoints.TidbDBInfo
   305  	tableMeta *mydump.MDTableMeta
   306  
   307  	store storage.ExternalStorage
   308  }
   309  
   310  type tableRestoreSuite struct {
   311  	tableRestoreSuiteBase
   312  }
   313  
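// SetUpSuite builds the shared fixture: a mock table `db`.`table` (columns a, b, c with an
// index on b) and a fake dump directory containing six 37-byte SQL data files plus one
// 14-byte CSV file, all wrapped in an MDTableMeta backed by local storage.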
   314  func (s *tableRestoreSuiteBase) SetUpSuite(c *C) {
   315  	// Produce a mock table info
   316  
   317  	p := parser.New()
   318  	p.SetSQLMode(mysql.ModeANSIQuotes)
   319  	se := tmock.NewContext()
   320  	node, err := p.ParseOneStmt(`
   321  		CREATE TABLE "table" (
   322  			a INT,
   323  			b INT,
   324  			c INT,
   325  			KEY (b)
   326  		)
   327  	`, "", "")
   328  	c.Assert(err, IsNil)
   329  	core, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), 0xabcdef)
   330  	c.Assert(err, IsNil)
   331  	core.State = model.StatePublic
   332  
   333  	s.tableInfo = &checkpoints.TidbTableInfo{Name: "table", DB: "db", Core: core}
   334  	s.dbInfo = &checkpoints.TidbDBInfo{
   335  		Name:   "db",
   336  		Tables: map[string]*checkpoints.TidbTableInfo{"table": s.tableInfo},
   337  	}
   338  
   339  	// Write some sample SQL dump
   340  
   341  	fakeDataDir := c.MkDir()
   342  
   343  	store, err := storage.NewLocalStorage(fakeDataDir)
   344  	c.Assert(err, IsNil)
   345  	s.store = store
   346  
   347  	fakeDataFilesCount := 6
   348  	fakeDataFilesContent := []byte("INSERT INTO `table` VALUES (1, 2, 3);")
   349  	c.Assert(len(fakeDataFilesContent), Equals, 37)
   350  	fakeDataFiles := make([]mydump.FileInfo, 0, fakeDataFilesCount)
   351  	for i := 1; i <= fakeDataFilesCount; i++ {
   352  		fakeFileName := fmt.Sprintf("db.table.%d.sql", i)
   353  		fakeDataPath := filepath.Join(fakeDataDir, fakeFileName)
   354  		err = os.WriteFile(fakeDataPath, fakeDataFilesContent, 0o644)
   355  		c.Assert(err, IsNil)
   356  		fakeDataFiles = append(fakeDataFiles, mydump.FileInfo{
   357  			TableName: filter.Table{Schema: "db", Name: "table"},
   358  			FileMeta: mydump.SourceFileMeta{
   359  				Path:     fakeFileName,
   360  				Type:     mydump.SourceTypeSQL,
   361  				SortKey:  fmt.Sprintf("%d", i),
   362  				FileSize: 37,
   363  			},
   364  		})
   365  	}
   366  
   367  	fakeCsvContent := []byte("1,2,3\r\n4,5,6\r\n")
   368  	csvName := "db.table.99.csv"
   369  	err = os.WriteFile(filepath.Join(fakeDataDir, csvName), fakeCsvContent, 0o644)
   370  	c.Assert(err, IsNil)
   371  	fakeDataFiles = append(fakeDataFiles, mydump.FileInfo{
   372  		TableName: filter.Table{Schema: "db", Name: "table"},
   373  		FileMeta: mydump.SourceFileMeta{
   374  			Path:     csvName,
   375  			Type:     mydump.SourceTypeCSV,
   376  			SortKey:  "99",
   377  			FileSize: 14,
   378  		},
   379  	})
   380  
   381  	s.tableMeta = &mydump.MDTableMeta{
   382  		DB:        "db",
   383  		Name:      "table",
   384  		TotalSize: 222,
   385  		SchemaFile: mydump.FileInfo{
   386  			TableName: filter.Table{Schema: "db", Name: "table"},
   387  			FileMeta: mydump.SourceFileMeta{
   388  				Path: "db.table-schema.sql",
   389  				Type: mydump.SourceTypeTableSchema,
   390  			},
   391  		},
   392  		DataFiles: fakeDataFiles,
   393  	}
   394  }
   395  
   396  func (s *tableRestoreSuiteBase) SetUpTest(c *C) {
   397  	// Collect into the test TableRestore structure
   398  	var err error
   399  	s.tr, err = NewTableRestore("`db`.`table`", s.tableMeta, s.dbInfo, s.tableInfo, &checkpoints.TableCheckpoint{}, nil)
   400  	c.Assert(err, IsNil)
   401  
   402  	s.cfg = config.NewConfig()
   403  	s.cfg.Mydumper.BatchSize = 111
   404  	s.cfg.App.TableConcurrency = 2
   405  }
   406  
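// TestPopulateChunks checks that, with BatchSize = 111, the six 37-byte SQL files and the
// 14-byte CSV are grouped into three data engines (three SQL files each in engines 0 and 1,
// the CSV alone in engine 2) plus the index engine -1, with contiguous row ID ranges
// across the chunks.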
   407  func (s *tableRestoreSuite) TestPopulateChunks(c *C) {
   408  	_ = failpoint.Enable("github.com/pingcap/br/pkg/lightning/restore/PopulateChunkTimestamp", "return(1234567897)")
   409  	defer func() {
   410  		_ = failpoint.Disable("github.com/pingcap/br/pkg/lightning/restore/PopulateChunkTimestamp")
   411  	}()
   412  
   413  	cp := &checkpoints.TableCheckpoint{
   414  		Engines: make(map[int32]*checkpoints.EngineCheckpoint),
   415  	}
   416  
   417  	rc := &Controller{cfg: s.cfg, ioWorkers: worker.NewPool(context.Background(), 1, "io"), store: s.store}
   418  	err := s.tr.populateChunks(context.Background(), rc, cp)
   419  	c.Assert(err, IsNil)
   420  	//nolint:dupl // false positive.
   421  	c.Assert(cp.Engines, DeepEquals, map[int32]*checkpoints.EngineCheckpoint{
   422  		-1: {
   423  			Status: checkpoints.CheckpointStatusLoaded,
   424  		},
   425  		0: {
   426  			Status: checkpoints.CheckpointStatusLoaded,
   427  			Chunks: []*checkpoints.ChunkCheckpoint{
   428  				{
   429  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[0].FileMeta.Path, Offset: 0},
   430  					FileMeta: s.tr.tableMeta.DataFiles[0].FileMeta,
   431  					Chunk: mydump.Chunk{
   432  						Offset:       0,
   433  						EndOffset:    37,
   434  						PrevRowIDMax: 0,
   435  						RowIDMax:     7, // 37 bytes with 3 columns can store at most 7 rows.
   436  					},
   437  					Timestamp: 1234567897,
   438  				},
   439  				{
   440  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[1].FileMeta.Path, Offset: 0},
   441  					FileMeta: s.tr.tableMeta.DataFiles[1].FileMeta,
   442  					Chunk: mydump.Chunk{
   443  						Offset:       0,
   444  						EndOffset:    37,
   445  						PrevRowIDMax: 7,
   446  						RowIDMax:     14,
   447  					},
   448  					Timestamp: 1234567897,
   449  				},
   450  				{
   451  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[2].FileMeta.Path, Offset: 0},
   452  					FileMeta: s.tr.tableMeta.DataFiles[2].FileMeta,
   453  					Chunk: mydump.Chunk{
   454  						Offset:       0,
   455  						EndOffset:    37,
   456  						PrevRowIDMax: 14,
   457  						RowIDMax:     21,
   458  					},
   459  					Timestamp: 1234567897,
   460  				},
   461  			},
   462  		},
   463  		1: {
   464  			Status: checkpoints.CheckpointStatusLoaded,
   465  			Chunks: []*checkpoints.ChunkCheckpoint{
   466  				{
   467  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[3].FileMeta.Path, Offset: 0},
   468  					FileMeta: s.tr.tableMeta.DataFiles[3].FileMeta,
   469  					Chunk: mydump.Chunk{
   470  						Offset:       0,
   471  						EndOffset:    37,
   472  						PrevRowIDMax: 21,
   473  						RowIDMax:     28,
   474  					},
   475  					Timestamp: 1234567897,
   476  				},
   477  				{
   478  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[4].FileMeta.Path, Offset: 0},
   479  					FileMeta: s.tr.tableMeta.DataFiles[4].FileMeta,
   480  					Chunk: mydump.Chunk{
   481  						Offset:       0,
   482  						EndOffset:    37,
   483  						PrevRowIDMax: 28,
   484  						RowIDMax:     35,
   485  					},
   486  					Timestamp: 1234567897,
   487  				},
   488  				{
   489  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[5].FileMeta.Path, Offset: 0},
   490  					FileMeta: s.tr.tableMeta.DataFiles[5].FileMeta,
   491  					Chunk: mydump.Chunk{
   492  						Offset:       0,
   493  						EndOffset:    37,
   494  						PrevRowIDMax: 35,
   495  						RowIDMax:     42,
   496  					},
   497  					Timestamp: 1234567897,
   498  				},
   499  			},
   500  		},
   501  		2: {
   502  			Status: checkpoints.CheckpointStatusLoaded,
   503  			Chunks: []*checkpoints.ChunkCheckpoint{
   504  				{
   505  					Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[6].FileMeta.Path, Offset: 0},
   506  					FileMeta: s.tr.tableMeta.DataFiles[6].FileMeta,
   507  					Chunk: mydump.Chunk{
   508  						Offset:       0,
   509  						EndOffset:    14,
   510  						PrevRowIDMax: 42,
   511  						RowIDMax:     46,
   512  					},
   513  					Timestamp: 1234567897,
   514  				},
   515  			},
   516  		},
   517  	})
   518  
    519  	// set csv header to true; this will cause the column check to fail
   520  	s.cfg.Mydumper.CSV.Header = true
   521  	s.cfg.Mydumper.StrictFormat = true
   522  	regionSize := s.cfg.Mydumper.MaxRegionSize
   523  	s.cfg.Mydumper.MaxRegionSize = 5
   524  	err = s.tr.populateChunks(context.Background(), rc, cp)
   525  	c.Assert(err, NotNil)
   526  	c.Assert(err, ErrorMatches, `.*unknown columns in header \[1 2 3\]`)
   527  	s.cfg.Mydumper.MaxRegionSize = regionSize
   528  	s.cfg.Mydumper.CSV.Header = false
   529  }
   530  
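// TestPopulateChunksCSVHeader exercises strict-format splitting of CSV files that carry a
// header row: with MaxRegionSize = 40, files larger than the region size are split into
// multiple chunks, and those chunks begin past the header row (non-zero start offset) and
// record the column permutation derived from it.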
   531  func (s *tableRestoreSuite) TestPopulateChunksCSVHeader(c *C) {
   532  	fakeDataDir := c.MkDir()
   533  	store, err := storage.NewLocalStorage(fakeDataDir)
   534  	c.Assert(err, IsNil)
   535  
   536  	fakeDataFiles := make([]mydump.FileInfo, 0)
   537  
   538  	fakeCsvContents := []string{
   539  		// small full header
   540  		"a,b,c\r\n1,2,3\r\n",
   541  		// small partial header
   542  		"b,c\r\n2,3\r\n",
   543  		// big full header
   544  		"a,b,c\r\n90000,80000,700000\r\n1000,2000,3000\r\n11,22,33\r\n3,4,5\r\n",
   545  		// big full header unordered
   546  		"c,a,b\r\n,1000,2000,3000\r\n11,22,33\r\n1000,2000,404\r\n3,4,5\r\n90000,80000,700000\r\n7999999,89999999,9999999\r\n",
   547  		// big partial header
   548  		"b,c\r\n2000001,30000001\r\n35231616,462424626\r\n62432,434898934\r\n",
   549  	}
   550  	total := 0
   551  	for i, s := range fakeCsvContents {
   552  		csvName := fmt.Sprintf("db.table.%02d.csv", i)
   553  		err := os.WriteFile(filepath.Join(fakeDataDir, csvName), []byte(s), 0o644)
   554  		c.Assert(err, IsNil)
   555  		fakeDataFiles = append(fakeDataFiles, mydump.FileInfo{
   556  			TableName: filter.Table{Schema: "db", Name: "table"},
   557  			FileMeta:  mydump.SourceFileMeta{Path: csvName, Type: mydump.SourceTypeCSV, SortKey: fmt.Sprintf("%02d", i), FileSize: int64(len(s))},
   558  		})
   559  		total += len(s)
   560  	}
   561  	tableMeta := &mydump.MDTableMeta{
   562  		DB:         "db",
   563  		Name:       "table",
   564  		TotalSize:  int64(total),
   565  		SchemaFile: mydump.FileInfo{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: mydump.SourceFileMeta{Path: "db.table-schema.sql", Type: mydump.SourceTypeTableSchema}},
   566  		DataFiles:  fakeDataFiles,
   567  	}
   568  
   569  	_ = failpoint.Enable("github.com/pingcap/br/pkg/lightning/restore/PopulateChunkTimestamp", "return(1234567897)")
   570  	defer func() {
   571  		_ = failpoint.Disable("github.com/pingcap/br/pkg/lightning/restore/PopulateChunkTimestamp")
   572  	}()
   573  
   574  	cp := &checkpoints.TableCheckpoint{
   575  		Engines: make(map[int32]*checkpoints.EngineCheckpoint),
   576  	}
   577  
   578  	cfg := config.NewConfig()
   579  	cfg.Mydumper.BatchSize = 100
   580  	cfg.Mydumper.MaxRegionSize = 40
   581  
   582  	cfg.Mydumper.CSV.Header = true
   583  	cfg.Mydumper.StrictFormat = true
   584  	rc := &Controller{cfg: cfg, ioWorkers: worker.NewPool(context.Background(), 1, "io"), store: store}
   585  
   586  	tr, err := NewTableRestore("`db`.`table`", tableMeta, s.dbInfo, s.tableInfo, &checkpoints.TableCheckpoint{}, nil)
   587  	c.Assert(err, IsNil)
   588  	c.Assert(tr.populateChunks(context.Background(), rc, cp), IsNil)
   589  
   590  	c.Assert(cp.Engines, DeepEquals, map[int32]*checkpoints.EngineCheckpoint{
   591  		-1: {
   592  			Status: checkpoints.CheckpointStatusLoaded,
   593  		},
   594  		0: {
   595  			Status: checkpoints.CheckpointStatusLoaded,
   596  			Chunks: []*checkpoints.ChunkCheckpoint{
   597  				{
   598  					Key:      checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[0].FileMeta.Path, Offset: 0},
   599  					FileMeta: tableMeta.DataFiles[0].FileMeta,
   600  					Chunk: mydump.Chunk{
   601  						Offset:       0,
   602  						EndOffset:    14,
   603  						PrevRowIDMax: 0,
    604  					RowIDMax:     4, // estimated row count upper bound for this 14-byte file.
   605  					},
   606  					Timestamp: 1234567897,
   607  				},
   608  				{
   609  					Key:      checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[1].FileMeta.Path, Offset: 0},
   610  					FileMeta: tableMeta.DataFiles[1].FileMeta,
   611  					Chunk: mydump.Chunk{
   612  						Offset:       0,
   613  						EndOffset:    10,
   614  						PrevRowIDMax: 4,
   615  						RowIDMax:     7,
   616  					},
   617  					Timestamp: 1234567897,
   618  				},
   619  				{
   620  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[2].FileMeta.Path, Offset: 6},
   621  					FileMeta:          tableMeta.DataFiles[2].FileMeta,
   622  					ColumnPermutation: []int{0, 1, 2, -1},
   623  					Chunk: mydump.Chunk{
   624  						Offset:       6,
   625  						EndOffset:    52,
   626  						PrevRowIDMax: 7,
   627  						RowIDMax:     20,
   628  						Columns:      []string{"a", "b", "c"},
   629  					},
   630  
   631  					Timestamp: 1234567897,
   632  				},
   633  				{
   634  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[2].FileMeta.Path, Offset: 52},
   635  					FileMeta:          tableMeta.DataFiles[2].FileMeta,
   636  					ColumnPermutation: []int{0, 1, 2, -1},
   637  					Chunk: mydump.Chunk{
   638  						Offset:       52,
   639  						EndOffset:    60,
   640  						PrevRowIDMax: 20,
   641  						RowIDMax:     22,
   642  						Columns:      []string{"a", "b", "c"},
   643  					},
   644  					Timestamp: 1234567897,
   645  				},
   646  				{
   647  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[3].FileMeta.Path, Offset: 6},
   648  					FileMeta:          tableMeta.DataFiles[3].FileMeta,
   649  					ColumnPermutation: []int{1, 2, 0, -1},
   650  					Chunk: mydump.Chunk{
   651  						Offset:       6,
   652  						EndOffset:    48,
   653  						PrevRowIDMax: 22,
   654  						RowIDMax:     35,
   655  						Columns:      []string{"c", "a", "b"},
   656  					},
   657  					Timestamp: 1234567897,
   658  				},
   659  			},
   660  		},
   661  		1: {
   662  			Status: checkpoints.CheckpointStatusLoaded,
   663  			Chunks: []*checkpoints.ChunkCheckpoint{
   664  				{
   665  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[3].FileMeta.Path, Offset: 48},
   666  					FileMeta:          tableMeta.DataFiles[3].FileMeta,
   667  					ColumnPermutation: []int{1, 2, 0, -1},
   668  					Chunk: mydump.Chunk{
   669  						Offset:       48,
   670  						EndOffset:    101,
   671  						PrevRowIDMax: 35,
   672  						RowIDMax:     48,
   673  						Columns:      []string{"c", "a", "b"},
   674  					},
   675  					Timestamp: 1234567897,
   676  				},
   677  				{
   678  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[3].FileMeta.Path, Offset: 101},
   679  					FileMeta:          tableMeta.DataFiles[3].FileMeta,
   680  					ColumnPermutation: []int{1, 2, 0, -1},
   681  					Chunk: mydump.Chunk{
   682  						Offset:       101,
   683  						EndOffset:    102,
   684  						PrevRowIDMax: 48,
   685  						RowIDMax:     48,
   686  						Columns:      []string{"c", "a", "b"},
   687  					},
   688  					Timestamp: 1234567897,
   689  				},
   690  				{
   691  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[4].FileMeta.Path, Offset: 4},
   692  					FileMeta:          tableMeta.DataFiles[4].FileMeta,
   693  					ColumnPermutation: []int{-1, 0, 1, -1},
   694  					Chunk: mydump.Chunk{
   695  						Offset:       4,
   696  						EndOffset:    59,
   697  						PrevRowIDMax: 48,
   698  						RowIDMax:     61,
   699  						Columns:      []string{"b", "c"},
   700  					},
   701  					Timestamp: 1234567897,
   702  				},
   703  			},
   704  		},
   705  		2: {
   706  			Status: checkpoints.CheckpointStatusLoaded,
   707  			Chunks: []*checkpoints.ChunkCheckpoint{
   708  				{
   709  					Key:               checkpoints.ChunkCheckpointKey{Path: tableMeta.DataFiles[4].FileMeta.Path, Offset: 59},
   710  					FileMeta:          tableMeta.DataFiles[4].FileMeta,
   711  					ColumnPermutation: []int{-1, 0, 1, -1},
   712  					Chunk: mydump.Chunk{
   713  						Offset:       59,
   714  						EndOffset:    60,
   715  						PrevRowIDMax: 61,
   716  						RowIDMax:     61,
   717  						Columns:      []string{"b", "c"},
   718  					},
   719  					Timestamp: 1234567897,
   720  				},
   721  			},
   722  		},
   723  	})
   724  }
   725  
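// TestGetColumnsNames checks the mapping from a column permutation back to the data file's
// column list: permutation[i] is the position of table column i (a, b, c, _tidb_rowid)
// within the file, or -1 if it is absent; e.g. []int{-1, 0, 1, -1} means the file contains
// only columns "b" and "c".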
   726  func (s *tableRestoreSuite) TestGetColumnsNames(c *C) {
   727  	c.Assert(getColumnNames(s.tableInfo.Core, []int{0, 1, 2, -1}), DeepEquals, []string{"a", "b", "c"})
   728  	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, 0, 2, -1}), DeepEquals, []string{"b", "a", "c"})
   729  	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 0, 1, -1}), DeepEquals, []string{"b", "c"})
   730  	c.Assert(getColumnNames(s.tableInfo.Core, []int{0, 1, -1, -1}), DeepEquals, []string{"a", "b"})
   731  	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, -1, 0, -1}), DeepEquals, []string{"c", "a"})
   732  	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 0, -1, -1}), DeepEquals, []string{"b"})
   733  	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, 2, 3, 0}), DeepEquals, []string{"_tidb_rowid", "a", "b", "c"})
   734  	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, 0, 2, 3}), DeepEquals, []string{"b", "a", "c", "_tidb_rowid"})
   735  	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 0, 2, 1}), DeepEquals, []string{"b", "_tidb_rowid", "c"})
   736  	c.Assert(getColumnNames(s.tableInfo.Core, []int{2, -1, 0, 1}), DeepEquals, []string{"c", "_tidb_rowid", "a"})
   737  	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 1, -1, 0}), DeepEquals, []string{"_tidb_rowid", "b"})
   738  }
   739  
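// TestInitializeColumns checks that a nil column list falls back to the table's own column
// order, that explicit column lists (optionally including _tidb_rowid) produce the matching
// permutation, and that unknown column names are rejected.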
   740  func (s *tableRestoreSuite) TestInitializeColumns(c *C) {
   741  	ccp := &checkpoints.ChunkCheckpoint{}
   742  	c.Assert(s.tr.initializeColumns(nil, ccp), IsNil)
   743  	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{0, 1, 2, -1})
   744  
   745  	ccp.ColumnPermutation = nil
   746  	c.Assert(s.tr.initializeColumns([]string{"b", "c", "a"}, ccp), IsNil)
   747  	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{2, 0, 1, -1})
   748  
   749  	ccp.ColumnPermutation = nil
   750  	c.Assert(s.tr.initializeColumns([]string{"b"}, ccp), IsNil)
   751  	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{-1, 0, -1, -1})
   752  
   753  	ccp.ColumnPermutation = nil
   754  	c.Assert(s.tr.initializeColumns([]string{"_tidb_rowid", "b", "a", "c"}, ccp), IsNil)
   755  	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{2, 1, 3, 0})
   756  
   757  	ccp.ColumnPermutation = nil
   758  	err := s.tr.initializeColumns([]string{"_tidb_rowid", "b", "a", "c", "d"}, ccp)
   759  	c.Assert(err, NotNil)
   760  	c.Assert(err, ErrorMatches, `unknown columns in header \[d\]`)
   761  
   762  	ccp.ColumnPermutation = nil
   763  	err = s.tr.initializeColumns([]string{"e", "b", "c", "d"}, ccp)
   764  	c.Assert(err, NotNil)
   765  	c.Assert(err, ErrorMatches, `unknown columns in header \[e d\]`)
   766  }
   767  
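// TestCompareChecksumSuccess mocks the remote ADMIN CHECKSUM flow: DoChecksum is expected
// to stretch tikv_gc_life_time to 100h for the duration of the query and restore the
// previous value afterwards, and the returned checksum must match the locally computed
// KV checksum.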
   768  func (s *tableRestoreSuite) TestCompareChecksumSuccess(c *C) {
   769  	db, mock, err := sqlmock.New()
   770  	c.Assert(err, IsNil)
   771  
   772  	mock.ExpectQuery("SELECT.*tikv_gc_life_time.*").
   773  		WillReturnRows(sqlmock.NewRows([]string{"VARIABLE_VALUE"}).AddRow("10m"))
   774  	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
   775  		WithArgs("100h0m0s").
   776  		WillReturnResult(sqlmock.NewResult(1, 1))
   777  	mock.ExpectQuery("ADMIN CHECKSUM.*").
   778  		WillReturnRows(
   779  			sqlmock.NewRows([]string{"Db_name", "Table_name", "Checksum_crc64_xor", "Total_kvs", "Total_bytes"}).
   780  				AddRow("db", "table", 1234567890, 12345, 1234567),
   781  		)
   782  	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
   783  		WithArgs("10m").
   784  		WillReturnResult(sqlmock.NewResult(2, 1))
   785  	mock.ExpectClose()
   786  
   787  	ctx := MockDoChecksumCtx(db)
   788  	remoteChecksum, err := DoChecksum(ctx, s.tr.tableInfo)
   789  	c.Assert(err, IsNil)
   790  	err = s.tr.compareChecksum(remoteChecksum, verification.MakeKVChecksum(1234567, 12345, 1234567890))
   791  	c.Assert(err, IsNil)
   792  
   793  	c.Assert(db.Close(), IsNil)
   794  	c.Assert(mock.ExpectationsWereMet(), IsNil)
   795  }
   796  
   797  func (s *tableRestoreSuite) TestCompareChecksumFailure(c *C) {
   798  	db, mock, err := sqlmock.New()
   799  	c.Assert(err, IsNil)
   800  
   801  	mock.ExpectQuery("SELECT.*tikv_gc_life_time.*").
   802  		WillReturnRows(sqlmock.NewRows([]string{"VARIABLE_VALUE"}).AddRow("10m"))
   803  	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
   804  		WithArgs("100h0m0s").
   805  		WillReturnResult(sqlmock.NewResult(1, 1))
   806  	mock.ExpectQuery("ADMIN CHECKSUM TABLE `db`\\.`table`").
   807  		WillReturnRows(
   808  			sqlmock.NewRows([]string{"Db_name", "Table_name", "Checksum_crc64_xor", "Total_kvs", "Total_bytes"}).
   809  				AddRow("db", "table", 1234567890, 12345, 1234567),
   810  		)
   811  	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
   812  		WithArgs("10m").
   813  		WillReturnResult(sqlmock.NewResult(2, 1))
   814  	mock.ExpectClose()
   815  
   816  	ctx := MockDoChecksumCtx(db)
   817  	remoteChecksum, err := DoChecksum(ctx, s.tr.tableInfo)
   818  	c.Assert(err, IsNil)
   819  	err = s.tr.compareChecksum(remoteChecksum, verification.MakeKVChecksum(9876543, 54321, 1357924680))
   820  	c.Assert(err, ErrorMatches, "checksum mismatched.*")
   821  
   822  	c.Assert(db.Close(), IsNil)
   823  	c.Assert(mock.ExpectationsWereMet(), IsNil)
   824  }
   825  
   826  func (s *tableRestoreSuite) TestAnalyzeTable(c *C) {
   827  	db, mock, err := sqlmock.New()
   828  	c.Assert(err, IsNil)
   829  
   830  	mock.ExpectExec("ANALYZE TABLE `db`\\.`table`").
   831  		WillReturnResult(sqlmock.NewResult(1, 1))
   832  	mock.ExpectClose()
   833  
   834  	ctx := context.Background()
   835  	defaultSQLMode, err := mysql.GetSQLMode(mysql.DefaultSQLMode)
   836  	c.Assert(err, IsNil)
   837  	g := glue.NewExternalTiDBGlue(db, defaultSQLMode)
   838  	err = s.tr.analyzeTable(ctx, g)
   839  	c.Assert(err, IsNil)
   840  
   841  	c.Assert(db.Close(), IsNil)
   842  	c.Assert(mock.ExpectationsWereMet(), IsNil)
   843  }
   844  
   845  func (s *tableRestoreSuite) TestImportKVSuccess(c *C) {
   846  	controller := gomock.NewController(c)
   847  	defer controller.Finish()
   848  	mockBackend := mock.NewMockBackend(controller)
   849  	importer := backend.MakeBackend(mockBackend)
   850  	chptCh := make(chan saveCp)
   851  	defer close(chptCh)
   852  	rc := &Controller{saveCpCh: chptCh}
   853  	go func() {
   854  		for range chptCh {
   855  		}
   856  	}()
   857  
   858  	ctx := context.Background()
   859  	engineUUID := uuid.New()
   860  
   861  	mockBackend.EXPECT().
   862  		CloseEngine(ctx, nil, engineUUID).
   863  		Return(nil)
   864  	mockBackend.EXPECT().
   865  		ImportEngine(ctx, engineUUID).
   866  		Return(nil)
   867  	mockBackend.EXPECT().
   868  		CleanupEngine(ctx, engineUUID).
   869  		Return(nil)
   870  
   871  	closedEngine, err := importer.UnsafeCloseEngineWithUUID(ctx, nil, "tag", engineUUID)
   872  	c.Assert(err, IsNil)
   873  	err = s.tr.importKV(ctx, closedEngine, rc, 1)
   874  	c.Assert(err, IsNil)
   875  }
   876  
   877  func (s *tableRestoreSuite) TestImportKVFailure(c *C) {
   878  	controller := gomock.NewController(c)
   879  	defer controller.Finish()
   880  	mockBackend := mock.NewMockBackend(controller)
   881  	importer := backend.MakeBackend(mockBackend)
   882  	chptCh := make(chan saveCp)
   883  	defer close(chptCh)
   884  	rc := &Controller{saveCpCh: chptCh}
   885  	go func() {
   886  		for range chptCh {
   887  		}
   888  	}()
   889  
   890  	ctx := context.Background()
   891  	engineUUID := uuid.New()
   892  
   893  	mockBackend.EXPECT().
   894  		CloseEngine(ctx, nil, engineUUID).
   895  		Return(nil)
   896  	mockBackend.EXPECT().
   897  		ImportEngine(ctx, engineUUID).
   898  		Return(errors.Annotate(context.Canceled, "fake import error"))
   899  
   900  	closedEngine, err := importer.UnsafeCloseEngineWithUUID(ctx, nil, "tag", engineUUID)
   901  	c.Assert(err, IsNil)
   902  	err = s.tr.importKV(ctx, closedEngine, rc, 1)
   903  	c.Assert(err, ErrorMatches, "fake import error.*")
   904  }
   905  
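// TestTableRestoreMetrics runs restoreTables end to end against the noop backend
// (BatchSize = 1, checksum disabled) and asserts the deltas of the chunk, engine and table
// counters recorded during the run.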
   906  func (s *tableRestoreSuite) TestTableRestoreMetrics(c *C) {
   907  	controller := gomock.NewController(c)
   908  	defer controller.Finish()
   909  
   910  	chunkPendingBase := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStatePending))
   911  	chunkFinishedBase := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStatePending))
   912  	engineFinishedBase := metric.ReadCounter(metric.ProcessedEngineCounter.WithLabelValues("imported", metric.TableResultSuccess))
   913  	tableFinishedBase := metric.ReadCounter(metric.TableCounter.WithLabelValues("index_imported", metric.TableResultSuccess))
   914  
   915  	ctx := context.Background()
   916  	chptCh := make(chan saveCp)
   917  	defer close(chptCh)
   918  	cfg := config.NewConfig()
   919  	cfg.Mydumper.BatchSize = 1
   920  	cfg.PostRestore.Checksum = config.OpLevelOff
   921  
   922  	cfg.Checkpoint.Enable = false
   923  	cfg.TiDB.Host = "127.0.0.1"
   924  	cfg.TiDB.StatusPort = 10080
   925  	cfg.TiDB.Port = 4000
   926  	cfg.TiDB.PdAddr = "127.0.0.1:2379"
   927  
   928  	cfg.Mydumper.SourceDir = "."
   929  	cfg.Mydumper.CSV.Header = false
   930  	cfg.TikvImporter.Backend = config.BackendImporter
   931  	tls, err := cfg.ToTLS()
   932  	c.Assert(err, IsNil)
   933  
   934  	err = cfg.Adjust(ctx)
   935  	c.Assert(err, IsNil)
   936  
   937  	cpDB := checkpoints.NewNullCheckpointsDB()
   938  	g := mock.NewMockGlue(controller)
   939  	rc := &Controller{
   940  		cfg: cfg,
   941  		dbMetas: []*mydump.MDDatabaseMeta{
   942  			{
   943  				Name:   s.tableInfo.DB,
   944  				Tables: []*mydump.MDTableMeta{s.tableMeta},
   945  			},
   946  		},
   947  		dbInfos: map[string]*checkpoints.TidbDBInfo{
   948  			s.tableInfo.DB: s.dbInfo,
   949  		},
   950  		tableWorkers:      worker.NewPool(ctx, 6, "table"),
   951  		ioWorkers:         worker.NewPool(ctx, 5, "io"),
   952  		indexWorkers:      worker.NewPool(ctx, 2, "index"),
   953  		regionWorkers:     worker.NewPool(ctx, 10, "region"),
    954  		checksumWorks:     worker.NewPool(ctx, 2, "checksum"),
   955  		saveCpCh:          chptCh,
   956  		pauser:            DeliverPauser,
   957  		backend:           noop.NewNoopBackend(),
   958  		tidbGlue:          g,
   959  		errorSummaries:    makeErrorSummaries(log.L()),
   960  		tls:               tls,
   961  		checkpointsDB:     cpDB,
   962  		closedEngineLimit: worker.NewPool(ctx, 1, "closed_engine"),
   963  		store:             s.store,
   964  		metaMgrBuilder:    noopMetaMgrBuilder{},
   965  		diskQuotaLock:     newDiskQuotaLock(),
   966  	}
   967  	go func() {
   968  		for range chptCh {
   969  		}
   970  	}()
   971  	exec := mock.NewMockSQLExecutor(controller)
   972  	g.EXPECT().GetSQLExecutor().Return(exec).AnyTimes()
   973  	exec.EXPECT().ObtainStringWithLog(gomock.Any(), "SELECT version()", gomock.Any(), gomock.Any()).
   974  		Return("5.7.25-TiDB-v5.0.1", nil).AnyTimes()
   975  
   976  	web.BroadcastInitProgress(rc.dbMetas)
   977  
   978  	err = rc.restoreTables(ctx)
   979  	c.Assert(err, IsNil)
   980  
   981  	chunkPending := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStatePending))
   982  	chunkFinished := metric.ReadCounter(metric.ChunkCounter.WithLabelValues(metric.ChunkStatePending))
   983  	c.Assert(chunkPending-chunkPendingBase, Equals, float64(7))
   984  	c.Assert(chunkFinished-chunkFinishedBase, Equals, chunkPending)
   985  
   986  	engineFinished := metric.ReadCounter(metric.ProcessedEngineCounter.WithLabelValues("imported", metric.TableResultSuccess))
   987  	c.Assert(engineFinished-engineFinishedBase, Equals, float64(8))
   988  
   989  	tableFinished := metric.ReadCounter(metric.TableCounter.WithLabelValues("index_imported", metric.TableResultSuccess))
   990  	c.Assert(tableFinished-tableFinishedBase, Equals, float64(1))
   991  }
   992  
   993  var _ = Suite(&chunkRestoreSuite{})
   994  
   995  type chunkRestoreSuite struct {
   996  	tableRestoreSuiteBase
   997  	cr *chunkRestore
   998  }
   999  
  1000  func (s *chunkRestoreSuite) SetUpTest(c *C) {
  1001  	s.tableRestoreSuiteBase.SetUpTest(c)
  1002  
  1003  	ctx := context.Background()
  1004  	w := worker.NewPool(ctx, 5, "io")
  1005  
  1006  	chunk := checkpoints.ChunkCheckpoint{
  1007  		Key:      checkpoints.ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[1].FileMeta.Path, Offset: 0},
  1008  		FileMeta: s.tr.tableMeta.DataFiles[1].FileMeta,
  1009  		Chunk: mydump.Chunk{
  1010  			Offset:       0,
  1011  			EndOffset:    37,
  1012  			PrevRowIDMax: 18,
  1013  			RowIDMax:     36,
  1014  		},
  1015  	}
  1016  
  1017  	var err error
  1018  	s.cr, err = newChunkRestore(context.Background(), 1, s.cfg, &chunk, w, s.store, nil)
  1019  	c.Assert(err, IsNil)
  1020  }
  1021  
  1022  func (s *chunkRestoreSuite) TearDownTest(c *C) {
  1023  	s.cr.close()
  1024  }
  1025  
  1026  func (s *chunkRestoreSuite) TestDeliverLoopCancel(c *C) {
  1027  	rc := &Controller{backend: importer.NewMockImporter(nil, "")}
  1028  
  1029  	ctx, cancel := context.WithCancel(context.Background())
  1030  	kvsCh := make(chan []deliveredKVs)
  1031  	go cancel()
  1032  	_, err := s.cr.deliverLoop(ctx, kvsCh, s.tr, 0, nil, nil, rc)
  1033  	c.Assert(errors.Cause(err), Equals, context.Canceled)
  1034  }
  1035  
  1036  func (s *chunkRestoreSuite) TestDeliverLoopEmptyData(c *C) {
  1037  	ctx := context.Background()
  1038  
  1039  	// Open two mock engines.
  1040  
  1041  	controller := gomock.NewController(c)
  1042  	defer controller.Finish()
  1043  	mockBackend := mock.NewMockBackend(controller)
  1044  	importer := backend.MakeBackend(mockBackend)
  1045  
  1046  	mockBackend.EXPECT().OpenEngine(ctx, gomock.Any(), gomock.Any()).Return(nil).Times(2)
  1047  	mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).AnyTimes()
  1048  	mockWriter := mock.NewMockEngineWriter(controller)
  1049  	mockBackend.EXPECT().LocalWriter(ctx, gomock.Any(), gomock.Any()).Return(mockWriter, nil).AnyTimes()
  1050  	mockWriter.EXPECT().
  1051  		AppendRows(ctx, gomock.Any(), gomock.Any(), gomock.Any()).
  1052  		Return(nil).AnyTimes()
  1053  
  1054  	dataEngine, err := importer.OpenEngine(ctx, &backend.EngineConfig{}, s.tr.tableName, 0)
  1055  	c.Assert(err, IsNil)
  1056  	dataWriter, err := dataEngine.LocalWriter(ctx, &backend.LocalWriterConfig{})
  1057  	c.Assert(err, IsNil)
  1058  	indexEngine, err := importer.OpenEngine(ctx, &backend.EngineConfig{}, s.tr.tableName, -1)
  1059  	c.Assert(err, IsNil)
  1060  	indexWriter, err := indexEngine.LocalWriter(ctx, &backend.LocalWriterConfig{})
  1061  	c.Assert(err, IsNil)
  1062  
  1063  	// Deliver nothing.
  1064  
  1065  	cfg := &config.Config{}
  1066  	rc := &Controller{cfg: cfg, backend: importer, diskQuotaLock: newDiskQuotaLock()}
  1067  
  1068  	kvsCh := make(chan []deliveredKVs, 1)
  1069  	kvsCh <- []deliveredKVs{}
  1070  	_, err = s.cr.deliverLoop(ctx, kvsCh, s.tr, 0, dataWriter, indexWriter, rc)
  1071  	c.Assert(err, IsNil)
  1072  }
  1073  
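// TestDeliverLoop feeds one batch containing two data KV pairs and one index KV pair into
// deliverLoop and checks that they are flushed to the data and index writers respectively,
// and that the chunk checkpoint's offset, row ID and checksum are advanced accordingly.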
  1074  func (s *chunkRestoreSuite) TestDeliverLoop(c *C) {
  1075  	ctx := context.Background()
  1076  	kvsCh := make(chan []deliveredKVs)
  1077  	mockCols := []string{"c1", "c2"}
  1078  
  1079  	// Open two mock engines.
  1080  
  1081  	controller := gomock.NewController(c)
  1082  	defer controller.Finish()
  1083  	mockBackend := mock.NewMockBackend(controller)
  1084  	importer := backend.MakeBackend(mockBackend)
  1085  
  1086  	mockBackend.EXPECT().OpenEngine(ctx, gomock.Any(), gomock.Any()).Return(nil).Times(2)
   1087  	// avoid returning the same object on each call
  1088  	mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).Times(1)
  1089  	mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).Times(1)
  1090  	mockWriter := mock.NewMockEngineWriter(controller)
  1091  	mockBackend.EXPECT().LocalWriter(ctx, gomock.Any(), gomock.Any()).Return(mockWriter, nil).AnyTimes()
  1092  	mockWriter.EXPECT().IsSynced().Return(true).AnyTimes()
  1093  
  1094  	dataEngine, err := importer.OpenEngine(ctx, &backend.EngineConfig{}, s.tr.tableName, 0)
  1095  	c.Assert(err, IsNil)
  1096  	indexEngine, err := importer.OpenEngine(ctx, &backend.EngineConfig{}, s.tr.tableName, -1)
  1097  	c.Assert(err, IsNil)
  1098  
  1099  	dataWriter, err := dataEngine.LocalWriter(ctx, &backend.LocalWriterConfig{})
  1100  	c.Assert(err, IsNil)
  1101  	indexWriter, err := indexEngine.LocalWriter(ctx, &backend.LocalWriterConfig{})
  1102  	c.Assert(err, IsNil)
  1103  
  1104  	// Set up the expected API calls to the data engine...
  1105  
  1106  	mockWriter.EXPECT().
  1107  		AppendRows(ctx, s.tr.tableName, mockCols, kv.MakeRowsFromKvPairs([]common.KvPair{
  1108  			{
  1109  				Key: []byte("txxxxxxxx_ryyyyyyyy"),
  1110  				Val: []byte("value1"),
  1111  			},
  1112  			{
  1113  				Key: []byte("txxxxxxxx_rwwwwwwww"),
  1114  				Val: []byte("value2"),
  1115  			},
  1116  		})).
  1117  		Return(nil)
  1118  
  1119  	// ... and the index engine.
  1120  	//
   1121  	// Note: This test assumes the data engine is written before the index engine.
  1122  
  1123  	mockWriter.EXPECT().
  1124  		AppendRows(ctx, s.tr.tableName, mockCols, kv.MakeRowsFromKvPairs([]common.KvPair{
  1125  			{
  1126  				Key: []byte("txxxxxxxx_izzzzzzzz"),
  1127  				Val: []byte("index1"),
  1128  			},
  1129  		})).
  1130  		Return(nil)
  1131  
  1132  	// Now actually start the delivery loop.
  1133  
  1134  	saveCpCh := make(chan saveCp, 2)
  1135  	go func() {
  1136  		kvsCh <- []deliveredKVs{
  1137  			{
  1138  				kvs: kv.MakeRowFromKvPairs([]common.KvPair{
  1139  					{
  1140  						Key: []byte("txxxxxxxx_ryyyyyyyy"),
  1141  						Val: []byte("value1"),
  1142  					},
  1143  					{
  1144  						Key: []byte("txxxxxxxx_rwwwwwwww"),
  1145  						Val: []byte("value2"),
  1146  					},
  1147  					{
  1148  						Key: []byte("txxxxxxxx_izzzzzzzz"),
  1149  						Val: []byte("index1"),
  1150  					},
  1151  				}),
  1152  				columns: mockCols,
  1153  				offset:  12,
  1154  				rowID:   76,
  1155  			},
  1156  		}
  1157  		kvsCh <- []deliveredKVs{}
  1158  		close(kvsCh)
  1159  	}()
  1160  
  1161  	cfg := &config.Config{}
  1162  	rc := &Controller{cfg: cfg, saveCpCh: saveCpCh, backend: importer, diskQuotaLock: newDiskQuotaLock()}
  1163  
  1164  	_, err = s.cr.deliverLoop(ctx, kvsCh, s.tr, 0, dataWriter, indexWriter, rc)
  1165  	c.Assert(err, IsNil)
  1166  	c.Assert(saveCpCh, HasLen, 2)
  1167  	c.Assert(s.cr.chunk.Chunk.Offset, Equals, int64(12))
  1168  	c.Assert(s.cr.chunk.Chunk.PrevRowIDMax, Equals, int64(76))
  1169  	c.Assert(s.cr.chunk.Checksum.SumKVS(), Equals, uint64(3))
  1170  }
  1171  
  1172  func (s *chunkRestoreSuite) TestEncodeLoop(c *C) {
  1173  	ctx := context.Background()
  1174  	kvsCh := make(chan []deliveredKVs, 2)
  1175  	deliverCompleteCh := make(chan deliverResult)
  1176  	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
  1177  		SQLMode:   s.cfg.TiDB.SQLMode,
  1178  		Timestamp: 1234567895,
  1179  	})
  1180  	c.Assert(err, IsNil)
  1181  	cfg := config.NewConfig()
  1182  	rc := &Controller{pauser: DeliverPauser, cfg: cfg}
  1183  	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
  1184  	c.Assert(err, IsNil)
  1185  	c.Assert(kvsCh, HasLen, 2)
  1186  
  1187  	kvs := <-kvsCh
  1188  	c.Assert(kvs, HasLen, 1)
  1189  	c.Assert(kvs[0].rowID, Equals, int64(19))
  1190  	c.Assert(kvs[0].offset, Equals, int64(36))
  1191  
  1192  	kvs = <-kvsCh
  1193  	c.Assert(len(kvs), Equals, 0)
  1194  }
  1195  
  1196  func (s *chunkRestoreSuite) TestEncodeLoopCanceled(c *C) {
  1197  	ctx, cancel := context.WithCancel(context.Background())
  1198  	kvsCh := make(chan []deliveredKVs)
  1199  	deliverCompleteCh := make(chan deliverResult)
  1200  	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
  1201  		SQLMode:   s.cfg.TiDB.SQLMode,
  1202  		Timestamp: 1234567896,
  1203  	})
  1204  	c.Assert(err, IsNil)
  1205  
  1206  	go cancel()
  1207  	cfg := config.NewConfig()
  1208  	rc := &Controller{pauser: DeliverPauser, cfg: cfg}
  1209  	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
  1210  	c.Assert(errors.Cause(err), Equals, context.Canceled)
  1211  	c.Assert(kvsCh, HasLen, 0)
  1212  }
  1213  
  1214  func (s *chunkRestoreSuite) TestEncodeLoopForcedError(c *C) {
  1215  	ctx := context.Background()
  1216  	kvsCh := make(chan []deliveredKVs, 2)
  1217  	deliverCompleteCh := make(chan deliverResult)
  1218  	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
  1219  		SQLMode:   s.cfg.TiDB.SQLMode,
  1220  		Timestamp: 1234567897,
  1221  	})
  1222  	c.Assert(err, IsNil)
  1223  
  1224  	// close the chunk so reading it will result in the "file already closed" error.
  1225  	s.cr.parser.Close()
  1226  
  1227  	cfg := config.NewConfig()
  1228  	rc := &Controller{pauser: DeliverPauser, cfg: cfg}
  1229  	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
  1230  	c.Assert(err, ErrorMatches, `in file .*[/\\]?db\.table\.2\.sql:0 at offset 0:.*file already closed`)
  1231  	c.Assert(kvsCh, HasLen, 0)
  1232  }
  1233  
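// TestEncodeLoopDeliverLimit uses the mock-kv-size failpoint to make every encoded row look
// huge, forcing the encode loop to deliver after each row: the three-row CSV should arrive
// as three single-kv batches followed by a final empty batch.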
  1234  func (s *chunkRestoreSuite) TestEncodeLoopDeliverLimit(c *C) {
  1235  	ctx := context.Background()
  1236  	kvsCh := make(chan []deliveredKVs, 4)
  1237  	deliverCompleteCh := make(chan deliverResult)
  1238  	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
  1239  		SQLMode:   s.cfg.TiDB.SQLMode,
  1240  		Timestamp: 1234567898,
  1241  	})
  1242  	c.Assert(err, IsNil)
  1243  
  1244  	dir := c.MkDir()
  1245  	fileName := "db.limit.000.csv"
  1246  	err = os.WriteFile(filepath.Join(dir, fileName), []byte("1,2,3\r\n4,5,6\r\n7,8,9\r"), 0o644)
  1247  	c.Assert(err, IsNil)
  1248  
  1249  	store, err := storage.NewLocalStorage(dir)
  1250  	c.Assert(err, IsNil)
  1251  	cfg := config.NewConfig()
  1252  
  1253  	reader, err := store.Open(ctx, fileName)
  1254  	c.Assert(err, IsNil)
  1255  	w := worker.NewPool(ctx, 1, "io")
  1256  	p := mydump.NewCSVParser(&cfg.Mydumper.CSV, reader, 111, w, false)
  1257  	s.cr.parser = p
  1258  
  1259  	rc := &Controller{pauser: DeliverPauser, cfg: cfg}
  1260  	c.Assert(failpoint.Enable(
  1261  		"github.com/pingcap/br/pkg/lightning/restore/mock-kv-size", "return(110000000)"), IsNil)
  1262  	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
  1263  	c.Assert(err, IsNil)
  1264  
   1265  	// we have 3 kvs in total; after the failpoint is injected,
   1266  	// we will send one kv at a time.
  1267  	count := 0
  1268  	for {
  1269  		kvs, ok := <-kvsCh
  1270  		if !ok {
  1271  			break
  1272  		}
  1273  		count += 1
  1274  		if count <= 3 {
  1275  			c.Assert(kvs, HasLen, 1)
  1276  		}
  1277  		if count == 4 {
   1278  			// an empty kvs batch is sent before encodeLoop exits,
   1279  			// so we receive 4 batches in total and the last one is empty
  1280  			c.Assert(kvs, HasLen, 0)
  1281  			break
  1282  		}
  1283  	}
  1284  }
  1285  
  1286  func (s *chunkRestoreSuite) TestEncodeLoopDeliverErrored(c *C) {
  1287  	ctx := context.Background()
  1288  	kvsCh := make(chan []deliveredKVs)
  1289  	deliverCompleteCh := make(chan deliverResult)
  1290  	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
  1291  		SQLMode:   s.cfg.TiDB.SQLMode,
  1292  		Timestamp: 1234567898,
  1293  	})
  1294  	c.Assert(err, IsNil)
  1295  
  1296  	go func() {
  1297  		deliverCompleteCh <- deliverResult{
  1298  			err: errors.New("fake deliver error"),
  1299  		}
  1300  	}()
  1301  	cfg := config.NewConfig()
  1302  	rc := &Controller{pauser: DeliverPauser, cfg: cfg}
  1303  	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
  1304  	c.Assert(err, ErrorMatches, "fake deliver error")
  1305  	c.Assert(kvsCh, HasLen, 0)
  1306  }
  1307  
  1308  func (s *chunkRestoreSuite) TestEncodeLoopColumnsMismatch(c *C) {
  1309  	dir := c.MkDir()
  1310  	fileName := "db.table.000.csv"
  1311  	err := os.WriteFile(filepath.Join(dir, fileName), []byte("1,2,3,4\r\n4,5,6,7\r\n"), 0o644)
  1312  	c.Assert(err, IsNil)
  1313  
  1314  	store, err := storage.NewLocalStorage(dir)
  1315  	c.Assert(err, IsNil)
  1316  
  1317  	ctx := context.Background()
  1318  	cfg := config.NewConfig()
  1319  	rc := &Controller{pauser: DeliverPauser, cfg: cfg}
  1320  
  1321  	reader, err := store.Open(ctx, fileName)
  1322  	c.Assert(err, IsNil)
  1323  	w := worker.NewPool(ctx, 5, "io")
  1324  	p := mydump.NewCSVParser(&cfg.Mydumper.CSV, reader, 111, w, false)
  1325  
  1326  	err = s.cr.parser.Close()
  1327  	c.Assert(err, IsNil)
  1328  	s.cr.parser = p
  1329  
  1330  	kvsCh := make(chan []deliveredKVs, 2)
  1331  	deliverCompleteCh := make(chan deliverResult)
  1332  	kvEncoder, err := tidb.NewTiDBBackend(nil, config.ReplaceOnDup).NewEncoder(
  1333  		s.tr.encTable,
  1334  		&kv.SessionOptions{
  1335  			SQLMode:   s.cfg.TiDB.SQLMode,
  1336  			Timestamp: 1234567895,
  1337  		})
  1338  	c.Assert(err, IsNil)
  1339  
  1340  	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
  1341  	c.Assert(err, ErrorMatches, "in file db.table.2.sql:0 at offset 8: column count mismatch, expected 3, got 4")
  1342  	c.Assert(kvsCh, HasLen, 0)
  1343  }
  1344  
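// TestRestore drives a whole chunk restore against the mock importer backend, expecting one
// WriteEngine stream per engine (data and index) and two checkpoint updates on the save
// channel.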
  1345  func (s *chunkRestoreSuite) TestRestore(c *C) {
  1346  	ctx := context.Background()
  1347  
  1348  	// Open two mock engines
  1349  
  1350  	controller := gomock.NewController(c)
  1351  	defer controller.Finish()
  1352  	mockClient := mock.NewMockImportKVClient(controller)
  1353  	mockDataWriter := mock.NewMockImportKV_WriteEngineClient(controller)
  1354  	mockIndexWriter := mock.NewMockImportKV_WriteEngineClient(controller)
  1355  	importer := importer.NewMockImporter(mockClient, "127.0.0.1:2379")
  1356  
  1357  	mockClient.EXPECT().OpenEngine(ctx, gomock.Any()).Return(nil, nil)
  1358  	mockClient.EXPECT().OpenEngine(ctx, gomock.Any()).Return(nil, nil)
  1359  
  1360  	dataEngine, err := importer.OpenEngine(ctx, &backend.EngineConfig{}, s.tr.tableName, 0)
  1361  	c.Assert(err, IsNil)
  1362  	indexEngine, err := importer.OpenEngine(ctx, &backend.EngineConfig{}, s.tr.tableName, -1)
  1363  	c.Assert(err, IsNil)
  1364  	dataWriter, err := dataEngine.LocalWriter(ctx, &backend.LocalWriterConfig{})
  1365  	c.Assert(err, IsNil)
  1366  	indexWriter, err := indexEngine.LocalWriter(ctx, &backend.LocalWriterConfig{})
  1367  	c.Assert(err, IsNil)
  1368  
  1369  	// Expected API sequence
   1370  	// (we don't care about the actual content; that is checked in the integration tests)
  1371  
  1372  	mockClient.EXPECT().WriteEngine(ctx).Return(mockDataWriter, nil)
  1373  	mockDataWriter.EXPECT().Send(gomock.Any()).Return(nil)
  1374  	mockDataWriter.EXPECT().Send(gomock.Any()).DoAndReturn(func(req *import_kvpb.WriteEngineRequest) error {
  1375  		c.Assert(req.GetBatch().GetMutations(), HasLen, 1)
  1376  		return nil
  1377  	})
  1378  	mockDataWriter.EXPECT().CloseAndRecv().Return(nil, nil)
  1379  
  1380  	mockClient.EXPECT().WriteEngine(ctx).Return(mockIndexWriter, nil)
  1381  	mockIndexWriter.EXPECT().Send(gomock.Any()).Return(nil)
  1382  	mockIndexWriter.EXPECT().Send(gomock.Any()).DoAndReturn(func(req *import_kvpb.WriteEngineRequest) error {
  1383  		c.Assert(req.GetBatch().GetMutations(), HasLen, 1)
  1384  		return nil
  1385  	})
  1386  	mockIndexWriter.EXPECT().CloseAndRecv().Return(nil, nil)
  1387  
  1388  	// Now actually start the restore loop.
  1389  
  1390  	saveCpCh := make(chan saveCp, 2)
  1391  	err = s.cr.restore(ctx, s.tr, 0, dataWriter, indexWriter, &Controller{
  1392  		cfg:           s.cfg,
  1393  		saveCpCh:      saveCpCh,
  1394  		backend:       importer,
  1395  		pauser:        DeliverPauser,
  1396  		diskQuotaLock: newDiskQuotaLock(),
  1397  	})
  1398  	c.Assert(err, IsNil)
  1399  	c.Assert(saveCpCh, HasLen, 2)
  1400  }
  1401  
  1402  var _ = Suite(&restoreSchemaSuite{})
  1403  
  1404  type restoreSchemaSuite struct {
  1405  	ctx        context.Context
  1406  	rc         *Controller
  1407  	controller *gomock.Controller
  1408  	tableInfos []*model.TableInfo
  1409  }
  1410  
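// SetUpSuite prepares a fake dump on local storage: one database schema file, eight table
// schema files and eight view schema files for "fakedb", then loads it with mydump and
// wires the resulting dbMetas into a Controller.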
  1411  func (s *restoreSchemaSuite) SetUpSuite(c *C) {
  1412  	ctx := context.Background()
  1413  	fakeDataDir := c.MkDir()
  1414  	store, err := storage.NewLocalStorage(fakeDataDir)
  1415  	c.Assert(err, IsNil)
  1416  	// restore database schema file
  1417  	fakeDBName := "fakedb"
   1418  	// the file name must follow `mydump.defaultFileRouteRules`, which matches files like '{schema}-schema-create.sql'
  1419  	fakeFileName := fmt.Sprintf("%s-schema-create.sql", fakeDBName)
  1420  	err = store.WriteFile(ctx, fakeFileName, []byte(fmt.Sprintf("CREATE DATABASE %s;", fakeDBName)))
  1421  	c.Assert(err, IsNil)
  1422  	// restore table schema files
  1423  	fakeTableFilesCount := 8
  1424  
  1425  	p := parser.New()
  1426  	p.SetSQLMode(mysql.ModeANSIQuotes)
  1427  	se := tmock.NewContext()
  1428  
  1429  	tableInfos := make([]*model.TableInfo, 0, fakeTableFilesCount)
  1430  	for i := 1; i <= fakeTableFilesCount; i++ {
  1431  		fakeTableName := fmt.Sprintf("tbl%d", i)
  1432  		// the file name must follow `mydump.defaultFileRouteRules`, which matches files like '{schema}.{table}-schema.sql'
  1433  		fakeFileName := fmt.Sprintf("%s.%s-schema.sql", fakeDBName, fakeTableName)
  1434  		fakeFileContent := fmt.Sprintf("CREATE TABLE %s(i TINYINT);", fakeTableName)
  1435  		err = store.WriteFile(ctx, fakeFileName, []byte(fakeFileContent))
  1436  		c.Assert(err, IsNil)
  1437  
  1438  		node, err := p.ParseOneStmt(fakeFileContent, "", "")
  1439  		c.Assert(err, IsNil)
  1440  		core, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), 0xabcdef)
  1441  		c.Assert(err, IsNil)
  1442  		core.State = model.StatePublic
  1443  		tableInfos = append(tableInfos, core)
  1444  	}
  1445  	s.tableInfos = tableInfos
  1446  	// restore view schema files
  1447  	fakeViewFilesCount := 8
  1448  	for i := 1; i <= fakeViewFilesCount; i++ {
  1449  		fakeViewName := fmt.Sprintf("tbl%d", i)
  1450  		// the file name must follow `mydump.defaultFileRouteRules`, which matches files like '{schema}.{table}-schema-view.sql'
  1451  		fakeFileName := fmt.Sprintf("%s.%s-schema-view.sql", fakeDBName, fakeViewName)
  1452  		fakeFileContent := []byte(fmt.Sprintf("CREATE ALGORITHM=UNDEFINED VIEW `%s` (`i`) AS SELECT `i` FROM `%s`.`%s`;", fakeViewName, fakeDBName, fmt.Sprintf("tbl%d", i)))
  1453  		err = store.WriteFile(ctx, fakeFileName, fakeFileContent)
  1454  		c.Assert(err, IsNil)
  1455  	}
  1456  	config := config.NewConfig()
  1457  	config.Mydumper.DefaultFileRules = true
  1458  	config.Mydumper.CharacterSet = "utf8mb4"
  1459  	config.App.RegionConcurrency = 8
  1460  	mydumpLoader, err := mydump.NewMyDumpLoaderWithStore(ctx, config, store)
  1461  	c.Assert(err, IsNil)
  1462  	s.rc = &Controller{
  1463  		checkTemplate: NewSimpleTemplate(),
  1464  		cfg:           config,
  1465  		store:         store,
  1466  		dbMetas:       mydumpLoader.GetDatabases(),
  1467  		checkpointsDB: &checkpoints.NullCheckpointsDB{},
  1468  	}
  1469  }
  1470  
  1471  //nolint:interfacer // changing the test case signature might prevent Check from finding this test case
  1472  func (s *restoreSchemaSuite) SetUpTest(c *C) {
  1473  	s.controller, s.ctx = gomock.WithContext(context.Background(), c)
  1474  	mockBackend := mock.NewMockBackend(s.controller)
  1475  	mockBackend.EXPECT().
  1476  		FetchRemoteTableModels(gomock.Any(), gomock.Any()).
  1477  		AnyTimes().
  1478  		Return(s.tableInfos, nil)
  1479  	mockBackend.EXPECT().Close()
  1480  	s.rc.backend = backend.MakeBackend(mockBackend)
  1481  
  1482  	mockDB, sqlMock, err := sqlmock.New()
  1483  	c.Assert(err, IsNil)
  1484  	for i := 0; i < 17; i++ {
  1485  		sqlMock.ExpectExec(".*").WillReturnResult(sqlmock.NewResult(int64(i), 1))
  1486  	}
  1487  	mockTiDBGlue := mock.NewMockGlue(s.controller)
  1488  	mockTiDBGlue.EXPECT().GetDB().AnyTimes().Return(mockDB, nil)
  1489  	mockTiDBGlue.EXPECT().
  1490  		OwnsSQLExecutor().
  1491  		AnyTimes().
  1492  		Return(true)
  1493  	parser := parser.New()
  1494  	mockTiDBGlue.EXPECT().
  1495  		GetParser().
  1496  		AnyTimes().
  1497  		Return(parser)
  1498  	s.rc.tidbGlue = mockTiDBGlue
  1499  }
  1500  
  1501  func (s *restoreSchemaSuite) TearDownTest(c *C) {
  1502  	s.rc.Close()
  1503  	s.controller.Finish()
  1504  }
  1505  
  1506  func (s *restoreSchemaSuite) TestRestoreSchemaSuccessful(c *C) {
  1507  	err := s.rc.restoreSchema(s.ctx)
  1508  	c.Assert(err, IsNil)
  1509  }
  1510  
  1511  func (s *restoreSchemaSuite) TestRestoreSchemaFailed(c *C) {
  1512  	injectErr := errors.New("Something wrong")
  1513  	mockSession := mock.NewMockSession(s.controller)
  1514  	mockSession.EXPECT().
  1515  		Close().
  1516  		AnyTimes().
  1517  		Return()
  1518  	mockSession.EXPECT().
  1519  		Execute(gomock.Any(), gomock.Any()).
  1520  		AnyTimes().
  1521  		Return(nil, injectErr)
  1522  	mockTiDBGlue := mock.NewMockGlue(s.controller)
  1523  	mockTiDBGlue.EXPECT().
  1524  		GetSession(gomock.Any()).
  1525  		AnyTimes().
  1526  		Return(mockSession, nil)
  1527  	s.rc.tidbGlue = mockTiDBGlue
  1528  	err := s.rc.restoreSchema(s.ctx)
  1529  	c.Assert(err, NotNil)
  1530  	c.Assert(errors.ErrorEqual(err, injectErr), IsTrue)
  1531  }
  1532  
  1533  // When restoring a CSV with `-no-schema` and the target table doesn't exist,
  1534  // we can't restore the schema because the `Path` is empty. This test makes
  1535  // sure that case results in the correct error.
  1536  // https://github.com/pingcap/br/issues/1394
  1537  func (s *restoreSchemaSuite) TestNoSchemaPath(c *C) {
  1538  	fakeTable := mydump.MDTableMeta{
  1539  		DB:   "fakedb",
  1540  		Name: "fake1",
  1541  		SchemaFile: mydump.FileInfo{
  1542  			TableName: filter.Table{
  1543  				Schema: "fakedb",
  1544  				Name:   "fake1",
  1545  			},
  1546  			FileMeta: mydump.SourceFileMeta{
  1547  				Path: "",
  1548  			},
  1549  		},
  1550  		DataFiles: []mydump.FileInfo{},
  1551  		TotalSize: 0,
  1552  	}
  1553  	s.rc.dbMetas[0].Tables = append(s.rc.dbMetas[0].Tables, &fakeTable)
  1554  	err := s.rc.restoreSchema(s.ctx)
  1555  	c.Assert(err, NotNil)
  1556  	c.Assert(err, ErrorMatches, `table .* schema not found`)
  1557  	s.rc.dbMetas[0].Tables = s.rc.dbMetas[0].Tables[:len(s.rc.dbMetas[0].Tables)-1]
  1558  }
  1559  
  1560  func (s *restoreSchemaSuite) TestRestoreSchemaContextCancel(c *C) {
  1561  	childCtx, cancel := context.WithCancel(s.ctx)
  1562  	mockSession := mock.NewMockSession(s.controller)
  1563  	mockSession.EXPECT().
  1564  		Close().
  1565  		AnyTimes().
  1566  		Return()
  1567  	mockSession.EXPECT().
  1568  		Execute(gomock.Any(), gomock.Any()).
  1569  		AnyTimes().
  1570  		Do(func(context.Context, string) { cancel() }).
  1571  		Return(nil, nil)
  1572  	mockTiDBGlue := mock.NewMockGlue(s.controller)
  1573  	mockTiDBGlue.EXPECT().
  1574  		GetSession(gomock.Any()).
  1575  		AnyTimes().
  1576  		Return(mockSession, nil)
  1577  	s.rc.tidbGlue = mockTiDBGlue
  1578  	err := s.rc.restoreSchema(childCtx)
  1579  	cancel()
  1580  	c.Assert(err, NotNil)
  1581  	c.Assert(err, Equals, childCtx.Err())
  1582  }
  1583  
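        // TestCheckClusterResource drives the cluster-capacity precheck against a mocked PD
        // endpoint: the TLS test server returns canned stores/replica responses, and the check
        // compares the 16-byte local source against the reported store capacity.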
  1584  func (s *tableRestoreSuite) TestCheckClusterResource(c *C) {
  1585  	cases := []struct {
  1586  		mockStoreResponse   []byte
  1587  		mockReplicaResponse []byte
  1588  		expectMsg           string
  1589  		expectResult        bool
  1590  		expectErrorCount    int
  1591  	}{
  1592  		{
  1593  			[]byte(`{
  1594  				"count": 1,
  1595  				"stores": [
  1596  					{
  1597  						"store": {
  1598  							"id": 2
  1599  						},
  1600  						"status": {
  1601  							"capacity": "24"
  1602  						}
  1603  					}
  1604  				]
  1605  			}`),
  1606  			[]byte(`{
  1607  				"max-replicas": 1
  1608  			}`),
  1609  			"(.*)Cluster capacity is rich(.*)",
  1610  			true,
  1611  			0,
  1612  		},
  1613  		{
  1614  			[]byte(`{
  1615  				"count": 1,
  1616  				"stores": [
  1617  					{
  1618  						"store": {
  1619  							"id": 2
  1620  						},
  1621  						"status": {
  1622  							"capacity": "15"
  1623  						}
  1624  					}
  1625  				]
  1626  			}`),
  1627  			[]byte(`{
  1628  				"max-replicas": 1
  1629  			}`),
  1630  			"(.*)Cluster doesn't have enough space(.*)",
  1631  			false,
  1632  			1,
  1633  		},
  1634  	}
  1635  
  1636  	ctx := context.Background()
  1637  	dir := c.MkDir()
  1638  	file := filepath.Join(dir, "tmp")
  1639  	f, err := os.Create(file)
  1640  	c.Assert(err, IsNil)
  1641  	buf := make([]byte, 16)
  1642  	// write a 16-byte file into local storage
  1643  	for i := range buf {
  1644  		buf[i] = byte('A' + i)
  1645  	}
  1646  	_, err = f.Write(buf)
  1647  	c.Assert(err, IsNil)
  1648  	mockStore, err := storage.NewLocalStorage(dir)
  1649  	c.Assert(err, IsNil)
  1650  	for _, ca := range cases {
  1651  		server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
  1652  			var err error
  1653  			if strings.HasSuffix(req.URL.Path, "stores") {
  1654  				_, err = w.Write(ca.mockStoreResponse)
  1655  			} else {
  1656  				_, err = w.Write(ca.mockReplicaResponse)
  1657  			}
  1658  			c.Assert(err, IsNil)
  1659  		}))
  1660  
  1661  		tls := common.NewTLSFromMockServer(server)
  1662  		template := NewSimpleTemplate()
  1663  
  1664  		url := strings.TrimPrefix(server.URL, "https://")
  1665  		cfg := &config.Config{TiDB: config.DBStore{PdAddr: url}}
  1666  		rc := &Controller{cfg: cfg, tls: tls, store: mockStore, checkTemplate: template}
  1667  		var sourceSize int64
  1668  		err = rc.store.WalkDir(ctx, &storage.WalkOption{}, func(path string, size int64) error {
  1669  			sourceSize += size
  1670  			return nil
  1671  		})
        		c.Assert(err, IsNil)
  1672  		err = rc.ClusterResource(ctx, sourceSize)
  1673  		c.Assert(err, IsNil)
  1674  
  1675  		c.Assert(template.FailedCount(Critical), Equals, ca.expectErrorCount)
  1676  		c.Assert(template.Success(), Equals, ca.expectResult)
  1677  		c.Assert(strings.ReplaceAll(template.Output(), "\n", ""), Matches, ca.expectMsg)
  1678  
  1679  		server.Close()
  1680  	}
  1681  }
  1682  
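        // TestCheckHasLargeCSV drives the large-CSV precheck: the check is skipped when
        // strict-format is enabled, passes for a small data file, and raises a warning
        // for a roughly 1 TiB data file.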
  1683  func (s *tableRestoreSuite) TestCheckHasLargeCSV(c *C) {
  1684  	cases := []struct {
  1685  		strictFormat    bool
  1686  		expectMsg       string
  1687  		expectResult    bool
  1688  		expectWarnCount int
  1689  		dbMetas         []*mydump.MDDatabaseMeta
  1690  	}{
  1691  		{
  1692  			true,
  1693  			"(.*)Skip the csv size check, because config.StrictFormat is true(.*)",
  1694  			true,
  1695  			0,
  1696  			nil,
  1697  		},
  1698  		{
  1699  			false,
  1700  			"(.*)Source csv files size is proper(.*)",
  1701  			true,
  1702  			0,
  1703  			[]*mydump.MDDatabaseMeta{
  1704  				{
  1705  					Tables: []*mydump.MDTableMeta{
  1706  						{
  1707  							DataFiles: []mydump.FileInfo{
  1708  								{
  1709  									FileMeta: mydump.SourceFileMeta{
  1710  										FileSize: 1 * units.KiB,
  1711  									},
  1712  								},
  1713  							},
  1714  						},
  1715  					},
  1716  				},
  1717  			},
  1718  		},
  1719  		{
  1720  			false,
  1721  			"(.*)large csv: /testPath file exists(.*)",
  1722  			false,
  1723  			1,
  1724  			[]*mydump.MDDatabaseMeta{
  1725  				{
  1726  					Tables: []*mydump.MDTableMeta{
  1727  						{
  1728  							DataFiles: []mydump.FileInfo{
  1729  								{
  1730  									FileMeta: mydump.SourceFileMeta{
  1731  										FileSize: 1 * units.TiB,
  1732  										Path:     "/testPath",
  1733  									},
  1734  								},
  1735  							},
  1736  						},
  1737  					},
  1738  				},
  1739  			},
  1740  		},
  1741  	}
  1742  	dir := c.MkDir()
  1743  	mockStore, err := storage.NewLocalStorage(dir)
  1744  	c.Assert(err, IsNil)
  1745  
  1746  	for _, ca := range cases {
  1747  		template := NewSimpleTemplate()
  1748  		cfg := &config.Config{Mydumper: config.MydumperRuntime{StrictFormat: ca.strictFormat}}
  1749  		rc := &Controller{cfg: cfg, checkTemplate: template, store: mockStore}
  1750  		err := rc.HasLargeCSV(ca.dbMetas)
  1751  		c.Assert(err, IsNil)
  1752  		c.Assert(template.FailedCount(Warn), Equals, ca.expectWarnCount)
  1753  		c.Assert(template.Success(), Equals, ca.expectResult)
  1754  		c.Assert(strings.ReplaceAll(template.Output(), "\n", ""), Matches, ca.expectMsg)
  1755  	}
  1756  }
  1757  
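        // TestSchemaIsValid drives the schema precheck, which compares the columns found
        // in a data file (with or without a CSV header) against the target TiDB table,
        // honoring the ignore-columns configuration.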
  1758  func (s *tableRestoreSuite) TestSchemaIsValid(c *C) {
  1759  	dir := c.MkDir()
  1760  	ctx := context.Background()
  1761  
  1762  	case1File := "db1.table1.csv"
  1763  	mockStore, err := storage.NewLocalStorage(dir)
  1764  	c.Assert(err, IsNil)
  1765  	err = mockStore.WriteFile(ctx, case1File, []byte(`"a"`))
  1766  	c.Assert(err, IsNil)
  1767  
  1768  	case2File := "db1.table2.csv"
  1769  	err = mockStore.WriteFile(ctx, case2File, []byte("\"colA\",\"colB\"\n\"a\",\"b\""))
  1770  	c.Assert(err, IsNil)
  1771  
  1772  	cases := []struct {
  1773  		ignoreColumns []*config.IgnoreColumns
  1774  		expectMsg     string
  1775  		// MsgNum == 0 means the check passed.
  1776  		MsgNum    int
  1777  		hasHeader bool
  1778  		dbInfos   map[string]*checkpoints.TidbDBInfo
  1779  		tableMeta *mydump.MDTableMeta
  1780  	}{
  1781  		// Case 1:
  1782  		// the csv has one column and no header.
  1783  		// tidb has two columns, and the second column has no default value.
  1784  		// we expect the check to fail.
  1785  		{
  1786  			nil,
  1787  			"TiDB schema `db1`.`table1` has 2 columns,and data file has 1 columns, but column colb are missing(.*)",
  1788  			1,
  1789  			false,
  1790  			map[string]*checkpoints.TidbDBInfo{
  1791  				"db1": {
  1792  					Name: "db1",
  1793  					Tables: map[string]*checkpoints.TidbTableInfo{
  1794  						"table1": {
  1795  							ID:   1,
  1796  							DB:   "db1",
  1797  							Name: "table1",
  1798  							Core: &model.TableInfo{
  1799  								Columns: []*model.ColumnInfo{
  1800  									{
  1801  										// colA has the default value
  1802  										Name:          model.NewCIStr("colA"),
  1803  										DefaultIsExpr: true,
  1804  									},
  1805  									{
  1806  										// colB doesn't have the default value
  1807  										Name: model.NewCIStr("colB"),
  1808  										FieldType: types.FieldType{
  1809  											// not null flag
  1810  											Flag: 1,
  1811  										},
  1812  									},
  1813  								},
  1814  							},
  1815  						},
  1816  					},
  1817  				},
  1818  			},
  1819  			&mydump.MDTableMeta{
  1820  				DB:   "db1",
  1821  				Name: "table1",
  1822  				DataFiles: []mydump.FileInfo{
  1823  					{
  1824  						FileMeta: mydump.SourceFileMeta{
  1825  							FileSize: 1 * units.TiB,
  1826  							Path:     case1File,
  1827  							Type:     mydump.SourceTypeCSV,
  1828  						},
  1829  					},
  1830  				},
  1831  			},
  1832  		},
  1833  		// Case 2.1:
  1834  		// the csv has two columns (colA, colB) with a header.
  1835  		// tidb only has one column (colB).
  1836  		// we expect the check to fail.
  1837  		{
  1838  			nil,
  1839  			"TiDB schema `db1`.`table2` doesn't have column cola,(.*)use tables.ignoreColumns to ignore(.*)",
  1840  			1,
  1841  			true,
  1842  			map[string]*checkpoints.TidbDBInfo{
  1843  				"db1": {
  1844  					Name: "db1",
  1845  					Tables: map[string]*checkpoints.TidbTableInfo{
  1846  						"table2": {
  1847  							ID:   1,
  1848  							DB:   "db1",
  1849  							Name: "table2",
  1850  							Core: &model.TableInfo{
  1851  								Columns: []*model.ColumnInfo{
  1852  									{
  1853  										// colB has the default value
  1854  										Name:          model.NewCIStr("colB"),
  1855  										DefaultIsExpr: true,
  1856  									},
  1857  								},
  1858  							},
  1859  						},
  1860  					},
  1861  				},
  1862  			},
  1863  			&mydump.MDTableMeta{
  1864  				DB:   "db1",
  1865  				Name: "table2",
  1866  				DataFiles: []mydump.FileInfo{
  1867  					{
  1868  						FileMeta: mydump.SourceFileMeta{
  1869  							FileSize: 1 * units.TiB,
  1870  							Path:     case2File,
  1871  							Type:     mydump.SourceTypeCSV,
  1872  						},
  1873  					},
  1874  				},
  1875  			},
  1876  		},
  1877  		// Case 2.2:
  1878  		// the csv has two columns (colA, colB) with a header.
  1879  		// tidb only has one column (colB).
  1880  		// we ignore colA by setting tables.IgnoreColumns in the config.
  1881  		// we expect the check to succeed.
  1882  		{
  1883  			[]*config.IgnoreColumns{
  1884  				{
  1885  					DB:      "db1",
  1886  					Table:   "table2",
  1887  					Columns: []string{"cola"},
  1888  				},
  1889  			},
  1890  			"",
  1891  			0,
  1892  			true,
  1893  			map[string]*checkpoints.TidbDBInfo{
  1894  				"db1": {
  1895  					Name: "db1",
  1896  					Tables: map[string]*checkpoints.TidbTableInfo{
  1897  						"table2": {
  1898  							ID:   1,
  1899  							DB:   "db1",
  1900  							Name: "table2",
  1901  							Core: &model.TableInfo{
  1902  								Columns: []*model.ColumnInfo{
  1903  									{
  1904  										// colB has the default value
  1905  										Name:          model.NewCIStr("colB"),
  1906  										DefaultIsExpr: true,
  1907  									},
  1908  								},
  1909  							},
  1910  						},
  1911  					},
  1912  				},
  1913  			},
  1914  			&mydump.MDTableMeta{
  1915  				DB:   "db1",
  1916  				Name: "table2",
  1917  				DataFiles: []mydump.FileInfo{
  1918  					{
  1919  						FileMeta: mydump.SourceFileMeta{
  1920  							FileSize: 1 * units.TiB,
  1921  							Path:     case2File,
  1922  							Type:     mydump.SourceTypeCSV,
  1923  						},
  1924  					},
  1925  				},
  1926  			},
  1927  		},
  1928  		// Case 2.3:
  1929  		// the csv has two columns (colA, colB) with a header.
  1930  		// tidb has two columns (colB, colC).
  1931  		// we ignore colA by setting tables.IgnoreColumns in the config.
  1932  		// colC has no default value.
  1933  		// we expect the check to fail.
  1934  		{
  1935  			[]*config.IgnoreColumns{
  1936  				{
  1937  					DB:      "db1",
  1938  					Table:   "table2",
  1939  					Columns: []string{"cola"},
  1940  				},
  1941  			},
  1942  			"TiDB schema `db1`.`table2` doesn't have the default value for colc(.*)",
  1943  			1,
  1944  			true,
  1945  			map[string]*checkpoints.TidbDBInfo{
  1946  				"db1": {
  1947  					Name: "db1",
  1948  					Tables: map[string]*checkpoints.TidbTableInfo{
  1949  						"table2": {
  1950  							ID:   1,
  1951  							DB:   "db1",
  1952  							Name: "table2",
  1953  							Core: &model.TableInfo{
  1954  								Columns: []*model.ColumnInfo{
  1955  									{
  1956  										// colB has the default value
  1957  										Name:          model.NewCIStr("colB"),
  1958  										DefaultIsExpr: true,
  1959  									},
  1960  									{
  1961  										// colC doesn't have the default value
  1962  										Name: model.NewCIStr("colC"),
  1963  										FieldType: types.FieldType{
  1964  											Flag: 1,
  1965  										},
  1966  									},
  1967  								},
  1968  							},
  1969  						},
  1970  					},
  1971  				},
  1972  			},
  1973  			&mydump.MDTableMeta{
  1974  				DB:   "db1",
  1975  				Name: "table2",
  1976  				DataFiles: []mydump.FileInfo{
  1977  					{
  1978  						FileMeta: mydump.SourceFileMeta{
  1979  							FileSize: 1 * units.TiB,
  1980  							Path:     case2File,
  1981  							Type:     mydump.SourceTypeCSV,
  1982  						},
  1983  					},
  1984  				},
  1985  			},
  1986  		},
  1987  		// Case 2.4:
  1988  		// the csv has two columns (colA, colB) with a header.
  1989  		// tidb has two columns (colB, colC).
  1990  		// we ignore colB by setting tables.IgnoreColumns in the config.
  1991  		// colB has no default value.
  1992  		// we expect the check to fail.
  1993  		{
  1994  			[]*config.IgnoreColumns{
  1995  				{
  1996  					TableFilter: []string{"`db1`.`table2`"},
  1997  					Columns:     []string{"colb"},
  1998  				},
  1999  			},
  2000  			"TiDB schema `db1`.`table2`'s column colb cannot be ignored(.*)",
  2001  			2,
  2002  			true,
  2003  			map[string]*checkpoints.TidbDBInfo{
  2004  				"db1": {
  2005  					Name: "db1",
  2006  					Tables: map[string]*checkpoints.TidbTableInfo{
  2007  						"table2": {
  2008  							ID:   1,
  2009  							DB:   "db1",
  2010  							Name: "table2",
  2011  							Core: &model.TableInfo{
  2012  								Columns: []*model.ColumnInfo{
  2013  									{
  2014  										// colB doesn't have the default value
  2015  										Name: model.NewCIStr("colB"),
  2016  										FieldType: types.FieldType{
  2017  											Flag: 1,
  2018  										},
  2019  									},
  2020  									{
  2021  										// colC has the default value
  2022  										Name:          model.NewCIStr("colC"),
  2023  										DefaultIsExpr: true,
  2024  									},
  2025  								},
  2026  							},
  2027  						},
  2028  					},
  2029  				},
  2030  			},
  2031  			&mydump.MDTableMeta{
  2032  				DB:   "db1",
  2033  				Name: "table2",
  2034  				DataFiles: []mydump.FileInfo{
  2035  					{
  2036  						FileMeta: mydump.SourceFileMeta{
  2037  							FileSize: 1 * units.TiB,
  2038  							Path:     case2File,
  2039  							Type:     mydump.SourceTypeCSV,
  2040  						},
  2041  					},
  2042  				},
  2043  			},
  2044  		},
  2045  		// Case 3:
  2046  		// table3's schema file is not found.
  2047  		// tidb has no table3.
  2048  		// we expect the check to fail.
  2049  		{
  2050  			[]*config.IgnoreColumns{
  2051  				{
  2052  					TableFilter: []string{"`db1`.`table2`"},
  2053  					Columns:     []string{"colb"},
  2054  				},
  2055  			},
  2056  			"TiDB schema `db1`.`table3` doesn't exists(.*)",
  2057  			1,
  2058  			true,
  2059  			map[string]*checkpoints.TidbDBInfo{
  2060  				"db1": {
  2061  					Name: "db1",
  2062  					Tables: map[string]*checkpoints.TidbTableInfo{
  2063  						"": {},
  2064  					},
  2065  				},
  2066  			},
  2067  			&mydump.MDTableMeta{
  2068  				DB:   "db1",
  2069  				Name: "table3",
  2070  				DataFiles: []mydump.FileInfo{
  2071  					{
  2072  						FileMeta: mydump.SourceFileMeta{
  2073  							FileSize: 1 * units.TiB,
  2074  							Path:     case2File,
  2075  							Type:     mydump.SourceTypeCSV,
  2076  						},
  2077  					},
  2078  				},
  2079  			},
  2080  		},
  2081  	}
  2082  
  2083  	for _, ca := range cases {
  2084  		template := NewSimpleTemplate()
  2085  		cfg := &config.Config{
  2086  			Mydumper: config.MydumperRuntime{
  2087  				ReadBlockSize: config.ReadBlockSize,
  2088  				CSV: config.CSVConfig{
  2089  					Separator:       ",",
  2090  					Delimiter:       `"`,
  2091  					Header:          ca.hasHeader,
  2092  					NotNull:         false,
  2093  					Null:            `\N`,
  2094  					BackslashEscape: true,
  2095  					TrimLastSep:     false,
  2096  				},
  2097  				IgnoreColumns: ca.ignoreColumns,
  2098  			},
  2099  		}
  2100  		rc := &Controller{
  2101  			cfg:           cfg,
  2102  			checkTemplate: template,
  2103  			store:         mockStore,
  2104  			dbInfos:       ca.dbInfos,
  2105  			ioWorkers:     worker.NewPool(context.Background(), 1, "io"),
  2106  		}
  2107  		msgs, err := rc.SchemaIsValid(ctx, ca.tableMeta)
  2108  		c.Assert(err, IsNil)
  2109  		c.Assert(msgs, HasLen, ca.MsgNum)
  2110  		if len(msgs) > 0 {
  2111  			c.Assert(msgs[0], Matches, ca.expectMsg)
  2112  		}
  2113  	}
  2114  }
  2115  
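        // testChecksumMgr is a stub checksum manager: it counts how many times Checksum is
        // called and always returns the fixed checksum stored in the struct.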
  2116  type testChecksumMgr struct {
  2117  	checksum RemoteChecksum
  2118  	callCnt  int
  2119  }
  2120  
  2121  func (t *testChecksumMgr) Checksum(ctx context.Context, tableInfo *checkpoints.TidbTableInfo) (*RemoteChecksum, error) {
  2122  	t.callCnt++
  2123  	return &t.checksum, nil
  2124  }