github.com/pingcap/tidb-lightning@v5.0.0-rc.0.20210428090220-84b649866577+incompatible/lightning/restore/restore_test.go

// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package restore

import (
	"context"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"sort"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/golang/mock/gomock"
	"github.com/google/uuid"
	"github.com/pingcap/br/pkg/storage"
	. "github.com/pingcap/check"
	"github.com/pingcap/errors"
	"github.com/pingcap/failpoint"
	"github.com/pingcap/kvproto/pkg/import_kvpb"
	"github.com/pingcap/parser"
	"github.com/pingcap/parser/ast"
	"github.com/pingcap/parser/model"
	"github.com/pingcap/parser/mysql"
	"github.com/pingcap/tidb-lightning/lightning/glue"
	filter "github.com/pingcap/tidb-tools/pkg/table-filter"
	"github.com/pingcap/tidb/ddl"
	tmock "github.com/pingcap/tidb/util/mock"

	kv "github.com/pingcap/tidb-lightning/lightning/backend"
	"github.com/pingcap/tidb-lightning/lightning/checkpoints"
	. "github.com/pingcap/tidb-lightning/lightning/checkpoints"
	"github.com/pingcap/tidb-lightning/lightning/common"
	"github.com/pingcap/tidb-lightning/lightning/config"
	"github.com/pingcap/tidb-lightning/lightning/log"
	"github.com/pingcap/tidb-lightning/lightning/mydump"
	"github.com/pingcap/tidb-lightning/lightning/verification"
	"github.com/pingcap/tidb-lightning/lightning/worker"
	"github.com/pingcap/tidb-lightning/mock"
)

var _ = Suite(&restoreSuite{})

type restoreSuite struct{}

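// TestNewTableRestore checks that NewTableRestore succeeds for the mocked
// tables (including one whose identifiers need quoting) once their table
// infos reach the public state.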
func (s *restoreSuite) TestNewTableRestore(c *C) {
	testCases := []struct {
		name       string
		createStmt string
	}{
		{"t1", "CREATE TABLE `t1` (`c1` varchar(5) NOT NULL)"},
		// {"t2", "CREATE TABLE `t2` (`c1` varchar(30000) NOT NULL)"}, // no longer able to create this kind of table.
		{"t3", "CREATE TABLE `t3-a` (`c1-a` varchar(5) NOT NULL)"},
	}

	p := parser.New()
	se := tmock.NewContext()

	dbInfo := &TidbDBInfo{Name: "mockdb", Tables: map[string]*TidbTableInfo{}}
	for i, tc := range testCases {
		node, err := p.ParseOneStmt(tc.createStmt, "utf8mb4", "utf8mb4_bin")
		c.Assert(err, IsNil)
		tableInfo, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), int64(i+1))
		c.Assert(err, IsNil)
		tableInfo.State = model.StatePublic

		dbInfo.Tables[tc.name] = &TidbTableInfo{
			Name: tc.name,
			Core: tableInfo,
		}
	}

	for _, tc := range testCases {
		tableInfo := dbInfo.Tables[tc.name]
		tableName := common.UniqueTable("mockdb", tableInfo.Name)
		tr, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &TableCheckpoint{})
		c.Assert(tr, NotNil)
		c.Assert(err, IsNil)
	}
}

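// TestNewTableRestoreFailure checks that NewTableRestore surfaces an error
// when the table info cannot be converted into a usable table (an empty Core here).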
func (s *restoreSuite) TestNewTableRestoreFailure(c *C) {
	tableInfo := &TidbTableInfo{
		Name: "failure",
		Core: &model.TableInfo{},
	}
	dbInfo := &TidbDBInfo{Name: "mockdb", Tables: map[string]*TidbTableInfo{
		"failure": tableInfo,
	}}
	tableName := common.UniqueTable("mockdb", "failure")

	_, err := NewTableRestore(tableName, nil, dbInfo, tableInfo, &TableCheckpoint{})
	c.Assert(err, ErrorMatches, `failed to tables\.TableFromMeta.*`)
}

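// TestErrorSummaries records two failed tables and verifies the emitted error log lines.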
func (s *restoreSuite) TestErrorSummaries(c *C) {
	logger, buffer := log.MakeTestLogger()

	es := makeErrorSummaries(logger)
	es.record("first", errors.New("a1 error"), CheckpointStatusAnalyzed)
	es.record("second", errors.New("b2 error"), CheckpointStatusAllWritten)
	es.emitLog()

	lines := buffer.Lines()
	sort.Strings(lines[1:])
	c.Assert(lines, DeepEquals, []string{
		`{"$lvl":"ERROR","$msg":"tables failed to be imported","count":2}`,
		`{"$lvl":"ERROR","$msg":"-","table":"first","status":"analyzed","error":"a1 error"}`,
		`{"$lvl":"ERROR","$msg":"-","table":"second","status":"written","error":"b2 error"}`,
	})
}

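// TestVerifyCheckpoint initializes a file-based checkpoint, then mutates one
// config field at a time and expects verifyCheckpoint to reject each mismatch
// unless check-requirements is disabled.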
func (s *restoreSuite) TestVerifyCheckpoint(c *C) {
	dir := c.MkDir()
	cpdb := checkpoints.NewFileCheckpointsDB(filepath.Join(dir, "cp.pb"))
	defer cpdb.Close()
	ctx := context.Background()

	actualReleaseVersion := common.ReleaseVersion
	defer func() {
		common.ReleaseVersion = actualReleaseVersion
	}()

	taskCp, err := cpdb.TaskCheckpoint(ctx)
	c.Assert(err, IsNil)
	c.Assert(taskCp, IsNil)

	newCfg := func() *config.Config {
		cfg := config.NewConfig()
		cfg.Mydumper.SourceDir = "/data"
		cfg.TaskID = 123
		cfg.TiDB.Port = 4000
		cfg.TiDB.PdAddr = "127.0.0.1:2379"
		cfg.TikvImporter.Addr = "127.0.0.1:8287"
		cfg.TikvImporter.SortedKVDir = "/tmp/sorted-kv"

		return cfg
	}

	err = cpdb.Initialize(ctx, newCfg(), map[string]*checkpoints.TidbDBInfo{})
	c.Assert(err, IsNil)

	adjustFuncs := map[string]func(cfg *config.Config){
		"tikv-importer.backend": func(cfg *config.Config) {
			cfg.TikvImporter.Backend = "local"
		},
		"tikv-importer.addr": func(cfg *config.Config) {
			cfg.TikvImporter.Addr = "128.0.0.1:8287"
		},
		"mydumper.data-source-dir": func(cfg *config.Config) {
			cfg.Mydumper.SourceDir = "/tmp/test"
		},
		"tidb.host": func(cfg *config.Config) {
			cfg.TiDB.Host = "192.168.0.1"
		},
		"tidb.port": func(cfg *config.Config) {
			cfg.TiDB.Port = 5000
		},
		"tidb.pd-addr": func(cfg *config.Config) {
			cfg.TiDB.PdAddr = "127.0.0.1:3379"
		},
		"version": func(cfg *config.Config) {
			common.ReleaseVersion = "some newer version"
		},
	}

	// In the default mode, every mismatched field must produce a verification error.
	taskCp, err = cpdb.TaskCheckpoint(ctx)
	c.Assert(err, IsNil)
	for conf, fn := range adjustFuncs {
		cfg := newCfg()
		fn(cfg)
		err := verifyCheckpoint(cfg, taskCp)
		if conf == "version" {
			common.ReleaseVersion = actualReleaseVersion
			c.Assert(err, ErrorMatches, "lightning version is 'some newer version', but checkpoint was created at '"+actualReleaseVersion+"'.*")
		} else {
			c.Assert(err, ErrorMatches, fmt.Sprintf("config '%s' value '.*' different from checkpoint value .*", conf))
		}
	}

	// With check-requirements disabled, re-initializing the checkpoint with a
	// changed config (everything except the backend) should succeed.
	for conf, fn := range adjustFuncs {
		if conf == "tikv-importer.backend" {
			continue
		}
		cfg := newCfg()
		cfg.App.CheckRequirements = false
		fn(cfg)
		err := cpdb.Initialize(context.Background(), cfg, map[string]*checkpoints.TidbDBInfo{})
		c.Assert(err, IsNil)
	}
}

var _ = Suite(&tableRestoreSuite{})

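// tableRestoreSuiteBase prepares a mocked three-column table plus fake SQL and
// CSV data files that are shared by the table-restore and chunk-restore suites.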
type tableRestoreSuiteBase struct {
	tr  *TableRestore
	cfg *config.Config

	tableInfo *TidbTableInfo
	dbInfo    *TidbDBInfo
	tableMeta *mydump.MDTableMeta

	store storage.ExternalStorage
}

type tableRestoreSuite struct {
	tableRestoreSuiteBase
}

func (s *tableRestoreSuiteBase) SetUpSuite(c *C) {
	// Produce a mock table info

	p := parser.New()
	p.SetSQLMode(mysql.ModeANSIQuotes)
	se := tmock.NewContext()
	node, err := p.ParseOneStmt(`
		CREATE TABLE "table" (
			a INT,
			b INT,
			c INT,
			KEY (b)
		)
	`, "", "")
	c.Assert(err, IsNil)
	core, err := ddl.MockTableInfo(se, node.(*ast.CreateTableStmt), 0xabcdef)
	c.Assert(err, IsNil)
	core.State = model.StatePublic

	s.tableInfo = &TidbTableInfo{Name: "table", DB: "db", Core: core}
	s.dbInfo = &TidbDBInfo{
		Name:   "db",
		Tables: map[string]*TidbTableInfo{"table": s.tableInfo},
	}

	// Write some sample SQL dump

	fakeDataDir := c.MkDir()

	store, err := storage.NewLocalStorage(fakeDataDir)
	c.Assert(err, IsNil)
	s.store = store

	fakeDataFilesCount := 6
	fakeDataFilesContent := []byte("INSERT INTO `table` VALUES (1, 2, 3);")
	c.Assert(len(fakeDataFilesContent), Equals, 37)
	fakeDataFiles := make([]mydump.FileInfo, 0, fakeDataFilesCount)
	for i := 1; i <= fakeDataFilesCount; i++ {
		fakeFileName := fmt.Sprintf("db.table.%d.sql", i)
		fakeDataPath := filepath.Join(fakeDataDir, fakeFileName)
		err = ioutil.WriteFile(fakeDataPath, fakeDataFilesContent, 0644)
		c.Assert(err, IsNil)
		fakeDataFiles = append(fakeDataFiles, mydump.FileInfo{TableName: filter.Table{"db", "table"}, FileMeta: mydump.SourceFileMeta{Path: fakeFileName, Type: mydump.SourceTypeSQL, SortKey: fmt.Sprintf("%d", i), FileSize: 37}})
	}

	fakeCsvContent := []byte("1,2,3\r\n4,5,6\r\n")
	csvName := "db.table.99.csv"
	err = ioutil.WriteFile(filepath.Join(fakeDataDir, csvName), fakeCsvContent, 0644)
	c.Assert(err, IsNil)
	fakeDataFiles = append(fakeDataFiles, mydump.FileInfo{TableName: filter.Table{"db", "table"}, FileMeta: mydump.SourceFileMeta{Path: csvName, Type: mydump.SourceTypeCSV, SortKey: "99", FileSize: 14}})

	s.tableMeta = &mydump.MDTableMeta{
		DB:         "db",
		Name:       "table",
		TotalSize:  222,
		SchemaFile: mydump.FileInfo{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: mydump.SourceFileMeta{Path: "db.table-schema.sql", Type: mydump.SourceTypeTableSchema}},
		DataFiles:  fakeDataFiles,
	}
}

func (s *tableRestoreSuiteBase) SetUpTest(c *C) {
	// Collect into the test TableRestore structure
	var err error
	s.tr, err = NewTableRestore("`db`.`table`", s.tableMeta, s.dbInfo, s.tableInfo, &TableCheckpoint{})
	c.Assert(err, IsNil)

	s.cfg = config.NewConfig()
	s.cfg.Mydumper.BatchSize = 111
	s.cfg.App.TableConcurrency = 2
}

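// TestPopulateChunks splits the six 37-byte SQL files and the 14-byte CSV file
// into engines of roughly BatchSize (111) bytes each and checks the generated
// chunk checkpoints, including the running row-ID ranges.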
func (s *tableRestoreSuite) TestPopulateChunks(c *C) {
	failpoint.Enable("github.com/pingcap/tidb-lightning/lightning/restore/PopulateChunkTimestamp", "return(1234567897)")
	defer failpoint.Disable("github.com/pingcap/tidb-lightning/lightning/restore/PopulateChunkTimestamp")

	cp := &TableCheckpoint{
		Engines: make(map[int32]*EngineCheckpoint),
	}

	rc := &RestoreController{cfg: s.cfg, ioWorkers: worker.NewPool(context.Background(), 1, "io"), store: s.store}
	err := s.tr.populateChunks(context.Background(), rc, cp)
	c.Assert(err, IsNil)
	c.Assert(cp.Engines, DeepEquals, map[int32]*EngineCheckpoint{
		-1: {
			Status: CheckpointStatusLoaded,
		},
		0: {
			Status: CheckpointStatusLoaded,
			Chunks: []*ChunkCheckpoint{
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[0].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[0].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    37,
						PrevRowIDMax: 0,
						RowIDMax:     7, // 37 bytes with 3 columns can store at most 7 rows.
					},
					Timestamp: 1234567897,
				},
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[1].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[1].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    37,
						PrevRowIDMax: 7,
						RowIDMax:     14,
					},
					Timestamp: 1234567897,
				},
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[2].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[2].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    37,
						PrevRowIDMax: 14,
						RowIDMax:     21,
					},
					Timestamp: 1234567897,
				},
			},
		},
		1: {
			Status: CheckpointStatusLoaded,
			Chunks: []*ChunkCheckpoint{
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[3].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[3].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    37,
						PrevRowIDMax: 21,
						RowIDMax:     28,
					},
					Timestamp: 1234567897,
				},
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[4].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[4].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    37,
						PrevRowIDMax: 28,
						RowIDMax:     35,
					},
					Timestamp: 1234567897,
				},
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[5].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[5].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    37,
						PrevRowIDMax: 35,
						RowIDMax:     42,
					},
					Timestamp: 1234567897,
				},
			},
		},
		2: {
			Status: CheckpointStatusLoaded,
			Chunks: []*ChunkCheckpoint{
				{
					Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[6].FileMeta.Path, Offset: 0},
					FileMeta: s.tr.tableMeta.DataFiles[6].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    14,
						PrevRowIDMax: 42,
						RowIDMax:     46,
					},
					Timestamp: 1234567897,
				},
			},
		},
	})

	// Set csv.header to true; the column check then fails because the CSV
	// file's first row ("1,2,3") does not name any columns of the table.
	s.cfg.Mydumper.CSV.Header = true
	s.cfg.Mydumper.StrictFormat = true
	regionSize := s.cfg.Mydumper.MaxRegionSize
	s.cfg.Mydumper.MaxRegionSize = 5
	err = s.tr.populateChunks(context.Background(), rc, cp)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, `.*unknown columns in header \[1 2 3\]`)
	s.cfg.Mydumper.MaxRegionSize = regionSize
	s.cfg.Mydumper.CSV.Header = false
}

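// TestPopulateChunksCSVHeader feeds CSV files with full, partial, and reordered
// headers and checks the recorded column permutations and strict-format split points.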
func (s *tableRestoreSuite) TestPopulateChunksCSVHeader(c *C) {
	fakeDataDir := c.MkDir()
	store, err := storage.NewLocalStorage(fakeDataDir)
	c.Assert(err, IsNil)

	fakeDataFiles := make([]mydump.FileInfo, 0)

	fakeCsvContents := []string{
		// small full header
		"a,b,c\r\n1,2,3\r\n",
		// small partial header
		"b,c\r\n2,3\r\n",
		// big full header
		"a,b,c\r\n90000,80000,700000\r\n1000,2000,3000\r\n11,22,33\r\n3,4,5\r\n",
		// big full header unordered
		"c,a,b\r\n,1000,2000,3000\r\n11,22,33\r\n1000,2000,404\r\n3,4,5\r\n90000,80000,700000\r\n7999999,89999999,9999999\r\n",
		// big partial header
		"b,c\r\n2000001,30000001\r\n35231616,462424626\r\n62432,434898934\r\n",
	}
	total := 0
	for i, s := range fakeCsvContents {
		csvName := fmt.Sprintf("db.table.%02d.csv", i)
		err := ioutil.WriteFile(filepath.Join(fakeDataDir, csvName), []byte(s), 0644)
		c.Assert(err, IsNil)
		fakeDataFiles = append(fakeDataFiles, mydump.FileInfo{
			TableName: filter.Table{"db", "table"},
			FileMeta:  mydump.SourceFileMeta{Path: csvName, Type: mydump.SourceTypeCSV, SortKey: fmt.Sprintf("%02d", i), FileSize: int64(len(s))},
		})
		total += len(s)
	}
	tableMeta := &mydump.MDTableMeta{
		DB:         "db",
		Name:       "table",
		TotalSize:  int64(total),
		SchemaFile: mydump.FileInfo{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: mydump.SourceFileMeta{Path: "db.table-schema.sql", Type: mydump.SourceTypeTableSchema}},
		DataFiles:  fakeDataFiles,
	}

	failpoint.Enable("github.com/pingcap/tidb-lightning/lightning/restore/PopulateChunkTimestamp", "return(1234567897)")
	defer failpoint.Disable("github.com/pingcap/tidb-lightning/lightning/restore/PopulateChunkTimestamp")

	cp := &TableCheckpoint{
		Engines: make(map[int32]*EngineCheckpoint),
	}

	cfg := config.NewConfig()
	cfg.Mydumper.BatchSize = 100
	cfg.Mydumper.MaxRegionSize = 40

	cfg.Mydumper.CSV.Header = true
	cfg.Mydumper.StrictFormat = true
	rc := &RestoreController{cfg: cfg, ioWorkers: worker.NewPool(context.Background(), 1, "io"), store: store}

	tr, err := NewTableRestore("`db`.`table`", tableMeta, s.dbInfo, s.tableInfo, &TableCheckpoint{})
	c.Assert(err, IsNil)
	c.Assert(tr.populateChunks(context.Background(), rc, cp), IsNil)

	c.Assert(cp.Engines, DeepEquals, map[int32]*EngineCheckpoint{
		-1: {
			Status: CheckpointStatusLoaded,
		},
		0: {
			Status: CheckpointStatusLoaded,
			Chunks: []*ChunkCheckpoint{
				{
					Key:      ChunkCheckpointKey{Path: tableMeta.DataFiles[0].FileMeta.Path, Offset: 0},
					FileMeta: tableMeta.DataFiles[0].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    14,
						PrevRowIDMax: 0,
						RowIDMax:     4, // row-count estimate derived from the 14-byte file size.
					},
					Timestamp: 1234567897,
				},
				{
					Key:      ChunkCheckpointKey{Path: tableMeta.DataFiles[1].FileMeta.Path, Offset: 0},
					FileMeta: tableMeta.DataFiles[1].FileMeta,
					Chunk: mydump.Chunk{
						Offset:       0,
						EndOffset:    10,
						PrevRowIDMax: 4,
						RowIDMax:     7,
					},
					Timestamp: 1234567897,
				},
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[2].FileMeta.Path, Offset: 6},
					FileMeta:          tableMeta.DataFiles[2].FileMeta,
					ColumnPermutation: []int{0, 1, 2, -1},
					Chunk: mydump.Chunk{
						Offset:       6,
						EndOffset:    52,
						PrevRowIDMax: 7,
						RowIDMax:     20,
						Columns:      []string{"a", "b", "c"},
					},
					Timestamp: 1234567897,
				},
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[2].FileMeta.Path, Offset: 52},
					FileMeta:          tableMeta.DataFiles[2].FileMeta,
					ColumnPermutation: []int{0, 1, 2, -1},
					Chunk: mydump.Chunk{
						Offset:       52,
						EndOffset:    60,
						PrevRowIDMax: 20,
						RowIDMax:     22,
						Columns:      []string{"a", "b", "c"},
					},
					Timestamp: 1234567897,
				},
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[3].FileMeta.Path, Offset: 6},
					FileMeta:          tableMeta.DataFiles[3].FileMeta,
					ColumnPermutation: []int{1, 2, 0, -1},
					Chunk: mydump.Chunk{
						Offset:       6,
						EndOffset:    48,
						PrevRowIDMax: 22,
						RowIDMax:     35,
						Columns:      []string{"c", "a", "b"},
					},
					Timestamp: 1234567897,
				},
			},
		},
		1: {
			Status: CheckpointStatusLoaded,
			Chunks: []*ChunkCheckpoint{
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[3].FileMeta.Path, Offset: 48},
					FileMeta:          tableMeta.DataFiles[3].FileMeta,
					ColumnPermutation: []int{1, 2, 0, -1},
					Chunk: mydump.Chunk{
						Offset:       48,
						EndOffset:    101,
						PrevRowIDMax: 35,
						RowIDMax:     48,
						Columns:      []string{"c", "a", "b"},
					},
					Timestamp: 1234567897,
				},
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[3].FileMeta.Path, Offset: 101},
					FileMeta:          tableMeta.DataFiles[3].FileMeta,
					ColumnPermutation: []int{1, 2, 0, -1},
					Chunk: mydump.Chunk{
						Offset:       101,
						EndOffset:    102,
						PrevRowIDMax: 48,
						RowIDMax:     48,
						Columns:      []string{"c", "a", "b"},
					},
					Timestamp: 1234567897,
				},
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[4].FileMeta.Path, Offset: 4},
					FileMeta:          tableMeta.DataFiles[4].FileMeta,
					ColumnPermutation: []int{-1, 0, 1, -1},
					Chunk: mydump.Chunk{
						Offset:       4,
						EndOffset:    59,
						PrevRowIDMax: 48,
						RowIDMax:     61,
						Columns:      []string{"b", "c"},
					},
					Timestamp: 1234567897,
				},
			},
		},
		2: {
			Status: CheckpointStatusLoaded,
			Chunks: []*ChunkCheckpoint{
				{
					Key:               ChunkCheckpointKey{Path: tableMeta.DataFiles[4].FileMeta.Path, Offset: 59},
					FileMeta:          tableMeta.DataFiles[4].FileMeta,
					ColumnPermutation: []int{-1, 0, 1, -1},
					Chunk: mydump.Chunk{
						Offset:       59,
						EndOffset:    60,
						PrevRowIDMax: 61,
						RowIDMax:     61,
						Columns:      []string{"b", "c"},
					},
					Timestamp: 1234567897,
				},
			},
		},
	})
}

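// TestGetColumnsNames maps column permutations back to ordered column names,
// including the synthetic _tidb_rowid column.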
func (s *tableRestoreSuite) TestGetColumnsNames(c *C) {
	c.Assert(getColumnNames(s.tableInfo.Core, []int{0, 1, 2, -1}), DeepEquals, []string{"a", "b", "c"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, 0, 2, -1}), DeepEquals, []string{"b", "a", "c"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 0, 1, -1}), DeepEquals, []string{"b", "c"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{0, 1, -1, -1}), DeepEquals, []string{"a", "b"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, -1, 0, -1}), DeepEquals, []string{"c", "a"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 0, -1, -1}), DeepEquals, []string{"b"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, 2, 3, 0}), DeepEquals, []string{"_tidb_rowid", "a", "b", "c"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{1, 0, 2, 3}), DeepEquals, []string{"b", "a", "c", "_tidb_rowid"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 0, 2, 1}), DeepEquals, []string{"b", "_tidb_rowid", "c"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{2, -1, 0, 1}), DeepEquals, []string{"c", "_tidb_rowid", "a"})
	c.Assert(getColumnNames(s.tableInfo.Core, []int{-1, 1, -1, 0}), DeepEquals, []string{"_tidb_rowid", "b"})
}

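// TestInitializeColumns derives the column permutation from a CSV header (or
// from the table definition when the header is nil) and rejects unknown columns.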
func (s *tableRestoreSuite) TestInitializeColumns(c *C) {
	ccp := &ChunkCheckpoint{}
	c.Assert(s.tr.initializeColumns(nil, ccp), IsNil)
	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{0, 1, 2, -1})

	ccp.ColumnPermutation = nil
	c.Assert(s.tr.initializeColumns([]string{"b", "c", "a"}, ccp), IsNil)
	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{2, 0, 1, -1})

	ccp.ColumnPermutation = nil
	c.Assert(s.tr.initializeColumns([]string{"b"}, ccp), IsNil)
	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{-1, 0, -1, -1})

	ccp.ColumnPermutation = nil
	c.Assert(s.tr.initializeColumns([]string{"_tidb_rowid", "b", "a", "c"}, ccp), IsNil)
	c.Assert(ccp.ColumnPermutation, DeepEquals, []int{2, 1, 3, 0})

	ccp.ColumnPermutation = nil
	err := s.tr.initializeColumns([]string{"_tidb_rowid", "b", "a", "c", "d"}, ccp)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, `unknown columns in header \[d\]`)

	ccp.ColumnPermutation = nil
	err = s.tr.initializeColumns([]string{"e", "b", "c", "d"}, ccp)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, `unknown columns in header \[e d\]`)
}

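// TestCompareChecksumSuccess runs compareChecksum against a mocked ADMIN
// CHECKSUM result that matches the local checksum exactly.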
func (s *tableRestoreSuite) TestCompareChecksumSuccess(c *C) {
	db, mock, err := sqlmock.New()
	c.Assert(err, IsNil)

	mock.ExpectQuery("SELECT.*tikv_gc_life_time.*").
		WillReturnRows(sqlmock.NewRows([]string{"VARIABLE_VALUE"}).AddRow("10m"))
	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
		WithArgs("100h0m0s").
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectQuery("ADMIN CHECKSUM.*").
		WillReturnRows(
			sqlmock.NewRows([]string{"Db_name", "Table_name", "Checksum_crc64_xor", "Total_kvs", "Total_bytes"}).
				AddRow("db", "table", 1234567890, 12345, 1234567),
		)
	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
		WithArgs("10m").
		WillReturnResult(sqlmock.NewResult(2, 1))
	mock.ExpectClose()

	ctx := MockDoChecksumCtx(db)
	err = s.tr.compareChecksum(ctx, verification.MakeKVChecksum(1234567, 12345, 1234567890))
	c.Assert(err, IsNil)

	c.Assert(db.Close(), IsNil)
	c.Assert(mock.ExpectationsWereMet(), IsNil)
}

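// TestCompareChecksumFailure feeds a mismatched local checksum and expects a
// "checksum mismatched" error.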
func (s *tableRestoreSuite) TestCompareChecksumFailure(c *C) {
	db, mock, err := sqlmock.New()
	c.Assert(err, IsNil)

	mock.ExpectQuery("SELECT.*tikv_gc_life_time.*").
		WillReturnRows(sqlmock.NewRows([]string{"VARIABLE_VALUE"}).AddRow("10m"))
	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
		WithArgs("100h0m0s").
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectQuery("ADMIN CHECKSUM TABLE `db`\\.`table`").
		WillReturnRows(
			sqlmock.NewRows([]string{"Db_name", "Table_name", "Checksum_crc64_xor", "Total_kvs", "Total_bytes"}).
				AddRow("db", "table", 1234567890, 12345, 1234567),
		)
	mock.ExpectExec("UPDATE.*tikv_gc_life_time.*").
		WithArgs("10m").
		WillReturnResult(sqlmock.NewResult(2, 1))
	mock.ExpectClose()

	ctx := MockDoChecksumCtx(db)
	err = s.tr.compareChecksum(ctx, verification.MakeKVChecksum(9876543, 54321, 1357924680))
	c.Assert(err, ErrorMatches, "checksum mismatched.*")

	c.Assert(db.Close(), IsNil)
	c.Assert(mock.ExpectationsWereMet(), IsNil)
}

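// TestAnalyzeTable verifies that analyzeTable issues ANALYZE TABLE through the
// external TiDB glue.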
func (s *tableRestoreSuite) TestAnalyzeTable(c *C) {
	db, mock, err := sqlmock.New()
	c.Assert(err, IsNil)

	mock.ExpectExec("ANALYZE TABLE `db`\\.`table`").
		WillReturnResult(sqlmock.NewResult(1, 1))
	mock.ExpectClose()

	ctx := context.Background()
	defaultSQLMode, err := mysql.GetSQLMode(mysql.DefaultSQLMode)
	c.Assert(err, IsNil)
	g := glue.NewExternalTiDBGlue(db, defaultSQLMode)
	err = s.tr.analyzeTable(ctx, g)
	c.Assert(err, IsNil)

	c.Assert(db.Close(), IsNil)
	c.Assert(mock.ExpectationsWereMet(), IsNil)
}

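// TestImportKVSuccess expects importKV to close, import, and clean up the
// engine when every backend call succeeds.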
func (s *tableRestoreSuite) TestImportKVSuccess(c *C) {
	controller := gomock.NewController(c)
	defer controller.Finish()
	mockBackend := mock.NewMockBackend(controller)
	importer := kv.MakeBackend(mockBackend)
	chptCh := make(chan saveCp)
	defer close(chptCh)
	rc := &RestoreController{saveCpCh: chptCh}
	go func() {
		for range chptCh {
		}
	}()

	ctx := context.Background()
	engineUUID := uuid.New()

	mockBackend.EXPECT().
		CloseEngine(ctx, engineUUID).
		Return(nil)
	mockBackend.EXPECT().
		ImportEngine(ctx, engineUUID).
		Return(nil)
	mockBackend.EXPECT().
		CleanupEngine(ctx, engineUUID).
		Return(nil)

	closedEngine, err := importer.UnsafeCloseEngineWithUUID(ctx, "tag", engineUUID)
	c.Assert(err, IsNil)
	err = s.tr.importKV(ctx, closedEngine, rc, 1)
	c.Assert(err, IsNil)
}

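// TestImportKVFailure propagates an ImportEngine failure out of importKV, so
// no CleanupEngine call is expected.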
func (s *tableRestoreSuite) TestImportKVFailure(c *C) {
	controller := gomock.NewController(c)
	defer controller.Finish()
	mockBackend := mock.NewMockBackend(controller)
	importer := kv.MakeBackend(mockBackend)
	chptCh := make(chan saveCp)
	defer close(chptCh)
	rc := &RestoreController{saveCpCh: chptCh}
	go func() {
		for range chptCh {
		}
	}()

	ctx := context.Background()
	engineUUID := uuid.New()

	mockBackend.EXPECT().
		CloseEngine(ctx, engineUUID).
		Return(nil)
	mockBackend.EXPECT().
		ImportEngine(ctx, engineUUID).
		Return(errors.Annotate(context.Canceled, "fake import error"))

	closedEngine, err := importer.UnsafeCloseEngineWithUUID(ctx, "tag", engineUUID)
	c.Assert(err, IsNil)
	err = s.tr.importKV(ctx, closedEngine, rc, 1)
	c.Assert(err, ErrorMatches, "fake import error.*")
}

var _ = Suite(&chunkRestoreSuite{})

type chunkRestoreSuite struct {
	tableRestoreSuiteBase
	cr *chunkRestore
}

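// SetUpTest builds a chunkRestore over the second fake SQL data file.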
func (s *chunkRestoreSuite) SetUpTest(c *C) {
	s.tableRestoreSuiteBase.SetUpTest(c)

	ctx := context.Background()
	w := worker.NewPool(ctx, 5, "io")

	chunk := ChunkCheckpoint{
		Key:      ChunkCheckpointKey{Path: s.tr.tableMeta.DataFiles[1].FileMeta.Path, Offset: 0},
		FileMeta: s.tr.tableMeta.DataFiles[1].FileMeta,
		Chunk: mydump.Chunk{
			Offset:       0,
			EndOffset:    37,
			PrevRowIDMax: 18,
			RowIDMax:     36,
		},
	}

	var err error
	s.cr, err = newChunkRestore(context.Background(), 1, s.cfg, &chunk, w, s.store, nil)
	c.Assert(err, IsNil)
}

func (s *chunkRestoreSuite) TearDownTest(c *C) {
	s.cr.close()
}

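// TestDeliverLoopCancel cancels the context before anything is delivered and
// expects deliverLoop to exit with context.Canceled.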
func (s *chunkRestoreSuite) TestDeliverLoopCancel(c *C) {
	rc := &RestoreController{backend: kv.NewMockImporter(nil, "")}

	ctx, cancel := context.WithCancel(context.Background())
	kvsCh := make(chan []deliveredKVs)
	go cancel()
	_, err := s.cr.deliverLoop(ctx, kvsCh, s.tr, 0, nil, nil, rc)
	c.Assert(errors.Cause(err), Equals, context.Canceled)
}

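// TestDeliverLoopEmptyData delivers a single empty batch and expects
// deliverLoop to finish cleanly.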
func (s *chunkRestoreSuite) TestDeliverLoopEmptyData(c *C) {
	ctx := context.Background()

	// Open two mock engines.

	controller := gomock.NewController(c)
	defer controller.Finish()
	mockBackend := mock.NewMockBackend(controller)
	importer := kv.MakeBackend(mockBackend)

	mockBackend.EXPECT().OpenEngine(ctx, gomock.Any()).Return(nil).Times(2)
	mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).AnyTimes()
	mockWriter := mock.NewMockEngineWriter(controller)
	mockBackend.EXPECT().LocalWriter(ctx, gomock.Any(), int64(2048)).Return(mockWriter, nil).AnyTimes()
	mockWriter.EXPECT().
		AppendRows(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
		Return(nil).AnyTimes()

	dataEngine, err := importer.OpenEngine(ctx, s.tr.tableName, 0)
	c.Assert(err, IsNil)
	dataWriter, err := dataEngine.LocalWriter(ctx, 2048)
	c.Assert(err, IsNil)
	indexEngine, err := importer.OpenEngine(ctx, s.tr.tableName, -1)
	c.Assert(err, IsNil)
	indexWriter, err := indexEngine.LocalWriter(ctx, 2048)
	c.Assert(err, IsNil)

	// Deliver nothing.

	cfg := &config.Config{}
	rc := &RestoreController{cfg: cfg, backend: importer}

	kvsCh := make(chan []deliveredKVs, 1)
	kvsCh <- []deliveredKVs{}
	_, err = s.cr.deliverLoop(ctx, kvsCh, s.tr, 0, dataWriter, indexWriter, rc)
	c.Assert(err, IsNil)
}

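// TestDeliverLoop pushes one batch containing two data KVs and one index KV,
// expects the data and index rows to be appended as separate batches, and
// checks that the chunk checkpoint advances.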
func (s *chunkRestoreSuite) TestDeliverLoop(c *C) {
	ctx := context.Background()
	kvsCh := make(chan []deliveredKVs)
	mockCols := []string{"c1", "c2"}

	// Open two mock engines.

	controller := gomock.NewController(c)
	defer controller.Finish()
	mockBackend := mock.NewMockBackend(controller)
	importer := kv.MakeBackend(mockBackend)

	mockBackend.EXPECT().OpenEngine(ctx, gomock.Any()).Return(nil).Times(2)
	mockBackend.EXPECT().MakeEmptyRows().Return(kv.MakeRowsFromKvPairs(nil)).AnyTimes()
	mockWriter := mock.NewMockEngineWriter(controller)
	mockBackend.EXPECT().LocalWriter(ctx, gomock.Any(), int64(2048)).Return(mockWriter, nil).AnyTimes()

	dataEngine, err := importer.OpenEngine(ctx, s.tr.tableName, 0)
	c.Assert(err, IsNil)
	indexEngine, err := importer.OpenEngine(ctx, s.tr.tableName, -1)
	c.Assert(err, IsNil)

	dataWriter, err := dataEngine.LocalWriter(ctx, 2048)
	c.Assert(err, IsNil)
	indexWriter, err := indexEngine.LocalWriter(ctx, 2048)
	c.Assert(err, IsNil)

	// Set up the expected API calls to the data engine...

	mockWriter.EXPECT().
		AppendRows(ctx, s.tr.tableName, mockCols, gomock.Any(), kv.MakeRowsFromKvPairs([]common.KvPair{
			{
				Key: []byte("txxxxxxxx_ryyyyyyyy"),
				Val: []byte("value1"),
			},
			{
				Key: []byte("txxxxxxxx_rwwwwwwww"),
				Val: []byte("value2"),
			},
		})).
		Return(nil)

	// ... and the index engine.
	//
	// Note: This test assumes the data engine is written before the index engine.

	mockWriter.EXPECT().
		AppendRows(ctx, s.tr.tableName, mockCols, gomock.Any(), kv.MakeRowsFromKvPairs([]common.KvPair{
			{
				Key: []byte("txxxxxxxx_izzzzzzzz"),
				Val: []byte("index1"),
			},
		})).
		Return(nil)

	// Now actually start the delivery loop.

	saveCpCh := make(chan saveCp, 2)
	go func() {
		kvsCh <- []deliveredKVs{{
			kvs: kv.MakeRowFromKvPairs([]common.KvPair{
				{
					Key: []byte("txxxxxxxx_ryyyyyyyy"),
					Val: []byte("value1"),
				},
				{
					Key: []byte("txxxxxxxx_rwwwwwwww"),
					Val: []byte("value2"),
				},
				{
					Key: []byte("txxxxxxxx_izzzzzzzz"),
					Val: []byte("index1"),
				},
			}),
			columns: mockCols,
			offset:  12,
			rowID:   76,
		},
		}
		kvsCh <- []deliveredKVs{}
		close(kvsCh)
	}()

	cfg := &config.Config{}
	rc := &RestoreController{cfg: cfg, saveCpCh: saveCpCh, backend: importer}

	_, err = s.cr.deliverLoop(ctx, kvsCh, s.tr, 0, dataWriter, indexWriter, rc)
	c.Assert(err, IsNil)
	c.Assert(saveCpCh, HasLen, 2)
	c.Assert(s.cr.chunk.Chunk.Offset, Equals, int64(12))
	c.Assert(s.cr.chunk.Chunk.PrevRowIDMax, Equals, int64(76))
	c.Assert(s.cr.chunk.Checksum.SumKVS(), Equals, uint64(3))
}

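// TestEncodeLoop encodes the chunk's single INSERT row into two KV pairs and
// checks the delivered row ID and end offset.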
func (s *chunkRestoreSuite) TestEncodeLoop(c *C) {
	ctx := context.Background()
	kvsCh := make(chan []deliveredKVs, 2)
	deliverCompleteCh := make(chan deliverResult)
	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
		SQLMode:   s.cfg.TiDB.SQLMode,
		Timestamp: 1234567895,
	})
	c.Assert(err, IsNil)
	cfg := config.NewConfig()
	rc := &RestoreController{pauser: DeliverPauser, cfg: cfg}
	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
	c.Assert(err, IsNil)
	c.Assert(kvsCh, HasLen, 2)

	kvs := <-kvsCh
	c.Assert(kvs, HasLen, 1)
	c.Assert(kvs[0].kvs, HasLen, 2)
	c.Assert(kvs[0].rowID, Equals, int64(19))
	c.Assert(kvs[0].offset, Equals, int64(36))

	kvs = <-kvsCh
	c.Assert(len(kvs), Equals, 0)
}

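// TestEncodeLoopCanceled cancels the context immediately and expects
// encodeLoop to stop with context.Canceled before emitting any KVs.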
func (s *chunkRestoreSuite) TestEncodeLoopCanceled(c *C) {
	ctx, cancel := context.WithCancel(context.Background())
	kvsCh := make(chan []deliveredKVs)
	deliverCompleteCh := make(chan deliverResult)
	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
		SQLMode:   s.cfg.TiDB.SQLMode,
		Timestamp: 1234567896,
	})
	c.Assert(err, IsNil)

	go cancel()
	cfg := config.NewConfig()
	rc := &RestoreController{pauser: DeliverPauser, cfg: cfg}
	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
	c.Assert(errors.Cause(err), Equals, context.Canceled)
	c.Assert(kvsCh, HasLen, 0)
}

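// TestEncodeLoopForcedError closes the parser up front so encodeLoop fails
// with a "file already closed" read error.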
func (s *chunkRestoreSuite) TestEncodeLoopForcedError(c *C) {
	ctx := context.Background()
	kvsCh := make(chan []deliveredKVs, 2)
	deliverCompleteCh := make(chan deliverResult)
	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
		SQLMode:   s.cfg.TiDB.SQLMode,
		Timestamp: 1234567897,
	})
	c.Assert(err, IsNil)

	// Close the chunk so that reading it results in the "file already closed" error.
	s.cr.parser.Close()

	cfg := config.NewConfig()
	rc := &RestoreController{pauser: DeliverPauser, cfg: cfg}
	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
	c.Assert(err, ErrorMatches, `in file .*[/\\]?db\.table\.2\.sql:0 at offset 0:.*file already closed`)
	c.Assert(kvsCh, HasLen, 0)
}

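// TestEncodeLoopDeliverErrored simulates a failed delivery and expects
// encodeLoop to surface the deliver error.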
func (s *chunkRestoreSuite) TestEncodeLoopDeliverErrored(c *C) {
	ctx := context.Background()
	kvsCh := make(chan []deliveredKVs)
	deliverCompleteCh := make(chan deliverResult)
	kvEncoder, err := kv.NewTableKVEncoder(s.tr.encTable, &kv.SessionOptions{
		SQLMode:   s.cfg.TiDB.SQLMode,
		Timestamp: 1234567898,
	})
	c.Assert(err, IsNil)

	go func() {
		deliverCompleteCh <- deliverResult{
			err: errors.New("fake deliver error"),
		}
	}()
	cfg := config.NewConfig()
	rc := &RestoreController{pauser: DeliverPauser, cfg: cfg}
	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
	c.Assert(err, ErrorMatches, "fake deliver error")
	c.Assert(kvsCh, HasLen, 0)
}

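// TestEncodeLoopColumnsMismatch swaps in a four-column CSV parser and expects
// a column count mismatch error against the three-column table.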
func (s *chunkRestoreSuite) TestEncodeLoopColumnsMismatch(c *C) {
	dir := c.MkDir()
	fileName := "db.table.000.csv"
	err := ioutil.WriteFile(filepath.Join(dir, fileName), []byte("1,2,3,4\r\n4,5,6,7\r\n"), 0644)
	c.Assert(err, IsNil)

	store, err := storage.NewLocalStorage(dir)
	c.Assert(err, IsNil)

	ctx := context.Background()
	cfg := config.NewConfig()
	rc := &RestoreController{pauser: DeliverPauser, cfg: cfg}

	reader, err := store.Open(ctx, fileName)
	c.Assert(err, IsNil)
	w := worker.NewPool(ctx, 5, "io")
	p := mydump.NewCSVParser(&cfg.Mydumper.CSV, reader, 111, w, false)

	err = s.cr.parser.Close()
	c.Assert(err, IsNil)
	s.cr.parser = p

	kvsCh := make(chan []deliveredKVs, 2)
	deliverCompleteCh := make(chan deliverResult)
	kvEncoder, err := kv.NewTiDBBackend(nil, config.ReplaceOnDup).NewEncoder(
		s.tr.encTable,
		&kv.SessionOptions{
			SQLMode:   s.cfg.TiDB.SQLMode,
			Timestamp: 1234567895,
		})
	c.Assert(err, IsNil)

	_, _, err = s.cr.encodeLoop(ctx, kvsCh, s.tr, s.tr.logger, kvEncoder, deliverCompleteCh, rc)
	c.Assert(err, ErrorMatches, "in file db.table.2.sql:0 at offset 8: column count mismatch, expected 3, got 4")
	c.Assert(kvsCh, HasLen, 0)
}

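// TestRestore drives a full chunk restore against the mock importer client and
// checks the expected WriteEngine call sequence for both engines.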
func (s *chunkRestoreSuite) TestRestore(c *C) {
	ctx := context.Background()

	// Open two mock engines.

	controller := gomock.NewController(c)
	defer controller.Finish()
	mockClient := mock.NewMockImportKVClient(controller)
	mockDataWriter := mock.NewMockImportKV_WriteEngineClient(controller)
	mockIndexWriter := mock.NewMockImportKV_WriteEngineClient(controller)
	importer := kv.NewMockImporter(mockClient, "127.0.0.1:2379")

	mockClient.EXPECT().OpenEngine(ctx, gomock.Any()).Return(nil, nil)
	mockClient.EXPECT().OpenEngine(ctx, gomock.Any()).Return(nil, nil)

	dataEngine, err := importer.OpenEngine(ctx, s.tr.tableName, 0)
	c.Assert(err, IsNil)
	indexEngine, err := importer.OpenEngine(ctx, s.tr.tableName, -1)
	c.Assert(err, IsNil)
	dataWriter, err := dataEngine.LocalWriter(ctx, 2048)
	c.Assert(err, IsNil)
	indexWriter, err := indexEngine.LocalWriter(ctx, 2048)
	c.Assert(err, IsNil)

	// Expected API sequence.
	// (We don't care about the actual content here; that is covered by the integration tests.)

	mockClient.EXPECT().WriteEngine(ctx).Return(mockDataWriter, nil)
	mockDataWriter.EXPECT().Send(gomock.Any()).Return(nil)
	mockDataWriter.EXPECT().Send(gomock.Any()).DoAndReturn(func(req *import_kvpb.WriteEngineRequest) error {
		c.Assert(req.GetBatch().GetMutations(), HasLen, 1)
		return nil
	})
	mockDataWriter.EXPECT().CloseAndRecv().Return(nil, nil)

	mockClient.EXPECT().WriteEngine(ctx).Return(mockIndexWriter, nil)
	mockIndexWriter.EXPECT().Send(gomock.Any()).Return(nil)
	mockIndexWriter.EXPECT().Send(gomock.Any()).DoAndReturn(func(req *import_kvpb.WriteEngineRequest) error {
		c.Assert(req.GetBatch().GetMutations(), HasLen, 1)
		return nil
	})
	mockIndexWriter.EXPECT().CloseAndRecv().Return(nil, nil)

	// Now actually start the restore loop.

	saveCpCh := make(chan saveCp, 2)
	err = s.cr.restore(ctx, s.tr, 0, dataWriter, indexWriter, &RestoreController{
		cfg:      s.cfg,
		saveCpCh: saveCpCh,
		backend:  importer,
		pauser:   DeliverPauser,
	})
	c.Assert(err, IsNil)
	c.Assert(saveCpCh, HasLen, 2)
}

var _ = Suite(&restoreSchemaSuite{})

type restoreSchemaSuite struct {
	ctx        context.Context
	rc         *RestoreController
	controller *gomock.Controller
}

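// SetUpSuite writes a fake dump containing one database schema, eight table
// schemas, and eight view schemas, then loads it into a RestoreController.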
func (s *restoreSchemaSuite) SetUpSuite(c *C) {
	ctx := context.Background()
	fakeDataDir := c.MkDir()
	store, err := storage.NewLocalStorage(fakeDataDir)
	c.Assert(err, IsNil)
	// The database schema file to restore.
	fakeDBName := "fakedb"
	// File names must follow `mydump.defaultFileRouteRules`, which matches files like '{schema}-schema-create.sql'.
	fakeFileName := fmt.Sprintf("%s-schema-create.sql", fakeDBName)
	err = store.Write(ctx, fakeFileName, []byte(fmt.Sprintf("CREATE DATABASE %s;", fakeDBName)))
	c.Assert(err, IsNil)
	// The table schema files to restore.
	fakeTableFilesCount := 8
	for i := 1; i <= fakeTableFilesCount; i++ {
		fakeTableName := fmt.Sprintf("tbl%d", i)
		// File names must follow `mydump.defaultFileRouteRules`, which matches files like '{schema}.{table}-schema.sql'.
		fakeFileName := fmt.Sprintf("%s.%s-schema.sql", fakeDBName, fakeTableName)
		fakeFileContent := []byte(fmt.Sprintf("CREATE TABLE %s(i TINYINT);", fakeTableName))
		err = store.Write(ctx, fakeFileName, fakeFileContent)
		c.Assert(err, IsNil)
	}
	// The view schema files to restore.
	fakeViewFilesCount := 8
	for i := 1; i <= fakeViewFilesCount; i++ {
		fakeViewName := fmt.Sprintf("tbl%d", i)
		// File names must follow `mydump.defaultFileRouteRules`, which matches files like '{schema}.{table}-schema-view.sql'.
		fakeFileName := fmt.Sprintf("%s.%s-schema-view.sql", fakeDBName, fakeViewName)
		fakeFileContent := []byte(fmt.Sprintf("CREATE ALGORITHM=UNDEFINED VIEW `%s` (`i`) AS SELECT `i` FROM `%s`.`%s`;", fakeViewName, fakeDBName, fmt.Sprintf("tbl%d", i)))
		err = store.Write(ctx, fakeFileName, fakeFileContent)
		c.Assert(err, IsNil)
	}
	cfg := config.NewConfig()
	cfg.Mydumper.NoSchema = false
	cfg.Mydumper.DefaultFileRules = true
	cfg.Mydumper.CharacterSet = "utf8mb4"
	cfg.App.RegionConcurrency = 8
	mydumpLoader, err := mydump.NewMyDumpLoaderWithStore(ctx, cfg, store)
	c.Assert(err, IsNil)
	s.rc = &RestoreController{
		cfg:           cfg,
		store:         store,
		dbMetas:       mydumpLoader.GetDatabases(),
		checkpointsDB: &checkpoints.NullCheckpointsDB{},
	}
}

func (s *restoreSchemaSuite) SetUpTest(c *C) {
	s.controller, s.ctx = gomock.WithContext(context.Background(), c)
	mockBackend := mock.NewMockBackend(s.controller)
	// We don't care about the results of these calls.
	mockBackend.EXPECT().
		FetchRemoteTableModels(gomock.Any(), gomock.Any()).
		AnyTimes().
		Return(make([]*model.TableInfo, 0), nil)
	s.rc.backend = kv.MakeBackend(mockBackend)
	mockSQLExecutor := mock.NewMockSQLExecutor(s.controller)
	mockSQLExecutor.EXPECT().
		ExecuteWithLog(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
		AnyTimes().
		Return(nil)
	mockSession := mock.NewMockSession(s.controller)
	mockSession.EXPECT().
		Close().
		AnyTimes().
		Return()
	mockSession.EXPECT().
		Execute(gomock.Any(), gomock.Any()).
		AnyTimes().
		Return(nil, nil)
	mockTiDBGlue := mock.NewMockGlue(s.controller)
	mockTiDBGlue.EXPECT().
		GetSQLExecutor().
		AnyTimes().
		Return(mockSQLExecutor)
	mockTiDBGlue.EXPECT().
		GetSession(gomock.Any()).
		AnyTimes().
		Return(mockSession, nil)
	mockTiDBGlue.EXPECT().
		OwnsSQLExecutor().
		AnyTimes().
		Return(true)
	p := parser.New()
	mockTiDBGlue.EXPECT().
		GetParser().
		AnyTimes().
		Return(p)
	s.rc.tidbGlue = mockTiDBGlue
}

func (s *restoreSchemaSuite) TearDownTest(c *C) {
	s.rc.Close()
	s.controller.Finish()
}

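// TestRestoreSchemaSuccessful expects restoreSchema to succeed with the fully
// mocked glue from SetUpTest.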
func (s *restoreSchemaSuite) TestRestoreSchemaSuccessful(c *C) {
	err := s.rc.restoreSchema(s.ctx)
	c.Assert(err, IsNil)
}

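// TestRestoreSchemaFailed injects an error into Session.Execute and expects
// restoreSchema to return it.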
func (s *restoreSchemaSuite) TestRestoreSchemaFailed(c *C) {
	injectErr := errors.New("something wrong")
	mockSession := mock.NewMockSession(s.controller)
	mockSession.EXPECT().
		Close().
		AnyTimes().
		Return()
	mockSession.EXPECT().
		Execute(gomock.Any(), gomock.Any()).
		AnyTimes().
		Return(nil, injectErr)
	mockTiDBGlue := mock.NewMockGlue(s.controller)
	mockTiDBGlue.EXPECT().
		GetSession(gomock.Any()).
		AnyTimes().
		Return(mockSession, nil)
	s.rc.tidbGlue = mockTiDBGlue
	err := s.rc.restoreSchema(s.ctx)
	c.Assert(err, NotNil)
	c.Assert(errors.ErrorEqual(err, injectErr), IsTrue)
}

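// TestRestoreSchemaContextCancel cancels the context from inside a mocked
// Execute call and expects restoreSchema to return the context error.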
func (s *restoreSchemaSuite) TestRestoreSchemaContextCancel(c *C) {
	childCtx, cancel := context.WithCancel(s.ctx)
	mockSession := mock.NewMockSession(s.controller)
	mockSession.EXPECT().
		Close().
		AnyTimes().
		Return()
	mockSession.EXPECT().
		Execute(gomock.Any(), gomock.Any()).
		AnyTimes().
		Do(func(context.Context, string) { cancel() }).
		Return(nil, nil)
	mockTiDBGlue := mock.NewMockGlue(s.controller)
	mockTiDBGlue.EXPECT().
		GetSession(gomock.Any()).
		AnyTimes().
		Return(mockSession, nil)
	s.rc.tidbGlue = mockTiDBGlue
	err := s.rc.restoreSchema(childCtx)
	cancel()
	c.Assert(err, NotNil)
	c.Assert(err, Equals, childCtx.Err())
}