github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/utils/schema_test.go (about)

     1  // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
     2  
     3  package utils
     4  
     5  import (
     6  	"context"
     7  	"encoding/json"
     8  	"fmt"
     9  
    10  	"github.com/golang/protobuf/proto"
    11  
    12  	"github.com/pingcap/br/pkg/storage"
    13  
    14  	"github.com/pingcap/br/pkg/metautil"
    15  
    16  	. "github.com/pingcap/check"
    17  	backuppb "github.com/pingcap/kvproto/pkg/backup"
    18  	"github.com/pingcap/parser/model"
    19  	"github.com/pingcap/tidb/statistics/handle"
    20  	"github.com/pingcap/tidb/tablecodec"
    21  )
    22  
// testSchemaSuite exercises LoadBackupTables against backup metadata
// written to a local ExternalStorage backend.
type testSchemaSuite struct {
	// store receives the serialized BackupMeta in each test; it is
	// initialized once in SetUpSuite with a temp-dir-backed local storage.
	store storage.ExternalStorage
}

// Register the suite with gocheck so its Test*/Benchmark* methods are run.
var _ = Suite(&testSchemaSuite{})
    28  
    29  func (r *testSchemaSuite) SetUpSuite(c *C) {
    30  	var err error
    31  	base := c.MkDir()
    32  	r.store, err = storage.NewLocalStorage(base)
    33  	c.Assert(err, IsNil)
    34  }
    35  
    36  func mockBackupMeta(mockSchemas []*backuppb.Schema, mockFiles []*backuppb.File) *backuppb.BackupMeta {
    37  	return &backuppb.BackupMeta{
    38  		Files:   mockFiles,
    39  		Schemas: mockSchemas,
    40  	}
    41  }
    42  
    43  func (r *testSchemaSuite) TestLoadBackupMeta(c *C) {
    44  	tblName := model.NewCIStr("t1")
    45  	dbName := model.NewCIStr("test")
    46  	tblID := int64(123)
    47  	mockTbl := &model.TableInfo{
    48  		ID:   tblID,
    49  		Name: tblName,
    50  	}
    51  	mockStats := handle.JSONTable{
    52  		DatabaseName: dbName.String(),
    53  		TableName:    tblName.String(),
    54  	}
    55  	mockDB := model.DBInfo{
    56  		ID:   1,
    57  		Name: dbName,
    58  		Tables: []*model.TableInfo{
    59  			mockTbl,
    60  		},
    61  	}
    62  	dbBytes, err := json.Marshal(mockDB)
    63  	c.Assert(err, IsNil)
    64  	tblBytes, err := json.Marshal(mockTbl)
    65  	c.Assert(err, IsNil)
    66  	statsBytes, err := json.Marshal(mockStats)
    67  	c.Assert(err, IsNil)
    68  
    69  	mockSchemas := []*backuppb.Schema{
    70  		{
    71  			Db:    dbBytes,
    72  			Table: tblBytes,
    73  			Stats: statsBytes,
    74  		},
    75  	}
    76  
    77  	mockFiles := []*backuppb.File{
    78  		// should include 1.sst
    79  		{
    80  			Name:     "1.sst",
    81  			StartKey: tablecodec.EncodeRowKey(tblID, []byte("a")),
    82  			EndKey:   tablecodec.EncodeRowKey(tblID+1, []byte("a")),
    83  		},
    84  		// shouldn't include 2.sst
    85  		{
    86  			Name:     "2.sst",
    87  			StartKey: tablecodec.EncodeRowKey(tblID-1, []byte("a")),
    88  			EndKey:   tablecodec.EncodeRowKey(tblID, []byte("a")),
    89  		},
    90  	}
    91  
    92  	meta := mockBackupMeta(mockSchemas, mockFiles)
    93  	data, err := proto.Marshal(meta)
    94  	c.Assert(err, IsNil)
    95  
    96  	ctx := context.Background()
    97  	err = r.store.WriteFile(ctx, metautil.MetaFile, data)
    98  	c.Assert(err, IsNil)
    99  
   100  	dbs, err := LoadBackupTables(ctx, metautil.NewMetaReader(meta, r.store))
   101  	tbl := dbs[dbName.String()].GetTable(tblName.String())
   102  	c.Assert(err, IsNil)
   103  	c.Assert(tbl.Files, HasLen, 1)
   104  	c.Assert(tbl.Files[0].Name, Equals, "1.sst")
   105  }
   106  
   107  func (r *testSchemaSuite) TestLoadBackupMetaPartionTable(c *C) {
   108  	tblName := model.NewCIStr("t1")
   109  	dbName := model.NewCIStr("test")
   110  	tblID := int64(123)
   111  	partID1 := int64(124)
   112  	partID2 := int64(125)
   113  	mockTbl := &model.TableInfo{
   114  		ID:   tblID,
   115  		Name: tblName,
   116  		Partition: &model.PartitionInfo{
   117  			Definitions: []model.PartitionDefinition{
   118  				{ID: partID1},
   119  				{ID: partID2},
   120  			},
   121  		},
   122  	}
   123  	mockStats := handle.JSONTable{
   124  		DatabaseName: dbName.String(),
   125  		TableName:    tblName.String(),
   126  	}
   127  	mockDB := model.DBInfo{
   128  		ID:   1,
   129  		Name: dbName,
   130  		Tables: []*model.TableInfo{
   131  			mockTbl,
   132  		},
   133  	}
   134  	dbBytes, err := json.Marshal(mockDB)
   135  	c.Assert(err, IsNil)
   136  	tblBytes, err := json.Marshal(mockTbl)
   137  	c.Assert(err, IsNil)
   138  	statsBytes, err := json.Marshal(mockStats)
   139  	c.Assert(err, IsNil)
   140  
   141  	mockSchemas := []*backuppb.Schema{
   142  		{
   143  			Db:    dbBytes,
   144  			Table: tblBytes,
   145  			Stats: statsBytes,
   146  		},
   147  	}
   148  
   149  	mockFiles := []*backuppb.File{
   150  		// should include 1.sst - 3.sst
   151  		{
   152  			Name:     "1.sst",
   153  			StartKey: tablecodec.EncodeRowKey(partID1, []byte("a")),
   154  			EndKey:   tablecodec.EncodeRowKey(partID1, []byte("b")),
   155  		},
   156  		{
   157  			Name:     "2.sst",
   158  			StartKey: tablecodec.EncodeRowKey(partID1, []byte("b")),
   159  			EndKey:   tablecodec.EncodeRowKey(partID2, []byte("a")),
   160  		},
   161  		{
   162  			Name:     "3.sst",
   163  			StartKey: tablecodec.EncodeRowKey(partID2, []byte("a")),
   164  			EndKey:   tablecodec.EncodeRowKey(partID2+1, []byte("b")),
   165  		},
   166  		// shouldn't include 4.sst
   167  		{
   168  			Name:     "4.sst",
   169  			StartKey: tablecodec.EncodeRowKey(tblID-1, []byte("a")),
   170  			EndKey:   tablecodec.EncodeRowKey(tblID, []byte("a")),
   171  		},
   172  	}
   173  
   174  	meta := mockBackupMeta(mockSchemas, mockFiles)
   175  
   176  	data, err := proto.Marshal(meta)
   177  	c.Assert(err, IsNil)
   178  
   179  	ctx := context.Background()
   180  	err = r.store.WriteFile(ctx, metautil.MetaFile, data)
   181  	c.Assert(err, IsNil)
   182  
   183  	dbs, err := LoadBackupTables(ctx, metautil.NewMetaReader(meta, r.store))
   184  	tbl := dbs[dbName.String()].GetTable(tblName.String())
   185  	c.Assert(err, IsNil)
   186  	c.Assert(tbl.Files, HasLen, 3)
   187  	contains := func(name string) bool {
   188  		for i := range tbl.Files {
   189  			if tbl.Files[i].Name == name {
   190  				return true
   191  			}
   192  		}
   193  		return false
   194  	}
   195  	c.Assert(contains("1.sst"), IsTrue)
   196  	c.Assert(contains("2.sst"), IsTrue)
   197  	c.Assert(contains("3.sst"), IsTrue)
   198  }
   199  
   200  func buildTableAndFiles(name string, tableID, fileCount int) (*model.TableInfo, []*backuppb.File) {
   201  	tblName := model.NewCIStr(name)
   202  	tblID := int64(tableID)
   203  	mockTbl := &model.TableInfo{
   204  		ID:   tblID,
   205  		Name: tblName,
   206  	}
   207  
   208  	mockFiles := make([]*backuppb.File, 0, fileCount)
   209  	for i := 0; i < fileCount; i++ {
   210  		mockFiles = append(mockFiles, &backuppb.File{
   211  			Name:     fmt.Sprintf("%d-%d.sst", tableID, i),
   212  			StartKey: tablecodec.EncodeRowKey(tblID, []byte(fmt.Sprintf("%09d", i))),
   213  			EndKey:   tablecodec.EncodeRowKey(tblID, []byte(fmt.Sprintf("%09d", i+1))),
   214  		})
   215  	}
   216  	return mockTbl, mockFiles
   217  }
   218  
   219  func buildBenchmarkBackupmeta(c *C, dbName string, tableCount, fileCountPerTable int) *backuppb.BackupMeta {
   220  	mockFiles := make([]*backuppb.File, 0, tableCount*fileCountPerTable)
   221  	mockSchemas := make([]*backuppb.Schema, 0, tableCount)
   222  	for i := 1; i <= tableCount; i++ {
   223  		mockTbl, files := buildTableAndFiles(fmt.Sprintf("mock%d", i), i, fileCountPerTable)
   224  		mockFiles = append(mockFiles, files...)
   225  
   226  		mockDB := model.DBInfo{
   227  			ID:   1,
   228  			Name: model.NewCIStr(dbName),
   229  			Tables: []*model.TableInfo{
   230  				mockTbl,
   231  			},
   232  		}
   233  		dbBytes, err := json.Marshal(mockDB)
   234  		c.Assert(err, IsNil)
   235  		tblBytes, err := json.Marshal(mockTbl)
   236  		c.Assert(err, IsNil)
   237  		mockSchemas = append(mockSchemas, &backuppb.Schema{
   238  			Db:    dbBytes,
   239  			Table: tblBytes,
   240  		})
   241  	}
   242  	return mockBackupMeta(mockSchemas, mockFiles)
   243  }
   244  
   245  // Run `go test github.com/pingcap/br/pkg/utils -check.b -test.v` to get benchmark result.
   246  func (r *testSchemaSuite) BenchmarkLoadBackupMeta64(c *C) {
   247  	meta := buildBenchmarkBackupmeta(c, "bench", 64, 64)
   248  	c.ResetTimer()
   249  	for i := 0; i < c.N; i++ {
   250  		data, err := proto.Marshal(meta)
   251  		c.Assert(err, IsNil)
   252  
   253  		ctx := context.Background()
   254  		err = r.store.WriteFile(ctx, metautil.MetaFile, data)
   255  		c.Assert(err, IsNil)
   256  
   257  		dbs, err := LoadBackupTables(ctx, metautil.NewMetaReader(meta, r.store))
   258  		c.Assert(err, IsNil)
   259  		c.Assert(dbs, HasLen, 1)
   260  		c.Assert(dbs, HasKey, "bench")
   261  		c.Assert(dbs["bench"].Tables, HasLen, 64)
   262  	}
   263  }
   264  
   265  func (r *testSchemaSuite) BenchmarkLoadBackupMeta1024(c *C) {
   266  	meta := buildBenchmarkBackupmeta(c, "bench", 1024, 64)
   267  	c.ResetTimer()
   268  	for i := 0; i < c.N; i++ {
   269  		data, err := proto.Marshal(meta)
   270  		c.Assert(err, IsNil)
   271  
   272  		ctx := context.Background()
   273  		err = r.store.WriteFile(ctx, metautil.MetaFile, data)
   274  		c.Assert(err, IsNil)
   275  
   276  		dbs, err := LoadBackupTables(ctx, metautil.NewMetaReader(meta, r.store))
   277  		c.Assert(err, IsNil)
   278  		c.Assert(dbs, HasLen, 1)
   279  		c.Assert(dbs, HasKey, "bench")
   280  		c.Assert(dbs["bench"].Tables, HasLen, 1024)
   281  	}
   282  }
   283  
   284  func (r *testSchemaSuite) BenchmarkLoadBackupMeta10240(c *C) {
   285  	meta := buildBenchmarkBackupmeta(c, "bench", 10240, 64)
   286  	c.ResetTimer()
   287  	for i := 0; i < c.N; i++ {
   288  		data, err := proto.Marshal(meta)
   289  		c.Assert(err, IsNil)
   290  
   291  		ctx := context.Background()
   292  		err = r.store.WriteFile(ctx, metautil.MetaFile, data)
   293  		c.Assert(err, IsNil)
   294  
   295  		dbs, err := LoadBackupTables(ctx, metautil.NewMetaReader(meta, r.store))
   296  		c.Assert(err, IsNil)
   297  		c.Assert(dbs, HasLen, 1)
   298  		c.Assert(dbs, HasKey, "bench")
   299  		c.Assert(dbs["bench"].Tables, HasLen, 10240)
   300  	}
   301  }