github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/backup/client_test.go (about)

     1  // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
     2  
     3  package backup_test
     4  
     5  import (
     6  	"context"
     7  	"math"
     8  	"testing"
     9  	"time"
    10  
    11  	. "github.com/pingcap/check"
    12  	backuppb "github.com/pingcap/kvproto/pkg/backup"
    13  	"github.com/pingcap/kvproto/pkg/errorpb"
    14  	"github.com/pingcap/parser/model"
    15  	"github.com/pingcap/tidb/kv"
    16  	"github.com/pingcap/tidb/tablecodec"
    17  	"github.com/pingcap/tidb/types"
    18  	"github.com/pingcap/tidb/util/codec"
    19  	"github.com/tikv/client-go/v2/oracle"
    20  	"github.com/tikv/client-go/v2/testutils"
    21  	"github.com/tikv/client-go/v2/tikv"
    22  	"github.com/tikv/client-go/v2/txnkv/txnlock"
    23  	pd "github.com/tikv/pd/client"
    24  
    25  	"github.com/pingcap/br/pkg/backup"
    26  	"github.com/pingcap/br/pkg/conn"
    27  	"github.com/pingcap/br/pkg/pdutil"
    28  	"github.com/pingcap/br/pkg/storage"
    29  )
    30  
// testBackup is the gocheck suite fixture shared by all backup client tests.
type testBackup struct {
	ctx    context.Context    // root context for every request the suite issues
	cancel context.CancelFunc // cancels ctx; paired with the WithCancel in SetUpSuite

	mockPDClient pd.Client      // mock PD client from testutils.NewMockTiKV
	backupClient *backup.Client // client under test, constructed in SetUpSuite
}

// Register the suite with gocheck so its Test* methods run under TestT.
var _ = Suite(&testBackup{})
    40  
// TestT hooks the registered gocheck suites into the standard "go test" runner.
func TestT(t *testing.T) {
	TestingT(t)
}
    44  
// SetUpSuite wires the fixture together once before any test runs: it starts
// a mock TiKV/PD cluster, wraps the mock PD client in a conn.Mgr, and builds
// the backup.Client under test on top of it.
func (r *testBackup) SetUpSuite(c *C) {
	_, _, pdClient, err := testutils.NewMockTiKV("", nil)
	c.Assert(err, IsNil)
	r.mockPDClient = pdClient
	r.ctx, r.cancel = context.WithCancel(context.Background())
	// The Mgr only needs a PD client and an HTTP address list here; the
	// address is never actually dialed, so a placeholder value suffices.
	mockMgr := &conn.Mgr{PdController: &pdutil.PdController{}}
	mockMgr.SetPDClient(r.mockPDClient)
	mockMgr.SetHTTP([]string{"test"}, nil)
	r.backupClient, err = backup.NewBackupClient(r.ctx, mockMgr)
	c.Assert(err, IsNil)
}
    56  
    57  func (r *testBackup) TestGetTS(c *C) {
    58  	var (
    59  		err error
    60  		// mockPDClient' physical ts and current ts will have deviation
    61  		// so make this deviation tolerance 100ms
    62  		deviation = 100
    63  	)
    64  
    65  	// timeago not work
    66  	expectedDuration := 0
    67  	currentTS := time.Now().UnixNano() / int64(time.Millisecond)
    68  	ts, err := r.backupClient.GetTS(r.ctx, 0, 0)
    69  	c.Assert(err, IsNil)
    70  	pdTS := oracle.ExtractPhysical(ts)
    71  	duration := int(currentTS - pdTS)
    72  	c.Assert(duration, Greater, expectedDuration-deviation)
    73  	c.Assert(duration, Less, expectedDuration+deviation)
    74  
    75  	// timeago = "1.5m"
    76  	expectedDuration = 90000
    77  	currentTS = time.Now().UnixNano() / int64(time.Millisecond)
    78  	ts, err = r.backupClient.GetTS(r.ctx, 90*time.Second, 0)
    79  	c.Assert(err, IsNil)
    80  	pdTS = oracle.ExtractPhysical(ts)
    81  	duration = int(currentTS - pdTS)
    82  	c.Assert(duration, Greater, expectedDuration-deviation)
    83  	c.Assert(duration, Less, expectedDuration+deviation)
    84  
    85  	// timeago = "-1m"
    86  	_, err = r.backupClient.GetTS(r.ctx, -time.Minute, 0)
    87  	c.Assert(err, ErrorMatches, "negative timeago is not allowed.*")
    88  
    89  	// timeago = "1000000h" overflows
    90  	_, err = r.backupClient.GetTS(r.ctx, 1000000*time.Hour, 0)
    91  	c.Assert(err, ErrorMatches, ".*backup ts overflow.*")
    92  
    93  	// timeago = "10h" exceed GCSafePoint
    94  	p, l, err := r.mockPDClient.GetTS(r.ctx)
    95  	c.Assert(err, IsNil)
    96  	now := oracle.ComposeTS(p, l)
    97  	_, err = r.mockPDClient.UpdateGCSafePoint(r.ctx, now)
    98  	c.Assert(err, IsNil)
    99  	_, err = r.backupClient.GetTS(r.ctx, 10*time.Hour, 0)
   100  	c.Assert(err, ErrorMatches, ".*GC safepoint [0-9]+ exceed TS [0-9]+.*")
   101  
   102  	// timeago and backupts both exists, use backupts
   103  	backupts := oracle.ComposeTS(p+10, l)
   104  	ts, err = r.backupClient.GetTS(r.ctx, time.Minute, backupts)
   105  	c.Assert(err, IsNil)
   106  	c.Assert(ts, Equals, backupts)
   107  }
   108  
   109  func (r *testBackup) TestBuildTableRangeIntHandle(c *C) {
   110  	type Case struct {
   111  		ids []int64
   112  		trs []kv.KeyRange
   113  	}
   114  	low := codec.EncodeInt(nil, math.MinInt64)
   115  	high := kv.Key(codec.EncodeInt(nil, math.MaxInt64)).PrefixNext()
   116  	cases := []Case{
   117  		{ids: []int64{1}, trs: []kv.KeyRange{
   118  			{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
   119  		}},
   120  		{ids: []int64{1, 2, 3}, trs: []kv.KeyRange{
   121  			{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
   122  			{StartKey: tablecodec.EncodeRowKey(2, low), EndKey: tablecodec.EncodeRowKey(2, high)},
   123  			{StartKey: tablecodec.EncodeRowKey(3, low), EndKey: tablecodec.EncodeRowKey(3, high)},
   124  		}},
   125  		{ids: []int64{1, 3}, trs: []kv.KeyRange{
   126  			{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
   127  			{StartKey: tablecodec.EncodeRowKey(3, low), EndKey: tablecodec.EncodeRowKey(3, high)},
   128  		}},
   129  	}
   130  	for _, cs := range cases {
   131  		c.Log(cs)
   132  		tbl := &model.TableInfo{Partition: &model.PartitionInfo{Enable: true}}
   133  		for _, id := range cs.ids {
   134  			tbl.Partition.Definitions = append(tbl.Partition.Definitions,
   135  				model.PartitionDefinition{ID: id})
   136  		}
   137  		ranges, err := backup.BuildTableRanges(tbl)
   138  		c.Assert(err, IsNil)
   139  		c.Assert(ranges, DeepEquals, cs.trs)
   140  	}
   141  
   142  	tbl := &model.TableInfo{ID: 7}
   143  	ranges, err := backup.BuildTableRanges(tbl)
   144  	c.Assert(err, IsNil)
   145  	c.Assert(ranges, DeepEquals, []kv.KeyRange{
   146  		{StartKey: tablecodec.EncodeRowKey(7, low), EndKey: tablecodec.EncodeRowKey(7, high)},
   147  	})
   148  }
   149  
   150  func (r *testBackup) TestBuildTableRangeCommonHandle(c *C) {
   151  	type Case struct {
   152  		ids []int64
   153  		trs []kv.KeyRange
   154  	}
   155  	low, err_l := codec.EncodeKey(nil, nil, []types.Datum{types.MinNotNullDatum()}...)
   156  	c.Assert(err_l, IsNil)
   157  	high, err_h := codec.EncodeKey(nil, nil, []types.Datum{types.MaxValueDatum()}...)
   158  	c.Assert(err_h, IsNil)
   159  	high = kv.Key(high).PrefixNext()
   160  	cases := []Case{
   161  		{ids: []int64{1}, trs: []kv.KeyRange{
   162  			{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
   163  		}},
   164  		{ids: []int64{1, 2, 3}, trs: []kv.KeyRange{
   165  			{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
   166  			{StartKey: tablecodec.EncodeRowKey(2, low), EndKey: tablecodec.EncodeRowKey(2, high)},
   167  			{StartKey: tablecodec.EncodeRowKey(3, low), EndKey: tablecodec.EncodeRowKey(3, high)},
   168  		}},
   169  		{ids: []int64{1, 3}, trs: []kv.KeyRange{
   170  			{StartKey: tablecodec.EncodeRowKey(1, low), EndKey: tablecodec.EncodeRowKey(1, high)},
   171  			{StartKey: tablecodec.EncodeRowKey(3, low), EndKey: tablecodec.EncodeRowKey(3, high)},
   172  		}},
   173  	}
   174  	for _, cs := range cases {
   175  		c.Log(cs)
   176  		tbl := &model.TableInfo{Partition: &model.PartitionInfo{Enable: true}, IsCommonHandle: true}
   177  		for _, id := range cs.ids {
   178  			tbl.Partition.Definitions = append(tbl.Partition.Definitions,
   179  				model.PartitionDefinition{ID: id})
   180  		}
   181  		ranges, err := backup.BuildTableRanges(tbl)
   182  		c.Assert(err, IsNil)
   183  		c.Assert(ranges, DeepEquals, cs.trs)
   184  	}
   185  
   186  	tbl := &model.TableInfo{ID: 7, IsCommonHandle: true}
   187  	ranges, err_r := backup.BuildTableRanges(tbl)
   188  	c.Assert(err_r, IsNil)
   189  	c.Assert(ranges, DeepEquals, []kv.KeyRange{
   190  		{StartKey: tablecodec.EncodeRowKey(7, low), EndKey: tablecodec.EncodeRowKey(7, high)},
   191  	})
   192  }
   193  
   194  func (r *testBackup) TestOnBackupRegionErrorResponse(c *C) {
   195  	type Case struct {
   196  		storeID           uint64
   197  		bo                *tikv.Backoffer
   198  		backupTS          uint64
   199  		lockResolver      *txnlock.LockResolver
   200  		resp              *backuppb.BackupResponse
   201  		exceptedBackoffMs int
   202  		exceptedErr       bool
   203  	}
   204  	newBackupRegionErrorResp := func(regionError *errorpb.Error) *backuppb.BackupResponse {
   205  		return &backuppb.BackupResponse{Error: &backuppb.Error{Detail: &backuppb.Error_RegionError{RegionError: regionError}}}
   206  	}
   207  
   208  	cases := []Case{
   209  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{NotLeader: &errorpb.NotLeader{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   210  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{RegionNotFound: &errorpb.RegionNotFound{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   211  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{KeyNotInRegion: &errorpb.KeyNotInRegion{}}), exceptedBackoffMs: 0, exceptedErr: true},
   212  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{EpochNotMatch: &errorpb.EpochNotMatch{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   213  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{ServerIsBusy: &errorpb.ServerIsBusy{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   214  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{StaleCommand: &errorpb.StaleCommand{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   215  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{StoreNotMatch: &errorpb.StoreNotMatch{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   216  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{RaftEntryTooLarge: &errorpb.RaftEntryTooLarge{}}), exceptedBackoffMs: 0, exceptedErr: true},
   217  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{ReadIndexNotReady: &errorpb.ReadIndexNotReady{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   218  		{storeID: 1, backupTS: 421123291611137, resp: newBackupRegionErrorResp(&errorpb.Error{ProposalInMergingMode: &errorpb.ProposalInMergingMode{}}), exceptedBackoffMs: 1000, exceptedErr: false},
   219  	}
   220  	for _, cs := range cases {
   221  		c.Log(cs)
   222  		_, backoffMs, err := backup.OnBackupResponse(cs.storeID, cs.bo, cs.backupTS, cs.lockResolver, cs.resp)
   223  		c.Assert(backoffMs, Equals, cs.exceptedBackoffMs)
   224  		if cs.exceptedErr {
   225  			c.Assert(err, NotNil)
   226  		} else {
   227  			c.Assert(err, IsNil)
   228  		}
   229  	}
   230  }
   231  
   232  func (r *testBackup) TestSendCreds(c *C) {
   233  	accessKey := "ab"
   234  	secretAccessKey := "cd"
   235  	backendOpt := storage.BackendOptions{
   236  		S3: storage.S3BackendOptions{
   237  			AccessKey:       accessKey,
   238  			SecretAccessKey: secretAccessKey,
   239  		},
   240  	}
   241  	backend, err := storage.ParseBackend("s3://bucket/prefix/", &backendOpt)
   242  	c.Assert(err, IsNil)
   243  	opts := &storage.ExternalStorageOptions{
   244  		SendCredentials: true,
   245  		SkipCheckPath:   true,
   246  	}
   247  	_, err = storage.New(r.ctx, backend, opts)
   248  	c.Assert(err, IsNil)
   249  	access_key := backend.GetS3().AccessKey
   250  	c.Assert(access_key, Equals, "ab")
   251  	secret_access_key := backend.GetS3().SecretAccessKey
   252  	c.Assert(secret_access_key, Equals, "cd")
   253  
   254  	backendOpt = storage.BackendOptions{
   255  		S3: storage.S3BackendOptions{
   256  			AccessKey:       accessKey,
   257  			SecretAccessKey: secretAccessKey,
   258  		},
   259  	}
   260  	backend, err = storage.ParseBackend("s3://bucket/prefix/", &backendOpt)
   261  	c.Assert(err, IsNil)
   262  	opts = &storage.ExternalStorageOptions{
   263  		SendCredentials: false,
   264  		SkipCheckPath:   true,
   265  	}
   266  	_, err = storage.New(r.ctx, backend, opts)
   267  	c.Assert(err, IsNil)
   268  	access_key = backend.GetS3().AccessKey
   269  	c.Assert(access_key, Equals, "")
   270  	secret_access_key = backend.GetS3().SecretAccessKey
   271  	c.Assert(secret_access_key, Equals, "")
   272  }