github.com/pingcap/br@v5.3.0-alpha.0.20220125034240-ec59c7b6ce30+incompatible/pkg/storage/s3_test.go (about)

     1  // Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
     2  
     3  package storage_test
     4  
     5  import (
     6  	"bufio"
     7  	"bytes"
     8  	"context"
     9  	"fmt"
    10  	"io"
    11  	"math/rand"
    12  	"os"
    13  
    14  	"github.com/aws/aws-sdk-go/aws"
    15  	"github.com/aws/aws-sdk-go/aws/awserr"
    16  	"github.com/aws/aws-sdk-go/service/s3"
    17  	"github.com/golang/mock/gomock"
    18  	. "github.com/pingcap/check"
    19  	"github.com/pingcap/errors"
    20  	backuppb "github.com/pingcap/kvproto/pkg/backup"
    21  
    22  	"github.com/pingcap/br/pkg/mock"
    23  	. "github.com/pingcap/br/pkg/storage"
    24  )
    25  
// s3Suite is a gocheck suite exercising S3Storage against a gomock-backed
// S3 API. Its fields are (re)initialized by setUpTest at the start of each
// test body and verified by tearDownTest.
type s3Suite struct {
	controller *gomock.Controller // verifies all mock expectations were met
	s3         *mock.MockS3API    // mocked AWS S3 client injected into storage
	storage    *S3Storage         // storage under test, wired to the mock above
}
    31  
    32  type s3SuiteCustom struct{}
    33  
// Register both suites with gocheck so their Test* methods run under `go test`.
var (
	_ = Suite(&s3Suite{})
	_ = Suite(&s3SuiteCustom{})
)
    38  
    39  // FIXME: Cannot use the real SetUpTest/TearDownTest to set up the mock
    40  // otherwise the mock error will be ignored.
    41  
    42  func (s *s3Suite) setUpTest(c gomock.TestReporter) {
    43  	s.controller = gomock.NewController(c)
    44  	s.s3 = mock.NewMockS3API(s.controller)
    45  	s.storage = NewS3StorageForTest(
    46  		s.s3,
    47  		&backuppb.S3{
    48  			Region:       "us-west-2",
    49  			Bucket:       "bucket",
    50  			Prefix:       "prefix/",
    51  			Acl:          "acl",
    52  			Sse:          "sse",
    53  			StorageClass: "sc",
    54  		},
    55  	)
    56  }
    57  
// tearDownTest asserts that every expectation registered on the mock
// controller was satisfied. Deferred by each test after setUpTest.
func (s *s3Suite) tearDownTest() {
	s.controller.Finish()
}
    61  
    62  func (s *s3Suite) TestApply(c *C) {
    63  	type testcase struct {
    64  		name      string
    65  		options   S3BackendOptions
    66  		errMsg    string
    67  		errReturn bool
    68  	}
    69  	testFn := func(test *testcase, c *C) {
    70  		c.Log(test.name)
    71  		_, err := ParseBackend("s3://bucket2/prefix/", &BackendOptions{S3: test.options})
    72  		if test.errReturn {
    73  			c.Assert(err, ErrorMatches, test.errMsg)
    74  		} else {
    75  			c.Assert(err, IsNil)
    76  		}
    77  	}
    78  	tests := []testcase{
    79  		{
    80  			name: "access_key not found",
    81  			options: S3BackendOptions{
    82  				Region:          "us-west-2",
    83  				SecretAccessKey: "cd",
    84  			},
    85  			errMsg:    "access_key not found.*",
    86  			errReturn: true,
    87  		},
    88  		{
    89  			name: "secret_access_key not found",
    90  			options: S3BackendOptions{
    91  				Region:    "us-west-2",
    92  				AccessKey: "ab",
    93  			},
    94  			errMsg:    "secret_access_key not found.*",
    95  			errReturn: true,
    96  		},
    97  		{
    98  			name: "scheme not found",
    99  			options: S3BackendOptions{
   100  				Endpoint: "12345",
   101  			},
   102  			errMsg:    "scheme not found in endpoint.*",
   103  			errReturn: true,
   104  		},
   105  		{
   106  			name: "host not found",
   107  			options: S3BackendOptions{
   108  				Endpoint: "http:12345",
   109  			},
   110  			errMsg:    "host not found in endpoint.*",
   111  			errReturn: true,
   112  		},
   113  		{
   114  			name: "invalid endpoint",
   115  			options: S3BackendOptions{
   116  				Endpoint: "!http:12345",
   117  			},
   118  			errMsg:    "parse (.*)!http:12345(.*): first path segment in URL cannot contain colon.*",
   119  			errReturn: true,
   120  		},
   121  	}
   122  	for i := range tests {
   123  		testFn(&tests[i], c)
   124  	}
   125  }
   126  
   127  func (s *s3Suite) TestApplyUpdate(c *C) {
   128  	type testcase struct {
   129  		name    string
   130  		options S3BackendOptions
   131  		setEnv  bool
   132  		s3      *backuppb.S3
   133  	}
   134  	testFn := func(test *testcase, c *C) {
   135  		c.Log(test.name)
   136  		if test.setEnv {
   137  			os.Setenv("AWS_ACCESS_KEY_ID", "ab")
   138  			os.Setenv("AWS_SECRET_ACCESS_KEY", "cd")
   139  		}
   140  		u, err := ParseBackend("s3://bucket/prefix/", &BackendOptions{S3: test.options})
   141  		s3 := u.GetS3()
   142  		c.Assert(err, IsNil)
   143  		c.Assert(s3, DeepEquals, test.s3)
   144  	}
   145  
   146  	tests := []testcase{
   147  		{
   148  			name: "no region and no endpoint",
   149  			options: S3BackendOptions{
   150  				Region:   "",
   151  				Endpoint: "",
   152  			},
   153  			s3: &backuppb.S3{
   154  				Region: "us-east-1",
   155  				Bucket: "bucket",
   156  				Prefix: "prefix",
   157  			},
   158  		},
   159  		{
   160  			name: "no endpoint",
   161  			options: S3BackendOptions{
   162  				Region: "us-west-2",
   163  			},
   164  			s3: &backuppb.S3{
   165  				Region: "us-west-2",
   166  				Bucket: "bucket",
   167  				Prefix: "prefix",
   168  			},
   169  		},
   170  		{
   171  			name: "https endpoint",
   172  			options: S3BackendOptions{
   173  				Endpoint: "https://s3.us-west-2",
   174  			},
   175  			s3: &backuppb.S3{
   176  				Region:   "us-east-1",
   177  				Endpoint: "https://s3.us-west-2",
   178  				Bucket:   "bucket",
   179  				Prefix:   "prefix",
   180  			},
   181  		},
   182  		{
   183  			name: "http endpoint",
   184  			options: S3BackendOptions{
   185  				Endpoint: "http://s3.us-west-2",
   186  			},
   187  			s3: &backuppb.S3{
   188  				Region:   "us-east-1",
   189  				Endpoint: "http://s3.us-west-2",
   190  				Bucket:   "bucket",
   191  				Prefix:   "prefix",
   192  			},
   193  		},
   194  		{
   195  			name: "ceph provider",
   196  			options: S3BackendOptions{
   197  				Region:         "us-west-2",
   198  				ForcePathStyle: true,
   199  				Provider:       "ceph",
   200  			},
   201  			s3: &backuppb.S3{
   202  				Region:         "us-west-2",
   203  				ForcePathStyle: true,
   204  				Bucket:         "bucket",
   205  				Prefix:         "prefix",
   206  			},
   207  		},
   208  		{
   209  			name: "ali provider",
   210  			options: S3BackendOptions{
   211  				Region:         "us-west-2",
   212  				ForcePathStyle: true,
   213  				Provider:       "alibaba",
   214  			},
   215  			s3: &backuppb.S3{
   216  				Region:         "us-west-2",
   217  				ForcePathStyle: false,
   218  				Bucket:         "bucket",
   219  				Prefix:         "prefix",
   220  			},
   221  		},
   222  		{
   223  			name: "netease provider",
   224  			options: S3BackendOptions{
   225  				Region:         "us-west-2",
   226  				ForcePathStyle: true,
   227  				Provider:       "netease",
   228  			},
   229  			s3: &backuppb.S3{
   230  				Region:         "us-west-2",
   231  				ForcePathStyle: false,
   232  				Bucket:         "bucket",
   233  				Prefix:         "prefix",
   234  			},
   235  		},
   236  		{
   237  			name: "useAccelerateEndpoint",
   238  			options: S3BackendOptions{
   239  				Region:                "us-west-2",
   240  				ForcePathStyle:        true,
   241  				UseAccelerateEndpoint: true,
   242  			},
   243  			s3: &backuppb.S3{
   244  				Region:         "us-west-2",
   245  				ForcePathStyle: false,
   246  				Bucket:         "bucket",
   247  				Prefix:         "prefix",
   248  			},
   249  		},
   250  		{
   251  			name: "keys",
   252  			options: S3BackendOptions{
   253  				Region:          "us-west-2",
   254  				AccessKey:       "ab",
   255  				SecretAccessKey: "cd",
   256  			},
   257  			s3: &backuppb.S3{
   258  				Region:          "us-west-2",
   259  				AccessKey:       "ab",
   260  				SecretAccessKey: "cd",
   261  				Bucket:          "bucket",
   262  				Prefix:          "prefix",
   263  			},
   264  			setEnv: true,
   265  		},
   266  	}
   267  	for i := range tests {
   268  		testFn(&tests[i], c)
   269  	}
   270  }
   271  
   272  func (s *s3Suite) TestS3Storage(c *C) {
   273  	type testcase struct {
   274  		name           string
   275  		s3             *backuppb.S3
   276  		errReturn      bool
   277  		hackPermission []Permission
   278  		sendCredential bool
   279  	}
   280  	testFn := func(test *testcase, c *C) {
   281  		c.Log(test.name)
   282  		ctx := aws.BackgroundContext()
   283  		s3 := &backuppb.StorageBackend{
   284  			Backend: &backuppb.StorageBackend_S3{
   285  				S3: test.s3,
   286  			},
   287  		}
   288  		_, err := New(ctx, s3, &ExternalStorageOptions{
   289  			SendCredentials:  test.sendCredential,
   290  			CheckPermissions: test.hackPermission,
   291  			SkipCheckPath:    true,
   292  		})
   293  		if test.errReturn {
   294  			c.Assert(err, NotNil)
   295  			return
   296  		}
   297  		c.Assert(err, IsNil)
   298  		if test.sendCredential {
   299  			c.Assert(len(test.s3.AccessKey), Greater, 0)
   300  		} else {
   301  			c.Assert(len(test.s3.AccessKey), Equals, 0)
   302  		}
   303  	}
   304  	tests := []testcase{
   305  		{
   306  			name: "no region and endpoint",
   307  			s3: &backuppb.S3{
   308  				Region:   "",
   309  				Endpoint: "",
   310  				Bucket:   "bucket",
   311  				Prefix:   "prefix",
   312  			},
   313  			errReturn:      true,
   314  			hackPermission: []Permission{AccessBuckets},
   315  			sendCredential: true,
   316  		},
   317  		{
   318  			name: "no region",
   319  			s3: &backuppb.S3{
   320  				Region:   "",
   321  				Endpoint: "http://10.1.2.3",
   322  				Bucket:   "bucket",
   323  				Prefix:   "prefix",
   324  			},
   325  			errReturn:      true,
   326  			hackPermission: []Permission{AccessBuckets},
   327  			sendCredential: true,
   328  		},
   329  		{
   330  			name: "no endpoint",
   331  			s3: &backuppb.S3{
   332  				Region:   "us-west-2",
   333  				Endpoint: "",
   334  				Bucket:   "bucket",
   335  				Prefix:   "prefix",
   336  			},
   337  			errReturn:      true,
   338  			hackPermission: []Permission{AccessBuckets},
   339  			sendCredential: true,
   340  		},
   341  		{
   342  			name: "no region",
   343  			s3: &backuppb.S3{
   344  				Region:   "",
   345  				Endpoint: "http://10.1.2.3",
   346  				Bucket:   "bucket",
   347  				Prefix:   "prefix",
   348  			},
   349  			errReturn:      false,
   350  			sendCredential: true,
   351  		},
   352  		{
   353  			name: "normal region",
   354  			s3: &backuppb.S3{
   355  				Region:   "us-west-2",
   356  				Endpoint: "",
   357  				Bucket:   "bucket",
   358  				Prefix:   "prefix",
   359  			},
   360  			errReturn:      false,
   361  			sendCredential: true,
   362  		},
   363  		{
   364  			name: "keys configured explicitly",
   365  			s3: &backuppb.S3{
   366  				Region:          "us-west-2",
   367  				AccessKey:       "ab",
   368  				SecretAccessKey: "cd",
   369  				Bucket:          "bucket",
   370  				Prefix:          "prefix",
   371  			},
   372  			errReturn:      false,
   373  			sendCredential: true,
   374  		},
   375  		{
   376  			name: "no access key",
   377  			s3: &backuppb.S3{
   378  				Region:          "us-west-2",
   379  				SecretAccessKey: "cd",
   380  				Bucket:          "bucket",
   381  				Prefix:          "prefix",
   382  			},
   383  			errReturn:      false,
   384  			sendCredential: true,
   385  		},
   386  		{
   387  			name: "no secret access key",
   388  			s3: &backuppb.S3{
   389  				Region:    "us-west-2",
   390  				AccessKey: "ab",
   391  				Bucket:    "bucket",
   392  				Prefix:    "prefix",
   393  			},
   394  			errReturn:      false,
   395  			sendCredential: true,
   396  		},
   397  		{
   398  			name: "no secret access key",
   399  			s3: &backuppb.S3{
   400  				Region:    "us-west-2",
   401  				AccessKey: "ab",
   402  				Bucket:    "bucket",
   403  				Prefix:    "prefix",
   404  			},
   405  			errReturn:      false,
   406  			sendCredential: false,
   407  		},
   408  	}
   409  	for i := range tests {
   410  		testFn(&tests[i], c)
   411  	}
   412  }
   413  
   414  func (s *s3Suite) TestS3URI(c *C) {
   415  	backend, err := ParseBackend("s3://bucket/prefix/", nil)
   416  	c.Assert(err, IsNil)
   417  	storage, err := New(context.Background(), backend, &ExternalStorageOptions{SkipCheckPath: true})
   418  	c.Assert(err, IsNil)
   419  	c.Assert(storage.URI(), Equals, "s3://bucket/prefix/")
   420  }
   421  
   422  func (s *s3Suite) TestS3Range(c *C) {
   423  	contentRange := "bytes 0-9/443"
   424  	ri, err := ParseRangeInfo(&contentRange)
   425  	c.Assert(err, IsNil)
   426  	c.Assert(ri, Equals, RangeInfo{Start: 0, End: 9, Size: 443})
   427  
   428  	_, err = ParseRangeInfo(nil)
   429  	c.Assert(err, ErrorMatches, "ContentRange is empty.*")
   430  
   431  	badRange := "bytes "
   432  	_, err = ParseRangeInfo(&badRange)
   433  	c.Assert(err, ErrorMatches, "invalid content range: 'bytes '.*")
   434  }
   435  
// TestWriteNoError ensures the WriteFile API issues a PutObject request and wait
// until the object is available in the S3 bucket.
func (s *s3Suite) TestWriteNoError(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	// Expect one PutObject carrying the configured bucket/ACL/SSE/storage
	// class and the full payload...
	putCall := s.s3.EXPECT().
		PutObjectWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.PutObjectInput) (*s3.PutObjectOutput, error) {
			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
			c.Assert(aws.StringValue(input.ACL), Equals, "acl")
			c.Assert(aws.StringValue(input.ServerSideEncryption), Equals, "sse")
			c.Assert(aws.StringValue(input.StorageClass), Equals, "sc")
			body, err := io.ReadAll(input.Body)
			c.Assert(err, IsNil)
			c.Assert(body, DeepEquals, []byte("test"))
			return &s3.PutObjectOutput{}, nil
		})
	// ...followed (ordered via After) by the waiter that blocks until the
	// object is visible under the same key.
	s.s3.EXPECT().
		WaitUntilObjectExistsWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.HeadObjectInput) error {
			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
			return nil
		}).
		After(putCall)

	err := s.storage.WriteFile(ctx, "file", []byte("test"))
	c.Assert(err, IsNil)
}
   468  
// TestReadNoError ensures the ReadFile API issues a GetObject request and correctly
// read the entire body.
func (s *s3Suite) TestReadNoError(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	// Expect a single GetObject for the prefixed key, replying with a fixed body.
	s.s3.EXPECT().
		GetObjectWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
			return &s3.GetObjectOutput{
				Body: io.NopCloser(bytes.NewReader([]byte("test"))),
			}, nil
		})

	content, err := s.storage.ReadFile(ctx, "file")
	c.Assert(err, IsNil)
	c.Assert(content, DeepEquals, []byte("test"))
}
   490  
// TestFileExistsNoError ensures the FileExists API issues a HeadObject request
// and reports a file exists.
func (s *s3Suite) TestFileExistsNoError(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	// A successful HeadObject (any non-error reply) means the file exists.
	s.s3.EXPECT().
		HeadObjectWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
			c.Assert(aws.StringValue(input.Key), Equals, "prefix/file")
			return &s3.HeadObjectOutput{}, nil
		})

	exists, err := s.storage.FileExists(ctx, "file")
	c.Assert(err, IsNil)
	c.Assert(exists, IsTrue)
}
   510  
// TestFileExistsMissing ensures the FileExists API reports file missing (not
// an error) if S3's HeadObject request replied NoSuchKey.
func (s *s3Suite) TestFileExistsMissing(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	// NoSuchKey must be translated into (false, nil), not propagated as an error.
	s.s3.EXPECT().
		HeadObjectWithContext(ctx, gomock.Any()).
		Return(nil, awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil))

	exists, err := s.storage.FileExists(ctx, "file-missing")
	c.Assert(err, IsNil)
	c.Assert(exists, IsFalse)
}
   526  
// TestWriteError checks that a PutObject error is propagated.
func (s *s3Suite) TestWriteError(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	expectedErr := awserr.New(s3.ErrCodeNoSuchBucket, "no such bucket", nil)

	s.s3.EXPECT().
		PutObjectWithContext(ctx, gomock.Any()).
		Return(nil, expectedErr)

	err := s.storage.WriteFile(ctx, "file2", []byte("test"))
	// \Q...\E quotes the AWS error text so regexp metacharacters in it are literal.
	c.Assert(err, ErrorMatches, `\Q`+expectedErr.Error()+`\E`)
}
   542  
// TestReadError checks that a GetObject error is propagated, annotated with
// the bucket and key that failed.
func (s *s3Suite) TestReadError(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	expectedErr := awserr.New(s3.ErrCodeNoSuchKey, "no such key", nil)

	s.s3.EXPECT().
		GetObjectWithContext(ctx, gomock.Any()).
		Return(nil, expectedErr)

	_, err := s.storage.ReadFile(ctx, "file-missing")

	// ReadFile wraps the AWS error with the file's bucket/key for diagnosis.
	c.Assert(err, ErrorMatches, "failed to read s3 file, file info: "+
		"input.bucket='bucket', input.key='prefix/file-missing': "+expectedErr.Error())
}
   560  
// TestFileExistsError checks that a HeadObject error is propagated.
// Unlike NoSuchKey (see TestFileExistsMissing above in the original file),
// an unrelated error must surface to the caller.
func (s *s3Suite) TestFileExistsError(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	expectedErr := errors.New("just some unrelated error")

	s.s3.EXPECT().
		HeadObjectWithContext(ctx, gomock.Any()).
		Return(nil, expectedErr)

	_, err := s.storage.FileExists(ctx, "file3")
	c.Assert(err, ErrorMatches, `\Q`+expectedErr.Error()+`\E`)
}
   576  
   577  // TestOpenAsBufio checks that we can open a file for reading via bufio.
   578  func (s *s3Suite) TestOpenAsBufio(c *C) {
   579  	s.setUpTest(c)
   580  	defer s.tearDownTest()
   581  	ctx := aws.BackgroundContext()
   582  
   583  	s.s3.EXPECT().
   584  		GetObjectWithContext(ctx, gomock.Any()).
   585  		DoAndReturn(func(_ context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
   586  			c.Assert(aws.StringValue(input.Range), Equals, "bytes=0-")
   587  			return &s3.GetObjectOutput{
   588  				Body:         io.NopCloser(bytes.NewReader([]byte("plain text\ncontent"))),
   589  				ContentRange: aws.String("bytes 0-17/18"),
   590  			}, nil
   591  		})
   592  
   593  	reader, err := s.storage.Open(ctx, "plain-text-file")
   594  	c.Assert(err, IsNil)
   595  	defer c.Assert(reader.Close(), IsNil)
   596  	bufReader := bufio.NewReaderSize(reader, 5)
   597  	content, err := bufReader.ReadString('\n')
   598  	c.Assert(err, IsNil)
   599  	c.Assert(content, Equals, "plain text\n")
   600  	content, err = bufReader.ReadString('\n')
   601  	c.Assert(err, ErrorMatches, "EOF")
   602  	c.Assert(content, Equals, "content")
   603  }
   604  
   605  // alphabetReader is used in TestOpenReadSlowly. This Reader produces a single
   606  // upper case letter one Read() at a time.
   607  type alphabetReader struct{ character byte }
   608  
   609  func (r *alphabetReader) Read(buf []byte) (int, error) {
   610  	if r.character > 'Z' {
   611  		return 0, io.EOF
   612  	}
   613  	if len(buf) == 0 {
   614  		return 0, nil
   615  	}
   616  	buf[0] = r.character
   617  	r.character++
   618  	return 1, nil
   619  }
   620  
   621  func (r *alphabetReader) Close() error {
   622  	return nil
   623  }
   624  
// TestOpenReadSlowly checks that we can open a file for reading, even if the
// reader emits content one byte at a time.
func (s *s3Suite) TestOpenReadSlowly(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	// Serve the whole alphabet through a reader that yields one byte per Read.
	s.s3.EXPECT().
		GetObjectWithContext(ctx, gomock.Any()).
		Return(&s3.GetObjectOutput{
			Body:         &alphabetReader{character: 'A'},
			ContentRange: aws.String("bytes 0-25/26"),
		}, nil)

	reader, err := s.storage.Open(ctx, "alphabets")
	c.Assert(err, IsNil)
	res, err := io.ReadAll(reader)
	c.Assert(err, IsNil)
	c.Assert(res, DeepEquals, []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
}
   645  
// TestOpenSeek checks that Seek is implemented correctly: short forward seeks
// reuse the current GetObject stream, while long or backward seeks trigger a
// new ranged GetObject request.
func (s *s3Suite) TestOpenSeek(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	someRandomBytes := make([]byte, 1000000)
	rand.Read(someRandomBytes)
	// ^ we just want some random bytes for testing, we don't care about its security.

	// Exactly three GetObject calls are expected: the initial open at 0, and
	// one re-open for each of the two long/backward seeks asserted below.
	s.expectedCalls(ctx, c, someRandomBytes, []int{0, 998000, 990100}, func(data []byte, offset int) io.ReadCloser {
		return io.NopCloser(bytes.NewReader(data[offset:]))
	})

	reader, err := s.storage.Open(ctx, "random")
	c.Assert(err, IsNil)
	defer reader.Close()

	// first do some simple read...
	slice := make([]byte, 100)
	n, err := io.ReadFull(reader, slice)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 100)
	c.Assert(slice, DeepEquals, someRandomBytes[:100])

	// a short seek will not result in a different GetObject request.
	offset, err := reader.Seek(2000, io.SeekStart)
	c.Assert(err, IsNil)
	c.Assert(offset, Equals, int64(2000))
	n, err = io.ReadFull(reader, slice)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 100)
	c.Assert(slice, DeepEquals, someRandomBytes[2000:2100])

	// a long seek will perform a new GetObject request
	offset, err = reader.Seek(-2000, io.SeekEnd)
	c.Assert(err, IsNil)
	c.Assert(offset, Equals, int64(998000))
	n, err = io.ReadFull(reader, slice)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 100)
	c.Assert(slice, DeepEquals, someRandomBytes[998000:998100])

	// jumping backward should be fine, but would perform a new GetObject request.
	offset, err = reader.Seek(-8000, io.SeekCurrent)
	c.Assert(err, IsNil)
	c.Assert(offset, Equals, int64(990100))
	n, err = io.ReadFull(reader, slice)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 100)
	c.Assert(slice, DeepEquals, someRandomBytes[990100:990200])

	// test seek to the file end or bigger positions: the offset is clamped to
	// the file size and subsequent reads report EOF without a new request.
	for _, p := range []int64{1000000, 1000001, 2000000} {
		offset, err = reader.Seek(p, io.SeekStart)
		c.Assert(offset, Equals, int64(1000000))
		c.Assert(err, IsNil)
		_, err := reader.Read(slice)
		c.Assert(err, Equals, io.EOF)
	}
}
   707  
   708  type limitedBytesReader struct {
   709  	*bytes.Reader
   710  	offset int
   711  	limit  int
   712  }
   713  
   714  func (r *limitedBytesReader) Read(p []byte) (n int, err error) {
   715  	n, err = r.Reader.Read(p)
   716  	if err != nil {
   717  		return
   718  	}
   719  	if r.offset+n > r.limit {
   720  		return n, errors.New("read exceeded limit")
   721  	}
   722  	r.offset += n
   723  	return
   724  }
   725  
// expectedCalls registers one GetObjectWithContext expectation per entry of
// startOffsets, chained with After so gomock enforces they occur in slice
// order. Each expectation asserts the requested Range header begins at that
// offset and replies with a body built by newReader over the remaining data,
// plus a matching ContentRange header.
func (s *s3Suite) expectedCalls(ctx context.Context, c *C, data []byte, startOffsets []int, newReader func(data []byte, offset int) io.ReadCloser) {
	var lastCall *gomock.Call
	for _, offset := range startOffsets {
		thisOffset := offset // per-iteration copy so each closure sees its own offset
		thisCall := s.s3.EXPECT().
			GetObjectWithContext(ctx, gomock.Any()).
			DoAndReturn(func(_ context.Context, input *s3.GetObjectInput) (*s3.GetObjectOutput, error) {
				c.Assert(aws.StringValue(input.Range), Equals, fmt.Sprintf("bytes=%d-", thisOffset))
				return &s3.GetObjectOutput{
					Body:         newReader(data, thisOffset),
					ContentRange: aws.String(fmt.Sprintf("bytes %d-%d/%d", thisOffset, len(data)-1, len(data))),
				}, nil
			})
		// Chain onto the previous call to enforce ordering.
		if lastCall != nil {
			thisCall = thisCall.After(lastCall)
		}
		lastCall = thisCall
	}
}
   745  
// TestS3ReaderWithRetryEOF check the Read with retry and end with io.EOF.
func (s *s3Suite) TestS3ReaderWithRetryEOF(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	someRandomBytes := make([]byte, 100)
	rand.Read(someRandomBytes) //nolint:gosec
	// ^ we just want some random bytes for testing, we don't care about its security.

	// Each GetObject body errors out after serving 30 bytes, forcing the
	// reader to retry with a new ranged request. The expected retry offsets
	// are 0, 20, 50 and 75, matching the reads performed below.
	s.expectedCalls(ctx, c, someRandomBytes, []int{0, 20, 50, 75}, func(data []byte, offset int) io.ReadCloser {
		return io.NopCloser(&limitedBytesReader{Reader: bytes.NewReader(data[offset:]), limit: 30})
	})

	reader, err := s.storage.Open(ctx, "random")
	c.Assert(err, IsNil)
	defer reader.Close()

	var n int
	slice := make([]byte, 30)
	// readAndCheck reads cnt bytes and verifies they match the source data
	// starting at offset.
	readAndCheck := func(cnt, offset int) {
		n, err = io.ReadFull(reader, slice[:cnt])
		c.Assert(err, IsNil)
		c.Assert(n, Equals, cnt)
		c.Assert(slice[:cnt], DeepEquals, someRandomBytes[offset:offset+cnt])
	}

	// first do some simple read...
	readAndCheck(20, 0)

	// two more small short read that is ok
	readAndCheck(15, 20)
	readAndCheck(15, 35)
	readAndCheck(25, 50)
	readAndCheck(20, 75)

	// only 5 bytes remain (95 of the 100 have been read above)
	n, err = reader.Read(slice)
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 5)

	_, err = reader.Read(slice)
	c.Assert(err, Equals, io.EOF)
}
   790  
// TestS3ReaderWithRetryFailed check the Read with retry failed after maxRetryTimes.
func (s *s3Suite) TestS3ReaderWithRetryFailed(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	someRandomBytes := make([]byte, 100)
	rand.Read(someRandomBytes) //nolint:gosec
	// ^ we just want some random bytes for testing, we don't care about its security.

	// Every GetObject body errors out after 30 bytes, so each 20-byte read
	// below consumes one request; only four requests are expected in total.
	s.expectedCalls(ctx, c, someRandomBytes, []int{0, 20, 40, 60}, func(data []byte, offset int) io.ReadCloser {
		return io.NopCloser(&limitedBytesReader{Reader: bytes.NewReader(data[offset:]), limit: 30})
	})

	reader, err := s.storage.Open(ctx, "random")
	c.Assert(err, IsNil)
	defer reader.Close()

	var n int
	slice := make([]byte, 20)
	// readAndCheck reads cnt bytes and verifies them against the source data.
	readAndCheck := func(cnt, offset int) {
		n, err = io.ReadFull(reader, slice[:cnt])
		c.Assert(err, IsNil)
		c.Assert(n, Equals, cnt)
		c.Assert(slice[:cnt], DeepEquals, someRandomBytes[offset:offset+cnt])
	}

	// we can retry 3 times, so read will succeed for 4 times
	for i := 0; i < 4; i++ {
		readAndCheck(20, i*20)
	}

	// The retry budget is exhausted, so the injected error surfaces.
	_, err = reader.Read(slice)
	c.Assert(err, ErrorMatches, "read exceeded limit")
}
   826  
// TestWalkDir checks WalkDir retrieves all directory content under a prefix,
// paging through ListObjects with the configured ListCount and resuming from
// the Marker of the previous page. Two walks are performed: one under the
// "sp" sub-directory (pages of 2) and one over the whole prefix (pages of 4).
func (s *s3Suite) TestWalkDir(c *C) {
	s.setUpTest(c)
	defer s.tearDownTest()
	ctx := aws.BackgroundContext()

	contents := []*s3.Object{
		{
			Key:  aws.String("prefix/sp/.gitignore"),
			Size: aws.Int64(437),
		},
		{
			Key:  aws.String("prefix/sp/01.jpg"),
			Size: aws.Int64(27499),
		},
		{
			Key:  aws.String("prefix/sp/1-f.png"),
			Size: aws.Int64(32507),
		},
		{
			Key:  aws.String("prefix/sp/10-f.png"),
			Size: aws.Int64(549735),
		},
		{
			Key:  aws.String("prefix/sp/10-t.jpg"),
			Size: aws.Int64(44151),
		},
	}

	// first call serve item #0, #1; second call #2, #3; third call #4.
	firstCall := s.s3.EXPECT().
		ListObjectsWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
			c.Assert(aws.StringValue(input.Prefix), Equals, "prefix/sp/")
			c.Assert(aws.StringValue(input.Marker), Equals, "")
			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
			return &s3.ListObjectsOutput{
				IsTruncated: aws.Bool(true),
				Contents:    contents[:2],
			}, nil
		})
	secondCall := s.s3.EXPECT().
		ListObjectsWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
			// The walk resumes from the last key of the previous page.
			c.Assert(aws.StringValue(input.Marker), Equals, aws.StringValue(contents[1].Key))
			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
			return &s3.ListObjectsOutput{
				IsTruncated: aws.Bool(true),
				Contents:    contents[2:4],
			}, nil
		}).
		After(firstCall)
	thirdCall := s.s3.EXPECT().
		ListObjectsWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
			c.Assert(aws.StringValue(input.Marker), Equals, aws.StringValue(contents[3].Key))
			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
			// IsTruncated=false terminates the first walk.
			return &s3.ListObjectsOutput{
				IsTruncated: aws.Bool(false),
				Contents:    contents[4:],
			}, nil
		}).
		After(secondCall)
	// Fourth and fifth calls serve the second walk (empty SubDir, pages of 4).
	fourthCall := s.s3.EXPECT().
		ListObjectsWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
			c.Assert(aws.StringValue(input.Prefix), Equals, "prefix/")
			c.Assert(aws.StringValue(input.Marker), Equals, "")
			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(4))
			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
			return &s3.ListObjectsOutput{
				IsTruncated: aws.Bool(true),
				Contents:    contents[:4],
			}, nil
		}).
		After(thirdCall)
	s.s3.EXPECT().
		ListObjectsWithContext(ctx, gomock.Any()).
		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
			c.Assert(aws.StringValue(input.Marker), Equals, aws.StringValue(contents[3].Key))
			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(4))
			return &s3.ListObjectsOutput{
				IsTruncated: aws.Bool(false),
				Contents:    contents[4:],
			}, nil
		}).
		After(fourthCall)

	// Ensure we receive the items in order.
	i := 0
	err := s.storage.WalkDir(
		ctx,
		&WalkOption{SubDir: "sp", ListCount: 2},
		func(path string, size int64) error {
			comment := Commentf("index = %d", i)
			// The callback receives paths relative to the storage prefix.
			c.Assert("prefix/"+path, Equals, *contents[i].Key, comment)
			c.Assert(size, Equals, *contents[i].Size, comment)
			i++
			return nil
		},
	)
	c.Assert(err, IsNil)
	c.Assert(i, Equals, len(contents))

	// test with empty subDir
	i = 0
	err = s.storage.WalkDir(
		ctx,
		&WalkOption{ListCount: 4},
		func(path string, size int64) error {
			comment := Commentf("index = %d", i)
			c.Assert("prefix/"+path, Equals, *contents[i].Key, comment)
			c.Assert(size, Equals, *contents[i].Size, comment)
			i++
			return nil
		},
	)
	c.Assert(err, IsNil)
	c.Assert(i, Equals, len(contents))
}
   950  
   951  // TestWalkDirBucket checks WalkDir retrieves all directory content under a bucket.
   952  func (s *s3SuiteCustom) TestWalkDirWithEmptyPrefix(c *C) {
   953  	controller := gomock.NewController(c)
   954  	s3API := mock.NewMockS3API(controller)
   955  	storage := NewS3StorageForTest(
   956  		s3API,
   957  		&backuppb.S3{
   958  			Region:       "us-west-2",
   959  			Bucket:       "bucket",
   960  			Prefix:       "",
   961  			Acl:          "acl",
   962  			Sse:          "sse",
   963  			StorageClass: "sc",
   964  		},
   965  	)
   966  	defer controller.Finish()
   967  	ctx := aws.BackgroundContext()
   968  
   969  	contents := []*s3.Object{
   970  		{
   971  			Key:  aws.String("sp/.gitignore"),
   972  			Size: aws.Int64(437),
   973  		},
   974  		{
   975  			Key:  aws.String("prefix/sp/01.jpg"),
   976  			Size: aws.Int64(27499),
   977  		},
   978  	}
   979  	firstCall := s3API.EXPECT().
   980  		ListObjectsWithContext(ctx, gomock.Any()).
   981  		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
   982  			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
   983  			c.Assert(aws.StringValue(input.Prefix), Equals, "")
   984  			c.Assert(aws.StringValue(input.Marker), Equals, "")
   985  			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
   986  			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
   987  			return &s3.ListObjectsOutput{
   988  				IsTruncated: aws.Bool(false),
   989  				Contents:    contents,
   990  			}, nil
   991  		})
   992  	s3API.EXPECT().
   993  		ListObjectsWithContext(ctx, gomock.Any()).
   994  		DoAndReturn(func(_ context.Context, input *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) {
   995  			c.Assert(aws.StringValue(input.Bucket), Equals, "bucket")
   996  			c.Assert(aws.StringValue(input.Prefix), Equals, "sp/")
   997  			c.Assert(aws.StringValue(input.Marker), Equals, "")
   998  			c.Assert(aws.Int64Value(input.MaxKeys), Equals, int64(2))
   999  			c.Assert(aws.StringValue(input.Delimiter), Equals, "")
  1000  			return &s3.ListObjectsOutput{
  1001  				IsTruncated: aws.Bool(false),
  1002  				Contents:    contents[:1],
  1003  			}, nil
  1004  		}).
  1005  		After(firstCall)
  1006  
  1007  	// Ensure we receive the items in order.
  1008  	i := 0
  1009  	err := storage.WalkDir(
  1010  		ctx,
  1011  		&WalkOption{SubDir: "", ListCount: 2},
  1012  		func(path string, size int64) error {
  1013  			comment := Commentf("index = %d", i)
  1014  			c.Assert(path, Equals, *contents[i].Key, comment)
  1015  			c.Assert(size, Equals, *contents[i].Size, comment)
  1016  			i++
  1017  			return nil
  1018  		},
  1019  	)
  1020  	c.Assert(err, IsNil)
  1021  	c.Assert(i, Equals, len(contents))
  1022  
  1023  	// test with non-empty sub-dir
  1024  	i = 0
  1025  	err = storage.WalkDir(
  1026  		ctx,
  1027  		&WalkOption{SubDir: "sp", ListCount: 2},
  1028  		func(path string, size int64) error {
  1029  			comment := Commentf("index = %d", i)
  1030  			c.Assert(path, Equals, *contents[i].Key, comment)
  1031  			c.Assert(size, Equals, *contents[i].Size, comment)
  1032  			i++
  1033  			return nil
  1034  		},
  1035  	)
  1036  	c.Assert(err, IsNil)
  1037  	c.Assert(i, Equals, 1)
  1038  }