github.com/SaurabhDubey-Groww/go-cloud@v0.0.0-20221124105541-b26c29285fd8/blob/drivertest/drivertest.go

     1  // Copyright 2018 The Go Cloud Development Kit Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     https://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Package drivertest provides a conformance test for implementations of
    16  // driver.
    17  package drivertest // import "gocloud.dev/blob/drivertest"
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"crypto/md5"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"io/ioutil"
    27  	"log"
    28  	"net/http"
    29  	"net/url"
    30  	"reflect"
    31  	"strconv"
    32  	"strings"
    33  	"sync"
    34  	"sync/atomic"
    35  	"testing"
    36  	"testing/iotest"
    37  	"time"
    38  
    39  	"github.com/google/go-cmp/cmp"
    40  	"github.com/google/go-cmp/cmp/cmpopts"
    41  	"gocloud.dev/blob"
    42  	"gocloud.dev/blob/driver"
    43  	"gocloud.dev/gcerrors"
    44  	"gocloud.dev/internal/escape"
    45  )
    46  
    47  // Harness describes the functionality test harnesses must provide to run
    48  // conformance tests.
    49  type Harness interface {
    50  	// MakeDriver creates a driver.Bucket to test.
    51  	// Multiple calls to MakeDriver during a test run must refer to the
    52  	// same storage bucket; i.e., a blob created using one driver.Bucket must
    53  	// be readable by a subsequent driver.Bucket.
    54  	MakeDriver(ctx context.Context) (driver.Bucket, error)
    55  	// MakeDriverForNonexistentBucket creates a driver.Bucket for a nonexistent
    56  	// bucket. If that concept doesn't make sense for a driver, return (nil, nil).
    57  	MakeDriverForNonexistentBucket(ctx context.Context) (driver.Bucket, error)
    58  	// HTTPClient should return an unauthorized *http.Client, or nil.
    59  	// Required if the service supports SignedURL.
    60  	HTTPClient() *http.Client
    61  	// Close closes resources used by the harness.
    62  	Close()
    63  }
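
        // A minimal sketch of a Harness for a hypothetical in-memory driver might
        // look like the following (illustrative only: newMemDriverBucket is a
        // made-up constructor, and real harnesses usually also manage credentials,
        // temporary buckets, and record/replay state):
        //
        //	type memHarness struct{}
        //
        //	func (memHarness) MakeDriver(ctx context.Context) (driver.Bucket, error) {
        //		return newMemDriverBucket(), nil
        //	}
        //
        //	func (memHarness) MakeDriverForNonexistentBucket(ctx context.Context) (driver.Bucket, error) {
        //		return nil, nil // an in-memory bucket always "exists"
        //	}
        //
        //	func (memHarness) HTTPClient() *http.Client { return nil } // SignedURL not supported
        //
        //	func (memHarness) Close() {}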
    64  
    65  // HarnessMaker describes functions that construct a harness for running tests.
    66  // It is called exactly once per test; Harness.Close() will be called when the test is complete.
    67  type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
    68  
    69  // AsTest represents a test of As functionality.
    70  // The conformance test:
    71  // 1. Calls BucketCheck.
    72  // 2. Creates a blob in a directory, using BeforeWrite as a WriterOption.
    73  // 3. Fetches the blob's attributes and calls AttributesCheck.
    74  // 4. Creates a Reader for the blob using BeforeRead as a ReaderOption,
    75  //
    76  //	and calls ReaderCheck with the resulting Reader.
    77  //
    78  // 5. Calls List using BeforeList as a ListOption, with Delimiter set so
    79  //
    80  //	that only the directory is returned, and calls ListObjectCheck
    81  //	on the single directory list entry returned.
    82  //
    83  // 6. Calls List using BeforeList as a ListOption, and calls ListObjectCheck
    84  //
    85  //	on the single blob entry returned.
    86  //
    87  // 7. Tries to read a non-existent blob, and calls ErrorCheck with the error.
    88  // 8. Makes a copy of the blob, using BeforeCopy as a CopyOption.
    89  // 9. Calls SignedURL using BeforeSign as a SignedURLOption for each supported
    90  //
    91  //	signing method (i.e. GET, PUT and DELETE).
    92  //
    93  // For example, an AsTest might set a driver-specific field to a custom
    94  // value in BeforeWrite, and then verify the custom value was returned in
    95  // AttributesCheck and/or ReaderCheck.
    96  type AsTest interface {
    97  	// Name should return a descriptive name for the test.
    98  	Name() string
    99  	// BucketCheck will be called to allow verification of Bucket.As.
   100  	BucketCheck(b *blob.Bucket) error
   101  	// ErrorCheck will be called to allow verification of Bucket.ErrorAs.
   102  	ErrorCheck(b *blob.Bucket, err error) error
   103  	// BeforeRead will be passed directly to ReaderOptions as part of reading
   104  	// a test blob.
   105  	BeforeRead(as func(interface{}) bool) error
   106  	// BeforeWrite will be passed directly to WriterOptions as part of creating
   107  	// a test blob.
   108  	BeforeWrite(as func(interface{}) bool) error
   109  	// BeforeCopy will be passed directly to CopyOptions as part of copying
   110  	// the test blob.
   111  	BeforeCopy(as func(interface{}) bool) error
   112  	// BeforeList will be passed directly to ListOptions as part of listing the
   113  	// test blob.
   114  	BeforeList(as func(interface{}) bool) error
   115  	// BeforeSign will be passed directly to SignedURLOptions as part of
   116  	// generating a signed URL to the test blob.
   117  	BeforeSign(as func(interface{}) bool) error
   118  	// AttributesCheck will be called after fetching the test blob's attributes.
   119  	// It should call attrs.As and verify the results.
   120  	AttributesCheck(attrs *blob.Attributes) error
   121  	// ReaderCheck will be called after creating a blob.Reader.
   122  	// It should call r.As and verify the results.
   123  	ReaderCheck(r *blob.Reader) error
   124  	// ListObjectCheck will be called after calling List with the test object's
   125  	// name as the Prefix. It should call o.As and verify the results.
   126  	ListObjectCheck(o *blob.ListObject) error
   127  }
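
        // A driver-specific AsTest usually asserts the concrete types the driver
        // exposes via As; verifyAsFailsOnNil below covers the nil case. A hedged
        // sketch of one driver-specific check (myRawClient is a hypothetical
        // driver type; the remaining AsTest methods would follow the same pattern,
        // or return nil when there is nothing driver-specific to verify):
        //
        //	func (verifyMyDriverAs) BucketCheck(b *blob.Bucket) error {
        //		var client *myRawClient
        //		if !b.As(&client) {
        //			return errors.New("Bucket.As failed for *myRawClient")
        //		}
        //		return nil
        //	}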
   128  
   129  type verifyAsFailsOnNil struct{}
   130  
   131  func (verifyAsFailsOnNil) Name() string {
   132  	return "verify As returns false when passed nil"
   133  }
   134  
   135  func (verifyAsFailsOnNil) BucketCheck(b *blob.Bucket) error {
   136  	if b.As(nil) {
   137  		return errors.New("want Bucket.As to return false when passed nil")
   138  	}
   139  	return nil
   140  }
   141  
   142  func (verifyAsFailsOnNil) ErrorCheck(b *blob.Bucket, err error) (ret error) {
   143  	defer func() {
   144  		if recover() == nil {
   145  			ret = errors.New("want ErrorAs to panic when passed nil")
   146  		}
   147  	}()
   148  	b.ErrorAs(err, nil)
   149  	return nil
   150  }
   151  
   152  func (verifyAsFailsOnNil) BeforeRead(as func(interface{}) bool) error {
   153  	if as(nil) {
   154  		return errors.New("want BeforeRead's As to return false when passed nil")
   155  	}
   156  	return nil
   157  }
   158  
   159  func (verifyAsFailsOnNil) BeforeWrite(as func(interface{}) bool) error {
   160  	if as(nil) {
   161  		return errors.New("want BeforeWrite's As to return false when passed nil")
   162  	}
   163  	return nil
   164  }
   165  
   166  func (verifyAsFailsOnNil) BeforeCopy(as func(interface{}) bool) error {
   167  	if as(nil) {
   168  		return errors.New("want BeforeCopy's As to return false when passed nil")
   169  	}
   170  	return nil
   171  }
   172  
   173  func (verifyAsFailsOnNil) BeforeList(as func(interface{}) bool) error {
   174  	if as(nil) {
   175  		return errors.New("want BeforeList's As to return false when passed nil")
   176  	}
   177  	return nil
   178  }
   179  
   180  func (verifyAsFailsOnNil) BeforeSign(as func(interface{}) bool) error {
   181  	if as(nil) {
   182  		return errors.New("want BeforeSign's As to return false when passed nil")
   183  	}
   184  	return nil
   185  }
   186  
   187  func (verifyAsFailsOnNil) AttributesCheck(attrs *blob.Attributes) error {
   188  	if attrs.As(nil) {
   189  		return errors.New("want Attributes.As to return false when passed nil")
   190  	}
   191  	return nil
   192  }
   193  
   194  func (verifyAsFailsOnNil) ReaderCheck(r *blob.Reader) error {
   195  	if r.As(nil) {
   196  		return errors.New("want Reader.As to return false when passed nil")
   197  	}
   198  	return nil
   199  }
   200  
   201  func (verifyAsFailsOnNil) ListObjectCheck(o *blob.ListObject) error {
   202  	if o.As(nil) {
   203  		return errors.New("want ListObject.As to return false when passed nil")
   204  	}
   205  	return nil
   206  }
   207  
   208  // RunConformanceTests runs conformance tests for driver implementations of blob.
   209  func RunConformanceTests(t *testing.T, newHarness HarnessMaker, asTests []AsTest) {
   210  	t.Run("TestNonexistentBucket", func(t *testing.T) {
   211  		testNonexistentBucket(t, newHarness)
   212  	})
   213  	t.Run("TestList", func(t *testing.T) {
   214  		testList(t, newHarness)
   215  	})
   216  	t.Run("TestListWeirdKeys", func(t *testing.T) {
   217  		testListWeirdKeys(t, newHarness)
   218  	})
   219  	t.Run("TestListDelimiters", func(t *testing.T) {
   220  		testListDelimiters(t, newHarness)
   221  	})
   222  	t.Run("TestDirsWithCharactersBeforeDelimiter", func(t *testing.T) {
   223  		testDirsWithCharactersBeforeDelimiter(t, newHarness)
   224  	})
   225  	t.Run("TestRead", func(t *testing.T) {
   226  		testRead(t, newHarness)
   227  	})
   228  	t.Run("TestAttributes", func(t *testing.T) {
   229  		testAttributes(t, newHarness)
   230  	})
   231  	t.Run("TestWrite", func(t *testing.T) {
   232  		testWrite(t, newHarness)
   233  	})
   234  	t.Run("TestCanceledWrite", func(t *testing.T) {
   235  		testCanceledWrite(t, newHarness)
   236  	})
   237  	t.Run("TestConcurrentWriteAndRead", func(t *testing.T) {
   238  		testConcurrentWriteAndRead(t, newHarness)
   239  	})
   240  	t.Run("TestMetadata", func(t *testing.T) {
   241  		testMetadata(t, newHarness)
   242  	})
   243  	t.Run("TestMD5", func(t *testing.T) {
   244  		testMD5(t, newHarness)
   245  	})
   246  	t.Run("TestCopy", func(t *testing.T) {
   247  		testCopy(t, newHarness)
   248  	})
   249  	t.Run("TestDelete", func(t *testing.T) {
   250  		testDelete(t, newHarness)
   251  	})
   252  	t.Run("TestKeys", func(t *testing.T) {
   253  		testKeys(t, newHarness)
   254  	})
   255  	t.Run("TestSignedURL", func(t *testing.T) {
   256  		testSignedURL(t, newHarness)
   257  	})
   258  	asTests = append(asTests, verifyAsFailsOnNil{})
   259  	t.Run("TestAs", func(t *testing.T) {
   260  		for _, st := range asTests {
   261  			if st.Name() == "" {
   262  				t.Fatalf("AsTest.Name is required")
   263  			}
   264  			t.Run(st.Name(), func(t *testing.T) {
   265  				testAs(t, newHarness, st)
   266  			})
   267  		}
   268  	})
   269  }
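
        // A hedged sketch of how a driver's own test package typically invokes the
        // conformance tests (the harness type and verifyAs are hypothetical; see
        // the Harness and AsTest sketches above):
        //
        //	func TestConformance(t *testing.T) {
        //		newHarness := func(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
        //			return &harness{}, nil
        //		}
        //		drivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyAs{}})
        //	}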
   270  
   271  // RunBenchmarks runs benchmarks for driver implementations of blob.
   272  func RunBenchmarks(b *testing.B, bkt *blob.Bucket) {
   273  	b.Run("BenchmarkRead", func(b *testing.B) {
   274  		benchmarkRead(b, bkt)
   275  	})
   276  	b.Run("BenchmarkWriteReadDelete", func(b *testing.B) {
   277  		benchmarkWriteReadDelete(b, bkt)
   278  	})
   279  }
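
        // Driver packages usually wire this into a standard Go benchmark; a sketch
        // (openTestBucket is a hypothetical driver-specific helper that returns a
        // *blob.Bucket):
        //
        //	func BenchmarkMyDriver(b *testing.B) {
        //		bkt := openTestBucket(b)
        //		defer bkt.Close()
        //		drivertest.RunBenchmarks(b, bkt)
        //	}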
   280  
   281  // testNonexistentBucket tests the functionality of IsAccessible.
   282  func testNonexistentBucket(t *testing.T, newHarness HarnessMaker) {
   283  	ctx := context.Background()
   284  	h, err := newHarness(ctx, t)
   285  	if err != nil {
   286  		t.Fatal(err)
   287  	}
   288  	defer h.Close()
   289  
   290  	// Get a driver instance pointing to a nonexistent bucket.
   291  	{
   292  		drv, err := h.MakeDriverForNonexistentBucket(ctx)
   293  		if err != nil {
   294  			t.Fatal(err)
   295  		}
   296  		if drv == nil {
   297  			// No such thing as a "nonexistent bucket" for this driver.
   298  			t.Skip()
   299  		}
   300  		b := blob.NewBucket(drv)
   301  		defer b.Close()
   302  		exists, err := b.IsAccessible(ctx)
   303  		if err != nil {
   304  			t.Fatal(err)
   305  		}
   306  		if exists {
   307  			t.Error("got IsAccessible true for nonexistent bucket, want false")
   308  		}
   309  	}
   310  
   311  	// Verify that IsAccessible returns true for a real bucket.
   312  	{
   313  		drv, err := h.MakeDriver(ctx)
   314  		if err != nil {
   315  			t.Fatal(err)
   316  		}
   317  		b := blob.NewBucket(drv)
   318  		defer b.Close()
   319  		exists, err := b.IsAccessible(ctx)
   320  		if err != nil {
   321  			t.Fatal(err)
   322  		}
   323  		if !exists {
   324  			t.Error("got IsAccessible false for real bucket, want true")
   325  		}
   326  	}
   327  }
   328  
   329  // testList tests the functionality of List.
   330  func testList(t *testing.T, newHarness HarnessMaker) {
   331  	const keyPrefix = "blob-for-list"
   332  	content := []byte("hello")
   333  
   334  	keyForIndex := func(i int) string { return fmt.Sprintf("%s-%d", keyPrefix, i) }
   335  	gotIndices := func(t *testing.T, objs []*driver.ListObject) []int {
   336  		var got []int
   337  		for _, obj := range objs {
   338  			if !strings.HasPrefix(obj.Key, keyPrefix) {
   339  				t.Errorf("got name %q, expected it to have prefix %q", obj.Key, keyPrefix)
   340  				continue
   341  			}
   342  			i, err := strconv.Atoi(obj.Key[len(keyPrefix)+1:])
   343  			if err != nil {
   344  				t.Error(err)
   345  				continue
   346  			}
   347  			got = append(got, i)
   348  		}
   349  		return got
   350  	}
   351  
   352  	tests := []struct {
   353  		name      string
   354  		pageSize  int
   355  		prefix    string
   356  		wantPages [][]int
   357  		want      []int
   358  	}{
   359  		{
   360  			name:      "no objects",
   361  			prefix:    "no-objects-with-this-prefix",
   362  			wantPages: [][]int{nil},
   363  		},
   364  		{
   365  			name:      "exactly 1 object due to prefix",
   366  			prefix:    keyForIndex(1),
   367  			wantPages: [][]int{{1}},
   368  			want:      []int{1},
   369  		},
   370  		{
   371  			name:      "no pagination",
   372  			prefix:    keyPrefix,
   373  			wantPages: [][]int{{0, 1, 2}},
   374  			want:      []int{0, 1, 2},
   375  		},
   376  		{
   377  			name:      "by 1",
   378  			prefix:    keyPrefix,
   379  			pageSize:  1,
   380  			wantPages: [][]int{{0}, {1}, {2}},
   381  			want:      []int{0, 1, 2},
   382  		},
   383  		{
   384  			name:      "by 2",
   385  			prefix:    keyPrefix,
   386  			pageSize:  2,
   387  			wantPages: [][]int{{0, 1}, {2}},
   388  			want:      []int{0, 1, 2},
   389  		},
   390  		{
   391  			name:      "by 3",
   392  			prefix:    keyPrefix,
   393  			pageSize:  3,
   394  			wantPages: [][]int{{0, 1, 2}},
   395  			want:      []int{0, 1, 2},
   396  		},
   397  	}
   398  
   399  	ctx := context.Background()
   400  
   401  	// Creates blobs for sub-tests below.
   402  	// We only create the blobs once, for efficiency and because there's
   403  	// no guarantee that after we create them they will be immediately returned
   404  	// from List. The very first time the test is run against a Bucket, it may be
   405  	// flaky due to this race.
   406  	init := func(t *testing.T) (driver.Bucket, func()) {
   407  		h, err := newHarness(ctx, t)
   408  		if err != nil {
   409  			t.Fatal(err)
   410  		}
   411  		drv, err := h.MakeDriver(ctx)
   412  		if err != nil {
   413  			t.Fatal(err)
   414  		}
   415  		// See if the blobs are already there.
   416  		b := blob.NewBucket(drv)
   417  		iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
   418  		found := iterToSetOfKeys(ctx, t, iter)
   419  		for i := 0; i < 3; i++ {
   420  			key := keyForIndex(i)
   421  			if !found[key] {
   422  				if err := b.WriteAll(ctx, key, content, nil); err != nil {
   423  					b.Close()
   424  					t.Fatal(err)
   425  				}
   426  			}
   427  		}
   428  		return drv, func() { b.Close(); h.Close() }
   429  	}
   430  
   431  	for _, tc := range tests {
   432  		t.Run(tc.name, func(t *testing.T) {
   433  			drv, done := init(t)
   434  			defer done()
   435  
   436  			var gotPages [][]int
   437  			var got []int
   438  			var nextPageToken []byte
   439  			for {
   440  				page, err := drv.ListPaged(ctx, &driver.ListOptions{
   441  					PageSize:  tc.pageSize,
   442  					Prefix:    tc.prefix,
   443  					PageToken: nextPageToken,
   444  				})
   445  				if err != nil {
   446  					t.Fatal(err)
   447  				}
   448  				gotThisPage := gotIndices(t, page.Objects)
   449  				got = append(got, gotThisPage...)
   450  				gotPages = append(gotPages, gotThisPage)
   451  				if len(page.NextPageToken) == 0 {
   452  					break
   453  				}
   454  				nextPageToken = page.NextPageToken
   455  			}
   456  			if diff := cmp.Diff(gotPages, tc.wantPages); diff != "" {
   457  				t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", gotPages, tc.wantPages, diff)
   458  			}
   459  			if diff := cmp.Diff(got, tc.want); diff != "" {
   460  				t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, tc.want, diff)
   461  			}
   462  		})
   463  	}
   464  
   465  	// Verify pagination works when inserting in a retrieved page.
   466  	t.Run("PaginationConsistencyAfterInsert", func(t *testing.T) {
   467  		drv, done := init(t)
   468  		defer done()
   469  
   470  		// Fetch a page of 2 results: 0, 1.
   471  		page, err := drv.ListPaged(ctx, &driver.ListOptions{
   472  			PageSize: 2,
   473  			Prefix:   keyPrefix,
   474  		})
   475  		if err != nil {
   476  			t.Fatal(err)
   477  		}
   478  		got := gotIndices(t, page.Objects)
   479  		want := []int{0, 1}
   480  		if diff := cmp.Diff(got, want); diff != "" {
   481  			t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   482  		}
   483  
   484  		// Insert a key "0a" in the middle of the page we already retrieved.
   485  		b := blob.NewBucket(drv)
   486  		defer b.Close()
   487  		key := page.Objects[0].Key + "a"
   488  		if err := b.WriteAll(ctx, key, content, nil); err != nil {
   489  			t.Fatal(err)
   490  		}
   491  		defer func() {
   492  			_ = b.Delete(ctx, key)
   493  		}()
   494  
   495  		// Fetch the next page. It should not include 0, 0a, or 1, and it should
   496  		// include 2.
   497  		page, err = drv.ListPaged(ctx, &driver.ListOptions{
   498  			Prefix:    keyPrefix,
   499  			PageToken: page.NextPageToken,
   500  		})
   501  		if err != nil {
   502  			t.Fatal(err)
   503  		}
   504  		got = gotIndices(t, page.Objects)
   505  		want = []int{2}
   506  		if diff := cmp.Diff(got, want); diff != "" {
   507  			t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   508  		}
   509  	})
   510  
   511  	// Verify pagination works when deleting in a retrieved page.
   512  	t.Run("PaginationConsistencyAfterDelete", func(t *testing.T) {
   513  		drv, done := init(t)
   514  		defer done()
   515  
   516  		// Fetch a page of 2 results: 0, 1.
   517  		page, err := drv.ListPaged(ctx, &driver.ListOptions{
   518  			PageSize: 2,
   519  			Prefix:   keyPrefix,
   520  		})
   521  		if err != nil {
   522  			t.Fatal(err)
   523  		}
   524  		got := gotIndices(t, page.Objects)
   525  		want := []int{0, 1}
   526  		if diff := cmp.Diff(got, want); diff != "" {
   527  			t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   528  		}
   529  
   530  		// Delete key "1".
   531  		b := blob.NewBucket(drv)
   532  		defer b.Close()
   533  		key := page.Objects[1].Key
   534  		if err := b.Delete(ctx, key); err != nil {
   535  			t.Fatal(err)
   536  		}
   537  		defer func() {
   538  			_ = b.WriteAll(ctx, key, content, nil)
   539  		}()
   540  
   541  		// Fetch the next page. It should not include 0 or 1, and it should
   542  		// include 2.
   543  		page, err = drv.ListPaged(ctx, &driver.ListOptions{
   544  			Prefix:    keyPrefix,
   545  			PageToken: page.NextPageToken,
   546  		})
   547  		if err != nil {
   548  			t.Fatal(err)
   549  		}
   550  		got = gotIndices(t, page.Objects)
   551  		want = []int{2}
   552  		if diff := cmp.Diff(got, want); diff != "" {
   553  			t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   554  		}
   555  	})
   556  }
   557  
   558  // testListWeirdKeys tests the functionality of List on weird keys.
   559  func testListWeirdKeys(t *testing.T, newHarness HarnessMaker) {
   560  	const keyPrefix = "list-weirdkeys-"
   561  	content := []byte("hello")
   562  	ctx := context.Background()
   563  
   564  	// We're going to create a blob for each of the weird key strings, and
   565  	// then verify we can see them with List.
   566  	want := map[string]bool{}
   567  	for _, k := range escape.WeirdStrings {
   568  		want[keyPrefix+k] = true
   569  	}
   570  
   571  	// Creates blobs for sub-tests below.
   572  	// We only create the blobs once, for efficiency and because there's
   573  	// no guarantee that after we create them they will be immediately returned
   574  	// from List. The very first time the test is run against a Bucket, it may be
   575  	// flaky due to this race.
   576  	init := func(t *testing.T) (*blob.Bucket, func()) {
   577  		h, err := newHarness(ctx, t)
   578  		if err != nil {
   579  			t.Fatal(err)
   580  		}
   581  		drv, err := h.MakeDriver(ctx)
   582  		if err != nil {
   583  			t.Fatal(err)
   584  		}
   585  		// See if the blobs are already there.
   586  		b := blob.NewBucket(drv)
   587  		iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
   588  		found := iterToSetOfKeys(ctx, t, iter)
   589  		for _, k := range escape.WeirdStrings {
   590  			key := keyPrefix + k
   591  			if !found[key] {
   592  				if err := b.WriteAll(ctx, key, content, nil); err != nil {
   593  					b.Close()
   594  					t.Fatal(err)
   595  				}
   596  			}
   597  		}
   598  		return b, func() { b.Close(); h.Close() }
   599  	}
   600  
   601  	b, done := init(t)
   602  	defer done()
   603  
   604  	iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
   605  	got := iterToSetOfKeys(ctx, t, iter)
   606  
   607  	if diff := cmp.Diff(got, want); diff != "" {
   608  		t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   609  	}
   610  }
   611  
   612  // listResult is a recursive view of the hierarchy. It's used to verify List
   613  // using Delimiter.
   614  type listResult struct {
   615  	Key   string
   616  	IsDir bool
   617  	// If IsDir is true and recursion is enabled, the recursive listing of the directory.
   618  	Sub []listResult
   619  }
   620  
   621  // doList lists b using prefix and delim.
   622  // If recurse is true, it recurses into directories filling in listResult.Sub.
   623  func doList(ctx context.Context, b *blob.Bucket, prefix, delim string, recurse bool) ([]listResult, error) {
   624  	iter := b.List(&blob.ListOptions{
   625  		Prefix:    prefix,
   626  		Delimiter: delim,
   627  	})
   628  	var retval []listResult
   629  	for {
   630  		obj, err := iter.Next(ctx)
   631  		if err == io.EOF {
   632  			if obj != nil {
   633  				return nil, errors.New("obj is not nil on EOF")
   634  			}
   635  			break
   636  		}
   637  		if err != nil {
   638  			return nil, err
   639  		}
   640  		var sub []listResult
   641  		if obj.IsDir && recurse {
   642  			sub, err = doList(ctx, b, obj.Key, delim, true)
   643  			if err != nil {
   644  				return nil, err
   645  			}
   646  		}
   647  		retval = append(retval, listResult{
   648  			Key:   obj.Key,
   649  			IsDir: obj.IsDir,
   650  			Sub:   sub,
   651  		})
   652  	}
   653  	return retval, nil
   654  }
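
        // For example, with delimiter "/" and keys "a/x.txt" and "b.txt" in the
        // bucket, a recursive doList would return (sketch):
        //
        //	[]listResult{
        //		{Key: "a/", IsDir: true, Sub: []listResult{{Key: "a/x.txt"}}},
        //		{Key: "b.txt"},
        //	}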
   655  
   656  // testListDelimiters tests the functionality of List using Delimiters.
   657  func testListDelimiters(t *testing.T, newHarness HarnessMaker) {
   658  	const keyPrefix = "blob-for-delimiters-"
   659  	content := []byte("hello")
   660  
   661  	// The set of files to use for these tests. The strings in each entry will
   662  	// be joined using delim, so the result is a directory structure like this
   663  	// (using / as delimiter):
   664  	// dir1/a.txt
   665  	// dir1/b.txt
   666  	// dir1/subdir/c.txt
   667  	// dir1/subdir/d.txt
   668  	// dir2/e.txt
   669  	// f.txt
   670  	keys := [][]string{
   671  		{"dir1", "a.txt"},
   672  		{"dir1", "b.txt"},
   673  		{"dir1", "subdir", "c.txt"},
   674  		{"dir1", "subdir", "d.txt"},
   675  		{"dir2", "e.txt"},
   676  		{"f.txt"},
   677  	}
   678  
   679  	// Test with several different delimiters.
   680  	tests := []struct {
   681  		name, delim string
   682  		// Expected result of doList with an empty delimiter.
   683  		// All keys should be listed at the top level, with no directories.
   684  		wantFlat []listResult
   685  		// Expected result of doList with delimiter and recurse = true.
   686  		// All keys should be listed, with keys in directories in the Sub field
   687  		// of their directory.
   688  		wantRecursive []listResult
   689  		// Expected result of repeatedly calling driver.ListPaged with delimiter
   690  		// and page size = 1.
   691  		wantPaged []listResult
   692  		// expected result of doList with delimiter and recurse = false
   693  		// after dir2/e.txt is deleted
   694  		// dir1/ and f.txt should be listed; dir2/ should no longer be present
   695  		// because there are no keys in it.
   696  		wantAfterDel []listResult
   697  	}{
   698  		{
   699  			name:  "fwdslash",
   700  			delim: "/",
   701  			wantFlat: []listResult{
   702  				{Key: keyPrefix + "/dir1/a.txt"},
   703  				{Key: keyPrefix + "/dir1/b.txt"},
   704  				{Key: keyPrefix + "/dir1/subdir/c.txt"},
   705  				{Key: keyPrefix + "/dir1/subdir/d.txt"},
   706  				{Key: keyPrefix + "/dir2/e.txt"},
   707  				{Key: keyPrefix + "/f.txt"},
   708  			},
   709  			wantRecursive: []listResult{
   710  				{
   711  					Key:   keyPrefix + "/dir1/",
   712  					IsDir: true,
   713  					Sub: []listResult{
   714  						{Key: keyPrefix + "/dir1/a.txt"},
   715  						{Key: keyPrefix + "/dir1/b.txt"},
   716  						{
   717  							Key:   keyPrefix + "/dir1/subdir/",
   718  							IsDir: true,
   719  							Sub: []listResult{
   720  								{Key: keyPrefix + "/dir1/subdir/c.txt"},
   721  								{Key: keyPrefix + "/dir1/subdir/d.txt"},
   722  							},
   723  						},
   724  					},
   725  				},
   726  				{
   727  					Key:   keyPrefix + "/dir2/",
   728  					IsDir: true,
   729  					Sub: []listResult{
   730  						{Key: keyPrefix + "/dir2/e.txt"},
   731  					},
   732  				},
   733  				{Key: keyPrefix + "/f.txt"},
   734  			},
   735  			wantPaged: []listResult{
   736  				{
   737  					Key:   keyPrefix + "/dir1/",
   738  					IsDir: true,
   739  				},
   740  				{
   741  					Key:   keyPrefix + "/dir2/",
   742  					IsDir: true,
   743  				},
   744  				{Key: keyPrefix + "/f.txt"},
   745  			},
   746  			wantAfterDel: []listResult{
   747  				{
   748  					Key:   keyPrefix + "/dir1/",
   749  					IsDir: true,
   750  				},
   751  				{Key: keyPrefix + "/f.txt"},
   752  			},
   753  		},
   754  		{
   755  			name:  "backslash",
   756  			delim: "\\",
   757  			wantFlat: []listResult{
   758  				{Key: keyPrefix + "\\dir1\\a.txt"},
   759  				{Key: keyPrefix + "\\dir1\\b.txt"},
   760  				{Key: keyPrefix + "\\dir1\\subdir\\c.txt"},
   761  				{Key: keyPrefix + "\\dir1\\subdir\\d.txt"},
   762  				{Key: keyPrefix + "\\dir2\\e.txt"},
   763  				{Key: keyPrefix + "\\f.txt"},
   764  			},
   765  			wantRecursive: []listResult{
   766  				{
   767  					Key:   keyPrefix + "\\dir1\\",
   768  					IsDir: true,
   769  					Sub: []listResult{
   770  						{Key: keyPrefix + "\\dir1\\a.txt"},
   771  						{Key: keyPrefix + "\\dir1\\b.txt"},
   772  						{
   773  							Key:   keyPrefix + "\\dir1\\subdir\\",
   774  							IsDir: true,
   775  							Sub: []listResult{
   776  								{Key: keyPrefix + "\\dir1\\subdir\\c.txt"},
   777  								{Key: keyPrefix + "\\dir1\\subdir\\d.txt"},
   778  							},
   779  						},
   780  					},
   781  				},
   782  				{
   783  					Key:   keyPrefix + "\\dir2\\",
   784  					IsDir: true,
   785  					Sub: []listResult{
   786  						{Key: keyPrefix + "\\dir2\\e.txt"},
   787  					},
   788  				},
   789  				{Key: keyPrefix + "\\f.txt"},
   790  			},
   791  			wantPaged: []listResult{
   792  				{
   793  					Key:   keyPrefix + "\\dir1\\",
   794  					IsDir: true,
   795  				},
   796  				{
   797  					Key:   keyPrefix + "\\dir2\\",
   798  					IsDir: true,
   799  				},
   800  				{Key: keyPrefix + "\\f.txt"},
   801  			},
   802  			wantAfterDel: []listResult{
   803  				{
   804  					Key:   keyPrefix + "\\dir1\\",
   805  					IsDir: true,
   806  				},
   807  				{Key: keyPrefix + "\\f.txt"},
   808  			},
   809  		},
   810  		{
   811  			name:  "abc",
   812  			delim: "abc",
   813  			wantFlat: []listResult{
   814  				{Key: keyPrefix + "abcdir1abca.txt"},
   815  				{Key: keyPrefix + "abcdir1abcb.txt"},
   816  				{Key: keyPrefix + "abcdir1abcsubdirabcc.txt"},
   817  				{Key: keyPrefix + "abcdir1abcsubdirabcd.txt"},
   818  				{Key: keyPrefix + "abcdir2abce.txt"},
   819  				{Key: keyPrefix + "abcf.txt"},
   820  			},
   821  			wantRecursive: []listResult{
   822  				{
   823  					Key:   keyPrefix + "abcdir1abc",
   824  					IsDir: true,
   825  					Sub: []listResult{
   826  						{Key: keyPrefix + "abcdir1abca.txt"},
   827  						{Key: keyPrefix + "abcdir1abcb.txt"},
   828  						{
   829  							Key:   keyPrefix + "abcdir1abcsubdirabc",
   830  							IsDir: true,
   831  							Sub: []listResult{
   832  								{Key: keyPrefix + "abcdir1abcsubdirabcc.txt"},
   833  								{Key: keyPrefix + "abcdir1abcsubdirabcd.txt"},
   834  							},
   835  						},
   836  					},
   837  				},
   838  				{
   839  					Key:   keyPrefix + "abcdir2abc",
   840  					IsDir: true,
   841  					Sub: []listResult{
   842  						{Key: keyPrefix + "abcdir2abce.txt"},
   843  					},
   844  				},
   845  				{Key: keyPrefix + "abcf.txt"},
   846  			},
   847  			wantPaged: []listResult{
   848  				{
   849  					Key:   keyPrefix + "abcdir1abc",
   850  					IsDir: true,
   851  				},
   852  				{
   853  					Key:   keyPrefix + "abcdir2abc",
   854  					IsDir: true,
   855  				},
   856  				{Key: keyPrefix + "abcf.txt"},
   857  			},
   858  			wantAfterDel: []listResult{
   859  				{
   860  					Key:   keyPrefix + "abcdir1abc",
   861  					IsDir: true,
   862  				},
   863  				{Key: keyPrefix + "abcf.txt"},
   864  			},
   865  		},
   866  	}
   867  
   868  	ctx := context.Background()
   869  
   870  	// Creates blobs for sub-tests below.
   871  	// We only create the blobs once, for efficiency and because there's
   872  	// no guarantee that after we create them they will be immediately returned
   873  	// from List. The very first time the test is run against a Bucket, it may be
   874  	// flaky due to this race.
   875  	init := func(t *testing.T, delim string) (driver.Bucket, *blob.Bucket, func()) {
   876  		h, err := newHarness(ctx, t)
   877  		if err != nil {
   878  			t.Fatal(err)
   879  		}
   880  		drv, err := h.MakeDriver(ctx)
   881  		if err != nil {
   882  			t.Fatal(err)
   883  		}
   884  		b := blob.NewBucket(drv)
   885  
   886  		// See if the blobs are already there.
   887  		prefix := keyPrefix + delim
   888  		iter := b.List(&blob.ListOptions{Prefix: prefix})
   889  		found := iterToSetOfKeys(ctx, t, iter)
   890  		for _, keyParts := range keys {
   891  			key := prefix + strings.Join(keyParts, delim)
   892  			if !found[key] {
   893  				if err := b.WriteAll(ctx, key, content, nil); err != nil {
   894  					b.Close()
   895  					t.Fatal(err)
   896  				}
   897  			}
   898  		}
   899  		return drv, b, func() { b.Close(); h.Close() }
   900  	}
   901  
   902  	for _, tc := range tests {
   903  		t.Run(tc.name, func(t *testing.T) {
   904  			drv, b, done := init(t, tc.delim)
   905  			defer done()
   906  
   907  			// Fetch without using delimiter.
   908  			got, err := doList(ctx, b, keyPrefix+tc.delim, "", true)
   909  			if err != nil {
   910  				t.Fatal(err)
   911  			}
   912  			if diff := cmp.Diff(got, tc.wantFlat); diff != "" {
   913  				t.Errorf("with no delimiter, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantFlat, diff)
   914  			}
   915  
   916  			// Fetch using delimiter, recursively.
   917  			got, err = doList(ctx, b, keyPrefix+tc.delim, tc.delim, true)
   918  			if err != nil {
   919  				t.Fatal(err)
   920  			}
   921  			if diff := cmp.Diff(got, tc.wantRecursive); diff != "" {
   922  				t.Errorf("with delimiter, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantRecursive, diff)
   923  			}
   924  
   925  			// Test pagination via driver.ListPaged.
   926  			var nextPageToken []byte
   927  			got = nil
   928  			for {
   929  				page, err := drv.ListPaged(ctx, &driver.ListOptions{
   930  					Prefix:    keyPrefix + tc.delim,
   931  					Delimiter: tc.delim,
   932  					PageSize:  1,
   933  					PageToken: nextPageToken,
   934  				})
   935  				if err != nil {
   936  					t.Fatal(err)
   937  				}
   938  				if len(page.Objects) > 1 {
   939  					t.Errorf("got %d objects on a page, want 0 or 1", len(page.Objects))
   940  				}
   941  				for _, obj := range page.Objects {
   942  					got = append(got, listResult{
   943  						Key:   obj.Key,
   944  						IsDir: obj.IsDir,
   945  					})
   946  				}
   947  				if len(page.NextPageToken) == 0 {
   948  					break
   949  				}
   950  				nextPageToken = page.NextPageToken
   951  			}
   952  			if diff := cmp.Diff(got, tc.wantPaged); diff != "" {
   953  				t.Errorf("paged got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantPaged, diff)
   954  			}
   955  
   956  			// Delete dir2/e.txt and verify that dir2/ is no longer returned.
   957  			key := strings.Join(append([]string{keyPrefix}, "dir2", "e.txt"), tc.delim)
   958  			if err := b.Delete(ctx, key); err != nil {
   959  				t.Fatal(err)
   960  			}
   961  			// Attempt to restore dir2/e.txt at the end of the test for the next run.
   962  			defer func() {
   963  				_ = b.WriteAll(ctx, key, content, nil)
   964  			}()
   965  
   966  			got, err = doList(ctx, b, keyPrefix+tc.delim, tc.delim, false)
   967  			if err != nil {
   968  				t.Fatal(err)
   969  			}
   970  			if diff := cmp.Diff(got, tc.wantAfterDel); diff != "" {
   971  				t.Errorf("after delete, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantAfterDel, diff)
   972  			}
   973  		})
   974  	}
   975  }
   976  
   977  // testDirsWithCharactersBeforeDelimiter tests a case where there's
   978  // a directory on a pagination boundary that ends with a character that's
   979  // less than the delimiter.
   980  // See https://github.com/google/go-cloud/issues/3089.
   981  func testDirsWithCharactersBeforeDelimiter(t *testing.T, newHarness HarnessMaker) {
   982  	const keyPrefix = "blob-for-dirs-with-chars-before-delimiter/"
   983  	content := []byte("hello")
   984  
   985  	// The set of files to use for these tests.
   986  	keys := []string{
   987  		"testFile1",
   988  		"t/t/t",
   989  		"t-/t.",
   990  		"dir1/testFile1dir1",
   991  		"dir2/testFile1dir2",
   992  		"d",
   993  	}
   994  
   995  	// Note that "t-/" is before "t/". The delimiter is included in the
   996  	// alphabetical ordering.
   997  	want := []string{"d", "dir1/", "dir2/", "t-/", "t/", "testFile1"}
   998  
   999  	// Create blobs.
  1000  	// We only create the blobs once, for efficiency and because there's
  1001  	// no guarantee that after we create them they will be immediately returned
  1002  	// from List. The very first time the test is run against a Bucket, it may be
  1003  	// flaky due to this race.
  1004  	ctx := context.Background()
  1005  	h, err := newHarness(ctx, t)
  1006  	if err != nil {
  1007  		t.Fatal(err)
  1008  	}
  1009  	drv, err := h.MakeDriver(ctx)
  1010  	if err != nil {
  1011  		t.Fatal(err)
  1012  	}
  1013  	b := blob.NewBucket(drv)
  1014  
  1015  	// See if the blobs are already there.
  1016  	iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
  1017  	found := iterToSetOfKeys(ctx, t, iter)
  1018  	for _, key := range keys {
  1019  		key = keyPrefix + key
  1020  		if !found[key] {
  1021  			if err := b.WriteAll(ctx, key, content, nil); err != nil {
  1022  				b.Close()
  1023  				t.Fatal(err)
  1024  			}
  1025  		}
  1026  	}
  1027  	defer b.Close()
  1028  	defer h.Close()
  1029  
  1030  	opts := &blob.ListOptions{
  1031  		Prefix:    keyPrefix,
  1032  		Delimiter: "/",
  1033  	}
  1034  	// All page sizes should return the same end result.
  1035  	for pageSize := 10; pageSize != 0; pageSize-- {
  1036  		var got []string
  1037  		objs, token, err := b.ListPage(ctx, blob.FirstPageToken, pageSize, opts)
  1038  		for {
  1039  			if err != nil {
  1040  				t.Fatal(err)
  1041  			}
  1042  			for _, o := range objs {
  1043  				key := strings.TrimPrefix(o.Key, keyPrefix)
  1044  				got = append(got, key)
  1045  			}
  1046  			if token == nil {
  1047  				break
  1048  			}
  1049  			objs, token, err = b.ListPage(ctx, token, pageSize, opts)
  1050  		}
  1051  		if !reflect.DeepEqual(want, got) {
  1052  			t.Fatalf("For page size %d, got \n%v\nwant\n%v", pageSize, got, want)
  1053  		}
  1054  	}
  1055  }
  1056  
  1057  func iterToSetOfKeys(ctx context.Context, t *testing.T, iter *blob.ListIterator) map[string]bool {
  1058  	retval := map[string]bool{}
  1059  	for {
  1060  		if item, err := iter.Next(ctx); err == io.EOF {
  1061  			break
  1062  		} else if err != nil {
  1063  			t.Fatal(err)
  1064  		} else {
  1065  			retval[item.Key] = true
  1066  		}
  1067  	}
  1068  	return retval
  1069  }
  1070  
  1071  // testRead tests the functionality of NewReader, NewRangeReader, and Reader.
  1072  func testRead(t *testing.T, newHarness HarnessMaker) {
  1073  	const key = "blob-for-reading"
  1074  	content := []byte("abcdefghijklmnopqrstuvwxyz")
  1075  	contentSize := int64(len(content))
  1076  
  1077  	tests := []struct {
  1078  		name           string
  1079  		key            string
  1080  		offset, length int64
  1081  		want           []byte
  1082  		wantReadSize   int64
  1083  		wantErr        bool
  1084  		// set to true to skip creation of the object for
  1085  		// tests where we expect an error without any actual
  1086  		// read.
  1087  		skipCreate bool
  1088  	}{
  1089  		{
  1090  			name:    "read of nonexistent key fails",
  1091  			key:     "key-does-not-exist",
  1092  			length:  -1,
  1093  			wantErr: true,
  1094  		},
  1095  		{
  1096  			name:       "negative offset fails",
  1097  			key:        key,
  1098  			offset:     -1,
  1099  			wantErr:    true,
  1100  			skipCreate: true,
  1101  		},
  1102  		{
  1103  			name: "length 0 read",
  1104  			key:  key,
  1105  			want: []byte{},
  1106  		},
  1107  		{
  1108  			name:         "read from positive offset to end",
  1109  			key:          key,
  1110  			offset:       10,
  1111  			length:       -1,
  1112  			want:         content[10:],
  1113  			wantReadSize: contentSize - 10,
  1114  		},
  1115  		{
  1116  			name:         "read a part in middle",
  1117  			key:          key,
  1118  			offset:       10,
  1119  			length:       5,
  1120  			want:         content[10:15],
  1121  			wantReadSize: 5,
  1122  		},
  1123  		{
  1124  			name:         "read in full",
  1125  			key:          key,
  1126  			length:       -1,
  1127  			want:         content,
  1128  			wantReadSize: contentSize,
  1129  		},
  1130  		{
  1131  			name:         "read in full with negative length not -1",
  1132  			key:          key,
  1133  			length:       -42,
  1134  			want:         content,
  1135  			wantReadSize: contentSize,
  1136  		},
  1137  	}
  1138  
  1139  	ctx := context.Background()
  1140  
  1141  	// Creates a blob for sub-tests below.
  1142  	init := func(t *testing.T, skipCreate bool) (*blob.Bucket, func()) {
  1143  		h, err := newHarness(ctx, t)
  1144  		if err != nil {
  1145  			t.Fatal(err)
  1146  		}
  1147  
  1148  		drv, err := h.MakeDriver(ctx)
  1149  		if err != nil {
  1150  			t.Fatal(err)
  1151  		}
  1152  		b := blob.NewBucket(drv)
  1153  		if skipCreate {
  1154  			return b, func() { b.Close(); h.Close() }
  1155  		}
  1156  		if err := b.WriteAll(ctx, key, content, nil); err != nil {
  1157  			b.Close()
  1158  			t.Fatal(err)
  1159  		}
  1160  		return b, func() {
  1161  			_ = b.Delete(ctx, key)
  1162  			b.Close()
  1163  			h.Close()
  1164  		}
  1165  	}
  1166  
  1167  	for _, tc := range tests {
  1168  		t.Run(tc.name, func(t *testing.T) {
  1169  			b, done := init(t, tc.skipCreate)
  1170  			defer done()
  1171  
  1172  			r, err := b.NewRangeReader(ctx, tc.key, tc.offset, tc.length, nil)
  1173  			if (err != nil) != tc.wantErr {
  1174  				t.Errorf("got err %v want error %v", err, tc.wantErr)
  1175  			}
  1176  			if err != nil {
  1177  				return
  1178  			}
  1179  			defer r.Close()
  1180  			// Make the buffer bigger than needed to make sure we actually only read
  1181  			// the expected number of bytes.
  1182  			got := make([]byte, tc.wantReadSize+10)
  1183  			n, err := r.Read(got)
  1184  			// EOF error is optional, see https://golang.org/pkg/io/#Reader.
  1185  			if err != nil && err != io.EOF {
  1186  				t.Errorf("unexpected error during read: %v", err)
  1187  			}
  1188  			if int64(n) != tc.wantReadSize {
  1189  				t.Errorf("got read length %d want %d", n, tc.wantReadSize)
  1190  			}
  1191  			if !cmp.Equal(got[:tc.wantReadSize], tc.want) {
  1192  				t.Errorf("got %q want %q", string(got[:tc.wantReadSize]), string(tc.want))
  1193  			}
  1194  			if r.Size() != contentSize {
  1195  				t.Errorf("got size %d want %d", r.Size(), contentSize)
  1196  			}
  1197  			if r.ModTime().IsZero() {
  1198  				t.Errorf("got zero mod time, want non-zero")
  1199  			}
  1200  			// For tests that successfully read, recreate the io.Reader and
  1201  			// test it with iotest.TestReader.
  1202  			r, err = b.NewRangeReader(ctx, tc.key, tc.offset, tc.length, nil)
  1203  			if err != nil {
  1204  				t.Errorf("failed to recreate Reader: %v", err)
  1205  				return
  1206  			}
  1207  			defer r.Close()
  1208  			if err = iotest.TestReader(r, tc.want); err != nil {
  1209  				t.Errorf("iotest.TestReader failed: %v", err)
  1210  				return
  1211  			}
  1212  		})
  1213  	}
  1214  }
  1215  
  1216  // testAttributes tests Attributes.
  1217  func testAttributes(t *testing.T, newHarness HarnessMaker) {
  1218  	const (
  1219  		dirKey             = "someDir"
  1220  		key                = dirKey + "/blob-for-attributes"
  1221  		contentType        = "text/plain"
  1222  		cacheControl       = "no-cache"
  1223  		contentDisposition = "inline"
  1224  		contentEncoding    = "identity"
  1225  		contentLanguage    = "en"
  1226  	)
  1227  	content := []byte("Hello World!")
  1228  
  1229  	ctx := context.Background()
  1230  
  1231  	// Creates a blob for sub-tests below.
  1232  	init := func(t *testing.T) (*blob.Bucket, func()) {
  1233  		h, err := newHarness(ctx, t)
  1234  		if err != nil {
  1235  			t.Fatal(err)
  1236  		}
  1237  		drv, err := h.MakeDriver(ctx)
  1238  		if err != nil {
  1239  			t.Fatal(err)
  1240  		}
  1241  		b := blob.NewBucket(drv)
  1242  		opts := &blob.WriterOptions{
  1243  			ContentType:        contentType,
  1244  			CacheControl:       cacheControl,
  1245  			ContentDisposition: contentDisposition,
  1246  			ContentEncoding:    contentEncoding,
  1247  			ContentLanguage:    contentLanguage,
  1248  		}
  1249  		if err := b.WriteAll(ctx, key, content, opts); err != nil {
  1250  			b.Close()
  1251  			t.Fatal(err)
  1252  		}
  1253  		return b, func() {
  1254  			_ = b.Delete(ctx, key)
  1255  			b.Close()
  1256  			h.Close()
  1257  		}
  1258  	}
  1259  
  1260  	b, done := init(t)
  1261  	defer done()
  1262  
  1263  	for _, badKey := range []string{
  1264  		"not-found",
  1265  		dirKey,
  1266  		dirKey + "/",
  1267  	} {
  1268  		_, err := b.Attributes(ctx, badKey)
  1269  		if err == nil {
  1270  			t.Errorf("got nil want error")
  1271  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  1272  			t.Errorf("got %v want NotFound error", err)
  1273  		} else if !strings.Contains(err.Error(), badKey) {
  1274  			t.Errorf("got %v want error to include missing key", err)
  1275  		}
  1276  	}
  1277  
  1278  	a, err := b.Attributes(ctx, key)
  1279  	if err != nil {
  1280  		t.Fatalf("failed Attributes: %v", err)
  1281  	}
  1282  	// Also make a Reader so we can verify the subset of attributes
  1283  	// that it exposes.
  1284  	r, err := b.NewReader(ctx, key, nil)
  1285  	if err != nil {
  1286  		t.Fatalf("failed NewReader: %v", err)
  1287  	}
  1288  	if a.CacheControl != cacheControl {
  1289  		t.Errorf("got CacheControl %q want %q", a.CacheControl, cacheControl)
  1290  	}
  1291  	if a.ContentDisposition != contentDisposition {
  1292  		t.Errorf("got ContentDisposition %q want %q", a.ContentDisposition, contentDisposition)
  1293  	}
  1294  	if a.ContentEncoding != contentEncoding {
  1295  		t.Errorf("got ContentEncoding %q want %q", a.ContentEncoding, contentEncoding)
  1296  	}
  1297  	if a.ContentLanguage != contentLanguage {
  1298  		t.Errorf("got ContentLanguage %q want %q", a.ContentLanguage, contentLanguage)
  1299  	}
  1300  	if a.ContentType != contentType {
  1301  		t.Errorf("got ContentType %q want %q", a.ContentType, contentType)
  1302  	}
  1303  	if r.ContentType() != contentType {
  1304  		t.Errorf("got Reader.ContentType() %q want %q", r.ContentType(), contentType)
  1305  	}
  1306  	if !a.CreateTime.IsZero() {
  1307  		if a.CreateTime.After(a.ModTime) {
  1308  			t.Errorf("CreateTime %v is after ModTime %v", a.CreateTime, a.ModTime)
  1309  		}
  1310  	}
  1311  	if a.ModTime.IsZero() {
  1312  		t.Errorf("ModTime not set")
  1313  	}
  1314  	if a.Size != int64(len(content)) {
  1315  		t.Errorf("got Size %d want %d", a.Size, len(content))
  1316  	}
  1317  	if r.Size() != int64(len(content)) {
  1318  		t.Errorf("got Reader.Size() %d want %d", r.Size(), len(content))
  1319  	}
  1320  	if a.ETag == "" {
  1321  		t.Error("ETag not set")
  1322  	}
  1323  	// Some basic syntax checks on ETag based on https://en.wikipedia.org/wiki/HTTP_ETag.
  1324  	// It should be of the form "xxxx" or W/"xxxx".
  1325  	if !strings.HasPrefix(a.ETag, "W/\"") && !strings.HasPrefix(a.ETag, "\"") {
  1326  		t.Errorf("ETag should start with W/\" or \" (got %s)", a.ETag)
  1327  	}
  1328  	if !strings.HasSuffix(a.ETag, "\"") {
  1329  		t.Errorf("ETag should end with \" (got %s)", a.ETag)
  1330  	}
  1331  	r.Close()
  1332  
  1333  	// Modify and re-fetch attributes.
  1334  	if err := b.WriteAll(ctx, key, content, nil); err != nil {
  1335  		t.Fatal(err)
  1336  	}
  1337  
  1338  	a2, err := b.Attributes(ctx, key)
  1339  	if err != nil {
  1340  		t.Errorf("failed Attributes#2: %v", err)
  1341  		t.Fatalf("failed Attributes#2: %v", err)
  1342  	if a2.ModTime.Before(a.ModTime) {
  1343  		t.Errorf("ModTime %v is before %v", a2.ModTime, a.ModTime)
  1344  	}
  1345  }
  1346  
  1347  // loadTestData loads test data, inlined using go-bindata.
  1348  func loadTestData(t testing.TB, name string) []byte {
  1349  	data, err := Asset(name)
  1350  	if err != nil {
  1351  		t.Fatal(err)
  1352  	}
  1353  	return data
  1354  }
  1355  
  1356  // testWrite tests the functionality of NewWriter and Writer.
  1357  func testWrite(t *testing.T, newHarness HarnessMaker) {
  1358  	const key = "blob-for-reading"
  1359  	const existingContent = "existing content"
  1360  	smallText := loadTestData(t, "test-small.txt")
  1361  	mediumHTML := loadTestData(t, "test-medium.html")
  1362  	largeJpg := loadTestData(t, "test-large.jpg")
  1363  	helloWorld := []byte("hello world")
  1364  	helloWorldMD5 := md5.Sum(helloWorld)
  1365  
  1366  	tests := []struct {
  1367  		name            string
  1368  		key             string
  1369  		exists          bool
  1370  		content         []byte
  1371  		contentType     string
  1372  		contentMD5      []byte
  1373  		firstChunk      int
  1374  		wantContentType string
  1375  		wantErr         bool
  1376  		wantReadErr     bool // if wantErr is true, whether the Read after the failed write may fail with an error other than NotFound
  1377  	}{
  1378  		{
  1379  			name:        "write to empty key fails",
  1380  			wantErr:     true,
  1381  			wantReadErr: true, // read from empty key fails, but not always with NotFound
  1382  		},
  1383  		{
  1384  			name: "no write then close results in empty blob",
  1385  			key:  key,
  1386  		},
  1387  		{
  1388  			name: "no write then close results in empty blob, blob existed",
  1389  			key:  key,
  1390  		},
  1391  		{
  1392  			name:        "invalid ContentType fails",
  1393  			key:         key,
  1394  			contentType: "application/octet/stream",
  1395  			wantErr:     true,
  1396  		},
  1397  		{
  1398  			name:            "ContentType is discovered if not provided",
  1399  			key:             key,
  1400  			content:         mediumHTML,
  1401  			wantContentType: "text/html",
  1402  		},
  1403  		{
  1404  			name:            "write with explicit ContentType overrides discovery",
  1405  			key:             key,
  1406  			content:         mediumHTML,
  1407  			contentType:     "application/json",
  1408  			wantContentType: "application/json",
  1409  		},
  1410  		{
  1411  			name:       "Content md5 match",
  1412  			key:        key,
  1413  			content:    helloWorld,
  1414  			contentMD5: helloWorldMD5[:],
  1415  		},
  1416  		{
  1417  			name:       "Content md5 did not match",
  1418  			key:        key,
  1419  			content:    []byte("not hello world"),
  1420  			contentMD5: helloWorldMD5[:],
  1421  			wantErr:    true,
  1422  		},
  1423  		{
  1424  			name:       "Content md5 did not match, blob existed",
  1425  			exists:     true,
  1426  			key:        key,
  1427  			content:    []byte("not hello world"),
  1428  			contentMD5: helloWorldMD5[:],
  1429  			wantErr:    true,
  1430  		},
  1431  		{
  1432  			name:            "a small text file",
  1433  			key:             key,
  1434  			content:         smallText,
  1435  			wantContentType: "text/html",
  1436  		},
  1437  		{
  1438  			name:            "a large jpg file",
  1439  			key:             key,
  1440  			content:         largeJpg,
  1441  			wantContentType: "image/jpg",
  1442  		},
  1443  		{
  1444  			name:            "a large jpg file written in two chunks",
  1445  			key:             key,
  1446  			firstChunk:      10,
  1447  			content:         largeJpg,
  1448  			wantContentType: "image/jpg",
  1449  		},
  1450  		// TODO(issue #304): Fails for GCS.
  1451  		/*
  1452  			{
  1453  				name:            "ContentType is parsed and reformatted",
  1454  				key:             key,
  1455  				content:         []byte("foo"),
  1456  				contentType:     `FORM-DATA;name="foo"`,
  1457  				wantContentType: `form-data; name=foo`,
  1458  			},
  1459  		*/
  1460  	}
  1461  
  1462  	ctx := context.Background()
  1463  	for _, tc := range tests {
  1464  		t.Run(tc.name, func(t *testing.T) {
  1465  			h, err := newHarness(ctx, t)
  1466  			if err != nil {
  1467  				t.Fatal(err)
  1468  			}
  1469  			defer h.Close()
  1470  			drv, err := h.MakeDriver(ctx)
  1471  			if err != nil {
  1472  				t.Fatal(err)
  1473  			}
  1474  			b := blob.NewBucket(drv)
  1475  			defer b.Close()
  1476  
  1477  			// If the test wants the blob to already exist, write it.
  1478  			if tc.exists {
  1479  				if err := b.WriteAll(ctx, key, []byte(existingContent), nil); err != nil {
  1480  					t.Fatal(err)
  1481  				}
  1482  				defer func() {
  1483  					_ = b.Delete(ctx, key)
  1484  				}()
  1485  			}
  1486  
  1487  			// Write the content.
  1488  			opts := &blob.WriterOptions{
  1489  				ContentType: tc.contentType,
  1490  				ContentMD5:  tc.contentMD5[:],
  1491  			}
  1492  			w, err := b.NewWriter(ctx, tc.key, opts)
  1493  			if err == nil {
  1494  				if len(tc.content) > 0 {
  1495  					if tc.firstChunk == 0 {
  1496  						// Write the whole thing.
  1497  						_, err = w.Write(tc.content)
  1498  					} else {
  1499  						// Write it in 2 chunks.
  1500  						_, err = w.Write(tc.content[:tc.firstChunk])
  1501  						if err == nil {
  1502  							_, err = w.Write(tc.content[tc.firstChunk:])
  1503  						}
  1504  					}
  1505  				}
  1506  				if err == nil {
  1507  					err = w.Close()
  1508  				}
  1509  			}
  1510  			if (err != nil) != tc.wantErr {
  1511  				t.Errorf("NewWriter or Close got err %v want error %v", err, tc.wantErr)
  1512  			}
  1513  			if err != nil {
  1514  				// The write failed; verify that it had no effect.
  1515  				buf, err := b.ReadAll(ctx, tc.key)
  1516  				if tc.exists {
  1517  					// Verify the previous content is still there.
  1518  					if !bytes.Equal(buf, []byte(existingContent)) {
  1519  						t.Errorf("Write failed as expected, but content doesn't match expected previous content; got \n%s\n want \n%s", string(buf), existingContent)
  1520  					}
  1521  				} else {
  1522  					// Verify that the read fails with NotFound.
  1523  					if err == nil {
  1524  						t.Error("Write failed as expected, but Read after that didn't return an error")
  1525  					} else if !tc.wantReadErr && gcerrors.Code(err) != gcerrors.NotFound {
  1526  						t.Errorf("Write failed as expected, but Read after that didn't return the right error; got %v want NotFound", err)
  1527  					} else if !strings.Contains(err.Error(), tc.key) {
  1528  						t.Errorf("got %v want error to include missing key", err)
  1529  					}
  1530  				}
  1531  				return
  1532  			}
  1533  			defer func() { _ = b.Delete(ctx, tc.key) }()
  1534  
  1535  			// Read it back.
  1536  			buf, err := b.ReadAll(ctx, tc.key)
  1537  			if err != nil {
  1538  				t.Fatal(err)
  1539  			}
  1540  			if !bytes.Equal(buf, tc.content) {
  1541  				if len(buf) < 100 && len(tc.content) < 100 {
  1542  					t.Errorf("read didn't match write; got \n%s\n want \n%s", string(buf), string(tc.content))
  1543  				} else {
  1544  					t.Error("read didn't match write, content too large to display")
  1545  				}
  1546  			}
  1547  		})
  1548  	}
  1549  }
  1550  
  1551  // testCanceledWrite tests the functionality of canceling an in-progress write.
  1552  func testCanceledWrite(t *testing.T, newHarness HarnessMaker) {
  1553  	const key = "blob-for-canceled-write"
  1554  	content := []byte("hello world")
  1555  	cancelContent := []byte("going to cancel")
  1556  
  1557  	tests := []struct {
  1558  		description string
  1559  		contentType string
  1560  		exists      bool
  1561  	}{
  1562  		{
  1563  			// The write will be buffered in the portable type as part of
  1564  			// ContentType detection, so the first call to the Driver will be Close.
  1565  			description: "EmptyContentType",
  1566  		},
  1567  		{
  1568  			// The write will be sent to the Driver, which may do its own
  1569  			// internal buffering.
  1570  			description: "NonEmptyContentType",
  1571  			contentType: "text/plain",
  1572  		},
  1573  		{
  1574  			description: "BlobExists",
  1575  			exists:      true,
  1576  		},
  1577  		// TODO(issue #482): Find a way to test that a chunked upload that's interrupted
  1578  		// after some chunks are uploaded cancels correctly.
  1579  	}
  1580  
  1581  	ctx := context.Background()
  1582  	for _, test := range tests {
  1583  		t.Run(test.description, func(t *testing.T) {
  1584  			cancelCtx, cancel := context.WithCancel(ctx)
  1585  			h, err := newHarness(ctx, t)
  1586  			if err != nil {
  1587  				t.Fatal(err)
  1588  			}
  1589  			defer h.Close()
  1590  			drv, err := h.MakeDriver(ctx)
  1591  			if err != nil {
  1592  				t.Fatal(err)
  1593  			}
  1594  			b := blob.NewBucket(drv)
  1595  			defer b.Close()
  1596  
  1597  			opts := &blob.WriterOptions{
  1598  				ContentType: test.contentType,
  1599  			}
  1600  			// If the test wants the blob to already exist, write it.
  1601  			if test.exists {
  1602  				if err := b.WriteAll(ctx, key, content, opts); err != nil {
  1603  					t.Fatal(err)
  1604  				}
  1605  				defer func() {
  1606  					_ = b.Delete(ctx, key)
  1607  				}()
  1608  			}
  1609  
  1610  			// Create a writer with the context that we're going
  1611  			// to cancel.
  1612  			w, err := b.NewWriter(cancelCtx, key, opts)
  1613  			if err != nil {
  1614  				t.Fatal(err)
  1615  			}
  1616  			// Write the content.
  1617  			if _, err := w.Write(cancelContent); err != nil {
  1618  				t.Fatal(err)
  1619  			}
  1620  
  1621  			// Verify that the previous content (if any) is still readable,
  1622  			// because the write hasn't been Closed yet.
  1623  			got, err := b.ReadAll(ctx, key)
  1624  			if test.exists {
  1625  				// The previous content should still be there.
  1626  				if !cmp.Equal(got, content) {
  1627  					t.Errorf("during unclosed write, got %q want %q", string(got), string(content))
  1628  				}
  1629  			} else {
  1630  				// The read should fail; the write hasn't been Closed so the
  1631  				// blob shouldn't exist.
  1632  				if err == nil {
  1633  					t.Error("wanted read to return an error when write is not yet Closed")
  1634  				}
  1635  			}
  1636  
  1637  			// Cancel the context to abort the write.
  1638  			cancel()
  1639  			// Close should return some kind of canceled context error.
  1640  			// We can't verify the kind of error cleanly, so we just verify there's
  1641  			// an error.
  1642  			if err := w.Close(); err == nil {
  1643  				t.Error("Close returned nil error, want canceled context error")
  1644  			}
  1645  
  1646  			// Verify the write was truly aborted.
  1647  			got, err = b.ReadAll(ctx, key)
  1648  			if test.exists {
  1649  				// The previous content should still be there.
  1650  				if !cmp.Equal(got, content) {
  1651  					t.Errorf("after canceled write, got %q want %q", string(got), string(content))
  1652  				}
  1653  			} else {
  1654  				// The read should fail; the write was aborted so the
  1655  				// blob shouldn't exist.
  1656  				if err == nil {
  1657  					t.Error("wanted read to return an error when write was canceled")
  1658  				}
  1659  			}
  1660  		})
  1661  	}
  1662  }
  1663  
  1664  // testMetadata tests writing and reading the key/value metadata for a blob.
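        // As exercised by the cases below: metadata keys are treated case-insensitively
        // and are reported back lower-cased by Attributes, while values are returned
        // verbatim; empty keys, case-insensitively duplicate keys, and non-UTF-8 keys
        // or values must be rejected at write time.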
  1665  func testMetadata(t *testing.T, newHarness HarnessMaker) {
  1666  	const key = "blob-for-metadata"
  1667  	hello := []byte("hello")
  1668  
  1669  	weirdMetadata := map[string]string{}
  1670  	for _, k := range escape.WeirdStrings {
  1671  		weirdMetadata[k] = k
  1672  	}
  1673  
  1674  	tests := []struct {
  1675  		name        string
  1676  		metadata    map[string]string
  1677  		content     []byte
  1678  		contentType string
  1679  		want        map[string]string
  1680  		wantErr     bool
  1681  	}{
  1682  		{
  1683  			name:     "empty",
  1684  			content:  hello,
  1685  			metadata: map[string]string{},
  1686  			want:     nil,
  1687  		},
  1688  		{
  1689  			name:     "empty key fails",
  1690  			content:  hello,
  1691  			metadata: map[string]string{"": "empty key value"},
  1692  			wantErr:  true,
  1693  		},
  1694  		{
  1695  			name:     "duplicate case-insensitive key fails",
  1696  			content:  hello,
  1697  			metadata: map[string]string{"abc": "foo", "aBc": "bar"},
  1698  			wantErr:  true,
  1699  		},
  1700  		{
  1701  			name:    "valid metadata",
  1702  			content: hello,
  1703  			metadata: map[string]string{
  1704  				"key_a": "value-a",
  1705  				"kEy_B": "value-b",
  1706  				"key_c": "vAlUe-c",
  1707  			},
  1708  			want: map[string]string{
  1709  				"key_a": "value-a",
  1710  				"key_b": "value-b",
  1711  				"key_c": "vAlUe-c",
  1712  			},
  1713  		},
  1714  		{
  1715  			name:     "valid metadata with empty body",
  1716  			content:  nil,
  1717  			metadata: map[string]string{"foo": "bar"},
  1718  			want:     map[string]string{"foo": "bar"},
  1719  		},
  1720  		{
  1721  			name:        "valid metadata with content type",
  1722  			content:     hello,
  1723  			contentType: "text/plain",
  1724  			metadata:    map[string]string{"foo": "bar"},
  1725  			want:        map[string]string{"foo": "bar"},
  1726  		},
  1727  		{
  1728  			name:     "weird metadata keys",
  1729  			content:  hello,
  1730  			metadata: weirdMetadata,
  1731  			want:     weirdMetadata,
  1732  		},
  1733  		{
  1734  			name:     "non-utf8 metadata key",
  1735  			content:  hello,
  1736  			metadata: map[string]string{escape.NonUTF8String: "bar"},
  1737  			wantErr:  true,
  1738  		},
  1739  		{
  1740  			name:     "non-utf8 metadata value",
  1741  			content:  hello,
  1742  			metadata: map[string]string{"foo": escape.NonUTF8String},
  1743  			wantErr:  true,
  1744  		},
  1745  	}
  1746  
  1747  	ctx := context.Background()
  1748  	for _, tc := range tests {
  1749  		t.Run(tc.name, func(t *testing.T) {
  1750  			h, err := newHarness(ctx, t)
  1751  			if err != nil {
  1752  				t.Fatal(err)
  1753  			}
  1754  			defer h.Close()
  1755  
  1756  			drv, err := h.MakeDriver(ctx)
  1757  			if err != nil {
  1758  				t.Fatal(err)
  1759  			}
  1760  			b := blob.NewBucket(drv)
  1761  			defer b.Close()
  1762  			opts := &blob.WriterOptions{
  1763  				Metadata:    tc.metadata,
  1764  				ContentType: tc.contentType,
  1765  			}
  1766  			err = b.WriteAll(ctx, key, tc.content, opts)
  1767  			if (err != nil) != tc.wantErr {
  1768  				t.Errorf("got error %v want error %v", err, tc.wantErr)
  1769  			}
  1770  			if err != nil {
  1771  				return
  1772  			}
  1773  			defer func() {
  1774  				_ = b.Delete(ctx, key)
  1775  			}()
  1776  			a, err := b.Attributes(ctx, key)
  1777  			if err != nil {
  1778  				t.Fatal(err)
  1779  			}
  1780  			if diff := cmp.Diff(a.Metadata, tc.want); diff != "" {
  1781  				t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", a.Metadata, tc.want, diff)
  1782  			}
  1783  		})
  1784  	}
  1785  }
  1786  
  1787  // testMD5 tests reading MD5 hashes via List and Attributes.
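        // A driver may report a nil MD5 from both Attributes and List; when it is
        // non-nil, it must equal md5.Sum of the blob's content.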
  1788  func testMD5(t *testing.T, newHarness HarnessMaker) {
  1789  	ctx := context.Background()
  1790  
  1791  	// Define two blobs with different content; we'll write them and then verify
  1792  	// their returned MD5 hashes.
  1793  	const aKey, bKey = "blob-for-md5-aaa", "blob-for-md5-bbb"
  1794  	aContent, bContent := []byte("hello"), []byte("goodbye")
  1795  	aMD5 := md5.Sum(aContent)
  1796  	bMD5 := md5.Sum(bContent)
  1797  
  1798  	h, err := newHarness(ctx, t)
  1799  	if err != nil {
  1800  		t.Fatal(err)
  1801  	}
  1802  	defer h.Close()
  1803  	drv, err := h.MakeDriver(ctx)
  1804  	if err != nil {
  1805  		t.Fatal(err)
  1806  	}
  1807  	b := blob.NewBucket(drv)
  1808  	defer b.Close()
  1809  
  1810  	// Write the two blobs.
  1811  	if err := b.WriteAll(ctx, aKey, aContent, nil); err != nil {
  1812  		t.Fatal(err)
  1813  	}
  1814  	defer func() { _ = b.Delete(ctx, aKey) }()
  1815  	if err := b.WriteAll(ctx, bKey, bContent, nil); err != nil {
  1816  		t.Fatal(err)
  1817  	}
  1818  	defer func() { _ = b.Delete(ctx, bKey) }()
  1819  
  1820  	// Check the MD5 we get through Attributes. Note that it's always legal to
  1821  	// return a nil MD5.
  1822  	aAttr, err := b.Attributes(ctx, aKey)
  1823  	if err != nil {
  1824  		t.Fatal(err)
  1825  	}
  1826  	if aAttr.MD5 != nil && !bytes.Equal(aAttr.MD5, aMD5[:]) {
  1827  		t.Errorf("got MD5\n%x\nwant\n%x", aAttr.MD5, aMD5)
  1828  	}
  1829  
  1830  	bAttr, err := b.Attributes(ctx, bKey)
  1831  	if err != nil {
  1832  		t.Fatal(err)
  1833  	}
  1834  	if bAttr.MD5 != nil && !bytes.Equal(bAttr.MD5, bMD5[:]) {
  1835  		t.Errorf("got MD5\n%x\nwant\n%x", bAttr.MD5, bMD5)
  1836  	}
  1837  
  1838  	// Check the MD5 we get through List. Note that it's always legal to
  1839  	// return a nil MD5.
  1840  	iter := b.List(&blob.ListOptions{Prefix: "blob-for-md5-"})
  1841  	obj, err := iter.Next(ctx)
  1842  	if err != nil {
  1843  		t.Fatal(err)
  1844  	}
  1845  	if obj.Key != aKey {
  1846  		t.Errorf("got name %q want %q", obj.Key, aKey)
  1847  	}
  1848  	if obj.MD5 != nil && !bytes.Equal(obj.MD5, aMD5[:]) {
  1849  		t.Errorf("got MD5\n%x\nwant\n%x", obj.MD5, aMD5)
  1850  	}
  1851  	obj, err = iter.Next(ctx)
  1852  	if err != nil {
  1853  		t.Fatal(err)
  1854  	}
  1855  	if obj.Key != bKey {
  1856  		t.Errorf("got name %q want %q", obj.Key, bKey)
  1857  	}
  1858  	if obj.MD5 != nil && !bytes.Equal(obj.MD5, bMD5[:]) {
  1859  		t.Errorf("got MD5\n%x\nwant\n%x", obj.MD5, bMD5)
  1860  	}
  1861  }
  1862  
  1863  // testCopy tests the functionality of Copy.
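        // Copy must reproduce the source blob's content as well as its ContentType,
        // CacheControl, ContentDisposition, ContentEncoding, ContentLanguage and
        // Metadata, and must overwrite any existing blob at the destination key.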
  1864  func testCopy(t *testing.T, newHarness HarnessMaker) {
  1865  	const (
  1866  		srcKey             = "blob-for-copying-src"
  1867  		dstKey             = "blob-for-copying-dest"
  1868  		dstKeyExists       = "blob-for-copying-dest-exists"
  1869  		contentType        = "text/plain"
  1870  		cacheControl       = "no-cache"
  1871  		contentDisposition = "inline"
  1872  		contentEncoding    = "identity"
  1873  		contentLanguage    = "en"
  1874  	)
  1875  	var contents = []byte("Hello World")
  1876  
  1877  	ctx := context.Background()
  1878  	t.Run("NonExistentSourceFails", func(t *testing.T) {
  1879  		h, err := newHarness(ctx, t)
  1880  		if err != nil {
  1881  			t.Fatal(err)
  1882  		}
  1883  		defer h.Close()
  1884  		drv, err := h.MakeDriver(ctx)
  1885  		if err != nil {
  1886  			t.Fatal(err)
  1887  		}
  1888  		b := blob.NewBucket(drv)
  1889  		defer b.Close()
  1890  
  1891  		err = b.Copy(ctx, dstKey, "does-not-exist", nil)
  1892  		if err == nil {
  1893  			t.Errorf("got nil want error")
  1894  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  1895  			t.Errorf("got %v want NotFound error", err)
  1896  		} else if !strings.Contains(err.Error(), "does-not-exist") {
  1897  			t.Errorf("got %v want error to include missing key", err)
  1898  		}
  1899  	})
  1900  
  1901  	t.Run("Works", func(t *testing.T) {
  1902  		h, err := newHarness(ctx, t)
  1903  		if err != nil {
  1904  			t.Fatal(err)
  1905  		}
  1906  		defer h.Close()
  1907  		drv, err := h.MakeDriver(ctx)
  1908  		if err != nil {
  1909  			t.Fatal(err)
  1910  		}
  1911  		b := blob.NewBucket(drv)
  1912  		defer b.Close()
  1913  
  1914  		// Create the source blob.
  1915  		wopts := &blob.WriterOptions{
  1916  			ContentType:        contentType,
  1917  			CacheControl:       cacheControl,
  1918  			ContentDisposition: contentDisposition,
  1919  			ContentEncoding:    contentEncoding,
  1920  			ContentLanguage:    contentLanguage,
  1921  			Metadata:           map[string]string{"foo": "bar"},
  1922  		}
  1923  		if err := b.WriteAll(ctx, srcKey, contents, wopts); err != nil {
  1924  			t.Fatal(err)
  1925  		}
  1926  
  1927  		// Grab its attributes to compare to the copy's attributes later.
  1928  		wantAttr, err := b.Attributes(ctx, srcKey)
  1929  		if err != nil {
  1930  			t.Fatal(err)
  1931  		}
  1932  
  1933  		// Clear uncomparable fields.
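        		// CreateTime, ModTime and ETag legitimately differ between a blob
        		// and its copy, so they are zeroed before comparing attributes.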
  1934  		clearUncomparableFields := func(a *blob.Attributes) {
  1935  			a.CreateTime = time.Time{}
  1936  			a.ModTime = time.Time{}
  1937  			a.ETag = ""
  1938  		}
  1939  		clearUncomparableFields(wantAttr)
  1940  
  1941  		// Create another blob that we're going to overwrite.
  1942  		if err := b.WriteAll(ctx, dstKeyExists, []byte("clobber me"), nil); err != nil {
  1943  			t.Fatal(err)
  1944  		}
  1945  
  1946  		// Copy the source to the destination.
  1947  		if err := b.Copy(ctx, dstKey, srcKey, nil); err != nil {
  1948  			t.Errorf("got unexpected error copying blob: %v", err)
  1949  		}
  1950  		// Read the copy.
  1951  		got, err := b.ReadAll(ctx, dstKey)
  1952  		if err != nil {
  1953  			t.Fatal(err)
  1954  		}
  1955  		if !cmp.Equal(got, contents) {
  1956  			t.Errorf("got %q want %q", string(got), string(contents))
  1957  		}
  1958  		// Verify attributes of the copy.
  1959  		gotAttr, err := b.Attributes(ctx, dstKey)
  1960  		if err != nil {
  1961  			t.Fatal(err)
  1962  		}
  1963  		clearUncomparableFields(gotAttr)
  1964  		if diff := cmp.Diff(gotAttr, wantAttr, cmpopts.IgnoreUnexported(blob.Attributes{})); diff != "" {
  1965  			t.Errorf("got %v want %v diff %s", gotAttr, wantAttr, diff)
  1966  		}
  1967  
  1968  		// Copy the source to the second destination, where there's an existing blob.
  1969  		// It should be overwritten.
  1970  		if err := b.Copy(ctx, dstKeyExists, srcKey, nil); err != nil {
  1971  			t.Errorf("got unexpected error copying blob: %v", err)
  1972  		}
  1973  		// Read the copy.
  1974  		got, err = b.ReadAll(ctx, dstKeyExists)
  1975  		if err != nil {
  1976  			t.Fatal(err)
  1977  		}
  1978  		if !cmp.Equal(got, contents) {
  1979  			t.Errorf("got %q want %q", string(got), string(contents))
  1980  		}
  1981  		// Verify attributes of the copy.
  1982  		gotAttr, err = b.Attributes(ctx, dstKeyExists)
  1983  		if err != nil {
  1984  			t.Fatal(err)
  1985  		}
  1986  		clearUncomparableFields(gotAttr)
  1987  		if diff := cmp.Diff(gotAttr, wantAttr, cmpopts.IgnoreUnexported(blob.Attributes{})); diff != "" {
  1988  			t.Errorf("got %v want %v diff %s", gotAttr, wantAttr, diff)
  1989  		}
  1990  	})
  1991  }
  1992  
  1993  // testDelete tests the functionality of Delete.
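        // Deleting a nonexistent key, reading a deleted key, and re-deleting a deleted
        // key must all fail with gcerrors.NotFound, and the error message must mention
        // the missing key.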
  1994  func testDelete(t *testing.T, newHarness HarnessMaker) {
  1995  	const key = "blob-for-deleting"
  1996  
  1997  	ctx := context.Background()
  1998  	t.Run("NonExistentFails", func(t *testing.T) {
  1999  		h, err := newHarness(ctx, t)
  2000  		if err != nil {
  2001  			t.Fatal(err)
  2002  		}
  2003  		defer h.Close()
  2004  		drv, err := h.MakeDriver(ctx)
  2005  		if err != nil {
  2006  			t.Fatal(err)
  2007  		}
  2008  		b := blob.NewBucket(drv)
  2009  		defer b.Close()
  2010  
  2011  		err = b.Delete(ctx, "does-not-exist")
  2012  		if err == nil {
  2013  			t.Errorf("got nil want error")
  2014  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  2015  			t.Errorf("got %v want NotFound error", err)
  2016  		} else if !strings.Contains(err.Error(), "does-not-exist") {
  2017  			t.Errorf("got %v want error to include missing key", err)
  2018  		}
  2019  	})
  2020  
  2021  	t.Run("Works", func(t *testing.T) {
  2022  		h, err := newHarness(ctx, t)
  2023  		if err != nil {
  2024  			t.Fatal(err)
  2025  		}
  2026  		defer h.Close()
  2027  		drv, err := h.MakeDriver(ctx)
  2028  		if err != nil {
  2029  			t.Fatal(err)
  2030  		}
  2031  		b := blob.NewBucket(drv)
  2032  		defer b.Close()
  2033  
  2034  		// Create the blob.
  2035  		if err := b.WriteAll(ctx, key, []byte("Hello world"), nil); err != nil {
  2036  			t.Fatal(err)
  2037  		}
  2038  		// Delete it.
  2039  		if err := b.Delete(ctx, key); err != nil {
  2040  			t.Errorf("got unexpected error deleting blob: %v", err)
  2041  		}
  2042  		// Subsequent read fails with NotFound.
  2043  		_, err = b.NewReader(ctx, key, nil)
  2044  		if err == nil {
  2045  			t.Errorf("read after delete got nil, want error")
  2046  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  2047  			t.Errorf("read after delete want NotFound error, got %v", err)
  2048  		} else if !strings.Contains(err.Error(), key) {
  2049  			t.Errorf("got %v want error to include missing key", err)
  2050  		}
  2051  		// Subsequent delete also fails.
  2052  		err = b.Delete(ctx, key)
  2053  		if err == nil {
  2054  			t.Errorf("delete after delete got nil, want error")
  2055  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  2056  			t.Errorf("delete after delete got %v, want NotFound error", err)
  2057  		} else if !strings.Contains(err.Error(), key) {
  2058  			t.Errorf("got %v want error to include missing key", err)
  2059  		}
  2060  	})
  2061  }
  2062  
  2063  // testConcurrentWriteAndRead tests that concurrent writing to multiple blob
  2064  // keys and concurrent reading from multiple blob keys works.
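        // Each key is written, and later read back, from its own goroutine, so a
        // driver.Bucket must tolerate concurrent use across distinct keys.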
  2065  func testConcurrentWriteAndRead(t *testing.T, newHarness HarnessMaker) {
  2066  	ctx := context.Background()
  2067  	h, err := newHarness(ctx, t)
  2068  	if err != nil {
  2069  		t.Fatal(err)
  2070  	}
  2071  	defer h.Close()
  2072  	drv, err := h.MakeDriver(ctx)
  2073  	if err != nil {
  2074  		t.Fatal(err)
  2075  	}
  2076  	b := blob.NewBucket(drv)
  2077  	defer b.Close()
  2078  
  2079  	// Prepare data. Each of the numKeys blobs has dataSize bytes, with each byte
  2080  	// set to the numeric key index. For example, the blob at "key0" consists of
  2081  	// all dataSize bytes set to 0.
  2082  	const numKeys = 20
  2083  	const dataSize = 4 * 1024
  2084  	keyData := make(map[int][]byte)
  2085  	for k := 0; k < numKeys; k++ {
  2086  		data := make([]byte, dataSize)
  2087  		for i := 0; i < dataSize; i++ {
  2088  			data[i] = byte(k)
  2089  		}
  2090  		keyData[k] = data
  2091  	}
  2092  
  2093  	blobName := func(k int) string {
  2094  		return fmt.Sprintf("key%d", k)
  2095  	}
  2096  
  2097  	var wg sync.WaitGroup
  2098  
  2099  	// Write all blobs concurrently.
  2100  	for k := 0; k < numKeys; k++ {
  2101  		wg.Add(1)
  2102  		go func(key int) {
  2103  			defer wg.Done()
  2104  			if err := b.WriteAll(ctx, blobName(key), keyData[key], nil); err != nil {
  2105  				t.Error(err)
  2106  			}
  2107  		}(k)
  2108  		defer b.Delete(ctx, blobName(k))
  2109  	}
  2110  	wg.Wait()
  2111  
  2112  	// Read all blobs concurrently and verify that they contain the expected data.
  2113  	for k := 0; k < numKeys; k++ {
  2114  		wg.Add(1)
  2115  		go func(key int) {
  2116  			defer wg.Done()
  2117  			buf, err := b.ReadAll(ctx, blobName(key))
  2118  			if err != nil {
  2119  				t.Error(err)
  2120  			} else if !bytes.Equal(buf, keyData[key]) {
  2121  				t.Errorf("read data mismatch for key %d", key)
  2122  			}
  2123  		}(k)
  2125  	}
  2126  	wg.Wait()
  2127  }
  2128  
  2129  // testKeys tests a variety of weird keys.
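        // Keys are drawn from escape.WeirdStrings (spaces, punctuation, unicode, and
        // so on); drivers are expected to escape them as needed so that WriteAll,
        // ReadAll, Attributes, Delete and (where implemented) SignedURL all round-trip
        // the original key, while non-UTF-8 keys must be rejected outright.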
  2130  func testKeys(t *testing.T, newHarness HarnessMaker) {
  2131  	const keyPrefix = "weird-keys"
  2132  	content := []byte("hello")
  2133  	ctx := context.Background()
  2134  
  2135  	t.Run("non-UTF8 fails", func(t *testing.T) {
  2136  		h, err := newHarness(ctx, t)
  2137  		if err != nil {
  2138  			t.Fatal(err)
  2139  		}
  2140  		defer h.Close()
  2141  		drv, err := h.MakeDriver(ctx)
  2142  		if err != nil {
  2143  			t.Fatal(err)
  2144  		}
  2145  		b := blob.NewBucket(drv)
  2146  		defer b.Close()
  2147  
  2148  		// Write the blob.
  2149  		key := keyPrefix + escape.NonUTF8String
  2150  		if err := b.WriteAll(ctx, key, content, nil); err == nil {
  2151  			t.Error("got nil error, expected error for using non-UTF8 string as key")
  2152  		}
  2153  	})
  2154  
  2155  	for description, key := range escape.WeirdStrings {
  2156  		t.Run(description, func(t *testing.T) {
  2157  			h, err := newHarness(ctx, t)
  2158  			if err != nil {
  2159  				t.Fatal(err)
  2160  			}
  2161  			defer h.Close()
  2162  			drv, err := h.MakeDriver(ctx)
  2163  			if err != nil {
  2164  				t.Fatal(err)
  2165  			}
  2166  			b := blob.NewBucket(drv)
  2167  			defer b.Close()
  2168  
  2169  			// Write the blob.
  2170  			key = keyPrefix + key
  2171  			if err := b.WriteAll(ctx, key, content, nil); err != nil {
  2172  				t.Fatal(err)
  2173  			}
  2174  
  2175  			defer func() {
  2176  				err := b.Delete(ctx, key)
  2177  				if err != nil {
  2178  					t.Error(err)
  2179  				}
  2180  			}()
  2181  
  2182  			// Verify read works.
  2183  			got, err := b.ReadAll(ctx, key)
  2184  			if err != nil {
  2185  				t.Fatal(err)
  2186  			}
  2187  			if !cmp.Equal(got, content) {
  2188  				t.Errorf("got %q want %q", string(got), string(content))
  2189  			}
  2190  
  2191  			// Verify Attributes works.
  2192  			_, err = b.Attributes(ctx, key)
  2193  			if err != nil {
  2194  				t.Error(err)
  2195  			}
  2196  
  2197  			// Verify SignedURL works.
  2198  			url, err := b.SignedURL(ctx, key, nil)
  2199  			if gcerrors.Code(err) != gcerrors.Unimplemented {
  2200  				if err != nil {
  2201  					t.Fatal(err)
  2202  				}
  2203  				client := h.HTTPClient()
  2204  				if client == nil {
  2205  					t.Fatal("can't verify SignedURL, Harness.HTTPClient() returned nil")
  2206  				}
  2207  				resp, err := client.Get(url)
  2208  				if err != nil {
  2209  					t.Fatal(err)
  2210  				}
  2211  				defer resp.Body.Close()
  2212  				if resp.StatusCode != 200 {
  2213  					t.Errorf("got status code %d, want 200", resp.StatusCode)
  2214  				}
  2215  				got, err := ioutil.ReadAll(resp.Body)
  2216  				if err != nil {
  2217  					t.Fatal(err)
  2218  				}
  2219  				if !bytes.Equal(got, content) {
  2220  					t.Errorf("got body %q, want %q", string(got), string(content))
  2221  				}
  2222  			}
  2223  		})
  2224  	}
  2225  }
  2226  
  2227  // testSignedURL tests the functionality of SignedURL.
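        // Each signed URL must permit only the operation it was signed for: the GET
        // URL must not allow PUT or DELETE, a PUT URL signed with a Content-Type must
        // reject requests with a different or missing Content-Type, a PUT URL signed
        // with EnforceAbsentContentType must reject any Content-Type, and the GET URL
        // with its query parameters stripped must not work at all.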
  2228  func testSignedURL(t *testing.T, newHarness HarnessMaker) {
  2229  	const key = "blob-for-signing"
  2230  	const contents = "hello world"
  2231  
  2232  	ctx := context.Background()
  2233  
  2234  	h, err := newHarness(ctx, t)
  2235  	if err != nil {
  2236  		t.Fatal(err)
  2237  	}
  2238  	defer h.Close()
  2239  
  2240  	drv, err := h.MakeDriver(ctx)
  2241  	if err != nil {
  2242  		t.Fatal(err)
  2243  	}
  2244  	b := blob.NewBucket(drv)
  2245  	defer b.Close()
  2246  
  2247  	// Verify that a negative Expiry gives an error. This is enforced in the
  2248  	// portable type, so it works regardless of driver support.
  2249  	_, err = b.SignedURL(ctx, key, &blob.SignedURLOptions{Expiry: -1 * time.Minute})
  2250  	if err == nil {
  2251  		t.Error("got nil error, expected error for negative SignedURLOptions.Expiry")
  2252  	}
  2253  
  2254  	// Generate a signed URL for GET.
  2255  	getURL, err := b.SignedURL(ctx, key, nil)
  2256  	if err != nil {
  2257  		if gcerrors.Code(err) == gcerrors.Unimplemented {
  2258  			t.Skipf("SignedURL not supported")
  2259  			return
  2260  		}
  2261  		t.Fatal(err)
  2262  	} else if getURL == "" {
  2263  		t.Fatal("got empty GET url")
  2264  	}
  2265  
  2266  	// Copy getURL, but remove all query params. This URL should not be allowed
  2267  	// to GET since the client is unauthorized.
  2268  	getURLNoParamsURL, err := url.Parse(getURL)
  2269  	if err != nil {
  2270  		t.Fatalf("failed to parse getURL: %v", err)
  2271  	}
  2272  	getURLNoParamsURL.RawQuery = ""
  2273  	getURLNoParams := getURLNoParamsURL.String()
  2274  	const (
  2275  		allowedContentType   = "text/plain"
  2276  		differentContentType = "application/octet-stream"
  2277  	)
  2278  
  2279  	// Generate a signed URL for PUT, with a non-empty ContentType.
  2280  	putURLWithContentType, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{
  2281  		Method:      http.MethodPut,
  2282  		ContentType: allowedContentType,
  2283  	})
  2284  	if gcerrors.Code(err) == gcerrors.Unimplemented {
  2285  		t.Log("PUT URLs with content type not supported, skipping")
  2286  	} else if err != nil {
  2287  		t.Fatal(err)
  2288  	} else if putURLWithContentType == "" {
  2289  		t.Fatal("got empty PUT url")
  2290  	}
  2291  
  2292  	// Generate a signed URL for PUT, with an empty ContentType that's enforced.
  2293  	putURLEnforcedAbsentContentType, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{
  2294  		Method:                   http.MethodPut,
  2295  		EnforceAbsentContentType: true,
  2296  	})
  2297  	if gcerrors.Code(err) == gcerrors.Unimplemented {
  2298  		t.Log("PUT URLs with enforced absent content type not supported, skipping")
  2299  	} else if err != nil {
  2300  		t.Fatal(err)
  2301  	} else if putURLEnforcedAbsentContentType == "" {
  2302  		t.Fatal("got empty PUT url")
  2303  	}
  2304  
  2305  	// Same as above, but not enforced.
  2306  	putURLWithoutContentType, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{
  2307  		Method: http.MethodPut,
  2308  	})
  2309  	if err != nil {
  2310  		t.Fatal(err)
  2311  	} else if putURLWithoutContentType == "" {
  2312  		t.Fatal("got empty PUT url")
  2313  	}
  2314  
  2315  	// Generate a signed URL for DELETE.
  2316  	deleteURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: http.MethodDelete})
  2317  	if gcerrors.Code(err) == gcerrors.Unimplemented {
  2318  		t.Log("DELETE URLs not supported, skipping")
  2319  	} else if err != nil {
  2320  		t.Fatal(err)
  2321  	} else if deleteURL == "" {
  2322  		t.Fatal("got empty DELETE url")
  2323  	}
  2324  
  2325  	client := h.HTTPClient()
  2326  	if client == nil {
  2327  		t.Fatal("can't verify SignedURL, Harness.HTTPClient() returned nil")
  2328  	}
  2329  
  2330  	// PUT the blob. Try with all URLs; only the PUT URLs should work, and only
  2331  	// when the request's Content-Type is consistent with what the URL was signed for.
  2332  	type signedURLTest struct {
  2333  		urlDescription string
  2334  		url            string
  2335  		contentType    string
  2336  		wantSuccess    bool
  2337  	}
  2338  	tests := []signedURLTest{
  2339  		{"getURL", getURL, "", false},
  2340  	}
  2341  	if deleteURL != "" {
  2342  		tests = append(tests, signedURLTest{"deleteURL", deleteURL, "", false})
  2343  	}
  2344  	if putURLWithContentType != "" {
  2345  		// Allowed content type should work.
  2346  		// Different or empty content type should fail.
  2347  		tests = append(tests, signedURLTest{"putURLWithContentType", putURLWithContentType, allowedContentType, true})
  2348  		tests = append(tests, signedURLTest{"putURLWithContentType", putURLWithContentType, differentContentType, false})
  2349  		tests = append(tests, signedURLTest{"putURLWithContentType", putURLWithContentType, "", false})
  2350  	}
  2351  	if putURLEnforcedAbsentContentType != "" {
  2352  		// Empty content type should work, non-empty should fail.
  2353  		tests = append(tests, signedURLTest{"putURLEnforcedAbsentContentType", putURLEnforcedAbsentContentType, "", true})
  2354  		tests = append(tests, signedURLTest{"putURLEnforcedAbsentContentType", putURLEnforcedAbsentContentType, differentContentType, false})
  2355  	}
  2356  	if putURLWithoutContentType != "" {
  2357  		// Empty content type should work.
  2358  		tests = append(tests, signedURLTest{"putURLWithoutContentType", putURLWithoutContentType, "", true})
  2359  		// From the SignedURLOptions docstring:
  2360  		// If EnforceAbsentContentType is false and ContentType is the empty string,
  2361  		// then PUTing without a Content-Type header will succeed, but it is
  2362  		// implementation-specific whether providing a Content-Type header will fail.
  2363  		// --> so, we don't have a test for putURLWithoutContentType and a non-empty ContentType.
  2364  	}
  2365  	for _, test := range tests {
  2366  		req, err := http.NewRequest(http.MethodPut, test.url, strings.NewReader(contents))
  2367  		if err != nil {
  2368  			t.Fatalf("failed to create PUT HTTP request using %q: %v", test.urlDescription, err)
  2369  		}
  2370  		if test.contentType != "" {
  2371  			req.Header.Set("Content-Type", test.contentType)
  2372  		}
  2373  		if resp, err := client.Do(req); err != nil {
  2374  			t.Fatalf("PUT to %q with ContentType %q failed: %v", test.urlDescription, test.contentType, err)
  2375  		} else {
  2376  			defer resp.Body.Close()
  2377  			success := resp.StatusCode >= 200 && resp.StatusCode < 300
  2378  			if success != test.wantSuccess {
  2379  				t.Errorf("PUT to %q with ContentType %q got status code %d, wanted 2xx? %v", test.urlDescription, test.contentType, resp.StatusCode, test.wantSuccess)
  2380  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2381  				t.Error(string(gotBody))
  2382  			}
  2383  		}
  2384  	}
  2385  
  2386  	// GET it. Try with all URLs, only getURL should work.
  2387  	tests = nil
  2388  	if deleteURL != "" {
  2389  		tests = append(tests, signedURLTest{"deleteURL", deleteURL, "", false})
  2390  	}
  2391  	tests = append(tests, []signedURLTest{
  2392  		{"putURLWithoutContentType", putURLWithoutContentType, "", false},
  2393  		{"getURLNoParams", getURLNoParams, "", false},
  2394  		{"getURL", getURL, "", true},
  2395  	}...)
  2396  	for _, test := range tests {
  2397  		if resp, err := client.Get(test.url); err != nil {
  2398  			t.Fatalf("GET with %s URL failed: %v", test.urlDescription, err)
  2399  		} else {
  2400  			defer resp.Body.Close()
  2401  			success := resp.StatusCode >= 200 && resp.StatusCode < 300
  2402  			if success != test.wantSuccess {
  2403  				t.Errorf("GET to %q got status code %d, want 2xx? %v", test.urlDescription, resp.StatusCode, test.wantSuccess)
  2404  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2405  				t.Error(string(gotBody))
  2406  			} else if success {
  2407  				gotBody, err := ioutil.ReadAll(resp.Body)
  2408  				if err != nil {
  2409  					t.Errorf("GET to %q failed to read response body: %v", test.urlDescription, err)
  2410  				} else if gotBodyStr := string(gotBody); gotBodyStr != contents {
  2411  					t.Errorf("GET to %q got body %q, want %q", test.urlDescription, gotBodyStr, contents)
  2412  				}
  2413  			}
  2414  		}
  2415  	}
  2416  
  2417  	// DELETE it. Try with all URLs, only deleteURL should work.
  2418  	tests = []signedURLTest{
  2419  		{"getURL", getURL, "", false},
  2420  		{"putURLWithoutContentType", putURLWithoutContentType, "", false},
  2421  	}
  2422  	if deleteURL != "" {
  2423  		tests = append(tests, signedURLTest{"deleteURL", deleteURL, "", true})
  2424  	}
  2425  	for _, test := range tests {
  2426  		req, err := http.NewRequest(http.MethodDelete, test.url, nil)
  2427  		if err != nil {
  2428  			t.Fatalf("failed to create DELETE HTTP request using %q: %v", test.urlDescription, err)
  2429  		}
  2430  		if resp, err := client.Do(req); err != nil {
  2431  			t.Fatalf("DELETE to %q failed: %v", test.urlDescription, err)
  2432  		} else {
  2433  			defer resp.Body.Close()
  2434  			success := resp.StatusCode >= 200 && resp.StatusCode < 300
  2435  			if success != test.wantSuccess {
  2436  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2437  				t.Error(string(gotBody))
  2438  				t.Fatalf("DELETE to %q got status code %d, want 2xx? %v", test.urlDescription, resp.StatusCode, test.wantSuccess)
  2439  			}
  2440  		}
  2441  	}
  2442  
  2443  	// GET should fail now that the blob has been deleted.
  2444  	if deleteURL != "" {
  2445  		if resp, err := client.Get(getURL); err != nil {
  2446  			t.Errorf("GET after DELETE failed: %v", err)
  2447  		} else {
  2448  			defer resp.Body.Close()
  2449  			if resp.StatusCode != 404 {
  2450  				t.Errorf("GET after DELETE got status code %d, want 404", resp.StatusCode)
  2451  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2452  				t.Error(string(gotBody))
  2453  			}
  2454  		}
  2455  	}
  2456  }
  2457  
  2458  // testAs tests the various As functions, using AsTest.
  2459  func testAs(t *testing.T, newHarness HarnessMaker, st AsTest) {
  2460  	const (
  2461  		dir     = "mydir"
  2462  		key     = dir + "/as-test"
  2463  		copyKey = dir + "/as-test-copy"
  2464  	)
  2465  	var content = []byte("hello world")
  2466  	ctx := context.Background()
  2467  
  2468  	h, err := newHarness(ctx, t)
  2469  	if err != nil {
  2470  		t.Fatal(err)
  2471  	}
  2472  	defer h.Close()
  2473  
  2474  	drv, err := h.MakeDriver(ctx)
  2475  	if err != nil {
  2476  		t.Fatal(err)
  2477  	}
  2478  	b := blob.NewBucket(drv)
  2479  	defer b.Close()
  2480  
  2481  	// Verify Bucket.As.
  2482  	if err := st.BucketCheck(b); err != nil {
  2483  		t.Error(err)
  2484  	}
  2485  
  2486  	// Create a blob, using the provided callback.
  2487  	if err := b.WriteAll(ctx, key, content, &blob.WriterOptions{BeforeWrite: st.BeforeWrite}); err != nil {
  2488  		t.Error(err)
  2489  	}
  2490  	defer func() { _ = b.Delete(ctx, key) }()
  2491  
  2492  	// Verify Attributes.As.
  2493  	attrs, err := b.Attributes(ctx, key)
  2494  	if err != nil {
  2495  		t.Fatal(err)
  2496  	}
  2497  	if err := st.AttributesCheck(attrs); err != nil {
  2498  		t.Error(err)
  2499  	}
  2500  
  2501  	// Verify Reader.As.
  2502  	r, err := b.NewReader(ctx, key, &blob.ReaderOptions{BeforeRead: st.BeforeRead})
  2503  	if err != nil {
  2504  		t.Fatal(err)
  2505  	}
  2506  	defer r.Close()
  2507  	if err := st.ReaderCheck(r); err != nil {
  2508  		t.Error(err)
  2509  	}
  2510  
  2511  	// Verify ListObject.As for the directory.
  2512  	iter := b.List(&blob.ListOptions{Prefix: dir, Delimiter: "/", BeforeList: st.BeforeList})
  2513  	found := false
  2514  	for {
  2515  		obj, err := iter.Next(ctx)
  2516  		if err == io.EOF {
  2517  			break
  2518  		}
  2519  		if found {
  2520  			t.Fatal("got a second object returned from List, only wanted one")
  2521  		}
  2522  		found = true
  2523  		if err != nil {
  2524  			t.Fatal(err)
  2525  		}
  2526  		if err := st.ListObjectCheck(obj); err != nil {
  2527  			t.Error(err)
  2528  		}
  2529  	}
  2530  
  2531  	// Verify ListObject.As for the blob.
  2532  	iter = b.List(&blob.ListOptions{Prefix: key, BeforeList: st.BeforeList})
  2533  	found = false
  2534  	for {
  2535  		obj, err := iter.Next(ctx)
  2536  		if err == io.EOF {
  2537  			break
  2538  		}
  2539  		if found {
  2540  			t.Fatal("got a second object returned from List, only wanted one")
  2541  		}
  2542  		found = true
  2543  		if err != nil {
  2544  			t.Fatal(err)
  2545  		}
  2546  		if err := st.ListObjectCheck(obj); err != nil {
  2547  			t.Error(err)
  2548  		}
  2549  	}
  2550  
  2551  	_, gotErr := b.NewReader(ctx, "key-does-not-exist", nil)
  2552  	if gotErr == nil {
  2553  		t.Fatalf("got nil error from NewReader for nonexistent key, want an error")
  2554  	}
  2555  	if err := st.ErrorCheck(b, gotErr); err != nil {
  2556  		t.Error(err)
  2557  	}
  2558  
  2559  	// Copy the blob, using the provided callback.
  2560  	if err := b.Copy(ctx, copyKey, key, &blob.CopyOptions{BeforeCopy: st.BeforeCopy}); err != nil {
  2561  		t.Error(err)
  2562  	} else {
  2563  		defer func() { _ = b.Delete(ctx, copyKey) }()
  2564  	}
  2565  
  2566  	for _, method := range []string{http.MethodGet, http.MethodPut, http.MethodDelete} {
  2567  		_, err = b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: method, BeforeSign: st.BeforeSign})
  2568  		if err != nil && gcerrors.Code(err) != gcerrors.Unimplemented {
  2569  			t.Errorf("got err %v when signing url with method %q", err, method)
  2570  		}
  2571  	}
  2572  }
  2573  
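        // benchmarkRead measures parallel ReadAll throughput against a single
        // pre-written blob (the test-large.jpg fixture loaded via loadTestData).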
  2574  func benchmarkRead(b *testing.B, bkt *blob.Bucket) {
  2575  	ctx := context.Background()
  2576  	const key = "readbenchmark-blob"
  2577  
  2578  	content := loadTestData(b, "test-large.jpg")
  2579  	if err := bkt.WriteAll(ctx, key, content, nil); err != nil {
  2580  		b.Fatal(err)
  2581  	}
  2582  	defer func() {
  2583  		_ = bkt.Delete(ctx, key)
  2584  	}()
  2585  
  2586  	b.ResetTimer()
  2587  	b.RunParallel(func(pb *testing.PB) {
  2588  		for pb.Next() {
  2589  			buf, err := bkt.ReadAll(ctx, key)
  2590  			if err != nil {
  2591  				b.Error(err)
  2592  			}
  2593  			if !bytes.Equal(buf, content) {
  2594  				b.Error("read didn't match write")
  2595  			}
  2596  		}
  2597  	})
  2598  }
  2599  
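        // benchmarkWriteReadDelete measures a full write/read/delete cycle per
        // iteration; each parallel worker derives its own key from an atomic counter
        // so that workers don't contend on a single blob.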
  2600  func benchmarkWriteReadDelete(b *testing.B, bkt *blob.Bucket) {
  2601  	ctx := context.Background()
  2602  	const baseKey = "writereaddeletebenchmark-blob-"
  2603  
  2604  	content := loadTestData(b, "test-large.jpg")
  2605  	var nextID uint32
  2606  
  2607  	b.ResetTimer()
  2608  	b.RunParallel(func(pb *testing.PB) {
  2609  		key := fmt.Sprintf("%s%d", baseKey, atomic.AddUint32(&nextID, 1))
  2610  		for pb.Next() {
  2611  			if err := bkt.WriteAll(ctx, key, content, nil); err != nil {
  2612  				b.Error(err)
  2613  				continue
  2614  			}
  2615  			buf, err := bkt.ReadAll(ctx, key)
  2616  			if err != nil {
  2617  				b.Error(err)
  2618  			}
  2619  			if !bytes.Equal(buf, content) {
  2620  				b.Error("read didn't match write")
  2621  			}
  2622  			if err := bkt.Delete(ctx, key); err != nil {
  2623  				b.Error(err)
  2624  				continue
  2625  			}
  2626  		}
  2627  	})
  2628  }