github.com/cornelk/go-cloud@v0.17.1/blob/drivertest/drivertest.go

     1  // Copyright 2018 The Go Cloud Development Kit Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     https://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Package drivertest provides a conformance test for implementations of
     16  // the blob driver interface.
    17  package drivertest // import "github.com/cornelk/go-cloud/blob/drivertest"
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"crypto/md5"
    23  	"errors"
    24  	"fmt"
    25  	"io"
    26  	"io/ioutil"
    27  	"log"
    28  	"net/http"
    29  	"net/url"
    30  	"strconv"
    31  	"strings"
    32  	"sync"
    33  	"sync/atomic"
    34  	"testing"
    35  	"time"
    36  
    37  	"github.com/cornelk/go-cloud/blob"
    38  	"github.com/cornelk/go-cloud/blob/driver"
    39  	"github.com/cornelk/go-cloud/gcerrors"
    40  	"github.com/cornelk/go-cloud/internal/escape"
    41  	"github.com/google/go-cmp/cmp"
    42  	"github.com/google/go-cmp/cmp/cmpopts"
    43  )
    44  
     45  // Harness describes the functionality test harnesses must provide to run
    46  // conformance tests.
    47  type Harness interface {
    48  	// MakeDriver creates a driver.Bucket to test.
    49  	// Multiple calls to MakeDriver during a test run must refer to the
    50  	// same storage bucket; i.e., a blob created using one driver.Bucket must
    51  	// be readable by a subsequent driver.Bucket.
    52  	MakeDriver(ctx context.Context) (driver.Bucket, error)
    53  	// HTTPClient should return an unauthorized *http.Client, or nil.
    54  	// Required if the service supports SignedURL.
    55  	HTTPClient() *http.Client
    56  	// Close closes resources used by the harness.
    57  	Close()
    58  }
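
         // A minimal Harness sketch, shown for illustration only. It assumes a
         // hypothetical inmemDriverBucket helper that returns a driver.Bucket backed
         // by one shared in-memory bucket; that helper is not part of this package.
         //
         //	type inmemHarness struct{}
         //
         //	func (inmemHarness) MakeDriver(ctx context.Context) (driver.Bucket, error) {
         //		// Every call must return a driver.Bucket for the same underlying bucket.
         //		return inmemDriverBucket(), nil
         //	}
         //
         //	// HTTPClient returns nil because the hypothetical driver does not
         //	// support SignedURL.
         //	func (inmemHarness) HTTPClient() *http.Client { return nil }
         //
         //	func (inmemHarness) Close() {}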
    59  
    60  // HarnessMaker describes functions that construct a harness for running tests.
    61  // It is called exactly once per test; Harness.Close() will be called when the test is complete.
    62  type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
    63  
    64  // AsTest represents a test of As functionality.
    65  // The conformance test:
    66  // 1. Calls BucketCheck.
    67  // 2. Creates a blob in a directory, using BeforeWrite as a WriterOption.
     68  // 3. Fetches the blob's attributes and calls AttributesCheck.
     69  // 4. Creates a Reader for the blob using BeforeRead as a ReaderOption,
    70  //    and calls ReaderCheck with the resulting Reader.
    71  // 5. Calls List using BeforeList as a ListOption, with Delimiter set so
    72  //    that only the directory is returned, and calls ListObjectCheck
    73  //    on the single directory list entry returned.
    74  // 6. Calls List using BeforeList as a ListOption, and calls ListObjectCheck
    75  //    on the single blob entry returned.
    76  // 7. Tries to read a non-existent blob, and calls ErrorCheck with the error.
    77  // 8. Makes a copy of the blob, using BeforeCopy as a CopyOption.
    78  //
    79  // For example, an AsTest might set a driver-specific field to a custom
    80  // value in BeforeWrite, and then verify the custom value was returned in
    81  // AttributesCheck and/or ReaderCheck.
    82  type AsTest interface {
    83  	// Name should return a descriptive name for the test.
    84  	Name() string
    85  	// BucketCheck will be called to allow verification of Bucket.As.
    86  	BucketCheck(b *blob.Bucket) error
    87  	// ErrorCheck will be called to allow verification of Bucket.ErrorAs.
    88  	ErrorCheck(b *blob.Bucket, err error) error
    89  	// BeforeRead will be passed directly to ReaderOptions as part of reading
    90  	// a test blob.
    91  	BeforeRead(as func(interface{}) bool) error
    92  	// BeforeWrite will be passed directly to WriterOptions as part of creating
    93  	// a test blob.
    94  	BeforeWrite(as func(interface{}) bool) error
    95  	// BeforeCopy will be passed directly to CopyOptions as part of copying
    96  	// the test blob.
    97  	BeforeCopy(as func(interface{}) bool) error
    98  	// BeforeList will be passed directly to ListOptions as part of listing the
    99  	// test blob.
   100  	BeforeList(as func(interface{}) bool) error
   101  	// AttributesCheck will be called after fetching the test blob's attributes.
   102  	// It should call attrs.As and verify the results.
   103  	AttributesCheck(attrs *blob.Attributes) error
   104  	// ReaderCheck will be called after creating a blob.Reader.
   105  	// It should call r.As and verify the results.
   106  	ReaderCheck(r *blob.Reader) error
   107  	// ListObjectCheck will be called after calling List with the test object's
   108  	// name as the Prefix. It should call o.As and verify the results.
   109  	ListObjectCheck(o *blob.ListObject) error
   110  }
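
         // A sketch of a driver-specific AsTest, shown for illustration only. The
         // fictionalRequest and fictionalAttrs types stand in for whatever types a
         // real driver exposes through As; only the methods relevant to this check
         // are shown, and the remaining AsTest methods would simply return nil.
         //
         //	type verifyCustomField struct{}
         //
         //	func (verifyCustomField) Name() string { return "verify custom field via As" }
         //
         //	func (verifyCustomField) BeforeWrite(as func(interface{}) bool) error {
         //		var req *fictionalRequest
         //		if !as(&req) {
         //			return errors.New("Writer.As failed")
         //		}
         //		req.CustomField = "custom-value"
         //		return nil
         //	}
         //
         //	func (verifyCustomField) AttributesCheck(attrs *blob.Attributes) error {
         //		var fa fictionalAttrs
         //		if !attrs.As(&fa) {
         //			return errors.New("Attributes.As failed")
         //		}
         //		if fa.CustomField != "custom-value" {
         //			return fmt.Errorf("got CustomField %q, want %q", fa.CustomField, "custom-value")
         //		}
         //		return nil
         //	}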
   111  
   112  type verifyAsFailsOnNil struct{}
   113  
   114  func (verifyAsFailsOnNil) Name() string {
   115  	return "verify As returns false when passed nil"
   116  }
   117  
   118  func (verifyAsFailsOnNil) BucketCheck(b *blob.Bucket) error {
   119  	if b.As(nil) {
   120  		return errors.New("want Bucket.As to return false when passed nil")
   121  	}
   122  	return nil
   123  }
   124  
   125  func (verifyAsFailsOnNil) ErrorCheck(b *blob.Bucket, err error) (ret error) {
   126  	defer func() {
   127  		if recover() == nil {
   128  			ret = errors.New("want ErrorAs to panic when passed nil")
   129  		}
   130  	}()
   131  	b.ErrorAs(err, nil)
   132  	return nil
   133  }
   134  
   135  func (verifyAsFailsOnNil) BeforeRead(as func(interface{}) bool) error {
   136  	if as(nil) {
    137  		return errors.New("want BeforeRead's As to return false when passed nil")
   138  	}
   139  	return nil
   140  }
   141  
   142  func (verifyAsFailsOnNil) BeforeWrite(as func(interface{}) bool) error {
   143  	if as(nil) {
   144  		return errors.New("want BeforeWrite's As to return false when passed nil")
   145  	}
   146  	return nil
   147  }
   148  
   149  func (verifyAsFailsOnNil) BeforeCopy(as func(interface{}) bool) error {
   150  	if as(nil) {
   151  		return errors.New("want BeforeCopy's As to return false when passed nil")
   152  	}
   153  	return nil
   154  }
   155  
   156  func (verifyAsFailsOnNil) BeforeList(as func(interface{}) bool) error {
   157  	if as(nil) {
   158  		return errors.New("want BeforeList's As to return false when passed nil")
   159  	}
   160  	return nil
   161  }
   162  
   163  func (verifyAsFailsOnNil) AttributesCheck(attrs *blob.Attributes) error {
   164  	if attrs.As(nil) {
   165  		return errors.New("want Attributes.As to return false when passed nil")
   166  	}
   167  	return nil
   168  }
   169  
   170  func (verifyAsFailsOnNil) ReaderCheck(r *blob.Reader) error {
   171  	if r.As(nil) {
   172  		return errors.New("want Reader.As to return false when passed nil")
   173  	}
   174  	return nil
   175  }
   176  
   177  func (verifyAsFailsOnNil) ListObjectCheck(o *blob.ListObject) error {
   178  	if o.As(nil) {
   179  		return errors.New("want ListObject.As to return false when passed nil")
   180  	}
   181  	return nil
   182  }
   183  
   184  // RunConformanceTests runs conformance tests for driver implementations of blob.
   185  func RunConformanceTests(t *testing.T, newHarness HarnessMaker, asTests []AsTest) {
   186  	t.Run("TestList", func(t *testing.T) {
   187  		testList(t, newHarness)
   188  	})
   189  	t.Run("TestListWeirdKeys", func(t *testing.T) {
   190  		testListWeirdKeys(t, newHarness)
   191  	})
   192  	t.Run("TestListDelimiters", func(t *testing.T) {
   193  		testListDelimiters(t, newHarness)
   194  	})
   195  	t.Run("TestRead", func(t *testing.T) {
   196  		testRead(t, newHarness)
   197  	})
   198  	t.Run("TestAttributes", func(t *testing.T) {
   199  		testAttributes(t, newHarness)
   200  	})
   201  	t.Run("TestWrite", func(t *testing.T) {
   202  		testWrite(t, newHarness)
   203  	})
   204  	t.Run("TestCanceledWrite", func(t *testing.T) {
   205  		testCanceledWrite(t, newHarness)
   206  	})
   207  	t.Run("TestConcurrentWriteAndRead", func(t *testing.T) {
   208  		testConcurrentWriteAndRead(t, newHarness)
   209  	})
   210  	t.Run("TestMetadata", func(t *testing.T) {
   211  		testMetadata(t, newHarness)
   212  	})
   213  	t.Run("TestMD5", func(t *testing.T) {
   214  		testMD5(t, newHarness)
   215  	})
   216  	t.Run("TestCopy", func(t *testing.T) {
   217  		testCopy(t, newHarness)
   218  	})
   219  	t.Run("TestDelete", func(t *testing.T) {
   220  		testDelete(t, newHarness)
   221  	})
   222  	t.Run("TestKeys", func(t *testing.T) {
   223  		testKeys(t, newHarness)
   224  	})
   225  	t.Run("TestSignedURL", func(t *testing.T) {
   226  		testSignedURL(t, newHarness)
   227  	})
   228  	asTests = append(asTests, verifyAsFailsOnNil{})
   229  	t.Run("TestAs", func(t *testing.T) {
   230  		for _, st := range asTests {
   231  			if st.Name() == "" {
   232  				t.Fatalf("AsTest.Name is required")
   233  			}
   234  			t.Run(st.Name(), func(t *testing.T) {
   235  				testAs(t, newHarness, st)
   236  			})
   237  		}
   238  	})
   239  }
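
         // A driver's conformance test typically wires these pieces together as in
         // the following sketch, where myHarness is a hypothetical Harness
         // implementation like the one sketched above:
         //
         //	func TestConformance(t *testing.T) {
         //		newHarness := func(ctx context.Context, t *testing.T) (drivertest.Harness, error) {
         //			return &myHarness{}, nil
         //		}
         //		drivertest.RunConformanceTests(t, newHarness, nil)
         //	}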
   240  
   241  // RunBenchmarks runs benchmarks for driver implementations of blob.
   242  func RunBenchmarks(b *testing.B, bkt *blob.Bucket) {
   243  	b.Run("BenchmarkRead", func(b *testing.B) {
   244  		benchmarkRead(b, bkt)
   245  	})
   246  	b.Run("BenchmarkWriteReadDelete", func(b *testing.B) {
   247  		benchmarkWriteReadDelete(b, bkt)
   248  	})
   249  }
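
         // Benchmarks run against an already-open *blob.Bucket; a sketch, where
         // openTestBucket is a placeholder for driver-specific setup:
         //
         //	func BenchmarkMyDriver(b *testing.B) {
         //		bkt, err := openTestBucket()
         //		if err != nil {
         //			b.Fatal(err)
         //		}
         //		defer bkt.Close()
         //		drivertest.RunBenchmarks(b, bkt)
         //	}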
   250  
   251  // testList tests the functionality of List.
   252  func testList(t *testing.T, newHarness HarnessMaker) {
   253  	const keyPrefix = "blob-for-list"
   254  	content := []byte("hello")
   255  
   256  	keyForIndex := func(i int) string { return fmt.Sprintf("%s-%d", keyPrefix, i) }
   257  	gotIndices := func(t *testing.T, objs []*driver.ListObject) []int {
   258  		var got []int
   259  		for _, obj := range objs {
   260  			if !strings.HasPrefix(obj.Key, keyPrefix) {
   261  				t.Errorf("got name %q, expected it to have prefix %q", obj.Key, keyPrefix)
   262  				continue
   263  			}
   264  			i, err := strconv.Atoi(obj.Key[len(keyPrefix)+1:])
   265  			if err != nil {
   266  				t.Error(err)
   267  				continue
   268  			}
   269  			got = append(got, i)
   270  		}
   271  		return got
   272  	}
   273  
   274  	tests := []struct {
   275  		name      string
   276  		pageSize  int
   277  		prefix    string
   278  		wantPages [][]int
   279  		want      []int
   280  	}{
   281  		{
   282  			name:      "no objects",
   283  			prefix:    "no-objects-with-this-prefix",
   284  			wantPages: [][]int{nil},
   285  		},
   286  		{
   287  			name:      "exactly 1 object due to prefix",
   288  			prefix:    keyForIndex(1),
   289  			wantPages: [][]int{{1}},
   290  			want:      []int{1},
   291  		},
   292  		{
   293  			name:      "no pagination",
   294  			prefix:    keyPrefix,
   295  			wantPages: [][]int{{0, 1, 2}},
   296  			want:      []int{0, 1, 2},
   297  		},
   298  		{
   299  			name:      "by 1",
   300  			prefix:    keyPrefix,
   301  			pageSize:  1,
   302  			wantPages: [][]int{{0}, {1}, {2}},
   303  			want:      []int{0, 1, 2},
   304  		},
   305  		{
   306  			name:      "by 2",
   307  			prefix:    keyPrefix,
   308  			pageSize:  2,
   309  			wantPages: [][]int{{0, 1}, {2}},
   310  			want:      []int{0, 1, 2},
   311  		},
   312  		{
   313  			name:      "by 3",
   314  			prefix:    keyPrefix,
   315  			pageSize:  3,
   316  			wantPages: [][]int{{0, 1, 2}},
   317  			want:      []int{0, 1, 2},
   318  		},
   319  	}
   320  
   321  	ctx := context.Background()
   322  
   323  	// Creates blobs for sub-tests below.
   324  	// We only create the blobs once, for efficiency and because there's
   325  	// no guarantee that after we create them they will be immediately returned
   326  	// from List. The very first time the test is run against a Bucket, it may be
   327  	// flaky due to this race.
   328  	init := func(t *testing.T) (driver.Bucket, func()) {
   329  		h, err := newHarness(ctx, t)
   330  		if err != nil {
   331  			t.Fatal(err)
   332  		}
   333  		drv, err := h.MakeDriver(ctx)
   334  		if err != nil {
   335  			t.Fatal(err)
   336  		}
   337  		// See if the blobs are already there.
   338  		b := blob.NewBucket(drv)
   339  		iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
   340  		found := iterToSetOfKeys(ctx, t, iter)
   341  		for i := 0; i < 3; i++ {
   342  			key := keyForIndex(i)
   343  			if !found[key] {
   344  				if err := b.WriteAll(ctx, key, content, nil); err != nil {
   345  					b.Close()
   346  					t.Fatal(err)
   347  				}
   348  			}
   349  		}
   350  		return drv, func() { b.Close(); h.Close() }
   351  	}
   352  
   353  	for _, tc := range tests {
   354  		t.Run(tc.name, func(t *testing.T) {
   355  			drv, done := init(t)
   356  			defer done()
   357  
   358  			var gotPages [][]int
   359  			var got []int
   360  			var nextPageToken []byte
   361  			for {
   362  				page, err := drv.ListPaged(ctx, &driver.ListOptions{
   363  					PageSize:  tc.pageSize,
   364  					Prefix:    tc.prefix,
   365  					PageToken: nextPageToken,
   366  				})
   367  				if err != nil {
   368  					t.Fatal(err)
   369  				}
   370  				gotThisPage := gotIndices(t, page.Objects)
   371  				got = append(got, gotThisPage...)
   372  				gotPages = append(gotPages, gotThisPage)
   373  				if len(page.NextPageToken) == 0 {
   374  					break
   375  				}
   376  				nextPageToken = page.NextPageToken
   377  			}
   378  			if diff := cmp.Diff(gotPages, tc.wantPages); diff != "" {
   379  				t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", gotPages, tc.wantPages, diff)
   380  			}
   381  			if diff := cmp.Diff(got, tc.want); diff != "" {
   382  				t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, tc.want, diff)
   383  			}
   384  		})
   385  	}
   386  
    387  	// Verify that pagination works when a key is inserted into an already-retrieved page.
   388  	t.Run("PaginationConsistencyAfterInsert", func(t *testing.T) {
   389  		drv, done := init(t)
   390  		defer done()
   391  
   392  		// Fetch a page of 2 results: 0, 1.
   393  		page, err := drv.ListPaged(ctx, &driver.ListOptions{
   394  			PageSize: 2,
   395  			Prefix:   keyPrefix,
   396  		})
   397  		if err != nil {
   398  			t.Fatal(err)
   399  		}
   400  		got := gotIndices(t, page.Objects)
   401  		want := []int{0, 1}
   402  		if diff := cmp.Diff(got, want); diff != "" {
   403  			t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   404  		}
   405  
   406  		// Insert a key "0a" in the middle of the page we already retrieved.
   407  		b := blob.NewBucket(drv)
   408  		defer b.Close()
   409  		key := page.Objects[0].Key + "a"
   410  		if err := b.WriteAll(ctx, key, content, nil); err != nil {
   411  			t.Fatal(err)
   412  		}
   413  		defer func() {
   414  			_ = b.Delete(ctx, key)
   415  		}()
   416  
   417  		// Fetch the next page. It should not include 0, 0a, or 1, and it should
   418  		// include 2.
   419  		page, err = drv.ListPaged(ctx, &driver.ListOptions{
   420  			Prefix:    keyPrefix,
   421  			PageToken: page.NextPageToken,
   422  		})
   423  		if err != nil {
   424  			t.Fatal(err)
   425  		}
   426  		got = gotIndices(t, page.Objects)
   427  		want = []int{2}
   428  		if diff := cmp.Diff(got, want); diff != "" {
   429  			t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   430  		}
   431  	})
   432  
    433  	// Verify that pagination works when a key is deleted from an already-retrieved page.
   434  	t.Run("PaginationConsistencyAfterDelete", func(t *testing.T) {
   435  		drv, done := init(t)
   436  		defer done()
   437  
   438  		// Fetch a page of 2 results: 0, 1.
   439  		page, err := drv.ListPaged(ctx, &driver.ListOptions{
   440  			PageSize: 2,
   441  			Prefix:   keyPrefix,
   442  		})
   443  		if err != nil {
   444  			t.Fatal(err)
   445  		}
   446  		got := gotIndices(t, page.Objects)
   447  		want := []int{0, 1}
   448  		if diff := cmp.Diff(got, want); diff != "" {
   449  			t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   450  		}
   451  
   452  		// Delete key "1".
   453  		b := blob.NewBucket(drv)
   454  		defer b.Close()
   455  		key := page.Objects[1].Key
   456  		if err := b.Delete(ctx, key); err != nil {
   457  			t.Fatal(err)
   458  		}
   459  		defer func() {
   460  			_ = b.WriteAll(ctx, key, content, nil)
   461  		}()
   462  
   463  		// Fetch the next page. It should not include 0 or 1, and it should
   464  		// include 2.
   465  		page, err = drv.ListPaged(ctx, &driver.ListOptions{
   466  			Prefix:    keyPrefix,
   467  			PageToken: page.NextPageToken,
   468  		})
   469  		if err != nil {
   470  			t.Fatal(err)
   471  		}
   472  		got = gotIndices(t, page.Objects)
   473  		want = []int{2}
   474  		if diff := cmp.Diff(got, want); diff != "" {
   475  			t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   476  		}
   477  	})
   478  }
   479  
   480  // testListWeirdKeys tests the functionality of List on weird keys.
   481  func testListWeirdKeys(t *testing.T, newHarness HarnessMaker) {
   482  	const keyPrefix = "list-weirdkeys-"
   483  	content := []byte("hello")
   484  	ctx := context.Background()
   485  
   486  	// We're going to create a blob for each of the weird key strings, and
   487  	// then verify we can see them with List.
   488  	want := map[string]bool{}
   489  	for _, k := range escape.WeirdStrings {
   490  		want[keyPrefix+k] = true
   491  	}
   492  
   493  	// Creates blobs for sub-tests below.
   494  	// We only create the blobs once, for efficiency and because there's
   495  	// no guarantee that after we create them they will be immediately returned
   496  	// from List. The very first time the test is run against a Bucket, it may be
   497  	// flaky due to this race.
   498  	init := func(t *testing.T) (*blob.Bucket, func()) {
   499  		h, err := newHarness(ctx, t)
   500  		if err != nil {
   501  			t.Fatal(err)
   502  		}
   503  		drv, err := h.MakeDriver(ctx)
   504  		if err != nil {
   505  			t.Fatal(err)
   506  		}
   507  		// See if the blobs are already there.
   508  		b := blob.NewBucket(drv)
   509  		iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
   510  		found := iterToSetOfKeys(ctx, t, iter)
   511  		for _, k := range escape.WeirdStrings {
   512  			key := keyPrefix + k
   513  			if !found[key] {
   514  				if err := b.WriteAll(ctx, key, content, nil); err != nil {
   515  					b.Close()
   516  					t.Fatal(err)
   517  				}
   518  			}
   519  		}
   520  		return b, func() { b.Close(); h.Close() }
   521  	}
   522  
   523  	b, done := init(t)
   524  	defer done()
   525  
   526  	iter := b.List(&blob.ListOptions{Prefix: keyPrefix})
   527  	got := iterToSetOfKeys(ctx, t, iter)
   528  
   529  	if diff := cmp.Diff(got, want); diff != "" {
   530  		t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", got, want, diff)
   531  	}
   532  }
   533  
   534  // listResult is a recursive view of the hierarchy. It's used to verify List
   535  // using Delimiter.
   536  type listResult struct {
   537  	Key   string
   538  	IsDir bool
   539  	// If IsDir is true and recursion is enabled, the recursive listing of the directory.
   540  	Sub []listResult
   541  }
   542  
   543  // doList lists b using prefix and delim.
   544  // If recurse is true, it recurses into directories filling in listResult.Sub.
   545  func doList(ctx context.Context, b *blob.Bucket, prefix, delim string, recurse bool) ([]listResult, error) {
   546  	iter := b.List(&blob.ListOptions{
   547  		Prefix:    prefix,
   548  		Delimiter: delim,
   549  	})
   550  	var retval []listResult
   551  	for {
   552  		obj, err := iter.Next(ctx)
   553  		if err == io.EOF {
   554  			if obj != nil {
   555  				return nil, errors.New("obj is not nil on EOF")
   556  			}
   557  			break
   558  		}
   559  		if err != nil {
   560  			return nil, err
   561  		}
   562  		var sub []listResult
   563  		if obj.IsDir && recurse {
   564  			sub, err = doList(ctx, b, obj.Key, delim, true)
   565  			if err != nil {
   566  				return nil, err
   567  			}
   568  		}
   569  		retval = append(retval, listResult{
   570  			Key:   obj.Key,
   571  			IsDir: obj.IsDir,
   572  			Sub:   sub,
   573  		})
   574  	}
   575  	return retval, nil
   576  }
   577  
   578  // testListDelimiters tests the functionality of List using Delimiters.
   579  func testListDelimiters(t *testing.T, newHarness HarnessMaker) {
   580  	const keyPrefix = "blob-for-delimiters-"
   581  	content := []byte("hello")
   582  
   583  	// The set of files to use for these tests. The strings in each entry will
   584  	// be joined using delim, so the result is a directory structure like this
   585  	// (using / as delimiter):
   586  	// dir1/a.txt
   587  	// dir1/b.txt
   588  	// dir1/subdir/c.txt
   589  	// dir1/subdir/d.txt
   590  	// dir2/e.txt
   591  	// f.txt
   592  	keys := [][]string{
   593  		{"dir1", "a.txt"},
   594  		{"dir1", "b.txt"},
   595  		{"dir1", "subdir", "c.txt"},
   596  		{"dir1", "subdir", "d.txt"},
   597  		{"dir2", "e.txt"},
   598  		{"f.txt"},
   599  	}
   600  
   601  	// Test with several different delimiters.
   602  	tests := []struct {
   603  		name, delim string
   604  		// Expected result of doList with an empty delimiter.
   605  		// All keys should be listed at the top level, with no directories.
   606  		wantFlat []listResult
   607  		// Expected result of doList with delimiter and recurse = true.
   608  		// All keys should be listed, with keys in directories in the Sub field
   609  		// of their directory.
   610  		wantRecursive []listResult
   611  		// Expected result of repeatedly calling driver.ListPaged with delimiter
   612  		// and page size = 1.
   613  		wantPaged []listResult
    614  		// Expected result of doList with delimiter and recurse = false,
    615  		// after dir2/e.txt is deleted.
   616  		// dir1/ and f.txt should be listed; dir2/ should no longer be present
   617  		// because there are no keys in it.
   618  		wantAfterDel []listResult
   619  	}{
   620  		{
   621  			name:  "fwdslash",
   622  			delim: "/",
   623  			wantFlat: []listResult{
   624  				{Key: keyPrefix + "/dir1/a.txt"},
   625  				{Key: keyPrefix + "/dir1/b.txt"},
   626  				{Key: keyPrefix + "/dir1/subdir/c.txt"},
   627  				{Key: keyPrefix + "/dir1/subdir/d.txt"},
   628  				{Key: keyPrefix + "/dir2/e.txt"},
   629  				{Key: keyPrefix + "/f.txt"},
   630  			},
   631  			wantRecursive: []listResult{
   632  				{
   633  					Key:   keyPrefix + "/dir1/",
   634  					IsDir: true,
   635  					Sub: []listResult{
   636  						{Key: keyPrefix + "/dir1/a.txt"},
   637  						{Key: keyPrefix + "/dir1/b.txt"},
   638  						{
   639  							Key:   keyPrefix + "/dir1/subdir/",
   640  							IsDir: true,
   641  							Sub: []listResult{
   642  								{Key: keyPrefix + "/dir1/subdir/c.txt"},
   643  								{Key: keyPrefix + "/dir1/subdir/d.txt"},
   644  							},
   645  						},
   646  					},
   647  				},
   648  				{
   649  					Key:   keyPrefix + "/dir2/",
   650  					IsDir: true,
   651  					Sub: []listResult{
   652  						{Key: keyPrefix + "/dir2/e.txt"},
   653  					},
   654  				},
   655  				{Key: keyPrefix + "/f.txt"},
   656  			},
   657  			wantPaged: []listResult{
   658  				{
   659  					Key:   keyPrefix + "/dir1/",
   660  					IsDir: true,
   661  				},
   662  				{
   663  					Key:   keyPrefix + "/dir2/",
   664  					IsDir: true,
   665  				},
   666  				{Key: keyPrefix + "/f.txt"},
   667  			},
   668  			wantAfterDel: []listResult{
   669  				{
   670  					Key:   keyPrefix + "/dir1/",
   671  					IsDir: true,
   672  				},
   673  				{Key: keyPrefix + "/f.txt"},
   674  			},
   675  		},
   676  		{
   677  			name:  "backslash",
   678  			delim: "\\",
   679  			wantFlat: []listResult{
   680  				{Key: keyPrefix + "\\dir1\\a.txt"},
   681  				{Key: keyPrefix + "\\dir1\\b.txt"},
   682  				{Key: keyPrefix + "\\dir1\\subdir\\c.txt"},
   683  				{Key: keyPrefix + "\\dir1\\subdir\\d.txt"},
   684  				{Key: keyPrefix + "\\dir2\\e.txt"},
   685  				{Key: keyPrefix + "\\f.txt"},
   686  			},
   687  			wantRecursive: []listResult{
   688  				{
   689  					Key:   keyPrefix + "\\dir1\\",
   690  					IsDir: true,
   691  					Sub: []listResult{
   692  						{Key: keyPrefix + "\\dir1\\a.txt"},
   693  						{Key: keyPrefix + "\\dir1\\b.txt"},
   694  						{
   695  							Key:   keyPrefix + "\\dir1\\subdir\\",
   696  							IsDir: true,
   697  							Sub: []listResult{
   698  								{Key: keyPrefix + "\\dir1\\subdir\\c.txt"},
   699  								{Key: keyPrefix + "\\dir1\\subdir\\d.txt"},
   700  							},
   701  						},
   702  					},
   703  				},
   704  				{
   705  					Key:   keyPrefix + "\\dir2\\",
   706  					IsDir: true,
   707  					Sub: []listResult{
   708  						{Key: keyPrefix + "\\dir2\\e.txt"},
   709  					},
   710  				},
   711  				{Key: keyPrefix + "\\f.txt"},
   712  			},
   713  			wantPaged: []listResult{
   714  				{
   715  					Key:   keyPrefix + "\\dir1\\",
   716  					IsDir: true,
   717  				},
   718  				{
   719  					Key:   keyPrefix + "\\dir2\\",
   720  					IsDir: true,
   721  				},
   722  				{Key: keyPrefix + "\\f.txt"},
   723  			},
   724  			wantAfterDel: []listResult{
   725  				{
   726  					Key:   keyPrefix + "\\dir1\\",
   727  					IsDir: true,
   728  				},
   729  				{Key: keyPrefix + "\\f.txt"},
   730  			},
   731  		},
   732  		{
   733  			name:  "abc",
   734  			delim: "abc",
   735  			wantFlat: []listResult{
   736  				{Key: keyPrefix + "abcdir1abca.txt"},
   737  				{Key: keyPrefix + "abcdir1abcb.txt"},
   738  				{Key: keyPrefix + "abcdir1abcsubdirabcc.txt"},
   739  				{Key: keyPrefix + "abcdir1abcsubdirabcd.txt"},
   740  				{Key: keyPrefix + "abcdir2abce.txt"},
   741  				{Key: keyPrefix + "abcf.txt"},
   742  			},
   743  			wantRecursive: []listResult{
   744  				{
   745  					Key:   keyPrefix + "abcdir1abc",
   746  					IsDir: true,
   747  					Sub: []listResult{
   748  						{Key: keyPrefix + "abcdir1abca.txt"},
   749  						{Key: keyPrefix + "abcdir1abcb.txt"},
   750  						{
   751  							Key:   keyPrefix + "abcdir1abcsubdirabc",
   752  							IsDir: true,
   753  							Sub: []listResult{
   754  								{Key: keyPrefix + "abcdir1abcsubdirabcc.txt"},
   755  								{Key: keyPrefix + "abcdir1abcsubdirabcd.txt"},
   756  							},
   757  						},
   758  					},
   759  				},
   760  				{
   761  					Key:   keyPrefix + "abcdir2abc",
   762  					IsDir: true,
   763  					Sub: []listResult{
   764  						{Key: keyPrefix + "abcdir2abce.txt"},
   765  					},
   766  				},
   767  				{Key: keyPrefix + "abcf.txt"},
   768  			},
   769  			wantPaged: []listResult{
   770  				{
   771  					Key:   keyPrefix + "abcdir1abc",
   772  					IsDir: true,
   773  				},
   774  				{
   775  					Key:   keyPrefix + "abcdir2abc",
   776  					IsDir: true,
   777  				},
   778  				{Key: keyPrefix + "abcf.txt"},
   779  			},
   780  			wantAfterDel: []listResult{
   781  				{
   782  					Key:   keyPrefix + "abcdir1abc",
   783  					IsDir: true,
   784  				},
   785  				{Key: keyPrefix + "abcf.txt"},
   786  			},
   787  		},
   788  	}
   789  
   790  	ctx := context.Background()
   791  
   792  	// Creates blobs for sub-tests below.
   793  	// We only create the blobs once, for efficiency and because there's
   794  	// no guarantee that after we create them they will be immediately returned
   795  	// from List. The very first time the test is run against a Bucket, it may be
   796  	// flaky due to this race.
   797  	init := func(t *testing.T, delim string) (driver.Bucket, *blob.Bucket, func()) {
   798  		h, err := newHarness(ctx, t)
   799  		if err != nil {
   800  			t.Fatal(err)
   801  		}
   802  		drv, err := h.MakeDriver(ctx)
   803  		if err != nil {
   804  			t.Fatal(err)
   805  		}
   806  		b := blob.NewBucket(drv)
   807  
   808  		// See if the blobs are already there.
   809  		prefix := keyPrefix + delim
   810  		iter := b.List(&blob.ListOptions{Prefix: prefix})
   811  		found := iterToSetOfKeys(ctx, t, iter)
   812  		for _, keyParts := range keys {
   813  			key := prefix + strings.Join(keyParts, delim)
   814  			if !found[key] {
   815  				if err := b.WriteAll(ctx, key, content, nil); err != nil {
   816  					b.Close()
   817  					t.Fatal(err)
   818  				}
   819  			}
   820  		}
   821  		return drv, b, func() { b.Close(); h.Close() }
   822  	}
   823  
   824  	for _, tc := range tests {
   825  		t.Run(tc.name, func(t *testing.T) {
   826  			drv, b, done := init(t, tc.delim)
   827  			defer done()
   828  
   829  			// Fetch without using delimiter.
   830  			got, err := doList(ctx, b, keyPrefix+tc.delim, "", true)
   831  			if err != nil {
   832  				t.Fatal(err)
   833  			}
   834  			if diff := cmp.Diff(got, tc.wantFlat); diff != "" {
   835  				t.Errorf("with no delimiter, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantFlat, diff)
   836  			}
   837  
   838  			// Fetch using delimiter, recursively.
   839  			got, err = doList(ctx, b, keyPrefix+tc.delim, tc.delim, true)
   840  			if err != nil {
   841  				t.Fatal(err)
   842  			}
   843  			if diff := cmp.Diff(got, tc.wantRecursive); diff != "" {
   844  				t.Errorf("with delimiter, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantRecursive, diff)
   845  			}
   846  
   847  			// Test pagination via driver.ListPaged.
   848  			var nextPageToken []byte
   849  			got = nil
   850  			for {
   851  				page, err := drv.ListPaged(ctx, &driver.ListOptions{
   852  					Prefix:    keyPrefix + tc.delim,
   853  					Delimiter: tc.delim,
   854  					PageSize:  1,
   855  					PageToken: nextPageToken,
   856  				})
   857  				if err != nil {
   858  					t.Fatal(err)
   859  				}
   860  				if len(page.Objects) > 1 {
   861  					t.Errorf("got %d objects on a page, want 0 or 1", len(page.Objects))
   862  				}
   863  				for _, obj := range page.Objects {
   864  					got = append(got, listResult{
   865  						Key:   obj.Key,
   866  						IsDir: obj.IsDir,
   867  					})
   868  				}
   869  				if len(page.NextPageToken) == 0 {
   870  					break
   871  				}
   872  				nextPageToken = page.NextPageToken
   873  			}
   874  			if diff := cmp.Diff(got, tc.wantPaged); diff != "" {
   875  				t.Errorf("paged got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantPaged, diff)
   876  			}
   877  
   878  			// Delete dir2/e.txt and verify that dir2/ is no longer returned.
   879  			key := strings.Join(append([]string{keyPrefix}, "dir2", "e.txt"), tc.delim)
   880  			if err := b.Delete(ctx, key); err != nil {
   881  				t.Fatal(err)
   882  			}
   883  			// Attempt to restore dir2/e.txt at the end of the test for the next run.
   884  			defer func() {
   885  				_ = b.WriteAll(ctx, key, content, nil)
   886  			}()
   887  
   888  			got, err = doList(ctx, b, keyPrefix+tc.delim, tc.delim, false)
   889  			if err != nil {
   890  				t.Fatal(err)
   891  			}
   892  			if diff := cmp.Diff(got, tc.wantAfterDel); diff != "" {
   893  				t.Errorf("after delete, got\n%v\nwant\n%v\ndiff\n%s", got, tc.wantAfterDel, diff)
   894  			}
   895  		})
   896  	}
   897  }
   898  
   899  func iterToSetOfKeys(ctx context.Context, t *testing.T, iter *blob.ListIterator) map[string]bool {
   900  	retval := map[string]bool{}
   901  	for {
   902  		if item, err := iter.Next(ctx); err == io.EOF {
   903  			break
   904  		} else if err != nil {
   905  			t.Fatal(err)
   906  		} else {
   907  			retval[item.Key] = true
   908  		}
   909  	}
   910  	return retval
   911  }
   912  
   913  // testRead tests the functionality of NewReader, NewRangeReader, and Reader.
   914  func testRead(t *testing.T, newHarness HarnessMaker) {
   915  	const key = "blob-for-reading"
    916  	content := []byte("abcdefghijklmnopqrstuvwxyz")
   917  	contentSize := int64(len(content))
   918  
   919  	tests := []struct {
   920  		name           string
   921  		key            string
   922  		offset, length int64
   923  		want           []byte
   924  		wantReadSize   int64
   925  		wantErr        bool
   926  		// set to true to skip creation of the object for
   927  		// tests where we expect an error without any actual
   928  		// read.
   929  		skipCreate bool
   930  	}{
   931  		{
   932  			name:    "read of nonexistent key fails",
   933  			key:     "key-does-not-exist",
   934  			length:  -1,
   935  			wantErr: true,
   936  		},
   937  		{
   938  			name:       "negative offset fails",
   939  			key:        key,
   940  			offset:     -1,
   941  			wantErr:    true,
   942  			skipCreate: true,
   943  		},
   944  		{
   945  			name: "length 0 read",
   946  			key:  key,
   947  			want: []byte{},
   948  		},
   949  		{
   950  			name:         "read from positive offset to end",
   951  			key:          key,
   952  			offset:       10,
   953  			length:       -1,
   954  			want:         content[10:],
   955  			wantReadSize: contentSize - 10,
   956  		},
   957  		{
   958  			name:         "read a part in middle",
   959  			key:          key,
   960  			offset:       10,
   961  			length:       5,
   962  			want:         content[10:15],
   963  			wantReadSize: 5,
   964  		},
   965  		{
   966  			name:         "read in full",
   967  			key:          key,
   968  			length:       -1,
   969  			want:         content,
   970  			wantReadSize: contentSize,
   971  		},
   972  		{
   973  			name:         "read in full with negative length not -1",
   974  			key:          key,
   975  			length:       -42,
   976  			want:         content,
   977  			wantReadSize: contentSize,
   978  		},
   979  	}
   980  
   981  	ctx := context.Background()
   982  
   983  	// Creates a blob for sub-tests below.
   984  	init := func(t *testing.T, skipCreate bool) (*blob.Bucket, func()) {
   985  		h, err := newHarness(ctx, t)
   986  		if err != nil {
   987  			t.Fatal(err)
   988  		}
   989  
   990  		drv, err := h.MakeDriver(ctx)
   991  		if err != nil {
   992  			t.Fatal(err)
   993  		}
   994  		b := blob.NewBucket(drv)
   995  		if skipCreate {
   996  			return b, func() { b.Close(); h.Close() }
   997  		}
   998  		if err := b.WriteAll(ctx, key, content, nil); err != nil {
   999  			b.Close()
  1000  			t.Fatal(err)
  1001  		}
  1002  		return b, func() {
  1003  			_ = b.Delete(ctx, key)
  1004  			b.Close()
  1005  			h.Close()
  1006  		}
  1007  	}
  1008  
  1009  	for _, tc := range tests {
  1010  		t.Run(tc.name, func(t *testing.T) {
  1011  			b, done := init(t, tc.skipCreate)
  1012  			defer done()
  1013  
  1014  			r, err := b.NewRangeReader(ctx, tc.key, tc.offset, tc.length, nil)
  1015  			if (err != nil) != tc.wantErr {
  1016  				t.Errorf("got err %v want error %v", err, tc.wantErr)
  1017  			}
  1018  			if err != nil {
  1019  				return
  1020  			}
  1021  			defer r.Close()
  1022  			// Make the buffer bigger than needed to make sure we actually only read
  1023  			// the expected number of bytes.
  1024  			got := make([]byte, tc.wantReadSize+10)
  1025  			n, err := r.Read(got)
  1026  			// EOF error is optional, see https://golang.org/pkg/io/#Reader.
  1027  			if err != nil && err != io.EOF {
  1028  				t.Errorf("unexpected error during read: %v", err)
  1029  			}
  1030  			if int64(n) != tc.wantReadSize {
  1031  				t.Errorf("got read length %d want %d", n, tc.wantReadSize)
  1032  			}
  1033  			if !cmp.Equal(got[:tc.wantReadSize], tc.want) {
   1034  				t.Errorf("got %q want %q", string(got[:tc.wantReadSize]), string(tc.want))
  1035  			}
  1036  			if r.Size() != contentSize {
  1037  				t.Errorf("got size %d want %d", r.Size(), contentSize)
  1038  			}
  1039  			if r.ModTime().IsZero() {
  1040  				t.Errorf("got zero mod time, want non-zero")
  1041  			}
  1042  		})
  1043  	}
  1044  }
  1045  
  1046  // testAttributes tests Attributes.
  1047  func testAttributes(t *testing.T, newHarness HarnessMaker) {
  1048  	const (
  1049  		key                = "blob-for-attributes"
  1050  		contentType        = "text/plain"
  1051  		cacheControl       = "no-cache"
  1052  		contentDisposition = "inline"
  1053  		contentEncoding    = "identity"
  1054  		contentLanguage    = "en"
  1055  	)
  1056  	content := []byte("Hello World!")
  1057  
  1058  	ctx := context.Background()
  1059  
  1060  	// Creates a blob for sub-tests below.
  1061  	init := func(t *testing.T) (*blob.Bucket, func()) {
  1062  		h, err := newHarness(ctx, t)
  1063  		if err != nil {
  1064  			t.Fatal(err)
  1065  		}
  1066  		drv, err := h.MakeDriver(ctx)
  1067  		if err != nil {
  1068  			t.Fatal(err)
  1069  		}
  1070  		b := blob.NewBucket(drv)
  1071  		opts := &blob.WriterOptions{
  1072  			ContentType:        contentType,
  1073  			CacheControl:       cacheControl,
  1074  			ContentDisposition: contentDisposition,
  1075  			ContentEncoding:    contentEncoding,
  1076  			ContentLanguage:    contentLanguage,
  1077  		}
  1078  		if err := b.WriteAll(ctx, key, content, opts); err != nil {
  1079  			b.Close()
  1080  			t.Fatal(err)
  1081  		}
  1082  		return b, func() {
  1083  			_ = b.Delete(ctx, key)
  1084  			b.Close()
  1085  			h.Close()
  1086  		}
  1087  	}
  1088  
  1089  	b, done := init(t)
  1090  	defer done()
  1091  
  1092  	_, err := b.Attributes(ctx, "not-found")
  1093  	if err == nil {
  1094  		t.Errorf("got nil want error")
  1095  	} else if gcerrors.Code(err) != gcerrors.NotFound {
  1096  		t.Errorf("got %v want NotFound error", err)
  1097  	} else if !strings.Contains(err.Error(), "not-found") {
  1098  		t.Errorf("got %v want error to include missing key", err)
  1099  	}
  1100  	a, err := b.Attributes(ctx, key)
  1101  	if err != nil {
  1102  		t.Fatalf("failed Attributes: %v", err)
  1103  	}
  1104  	// Also make a Reader so we can verify the subset of attributes
  1105  	// that it exposes.
  1106  	r, err := b.NewReader(ctx, key, nil)
  1107  	if err != nil {
   1108  		t.Fatalf("failed NewReader: %v", err)
  1109  	}
  1110  	if a.CacheControl != cacheControl {
  1111  		t.Errorf("got CacheControl %q want %q", a.CacheControl, cacheControl)
  1112  	}
  1113  	if a.ContentDisposition != contentDisposition {
  1114  		t.Errorf("got ContentDisposition %q want %q", a.ContentDisposition, contentDisposition)
  1115  	}
  1116  	if a.ContentEncoding != contentEncoding {
  1117  		t.Errorf("got ContentEncoding %q want %q", a.ContentEncoding, contentEncoding)
  1118  	}
  1119  	if a.ContentLanguage != contentLanguage {
  1120  		t.Errorf("got ContentLanguage %q want %q", a.ContentLanguage, contentLanguage)
  1121  	}
  1122  	if a.ContentType != contentType {
  1123  		t.Errorf("got ContentType %q want %q", a.ContentType, contentType)
  1124  	}
  1125  	if r.ContentType() != contentType {
  1126  		t.Errorf("got Reader.ContentType() %q want %q", r.ContentType(), contentType)
  1127  	}
  1128  	if a.Size != int64(len(content)) {
  1129  		t.Errorf("got Size %d want %d", a.Size, len(content))
  1130  	}
  1131  	if r.Size() != int64(len(content)) {
  1132  		t.Errorf("got Reader.Size() %d want %d", r.Size(), len(content))
  1133  	}
  1134  	r.Close()
  1135  
  1136  	t1 := a.ModTime
  1137  	if err := b.WriteAll(ctx, key, content, nil); err != nil {
  1138  		t.Fatal(err)
  1139  	}
  1140  	a2, err := b.Attributes(ctx, key)
  1141  	if err != nil {
   1142  		t.Fatalf("failed Attributes#2: %v", err)
  1143  	}
  1144  	t2 := a2.ModTime
  1145  	if t2.Before(t1) {
  1146  		t.Errorf("ModTime %v is before %v", t2, t1)
  1147  	}
  1148  }
  1149  
  1150  // loadTestData loads test data, inlined using go-bindata.
  1151  func loadTestData(t testing.TB, name string) []byte {
  1152  	data, err := Asset(name)
  1153  	if err != nil {
  1154  		t.Fatal(err)
  1155  	}
  1156  	return data
  1157  }
  1158  
  1159  // testWrite tests the functionality of NewWriter and Writer.
  1160  func testWrite(t *testing.T, newHarness HarnessMaker) {
   1161  	const key = "blob-for-writing"
  1162  	const existingContent = "existing content"
  1163  	smallText := loadTestData(t, "test-small.txt")
  1164  	mediumHTML := loadTestData(t, "test-medium.html")
  1165  	largeJpg := loadTestData(t, "test-large.jpg")
  1166  	helloWorld := []byte("hello world")
  1167  	helloWorldMD5 := md5.Sum(helloWorld)
  1168  
  1169  	tests := []struct {
  1170  		name            string
  1171  		key             string
  1172  		exists          bool
  1173  		content         []byte
  1174  		contentType     string
  1175  		contentMD5      []byte
  1176  		firstChunk      int
  1177  		wantContentType string
  1178  		wantErr         bool
   1179  		wantReadErr     bool // if wantErr is true, whether the Read after the failed write fails with something other than NotFound
  1180  	}{
  1181  		{
  1182  			name:        "write to empty key fails",
  1183  			wantErr:     true,
   1184  			wantReadErr: true, // read from empty key fails, but not always with NotFound
  1185  		},
  1186  		{
  1187  			name: "no write then close results in empty blob",
  1188  			key:  key,
  1189  		},
  1190  		{
   1191  			name:   "no write then close results in empty blob, blob existed",
   1192  			key:    key,
         			exists: true,
  1193  		},
  1194  		{
  1195  			name:        "invalid ContentType fails",
  1196  			key:         key,
  1197  			contentType: "application/octet/stream",
  1198  			wantErr:     true,
  1199  		},
  1200  		{
  1201  			name:            "ContentType is discovered if not provided",
  1202  			key:             key,
  1203  			content:         mediumHTML,
  1204  			wantContentType: "text/html",
  1205  		},
  1206  		{
  1207  			name:            "write with explicit ContentType overrides discovery",
  1208  			key:             key,
  1209  			content:         mediumHTML,
  1210  			contentType:     "application/json",
  1211  			wantContentType: "application/json",
  1212  		},
  1213  		{
  1214  			name:       "Content md5 match",
  1215  			key:        key,
  1216  			content:    helloWorld,
  1217  			contentMD5: helloWorldMD5[:],
  1218  		},
  1219  		{
  1220  			name:       "Content md5 did not match",
  1221  			key:        key,
  1222  			content:    []byte("not hello world"),
  1223  			contentMD5: helloWorldMD5[:],
  1224  			wantErr:    true,
  1225  		},
  1226  		{
  1227  			name:       "Content md5 did not match, blob existed",
  1228  			exists:     true,
  1229  			key:        key,
  1230  			content:    []byte("not hello world"),
  1231  			contentMD5: helloWorldMD5[:],
  1232  			wantErr:    true,
  1233  		},
  1234  		{
  1235  			name:            "a small text file",
  1236  			key:             key,
  1237  			content:         smallText,
  1238  			wantContentType: "text/html",
  1239  		},
  1240  		{
  1241  			name:            "a large jpg file",
  1242  			key:             key,
  1243  			content:         largeJpg,
  1244  			wantContentType: "image/jpg",
  1245  		},
  1246  		{
  1247  			name:            "a large jpg file written in two chunks",
  1248  			key:             key,
  1249  			firstChunk:      10,
  1250  			content:         largeJpg,
  1251  			wantContentType: "image/jpg",
  1252  		},
  1253  		// TODO(issue #304): Fails for GCS.
  1254  		/*
  1255  			{
  1256  				name:            "ContentType is parsed and reformatted",
  1257  				key:             key,
  1258  				content:         []byte("foo"),
  1259  				contentType:     `FORM-DATA;name="foo"`,
  1260  				wantContentType: `form-data; name=foo`,
  1261  			},
  1262  		*/
  1263  	}
  1264  
  1265  	ctx := context.Background()
  1266  	for _, tc := range tests {
  1267  		t.Run(tc.name, func(t *testing.T) {
  1268  			h, err := newHarness(ctx, t)
  1269  			if err != nil {
  1270  				t.Fatal(err)
  1271  			}
  1272  			defer h.Close()
  1273  			drv, err := h.MakeDriver(ctx)
  1274  			if err != nil {
  1275  				t.Fatal(err)
  1276  			}
  1277  			b := blob.NewBucket(drv)
  1278  			defer b.Close()
  1279  
  1280  			// If the test wants the blob to already exist, write it.
  1281  			if tc.exists {
  1282  				if err := b.WriteAll(ctx, key, []byte(existingContent), nil); err != nil {
  1283  					t.Fatal(err)
  1284  				}
  1285  				defer func() {
  1286  					_ = b.Delete(ctx, key)
  1287  				}()
  1288  			}
  1289  
  1290  			// Write the content.
  1291  			opts := &blob.WriterOptions{
  1292  				ContentType: tc.contentType,
  1293  				ContentMD5:  tc.contentMD5[:],
  1294  			}
  1295  			w, err := b.NewWriter(ctx, tc.key, opts)
  1296  			if err == nil {
  1297  				if len(tc.content) > 0 {
  1298  					if tc.firstChunk == 0 {
  1299  						// Write the whole thing.
  1300  						_, err = w.Write(tc.content)
  1301  					} else {
  1302  						// Write it in 2 chunks.
  1303  						_, err = w.Write(tc.content[:tc.firstChunk])
  1304  						if err == nil {
  1305  							_, err = w.Write(tc.content[tc.firstChunk:])
  1306  						}
  1307  					}
  1308  				}
  1309  				if err == nil {
  1310  					err = w.Close()
  1311  				}
  1312  			}
  1313  			if (err != nil) != tc.wantErr {
  1314  				t.Errorf("NewWriter or Close got err %v want error %v", err, tc.wantErr)
  1315  			}
  1316  			if err != nil {
  1317  				// The write failed; verify that it had no effect.
  1318  				buf, err := b.ReadAll(ctx, tc.key)
  1319  				if tc.exists {
  1320  					// Verify the previous content is still there.
  1321  					if !bytes.Equal(buf, []byte(existingContent)) {
  1322  						t.Errorf("Write failed as expected, but content doesn't match expected previous content; got \n%s\n want \n%s", string(buf), existingContent)
  1323  					}
  1324  				} else {
  1325  					// Verify that the read fails with NotFound.
  1326  					if err == nil {
  1327  						t.Error("Write failed as expected, but Read after that didn't return an error")
  1328  					} else if !tc.wantReadErr && gcerrors.Code(err) != gcerrors.NotFound {
  1329  						t.Errorf("Write failed as expected, but Read after that didn't return the right error; got %v want NotFound", err)
  1330  					} else if !strings.Contains(err.Error(), tc.key) {
  1331  						t.Errorf("got %v want error to include missing key", err)
  1332  					}
  1333  				}
  1334  				return
  1335  			}
  1336  			defer func() { _ = b.Delete(ctx, tc.key) }()
  1337  
  1338  			// Read it back.
  1339  			buf, err := b.ReadAll(ctx, tc.key)
  1340  			if err != nil {
  1341  				t.Fatal(err)
  1342  			}
  1343  			if !bytes.Equal(buf, tc.content) {
  1344  				if len(buf) < 100 && len(tc.content) < 100 {
  1345  					t.Errorf("read didn't match write; got \n%s\n want \n%s", string(buf), string(tc.content))
  1346  				} else {
  1347  					t.Error("read didn't match write, content too large to display")
  1348  				}
  1349  			}
  1350  		})
  1351  	}
  1352  }
  1353  
  1354  // testCanceledWrite tests the functionality of canceling an in-progress write.
  1355  func testCanceledWrite(t *testing.T, newHarness HarnessMaker) {
  1356  	const key = "blob-for-canceled-write"
  1357  	content := []byte("hello world")
  1358  	cancelContent := []byte("going to cancel")
  1359  
  1360  	tests := []struct {
  1361  		description string
  1362  		contentType string
  1363  		exists      bool
  1364  	}{
  1365  		{
  1366  			// The write will be buffered in the portable type as part of
  1367  			// ContentType detection, so the first call to the Driver will be Close.
  1368  			description: "EmptyContentType",
  1369  		},
  1370  		{
  1371  			// The write will be sent to the Driver, which may do its own
  1372  			// internal buffering.
  1373  			description: "NonEmptyContentType",
  1374  			contentType: "text/plain",
  1375  		},
  1376  		{
  1377  			description: "BlobExists",
  1378  			exists:      true,
  1379  		},
  1380  		// TODO(issue #482): Find a way to test that a chunked upload that's interrupted
  1381  		// after some chunks are uploaded cancels correctly.
  1382  	}
  1383  
  1384  	ctx := context.Background()
  1385  	for _, test := range tests {
  1386  		t.Run(test.description, func(t *testing.T) {
  1387  			cancelCtx, cancel := context.WithCancel(ctx)
  1388  			h, err := newHarness(ctx, t)
  1389  			if err != nil {
  1390  				t.Fatal(err)
  1391  			}
  1392  			defer h.Close()
  1393  			drv, err := h.MakeDriver(ctx)
  1394  			if err != nil {
  1395  				t.Fatal(err)
  1396  			}
  1397  			b := blob.NewBucket(drv)
  1398  			defer b.Close()
  1399  
  1400  			opts := &blob.WriterOptions{
  1401  				ContentType: test.contentType,
  1402  			}
  1403  			// If the test wants the blob to already exist, write it.
  1404  			if test.exists {
  1405  				if err := b.WriteAll(ctx, key, content, opts); err != nil {
  1406  					t.Fatal(err)
  1407  				}
  1408  				defer func() {
  1409  					_ = b.Delete(ctx, key)
  1410  				}()
  1411  			}
  1412  
  1413  			// Create a writer with the context that we're going
  1414  			// to cancel.
  1415  			w, err := b.NewWriter(cancelCtx, key, opts)
  1416  			if err != nil {
  1417  				t.Fatal(err)
  1418  			}
  1419  			// Write the content.
  1420  			if _, err := w.Write(cancelContent); err != nil {
  1421  				t.Fatal(err)
  1422  			}
  1423  
  1424  			// Verify that the previous content (if any) is still readable,
  1425  			// because the write hasn't been Closed yet.
  1426  			got, err := b.ReadAll(ctx, key)
  1427  			if test.exists {
  1428  				// The previous content should still be there.
  1429  				if !cmp.Equal(got, content) {
  1430  					t.Errorf("during unclosed write, got %q want %q", string(got), string(content))
  1431  				}
  1432  			} else {
  1433  				// The read should fail; the write hasn't been Closed so the
  1434  				// blob shouldn't exist.
  1435  				if err == nil {
  1436  					t.Error("wanted read to return an error when write is not yet Closed")
  1437  				}
  1438  			}
  1439  
  1440  			// Cancel the context to abort the write.
  1441  			cancel()
  1442  			// Close should return some kind of canceled context error.
  1443  			// We can't verify the kind of error cleanly, so we just verify there's
  1444  			// an error.
  1445  			if err := w.Close(); err == nil {
   1446  				t.Error("Close returned nil, want canceled context error")
  1447  			}
  1448  
  1449  			// Verify the write was truly aborted.
  1450  			got, err = b.ReadAll(ctx, key)
  1451  			if test.exists {
  1452  				// The previous content should still be there.
  1453  				if !cmp.Equal(got, content) {
  1454  					t.Errorf("after canceled write, got %q want %q", string(got), string(content))
  1455  				}
  1456  			} else {
  1457  				// The read should fail; the write was aborted so the
  1458  				// blob shouldn't exist.
  1459  				if err == nil {
  1460  					t.Error("wanted read to return an error when write was canceled")
  1461  				}
  1462  			}
  1463  		})
  1464  	}
  1465  }
  1466  
  1467  // testMetadata tests writing and reading the key/value metadata for a blob.
  1468  func testMetadata(t *testing.T, newHarness HarnessMaker) {
  1469  	const key = "blob-for-metadata"
  1470  	hello := []byte("hello")
  1471  
  1472  	weirdMetadata := map[string]string{}
  1473  	for _, k := range escape.WeirdStrings {
  1474  		weirdMetadata[k] = k
  1475  	}
  1476  
  1477  	tests := []struct {
  1478  		name        string
  1479  		metadata    map[string]string
  1480  		content     []byte
  1481  		contentType string
  1482  		want        map[string]string
  1483  		wantErr     bool
  1484  	}{
  1485  		{
  1486  			name:     "empty",
  1487  			content:  hello,
  1488  			metadata: map[string]string{},
  1489  			want:     nil,
  1490  		},
  1491  		{
  1492  			name:     "empty key fails",
  1493  			content:  hello,
  1494  			metadata: map[string]string{"": "empty key value"},
  1495  			wantErr:  true,
  1496  		},
  1497  		{
  1498  			name:     "duplicate case-insensitive key fails",
  1499  			content:  hello,
  1500  			metadata: map[string]string{"abc": "foo", "aBc": "bar"},
  1501  			wantErr:  true,
  1502  		},
  1503  		{
  1504  			name:    "valid metadata",
  1505  			content: hello,
  1506  			metadata: map[string]string{
  1507  				"key_a": "value-a",
  1508  				"kEy_B": "value-b",
  1509  				"key_c": "vAlUe-c",
  1510  			},
  1511  			want: map[string]string{
  1512  				"key_a": "value-a",
  1513  				"key_b": "value-b",
  1514  				"key_c": "vAlUe-c",
  1515  			},
  1516  		},
  1517  		{
  1518  			name:     "valid metadata with empty body",
  1519  			content:  nil,
  1520  			metadata: map[string]string{"foo": "bar"},
  1521  			want:     map[string]string{"foo": "bar"},
  1522  		},
  1523  		{
  1524  			name:        "valid metadata with content type",
  1525  			content:     hello,
  1526  			contentType: "text/plain",
  1527  			metadata:    map[string]string{"foo": "bar"},
  1528  			want:        map[string]string{"foo": "bar"},
  1529  		},
  1530  		{
  1531  			name:     "weird metadata keys",
  1532  			content:  hello,
  1533  			metadata: weirdMetadata,
  1534  			want:     weirdMetadata,
  1535  		},
  1536  		{
  1537  			name:     "non-utf8 metadata key",
  1538  			content:  hello,
  1539  			metadata: map[string]string{escape.NonUTF8String: "bar"},
  1540  			wantErr:  true,
  1541  		},
  1542  		{
  1543  			name:     "non-utf8 metadata value",
  1544  			content:  hello,
  1545  			metadata: map[string]string{"foo": escape.NonUTF8String},
  1546  			wantErr:  true,
  1547  		},
  1548  	}
  1549  
  1550  	ctx := context.Background()
  1551  	for _, tc := range tests {
  1552  		t.Run(tc.name, func(t *testing.T) {
  1553  			h, err := newHarness(ctx, t)
  1554  			if err != nil {
  1555  				t.Fatal(err)
  1556  			}
  1557  			defer h.Close()
  1558  
  1559  			drv, err := h.MakeDriver(ctx)
  1560  			if err != nil {
  1561  				t.Fatal(err)
  1562  			}
  1563  			b := blob.NewBucket(drv)
  1564  			defer b.Close()
  1565  			opts := &blob.WriterOptions{
  1566  				Metadata:    tc.metadata,
  1567  				ContentType: tc.contentType,
  1568  			}
   1569  			err = b.WriteAll(ctx, key, tc.content, opts)
  1570  			if (err != nil) != tc.wantErr {
  1571  				t.Errorf("got error %v want error %v", err, tc.wantErr)
  1572  			}
  1573  			if err != nil {
  1574  				return
  1575  			}
  1576  			defer func() {
  1577  				_ = b.Delete(ctx, key)
  1578  			}()
  1579  			a, err := b.Attributes(ctx, key)
  1580  			if err != nil {
  1581  				t.Fatal(err)
  1582  			}
  1583  			if diff := cmp.Diff(a.Metadata, tc.want); diff != "" {
  1584  				t.Errorf("got\n%v\nwant\n%v\ndiff\n%s", a.Metadata, tc.want, diff)
  1585  			}
  1586  		})
  1587  	}
  1588  }
  1589  
  1590  // testMD5 tests reading MD5 hashes via List and Attributes.
  1591  func testMD5(t *testing.T, newHarness HarnessMaker) {
  1592  	ctx := context.Background()
  1593  
  1594  	// Define two blobs with different content; we'll write them and then verify
  1595  	// their returned MD5 hashes.
  1596  	const aKey, bKey = "blob-for-md5-aaa", "blob-for-md5-bbb"
  1597  	aContent, bContent := []byte("hello"), []byte("goodbye")
  1598  	aMD5 := md5.Sum(aContent)
  1599  	bMD5 := md5.Sum(bContent)
  1600  
  1601  	h, err := newHarness(ctx, t)
  1602  	if err != nil {
  1603  		t.Fatal(err)
  1604  	}
  1605  	defer h.Close()
  1606  	drv, err := h.MakeDriver(ctx)
  1607  	if err != nil {
  1608  		t.Fatal(err)
  1609  	}
  1610  	b := blob.NewBucket(drv)
  1611  	defer b.Close()
  1612  
  1613  	// Write the two blobs.
  1614  	if err := b.WriteAll(ctx, aKey, aContent, nil); err != nil {
  1615  		t.Fatal(err)
  1616  	}
  1617  	defer func() { _ = b.Delete(ctx, aKey) }()
  1618  	if err := b.WriteAll(ctx, bKey, bContent, nil); err != nil {
  1619  		t.Fatal(err)
  1620  	}
  1621  	defer func() { _ = b.Delete(ctx, bKey) }()
  1622  
  1623  	// Check the MD5 we get through Attributes. Note that it's always legal to
  1624  	// return a nil MD5.
  1625  	aAttr, err := b.Attributes(ctx, aKey)
  1626  	if err != nil {
  1627  		t.Fatal(err)
  1628  	}
  1629  	if aAttr.MD5 != nil && !bytes.Equal(aAttr.MD5, aMD5[:]) {
  1630  		t.Errorf("got MD5\n%x\nwant\n%x", aAttr.MD5, aMD5)
  1631  	}
  1632  
  1633  	bAttr, err := b.Attributes(ctx, bKey)
  1634  	if err != nil {
  1635  		t.Fatal(err)
  1636  	}
  1637  	if bAttr.MD5 != nil && !bytes.Equal(bAttr.MD5, bMD5[:]) {
  1638  		t.Errorf("got MD5\n%x\nwant\n%x", bAttr.MD5, bMD5)
  1639  	}
  1640  
  1641  	// Check the MD5 we get through List. Note that it's always legal to
  1642  	// return a nil MD5.
  1643  	iter := b.List(&blob.ListOptions{Prefix: "blob-for-md5-"})
  1644  	obj, err := iter.Next(ctx)
  1645  	if err != nil {
  1646  		t.Fatal(err)
  1647  	}
  1648  	if obj.Key != aKey {
  1649  		t.Errorf("got name %q want %q", obj.Key, aKey)
  1650  	}
  1651  	if obj.MD5 != nil && !bytes.Equal(obj.MD5, aMD5[:]) {
  1652  		t.Errorf("got MD5\n%x\nwant\n%x", obj.MD5, aMD5)
  1653  	}
  1654  	obj, err = iter.Next(ctx)
  1655  	if err != nil {
  1656  		t.Fatal(err)
  1657  	}
  1658  	if obj.Key != bKey {
  1659  		t.Errorf("got name %q want %q", obj.Key, bKey)
  1660  	}
  1661  	if obj.MD5 != nil && !bytes.Equal(obj.MD5, bMD5[:]) {
  1662  		t.Errorf("got MD5\n%x\nwant\n%x", obj.MD5, bMD5)
  1663  	}
  1664  }
  1665  
  1666  // testCopy tests the functionality of Copy.
  1667  func testCopy(t *testing.T, newHarness HarnessMaker) {
  1668  	const (
  1669  		srcKey             = "blob-for-copying-src"
  1670  		dstKey             = "blob-for-copying-dest"
  1671  		dstKeyExists       = "blob-for-copying-dest-exists"
  1672  		contentType        = "text/plain"
  1673  		cacheControl       = "no-cache"
  1674  		contentDisposition = "inline"
  1675  		contentEncoding    = "identity"
  1676  		contentLanguage    = "en"
  1677  	)
  1678  	var contents = []byte("Hello World")
  1679  
  1680  	ctx := context.Background()
  1681  	t.Run("NonExistentSourceFails", func(t *testing.T) {
  1682  		h, err := newHarness(ctx, t)
  1683  		if err != nil {
  1684  			t.Fatal(err)
  1685  		}
  1686  		defer h.Close()
  1687  		drv, err := h.MakeDriver(ctx)
  1688  		if err != nil {
  1689  			t.Fatal(err)
  1690  		}
  1691  		b := blob.NewBucket(drv)
  1692  		defer b.Close()
  1693  
  1694  		err = b.Copy(ctx, dstKey, "does-not-exist", nil)
  1695  		if err == nil {
  1696  			t.Errorf("got nil want error")
  1697  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  1698  			t.Errorf("got %v want NotFound error", err)
  1699  		} else if !strings.Contains(err.Error(), "does-not-exist") {
  1700  			t.Errorf("got %v want error to include missing key", err)
  1701  		}
  1702  	})
  1703  
  1704  	t.Run("Works", func(t *testing.T) {
  1705  		h, err := newHarness(ctx, t)
  1706  		if err != nil {
  1707  			t.Fatal(err)
  1708  		}
  1709  		defer h.Close()
  1710  		drv, err := h.MakeDriver(ctx)
  1711  		if err != nil {
  1712  			t.Fatal(err)
  1713  		}
  1714  		b := blob.NewBucket(drv)
  1715  		defer b.Close()
  1716  
  1717  		// Create the source blob.
  1718  		wopts := &blob.WriterOptions{
  1719  			ContentType:        contentType,
  1720  			CacheControl:       cacheControl,
  1721  			ContentDisposition: contentDisposition,
  1722  			ContentEncoding:    contentEncoding,
  1723  			ContentLanguage:    contentLanguage,
  1724  			Metadata:           map[string]string{"foo": "bar"},
  1725  		}
  1726  		if err := b.WriteAll(ctx, srcKey, contents, wopts); err != nil {
  1727  			t.Fatal(err)
  1728  		}
  1729  
  1730  		// Grab its attributes to compare to the copy's attributes later.
  1731  		wantAttr, err := b.Attributes(ctx, srcKey)
  1732  		if err != nil {
  1733  			t.Fatal(err)
  1734  		}
  1735  		wantAttr.ModTime = time.Time{} // don't compare this field
  1736  
  1737  		// Create another blob that we're going to overwrite.
  1738  		if err := b.WriteAll(ctx, dstKeyExists, []byte("clobber me"), nil); err != nil {
  1739  			t.Fatal(err)
  1740  		}
  1741  
  1742  		// Copy the source to the destination.
  1743  		if err := b.Copy(ctx, dstKey, srcKey, nil); err != nil {
  1744  			t.Errorf("got unexpected error copying blob: %v", err)
  1745  		}
  1746  		// Read the copy.
  1747  		got, err := b.ReadAll(ctx, dstKey)
  1748  		if err != nil {
  1749  			t.Fatal(err)
  1750  		}
  1751  		if !cmp.Equal(got, contents) {
  1752  			t.Errorf("got %q want %q", string(got), string(contents))
  1753  		}
  1754  		// Verify attributes of the copy.
  1755  		gotAttr, err := b.Attributes(ctx, dstKey)
  1756  		if err != nil {
  1757  			t.Fatal(err)
  1758  		}
  1759  		gotAttr.ModTime = time.Time{} // don't compare this field
  1760  		if diff := cmp.Diff(gotAttr, wantAttr, cmpopts.IgnoreUnexported(blob.Attributes{})); diff != "" {
  1761  			t.Errorf("got %v want %v diff %s", gotAttr, wantAttr, diff)
  1762  		}
  1763  
  1764  		// Copy the source to the second destination, where there's an existing blob.
  1765  		// It should be overwritten.
  1766  		if err := b.Copy(ctx, dstKeyExists, srcKey, nil); err != nil {
  1767  			t.Errorf("got unexpected error copying blob: %v", err)
  1768  		}
  1769  		// Read the copy.
  1770  		got, err = b.ReadAll(ctx, dstKeyExists)
  1771  		if err != nil {
  1772  			t.Fatal(err)
  1773  		}
  1774  		if !cmp.Equal(got, contents) {
  1775  			t.Errorf("got %q want %q", string(got), string(contents))
  1776  		}
  1777  		// Verify attributes of the copy.
  1778  		gotAttr, err = b.Attributes(ctx, dstKeyExists)
  1779  		if err != nil {
  1780  			t.Fatal(err)
  1781  		}
  1782  		gotAttr.ModTime = time.Time{} // don't compare this field
  1783  		if diff := cmp.Diff(gotAttr, wantAttr, cmpopts.IgnoreUnexported(blob.Attributes{})); diff != "" {
  1784  			t.Errorf("got %v want %v diff %s", gotAttr, wantAttr, diff)
  1785  		}
  1786  	})
  1787  }
  1788  
  1789  // testDelete tests the functionality of Delete.
  1790  func testDelete(t *testing.T, newHarness HarnessMaker) {
  1791  	const key = "blob-for-deleting"
  1792  
  1793  	ctx := context.Background()
  1794  	t.Run("NonExistentFails", func(t *testing.T) {
  1795  		h, err := newHarness(ctx, t)
  1796  		if err != nil {
  1797  			t.Fatal(err)
  1798  		}
  1799  		defer h.Close()
  1800  		drv, err := h.MakeDriver(ctx)
  1801  		if err != nil {
  1802  			t.Fatal(err)
  1803  		}
  1804  		b := blob.NewBucket(drv)
  1805  		defer b.Close()
  1806  
  1807  		err = b.Delete(ctx, "does-not-exist")
  1808  		if err == nil {
  1809  			t.Errorf("got nil want error")
  1810  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  1811  			t.Errorf("got %v want NotFound error", err)
  1812  		} else if !strings.Contains(err.Error(), "does-not-exist") {
  1813  			t.Errorf("got %v want error to include missing key", err)
  1814  		}
  1815  	})
  1816  
  1817  	t.Run("Works", func(t *testing.T) {
  1818  		h, err := newHarness(ctx, t)
  1819  		if err != nil {
  1820  			t.Fatal(err)
  1821  		}
  1822  		defer h.Close()
  1823  		drv, err := h.MakeDriver(ctx)
  1824  		if err != nil {
  1825  			t.Fatal(err)
  1826  		}
  1827  		b := blob.NewBucket(drv)
  1828  		defer b.Close()
  1829  
  1830  		// Create the blob.
  1831  		if err := b.WriteAll(ctx, key, []byte("Hello world"), nil); err != nil {
  1832  			t.Fatal(err)
  1833  		}
  1834  		// Delete it.
  1835  		if err := b.Delete(ctx, key); err != nil {
  1836  			t.Errorf("got unexpected error deleting blob: %v", err)
  1837  		}
  1838  		// Subsequent read fails with NotFound.
  1839  		_, err = b.NewReader(ctx, key, nil)
  1840  		if err == nil {
  1841  			t.Errorf("read after delete got nil, want error")
  1842  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  1843  			t.Errorf("read after delete want NotFound error, got %v", err)
  1844  		} else if !strings.Contains(err.Error(), key) {
  1845  			t.Errorf("got %v want error to include missing key", err)
  1846  		}
  1847  		// Subsequent delete also fails.
  1848  		err = b.Delete(ctx, key)
  1849  		if err == nil {
  1850  			t.Errorf("delete after delete got nil, want error")
  1851  		} else if gcerrors.Code(err) != gcerrors.NotFound {
  1852  			t.Errorf("delete after delete got %v, want NotFound error", err)
  1853  		} else if !strings.Contains(err.Error(), key) {
  1854  			t.Errorf("got %v want error to include missing key", err)
  1855  		}
  1856  	})
  1857  }
  1858  
  1859  // testConcurrentWriteAndRead tests that concurrent writing to multiple blob
  1860  // keys and concurrent reading from multiple blob keys works.
  1861  func testConcurrentWriteAndRead(t *testing.T, newHarness HarnessMaker) {
  1862  	ctx := context.Background()
  1863  	h, err := newHarness(ctx, t)
  1864  	if err != nil {
  1865  		t.Fatal(err)
  1866  	}
  1867  	defer h.Close()
  1868  	drv, err := h.MakeDriver(ctx)
  1869  	if err != nil {
  1870  		t.Fatal(err)
  1871  	}
  1872  	b := blob.NewBucket(drv)
  1873  	defer b.Close()
  1874  
  1875  	// Prepare data. Each of the numKeys blobs has dataSize bytes, with each byte
  1876  	// set to the numeric key index. For example, the blob at "key0" consists of
  1877  	// all dataSize bytes set to 0.
  1878  	const numKeys = 20
  1879  	const dataSize = 4 * 1024
  1880  	keyData := make(map[int][]byte)
  1881  	for k := 0; k < numKeys; k++ {
  1882  		data := make([]byte, dataSize)
  1883  		for i := 0; i < dataSize; i++ {
  1884  			data[i] = byte(k)
  1885  		}
  1886  		keyData[k] = data
  1887  	}
  1888  
  1889  	blobName := func(k int) string {
  1890  		return fmt.Sprintf("key%d", k)
  1891  	}
  1892  
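        	// wg is reused below: first to wait for the writer goroutines, then for the readers.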
  1893  	var wg sync.WaitGroup
  1894  
  1895  	// Write all blobs concurrently.
  1896  	for k := 0; k < numKeys; k++ {
  1897  		wg.Add(1)
  1898  		go func(key int) {
  1899  			defer wg.Done()
  1900  			if err := b.WriteAll(ctx, blobName(key), keyData[key], nil); err != nil {
  1901  				t.Error(err) // t.Fatal must not be called from a non-test goroutine
  1902  			}
  1903  		}(k)
  1904  		defer b.Delete(ctx, blobName(k))
  1905  	}
  1906  	wg.Wait()
  1907  
  1908  	// Read all blobs concurrently and verify that they contain the expected data.
  1909  	for k := 0; k < numKeys; k++ {
  1910  		wg.Add(1)
  1911  		go func(key int) {
  1912  			defer wg.Done()
  1913  			buf, err := b.ReadAll(ctx, blobName(key))
  1914  			if err != nil {
  1915  				t.Error(err) // t.Fatal must not be called from a non-test goroutine
  1916  			} else if !bytes.Equal(buf, keyData[key]) {
  1917  				t.Errorf("read data mismatch for key %d", key)
  1918  			}
  1919  		}(k)
  1921  	}
  1922  	wg.Wait()
  1923  }
  1924  
  1925  // testKeys tests a variety of weird keys.
  1926  func testKeys(t *testing.T, newHarness HarnessMaker) {
  1927  	const keyPrefix = "weird-keys"
  1928  	content := []byte("hello")
  1929  	ctx := context.Background()
  1930  
  1931  	t.Run("non-UTF8 fails", func(t *testing.T) {
  1932  		h, err := newHarness(ctx, t)
  1933  		if err != nil {
  1934  			t.Fatal(err)
  1935  		}
  1936  		defer h.Close()
  1937  		drv, err := h.MakeDriver(ctx)
  1938  		if err != nil {
  1939  			t.Fatal(err)
  1940  		}
  1941  		b := blob.NewBucket(drv)
  1942  		defer b.Close()
  1943  
  1944  		// Write the blob.
  1945  		key := keyPrefix + escape.NonUTF8String
  1946  		if err := b.WriteAll(ctx, key, content, nil); err == nil {
  1947  			t.Error("got nil error, expected error for using non-UTF8 string as key")
  1948  		}
  1949  	})
  1950  
  1951  	for description, key := range escape.WeirdStrings {
  1952  		t.Run(description, func(t *testing.T) {
  1953  			h, err := newHarness(ctx, t)
  1954  			if err != nil {
  1955  				t.Fatal(err)
  1956  			}
  1957  			defer h.Close()
  1958  			drv, err := h.MakeDriver(ctx)
  1959  			if err != nil {
  1960  				t.Fatal(err)
  1961  			}
  1962  			b := blob.NewBucket(drv)
  1963  			defer b.Close()
  1964  
  1965  			// Write the blob.
  1966  			key = keyPrefix + key
  1967  			if err := b.WriteAll(ctx, key, content, nil); err != nil {
  1968  				t.Fatal(err)
  1969  			}
  1970  
  1971  			defer func() {
  1972  				err := b.Delete(ctx, key)
  1973  				if err != nil {
  1974  					t.Error(err)
  1975  				}
  1976  			}()
  1977  
  1978  			// Verify read works.
  1979  			got, err := b.ReadAll(ctx, key)
  1980  			if err != nil {
  1981  				t.Fatal(err)
  1982  			}
  1983  			if !cmp.Equal(got, content) {
  1984  				t.Errorf("got %q want %q", string(got), string(content))
  1985  			}
  1986  
  1987  			// Verify Attributes works.
  1988  			_, err = b.Attributes(ctx, key)
  1989  			if err != nil {
  1990  				t.Error(err)
  1991  			}
  1992  
  1993  			// Verify SignedURL works.
  1994  			url, err := b.SignedURL(ctx, key, nil)
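        			// Drivers that don't support SignedURL return Unimplemented; skip the URL checks in that case.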
  1995  			if gcerrors.Code(err) != gcerrors.Unimplemented {
  1996  				if err != nil {
  1997  					t.Error(err)
  1998  				}
  1999  				client := h.HTTPClient()
  2000  				if client == nil {
  2001  					t.Fatal("can't verify SignedURL, Harness.HTTPClient() returned nil")
  2002  				}
  2003  				resp, err := client.Get(url)
  2004  				if err != nil {
  2005  					t.Fatal(err)
  2006  				}
  2007  				defer resp.Body.Close()
  2008  				if resp.StatusCode != 200 {
  2009  					t.Errorf("got status code %d, want 200", resp.StatusCode)
  2010  				}
  2011  				got, err := ioutil.ReadAll(resp.Body)
  2012  				if err != nil {
  2013  					t.Fatal(err)
  2014  				}
  2015  				if !bytes.Equal(got, content) {
  2016  					t.Errorf("got body %q, want %q", string(got), string(content))
  2017  				}
  2018  			}
  2019  		})
  2020  	}
  2021  }
  2022  
  2023  // testSignedURL tests the functionality of SignedURL.
  2024  func testSignedURL(t *testing.T, newHarness HarnessMaker) {
  2025  	const key = "blob-for-signing"
  2026  	const contents = "hello world"
  2027  
  2028  	ctx := context.Background()
  2029  
  2030  	h, err := newHarness(ctx, t)
  2031  	if err != nil {
  2032  		t.Fatal(err)
  2033  	}
  2034  	defer h.Close()
  2035  
  2036  	drv, err := h.MakeDriver(ctx)
  2037  	if err != nil {
  2038  		t.Fatal(err)
  2039  	}
  2040  	b := blob.NewBucket(drv)
  2041  	defer b.Close()
  2042  
  2043  	// Verify that a negative Expiry gives an error. This is enforced in the
  2044  	// portable type, so it works regardless of driver support.
  2045  	_, err = b.SignedURL(ctx, key, &blob.SignedURLOptions{Expiry: -1 * time.Minute})
  2046  	if err == nil {
  2047  		t.Error("got nil error, expected error for negative SignedURLOptions.Expiry")
  2048  	}
  2049  
  2050  	// Generate real signed URLs for GET, GET with the query params removed, PUT, and DELETE.
  2051  	getURL, err := b.SignedURL(ctx, key, nil)
  2052  	if err != nil {
  2053  		if gcerrors.Code(err) == gcerrors.Unimplemented {
  2054  			t.Skipf("SignedURL not supported")
  2055  			return
  2056  		}
  2057  		t.Fatal(err)
  2058  	} else if getURL == "" {
  2059  		t.Fatal("got empty GET url")
  2060  	}
  2061  	// Copy getURL, but remove all query params. This URL should not be allowed
  2062  	// to GET since the client is unauthorized.
  2063  	getURLNoParamsURL, err := url.Parse(getURL)
  2064  	if err != nil {
  2065  		t.Fatalf("failed to parse getURL: %v", err)
  2066  	}
  2067  	getURLNoParamsURL.RawQuery = ""
  2068  	getURLNoParams := getURLNoParamsURL.String()
  2069  	putURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: http.MethodPut})
  2070  	if err != nil {
  2071  		t.Fatal(err)
  2072  	} else if putURL == "" {
  2073  		t.Fatal("got empty PUT url")
  2074  	}
  2075  	deleteURL, err := b.SignedURL(ctx, key, &blob.SignedURLOptions{Method: http.MethodDelete})
  2076  	if err != nil {
  2077  		t.Fatal(err)
  2078  	} else if deleteURL == "" {
  2079  		t.Fatal("got empty DELETE url")
  2080  	}
  2081  
  2082  	client := h.HTTPClient()
  2083  	if client == nil {
  2084  		t.Fatal("can't verify SignedURL, Harness.HTTPClient() returned nil")
  2085  	}
  2086  
  2087  	// PUT the blob. Try with all URLs, only putURL should work.
  2088  	for _, test := range []struct {
  2089  		urlMethod   string
  2090  		url         string
  2091  		wantSuccess bool
  2092  	}{
  2093  		{http.MethodGet, getURL, false},
  2094  		{http.MethodDelete, deleteURL, false},
  2095  		{http.MethodPut, putURL, true},
  2096  	} {
  2097  		req, err := http.NewRequest(http.MethodPut, test.url, strings.NewReader(contents))
  2098  		if err != nil {
  2099  			t.Fatalf("failed to create PUT HTTP request using %s URL: %v", test.urlMethod, err)
  2100  		}
  2101  		if resp, err := client.Do(req); err != nil {
  2102  			t.Fatalf("PUT failed with %s URL: %v", test.urlMethod, err)
  2103  		} else {
  2104  			defer resp.Body.Close()
  2105  			success := resp.StatusCode >= 200 && resp.StatusCode < 300
  2106  			if success != test.wantSuccess {
  2107  				t.Errorf("PUT with %s URL got status code %d, want 2xx? %v", test.urlMethod, resp.StatusCode, test.wantSuccess)
  2108  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2109  				t.Errorf("%s", gotBody)
  2110  			}
  2111  		}
  2112  	}
  2113  
  2114  	// GET it. Try with all URLs, only getURL should work.
  2115  	for _, test := range []struct {
  2116  		urlMethod   string
  2117  		url         string
  2118  		wantSuccess bool
  2119  	}{
  2120  		{http.MethodDelete, deleteURL, false},
  2121  		{http.MethodPut, putURL, false},
  2122  		{http.MethodGet, getURLNoParams, false},
  2123  		{http.MethodGet, getURL, true},
  2124  	} {
  2125  		if resp, err := client.Get(test.url); err != nil {
  2126  			t.Fatalf("GET with %s URL failed: %v", test.urlMethod, err)
  2127  		} else {
  2128  			defer resp.Body.Close()
  2129  			success := resp.StatusCode >= 200 && resp.StatusCode < 300
  2130  			if success != test.wantSuccess {
  2131  				t.Errorf("GET with %s URL got status code %d, want 2xx? %v", test.urlMethod, resp.StatusCode, test.wantSuccess)
  2132  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2133  				t.Errorf("%s", gotBody)
  2134  			} else if success {
  2135  				gotBody, err := ioutil.ReadAll(resp.Body)
  2136  				if err != nil {
  2137  					t.Errorf("GET with %s URL failed to read response body: %v", test.urlMethod, err)
  2138  				} else if gotBodyStr := string(gotBody); gotBodyStr != contents {
  2139  					t.Errorf("GET with %s URL got body %q, want %q", test.urlMethod, gotBodyStr, contents)
  2140  				}
  2141  			}
  2142  		}
  2143  	}
  2144  
  2145  	// DELETE it. Try with all URLs, only deleteURL should work.
  2146  	for _, test := range []struct {
  2147  		urlMethod   string
  2148  		url         string
  2149  		wantSuccess bool
  2150  	}{
  2151  		{http.MethodGet, getURL, false},
  2152  		{http.MethodPut, putURL, false},
  2153  		{http.MethodDelete, deleteURL, true},
  2154  	} {
  2155  		req, err := http.NewRequest(http.MethodDelete, test.url, nil)
  2156  		if err != nil {
  2157  			t.Fatalf("failed to create DELETE HTTP request using %s URL: %v", test.urlMethod, err)
  2158  		}
  2159  		if resp, err := client.Do(req); err != nil {
  2160  			t.Fatalf("DELETE with %s URL failed: %v", test.urlMethod, err)
  2161  		} else {
  2162  			defer resp.Body.Close()
  2163  			success := resp.StatusCode >= 200 && resp.StatusCode < 300
  2164  			if success != test.wantSuccess {
  2165  				t.Errorf("DELETE with %s URL got status code %d, want 2xx? %v", test.urlMethod, resp.StatusCode, test.wantSuccess)
  2166  				gotBody, _ := ioutil.ReadAll(resp.Body)
  2167  				t.Errorf("%s", gotBody)
  2168  			}
  2169  		}
  2170  	}
  2171  
  2172  	// GET should fail now that the blob has been deleted.
  2173  	if resp, err := client.Get(getURL); err != nil {
  2174  		t.Errorf("GET after DELETE failed: %v", err)
  2175  	} else {
  2176  		defer resp.Body.Close()
  2177  		if resp.StatusCode != 404 {
  2178  			t.Errorf("GET after DELETE got status code %d, want 404", resp.StatusCode)
  2179  			gotBody, _ := ioutil.ReadAll(resp.Body)
  2180  			t.Errorf("%s", gotBody)
  2181  		}
  2182  	}
  2183  }
  2184  
  2185  // testAs tests the various As functions, using AsTest.
  2186  func testAs(t *testing.T, newHarness HarnessMaker, st AsTest) {
  2187  	const (
  2188  		dir     = "mydir"
  2189  		key     = dir + "/as-test"
  2190  		copyKey = dir + "/as-test-copy"
  2191  	)
  2192  	var content = []byte("hello world")
  2193  	ctx := context.Background()
  2194  
  2195  	h, err := newHarness(ctx, t)
  2196  	if err != nil {
  2197  		t.Fatal(err)
  2198  	}
  2199  	defer h.Close()
  2200  
  2201  	drv, err := h.MakeDriver(ctx)
  2202  	if err != nil {
  2203  		t.Fatal(err)
  2204  	}
  2205  	b := blob.NewBucket(drv)
  2206  	defer b.Close()
  2207  
  2208  	// Verify Bucket.As.
  2209  	if err := st.BucketCheck(b); err != nil {
  2210  		t.Error(err)
  2211  	}
  2212  
  2213  	// Create a blob, using the provided callback.
  2214  	if err := b.WriteAll(ctx, key, content, &blob.WriterOptions{BeforeWrite: st.BeforeWrite}); err != nil {
  2215  		t.Error(err)
  2216  	}
  2217  	defer func() { _ = b.Delete(ctx, key) }()
  2218  
  2219  	// Verify Attributes.As.
  2220  	attrs, err := b.Attributes(ctx, key)
  2221  	if err != nil {
  2222  		t.Fatal(err)
  2223  	}
  2224  	if err := st.AttributesCheck(attrs); err != nil {
  2225  		t.Error(err)
  2226  	}
  2227  
  2228  	// Verify Reader.As.
  2229  	r, err := b.NewReader(ctx, key, &blob.ReaderOptions{BeforeRead: st.BeforeRead})
  2230  	if err != nil {
  2231  		t.Fatal(err)
  2232  	}
  2233  	defer r.Close()
  2234  	if err := st.ReaderCheck(r); err != nil {
  2235  		t.Error(err)
  2236  	}
  2237  
  2238  	// Verify ListObject.As for the directory.
  2239  	iter := b.List(&blob.ListOptions{Prefix: dir, Delimiter: "/", BeforeList: st.BeforeList})
  2240  	found := false
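        	// With the "/" delimiter, the listing should collapse to exactly one entry: the directory itself.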
  2241  	for {
  2242  		obj, err := iter.Next(ctx)
  2243  		if err == io.EOF {
  2244  			break
  2245  		}
  2246  		if found {
  2247  			t.Fatal("got a second object returned from List, only wanted one")
  2248  		}
  2249  		found = true
  2250  		if err != nil {
  2251  			log.Fatal(err)
  2252  		}
  2253  		if err := st.ListObjectCheck(obj); err != nil {
  2254  			t.Error(err)
  2255  		}
  2256  	}
  2257  
  2258  	// Verify ListObject.As for the blob.
  2259  	iter = b.List(&blob.ListOptions{Prefix: key, BeforeList: st.BeforeList})
  2260  	found = false
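        	// Listing with the blob's full key as the prefix should likewise return exactly one entry.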
  2261  	for {
  2262  		obj, err := iter.Next(ctx)
  2263  		if err == io.EOF {
  2264  			break
  2265  		}
  2266  		if found {
  2267  			t.Fatal("got a second object returned from List, only wanted one")
  2268  		}
  2269  		found = true
  2270  		if err != nil {
  2271  			log.Fatal(err)
  2272  		}
  2273  		if err := st.ListObjectCheck(obj); err != nil {
  2274  			t.Error(err)
  2275  		}
  2276  	}
  2277  
  2278  	_, gotErr := b.NewReader(ctx, "key-does-not-exist", nil)
  2279  	if gotErr == nil {
  2280  		t.Fatalf("got nil error from NewReader for nonexistent key, want an error")
  2281  	}
  2282  	if err := st.ErrorCheck(b, gotErr); err != nil {
  2283  		t.Error(err)
  2284  	}
  2285  
  2286  	// Copy the blob, using the provided callback.
  2287  	if err := b.Copy(ctx, copyKey, key, &blob.CopyOptions{BeforeCopy: st.BeforeCopy}); err != nil {
  2288  		t.Error(err)
  2289  	} else {
  2290  		defer func() { _ = b.Delete(ctx, copyKey) }()
  2291  	}
  2292  }
  2293  
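        // benchmarkRead measures parallel reads of a single large blob that is written once during setup.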
  2294  func benchmarkRead(b *testing.B, bkt *blob.Bucket) {
  2295  	ctx := context.Background()
  2296  	const key = "readbenchmark-blob"
  2297  
  2298  	content := loadTestData(b, "test-large.jpg")
  2299  	if err := bkt.WriteAll(ctx, key, content, nil); err != nil {
  2300  		b.Fatal(err)
  2301  	}
  2302  	defer func() {
  2303  		_ = bkt.Delete(ctx, key)
  2304  	}()
  2305  
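        	// Time only the parallel reads below; the setup write above is excluded.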
  2306  	b.ResetTimer()
  2307  	b.RunParallel(func(pb *testing.PB) {
  2308  		for pb.Next() {
  2309  			buf, err := bkt.ReadAll(ctx, key)
  2310  			if err != nil {
  2311  				b.Error(err)
  2312  			}
  2313  			if !bytes.Equal(buf, content) {
  2314  				b.Error("read didn't match write")
  2315  			}
  2316  		}
  2317  	})
  2318  }
  2319  
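        // benchmarkWriteReadDelete measures a full write/read/delete cycle, with each parallel
        // goroutine operating on its own key.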
  2320  func benchmarkWriteReadDelete(b *testing.B, bkt *blob.Bucket) {
  2321  	ctx := context.Background()
  2322  	const baseKey = "writereaddeletebenchmark-blob-"
  2323  
  2324  	content := loadTestData(b, "test-large.jpg")
  2325  	var nextID uint32
  2326  
  2327  	b.ResetTimer()
  2328  	b.RunParallel(func(pb *testing.PB) {
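        		// Derive a distinct key per goroutine so parallel iterations don't interfere with each other.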
  2329  		key := fmt.Sprintf("%s%d", baseKey, atomic.AddUint32(&nextID, 1))
  2330  		for pb.Next() {
  2331  			if err := bkt.WriteAll(ctx, key, content, nil); err != nil {
  2332  				b.Error(err)
  2333  				continue
  2334  			}
  2335  			buf, err := bkt.ReadAll(ctx, key)
  2336  			if err != nil {
  2337  				b.Error(err)
  2338  			}
  2339  			if !bytes.Equal(buf, content) {
  2340  				b.Error("read didn't match write")
  2341  			}
  2342  			if err := bkt.Delete(ctx, key); err != nil {
  2343  				b.Error(err)
  2344  				continue
  2345  			}
  2346  		}
  2347  	})
  2348  }
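
        // The following is an illustrative sketch (not part of this file's API surface) of how a
        // driver package typically wires this suite up in its own tests. It assumes this package's
        // RunConformanceTests/RunBenchmarks entry points, plus driver-side newHarness, verifyAs,
        // and openTestBucket helpers, all of which are hypothetical names here:
        //
        //	func TestConformance(t *testing.T) {
        //		asTests := []drivertest.AsTest{verifyAs{}}
        //		drivertest.RunConformanceTests(t, newHarness, asTests)
        //	}
        //
        //	func BenchmarkMyDriver(b *testing.B) {
        //		bkt := openTestBucket(b) // must return a *blob.Bucket backed by the driver
        //		defer bkt.Close()
        //		drivertest.RunBenchmarks(b, bkt)
        //	}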