github.com/SaurabhDubey-Groww/go-cloud@v0.0.0-20221124105541-b26c29285fd8/pubsub/drivertest/drivertest.go

     1  // Copyright 2018 The Go Cloud Development Kit Authors
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     https://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
     15  // Package drivertest provides a conformance test for implementations of
     16  // the pubsub driver interfaces.
    17  package drivertest // import "gocloud.dev/pubsub/drivertest"
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"errors"
    23  	"sort"
    24  	"strconv"
    25  	"testing"
    26  	"time"
    27  
    28  	"github.com/google/go-cmp/cmp"
    29  	"github.com/google/go-cmp/cmp/cmpopts"
    30  	"gocloud.dev/gcerrors"
    31  	"gocloud.dev/internal/escape"
    32  	"gocloud.dev/internal/retry"
    33  	"gocloud.dev/pubsub"
    34  	"gocloud.dev/pubsub/batcher"
    35  	"gocloud.dev/pubsub/driver"
    36  	"golang.org/x/sync/errgroup"
    37  )
    38  
     39  // Harness describes the functionality test harnesses must provide to run
    40  // conformance tests.
    41  type Harness interface {
    42  	// CreateTopic creates a new topic and returns a driver.Topic
    43  	// for testing. The topic may have to be removed manually if the test is
    44  	// abruptly terminated or the network connection fails.
    45  	CreateTopic(ctx context.Context, testName string) (dt driver.Topic, cleanup func(), err error)
    46  
    47  	// MakeNonexistentTopic makes a driver.Topic referencing a topic that
    48  	// does not exist.
    49  	MakeNonexistentTopic(ctx context.Context) (driver.Topic, error)
    50  
    51  	// CreateSubscription creates a new subscription, subscribed
    52  	// to the given topic, and returns a driver.Subscription for testing. The
    53  	// subscription may have to be cleaned up manually if the test is abruptly
    54  	// terminated or the network connection fails.
    55  	CreateSubscription(ctx context.Context, t driver.Topic, testName string) (ds driver.Subscription, cleanup func(), err error)
    56  
    57  	// MakeNonexistentSubscription makes a driver.Subscription referencing a
    58  	// subscription that does not exist.
    59  	MakeNonexistentSubscription(ctx context.Context) (ds driver.Subscription, cleanup func(), err error)
    60  
    61  	// Close closes resources used by the harness, but does not call Close
    62  	// on the Topics and Subscriptions generated by the Harness.
    63  	Close()
    64  
     65  	// MaxBatchSizes returns the maximum batch sizes for SendBatch and for
     66  	// SendAcks/SendNacks, or 0 if there is no maximum.
    67  	MaxBatchSizes() (int, int)
    68  
    69  	// SupportsMultipleSubscriptions reports whether the driver supports
    70  	// multiple subscriptions for the same topic.
    71  	SupportsMultipleSubscriptions() bool
    72  }
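
// A minimal sketch of what a driver's Harness implementation might look like.
// The mydriver package, its Client type, and the OpenRawTopic/DeleteTopic
// calls below are assumptions for illustration only; they are not part of
// this package or of any real driver.
//
//	type harness struct {
//		client *mydriver.Client // connection to the service under test
//	}
//
//	func (h *harness) CreateTopic(ctx context.Context, testName string) (driver.Topic, func(), error) {
//		dt, err := h.client.OpenRawTopic(ctx, testName)
//		if err != nil {
//			return nil, nil, err
//		}
//		cleanup := func() { _ = h.client.DeleteTopic(ctx, testName) }
//		return dt, cleanup, nil
//	}
//
//	func (h *harness) MakeNonexistentTopic(ctx context.Context) (driver.Topic, error) {
//		return h.client.OpenRawTopic(ctx, "nonexistent-topic-for-tests")
//	}
//
//	// CreateSubscription, MakeNonexistentSubscription, Close, MaxBatchSizes,
//	// and SupportsMultipleSubscriptions follow the same pattern.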
    73  
    74  // HarnessMaker describes functions that construct a harness for running tests.
    75  // It is called exactly once per test; Harness.Close() will be called when the test is complete.
    76  type HarnessMaker func(ctx context.Context, t *testing.T) (Harness, error)
    77  
    78  // AsTest represents a test of As functionality.
    79  // The conformance test:
    80  // 1. Calls TopicCheck.
    81  // 2. Calls SubscriptionCheck.
     82  // 3. Sends a message, setting Message.BeforeSend to BeforeSend
     83  //    and Message.AfterSend to AfterSend.
     84  // 4. Receives the message and calls MessageCheck.
    87  // 5. Calls TopicErrorCheck.
    88  // 6. Calls SubscriptionErrorCheck.
    89  type AsTest interface {
    90  	// Name should return a descriptive name for the test.
    91  	Name() string
     92  	// TopicCheck will be called to allow verification of Topic.As.
    93  	TopicCheck(t *pubsub.Topic) error
    94  	// SubscriptionCheck will be called to allow verification of Subscription.As.
    95  	SubscriptionCheck(s *pubsub.Subscription) error
    96  	// TopicErrorCheck will be called to allow verification of Topic.ErrorAs.
    97  	// The error will be the one returned from SendBatch when called with
    98  	// a non-existent topic.
    99  	TopicErrorCheck(t *pubsub.Topic, err error) error
   100  	// SubscriptionErrorCheck will be called to allow verification of
   101  	// Subscription.ErrorAs.
   102  	// The error will be the one returned from ReceiveBatch when called with
   103  	// a non-existent subscription.
   104  	SubscriptionErrorCheck(s *pubsub.Subscription, err error) error
   105  	// MessageCheck will be called to allow verification of Message.As.
   106  	MessageCheck(m *pubsub.Message) error
   107  	// BeforeSend will be used as Message.BeforeSend as part of sending a test
   108  	// message.
   109  	BeforeSend(as func(interface{}) bool) error
   110  	// AfterSend will be used as Message.AfterSend as part of sending a test
   111  	// message.
   112  	AfterSend(as func(interface{}) bool) error
   113  }
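
// A sketch of an AsTest for a hypothetical driver whose Topic.As exposes a
// *mydriver.RawTopic. The mydriver names are assumptions for illustration;
// only the pubsub and drivertest identifiers are real.
//
//	type myAsTest struct{}
//
//	func (myAsTest) Name() string { return "mydriver" }
//
//	func (myAsTest) TopicCheck(topic *pubsub.Topic) error {
//		var rt *mydriver.RawTopic
//		if !topic.As(&rt) {
//			return fmt.Errorf("cast failed for %T", &rt)
//		}
//		return nil
//	}
//
//	// SubscriptionCheck, MessageCheck, TopicErrorCheck, SubscriptionErrorCheck,
//	// BeforeSend, and AfterSend are implemented analogously for their types.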
   114  
   115  // Many tests set the maximum batch size to 1 to make record/replay stable.
   116  var batchSizeOne = &batcher.Options{MaxBatchSize: 1, MaxHandlers: 1}
   117  
   118  type verifyAsFailsOnNil struct{}
   119  
   120  func (verifyAsFailsOnNil) Name() string {
   121  	return "verify As returns false when passed nil"
   122  }
   123  
   124  func (verifyAsFailsOnNil) TopicCheck(t *pubsub.Topic) error {
   125  	if t.As(nil) {
   126  		return errors.New("want Topic.As to return false when passed nil")
   127  	}
   128  	return nil
   129  }
   130  
   131  func (verifyAsFailsOnNil) SubscriptionCheck(s *pubsub.Subscription) error {
   132  	if s.As(nil) {
   133  		return errors.New("want Subscription.As to return false when passed nil")
   134  	}
   135  	return nil
   136  }
   137  
   138  func (verifyAsFailsOnNil) TopicErrorCheck(t *pubsub.Topic, err error) (ret error) {
   139  	defer func() {
   140  		if recover() == nil {
   141  			ret = errors.New("want Topic.ErrorAs to panic when passed nil")
   142  		}
   143  	}()
   144  	t.ErrorAs(err, nil)
   145  	return nil
   146  }
   147  
   148  func (verifyAsFailsOnNil) SubscriptionErrorCheck(s *pubsub.Subscription, err error) (ret error) {
   149  	defer func() {
   150  		if recover() == nil {
   151  			ret = errors.New("want Subscription.ErrorAs to panic when passed nil")
   152  		}
   153  	}()
   154  	s.ErrorAs(err, nil)
   155  	return nil
   156  }
   157  
   158  func (verifyAsFailsOnNil) MessageCheck(m *pubsub.Message) error {
   159  	if m.As(nil) {
   160  		return errors.New("want Message.As to return false when passed nil")
   161  	}
   162  	return nil
   163  }
   164  
   165  func (verifyAsFailsOnNil) BeforeSend(as func(interface{}) bool) error {
   166  	if as(nil) {
   167  		return errors.New("want Message.BeforeSend's As function to return false when passed nil")
   168  	}
   169  	return nil
   170  }
   171  
   172  func (verifyAsFailsOnNil) AfterSend(as func(interface{}) bool) error {
   173  	if as(nil) {
   174  		return errors.New("want Message.AfterSend's As function to return false when passed nil")
   175  	}
   176  	return nil
   177  }
   178  
   179  // RunConformanceTests runs conformance tests for driver implementations of pubsub.
   180  func RunConformanceTests(t *testing.T, newHarness HarnessMaker, asTests []AsTest) {
   181  	tests := map[string]func(t *testing.T, newHarness HarnessMaker){
   182  		"TestSendReceive":                          testSendReceive,
   183  		"TestSendReceiveTwo":                       testSendReceiveTwo,
   184  		"TestSendReceiveJSON":                      testSendReceiveJSON,
   185  		"TestNack":                                 testNack,
   186  		"TestBatching":                             testBatching,
   187  		"TestDoubleAck":                            testDoubleAck,
   188  		"TestErrorOnSendToClosedTopic":             testErrorOnSendToClosedTopic,
   189  		"TestErrorOnReceiveFromClosedSubscription": testErrorOnReceiveFromClosedSubscription,
   190  		"TestCancelSendReceive":                    testCancelSendReceive,
   191  		"TestNonExistentTopicSucceedsOnOpenButFailsOnSend":           testNonExistentTopicSucceedsOnOpenButFailsOnSend,
   192  		"TestNonExistentSubscriptionSucceedsOnOpenButFailsOnReceive": testNonExistentSubscriptionSucceedsOnOpenButFailsOnReceive,
   193  		"TestMetadata":           testMetadata,
   194  		"TestNonUTF8MessageBody": testNonUTF8MessageBody,
   195  	}
   196  	for name, test := range tests {
   197  		t.Run(name, func(t *testing.T) { test(t, newHarness) })
   198  	}
   199  
   200  	asTests = append(asTests, verifyAsFailsOnNil{})
   201  	t.Run("TestAs", func(t *testing.T) {
   202  		for _, st := range asTests {
   203  			if st.Name() == "" {
   204  				t.Fatalf("AsTest.Name is required")
   205  			}
   206  			t.Run(st.Name(), func(t *testing.T) { testAs(t, newHarness, st) })
   207  		}
   208  	})
   209  }
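
// A driver's test package typically wires its HarnessMaker and AsTests into
// RunConformanceTests like this (newHarness and myAsTest are names the driver
// tests would define, as sketched above; they are not part of this package):
//
//	func TestConformance(t *testing.T) {
//		asTests := []drivertest.AsTest{myAsTest{}}
//		drivertest.RunConformanceTests(t, newHarness, asTests)
//	}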
   210  
   211  // RunBenchmarks runs benchmarks for driver implementations of pubsub.
   212  func RunBenchmarks(b *testing.B, topic *pubsub.Topic, sub *pubsub.Subscription) {
   213  	b.Run("BenchmarkReceive", func(b *testing.B) {
   214  		benchmark(b, topic, sub, false)
   215  	})
   216  	b.Run("BenchmarkSend", func(b *testing.B) {
   217  		benchmark(b, topic, sub, true)
   218  	})
   219  }
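
// RunBenchmarks takes portable *pubsub.Topic and *pubsub.Subscription values,
// so a driver benchmark can open them however it normally would. A sketch,
// assuming a hypothetical "mydriver://" URL scheme registered by the driver:
//
//	func BenchmarkMyDriver(b *testing.B) {
//		ctx := context.Background()
//		topic, err := pubsub.OpenTopic(ctx, "mydriver://benchmark-topic")
//		if err != nil {
//			b.Fatal(err)
//		}
//		defer topic.Shutdown(ctx)
//		sub, err := pubsub.OpenSubscription(ctx, "mydriver://benchmark-subscription")
//		if err != nil {
//			b.Fatal(err)
//		}
//		defer sub.Shutdown(ctx)
//		drivertest.RunBenchmarks(b, topic, sub)
//	}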
   220  
   221  func testNonExistentTopicSucceedsOnOpenButFailsOnSend(t *testing.T, newHarness HarnessMaker) {
   222  	// Set up.
   223  	ctx := context.Background()
   224  	h, err := newHarness(ctx, t)
   225  	if err != nil {
   226  		t.Fatal(err)
   227  	}
   228  	defer h.Close()
   229  
   230  	dt, err := h.MakeNonexistentTopic(ctx)
   231  	if err != nil {
   232  		// Failure shouldn't happen for non-existent topics until messages are sent
   233  		// to them.
   234  		t.Fatalf("creating a local topic that doesn't exist on the server: %v", err)
   235  	}
   236  	topic := pubsub.NewTopic(dt, nil)
   237  	defer func() {
   238  		if err := topic.Shutdown(ctx); err != nil {
   239  			t.Error(err)
   240  		}
   241  	}()
   242  
   243  	m := &pubsub.Message{}
   244  	err = topic.Send(ctx, m)
   245  	if err == nil || gcerrors.Code(err) != gcerrors.NotFound {
   246  		t.Errorf("got error %v for send to non-existent topic, want code=NotFound", err)
   247  	}
   248  }
   249  
   250  func testNonExistentSubscriptionSucceedsOnOpenButFailsOnReceive(t *testing.T, newHarness HarnessMaker) {
   251  	// Set up.
   252  	ctx := context.Background()
   253  	h, err := newHarness(ctx, t)
   254  	if err != nil {
   255  		t.Fatal(err)
   256  	}
   257  	defer h.Close()
   258  
   259  	ds, cleanup, err := h.MakeNonexistentSubscription(ctx)
   260  	if err != nil {
   261  		t.Fatalf("failed to make non-existent subscription: %v", err)
   262  	}
   263  	defer cleanup()
   264  	sub := pubsub.NewSubscription(ds, batchSizeOne, batchSizeOne)
   265  	defer func() {
   266  		if err := sub.Shutdown(ctx); err != nil {
   267  			t.Error(err)
   268  		}
   269  	}()
   270  
   271  	// The test will hang here if the message isn't available, so use a shorter timeout.
   272  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   273  	defer cancel()
   274  	_, err = sub.Receive(ctx2)
   275  	if err == nil || ctx2.Err() != nil || gcerrors.Code(err) != gcerrors.NotFound {
   276  		t.Errorf("got error %v for receive from non-existent subscription, want code=NotFound", err)
   277  	}
   278  }
   279  
   280  func testSendReceive(t *testing.T, newHarness HarnessMaker) {
   281  	// Set up.
   282  	ctx := context.Background()
   283  	h, err := newHarness(ctx, t)
   284  	if err != nil {
   285  		t.Fatal(err)
   286  	}
   287  	defer h.Close()
   288  	topic, sub, cleanup, err := makePair(ctx, t, h)
   289  	if err != nil {
   290  		t.Fatal(err)
   291  	}
   292  	defer cleanup()
   293  
   294  	want := publishN(ctx, t, topic, 3)
   295  	got := receiveN(ctx, t, sub, len(want))
   296  
   297  	// Verify LoggableID is set.
   298  	for _, msg := range got {
   299  		if msg.LoggableID == "" {
   300  			t.Errorf("msg.LoggableID was empty, should be set")
   301  		}
   302  	}
   303  
   304  	// Check that the received messages match the sent ones.
   305  	if diff := diffMessageSets(got, want); diff != "" {
   306  		t.Error(diff)
   307  	}
   308  }
   309  
   310  // Receive from two subscriptions to the same topic.
   311  // Verify both get all the messages.
   312  func testSendReceiveTwo(t *testing.T, newHarness HarnessMaker) {
   313  	// Set up.
   314  	ctx := context.Background()
   315  	h, err := newHarness(ctx, t)
   316  	if err != nil {
   317  		t.Fatal(err)
   318  	}
   319  	defer h.Close()
   320  	if !h.SupportsMultipleSubscriptions() {
   321  		t.Skip("multiple subscriptions to a topic not supported")
   322  	}
   323  
   324  	dt, cleanup, err := h.CreateTopic(ctx, t.Name())
   325  	if err != nil {
   326  		t.Fatal(err)
   327  	}
   328  	defer cleanup()
   329  	topic := pubsub.NewTopic(dt, batchSizeOne)
   330  	defer func() {
   331  		if err := topic.Shutdown(ctx); err != nil {
   332  			t.Error(err)
   333  		}
   334  	}()
   335  
   336  	var ss []*pubsub.Subscription
   337  	for i := 0; i < 2; i++ {
   338  		ds, cleanup, err := h.CreateSubscription(ctx, dt, t.Name())
   339  		if err != nil {
   340  			t.Fatal(err)
   341  		}
   342  		defer cleanup()
   343  		s := pubsub.NewSubscription(ds, batchSizeOne, batchSizeOne)
   344  		defer func() {
   345  			if err := s.Shutdown(ctx); err != nil {
   346  				t.Error(err)
   347  			}
   348  		}()
   349  		ss = append(ss, s)
   350  	}
   351  	want := publishN(ctx, t, topic, 3)
   352  	for i, s := range ss {
   353  		got := receiveN(ctx, t, s, len(want))
   354  		if diff := diffMessageSets(got, want); diff != "" {
   355  			t.Errorf("sub #%d: %s", i, diff)
   356  		}
   357  	}
   358  }
   359  
   360  func testSendReceiveJSON(t *testing.T, newHarness HarnessMaker) {
   361  	const json = `{"Foo": "Bar"}`
   362  	// Set up.
   363  	ctx := context.Background()
   364  	h, err := newHarness(ctx, t)
   365  	if err != nil {
   366  		t.Fatal(err)
   367  	}
   368  	defer h.Close()
   369  	topic, sub, cleanup, err := makePair(ctx, t, h)
   370  	if err != nil {
   371  		t.Fatal(err)
   372  	}
   373  	defer cleanup()
   374  
   375  	sendM := &pubsub.Message{Body: []byte(json)}
   376  	if err := topic.Send(ctx, sendM); err != nil {
   377  		t.Fatal(err)
   378  	}
   379  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   380  	defer cancel()
   381  	receiveM, err := sub.Receive(ctx2)
   382  	if err != nil {
   383  		t.Fatal(err)
   384  	}
   385  	receiveM.Ack()
   386  	if diff := diffMessageSets([]*pubsub.Message{receiveM}, []*pubsub.Message{sendM}); diff != "" {
   387  		t.Error(diff)
   388  	}
   389  }
   390  
   391  func testNack(t *testing.T, newHarness HarnessMaker) {
   392  	const nMessages = 2
   393  
   394  	// Set up.
   395  	ctx := context.Background()
   396  	h, err := newHarness(ctx, t)
   397  	if err != nil {
   398  		t.Fatal(err)
   399  	}
   400  	defer h.Close()
   401  	dt, topicCleanup, err := h.CreateTopic(ctx, t.Name())
   402  	if err != nil {
   403  		t.Fatal(err)
   404  	}
   405  	defer topicCleanup()
   406  	ds, subCleanup, err := h.CreateSubscription(ctx, dt, t.Name())
   407  	if err != nil {
   408  		t.Fatal(err)
   409  	}
   410  	defer subCleanup()
   411  	if !ds.CanNack() {
   412  		t.Skip("Nack not supported")
   413  	}
   414  	topic := pubsub.NewTopic(dt, batchSizeOne)
   415  	defer func() {
   416  		if err := topic.Shutdown(ctx); err != nil {
   417  			t.Error(err)
   418  		}
   419  	}()
   420  	sub := pubsub.NewSubscription(ds, batchSizeOne, batchSizeOne)
   421  	defer func() {
   422  		if err := sub.Shutdown(ctx); err != nil {
   423  			t.Error(err)
   424  		}
   425  	}()
   426  
   427  	want := publishN(ctx, t, topic, nMessages)
   428  
   429  	// Get the messages, but nack them.
   430  	// Make sure to nack after receiving all of them; otherwise, we could
   431  	// receive one of the messages twice instead of receiving all nMessages.
   432  	// The test will hang here if the messages aren't redelivered, so use a shorter timeout.
   433  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   434  	defer cancel()
   435  	var got []*pubsub.Message
   436  	for i := 0; i < nMessages; i++ {
   437  		m, err := sub.Receive(ctx2)
   438  		if err != nil {
   439  			t.Fatal(err)
   440  		}
   441  		got = append(got, m)
   442  	}
   443  	for _, m := range got {
   444  		m.Nack()
   445  	}
   446  	// Check that the received messages match the sent ones.
   447  	if diff := diffMessageSets(got, want); diff != "" {
   448  		t.Error(diff)
   449  	}
   450  	// The test will hang here if the messages aren't redelivered, so use a shorter timeout.
   451  	ctx2, cancel = context.WithTimeout(ctx, 30*time.Second)
   452  	defer cancel()
   453  
   454  	got = nil
   455  	for i := 0; i < nMessages; i++ {
   456  		m, err := sub.Receive(ctx2)
   457  		if err != nil {
   458  			t.Fatal(err)
   459  		}
   460  		got = append(got, m)
   461  		m.Ack()
   462  	}
   463  	if diff := diffMessageSets(got, want); diff != "" {
   464  		t.Error(diff)
   465  	}
   466  }
   467  
   468  func testBatching(t *testing.T, newHarness HarnessMaker) {
   469  	const nMessages = 12 // must be divisible by 2
   470  	const batchSize = nMessages / 2
   471  
   472  	// Set up.
   473  	ctx := context.Background()
   474  	h, err := newHarness(ctx, t)
   475  	if err != nil {
   476  		t.Fatal(err)
   477  	}
   478  	defer h.Close()
   479  	maxSendBatch, maxAckBatch := h.MaxBatchSizes()
   480  
   481  	dt, topicCleanup, err := h.CreateTopic(ctx, t.Name())
   482  	if err != nil {
   483  		t.Fatal(err)
   484  	}
   485  	defer topicCleanup()
   486  	ds, subCleanup, err := h.CreateSubscription(ctx, dt, t.Name())
   487  	if err != nil {
   488  		t.Fatal(err)
   489  	}
   490  	defer subCleanup()
   491  
   492  	sendBatchOpts := &batcher.Options{MinBatchSize: batchSize, MaxBatchSize: batchSize}
   493  	// If the driver doesn't support batchSize batches, fall back to size 1.
   494  	if maxSendBatch != 0 && batchSize > maxSendBatch {
   495  		sendBatchOpts = batchSizeOne
   496  	}
   497  	topic := pubsub.NewTopic(dt, sendBatchOpts)
   498  	defer func() {
   499  		if err := topic.Shutdown(ctx); err != nil {
   500  			t.Error(err)
   501  		}
   502  	}()
   503  	ackBatchOpts := &batcher.Options{MinBatchSize: batchSize, MaxBatchSize: batchSize}
   504  	// If the driver doesn't support batchSize batches, fall back to size 1.
   505  	if maxAckBatch != 0 && batchSize > maxAckBatch {
   506  		ackBatchOpts = batchSizeOne
   507  	}
   508  	sub := pubsub.NewSubscription(ds, batchSizeOne, ackBatchOpts)
   509  	defer func() {
   510  		if err := sub.Shutdown(ctx); err != nil {
   511  			t.Error(err)
   512  		}
   513  	}()
   514  
   515  	// Publish nMessages. We have to do them asynchronously because topic.Send
   516  	// blocks until the message is sent, and these messages won't be sent until
   517  	// all batchSize are queued.
   518  	// Note: this test uses the same Body for each message, because the order
   519  	// that they appear in the SendBatch is not stable.
   520  	gr, grctx := errgroup.WithContext(ctx)
   521  	var want []*pubsub.Message
   522  	for i := 0; i < nMessages; i++ {
   523  		m := &pubsub.Message{Body: []byte("hello world")}
   524  		want = append(want, m)
   525  		gr.Go(func() error { return topic.Send(grctx, m) })
   526  	}
   527  	if err := gr.Wait(); err != nil {
   528  		t.Fatal(err)
   529  	}
   530  
   531  	// Get the messages.
   532  	// The test will hang here if the messages aren't delivered, so use a shorter timeout.
   533  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   534  	defer cancel()
   535  	var got []*pubsub.Message
   536  	for i := 0; i < nMessages; i++ {
   537  		m, err := sub.Receive(ctx2)
   538  		if err != nil {
   539  			t.Fatal(err)
   540  		}
   541  		got = append(got, m)
   542  		m.Ack()
   543  	}
   544  	if diff := diffMessageSets(got, want); diff != "" {
   545  		t.Error(diff)
   546  	}
   547  }
   548  
   549  func testDoubleAck(t *testing.T, newHarness HarnessMaker) {
   550  	// Set up.
   551  	ctx := context.Background()
   552  	h, err := newHarness(ctx, t)
   553  	if err != nil {
   554  		t.Fatal(err)
   555  	}
   556  	defer h.Close()
   557  	dt, topicCleanup, err := h.CreateTopic(ctx, t.Name())
   558  	if err != nil {
   559  		t.Fatal(err)
   560  	}
   561  	defer topicCleanup()
   562  	ds, subCleanup, err := h.CreateSubscription(ctx, dt, t.Name())
   563  	if err != nil {
   564  		t.Fatal(err)
   565  	}
   566  	defer subCleanup()
   567  
   568  	// Publish 3 messages.
   569  	for i := 0; i < 3; i++ {
   570  		err := dt.SendBatch(ctx, []*driver.Message{{Body: []byte(strconv.Itoa(i))}})
   571  		if err != nil {
   572  			t.Fatal(err)
   573  		}
   574  	}
   575  
   576  	// Retrieve the messages.
   577  	// The test will hang here if the messages aren't delivered, so use a shorter timeout.
   578  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   579  	defer cancel()
   580  	var dms []*driver.Message
   581  	for len(dms) != 3 {
   582  		curdms, err := ds.ReceiveBatch(ctx2, 3)
   583  		if err != nil {
   584  			t.Fatal(err)
   585  		}
   586  		if err := ctx2.Err(); err != nil {
   587  			t.Fatalf("never received expected messages: %v", err)
   588  		}
   589  		dms = append(dms, curdms...)
   590  	}
   591  
   592  	// Ack the first two messages.
   593  	err = ds.SendAcks(ctx, []driver.AckID{dms[0].AckID, dms[1].AckID})
   594  	if err != nil {
   595  		t.Fatal(err)
   596  	}
   597  
    598  	// Ack them again; this should succeed even though we've acked them before.
    599  	// If the service returns an error for a duplicate ack, the driver should drop it.
   600  	err = ds.SendAcks(ctx, []driver.AckID{dms[0].AckID, dms[1].AckID})
   601  	if err != nil {
   602  		t.Fatal(err)
   603  	}
   604  
   605  	if !ds.CanNack() {
   606  		return
   607  	}
   608  
   609  	// Nack all 3 messages. This should also succeed, and the nack of the third
   610  	// message should take effect, so we should be able to fetch it again.
   611  	// Note that the other messages *may* also be re-sent, because we're nacking
   612  	// them here (even though we acked them earlier); it depends on service
   613  	// semantics and time-sensitivity.
   614  	err = ds.SendNacks(ctx, []driver.AckID{dms[0].AckID, dms[1].AckID, dms[2].AckID})
   615  	if err != nil {
   616  		t.Fatal(err)
   617  	}
   618  
   619  	// The test will hang here if the message isn't redelivered, so use a shorter timeout.
   620  	ctx2, cancel = context.WithTimeout(ctx, 30*time.Second)
   621  	defer cancel()
   622  
   623  	// We're looking to re-receive dms[2].
   624  Loop:
   625  	for {
   626  		curdms, err := ds.ReceiveBatch(ctx2, 1)
   627  		if err != nil {
   628  			t.Fatal(err)
   629  		}
   630  		for _, curdm := range curdms {
   631  			if bytes.Equal(curdm.Body, dms[2].Body) {
   632  				// Found it!
   633  				break Loop
   634  			}
   635  		}
   636  	}
   637  }
   638  
   639  // Publish n different messages to the topic. Return the messages.
   640  func publishN(ctx context.Context, t *testing.T, topic *pubsub.Topic, n int) []*pubsub.Message {
   641  	var ms []*pubsub.Message
   642  	for i := 0; i < n; i++ {
   643  		m := &pubsub.Message{
   644  			Body:     []byte(strconv.Itoa(i)),
   645  			Metadata: map[string]string{"a": strconv.Itoa(i)},
   646  		}
   647  		if err := topic.Send(ctx, m); err != nil {
   648  			t.Fatal(err)
   649  		}
   650  		ms = append(ms, m)
   651  	}
   652  	return ms
   653  }
   654  
   655  // Receive and ack n messages from sub.
   656  func receiveN(ctx context.Context, t *testing.T, sub *pubsub.Subscription, n int) []*pubsub.Message {
   657  	// The test will hang here if the message(s) aren't available, so use a shorter timeout.
   658  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   659  	defer cancel()
   660  	var ms []*pubsub.Message
   661  	for i := 0; i < n; i++ {
   662  		m, err := sub.Receive(ctx2)
   663  		if err != nil {
   664  			t.Fatal(err)
   665  		}
   666  		ms = append(ms, m)
   667  		m.Ack()
   668  	}
   669  	return ms
   670  }
   671  
   672  // Find the differences between two sets of messages.
   673  func diffMessageSets(got, want []*pubsub.Message) string {
   674  	for _, m := range got {
   675  		m.LoggableID = ""
   676  	}
   677  	less := func(x, y *pubsub.Message) bool { return bytes.Compare(x.Body, y.Body) < 0 }
   678  	return cmp.Diff(got, want, cmpopts.SortSlices(less), cmpopts.IgnoreUnexported(pubsub.Message{}))
   679  }
   680  
   681  func testErrorOnSendToClosedTopic(t *testing.T, newHarness HarnessMaker) {
   682  	// Set up.
   683  	ctx := context.Background()
   684  	h, err := newHarness(ctx, t)
   685  	if err != nil {
   686  		t.Fatal(err)
   687  	}
   688  	defer h.Close()
   689  
   690  	dt, cleanup, err := h.CreateTopic(ctx, t.Name())
   691  	if err != nil {
   692  		t.Fatal(err)
   693  	}
   694  	defer cleanup()
   695  
   696  	topic := pubsub.NewTopic(dt, batchSizeOne)
   697  	if err := topic.Shutdown(ctx); err != nil {
   698  		t.Error(err)
   699  	}
   700  
   701  	// Check that sending to the closed topic fails.
   702  	m := &pubsub.Message{}
   703  	if err := topic.Send(ctx, m); err == nil {
   704  		t.Error("topic.Send returned nil, want error")
   705  	}
   706  	if err := topic.Shutdown(ctx); err == nil {
   707  		t.Error("wanted error on double Shutdown")
   708  	}
   709  }
   710  
   711  func testErrorOnReceiveFromClosedSubscription(t *testing.T, newHarness HarnessMaker) {
   712  	ctx := context.Background()
   713  	h, err := newHarness(ctx, t)
   714  	if err != nil {
   715  		t.Fatal(err)
   716  	}
   717  	defer h.Close()
   718  
   719  	dt, cleanup, err := h.CreateTopic(ctx, t.Name())
   720  	if err != nil {
   721  		t.Fatal(err)
   722  	}
   723  	defer cleanup()
   724  
   725  	ds, cleanup, err := h.CreateSubscription(ctx, dt, t.Name())
   726  	if err != nil {
   727  		t.Fatal(err)
   728  	}
   729  	defer cleanup()
   730  
   731  	sub := pubsub.NewSubscription(ds, batchSizeOne, batchSizeOne)
   732  	if err := sub.Shutdown(ctx); err != nil {
   733  		t.Error(err)
   734  	}
   735  	if _, err = sub.Receive(ctx); err == nil {
   736  		t.Error("sub.Receive returned nil, want error")
   737  	}
   738  	if err := sub.Shutdown(ctx); err == nil {
   739  		t.Error("wanted error on double Shutdown")
   740  	}
   741  }
   742  
   743  func testCancelSendReceive(t *testing.T, newHarness HarnessMaker) {
   744  	ctx := context.Background()
   745  	h, err := newHarness(ctx, t)
   746  	if err != nil {
   747  		t.Fatal(err)
   748  	}
   749  	defer h.Close()
   750  	topic, sub, cleanup, err := makePair(ctx, t, h)
   751  	if err != nil {
   752  		t.Fatal(err)
   753  	}
   754  	defer cleanup()
   755  
   756  	ctx, cancel := context.WithCancel(ctx)
   757  	cancel()
   758  
   759  	m := &pubsub.Message{}
   760  	if err := topic.Send(ctx, m); !isCanceled(err) {
   761  		t.Errorf("topic.Send returned %v (%T), want context.Canceled", err, err)
   762  	}
   763  	if _, err := sub.Receive(ctx); !isCanceled(err) {
   764  		t.Errorf("sub.Receive returned %v (%T), want context.Canceled", err, err)
   765  	}
   766  
   767  	// It would be nice to add a test that cancels an in-flight blocking Receive.
   768  	// However, because pubsub.Subscription.Receive repeatedly calls
   769  	// driver.ReceiveBatch if it returns 0 messages, it's difficult to write
   770  	// such a test without it being flaky for drivers with record/replay
   771  	// (the number of times driver.ReceiveBatch is called is timing-dependent).
   772  }
   773  
   774  func testMetadata(t *testing.T, newHarness HarnessMaker) {
   775  	// Set up.
   776  	ctx := context.Background()
   777  	h, err := newHarness(ctx, t)
   778  	if err != nil {
   779  		t.Fatal(err)
   780  	}
   781  	defer h.Close()
   782  
    783  	// Some services limit the number of metadata entries per message.
   784  	// Sort the escape.WeirdStrings values for record/replay consistency,
   785  	// then break the weird strings up into groups of at most maxMetadataKeys.
   786  	const maxMetadataKeys = 10
   787  	var weirdStrings []string
   788  	for _, v := range escape.WeirdStrings {
   789  		weirdStrings = append(weirdStrings, v)
   790  	}
   791  	sort.Slice(weirdStrings, func(i, j int) bool { return weirdStrings[i] < weirdStrings[j] })
   792  
   793  	weirdMetaDataGroups := []map[string]string{{}}
   794  	i := 0
   795  	for _, k := range weirdStrings {
   796  		weirdMetaDataGroups[i][k] = k
   797  		if len(weirdMetaDataGroups[i]) == maxMetadataKeys {
   798  			weirdMetaDataGroups = append(weirdMetaDataGroups, map[string]string{})
   799  			i++
   800  		}
   801  	}
   802  
   803  	topic, sub, cleanup, err := makePair(ctx, t, h)
   804  	if err != nil {
   805  		t.Fatal(err)
   806  	}
   807  	defer cleanup()
   808  
   809  	for _, wm := range weirdMetaDataGroups {
   810  		m := &pubsub.Message{
   811  			Body:     []byte("hello world"),
   812  			Metadata: wm,
   813  		}
   814  		if err := topic.Send(ctx, m); err != nil {
   815  			t.Fatal(err)
   816  		}
   817  
   818  		// The test will hang here if the messages aren't delivered, so use a shorter timeout.
   819  		ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   820  		defer cancel()
   821  		m, err = sub.Receive(ctx2)
   822  		if err != nil {
   823  			t.Fatal(err)
   824  		}
   825  		m.Ack()
   826  
   827  		if diff := cmp.Diff(m.Metadata, wm); diff != "" {
   828  			t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", m.Metadata, wm, diff)
   829  		}
   830  	}
   831  
   832  	// Verify that non-UTF8 strings in metadata key or value fail.
   833  	m := &pubsub.Message{
   834  		Body:     []byte("hello world"),
   835  		Metadata: map[string]string{escape.NonUTF8String: "bar"},
   836  	}
   837  	if err := topic.Send(ctx, m); err == nil {
   838  		t.Error("got nil error, expected error for using non-UTF8 string as metadata key")
   839  	}
   840  	m.Metadata = map[string]string{"foo": escape.NonUTF8String}
   841  	if err := topic.Send(ctx, m); err == nil {
   842  		t.Error("got nil error, expected error for using non-UTF8 string as metadata value")
   843  	}
   844  }
   845  
   846  func testNonUTF8MessageBody(t *testing.T, newHarness HarnessMaker) {
   847  	// Set up.
   848  	ctx := context.Background()
   849  	h, err := newHarness(ctx, t)
   850  	if err != nil {
   851  		t.Fatal(err)
   852  	}
   853  	defer h.Close()
   854  
   855  	topic, sub, cleanup, err := makePair(ctx, t, h)
   856  	if err != nil {
   857  		t.Fatal(err)
   858  	}
   859  	defer cleanup()
   860  
   861  	// Sort the WeirdStrings map for record/replay consistency.
   862  	var weirdStrings [][]string // [0] = key, [1] = value
   863  	for k, v := range escape.WeirdStrings {
   864  		weirdStrings = append(weirdStrings, []string{k, v})
   865  	}
   866  	sort.Slice(weirdStrings, func(i, j int) bool { return weirdStrings[i][0] < weirdStrings[j][0] })
   867  
   868  	// Construct a message body with the weird strings and some non-UTF-8 bytes.
   869  	var body []byte
   870  	for _, v := range weirdStrings {
   871  		body = append(body, []byte(v[1])...)
   872  	}
   873  	body = append(body, []byte(escape.NonUTF8String)...)
   874  	m := &pubsub.Message{Body: body}
   875  
   876  	if err := topic.Send(ctx, m); err != nil {
   877  		t.Fatal(err)
   878  	}
   879  	// The test will hang here if the messages aren't delivered, so use a shorter timeout.
   880  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   881  	defer cancel()
   882  	m, err = sub.Receive(ctx2)
   883  	if err != nil {
   884  		t.Fatal(err)
   885  	}
   886  	m.Ack()
   887  
   888  	if diff := cmp.Diff(m.Body, body); diff != "" {
   889  		t.Fatalf("got\n%v\nwant\n%v\ndiff\n%s", m.Body, body, diff)
   890  	}
   891  }
   892  
   893  func isCanceled(err error) bool {
   894  	if err == context.Canceled {
   895  		return true
   896  	}
   897  	if cerr, ok := err.(*retry.ContextError); ok {
   898  		return cerr.CtxErr == context.Canceled
   899  	}
   900  	return gcerrors.Code(err) == gcerrors.Canceled
   901  }
   902  
   903  func makePair(ctx context.Context, t *testing.T, h Harness) (*pubsub.Topic, *pubsub.Subscription, func(), error) {
   904  	dt, topicCleanup, err := h.CreateTopic(ctx, t.Name())
   905  	if err != nil {
   906  		return nil, nil, nil, err
   907  	}
   908  	ds, subCleanup, err := h.CreateSubscription(ctx, dt, t.Name())
   909  	if err != nil {
   910  		topicCleanup()
   911  		return nil, nil, nil, err
   912  	}
   913  	topic := pubsub.NewTopic(dt, batchSizeOne)
   914  	sub := pubsub.NewSubscription(ds, batchSizeOne, batchSizeOne)
   915  	cleanup := func() {
   916  		if err := topic.Shutdown(ctx); err != nil {
   917  			t.Error(err)
   918  		}
   919  		if err := sub.Shutdown(ctx); err != nil {
   920  			t.Error(err)
   921  		}
   922  		subCleanup()
   923  		topicCleanup()
   924  	}
   925  	return topic, sub, cleanup, nil
   926  }
   927  
   928  // testAs tests the various As functions, using AsTest.
   929  func testAs(t *testing.T, newHarness HarnessMaker, st AsTest) {
   930  	ctx := context.Background()
   931  	h, err := newHarness(ctx, t)
   932  	if err != nil {
   933  		t.Fatal(err)
   934  	}
   935  	defer h.Close()
   936  	topic, sub, cleanup, err := makePair(ctx, t, h)
   937  	if err != nil {
   938  		t.Fatal(err)
   939  	}
   940  	defer cleanup()
   941  
   942  	if err := st.TopicCheck(topic); err != nil {
   943  		t.Error(err)
   944  	}
   945  
   946  	if err := st.SubscriptionCheck(sub); err != nil {
   947  		t.Error(err)
   948  	}
   949  
   950  	msg := &pubsub.Message{
   951  		Body:       []byte("x"),
   952  		BeforeSend: st.BeforeSend,
   953  		AfterSend:  st.AfterSend,
   954  	}
   955  	if err := topic.Send(ctx, msg); err != nil {
   956  		t.Fatal(err)
   957  	}
   958  	// The test will hang here if the messages aren't delivered, so use a shorter timeout.
   959  	ctx2, cancel := context.WithTimeout(ctx, 30*time.Second)
   960  	defer cancel()
   961  	m, err := sub.Receive(ctx2)
   962  	if err != nil {
   963  		t.Fatal(err)
   964  	}
   965  	if err := st.MessageCheck(m); err != nil {
   966  		t.Error(err)
   967  	}
   968  	m.Ack()
   969  
    970  	// Make a nonexistent topic and try to send on it, to get an error we can
   971  	// use to call TopicErrorCheck.
   972  	dt, err := h.MakeNonexistentTopic(ctx)
   973  	if err != nil {
   974  		t.Fatal(err)
   975  	}
   976  	nonexistentTopic := pubsub.NewTopic(dt, batchSizeOne)
   977  	defer func() {
   978  		if err := nonexistentTopic.Shutdown(ctx); err != nil {
   979  			t.Error(err)
   980  		}
   981  	}()
   982  	// The test will hang here if Send doesn't fail quickly, so set a shorter timeout.
   983  	ctx2, cancel = context.WithTimeout(ctx, 30*time.Second)
   984  	defer cancel()
   985  	topicErr := nonexistentTopic.Send(ctx2, &pubsub.Message{})
   986  	if topicErr == nil || gcerrors.Code(topicErr) != gcerrors.NotFound {
   987  		t.Errorf("got error %v sending to nonexistent topic, want Code=NotFound", topicErr)
   988  	} else if err := st.TopicErrorCheck(topic, topicErr); err != nil {
   989  		t.Error(err)
   990  	}
   991  
   992  	// Make a nonexistent subscription and try to receive from it, to get an error
   993  	// we can use to call SubscriptionErrorCheck.
   994  	ds, cleanup, err := h.MakeNonexistentSubscription(ctx)
   995  	if err != nil {
   996  		t.Fatal(err)
   997  	}
   998  	defer cleanup()
   999  	nonExistentSub := pubsub.NewSubscription(ds, batchSizeOne, batchSizeOne)
  1000  	defer func() {
  1001  		if err := nonExistentSub.Shutdown(ctx); err != nil {
  1002  			t.Error(err)
  1003  		}
  1004  	}()
  1005  
  1006  	// The test will hang here if Receive doesn't fail quickly, so set a shorter timeout.
  1007  	ctx2, cancel = context.WithTimeout(ctx, 30*time.Second)
  1008  	defer cancel()
  1009  	_, subErr := nonExistentSub.Receive(ctx2)
  1010  	if subErr == nil || ctx2.Err() != nil || gcerrors.Code(subErr) != gcerrors.NotFound {
  1011  		t.Errorf("got error %v receiving from nonexistent subscription, want Code=NotFound", subErr)
  1012  	} else if err := st.SubscriptionErrorCheck(nonExistentSub, subErr); err != nil {
  1013  		t.Error(err)
  1014  	}
  1015  }
  1016  
  1017  // Publishes a large number of messages to topic concurrently, and then times
  1018  // how long it takes to send (if timeSend is true) or receive (if timeSend
  1019  // is false) them all.
  1020  func benchmark(b *testing.B, topic *pubsub.Topic, sub *pubsub.Subscription, timeSend bool) {
  1021  	attrs := map[string]string{"label": "value"}
  1022  	body := []byte("hello, world")
  1023  	const (
  1024  		nMessages          = 10000
  1025  		concurrencySend    = 100
  1026  		concurrencyReceive = 100
  1027  	)
  1028  	if nMessages%concurrencySend != 0 || nMessages%concurrencyReceive != 0 {
  1029  		b.Fatal("nMessages must be divisible by # of sending/receiving goroutines")
  1030  	}
  1031  	b.ResetTimer()
  1032  	for i := 0; i < b.N; i++ {
  1033  		if !timeSend {
  1034  			b.StopTimer()
  1035  		}
  1036  		if err := publishNConcurrently(topic, nMessages, concurrencySend, attrs, body); err != nil {
  1037  			b.Fatalf("publishing: %v", err)
  1038  		}
  1039  		b.Logf("published %d messages", nMessages)
  1040  		if timeSend {
  1041  			b.StopTimer()
  1042  		} else {
  1043  			b.StartTimer()
  1044  		}
  1045  		if err := receiveNConcurrently(sub, nMessages, concurrencyReceive); err != nil {
  1046  			b.Fatalf("receiving: %v", err)
  1047  		}
  1048  		b.SetBytes(nMessages * 1e6)
  1049  		b.Log("MB/s is actually number of messages received per second")
  1050  		if timeSend {
  1051  			b.StartTimer()
  1052  		}
  1053  	}
  1054  }
  1055  
  1056  func publishNConcurrently(topic *pubsub.Topic, nMessages, nGoroutines int, attrs map[string]string, body []byte) error {
  1057  	return runConcurrently(nMessages, nGoroutines, func(ctx context.Context) error {
  1058  		return topic.Send(ctx, &pubsub.Message{Metadata: attrs, Body: body})
  1059  	})
  1060  }
  1061  
  1062  func receiveNConcurrently(sub *pubsub.Subscription, nMessages, nGoroutines int) error {
  1063  	return runConcurrently(nMessages, nGoroutines, func(ctx context.Context) error {
  1064  		m, err := sub.Receive(ctx)
  1065  		if err != nil {
  1066  			return err
  1067  		}
  1068  		m.Ack()
  1069  		return nil
  1070  	})
  1071  }
  1072  
  1073  // Call function f n times concurrently, using g goroutines. g must divide n.
  1074  // Wait until all calls complete. If any fail, cancel the remaining ones.
  1075  func runConcurrently(n, g int, f func(context.Context) error) error {
  1076  	gr, ctx := errgroup.WithContext(context.Background())
  1077  	ng := n / g
  1078  	for i := 0; i < g; i++ {
  1079  		gr.Go(func() error {
  1080  			for j := 0; j < ng; j++ {
  1081  				if err := f(ctx); err != nil {
  1082  					return err
  1083  				}
  1084  			}
  1085  			return nil
  1086  		})
  1087  	}
  1088  	return gr.Wait()
  1089  }