github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/network/stream/streamer_test.go (about)

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package stream
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"errors"
    23  	"fmt"
    24  	"os"
    25  	"strconv"
    26  	"strings"
    27  	"sync"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/swarm/testutil"
    32  
    33  	"github.com/ethereum/go-ethereum/common"
    34  	"github.com/ethereum/go-ethereum/log"
    35  	"github.com/ethereum/go-ethereum/node"
    36  	"github.com/ethereum/go-ethereum/p2p/enode"
    37  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    38  	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
    39  	"github.com/ethereum/go-ethereum/swarm/network"
    40  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    41  	"github.com/ethereum/go-ethereum/swarm/state"
    42  	"golang.org/x/crypto/sha3"
    43  )
    44  
    45  func TestStreamerSubscribe(t *testing.T) {
    46  	tester, streamer, _, teardown, err := newStreamerTester(nil)
    47  	if err != nil {
    48  		t.Fatal(err)
    49  	}
    50  	defer teardown()
    51  
    52  	stream := NewStream("foo", "", true)
    53  	err = streamer.Subscribe(tester.Nodes[0].ID(), stream, NewRange(0, 0), Top)
    54  	if err == nil || err.Error() != "stream foo not registered" {
    55  		t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
    56  	}
    57  }
    58  
    59  func TestStreamerRequestSubscription(t *testing.T) {
    60  	tester, streamer, _, teardown, err := newStreamerTester(nil)
    61  	if err != nil {
    62  		t.Fatal(err)
    63  	}
    64  	defer teardown()
    65  
    66  	stream := NewStream("foo", "", false)
    67  	err = streamer.RequestSubscription(tester.Nodes[0].ID(), stream, &Range{}, Top)
    68  	if err == nil || err.Error() != "stream foo not registered" {
    69  		t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
    70  	}
    71  }
    72  
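         // hashes is the concatenation hash0 || hash1 || hash2 (three 32-byte
         // sha3 hashes); corruptHashes truncates it to 40 bytes, which is not a
         // multiple of HashSize and therefore triggers the "invalid hashes
         // length" error asserted in TestStreamerDownstreamCorruptHashesMsgExchange.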
    73  var (
    74  	hash0         = sha3.Sum256([]byte{0})
    75  	hash1         = sha3.Sum256([]byte{1})
    76  	hash2         = sha3.Sum256([]byte{2})
    77  	hashesTmp     = append(hash0[:], hash1[:]...)
    78  	hashes        = append(hashesTmp, hash2[:]...)
     79  	corruptHashes = hashes[:40]
    80  )
    81  
    82  type testClient struct {
    83  	t              string
    84  	wait0          chan bool
    85  	wait2          chan bool
    86  	batchDone      chan bool
    87  	receivedHashes map[string][]byte
    88  }
    89  
    90  func newTestClient(t string) *testClient {
    91  	return &testClient{
    92  		t:              t,
    93  		wait0:          make(chan bool),
    94  		wait2:          make(chan bool),
    95  		batchDone:      make(chan bool),
    96  		receivedHashes: make(map[string][]byte),
    97  	}
    98  }
    99  
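         // NeedData records every offered hash; for hash0 and hash2 it returns a
         // wait function that blocks until the matching channel is closed, letting
         // the tests control when a batch may complete. For hash1 it returns nil
         // (no fetch needed), which is why the expected WantedHashesMsg carries
         // Want: []byte{5} (bitvector 0b101, bits 0 and 2 set).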
    100  func (c *testClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
    101  	c.receivedHashes[string(hash)] = hash
    102  	if bytes.Equal(hash, hash0[:]) {
    103  		return func(context.Context) error {
    104  			<-c.wait0
    105  			return nil
    106  		}
    107  	} else if bytes.Equal(hash, hash2[:]) {
    108  		return func(context.Context) error {
    109  			<-c.wait2
    110  			return nil
    111  		}
    112  	}
    113  	return nil
    114  }
   115  
    116  func (c *testClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
    117  	close(c.batchDone)
    118  	return nil
    119  }
   120  
    121  func (c *testClient) Close() {}
   122  
   123  type testServer struct {
   124  	t            string
   125  	sessionIndex uint64
   126  }
   127  
   128  func newTestServer(t string, sessionIndex uint64) *testServer {
   129  	return &testServer{
   130  		t:            t,
   131  		sessionIndex: sessionIndex,
   132  	}
   133  }
   134  
   135  func (s *testServer) SessionIndex() (uint64, error) {
   136  	return s.sessionIndex, nil
   137  }
   138  
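         // SetNextBatch returns a single zero-filled hash and echoes the requested
         // range shifted by one; the history OfferedHashesMsg expectations in the
         // tests below (e.g. From: 6, To: 9 for History: NewRange(5, 8)) follow
         // directly from this.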
    139  func (s *testServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
   140  	return make([]byte, HashSize), from + 1, to + 1, nil, nil
   141  }
   142  
    143  func (s *testServer) GetData(context.Context, []byte) ([]byte, error) {
   144  	return nil, nil
   145  }
   146  
    147  func (s *testServer) Close() {
   148  }
   149  
   150  func TestStreamerDownstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
   151  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   152  	if err != nil {
   153  		t.Fatal(err)
   154  	}
   155  	defer teardown()
   156  
   157  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   158  		return newTestClient(t), nil
   159  	})
   160  
   161  	node := tester.Nodes[0]
   162  
   163  	stream := NewStream("foo", "", true)
   164  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   165  	if err != nil {
   166  		t.Fatalf("Expected no error, got %v", err)
   167  	}
   168  
   169  	err = tester.TestExchanges(
   170  		p2ptest.Exchange{
   171  			Label: "Subscribe message",
   172  			Expects: []p2ptest.Expect{
   173  				{
   174  					Code: 4,
   175  					Msg: &SubscribeMsg{
   176  						Stream:   stream,
   177  						History:  NewRange(5, 8),
   178  						Priority: Top,
   179  					},
   180  					Peer: node.ID(),
   181  				},
   182  			},
   183  		},
   184  		// trigger OfferedHashesMsg to actually create the client
   185  		p2ptest.Exchange{
   186  			Label: "OfferedHashes message",
   187  			Triggers: []p2ptest.Trigger{
   188  				{
   189  					Code: 1,
   190  					Msg: &OfferedHashesMsg{
   191  						HandoverProof: &HandoverProof{
   192  							Handover: &Handover{},
   193  						},
   194  						Hashes: hashes,
   195  						From:   5,
   196  						To:     8,
   197  						Stream: stream,
   198  					},
   199  					Peer: node.ID(),
   200  				},
   201  			},
   202  			Expects: []p2ptest.Expect{
   203  				{
   204  					Code: 2,
   205  					Msg: &WantedHashesMsg{
   206  						Stream: stream,
   207  						Want:   []byte{5},
   208  						From:   9,
   209  						To:     0,
   210  					},
   211  					Peer: node.ID(),
   212  				},
   213  			},
   214  		},
   215  	)
   216  	if err != nil {
   217  		t.Fatal(err)
   218  	}
   219  
   220  	err = streamer.Unsubscribe(node.ID(), stream)
   221  	if err != nil {
   222  		t.Fatalf("Expected no error, got %v", err)
   223  	}
   224  
   225  	err = tester.TestExchanges(p2ptest.Exchange{
   226  		Label: "Unsubscribe message",
   227  		Expects: []p2ptest.Expect{
   228  			{
   229  				Code: 0,
   230  				Msg: &UnsubscribeMsg{
   231  					Stream: stream,
   232  				},
   233  				Peer: node.ID(),
   234  			},
   235  		},
   236  	})
   237  
   238  	if err != nil {
   239  		t.Fatal(err)
   240  	}
   241  }
   242  
   243  func TestStreamerUpstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
   244  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   245  	if err != nil {
   246  		t.Fatal(err)
   247  	}
   248  	defer teardown()
   249  
   250  	stream := NewStream("foo", "", false)
   251  
   252  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   253  		return newTestServer(t, 10), nil
   254  	})
   255  
   256  	node := tester.Nodes[0]
   257  
   258  	err = tester.TestExchanges(p2ptest.Exchange{
   259  		Label: "Subscribe message",
   260  		Triggers: []p2ptest.Trigger{
   261  			{
   262  				Code: 4,
   263  				Msg: &SubscribeMsg{
   264  					Stream:   stream,
   265  					History:  NewRange(5, 8),
   266  					Priority: Top,
   267  				},
   268  				Peer: node.ID(),
   269  			},
   270  		},
   271  		Expects: []p2ptest.Expect{
   272  			{
   273  				Code: 1,
   274  				Msg: &OfferedHashesMsg{
   275  					Stream: stream,
   276  					HandoverProof: &HandoverProof{
   277  						Handover: &Handover{},
   278  					},
   279  					Hashes: make([]byte, HashSize),
   280  					From:   6,
   281  					To:     9,
   282  				},
   283  				Peer: node.ID(),
   284  			},
   285  		},
   286  	})
   287  
   288  	if err != nil {
   289  		t.Fatal(err)
   290  	}
   291  
   292  	err = tester.TestExchanges(p2ptest.Exchange{
   293  		Label: "unsubscribe message",
   294  		Triggers: []p2ptest.Trigger{
   295  			{
   296  				Code: 0,
   297  				Msg: &UnsubscribeMsg{
   298  					Stream: stream,
   299  				},
   300  				Peer: node.ID(),
   301  			},
   302  		},
   303  	})
   304  
   305  	if err != nil {
   306  		t.Fatal(err)
   307  	}
   308  }
   309  
   310  func TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive(t *testing.T) {
   311  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   312  	if err != nil {
   313  		t.Fatal(err)
   314  	}
   315  	defer teardown()
   316  
   317  	stream := NewStream("foo", "", true)
   318  
   319  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   320  		return newTestServer(t, 0), nil
   321  	})
   322  
   323  	node := tester.Nodes[0]
   324  
   325  	err = tester.TestExchanges(p2ptest.Exchange{
   326  		Label: "Subscribe message",
   327  		Triggers: []p2ptest.Trigger{
   328  			{
   329  				Code: 4,
   330  				Msg: &SubscribeMsg{
   331  					Stream:   stream,
   332  					Priority: Top,
   333  				},
   334  				Peer: node.ID(),
   335  			},
   336  		},
   337  		Expects: []p2ptest.Expect{
   338  			{
   339  				Code: 1,
   340  				Msg: &OfferedHashesMsg{
   341  					Stream: stream,
   342  					HandoverProof: &HandoverProof{
   343  						Handover: &Handover{},
   344  					},
   345  					Hashes: make([]byte, HashSize),
   346  					From:   1,
   347  					To:     0,
   348  				},
   349  				Peer: node.ID(),
   350  			},
   351  		},
   352  	})
   353  
   354  	if err != nil {
   355  		t.Fatal(err)
   356  	}
   357  
   358  	err = tester.TestExchanges(p2ptest.Exchange{
   359  		Label: "unsubscribe message",
   360  		Triggers: []p2ptest.Trigger{
   361  			{
   362  				Code: 0,
   363  				Msg: &UnsubscribeMsg{
   364  					Stream: stream,
   365  				},
   366  				Peer: node.ID(),
   367  			},
   368  		},
   369  	})
   370  
   371  	if err != nil {
   372  		t.Fatal(err)
   373  	}
   374  }
   375  
   376  func TestStreamerUpstreamSubscribeErrorMsgExchange(t *testing.T) {
   377  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   378  	if err != nil {
   379  		t.Fatal(err)
   380  	}
   381  	defer teardown()
   382  
   383  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   384  		return newTestServer(t, 0), nil
   385  	})
   386  
   387  	stream := NewStream("bar", "", true)
   388  
   389  	node := tester.Nodes[0]
   390  
   391  	err = tester.TestExchanges(p2ptest.Exchange{
   392  		Label: "Subscribe message",
   393  		Triggers: []p2ptest.Trigger{
   394  			{
   395  				Code: 4,
   396  				Msg: &SubscribeMsg{
   397  					Stream:   stream,
   398  					History:  NewRange(5, 8),
   399  					Priority: Top,
   400  				},
   401  				Peer: node.ID(),
   402  			},
   403  		},
   404  		Expects: []p2ptest.Expect{
   405  			{
   406  				Code: 7,
   407  				Msg: &SubscribeErrorMsg{
   408  					Error: "stream bar not registered",
   409  				},
   410  				Peer: node.ID(),
   411  			},
   412  		},
   413  	})
   414  
   415  	if err != nil {
   416  		t.Fatal(err)
   417  	}
   418  }
   419  
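         // TestStreamerUpstreamSubscribeLiveAndHistory subscribes to a live stream
         // with a history range; the server (created with session index 10) is
         // expected to reply with two offers: a history batch for the requested
         // range (From: 6, To: 9) and a live batch starting just past the session
         // index (From: 11, To: 0, i.e. open-ended).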
   420  func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) {
   421  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   422  	if err != nil {
   423  		t.Fatal(err)
   424  	}
   425  	defer teardown()
   426  
   427  	stream := NewStream("foo", "", true)
   428  
   429  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   430  		return newTestServer(t, 10), nil
   431  	})
   432  
   433  	node := tester.Nodes[0]
   434  
   435  	err = tester.TestExchanges(p2ptest.Exchange{
   436  		Label: "Subscribe message",
   437  		Triggers: []p2ptest.Trigger{
   438  			{
   439  				Code: 4,
   440  				Msg: &SubscribeMsg{
   441  					Stream:   stream,
   442  					History:  NewRange(5, 8),
   443  					Priority: Top,
   444  				},
   445  				Peer: node.ID(),
   446  			},
   447  		},
   448  		Expects: []p2ptest.Expect{
   449  			{
   450  				Code: 1,
   451  				Msg: &OfferedHashesMsg{
   452  					Stream: NewStream("foo", "", false),
   453  					HandoverProof: &HandoverProof{
   454  						Handover: &Handover{},
   455  					},
   456  					Hashes: make([]byte, HashSize),
   457  					From:   6,
   458  					To:     9,
   459  				},
   460  				Peer: node.ID(),
   461  			},
   462  			{
   463  				Code: 1,
   464  				Msg: &OfferedHashesMsg{
   465  					Stream: stream,
   466  					HandoverProof: &HandoverProof{
   467  						Handover: &Handover{},
   468  					},
   469  					From:   11,
   470  					To:     0,
   471  					Hashes: make([]byte, HashSize),
   472  				},
   473  				Peer: node.ID(),
   474  			},
   475  		},
   476  	})
   477  
   478  	if err != nil {
   479  		t.Fatal(err)
   480  	}
   481  }
   482  
   483  func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
   484  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   485  	if err != nil {
   486  		t.Fatal(err)
   487  	}
   488  	defer teardown()
   489  
   490  	stream := NewStream("foo", "", true)
   491  
   492  	var tc *testClient
   493  
   494  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   495  		tc = newTestClient(t)
   496  		return tc, nil
   497  	})
   498  
   499  	node := tester.Nodes[0]
   500  
   501  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   502  	if err != nil {
   503  		t.Fatalf("Expected no error, got %v", err)
   504  	}
   505  
   506  	err = tester.TestExchanges(p2ptest.Exchange{
   507  		Label: "Subscribe message",
   508  		Expects: []p2ptest.Expect{
   509  			{
   510  				Code: 4,
   511  				Msg: &SubscribeMsg{
   512  					Stream:   stream,
   513  					History:  NewRange(5, 8),
   514  					Priority: Top,
   515  				},
   516  				Peer: node.ID(),
   517  			},
   518  		},
   519  	},
   520  		p2ptest.Exchange{
   521  			Label: "Corrupt offered hash message",
   522  			Triggers: []p2ptest.Trigger{
   523  				{
   524  					Code: 1,
   525  					Msg: &OfferedHashesMsg{
   526  						HandoverProof: &HandoverProof{
   527  							Handover: &Handover{},
   528  						},
   529  						Hashes: corruptHashes,
   530  						From:   5,
   531  						To:     8,
   532  						Stream: stream,
   533  					},
   534  					Peer: node.ID(),
   535  				},
   536  			},
   537  		})
   538  	if err != nil {
   539  		t.Fatal(err)
   540  	}
   541  
   542  	expectedError := errors.New("Message handler error: (msg code 1): error invalid hashes length (len: 40)")
   543  	if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID(), Error: expectedError}); err != nil {
   544  		t.Fatal(err)
   545  	}
   546  }
   547  
   548  func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) {
   549  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   550  	if err != nil {
   551  		t.Fatal(err)
   552  	}
   553  	defer teardown()
   554  
   555  	stream := NewStream("foo", "", true)
   556  
   557  	var tc *testClient
   558  
   559  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   560  		tc = newTestClient(t)
   561  		return tc, nil
   562  	})
   563  
   564  	node := tester.Nodes[0]
   565  
   566  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   567  	if err != nil {
   568  		t.Fatalf("Expected no error, got %v", err)
   569  	}
   570  
   571  	err = tester.TestExchanges(p2ptest.Exchange{
   572  		Label: "Subscribe message",
   573  		Expects: []p2ptest.Expect{
   574  			{
   575  				Code: 4,
   576  				Msg: &SubscribeMsg{
   577  					Stream:   stream,
   578  					History:  NewRange(5, 8),
   579  					Priority: Top,
   580  				},
   581  				Peer: node.ID(),
   582  			},
   583  		},
   584  	},
   585  		p2ptest.Exchange{
   586  			Label: "WantedHashes message",
   587  			Triggers: []p2ptest.Trigger{
   588  				{
   589  					Code: 1,
   590  					Msg: &OfferedHashesMsg{
   591  						HandoverProof: &HandoverProof{
   592  							Handover: &Handover{},
   593  						},
   594  						Hashes: hashes,
   595  						From:   5,
   596  						To:     8,
   597  						Stream: stream,
   598  					},
   599  					Peer: node.ID(),
   600  				},
   601  			},
   602  			Expects: []p2ptest.Expect{
   603  				{
   604  					Code: 2,
   605  					Msg: &WantedHashesMsg{
   606  						Stream: stream,
   607  						Want:   []byte{5},
   608  						From:   9,
   609  						To:     0,
   610  					},
   611  					Peer: node.ID(),
   612  				},
   613  			},
   614  		})
   615  	if err != nil {
   616  		t.Fatal(err)
   617  	}
   618  
   619  	if len(tc.receivedHashes) != 3 {
   620  		t.Fatalf("Expected number of received hashes %v, got %v", 3, len(tc.receivedHashes))
   621  	}
   622  
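         	// releasing wait0 alone must not complete the batch: hash2's wait
         	// function returned by testClient.NeedData is still blocked on wait2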
   623  	close(tc.wait0)
   624  
   625  	timeout := time.NewTimer(100 * time.Millisecond)
   626  	defer timeout.Stop()
   627  
   628  	select {
   629  	case <-tc.batchDone:
   630  		t.Fatal("batch done early")
   631  	case <-timeout.C:
   632  	}
   633  
   634  	close(tc.wait2)
   635  
    636  	timeout2 := time.NewTimer(10 * time.Second)
   637  	defer timeout2.Stop()
   638  
   639  	select {
   640  	case <-tc.batchDone:
   641  	case <-timeout2.C:
    642  		t.Fatal("timeout waiting for batchDone call")
   643  	}
   644  
   645  }
   646  
   647  func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) {
   648  	tester, streamer, _, teardown, err := newStreamerTester(nil)
   649  	if err != nil {
   650  		t.Fatal(err)
   651  	}
   652  	defer teardown()
   653  
   654  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   655  		return newTestServer(t, 10), nil
   656  	})
   657  
   658  	node := tester.Nodes[0]
   659  
   660  	stream := NewStream("foo", "", true)
   661  	err = streamer.RequestSubscription(node.ID(), stream, NewRange(5, 8), Top)
   662  	if err != nil {
   663  		t.Fatalf("Expected no error, got %v", err)
   664  	}
   665  
   666  	err = tester.TestExchanges(
   667  		p2ptest.Exchange{
   668  			Label: "RequestSubscription message",
   669  			Expects: []p2ptest.Expect{
   670  				{
   671  					Code: 8,
   672  					Msg: &RequestSubscriptionMsg{
   673  						Stream:   stream,
   674  						History:  NewRange(5, 8),
   675  						Priority: Top,
   676  					},
   677  					Peer: node.ID(),
   678  				},
   679  			},
   680  		},
   681  		p2ptest.Exchange{
   682  			Label: "Subscribe message",
   683  			Triggers: []p2ptest.Trigger{
   684  				{
   685  					Code: 4,
   686  					Msg: &SubscribeMsg{
   687  						Stream:   stream,
   688  						History:  NewRange(5, 8),
   689  						Priority: Top,
   690  					},
   691  					Peer: node.ID(),
   692  				},
   693  			},
   694  			Expects: []p2ptest.Expect{
   695  				{
   696  					Code: 1,
   697  					Msg: &OfferedHashesMsg{
   698  						Stream: NewStream("foo", "", false),
   699  						HandoverProof: &HandoverProof{
   700  							Handover: &Handover{},
   701  						},
   702  						Hashes: make([]byte, HashSize),
   703  						From:   6,
   704  						To:     9,
   705  					},
   706  					Peer: node.ID(),
   707  				},
   708  				{
   709  					Code: 1,
   710  					Msg: &OfferedHashesMsg{
   711  						Stream: stream,
   712  						HandoverProof: &HandoverProof{
   713  							Handover: &Handover{},
   714  						},
   715  						From:   11,
   716  						To:     0,
   717  						Hashes: make([]byte, HashSize),
   718  					},
   719  					Peer: node.ID(),
   720  				},
   721  			},
   722  		},
   723  	)
   724  	if err != nil {
   725  		t.Fatal(err)
   726  	}
   727  
   728  	err = streamer.Quit(node.ID(), stream)
   729  	if err != nil {
   730  		t.Fatalf("Expected no error, got %v", err)
   731  	}
   732  
   733  	err = tester.TestExchanges(p2ptest.Exchange{
   734  		Label: "Quit message",
   735  		Expects: []p2ptest.Expect{
   736  			{
   737  				Code: 9,
   738  				Msg: &QuitMsg{
   739  					Stream: stream,
   740  				},
   741  				Peer: node.ID(),
   742  			},
   743  		},
   744  	})
   745  
   746  	if err != nil {
   747  		t.Fatal(err)
   748  	}
   749  
   750  	historyStream := getHistoryStream(stream)
   751  
   752  	err = streamer.Quit(node.ID(), historyStream)
   753  	if err != nil {
   754  		t.Fatalf("Expected no error, got %v", err)
   755  	}
   756  
   757  	err = tester.TestExchanges(p2ptest.Exchange{
   758  		Label: "Quit message",
   759  		Expects: []p2ptest.Expect{
   760  			{
   761  				Code: 9,
   762  				Msg: &QuitMsg{
   763  					Stream: historyStream,
   764  				},
   765  				Peer: node.ID(),
   766  			},
   767  		},
   768  	})
   769  
   770  	if err != nil {
   771  		t.Fatal(err)
   772  	}
   773  }
   774  
   775  // TestMaxPeerServersWithUnsubscribe creates a registry with a limited
   776  // number of stream servers, and performs a test with subscriptions and
    777  // unsubscriptions, checking that unsubscribing removes streams and
    778  // leaves room for new ones.
   779  func TestMaxPeerServersWithUnsubscribe(t *testing.T) {
   780  	var maxPeerServers = 6
   781  	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
   782  		Retrieval:      RetrievalDisabled,
   783  		Syncing:        SyncingDisabled,
   784  		MaxPeerServers: maxPeerServers,
   785  	})
   786  	if err != nil {
   787  		t.Fatal(err)
   788  	}
   789  	defer teardown()
   790  
   791  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   792  		return newTestServer(t, 0), nil
   793  	})
   794  
   795  	node := tester.Nodes[0]
   796  
   797  	for i := 0; i < maxPeerServers+10; i++ {
   798  		stream := NewStream("foo", strconv.Itoa(i), true)
   799  
   800  		err = tester.TestExchanges(p2ptest.Exchange{
   801  			Label: "Subscribe message",
   802  			Triggers: []p2ptest.Trigger{
   803  				{
   804  					Code: 4,
   805  					Msg: &SubscribeMsg{
   806  						Stream:   stream,
   807  						Priority: Top,
   808  					},
   809  					Peer: node.ID(),
   810  				},
   811  			},
   812  			Expects: []p2ptest.Expect{
   813  				{
   814  					Code: 1,
   815  					Msg: &OfferedHashesMsg{
   816  						Stream: stream,
   817  						HandoverProof: &HandoverProof{
   818  							Handover: &Handover{},
   819  						},
   820  						Hashes: make([]byte, HashSize),
   821  						From:   1,
   822  						To:     0,
   823  					},
   824  					Peer: node.ID(),
   825  				},
   826  			},
   827  		})
   828  
   829  		if err != nil {
   830  			t.Fatal(err)
   831  		}
   832  
   833  		err = tester.TestExchanges(p2ptest.Exchange{
   834  			Label: "unsubscribe message",
   835  			Triggers: []p2ptest.Trigger{
   836  				{
   837  					Code: 0,
   838  					Msg: &UnsubscribeMsg{
   839  						Stream: stream,
   840  					},
   841  					Peer: node.ID(),
   842  				},
   843  			},
   844  		})
   845  
   846  		if err != nil {
   847  			t.Fatal(err)
   848  		}
   849  	}
   850  }
   851  
   852  // TestMaxPeerServersWithoutUnsubscribe creates a registry with a limited
    853  // number of stream servers, and performs subscriptions past that limit to
    854  // verify the subscribe error message exchange.
   855  func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
   856  	var maxPeerServers = 6
   857  	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
   858  		MaxPeerServers: maxPeerServers,
   859  	})
   860  	if err != nil {
   861  		t.Fatal(err)
   862  	}
   863  	defer teardown()
   864  
   865  	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
   866  		return newTestServer(t, 0), nil
   867  	})
   868  
   869  	node := tester.Nodes[0]
   870  
   871  	for i := 0; i < maxPeerServers+10; i++ {
   872  		stream := NewStream("foo", strconv.Itoa(i), true)
   873  
   874  		if i >= maxPeerServers {
   875  			err = tester.TestExchanges(p2ptest.Exchange{
   876  				Label: "Subscribe message",
   877  				Triggers: []p2ptest.Trigger{
   878  					{
   879  						Code: 4,
   880  						Msg: &SubscribeMsg{
   881  							Stream:   stream,
   882  							Priority: Top,
   883  						},
   884  						Peer: node.ID(),
   885  					},
   886  				},
   887  				Expects: []p2ptest.Expect{
   888  					{
   889  						Code: 7,
   890  						Msg: &SubscribeErrorMsg{
   891  							Error: ErrMaxPeerServers.Error(),
   892  						},
   893  						Peer: node.ID(),
   894  					},
   895  				},
   896  			})
   897  
   898  			if err != nil {
   899  				t.Fatal(err)
   900  			}
   901  			continue
   902  		}
   903  
   904  		err = tester.TestExchanges(p2ptest.Exchange{
   905  			Label: "Subscribe message",
   906  			Triggers: []p2ptest.Trigger{
   907  				{
   908  					Code: 4,
   909  					Msg: &SubscribeMsg{
   910  						Stream:   stream,
   911  						Priority: Top,
   912  					},
   913  					Peer: node.ID(),
   914  				},
   915  			},
   916  			Expects: []p2ptest.Expect{
   917  				{
   918  					Code: 1,
   919  					Msg: &OfferedHashesMsg{
   920  						Stream: stream,
   921  						HandoverProof: &HandoverProof{
   922  							Handover: &Handover{},
   923  						},
   924  						Hashes: make([]byte, HashSize),
   925  						From:   1,
   926  						To:     0,
   927  					},
   928  					Peer: node.ID(),
   929  				},
   930  			},
   931  		})
   932  
   933  		if err != nil {
   934  			t.Fatal(err)
   935  		}
   936  	}
   937  }
   938  
    939  // TestHasPriceImplementation checks that the Registry has a
    940  // `Price` interface implementation.
   941  func TestHasPriceImplementation(t *testing.T) {
   942  	_, r, _, teardown, err := newStreamerTester(&RegistryOptions{
   943  		Retrieval: RetrievalDisabled,
   944  		Syncing:   SyncingDisabled,
   945  	})
   946  	if err != nil {
   947  		t.Fatal(err)
   948  	}
   949  	defer teardown()
   950  
   951  	if r.prices == nil {
   952  		t.Fatal("No prices implementation available for the stream protocol")
   953  	}
   954  
   955  	pricesInstance, ok := r.prices.(*StreamerPrices)
   956  	if !ok {
   957  		t.Fatal("`Registry` does not have the expected Prices instance")
   958  	}
   959  	price := pricesInstance.Price(&ChunkDeliveryMsgRetrieval{})
   960  	if price == nil || price.Value == 0 || price.Value != pricesInstance.getChunkDeliveryMsgRetrievalPrice() {
   961  		t.Fatal("No prices set for chunk delivery msg")
   962  	}
   963  
   964  	price = pricesInstance.Price(&RetrieveRequestMsg{})
   965  	if price == nil || price.Value == 0 || price.Value != pricesInstance.getRetrieveRequestMsgPrice() {
    966  		t.Fatal("No prices set for retrieve request msg")
   967  	}
   968  }
   969  
   970  /*
   971  TestRequestPeerSubscriptions is a unit test for stream's pull sync subscriptions.
   972  
   973  The test does:
   974  	* assign each connected peer to a bin map
    975  	* build up a known kademlia in advance
   976  	* run the EachConn function, which returns supposed subscription bins
   977  	* store all supposed bins per peer in a map
   978  	* check that all peers have the expected subscriptions
   979  
    980  This kad table and its peers are copied from network.TestKademliaCase1;
    981  it represents an edge case, but for the purpose of testing the
   982  syncing subscriptions it is just fine.
   983  
   984  Addresses used in this test are discovered as part of the simulation network
   985  in higher level tests for streaming. They were generated randomly.
   986  
   987  The resulting kademlia looks like this:
   988  =========================================================================
   989  Fri Dec 21 20:02:39 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
   990  population: 12 (12), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
   991  000  2 8196 835f                    |  2 8196 (0) 835f (0)
   992  001  2 2690 28f0                    |  2 2690 (0) 28f0 (0)
   993  002  2 4d72 4a45                    |  2 4d72 (0) 4a45 (0)
   994  003  1 646e                         |  1 646e (0)
   995  004  3 769c 76d1 7656               |  3 769c (0) 76d1 (0) 7656 (0)
   996  ============ DEPTH: 5 ==========================================
   997  005  1 7a48                         |  1 7a48 (0)
   998  006  1 7cbd                         |  1 7cbd (0)
   999  007  0                              |  0
  1000  008  0                              |  0
  1001  009  0                              |  0
  1002  010  0                              |  0
  1003  011  0                              |  0
  1004  012  0                              |  0
  1005  013  0                              |  0
  1006  014  0                              |  0
  1007  015  0                              |  0
  1008  =========================================================================
  1009  */
  1010  func TestRequestPeerSubscriptions(t *testing.T) {
  1011  	// the pivot address; this is the actual kademlia node
  1012  	pivotAddr := "7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e"
  1013  
  1014  	// a map of bin number to addresses from the given kademlia
  1015  	binMap := make(map[int][]string)
  1016  	binMap[0] = []string{
  1017  		"835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
  1018  		"81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
  1019  	}
  1020  	binMap[1] = []string{
  1021  		"28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
  1022  		"2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
  1023  	}
  1024  	binMap[2] = []string{
  1025  		"4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
  1026  		"4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
  1027  	}
  1028  	binMap[3] = []string{
  1029  		"646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
  1030  	}
  1031  	binMap[4] = []string{
  1032  		"7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
  1033  		"76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
  1034  		"769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
  1035  	}
  1036  	binMap[5] = []string{
  1037  		"7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
  1038  	}
  1039  	binMap[6] = []string{
  1040  		"7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
  1041  	}
  1042  
  1043  	// create the pivot's kademlia
  1044  	addr := common.FromHex(pivotAddr)
  1045  	k := network.NewKademlia(addr, network.NewKadParams())
  1046  
  1047  	// construct the peers and the kademlia
  1048  	for _, binaddrs := range binMap {
  1049  		for _, a := range binaddrs {
  1050  			addr := common.FromHex(a)
  1051  			k.On(network.NewPeer(&network.BzzPeer{BzzAddr: &network.BzzAddr{OAddr: addr}}, k))
  1052  		}
  1053  	}
  1054  
  1055  	// TODO: check kad table is same
  1056  	// currently k.String() prints date so it will never be the same :)
  1057  	// --> implement JSON representation of kad table
  1058  	log.Debug(k.String())
  1059  
  1060  	// simulate that we would do subscriptions: just store the bin numbers
  1061  	fakeSubscriptions := make(map[string][]int)
  1062  	//after the test, we need to reset the subscriptionFunc to the default
  1063  	defer func() { subscriptionFunc = doRequestSubscription }()
  1064  	// define the function which should run for each connection
  1065  	// instead of doing real subscriptions, we just store the bin numbers
  1066  	subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
  1067  		// get the peer ID
  1068  		peerstr := fmt.Sprintf("%x", p.Over())
  1069  		// create the array of bins per peer
  1070  		if _, ok := fakeSubscriptions[peerstr]; !ok {
  1071  			fakeSubscriptions[peerstr] = make([]int, 0)
  1072  		}
  1073  		// store the (fake) bin subscription
  1074  		log.Debug(fmt.Sprintf("Adding fake subscription for peer %s with bin %d", peerstr, bin))
  1075  		fakeSubscriptions[peerstr] = append(fakeSubscriptions[peerstr], int(bin))
  1076  		return true
  1077  	}
   1078  	// create just a simple Registry object in order to be able to call requestPeerSubscriptions
  1079  	r := &Registry{}
  1080  	r.requestPeerSubscriptions(k, nil)
  1081  	// calculate the kademlia depth
  1082  	kdepth := k.NeighbourhoodDepth()
  1083  
  1084  	// now, check that all peers have the expected (fake) subscriptions
  1085  	// iterate the bin map
  1086  	for bin, peers := range binMap {
  1087  		// for every peer...
  1088  		for _, peer := range peers {
  1089  			// ...get its (fake) subscriptions
  1090  			fakeSubsForPeer := fakeSubscriptions[peer]
  1091  			// if the peer's bin is shallower than the kademlia depth...
  1092  			if bin < kdepth {
  1093  				// (iterate all (fake) subscriptions)
  1094  				for _, subbin := range fakeSubsForPeer {
  1095  					// ...only the peer's bin should be "subscribed"
  1096  					// (and thus have only one subscription)
  1097  					if subbin != bin || len(fakeSubsForPeer) != 1 {
  1098  						t.Fatalf("Did not get expected subscription for bin < depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
  1099  					}
  1100  				}
   1101  	} else { // if the peer's bin is equal to or deeper than the kademlia depth...
  1102  				// (iterate all (fake) subscriptions)
  1103  				for i, subbin := range fakeSubsForPeer {
  1104  					// ...each bin from the peer's bin number up to k.MaxProxDisplay should be "subscribed"
  1105  					// as we start from depth we can use the iteration index to check
  1106  					if subbin != i+kdepth {
   1107  					t.Fatalf("Did not get expected subscription for bin >= depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
  1108  					}
  1109  					// the last "subscription" should be k.MaxProxDisplay
  1110  					if i == len(fakeSubsForPeer)-1 && subbin != k.MaxProxDisplay {
  1111  						t.Fatalf("Expected last subscription to be: %d, but is: %d", k.MaxProxDisplay, subbin)
  1112  					}
  1113  				}
  1114  			}
  1115  		}
  1116  	}
  1117  	// print some output
  1118  	for p, subs := range fakeSubscriptions {
  1119  		log.Debug(fmt.Sprintf("Peer %s has the following fake subscriptions: ", p))
  1120  		for _, bin := range subs {
  1121  			log.Debug(fmt.Sprintf("%d,", bin))
  1122  		}
  1123  	}
  1124  }
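
         // expectedSyncBins is an illustrative sketch (not used by the tests; the
         // helper and its name are added here for clarity) of the rule that
         // TestRequestPeerSubscriptions asserts: a peer in a bin shallower than
         // the kademlia depth gets exactly one subscription, its own bin, while a
         // peer at or beyond the depth gets one subscription per bin from the
         // depth up to and including maxProxDisplay.
         func expectedSyncBins(peerBin, depth, maxProxDisplay int) []int {
         	if peerBin < depth {
         		return []int{peerBin}
         	}
         	bins := make([]int, 0, maxProxDisplay-depth+1)
         	for b := depth; b <= maxProxDisplay; b++ {
         		bins = append(bins, b)
         	}
         	return bins
         }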
  1125  
  1126  // TestGetSubscriptions is a unit test for the api.GetPeerSubscriptions() function
  1127  func TestGetSubscriptions(t *testing.T) {
   1128  	// create a number of dummy peers
  1129  	testPeerCount := 8
   1130  	// every peer will have this number of dummy servers
  1131  	testServerCount := 4
  1132  	// the peerMap which will store this data for the registry
  1133  	peerMap := make(map[enode.ID]*Peer)
  1134  	// create the registry
  1135  	r := &Registry{}
  1136  	api := NewAPI(r)
  1137  	// call once, at this point should be empty
  1138  	regs := api.GetPeerSubscriptions()
  1139  	if len(regs) != 0 {
  1140  		t.Fatal("Expected subscription count to be 0, but it is not")
  1141  	}
  1142  
  1143  	// now create a number of dummy servers for each node
  1144  	for i := 0; i < testPeerCount; i++ {
  1145  		addr := network.RandomAddr()
  1146  		id := addr.ID()
  1147  		p := &Peer{}
  1148  		p.servers = make(map[Stream]*server)
  1149  		for k := 0; k < testServerCount; k++ {
  1150  			s := Stream{
  1151  				Name: strconv.Itoa(k),
  1152  				Key:  "",
  1153  				Live: false,
  1154  			}
  1155  			p.servers[s] = &server{}
  1156  		}
  1157  		peerMap[id] = p
  1158  	}
  1159  	r.peers = peerMap
  1160  
  1161  	// call the subscriptions again
  1162  	regs = api.GetPeerSubscriptions()
  1163  	// count how many (fake) subscriptions there are
  1164  	cnt := 0
  1165  	for _, reg := range regs {
  1166  		for range reg {
  1167  			cnt++
  1168  		}
  1169  	}
  1170  	// check expected value
  1171  	expectedCount := testPeerCount * testServerCount
  1172  	if cnt != expectedCount {
  1173  		t.Fatalf("Expected %d subscriptions, but got %d", expectedCount, cnt)
  1174  	}
  1175  }
  1176  
  1177  /*
  1178  TestGetSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
  1179  starts the simulation, waits for SyncUpdateDelay in order to kick off
  1180  stream registration, then tests that there are subscriptions.
  1181  */
  1182  func TestGetSubscriptionsRPC(t *testing.T) {
  1183  
  1184  	if testutil.RaceEnabled && os.Getenv("TRAVIS") == "true" {
  1185  		t.Skip("flaky with -race on Travis")
  1186  		// Note: related ticket https://github.com/ethersphere/go-ethereum/issues/1234
  1187  	}
  1188  
  1189  	// arbitrarily set to 4
  1190  	nodeCount := 4
  1191  	// set the syncUpdateDelay for sync registrations to start
  1192  	syncUpdateDelay := 200 * time.Millisecond
  1193  	// run with more nodes if `longrunning` flag is set
  1194  	if *longrunning {
  1195  		nodeCount = 64
  1196  		syncUpdateDelay = 10 * time.Second
  1197  	}
  1198  	// holds the msg code for SubscribeMsg
  1199  	var subscribeMsgCode uint64
  1200  	var ok bool
  1201  	var expectedMsgCount counter
  1202  
   1203  	// this channel signals that the expected number of subscriptions has been made
  1204  	allSubscriptionsDone := make(chan struct{})
  1205  	// after the test, we need to reset the subscriptionFunc to the default
  1206  	defer func() { subscriptionFunc = doRequestSubscription }()
  1207  
   1208  	// we use this subscriptionFunc for this test: it just increments the count and performs the actual subscription
  1209  	subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
  1210  		expectedMsgCount.inc()
  1211  		doRequestSubscription(r, p, bin, subs)
  1212  		return true
  1213  	}
  1214  	// create a standard sim
  1215  	sim := simulation.New(map[string]simulation.ServiceFunc{
  1216  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
  1217  			addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
  1218  			if err != nil {
  1219  				return nil, nil, err
  1220  			}
  1221  
  1222  			// configure so that sync registrations actually happen
  1223  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
  1224  				Retrieval:       RetrievalEnabled,
  1225  				Syncing:         SyncingAutoSubscribe, //enable sync registrations
  1226  				SyncUpdateDelay: syncUpdateDelay,
  1227  			}, nil)
  1228  
  1229  			// get the SubscribeMsg code
  1230  			subscribeMsgCode, ok = r.GetSpec().GetCode(SubscribeMsg{})
  1231  			if !ok {
   1232  				// this ServiceFunc runs outside the test goroutine, so return an
         				// error rather than calling t.Fatal
         				return nil, nil, errors.New("message code for SubscribeMsg not found")
  1233  			}
  1234  
  1235  			cleanup = func() {
  1236  				r.Close()
  1237  				clean()
  1238  			}
  1239  
  1240  			return r, cleanup, nil
  1241  		},
  1242  	})
  1243  	defer sim.Close()
  1244  
  1245  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
  1246  	defer cancelSimRun()
  1247  
  1248  	// upload a snapshot
  1249  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
  1250  	if err != nil {
  1251  		t.Fatal(err)
  1252  	}
  1253  
  1254  	// setup the filter for SubscribeMsg
  1255  	msgs := sim.PeerEvents(
  1256  		context.Background(),
  1257  		sim.NodeIDs(),
  1258  		simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(subscribeMsgCode),
  1259  	)
  1260  
   1261  	// strategy: listen to all SubscribeMsg events; after every event we wait.
   1262  	// if after `waitDuration` no more messages are received, we assume the
   1263  	// subscription phase has terminated.
  1264  
   1265  	// the loop in this goroutine will either wait for new message events
   1266  	// or time out after `waitDuration`, which signals that we are not
   1267  	// receiving any new subscriptions any more
  1268  	go func() {
   1269  		// scale the wait with the node count; for long-running sims waiting 1 sec will not be enough
   1270  		waitDuration := time.Duration(nodeCount/16) * time.Second
         		if waitDuration < time.Second {
         			// integer division yields 0 for small node counts; keep a 1s floor
         			waitDuration = time.Second
         		}
   1271  		if *longrunning {
   1272  			waitDuration = syncUpdateDelay
   1273  		}
  1274  		for {
  1275  			select {
  1276  			case <-ctx.Done():
  1277  				return
  1278  			case m := <-msgs: // just reset the loop
  1279  				if m.Error != nil {
  1280  					log.Error("stream message", "err", m.Error)
  1281  					continue
  1282  				}
  1283  				log.Trace("stream message", "node", m.NodeID, "peer", m.PeerID)
  1284  			case <-time.After(waitDuration):
   1285  				// waitDuration elapsed without new messages; don't assume more subscriptions
  1286  				allSubscriptionsDone <- struct{}{}
  1287  				log.Info("All subscriptions received")
  1288  				return
  1289  
  1290  			}
  1291  		}
  1292  	}()
  1293  
  1294  	//run the simulation
  1295  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
  1296  		log.Info("Simulation running")
  1297  		nodes := sim.Net.Nodes
  1298  
  1299  		//wait until all subscriptions are done
  1300  		select {
  1301  		case <-allSubscriptionsDone:
  1302  		case <-ctx.Done():
  1303  			return errors.New("Context timed out")
  1304  		}
  1305  
  1306  		log.Debug("Expected message count: ", "expectedMsgCount", expectedMsgCount.count())
  1307  		//now iterate again, this time we call each node via RPC to get its subscriptions
  1308  		realCount := 0
  1309  		for _, node := range nodes {
  1310  			//create rpc client
  1311  			client, err := node.Client()
  1312  			if err != nil {
   1313  				return fmt.Errorf("create node rpc client fail: %v", err)
  1314  			}
  1315  
  1316  			//ask it for subscriptions
  1317  			pstreams := make(map[string][]string)
  1318  			err = client.Call(&pstreams, "stream_getPeerSubscriptions")
  1319  			if err != nil {
  1320  				return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
  1321  			}
   1322  			// the number of subscriptions cannot be smaller than the number of peers
  1323  			log.Debug("node subscriptions", "node", node.String())
  1324  			for p, ps := range pstreams {
  1325  				log.Debug("... with", "peer", p)
  1326  				for _, s := range ps {
  1327  					log.Debug(".......", "stream", s)
  1328  					// each node also has subscriptions to RETRIEVE_REQUEST streams,
  1329  					// we need to ignore those, we are only counting SYNC streams
  1330  					if !strings.HasPrefix(s, "RETRIEVE_REQUEST") {
  1331  						realCount++
  1332  					}
  1333  				}
  1334  			}
  1335  			log.Debug("All node streams counted", "realCount", realCount)
  1336  		}
  1337  		emc := expectedMsgCount.count()
  1338  		if realCount != emc {
  1339  			return fmt.Errorf("Real subscriptions and expected amount don't match; real: %d, expected: %d", realCount, emc)
  1340  		}
  1341  		return nil
  1342  	})
  1343  	if result.Error != nil {
  1344  		t.Fatal(result.Error)
  1345  	}
  1346  }
  1347  
  1348  // counter is used to concurrently increment
  1349  // and read an integer value.
  1350  type counter struct {
  1351  	v  int
  1352  	mu sync.RWMutex
  1353  }
  1354  
  1355  // Increment the counter.
  1356  func (c *counter) inc() {
  1357  	c.mu.Lock()
  1358  	defer c.mu.Unlock()
  1359  
  1360  	c.v++
  1361  }
  1362  
  1363  // Read the counter value.
  1364  func (c *counter) count() int {
  1365  	c.mu.RLock()
  1366  	defer c.mu.RUnlock()
  1367  
  1368  	return c.v
  1369  }