github.com/susy-go/susy-graviton@v0.0.0-20190614130430-36cddae42305/swarm/network/stream/streamer_test.go

// Copyright 2018 The susy-graviton Authors
// This file is part of the susy-graviton library.
//
// The susy-graviton library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The susy-graviton library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the susy-graviton library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/susy-go/susy-graviton/common"
	"github.com/susy-go/susy-graviton/log"
	"github.com/susy-go/susy-graviton/node"
	"github.com/susy-go/susy-graviton/p2p/enode"
	"github.com/susy-go/susy-graviton/p2p/simulations/adapters"
	p2ptest "github.com/susy-go/susy-graviton/p2p/testing"
	"github.com/susy-go/susy-graviton/swarm/network"
	"github.com/susy-go/susy-graviton/swarm/network/simulation"
	"github.com/susy-go/susy-graviton/swarm/state"
	"golang.org/x/crypto/sha3"
)

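// TestStreamerSubscribe checks that subscribing to a stream name that has no
// registered client returns the "not registered" error.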
func TestStreamerSubscribe(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(tester.Nodes[0].ID(), stream, NewRange(0, 0), Top)
	if err == nil || err.Error() != "stream foo not registered" {
		t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
	}
}

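// TestStreamerRequestSubscription checks that requesting a subscription for an
// unregistered stream name returns the "not registered" error.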
func TestStreamerRequestSubscription(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", false)
	err = streamer.RequestSubscription(tester.Nodes[0].ID(), stream, &Range{}, Top)
	if err == nil || err.Error() != "stream foo not registered" {
		t.Fatalf("Expected error %v, got %v", "stream foo not registered", err)
	}
}

var (
	hash0         = sha3.Sum256([]byte{0})
	hash1         = sha3.Sum256([]byte{1})
	hash2         = sha3.Sum256([]byte{2})
	hashesTmp     = append(hash0[:], hash1[:]...)
	hashes        = append(hashesTmp, hash2[:]...)
	corruptHashes = hashes[:40] // 40 is not a multiple of HashSize, so these hashes are corrupt
)

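// testClient is a Client implementation that records the hashes it is asked
// for and lets the test control when the fetches for hash0 and hash2 complete.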
type testClient struct {
	t              string
	wait0          chan bool
	wait2          chan bool
	batchDone      chan bool
	receivedHashes map[string][]byte
}

func newTestClient(t string) *testClient {
	return &testClient{
		t:              t,
		wait0:          make(chan bool),
		wait2:          make(chan bool),
		batchDone:      make(chan bool),
		receivedHashes: make(map[string][]byte),
	}
}

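// NeedData records the requested hash and, for hash0 and hash2, returns a wait
// function that blocks until the corresponding channel is closed by the test.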
func (self *testClient) NeedData(ctx context.Context, hash []byte) func(context.Context) error {
	self.receivedHashes[string(hash)] = hash
	if bytes.Equal(hash, hash0[:]) {
		return func(context.Context) error {
			<-self.wait0
			return nil
		}
	} else if bytes.Equal(hash, hash2[:]) {
		return func(context.Context) error {
			<-self.wait2
			return nil
		}
	}
	return nil
}

func (self *testClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
	close(self.batchDone)
	return nil
}

func (self *testClient) Close() {}

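// testServer is a minimal Server implementation that hands out zero-filled
// hash batches and reports a fixed session index.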
type testServer struct {
	t            string
	sessionIndex uint64
}

func newTestServer(t string, sessionIndex uint64) *testServer {
	return &testServer{
		t:            t,
		sessionIndex: sessionIndex,
	}
}

func (s *testServer) SessionIndex() (uint64, error) {
	return s.sessionIndex, nil
}

func (self *testServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
	return make([]byte, HashSize), from + 1, to + 1, nil, nil
}

func (self *testServer) GetData(context.Context, []byte) ([]byte, error) {
	return nil, nil
}

func (self *testServer) Close() {
}

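// TestStreamerDownstreamSubscribeUnsubscribeMsgExchange verifies the message
// flow on the downstream side: a Subscribe call emits a SubscribeMsg, an
// incoming OfferedHashesMsg is answered with a WantedHashesMsg, and an
// Unsubscribe call emits an UnsubscribeMsg.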
func TestStreamerDownstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return newTestClient(t), nil
	})

	node := tester.Nodes[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(
		p2ptest.Exchange{
			Label: "Subscribe message",
			Expects: []p2ptest.Expect{
				{
					Code: 4,
					Msg: &SubscribeMsg{
						Stream:   stream,
						History:  NewRange(5, 8),
						Priority: Top,
					},
					Peer: node.ID(),
				},
			},
		},
		// trigger OfferedHashesMsg to actually create the client
		p2ptest.Exchange{
			Label: "OfferedHashes message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						Hashes: hashes,
						From:   5,
						To:     8,
						Stream: stream,
					},
					Peer: node.ID(),
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code: 2,
					Msg: &WantedHashesMsg{
						Stream: stream,
						Want:   []byte{5},
						From:   9,
						To:     0,
					},
					Peer: node.ID(),
				},
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}

	err = streamer.Unsubscribe(node.ID(), stream)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Unsubscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 0,
				Msg: &UnsubscribeMsg{
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

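// TestStreamerUpstreamSubscribeUnsubscribeMsgExchange verifies the upstream
// side of a history-only stream: an incoming SubscribeMsg is answered with an
// OfferedHashesMsg for the requested history range, and an UnsubscribeMsg is
// accepted without error.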
func TestStreamerUpstreamSubscribeUnsubscribeMsgExchange(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", false)

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 10), nil
	})

	node := tester.Nodes[0]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Triggers: []p2ptest.Trigger{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					Stream: stream,
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: make([]byte, HashSize),
					From:   6,
					To:     9,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "unsubscribe message",
		Triggers: []p2ptest.Trigger{
			{
				Code: 0,
				Msg: &UnsubscribeMsg{
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

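// TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive repeats the upstream
// subscribe/unsubscribe exchange for a live stream, where the offered hashes
// cover an open-ended range (To is zero).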
func TestStreamerUpstreamSubscribeUnsubscribeMsgExchangeLive(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", true)

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 0), nil
	})

	node := tester.Nodes[0]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Triggers: []p2ptest.Trigger{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					Stream: stream,
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: make([]byte, HashSize),
					From:   1,
					To:     0,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "unsubscribe message",
		Triggers: []p2ptest.Trigger{
			{
				Code: 0,
				Msg: &UnsubscribeMsg{
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

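// TestStreamerUpstreamSubscribeErrorMsgExchange verifies that a SubscribeMsg
// for a stream name with no registered server is answered with a
// SubscribeErrorMsg.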
func TestStreamerUpstreamSubscribeErrorMsgExchange(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 0), nil
	})

	stream := NewStream("bar", "", true)

	node := tester.Nodes[0]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Triggers: []p2ptest.Trigger{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 7,
				Msg: &SubscribeErrorMsg{
					Error: "stream bar not registered",
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

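// TestStreamerUpstreamSubscribeLiveAndHistory verifies that subscribing to a
// live stream with a history range yields two OfferedHashesMsg responses: one
// for the history stream and one for the live stream.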
func TestStreamerUpstreamSubscribeLiveAndHistory(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", true)

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 10), nil
	})

	node := tester.Nodes[0]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Triggers: []p2ptest.Trigger{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					Stream: NewStream("foo", "", false),
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: make([]byte, HashSize),
					From:   6,
					To:     9,
				},
				Peer: node.ID(),
			},
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					Stream: stream,
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					From:   11,
					To:     0,
					Hashes: make([]byte, HashSize),
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

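// TestStreamerDownstreamCorruptHashesMsgExchange verifies that an
// OfferedHashesMsg whose hashes are not a multiple of HashSize gets the peer
// dropped with an "invalid hashes length" error.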
func TestStreamerDownstreamCorruptHashesMsgExchange(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", true)

	var tc *testClient

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		tc = newTestClient(t)
		return tc, nil
	})

	node := tester.Nodes[0]

	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	},
		p2ptest.Exchange{
			Label: "Corrupt offered hash message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						Hashes: corruptHashes,
						From:   5,
						To:     8,
						Stream: stream,
					},
					Peer: node.ID(),
				},
			},
		})
	if err != nil {
		t.Fatal(err)
	}

	expectedError := errors.New("Message handler error: (msg code 1): error invalid hashes length (len: 40)")
	if err := tester.TestDisconnected(&p2ptest.Disconnect{Peer: node.ID(), Error: expectedError}); err != nil {
		t.Fatal(err)
	}
}

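// TestStreamerDownstreamOfferedHashesMsgExchange verifies the handling of an
// OfferedHashesMsg on the downstream side: the client receives all offered
// hashes and BatchDone fires only after every pending fetch has completed.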
func TestStreamerDownstreamOfferedHashesMsgExchange(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	stream := NewStream("foo", "", true)

	var tc *testClient

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		tc = newTestClient(t)
		return tc, nil
	})

	node := tester.Nodes[0]

	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	},
		p2ptest.Exchange{
			Label: "WantedHashes message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						Hashes: hashes,
						From:   5,
						To:     8,
						Stream: stream,
					},
					Peer: node.ID(),
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code: 2,
					Msg: &WantedHashesMsg{
						Stream: stream,
						Want:   []byte{5},
						From:   9,
						To:     0,
					},
					Peer: node.ID(),
				},
			},
		})
	if err != nil {
		t.Fatal(err)
	}

	if len(tc.receivedHashes) != 3 {
		t.Fatalf("Expected number of received hashes %v, got %v", 3, len(tc.receivedHashes))
	}

	close(tc.wait0)

	timeout := time.NewTimer(100 * time.Millisecond)
	defer timeout.Stop()

	select {
	case <-tc.batchDone:
		t.Fatal("batch done early")
	case <-timeout.C:
	}

	close(tc.wait2)

	timeout2 := time.NewTimer(10000 * time.Millisecond)
	defer timeout2.Stop()

	select {
	case <-tc.batchDone:
	case <-timeout2.C:
		t.Fatal("timeout waiting for BatchDone call")
	}
}

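// TestStreamerRequestSubscriptionQuitMsgExchange verifies that a
// RequestSubscription call emits a RequestSubscriptionMsg, that the peer's
// SubscribeMsg is answered with offered hashes for both the history and the
// live stream, and that Quit emits a QuitMsg for each stream.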
func TestStreamerRequestSubscriptionQuitMsgExchange(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 10), nil
	})

	node := tester.Nodes[0]

	stream := NewStream("foo", "", true)
	err = streamer.RequestSubscription(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(
		p2ptest.Exchange{
			Label: "RequestSubscription message",
			Expects: []p2ptest.Expect{
				{
					Code: 8,
					Msg: &RequestSubscriptionMsg{
						Stream:   stream,
						History:  NewRange(5, 8),
						Priority: Top,
					},
					Peer: node.ID(),
				},
			},
		},
		p2ptest.Exchange{
			Label: "Subscribe message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 4,
					Msg: &SubscribeMsg{
						Stream:   stream,
						History:  NewRange(5, 8),
						Priority: Top,
					},
					Peer: node.ID(),
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						Stream: NewStream("foo", "", false),
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						Hashes: make([]byte, HashSize),
						From:   6,
						To:     9,
					},
					Peer: node.ID(),
				},
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						Stream: stream,
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						From:   11,
						To:     0,
						Hashes: make([]byte, HashSize),
					},
					Peer: node.ID(),
				},
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}

	err = streamer.Quit(node.ID(), stream)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Quit message",
		Expects: []p2ptest.Expect{
			{
				Code: 9,
				Msg: &QuitMsg{
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	historyStream := getHistoryStream(stream)

	err = streamer.Quit(node.ID(), historyStream)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Quit message",
		Expects: []p2ptest.Expect{
			{
				Code: 9,
				Msg: &QuitMsg{
					Stream: historyStream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

// TestMaxPeerServersWithUnsubscribe creates a registry with a limited
// number of stream servers, then subscribes and unsubscribes repeatedly,
// checking that each unsubscription removes its stream and makes room
// for a new one.
func TestMaxPeerServersWithUnsubscribe(t *testing.T) {
	var maxPeerServers = 6
	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval:      RetrievalDisabled,
		Syncing:        SyncingDisabled,
		MaxPeerServers: maxPeerServers,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 0), nil
	})

	node := tester.Nodes[0]

	for i := 0; i < maxPeerServers+10; i++ {
		stream := NewStream("foo", strconv.Itoa(i), true)

		err = tester.TestExchanges(p2ptest.Exchange{
			Label: "Subscribe message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 4,
					Msg: &SubscribeMsg{
						Stream:   stream,
						Priority: Top,
					},
					Peer: node.ID(),
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						Stream: stream,
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						Hashes: make([]byte, HashSize),
						From:   1,
						To:     0,
					},
					Peer: node.ID(),
				},
			},
		})

		if err != nil {
			t.Fatal(err)
		}

		err = tester.TestExchanges(p2ptest.Exchange{
			Label: "unsubscribe message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 0,
					Msg: &UnsubscribeMsg{
						Stream: stream,
					},
					Peer: node.ID(),
				},
			},
		})

		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestMaxPeerServersWithoutUnsubscribe creates a registry with a limited
// number of stream servers and subscribes past that limit without ever
// unsubscribing, expecting a SubscribeErrorMsg once the limit is exceeded.
func TestMaxPeerServersWithoutUnsubscribe(t *testing.T) {
	var maxPeerServers = 6
	tester, streamer, _, teardown, err := newStreamerTester(&RegistryOptions{
		MaxPeerServers: maxPeerServers,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	streamer.RegisterServerFunc("foo", func(p *Peer, t string, live bool) (Server, error) {
		return newTestServer(t, 0), nil
	})

	node := tester.Nodes[0]

	for i := 0; i < maxPeerServers+10; i++ {
		stream := NewStream("foo", strconv.Itoa(i), true)

		if i >= maxPeerServers {
			err = tester.TestExchanges(p2ptest.Exchange{
				Label: "Subscribe message",
				Triggers: []p2ptest.Trigger{
					{
						Code: 4,
						Msg: &SubscribeMsg{
							Stream:   stream,
							Priority: Top,
						},
						Peer: node.ID(),
					},
				},
				Expects: []p2ptest.Expect{
					{
						Code: 7,
						Msg: &SubscribeErrorMsg{
							Error: ErrMaxPeerServers.Error(),
						},
						Peer: node.ID(),
					},
				},
			})

			if err != nil {
				t.Fatal(err)
			}
			continue
		}

		err = tester.TestExchanges(p2ptest.Exchange{
			Label: "Subscribe message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 4,
					Msg: &SubscribeMsg{
						Stream:   stream,
						Priority: Top,
					},
					Peer: node.ID(),
				},
			},
			Expects: []p2ptest.Expect{
				{
					Code: 1,
					Msg: &OfferedHashesMsg{
						Stream: stream,
						HandoverProof: &HandoverProof{
							Handover: &Handover{},
						},
						Hashes: make([]byte, HashSize),
						From:   1,
						To:     0,
					},
					Peer: node.ID(),
				},
			},
		})

		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestHasPriceImplementation checks that the Registry has a
// `Price` interface implementation.
func TestHasPriceImplementation(t *testing.T) {
	_, r, _, teardown, err := newStreamerTester(&RegistryOptions{
		Retrieval: RetrievalDisabled,
		Syncing:   SyncingDisabled,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer teardown()

	if r.prices == nil {
		t.Fatal("No prices implementation available for the stream protocol")
	}

	pricesInstance, ok := r.prices.(*StreamerPrices)
	if !ok {
		t.Fatal("`Registry` does not have the expected Prices instance")
	}
	price := pricesInstance.Price(&ChunkDeliveryMsgRetrieval{})
	if price == nil || price.Value == 0 || price.Value != pricesInstance.getChunkDeliveryMsgRetrievalPrice() {
		t.Fatal("No prices set for chunk delivery msg")
	}

	price = pricesInstance.Price(&RetrieveRequestMsg{})
	if price == nil || price.Value == 0 || price.Value != pricesInstance.getRetrieveRequestMsgPrice() {
		t.Fatal("No prices set for retrieve request msg")
	}
}

/*
TestRequestPeerSubscriptions is a unit test for stream's pull sync subscriptions.

The test does:
	* assign each connected peer to a bin map
	* build up a known kademlia in advance
	* run the EachConn function, which returns supposed subscription bins
	* store all supposed bins per peer in a map
	* check that all peers have the expected subscriptions

This kad table and its peers are copied from network.TestKademliaCase1;
it represents an edge case, but for the purpose of testing the
syncing subscriptions it is just fine.

Addresses used in this test are discovered as part of the simulation network
in higher level tests for streaming. They were generated randomly.

The resulting kademlia looks like this:
=========================================================================
Fri Dec 21 20:02:39 UTC 2018 KΛÐΞMLIΛ hive: queen's address: 7efef1
population: 12 (12), MinProxBinSize: 2, MinBinSize: 2, MaxBinSize: 4
000  2 8196 835f                    |  2 8196 (0) 835f (0)
001  2 2690 28f0                    |  2 2690 (0) 28f0 (0)
002  2 4d72 4a45                    |  2 4d72 (0) 4a45 (0)
003  1 646e                         |  1 646e (0)
004  3 769c 76d1 7656               |  3 769c (0) 76d1 (0) 7656 (0)
============ DEPTH: 5 ==========================================
005  1 7a48                         |  1 7a48 (0)
006  1 7cbd                         |  1 7cbd (0)
007  0                              |  0
008  0                              |  0
009  0                              |  0
010  0                              |  0
011  0                              |  0
012  0                              |  0
013  0                              |  0
014  0                              |  0
015  0                              |  0
=========================================================================
*/
func TestRequestPeerSubscriptions(t *testing.T) {
	// the pivot address; this is the actual kademlia node
	pivotAddr := "7efef1c41d77f843ad167be95f6660567eb8a4a59f39240000cce2e0d65baf8e"

	// a map of bin number to addresses from the given kademlia
	binMap := make(map[int][]string)
	binMap[0] = []string{
		"835fbbf1d16ba7347b6e2fc552d6e982148d29c624ea20383850df3c810fa8fc",
		"81968a2d8fb39114342ee1da85254ec51e0608d7f0f6997c2a8354c260a71009",
	}
	binMap[1] = []string{
		"28f0bc1b44658548d6e05dd16d4c2fe77f1da5d48b6774bc4263b045725d0c19",
		"2690a910c33ee37b91eb6c4e0731d1d345e2dc3b46d308503a6e85bbc242c69e",
	}
	binMap[2] = []string{
		"4a45f1fc63e1a9cb9dfa44c98da2f3d20c2923e5d75ff60b2db9d1bdb0c54d51",
		"4d72a04ddeb851a68cd197ef9a92a3e2ff01fbbff638e64929dd1a9c2e150112",
	}
	binMap[3] = []string{
		"646e9540c84f6a2f9cf6585d45a4c219573b4fd1b64a3c9a1386fc5cf98c0d4d",
	}
	binMap[4] = []string{
		"7656caccdc79cd8d7ce66d415cc96a718e8271c62fb35746bfc2b49faf3eebf3",
		"76d1e83c71ca246d042e37ff1db181f2776265fbcfdc890ce230bfa617c9c2f0",
		"769ce86aa90b518b7ed382f9fdacfbed93574e18dc98fe6c342e4f9f409c2d5a",
	}
	binMap[5] = []string{
		"7a48f75f8ca60487ae42d6f92b785581b40b91f2da551ae73d5eae46640e02e8",
	}
	binMap[6] = []string{
		"7cbd42350bde8e18ae5b955b5450f8e2cef3419f92fbf5598160c60fd78619f0",
	}

	// create the pivot's kademlia
	addr := common.FromHex(pivotAddr)
	k := network.NewKademlia(addr, network.NewKadParams())

	// construct the peers and the kademlia
	for _, binaddrs := range binMap {
		for _, a := range binaddrs {
			addr := common.FromHex(a)
			k.On(network.NewPeer(&network.BzzPeer{BzzAddr: &network.BzzAddr{OAddr: addr}}, k))
		}
	}

	// TODO: check kad table is same
	// currently k.String() prints date so it will never be the same :)
	// --> implement JSON representation of kad table
	log.Debug(k.String())

	// simulate that we would do subscriptions: just store the bin numbers
	fakeSubscriptions := make(map[string][]int)
	// after the test, we need to reset the subscriptionFunc to the default
	defer func() { subscriptionFunc = doRequestSubscription }()
	// define the function which should run for each connection
	// instead of doing real subscriptions, we just store the bin numbers
	subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
		// get the peer's overlay address as a hex string
		peerstr := fmt.Sprintf("%x", p.Over())
		// create the array of bins per peer
		if _, ok := fakeSubscriptions[peerstr]; !ok {
			fakeSubscriptions[peerstr] = make([]int, 0)
		}
		// store the (fake) bin subscription
		log.Debug(fmt.Sprintf("Adding fake subscription for peer %s with bin %d", peerstr, bin))
		fakeSubscriptions[peerstr] = append(fakeSubscriptions[peerstr], int(bin))
		return true
	}
	// create just a simple Registry object in order to be able to call...
	r := &Registry{}
	r.requestPeerSubscriptions(k, nil)
	// calculate the kademlia depth
	kdepth := k.NeighbourhoodDepth()

	// now, check that all peers have the expected (fake) subscriptions
	// iterate the bin map
	for bin, peers := range binMap {
		// for every peer...
		for _, peer := range peers {
			// ...get its (fake) subscriptions
			fakeSubsForPeer := fakeSubscriptions[peer]
			// if the peer's bin is shallower than the kademlia depth...
			if bin < kdepth {
				// (iterate all (fake) subscriptions)
				for _, subbin := range fakeSubsForPeer {
					// ...only the peer's bin should be "subscribed"
					// (and thus have only one subscription)
					if subbin != bin || len(fakeSubsForPeer) != 1 {
						t.Fatalf("Did not get expected subscription for bin < depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
					}
				}
			} else { // if the peer's bin is equal to or deeper than the kademlia depth...
				// (iterate all (fake) subscriptions)
				for i, subbin := range fakeSubsForPeer {
					// ...every bin from the kademlia depth up to k.MaxProxDisplay should be "subscribed";
					// as subscriptions start at the depth, the iteration index can be used to check
					if subbin != i+kdepth {
						t.Fatalf("Did not get expected subscription for bin >= depth; bin of peer %s: %d, subscription: %d", peer, bin, subbin)
					}
					// the last "subscription" should be k.MaxProxDisplay
					if i == len(fakeSubsForPeer)-1 && subbin != k.MaxProxDisplay {
						t.Fatalf("Expected last subscription to be: %d, but is: %d", k.MaxProxDisplay, subbin)
					}
				}
			}
		}
	}
	// print some output
	for p, subs := range fakeSubscriptions {
		log.Debug(fmt.Sprintf("Peer %s has the following fake subscriptions: ", p))
		for _, bin := range subs {
			log.Debug(fmt.Sprintf("%d,", bin))
		}
	}
}

// TestGetSubscriptions is a unit test for the api.GetPeerSubscriptions() function
func TestGetSubscriptions(t *testing.T) {
	// create a number of dummy peers
	testPeerCount := 8
	// every peer will have this number of dummy servers
	testServerCount := 4
	// the peerMap which will store this data for the registry
	peerMap := make(map[enode.ID]*Peer)
	// create the registry
	r := &Registry{}
	api := NewAPI(r)
	// call once; at this point the subscriptions should be empty
	regs := api.GetPeerSubscriptions()
	if len(regs) != 0 {
		t.Fatal("Expected subscription count to be 0, but it is not")
	}

	// now create a number of dummy servers for each node
	for i := 0; i < testPeerCount; i++ {
		addr := network.RandomAddr()
		id := addr.ID()
		p := &Peer{}
		p.servers = make(map[Stream]*server)
		for k := 0; k < testServerCount; k++ {
			s := Stream{
				Name: strconv.Itoa(k),
				Key:  "",
				Live: false,
			}
			p.servers[s] = &server{}
		}
		peerMap[id] = p
	}
	r.peers = peerMap

	// call the subscriptions again
	regs = api.GetPeerSubscriptions()
	// count how many (fake) subscriptions there are
	cnt := 0
	for _, reg := range regs {
		for range reg {
			cnt++
		}
	}
	// check expected value
	expectedCount := testPeerCount * testServerCount
	if cnt != expectedCount {
		t.Fatalf("Expected %d subscriptions, but got %d", expectedCount, cnt)
	}
}

/*
TestGetSubscriptionsRPC sets up a simulation network of `nodeCount` nodes,
starts the simulation, waits for SyncUpdateDelay in order to kick off
stream registration, then tests that there are subscriptions.
*/
func TestGetSubscriptionsRPC(t *testing.T) {
	// arbitrarily set to 4
	nodeCount := 4
	// run with more nodes if `longrunning` flag is set
	if *longrunning {
		nodeCount = 64
	}
	// set the syncUpdateDelay for sync registrations to start
	syncUpdateDelay := 200 * time.Millisecond
	// holds the msg code for SubscribeMsg
	var subscribeMsgCode uint64
	var ok bool
	var expectedMsgCount counter

	// this channel signals that the expected number of subscriptions is done
	allSubscriptionsDone := make(chan struct{})
	// after the test, we need to reset the subscriptionFunc to the default
	defer func() { subscriptionFunc = doRequestSubscription }()

	// we use this subscriptionFunc for this test: it just increments the count and calls the actual subscription
	subscriptionFunc = func(r *Registry, p *network.Peer, bin uint8, subs map[enode.ID]map[Stream]struct{}) bool {
		expectedMsgCount.inc()
		doRequestSubscription(r, p, bin, subs)
		return true
	}
	// create a standard sim
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
			if err != nil {
				return nil, nil, err
			}

			// configure so that sync registrations actually happen
			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval:       RetrievalEnabled,
				Syncing:         SyncingAutoSubscribe, // enable sync registrations
				SyncUpdateDelay: syncUpdateDelay,
			}, nil)

			// get the SubscribeMsg code
			subscribeMsgCode, ok = r.GetSpec().GetCode(SubscribeMsg{})
			if !ok {
				t.Fatal("Message code for SubscribeMsg not found")
			}

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	// upload a snapshot
	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	// setup the filter for SubscribeMsg
	msgs := sim.PeerEvents(
		context.Background(),
		sim.NodeIDs(),
		simulation.NewPeerEventsFilter().ReceivedMessages().Protocol("stream").MsgCode(subscribeMsgCode),
	)

	// strategy: listen to all SubscribeMsg events; after every event we wait.
	// if no more messages are received within `waitDuration`, we assume the
	// subscription phase has terminated!

	// the loop in this go routine will either wait for new message events
	// or time out after `waitDuration`, which signals that we are not
	// receiving any new subscriptions any more
	go func() {
		// for long running sims, waiting 1 sec will not be enough,
		// so scale the wait with the node count, with a 1 sec minimum
		waitDuration := time.Second
		if d := time.Duration(nodeCount/16) * time.Second; d > waitDuration {
			waitDuration = d
		}
		for {
			select {
			case <-ctx.Done():
				return
			case m := <-msgs: // just reset the loop
				if m.Error != nil {
					log.Error("stream message", "err", m.Error)
					continue
				}
				log.Trace("stream message", "node", m.NodeID, "peer", m.PeerID)
			case <-time.After(waitDuration):
				// no message within waitDuration; don't assume more subscriptions
				allSubscriptionsDone <- struct{}{}
				log.Info("All subscriptions received")
				return
			}
		}
	}()

	// run the simulation
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		log.Info("Simulation running")
		nodes := sim.Net.Nodes

		// wait until all subscriptions are done
		select {
		case <-allSubscriptionsDone:
		case <-ctx.Done():
			return errors.New("context timed out")
		}

		log.Debug("Expected message count: ", "expectedMsgCount", expectedMsgCount.count())
		// now iterate again, this time calling each node via RPC to get its subscriptions
		realCount := 0
		for _, node := range nodes {
			// create the rpc client
			client, err := node.Client()
			if err != nil {
				return fmt.Errorf("create node rpc client fail: %v", err)
			}

			// ask it for subscriptions
			pstreams := make(map[string][]string)
			err = client.Call(&pstreams, "stream_getPeerSubscriptions")
			if err != nil {
				return fmt.Errorf("client call stream_getPeerSubscriptions: %v", err)
			}
			// the number of subscriptions cannot be smaller than the number of peers
			log.Debug("node subscriptions", "node", node.String())
			for p, ps := range pstreams {
				log.Debug("... with", "peer", p)
				for _, s := range ps {
					log.Debug(".......", "stream", s)
					// each node also has subscriptions to RETRIEVE_REQUEST streams;
					// we need to ignore those, we are only counting SYNC streams
					if !strings.HasPrefix(s, "RETRIEVE_REQUEST") {
						realCount++
					}
				}
			}
		}
		// nodes subscribe to each other mutually, so every subscription is
		// counted twice; half the real count should match the expected count
		emc := expectedMsgCount.count()
		if realCount/2 != emc {
			return fmt.Errorf("real subscriptions and expected amount don't match; real: %d, expected: %d", realCount/2, emc)
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

// counter is used to concurrently increment
// and read an integer value.
type counter struct {
	v  int
	mu sync.RWMutex
}

// inc increments the counter.
func (c *counter) inc() {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.v++
}

// count reads the counter value.
func (c *counter) count() int {
	c.mu.RLock()
	defer c.mu.RUnlock()

	return c.v
}