github.com/letterj/go-ethereum@v1.8.22-0.20190204142846-520024dfd689/swarm/network/stream/syncer_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

const dataChunkCount = 200

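// TestSyncerSimulation checks chunk syncing over chains of 2, 4, 8 and 16
// nodes, each run seeding dataChunkCount chunks and verifying sync in
// proximity-order bin 1 only.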
func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 4, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 8, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 16, dataChunkCount, true, 1)
}

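// createMockStore backs a node's local store with a shared global mock store,
// so that chunks written by any node can be inspected centrally. It is not
// called in this file; a minimal usage sketch (assuming the in-memory global
// store from swarm/storage/mock/mem, and hypothetical id/addr values supplied
// by the caller):
//
//	globalStore := mem.NewGlobalStore()
//	lstore, datadir, err := createMockStore(globalStore, id, addr)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer os.RemoveAll(datadir) // the caller owns the temp directory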
func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()

	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, "", err
	}
	params.Init(datadir)
	params.BaseKey = addr.Over()
	lstore, err = storage.NewLocalStore(params, mockStore)
	if err != nil {
		return nil, "", err
	}
	return lstore, datadir, nil
}

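// testSyncBetweenNodes sets up a chain of `nodes` streamer nodes, seeds
// chunkCount chunks of random data on the upstream nodes and verifies that
// the chunks in bin `po` are synced between the nodes.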
func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr := network.NewAddr(ctx.Config.Node())
			// hack to put all addresses in the same address space
			addr.OAddr[0] = byte(0)

			netStore, delivery, clean, err := newNetStoreAndDeliveryWithBzzAddr(ctx, bucket, addr)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
				SkipCheck: skipCheck,
			}, nil)

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// create context for simulation run
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel should come before defer simulation teardown
	defer cancel()

	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()

		nodeIndex := make(map[enode.ID]int)
		for i, id := range nodeIDs {
			nodeIndex[id] = i
		}

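		// watch for peer drop events for the whole run: any drop makes the
		// sync check unreliable, so it is surfaced as an error on exit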
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Drop(),
		)

		var disconnected atomic.Value
		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
					disconnected.Store(true)
				}
			}
		}()
		defer func() {
			if err != nil {
				if yes, ok := disconnected.Load().(bool); ok && yes {
					err = errors.New("disconnect events received")
				}
			}
		}()

		// each node subscribes to the SYNC stream (bin 1) of its successor in the chain
		for j := 0; j < nodes-1; j++ {
			id := nodeIDs[j]
			client, err := sim.Net.GetNode(id).Client()
			if err != nil {
				return err
			}
			sid := nodeIDs[j+1]
			err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
			if j > 0 || nodes == 2 {
				item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				size := chunkCount * chunkSize
				_, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
				if err != nil {
					return err
				}
				if err := wait(ctx); err != nil {
					return err
				}
			}
		}
		// the loop above distributed chunks of random files into stores 1..nodes;
		// wait until the network reports itself healthy before checking sync
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// collect the hashes in bin po for each node
		hashes := make([][]storage.Address, nodes)
		totalHashes := 0
		hashCounts := make([]int, nodes)
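		// walk the chain from the last node upstream; hashCounts[i] accumulates
		// the number of hashes held by node i and everything downstream of it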
		for i := nodes - 1; i >= 0; i-- {
			if i < nodes-1 {
				hashCounts[i] = hashCounts[i+1]
			}
			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
			if !ok {
				return fmt.Errorf("no DB")
			}
			netStore := item.(*storage.NetStore)
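			// iterate over every chunk the node holds in bin po and record its address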
			netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
				hashes[i] = append(hashes[i], addr)
				totalHashes++
				hashCounts[i]++
				return true
			})
		}
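		// count how many of the collected hashes are actually retrievable
		// from the node they were collected on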
		var total, found int
		for _, node := range nodeIDs {
			i := nodeIndex[node]

			for j := i; j < nodes; j++ {
				total += len(hashes[j])
				for _, key := range hashes[j] {
					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
					if !ok {
						return fmt.Errorf("no DB")
					}
					db := item.(*storage.NetStore)
					_, err := db.Get(ctx, key)
					if err == nil {
						found++
					}
				}
			}
			log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
		}
		if total == found && total > 0 {
			return nil
		}
		return fmt.Errorf("found %d chunks out of %d total", found, total)
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}
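
// testSyncBetweenNodes is parameterised on the proximity-order bin, though the
// suite above only exercises bin 1; a hypothetical variant covering more bins
// (not part of the original suite) could look like:
//
//	func TestSyncerSimulationMoreBins(t *testing.T) {
//		for po := uint8(1); po <= 3; po++ {
//			testSyncBetweenNodes(t, 2, dataChunkCount, true, po)
//		}
//	}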

// TestSameVersionID checks that streamer peers see each other when they run
// the same protocol version.
func TestSameVersionID(t *testing.T) {
	// test version ID
	v := uint(1)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			// assign the same version ID to each node
			r.spec.Version = v

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// connect just two nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("no registry")
		}
		registry := item.(*Registry)

		// the peers should connect, so getting the peer must not return nil
		if registry.getPeer(nodes[1]) == nil {
			return errors.New("expected the peer to not be nil, but it is")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

// TestDifferentVersionID checks that peers whose streamer protocol versions
// do not match are not connected at the streamer level.
func TestDifferentVersionID(t *testing.T) {
	// create a variable to hold the version ID
	v := uint(0)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
			if err != nil {
				return nil, nil, err
			}

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)
			bucket.Store(bucketKeyRegistry, r)

			// increase the version ID for each node
			v++
			r.spec.Version = v

			cleanup = func() {
				r.Close()
				clean()
			}

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// connect the nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("no registry")
		}
		registry := item.(*Registry)

		// getting the other peer should fail due to the different version numbers
		if registry.getPeer(nodes[1]) != nil {
			return errors.New("expected the peer to be nil, but it is not")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}