github.com/aaa256/atlantis@v0.0.0-20210707112435-42ee889287a2/swarm/network/stream/syncer_test.go

// Copyright 2018 The go-athereum Authors
// This file is part of the go-athereum library.
//
// The go-athereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-athereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-athereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"sync"
	"testing"
	"time"

	"github.com/athereum/go-athereum/common"
	"github.com/athereum/go-athereum/p2p/discover"
	"github.com/athereum/go-athereum/p2p/simulations"
	"github.com/athereum/go-athereum/rpc"
	"github.com/athereum/go-athereum/swarm/log"
	"github.com/athereum/go-athereum/swarm/network"
	streamTesting "github.com/athereum/go-athereum/swarm/network/stream/testing"
	"github.com/athereum/go-athereum/swarm/storage"
)

const dataChunkCount = 200

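// TestSyncerSimulation runs the syncer test on simulated networks of 2, 4, 8
// and 16 nodes, each time syncing dataChunkCount chunks in proximity order
// bin 1 with skipCheck enabled.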
func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}

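// createMockStore builds a LocalStore for the given node that is backed by
// the global mock store, using a fresh temporary data directory which is
// recorded in datadirs for later cleanup.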
func createMockStore(id discover.NodeID, addr *network.BzzAddr) (storage.ChunkStore, error) {
	var err error
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()
	datadirs[id], err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, err
	}
	params.Init(datadirs[id])
	params.BaseKey = addr.Over()
	lstore, err := storage.NewLocalStore(params, mockStore)
	return lstore, err
}

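// testSyncBetweenNodes sets up a simulation of `nodes` nodes, subscribes each
// node to the SYNC stream of its upstream peer, uploads chunkCount random
// chunks into the stores of nodes 1..nodes-1 and expects the pivot node
// (sim.IDs[0]) to pass the sync check below.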
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
	defer setDefaultSkipCheck(defaultSkipCheck)
	defaultSkipCheck = skipCheck
	// data directories for each node and store
	datadirs = make(map[discover.NodeID]string)
	if *useMockStore {
		createStoreFunc = createMockStore
		createGlobalStore()
	} else {
		createStoreFunc = createTestLocalStorageFromSim
	}
	defer datadirsCleanup()

	registries = make(map[discover.NodeID]*TestRegistry)
	toAddr = func(id discover.NodeID) *network.BzzAddr {
		addr := network.NewAddrFromNodeID(id)
		// hack to put addresses in the same address space
		addr.OAddr[0] = byte(0)
		return addr
	}
	conf := &streamTesting.RunConfig{
		Adapter:         *adapter,
		NodeCount:       nodes,
		ConnLevel:       conns,
		ToAddr:          toAddr,
		Services:        services,
		EnableMsgEvents: false,
	}
	// HACK: these are global variables in the test so that they are available for
	// the service constructor function
	// TODO: will this work with exec/docker adapter?
	// localstore of nodes made available for action and check calls
	stores = make(map[discover.NodeID]storage.ChunkStore)
	deliveries = make(map[discover.NodeID]*Delivery)
	// create context for simulation run
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel should come before defer simulation teardown
	defer cancel()

	// create simulation network with the config
	sim, teardown, err := streamTesting.NewSimulation(conf)
	var rpcSubscriptionsWg sync.WaitGroup
	defer func() {
		rpcSubscriptionsWg.Wait()
		teardown()
	}()
	if err != nil {
		t.Fatal(err.Error())
	}

	nodeIndex := make(map[discover.NodeID]int)
	for i, id := range sim.IDs {
		nodeIndex[id] = i
		if !*useMockStore {
			stores[id] = sim.Stores[i]
			sim.Stores[i] = stores[id]
		}
	}
	// peerCount function gives the number of peer connections for a nodeID
	// this is needed for the service run function to wait until
	// each protocol instance runs and the streamer peers are available
	peerCount = func(id discover.NodeID) int {
		if sim.IDs[0] == id || sim.IDs[nodes-1] == id {
			return 1
		}
		return 2
	}
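	// waitPeerErrC is the global channel the node services use to signal, once
	// per node, whether waiting for their streamer peers succeeded; the action
	// below reads one value per node before subscribing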
	waitPeerErrC = make(chan error)

	// create DBAPIs for all nodes
	dbs := make([]*storage.DBAPI, nodes)
	for i := 0; i < nodes; i++ {
		dbs[i] = storage.NewDBAPI(sim.Stores[i].(*storage.LocalStore))
	}

	// collect the hashes in the given po bin for each node
	hashes := make([][]storage.Address, nodes)
	totalHashes := 0
	hashCounts := make([]int, nodes)
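	// walk the nodes from the last one backwards; hashCounts[i] starts from the
	// count of the node after it, so the counts accumulate towards the pivot node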
	for i := nodes - 1; i >= 0; i-- {
		if i < nodes-1 {
			hashCounts[i] = hashCounts[i+1]
		}
		dbs[i].Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
			hashes[i] = append(hashes[i], addr)
			totalHashes++
			hashCounts[i]++
			return true
		})
	}

	// errc is the error channel for the simulation
	errc := make(chan error, 1)
	quitC := make(chan struct{})
	defer close(quitC)

	// action: subscribe each node to the SYNC stream of its upstream peer,
	// then upload the test chunks
	action := func(ctx context.Context) error {
		// need to wait until an asynchronous process registers the peers in streamer.peers
		// that is used by Subscribe
		// the global peerCount function tells how many connections each node has
		// TODO: this is to be reimplemented with peerEvent watcher without global var
		i := 0
		for err := range waitPeerErrC {
			if err != nil {
				return fmt.Errorf("error waiting for peers: %s", err)
			}
			i++
			if i == nodes {
				break
			}
		}
		// each node except the last subscribes to the SYNC stream of the next (upstream) node
		for j := 0; j < nodes-1; j++ {
			id := sim.IDs[j]
			sim.Stores[j] = stores[id]
			err := sim.CallClient(id, func(client *rpc.Client) error {
				// report disconnect events to the error channel because peers should not disconnect
				doneC, err := streamTesting.WatchDisconnections(id, client, errc, quitC)
				if err != nil {
					return err
				}
				rpcSubscriptionsWg.Add(1)
				go func() {
					<-doneC
					rpcSubscriptionsWg.Done()
				}()
				ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
				defer cancel()
				// start syncing, i.e., subscribe to the upstream peer's po 1 bin
				sid := sim.IDs[j+1]
				return client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
			})
			if err != nil {
				return err
			}
		}
		// here we distribute chunks of a random file into stores 1...nodes
		rrFileStore := storage.NewFileStore(newRoundRobinStore(sim.Stores[1:]...), storage.NewFileStoreParams())
		size := chunkCount * chunkSize
		_, wait, err := rrFileStore.Store(io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return err
		}
		// need to wait because we then immediately collect the relevant bin content
		wait()

		return nil
	}

	// check returns whether the node with the given id has stored all chunks
	// collected for it and for the nodes after it in sim.IDs; it blocks while a
	// chunk is still being fetched, which also makes sure check is not called
	// before the previous call finishes
	check := func(ctx context.Context, id discover.NodeID) (bool, error) {
		select {
		case err := <-errc:
			return false, err
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}

		i := nodeIndex[id]
		var total, found int

		for j := i; j < nodes; j++ {
			total += len(hashes[j])
			for _, key := range hashes[j] {
				chunk, err := dbs[i].Get(key)
				if err == storage.ErrFetching {
					<-chunk.ReqC
				} else if err != nil {
					continue
				}
				// needed for leveldb not to be closed?
				// chunk.WaitToStore()
				found++
			}
		}
		log.Debug("sync check", "node", id, "index", i, "bin", po, "found", found, "total", total)
		return total == found, nil
	}

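	// the step runs the action above, triggers the check every 500ms on every
	// node except the last one, and expects only the pivot node sim.IDs[0] to
	// satisfy it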
	conf.Step = &simulations.Step{
		Action:  action,
		Trigger: streamTesting.Trigger(500*time.Millisecond, quitC, sim.IDs[0:nodes-1]...),
		Expect: &simulations.Expectation{
			Nodes: sim.IDs[0:1],
			Check: check,
		},
	}
	startedAt := time.Now()
	result, err := sim.Run(ctx, conf)
	finishedAt := time.Now()
	if err != nil {
		t.Fatalf("Setting up simulation failed: %v", err)
	}
	if result.Error != nil {
		t.Fatalf("Simulation failed: %s", result.Error)
	}
	streamTesting.CheckResult(t, result, startedAt, finishedAt)
}