github.com/codingfuture/orig-energi3@v0.8.4/swarm/network/stream/snapshot_sync_test.go

// Copyright 2018 The Energi Core Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the Energi Core library.
//
// The Energi Core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Energi Core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

const MaxTimeout = 600
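
// synctestConfig collects the state shared by one syncing test run: the
// overlay addresses of all nodes, the uploaded chunk hashes, and the
// mappings used to determine which chunks each node is expected to store.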
type synctestConfig struct {
	addrs         [][]byte
	hashes        []storage.Address
	idToChunksMap map[enode.ID][]int
	//chunksToNodesMap map[string][]int
	addrToIDMap map[string]enode.ID
}

const (
	// These are the simulation event types emitted as a chunk is created on
	// the upload node and then offered, wanted, delivered and finally stored
	// by the nodes expected to sync it, plus an event for simulation
	// termination. They are sent on the simulation network's event feed so
	// that an external observer can trace chunk flow during a run.
	EventTypeChunkCreated   simulations.EventType = "chunkCreated"
	EventTypeChunkOffered   simulations.EventType = "chunkOffered"
	EventTypeChunkWanted    simulations.EventType = "chunkWanted"
	EventTypeChunkDelivered simulations.EventType = "chunkDelivered"
	EventTypeChunkArrived   simulations.EventType = "chunkArrived"
	EventTypeSimTerminated  simulations.EventType = "simTerminated"
)

// Tests in this file must not request chunks from peers.
// dummyRequestFromPeers panics if such a request is made, indicating that
// something is wrong.
func dummyRequestFromPeers(_ context.Context, req *network.Request) (*enode.ID, chan struct{}, error) {
	panic(fmt.Sprintf("unexpected request: address %s, source %s", req.Addr.String(), req.Source.String()))
}

// TestSyncingViaGlobalSync is a syncing test for a network of simulated nodes.
// One node is randomly selected to be the pivot node.
// A configurable number of chunks is uploaded to the pivot node, and the test
// checks that the nodes get the chunks they are expected to store based on
// the syncing protocol.
// The number of chunks and nodes can also be provided via the command line.
func TestSyncingViaGlobalSync(t *testing.T) {
	if runtime.GOOS == "darwin" && os.Getenv("TRAVIS") == "true" {
		t.Skip("Flaky on mac on travis")
	}
	// if nodes/chunks have been provided via the command line,
	// run the test with these values
	if *nodes != 0 && *chunks != 0 {
		log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", *chunks, *nodes))
		testSyncingViaGlobalSync(t, *chunks, *nodes)
	} else {
		var nodeCnt []int
		var chnkCnt []int
		// if the `longrunning` flag has been provided,
		// run more test combinations
		if *longrunning {
			chnkCnt = []int{1, 8, 32, 256, 1024}
			nodeCnt = []int{16, 32, 64, 128, 256}
		} else if raceTest {
			// TestSyncingViaGlobalSync allocates a lot of memory
			// with the race detector enabled. By reducing the number
			// of chunks and nodes, memory consumption is lower and
			// data races are still checked, while correctness of
			// syncing is tested with more chunks and nodes in
			// regular (!race) tests.
			chnkCnt = []int{4}
			nodeCnt = []int{16}
		} else {
			// default test
			chnkCnt = []int{4, 32}
			nodeCnt = []int{32, 16}
		}
		for _, chnk := range chnkCnt {
			for _, n := range nodeCnt {
				log.Info(fmt.Sprintf("Running test with %d chunks and %d nodes...", chnk, n))
				testSyncingViaGlobalSync(t, chnk, n)
			}
		}
	}
}
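
// For reference, a manual run that overrides the default chunk and node
// counts could look like the line below. The -nodes, -chunks and
// -longrunning flag names are assumed to match their definitions in this
// package's shared test setup:
//
//	go test -v -run TestSyncingViaGlobalSync -nodes 16 -chunks 8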
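
// simServiceMap registers the "streamer" service used by the syncing
// simulations: it wires a NetStore and Delivery (with peer requests disabled
// via dummyRequestFromPeers) to a Registry configured for auto-subscription
// syncing with retrieval disabled.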
var simServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDeliveryWithRequestFunc(ctx, bucket, dummyRequestFromPeers)
		if err != nil {
			return nil, nil, err
		}

		var dir string
		var store *state.DBStore
		if raceTest {
			// Use on-disk DBStore to reduce memory consumption in race tests.
			dir, err = ioutil.TempDir("", "swarm-stream-")
			if err != nil {
				return nil, nil, err
			}
			store, err = state.NewDBStore(dir)
			if err != nil {
				return nil, nil, err
			}
		} else {
			store = state.NewInmemoryStore()
		}

		r := NewRegistry(addr.ID(), delivery, netStore, store, &RegistryOptions{
			Retrieval:       RetrievalDisabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: 3 * time.Second,
		}, nil)

		bucket.Store(bucketKeyRegistry, r)

		cleanup = func() {
			r.Close()
			clean()
			// remove the on-disk state store created for race tests
			if dir != "" {
				os.RemoveAll(dir)
			}
		}

		return r, cleanup, nil
	},
}

func testSyncingViaGlobalSync(t *testing.T, chunkCount int, nodeCount int) {
	sim := simulation.New(simServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	// map of discovery ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discovery ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// slice where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		t.Fatal(err)
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancelSimRun()

	if _, err := sim.WaitTillHealthy(ctx); err != nil {
		t.Fatal(err)
	}

	result := runSim(conf, ctx, sim, chunkCount)

	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}
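
// runSim uploads chunkCount chunks to a randomly selected up node, emits a
// chunkCreated event for each of them, and then polls every node's store
// (or the mock global store) until each node holds all the chunks it is
// expected to sync, or until the simulation context expires.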
func runSim(conf *synctestConfig, ctx context.Context, sim *simulation.Simulation, chunkCount int) simulation.Result {

	return sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		disconnected := watchDisconnections(ctx, sim)
		defer func() {
			if err != nil && disconnected.bool() {
				err = errors.New("disconnect events received")
			}
		}()

		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the slice of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is done on overlay addresses,
			// but the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay address maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		// pick a random up node; this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore")
		}
		lstore := item.(*storage.LocalStore)
		hashes, err := uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		for _, h := range hashes {
			evt := &simulations.Event{
				Type: EventTypeChunkCreated,
				Node: sim.Net.GetNode(node.ID()),
				Data: h.String(),
			}
			sim.Net.Events().Send(evt)
		}
		conf.hashes = append(conf.hashes, hashes...)
		mapKeysToNodes(conf)

		// The chunk presence check is repeated until all expected chunks are
		// found on all nodes, or until the timeout is reached.
		var globalStore mock.GlobalStorer
		if *useMockStore {
			globalStore = mockmem.NewGlobalStore()
		}
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				for _, ch := range localChunks {
					// get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("checking node for chunk: %s", chunk))
					// check if the expected chunk is indeed in the localstore
					var err error
					if *useMockStore {
						// use the globalStore if the mockStore should be used; in that case,
						// the complete localStore stack is bypassed for getting the chunk
						_, err = globalStore.Get(common.BytesToAddress(id.Bytes()), chunk)
					} else {
						// use the actual localstore
						item, ok := sim.NodeItem(id, bucketKeyStore)
						if !ok {
							return fmt.Errorf("error accessing localstore")
						}
						lstore := item.(*storage.LocalStore)
						_, err = lstore.Get(ctx, chunk)
					}
					if err != nil {
						log.Debug(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						// throttle the retry so the log is not flooded
						time.Sleep(500 * time.Millisecond)
						// a chunk is still missing: restart the whole check
						continue REPEAT
					}
					evt := &simulations.Event{
						Type: EventTypeChunkArrived,
						Node: sim.Net.GetNode(id),
						Data: chunk.String(),
					}
					sim.Net.Events().Send(evt)
					log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
				}
			}
			return nil
		}
	})
}

// mapKeysToNodes maps each chunk key to the overlay addresses that are
// responsible for storing it
func mapKeysToNodes(conf *synctestConfig) {
	nodemap := make(map[string][]int)
	// build a pot of the nodes' overlay addresses
	np := pot.NewPot(nil, 0)
	indexmap := make(map[string]int)
	for i, a := range conf.addrs {
		indexmap[string(a)] = i
		np, _, _ = pot.Add(np, a, pof)
	}

	ppmap := network.NewPeerPotMap(network.NewKadParams().NeighbourhoodSize, conf.addrs)

	// for each chunk hash, run EachNeighbour on the overlay address pot to
	// identify the closest node
	log.Trace(fmt.Sprintf("Generated hash chunk(s): %v", conf.hashes))
	for i := 0; i < len(conf.hashes); i++ {
		var a []byte
		np.EachNeighbour([]byte(conf.hashes[i]), pof, func(val pot.Val, po int) bool {
			// take only the first address, i.e. the closest one
			a = val.([]byte)
			return false
		})

		// the chunk is expected on its closest node and on that node's
		// nearest neighbours
		nns := ppmap[common.Bytes2Hex(a)].NNSet
		nns = append(nns, a)

		for _, p := range nns {
			nodemap[string(p)] = append(nodemap[string(p)], i)
		}
	}
	for addr, chunks := range nodemap {
		// this selects which chunks are expected to be found with the given node
		conf.idToChunksMap[conf.addrToIDMap[addr]] = chunks
	}
	log.Debug(fmt.Sprintf("Map of expected chunks by ID: %v", conf.idToChunksMap))
}

// uploadFileToSingleNodeStore uploads chunkCount single-chunk files to one
// local node store and returns their root addresses
func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	size := chunkSize
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, size), int64(size), false)
		if err != nil {
			return nil, err
		}
		err = wait(context.TODO())
		if err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}

	return rootAddrs, nil
}
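
// For reference, reading one of these files back through the same FileStore
// would look roughly like the sketch below. This assumes the Retrieve
// signature of this swarm version, which returns a lazy reader plus an
// is-encrypted flag rather than an error:
//
//	reader, _ := fileStore.Retrieve(context.TODO(), rootAddrs[0])
//	buf := make([]byte, size)
//	if _, err := reader.ReadAt(buf, 0); err != nil {
//		// the chunk data is not (yet) available in the local store
//	}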