github.com/shyftnetwork/go-empyrean@v1.8.3-0.20191127201940-fbfca9338f04/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ShyftNetwork/go-empyrean/node"
	"github.com/ShyftNetwork/go-empyrean/p2p/enode"
	"github.com/ShyftNetwork/go-empyrean/p2p/simulations/adapters"
	"github.com/ShyftNetwork/go-empyrean/swarm/log"
	"github.com/ShyftNetwork/go-empyrean/swarm/network"
	"github.com/ShyftNetwork/go-empyrean/swarm/network/simulation"
	"github.com/ShyftNetwork/go-empyrean/swarm/state"
	"github.com/ShyftNetwork/go-empyrean/swarm/storage"
)

//constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)
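//Note: the random file generator defined further down in this file is
//assumed (as in the upstream go-ethereum version of this test) to
//interpret these sizes in kB.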

//TestFileRetrieval is a file retrieval test for nodes.
//A configurable number of nodes can be provided to the test;
//files are uploaded to the nodes, and other nodes try to retrieve them.
//The number of nodes can also be provided via the command line.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}
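
//A usage sketch (the -nodes and -longrunning flags are assumed to be defined
//and parsed in this package's shared test setup, as in upstream go-ethereum):
//
//	go test ./swarm/network/stream -run TestFileRetrieval -nodes 32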

//TestRetrieval is a chunk retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be provided to the test;
//the given number of chunks is uploaded to the pivot node, and the other
//nodes try to retrieve the chunk(s).
//The number of chunks and nodes can also be provided via the command line.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via commandline,
	//run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				err := runRetrievalTest(c, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}
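
//A usage sketch (the -nodes and -chunks flags are assumed to be defined and
//parsed in this package's shared test setup, as in upstream go-ethereum):
//
//	go test ./swarm/network/stream -run TestRetrieval -nodes 16 -chunks 32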

var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": retrievalStreamerFunc,
}

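//retrievalStreamerFunc constructs the streamer service for one simulated
//node: it wires the node's LocalStore into a NetStore, sets up Kademlia-based
//delivery, and returns a stream Registry (with retrieval enabled and
//auto-subscribe syncing) together with a cleanup function.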
func retrievalStreamerFunc(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
	n := ctx.Config.Node()
	addr := network.NewAddr(n)
	store, datadir, err := createTestLocalStorageForID(n.ID(), addr)
	if err != nil {
		return nil, nil, err
	}
	bucket.Store(bucketKeyStore, store)

	localStore := store.(*storage.LocalStore)
	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, nil, err
	}
	kad := network.NewKademlia(addr.Over(), network.NewKadParams())
	delivery := NewDelivery(kad, netStore)
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

	r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
		Retrieval:       RetrievalEnabled,
		Syncing:         SyncingAutoSubscribe,
		SyncUpdateDelay: 3 * time.Second,
	}, nil)

	fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
	bucket.Store(bucketKeyFileStore, fileStore)

	cleanup = func() {
		//close the services before removing their backing datadir
		r.Close()
		netStore.Close()
		os.RemoveAll(datadir)
	}

	return r, cleanup, nil
}

/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
Kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
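
/*
uploadFilesToNodes is defined outside this excerpt. A minimal sketch of its
expected shape, assuming the upstream go-ethereum implementation (the strings
import and the generateRandomFile helper are assumptions here): every up node
gets one random file uploaded to its FileStore, and the root hashes plus the
file contents are returned.

	func uploadFilesToNodes(sim *simulation.Simulation) ([]storage.Address, []string, error) {
		nodes := sim.UpNodeIDs()
		rfiles := make([]string, len(nodes))
		rootAddrs := make([]storage.Address, len(nodes))
		for i, id := range nodes {
			//fetch the FileStore that retrievalStreamerFunc put into the bucket
			item, ok := sim.NodeItem(id, bucketKeyFileStore)
			if !ok {
				return nil, nil, fmt.Errorf("no filestore")
			}
			fileStore := item.(*storage.FileStore)
			//generate a random file, store it, then wait for its chunks to be stored
			rf, err := generateRandomFile()
			if err != nil {
				return nil, nil, err
			}
			rfiles[i] = rf
			rk, wait, err := fileStore.Store(context.TODO(), strings.NewReader(rf), int64(len(rf)), false)
			if err != nil {
				return nil, nil, err
			}
			if err := wait(context.TODO()); err != nil {
				return nil, nil, err
			}
			rootAddrs[i] = rk
		}
		return rootAddrs, rfiles, nil
	}
*/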

/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
Kademlia network. Nevertheless, a health check runs in the
simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := n.Bytes()
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on overlay addr,
			//the p2p/simulations check func triggers on enode.ID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//this is the node selected for upload
		uploadNode := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(uploadNode.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore")
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(uploadNode.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// Chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				//check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				//check all chunks
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					//check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and chunks found, exit the loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
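
/*
uploadFileToSingleNodeStore is likewise defined outside this excerpt. A
minimal sketch, assuming the upstream go-ethereum implementation (the
swarm/testutil import is an assumption here): chunkCount pseudo-random chunks
of chunkSize bytes are stored through a FileStore wrapped around the pivot
node's LocalStore, and their root hashes are returned.

	func uploadFileToSingleNodeStore(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
		fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
		var rootAddrs []storage.Address
		for i := 0; i < chunkCount; i++ {
			//store one chunk's worth of pseudo-random data and wait until it is chunked and stored
			rk, wait, err := fileStore.Store(context.TODO(), testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
			if err != nil {
				return nil, err
			}
			if err := wait(context.TODO()); err != nil {
				return nil, err
			}
			rootAddrs = append(rootAddrs, rk)
		}
		return rootAddrs, nil
	}
*/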