github.com/FUSIONFoundation/efsn@v3.6.2-0.20200916075423-dbb5dd5d2cc7+incompatible/swarm/network/stream/snapshot_retrieval_test.go (about)

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  package stream
    17  
    18  import (
    19  	"context"
    20  	"fmt"
    21  	"os"
    22  	"sync"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/FusionFoundation/efsn/node"
    27  	"github.com/FusionFoundation/efsn/p2p/discover"
    28  	"github.com/FusionFoundation/efsn/p2p/simulations/adapters"
    29  	"github.com/FusionFoundation/efsn/swarm/log"
    30  	"github.com/FusionFoundation/efsn/swarm/network"
    31  	"github.com/FusionFoundation/efsn/swarm/network/simulation"
    32  	"github.com/FusionFoundation/efsn/swarm/state"
    33  	"github.com/FusionFoundation/efsn/swarm/storage"
    34  )
    35  
    36  //constants for random file generation
    37  const (
    38  	minFileSize = 2
    39  	maxFileSize = 40
    40  )
    41  
     42  //TestFileRetrieval is a file retrieval test for nodes.
     43  //A configurable number of nodes can be
     44  //provided to the test.
     45  //Files are uploaded to nodes; other nodes then try to retrieve them.
     46  //The number of nodes can also be provided via the command line.
    47  func TestFileRetrieval(t *testing.T) {
    48  	if *nodes != 0 {
    49  		err := runFileRetrievalTest(*nodes)
    50  		if err != nil {
    51  			t.Fatal(err)
    52  		}
    53  	} else {
    54  		nodeCnt := []int{16}
    55  		//if the `longrunning` flag has been provided
    56  		//run more test combinations
    57  		if *longrunning {
    58  			nodeCnt = append(nodeCnt, 32, 64, 128)
    59  		}
    60  		for _, n := range nodeCnt {
    61  			err := runFileRetrievalTest(n)
    62  			if err != nil {
    63  				t.Fatal(err)
    64  			}
    65  		}
    66  	}
    67  }
    68  
     69  //TestRetrieval is a chunk retrieval test for nodes.
     70  //One node is randomly selected to be the pivot node.
     71  //A configurable number of chunks and nodes can be
     72  //provided to the test: the given number of chunks is uploaded
     73  //to the pivot node, and the other nodes try to retrieve the chunk(s).
     74  //The number of chunks and nodes can also be provided via the command line.
    75  func TestRetrieval(t *testing.T) {
    76  	//if nodes/chunks have been provided via commandline,
    77  	//run the tests with these values
    78  	if *nodes != 0 && *chunks != 0 {
    79  		err := runRetrievalTest(*chunks, *nodes)
    80  		if err != nil {
    81  			t.Fatal(err)
    82  		}
    83  	} else {
    84  		var nodeCnt []int
    85  		var chnkCnt []int
    86  		//if the `longrunning` flag has been provided
    87  		//run more test combinations
    88  		if *longrunning {
    89  			nodeCnt = []int{16, 32, 128}
    90  			chnkCnt = []int{4, 32, 256}
    91  		} else {
    92  			//default test
    93  			nodeCnt = []int{16}
    94  			chnkCnt = []int{32}
    95  		}
    96  		for _, n := range nodeCnt {
    97  			for _, c := range chnkCnt {
    98  				err := runRetrievalTest(c, n)
    99  				if err != nil {
   100  					t.Fatal(err)
   101  				}
   102  			}
   103  		}
   104  	}
   105  }
   106  
    107  /*
    108  runFileRetrievalTest loads a snapshot file to construct the swarm network,
    109  assuming that the snapshot file identifies a healthy
    110  kademlia network. Nevertheless, a health check runs in the
    111  simulation's `action` function. Files are then uploaded to the nodes,
    112  and every node has to retrieve all of them.
    113  
    114  The snapshot should have 'streamer' in its service list.
    115  */
   116  func runFileRetrievalTest(nodeCount int) error {
   117  	sim := simulation.New(map[string]simulation.ServiceFunc{
   118  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   119  
   120  			id := ctx.Config.ID
   121  			addr := network.NewAddrFromNodeID(id)
   122  			store, datadir, err := createTestLocalStorageForID(id, addr)
   123  			if err != nil {
   124  				return nil, nil, err
   125  			}
   126  			bucket.Store(bucketKeyStore, store)
   127  
   128  			localStore := store.(*storage.LocalStore)
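         			//wrap the local store in a NetStore, so that chunks which are not found
         			//locally can be fetched from the network on retrieval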
   129  			netStore, err := storage.NewNetStore(localStore, nil)
   130  			if err != nil {
   131  				return nil, nil, err
   132  			}
   133  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   134  			delivery := NewDelivery(kad, netStore)
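         			//hook the netstore's fetcher factory up to the delivery, so that chunks
         			//missing locally are requested from connected peers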
   135  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   136  
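         			//create the streamer registry with syncing enabled; SyncUpdateDelay
         			//throttles how quickly sync subscriptions react to kademlia changes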
   137  			r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   138  				DoSync:          true,
   139  				SyncUpdateDelay: 3 * time.Second,
   140  			})
   141  
   142  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   143  			bucket.Store(bucketKeyFileStore, fileStore)
   144  
   145  			cleanup = func() {
   146  				os.RemoveAll(datadir)
   147  				netStore.Close()
   148  				r.Close()
   149  			}
   150  
   151  			return r, cleanup, nil
   152  
   153  		},
   154  	})
   155  	defer sim.Close()
   156  
   157  	log.Info("Initializing test config")
   158  
   159  	conf := &synctestConfig{}
   160  	//map of discover ID to indexes of chunks expected at that ID
   161  	conf.idToChunksMap = make(map[discover.NodeID][]int)
   162  	//map of overlay address to discover ID
   163  	conf.addrToIDMap = make(map[string]discover.NodeID)
    164  	//slice where the root hashes of the uploaded files will be stored
   165  	conf.hashes = make([]storage.Address, 0)
   166  
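         	//load a pre-generated network topology; the snapshot is expected to contain
         	//nodeCount nodes, each running the 'streamer' service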
   167  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   168  	if err != nil {
   169  		return err
   170  	}
   171  
   172  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
   173  	defer cancelSimRun()
   174  
   175  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   176  		nodeIDs := sim.UpNodeIDs()
   177  		for _, n := range nodeIDs {
   178  			//get the kademlia overlay address from this ID
   179  			a := network.ToOverlayAddr(n.Bytes())
   180  			//append it to the array of all overlay addresses
   181  			conf.addrs = append(conf.addrs, a)
   182  			//the proximity calculation is on overlay addr,
   183  			//the p2p/simulations check func triggers on discover.NodeID,
   184  			//so we need to know which overlay addr maps to which nodeID
   185  			conf.addrToIDMap[string(a)] = n
   186  		}
   187  
    188  		//holds the contents of the randomly generated files,
    189  		//used later to verify the size of the retrieved data
    190  		var randomFiles []string
   193  
   194  		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
   195  		if err != nil {
   196  			return err
   197  		}
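         		//wait until the kademlia tables of all nodes are considered healthy
         		//before starting the retrieval checks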
   198  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   199  			return err
   200  		}
   201  
    202  		// The file retrieval check is repeated until all uploaded files can be
    203  		// retrieved from every node, or until the simulation context times out.
    204  		allSuccess := false
    205  		for !allSuccess {
    206  			allSuccess = true
    207  			for _, id := range nodeIDs {
    208  				//check on the node's FileStore (netstore)
    209  				item, ok := sim.NodeItem(id, bucketKeyFileStore)
    210  				if !ok {
    211  					return fmt.Errorf("No filestore")
    212  				}
    213  				fileStore := item.(*storage.FileStore)
    214  				//check that every uploaded file can be retrieved through this node
    215  				for i, hash := range conf.hashes {
    216  					reader, _ := fileStore.Retrieve(context.TODO(), hash)
    217  					//check that we can read the file size and that it corresponds to the generated file size
    218  					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
    219  						allSuccess = false
    220  						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
    221  					} else {
    222  						log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
    223  					}
    224  				}
    225  			}
    226  			if !allSuccess {
    227  				//yield before the next round so that the simulation timeout can fire
    228  				select {
    229  				case <-ctx.Done():
    230  					return ctx.Err()
    231  				case <-time.After(100 * time.Millisecond):
    232  				}
    233  			}
    234  		}
    235  		return nil
   247  	})
   248  
   249  	if result.Error != nil {
   250  		return result.Error
   251  	}
   252  
   253  	return nil
   254  }
   255  
    256  /*
    257  runRetrievalTest generates the given number of chunks, uploads them to a
    258  single randomly chosen node, and verifies that every node can retrieve them.
    259  
    260  The test loads a snapshot file to construct the swarm network, assuming that
    261  the snapshot file identifies a healthy kademlia network. Nevertheless, a
    262  health check runs in the simulation's `action` function.
    263  
    264  The snapshot should have 'streamer' in its service list.
    265  */
   266  func runRetrievalTest(chunkCount int, nodeCount int) error {
   267  	sim := simulation.New(map[string]simulation.ServiceFunc{
   268  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   269  
   270  			id := ctx.Config.ID
   271  			addr := network.NewAddrFromNodeID(id)
   272  			store, datadir, err := createTestLocalStorageForID(id, addr)
   273  			if err != nil {
   274  				return nil, nil, err
   275  			}
   276  			bucket.Store(bucketKeyStore, store)
   277  
   278  			localStore := store.(*storage.LocalStore)
   279  			netStore, err := storage.NewNetStore(localStore, nil)
   280  			if err != nil {
   281  				return nil, nil, err
   282  			}
   283  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   284  			delivery := NewDelivery(kad, netStore)
   285  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   286  
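         			//unlike the file retrieval test above, sync subscription updates are
         			//applied immediately (SyncUpdateDelay is zero)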
   287  			r := NewRegistry(addr, delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   288  				DoSync:          true,
   289  				SyncUpdateDelay: 0,
   290  			})
   291  
   292  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   293  			bucketKeyFileStore = simulation.BucketKey("filestore")
   294  			bucket.Store(bucketKeyFileStore, fileStore)
   295  
   296  			cleanup = func() {
   297  				os.RemoveAll(datadir)
   298  				netStore.Close()
   299  				r.Close()
   300  			}
   301  
   302  			return r, cleanup, nil
   303  
   304  		},
   305  	})
   306  	defer sim.Close()
   307  
   308  	conf := &synctestConfig{}
   309  	//map of discover ID to indexes of chunks expected at that ID
   310  	conf.idToChunksMap = make(map[discover.NodeID][]int)
   311  	//map of overlay address to discover ID
   312  	conf.addrToIDMap = make(map[string]discover.NodeID)
    313  	//slice where the root hashes of the uploaded chunks will be stored
   314  	conf.hashes = make([]storage.Address, 0)
   315  
   316  	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
   317  	if err != nil {
   318  		return err
   319  	}
   320  
    321  	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
         	defer cancelSimRun()
   322  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   323  		nodeIDs := sim.UpNodeIDs()
   324  		for _, n := range nodeIDs {
   325  			//get the kademlia overlay address from this ID
   326  			a := network.ToOverlayAddr(n.Bytes())
   327  			//append it to the array of all overlay addresses
   328  			conf.addrs = append(conf.addrs, a)
   329  			//the proximity calculation is on overlay addr,
   330  			//the p2p/simulations check func triggers on discover.NodeID,
   331  			//so we need to know which overlay addr maps to which nodeID
   332  			conf.addrToIDMap[string(a)] = n
   333  		}
   334  
   337  		//this is the node selected for upload
   338  		node := sim.RandomUpNode()
   339  		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
   340  		if !ok {
   341  			return fmt.Errorf("No localstore")
   342  		}
   343  		lstore := item.(*storage.LocalStore)
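         		//upload chunkCount pieces of random content to the selected node's local
         		//store; the returned root hashes are what every node must later retrieve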
   344  		conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
   345  		if err != nil {
   346  			return err
   347  		}
   348  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   349  			return err
   350  		}
   351  
    352  		// The chunk retrieval check is repeated until the content behind every
    353  		// uploaded root hash can be retrieved from every node, or until the timeout is reached.
    354  		allSuccess := false
    355  		for !allSuccess {
    356  			allSuccess = true
    357  			for _, id := range nodeIDs {
    358  				//check on the node's FileStore (netstore)
    359  				item, ok := sim.NodeItem(id, bucketKeyFileStore)
    360  				if !ok {
    361  					return fmt.Errorf("No filestore")
    362  				}
    363  				fileStore := item.(*storage.FileStore)
    364  				//check all uploaded root hashes
    365  				for _, hash := range conf.hashes {
    366  					reader, _ := fileStore.Retrieve(context.TODO(), hash)
    367  					//check that the content behind this root hash can be retrieved and has
    368  					//a non-zero size; the exact generated size is not tracked in this test
    369  					if s, err := reader.Size(ctx, nil); err != nil || s <= 0 {
    370  						allSuccess = false
    371  						log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
    372  					} else {
    373  						log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
    374  					}
    375  				}
    376  			}
    377  			if !allSuccess {
    378  				//yield before the next round so that the timeout can fire
    379  				select {
    380  				case <-ctx.Done():
    381  					return ctx.Err()
    382  				case <-time.After(100 * time.Millisecond):
    383  				}
    384  			}
    385  		}
    386  		return nil
   397  	})
   398  
   399  	if result.Error != nil {
   400  		return result.Error
   401  	}
   402  
   403  	return nil
   404  }