github.com/nitinawathare/ethereumassignment3@v0.0.0-20211021213010-f07344c2b868/go-ethereum/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

// constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

// TestFileRetrieval is a retrieval test for nodes.
// A configurable number of nodes can be provided to the test.
// Files are uploaded to nodes, and other nodes try to retrieve them.
// The number of nodes can also be provided on the command line.
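//
// An example invocation (the -nodes and -longrunning flags are assumed to
// be registered in this package's shared test setup, e.g. common_test.go):
//
//	go test -run TestFileRetrieval -nodes=32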
func TestFileRetrieval(t *testing.T) {
	var nodeCount []int

	if *nodes != 0 {
		nodeCount = []int{*nodes}
	} else {
		nodeCount = []int{16}

		if *longrunning {
			nodeCount = append(nodeCount, 32, 64)
		} else if testutil.RaceEnabled {
			nodeCount = []int{4}
		}
	}

	for _, nc := range nodeCount {
		if err := runFileRetrievalTest(nc); err != nil {
			t.Error(err)
		}
	}
}

// TestRetrieval is a retrieval test for nodes.
// One node is randomly selected to be the pivot node.
// A configurable number of chunks and nodes can be provided to the test;
// the chunks are uploaded to the pivot node, and the other nodes try to
// retrieve them.
// The number of chunks and nodes can also be provided on the command line.
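//
// An example invocation (flag names assumed from this package's shared
// test setup):
//
//	go test -run TestRetrieval -nodes=16 -chunks=32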
func TestRetrieval(t *testing.T) {
	// if nodes/chunks have been provided via the command line,
	// run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		chnkCnt := []int{32}

		if *longrunning {
			nodeCnt = []int{16, 32, 64}
			chnkCnt = []int{4, 32, 256}
		} else if testutil.RaceEnabled {
			nodeCnt = []int{4}
			chnkCnt = []int{4}
		}

		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				t.Run(fmt.Sprintf("TestRetrieval_%d_%d", n, c), func(t *testing.T) {
					err := runRetrievalTest(t, c, n)
					if err != nil {
						t.Fatal(err)
					}
				})
			}
		}
	}
}

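// retrievalSimServiceMap registers the "streamer" service used by the
// retrieval simulations below: every simulated node runs a Registry with
// retrieval enabled and automatic sync subscriptions.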
var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
		if err != nil {
			return nil, nil, err
		}

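		// longrunning tests simulate larger networks, so allow more time
		// between sync subscription updates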
		syncUpdateDelay := 1 * time.Second
		if *longrunning {
			syncUpdateDelay = 3 * time.Second
		}

		r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
			Retrieval:       RetrievalEnabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: syncUpdateDelay,
		}, nil)

		cleanup = func() {
			r.Close()
			clean()
		}

		return r, cleanup, nil
	},
}

/*
runFileRetrievalTest loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy kademlia network.
Nevertheless, a health check runs in the simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config", "node count", nodeCount)

	conf := &synctestConfig{}
	// map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancelSimRun()

	filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
	err := sim.UploadSnapshot(ctx, filename)
	if err != nil {
		return err
	}

	log.Info("Starting simulation")

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is on the overlay addr,
			// the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which node ID
			conf.addrToIDMap[string(a)] = n
		}

		// a slice for the random files
		var randomFiles []string

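		// uploadFilesToNodes is a helper defined elsewhere in this package's
		// tests; it is assumed to upload one random file per node and return
		// the root hashes along with the generated file contents.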
		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}

		log.Info("network healthy, start file checks")

		// The file retrieval check is repeated until all uploaded files are
		// retrieved from all nodes, or until the timeout is reached.
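		// The labeled continue below restarts the scan from the first node;
		// the context passed to sim.Run enforces the overall timeout.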
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				// check all files
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					// check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			return nil
		}
	})

	log.Info("Simulation terminated")

	if result.Error != nil {
		return result.Error
	}

	return nil
}

/*
runRetrievalTest generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy kademlia network.
Nevertheless, a health check runs in the simulation's `action` function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
	t.Helper()
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	// map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	filename := fmt.Sprintf("testing/snapshot_%d.json", nodeCount)
	err := sim.UploadSnapshot(ctx, filename)
	if err != nil {
		return err
	}

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is on the overlay addr,
			// the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which node ID
			conf.addrToIDMap[string(a)] = n
		}

		// this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore")
		}
		lstore := item.(*storage.LocalStore)
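		// uploadFileToSingleNodeStore is a helper defined elsewhere in this
		// package's tests; it is assumed to store chunkCount chunks of
		// chunkSize bytes each in the selected node's local store and to
		// return their root hashes.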
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}

		// The chunk retrieval check is repeated until all uploaded chunks are
		// retrieved from all nodes, or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected chunk, check if it is in the local store
				// check on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				// check all chunks
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					// check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and chunks found, exit the loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}