github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/network/stream/snapshot_retrieval_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

// size bounds for random test file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

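// The random files themselves are produced by a helper defined elsewhere in
// this package. As a rough, self-contained sketch of what such a helper can
// look like (the name generateRandomFileSketch and the KB interpretation of
// the size bounds are assumptions for illustration, not part of the original
// file), a file of pseudo-random content sized between minFileSize and
// maxFileSize could be generated like this:
func generateRandomFileSketch(seed int) []byte {
	// derive a file size within [minFileSize, maxFileSize); a real helper
	// would typically use math/rand, this sketch derives it from the
	// (non-negative) seed to stay within the file's existing imports
	fileSize := minFileSize + seed%(maxFileSize-minFileSize)
	log.Debug(fmt.Sprintf("generating sketch file of %d kB", fileSize))
	// testutil.RandomBytes returns deterministic pseudo-random content
	return testutil.RandomBytes(seed, fileSize*1024)
}
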
// TestFileRetrieval is a file retrieval test for nodes.
// A configurable number of nodes can be provided to the test;
// files are uploaded to the nodes, and other nodes try to retrieve them.
// The number of nodes can also be provided on the command line.
func TestFileRetrieval(t *testing.T) {
	var nodeCount []int

	if *nodes != 0 {
		nodeCount = []int{*nodes}
	} else {
		nodeCount = []int{16}

		if *longrunning {
			nodeCount = append(nodeCount, 32, 64)
		} else if testutil.RaceEnabled {
			nodeCount = []int{4}
		}
	}

	for _, nc := range nodeCount {
		if err := runFileRetrievalTest(nc); err != nil {
			t.Error(err)
		}
	}
}

// TestRetrieval is a chunk retrieval test for nodes.
// One node is randomly selected to be the pivot node.
// A configurable number of chunks and nodes can be provided to the test;
// the chunks are uploaded to the pivot node, and other nodes try to
// retrieve them. The numbers of chunks and nodes can also be provided
// on the command line.
func TestRetrieval(t *testing.T) {
	// if nodes/chunks have been provided via the command line,
	// run the tests with these values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(t, *chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		chnkCnt := []int{32}

		if *longrunning {
			nodeCnt = []int{16, 32, 64}
			chnkCnt = []int{4, 32, 256}
		} else if testutil.RaceEnabled {
			nodeCnt = []int{4}
			chnkCnt = []int{4}
		}

		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				t.Run(fmt.Sprintf("TestRetrieval_%d_%d", n, c), func(t *testing.T) {
					err := runRetrievalTest(t, c, n)
					if err != nil {
						t.Fatal(err)
					}
				})
			}
		}
	}
}

var retrievalSimServiceMap = map[string]simulation.ServiceFunc{
	"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
		addr, netStore, delivery, clean, err := newNetStoreAndDelivery(ctx, bucket)
		if err != nil {
			return nil, nil, err
		}

		syncUpdateDelay := 1 * time.Second
		if *longrunning {
			syncUpdateDelay = 3 * time.Second
		}

		r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
			Retrieval:       RetrievalEnabled,
			Syncing:         SyncingAutoSubscribe,
			SyncUpdateDelay: syncUpdateDelay,
		}, nil)

		cleanup = func() {
			r.Close()
			clean()
		}

		return r, cleanup, nil
	},
}

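// The tests below build their networks from snapshot files. For local
// experiments it can be handy to build a network programmatically instead; a
// minimal sketch, assuming the simulation package's AddNodesAndConnectChain
// helper (the function name runWithoutSnapshotSketch is made up for
// illustration):
func runWithoutSnapshotSketch(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	// spawn the nodes and connect them in a chain topology
	if _, err := sim.AddNodesAndConnectChain(nodeCount); err != nil {
		return err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancel()

	// wait until the kademlia tables report the network as healthy
	if _, err := sim.WaitTillHealthy(ctx); err != nil {
		return err
	}
	return nil
}
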
/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs inside the
simulation's run function.

The snapshot should have 'streamer' in its service list.
*/
func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	log.Info("Initializing test config", "node count", nodeCount)

	conf := &synctestConfig{}
	// map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	ctx, cancelSimRun := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancelSimRun()

	log.Info("Starting simulation")

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is on the overlay addr,
			// the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		// upload a random file to every node; the contents are kept so the
		// retrieved sizes can be verified later (see the upload sketch after
		// this function)
		var randomFiles []string
		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		log.Info("network healthy, start file checks")

		// File retrieval check is repeated until all uploaded files are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected file, check if it is in the local store
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore for node %s", id)
				}
				fileStore := item.(*storage.FileStore)
				// check all files; the discarded second return value is the
				// isEncrypted flag
				for i, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					// check that we can read the file size and that it corresponds to the generated file size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and files found, exit the loop and return without error
			return nil
		}
	})

	log.Info("Simulation terminated")

	if result.Error != nil {
		return result.Error
	}

	return nil
}

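// uploadFilesToNodes is defined elsewhere in this package. A minimal sketch of
// what it plausibly does: upload one pseudo-random file per up node via that
// node's FileStore and collect the root hashes. The real helper returns the
// file contents; this sketch returns the sizes instead, and the name
// uploadFilesToNodesSketch and the use of testutil.RandomReader are
// assumptions for illustration:
func uploadFilesToNodesSketch(sim *simulation.Simulation) ([]storage.Address, []int, error) {
	nodeIDs := sim.UpNodeIDs()
	rootAddrs := make([]storage.Address, len(nodeIDs))
	sizes := make([]int, len(nodeIDs))
	for i, id := range nodeIDs {
		// fetch the FileStore that the ServiceFunc put into the bucket
		item, ok := sim.NodeItem(id, bucketKeyFileStore)
		if !ok {
			return nil, nil, fmt.Errorf("no filestore for node %s", id)
		}
		fileStore := item.(*storage.FileStore)
		// pick a per-node file size within the configured bounds (in KB)
		size := (minFileSize + i%(maxFileSize-minFileSize)) * 1024
		sizes[i] = size
		// store pseudo-random content and wait until chunking has finished
		ctx := context.TODO()
		rk, wait, err := fileStore.Store(ctx, testutil.RandomReader(i, size), int64(size), false)
		if err != nil {
			return nil, nil, err
		}
		if err := wait(ctx); err != nil {
			return nil, nil, err
		}
		rootAddrs[i] = rk
	}
	return rootAddrs, sizes, nil
}
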
/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy
kademlia network. Nevertheless, a health check runs inside the
simulation's run function.

The snapshot should have 'streamer' in its service list.
*/
func runRetrievalTest(t *testing.T, chunkCount int, nodeCount int) error {
	t.Helper()
	sim := simulation.New(retrievalSimServiceMap)
	defer sim.Close()

	conf := &synctestConfig{}
	// map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[enode.ID][]int)
	// map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]enode.ID)
	// array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

	// unlike runFileRetrievalTest, this run carries no timeout of its own;
	// it relies on the overall go test timeout instead
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			// get the kademlia overlay address from this ID
			a := n.Bytes()
			// append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			// the proximity calculation is on the overlay addr,
			// the p2p/simulations check func triggers on enode.ID,
			// so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		// this is the node selected for upload
		node := sim.Net.GetRandomUpNode()
		item, ok := sim.NodeItem(node.ID(), bucketKeyStore)
		if !ok {
			return fmt.Errorf("no localstore for node %s", node.ID())
		}
		lstore := item.(*storage.LocalStore)
		// upload the chunks to the pivot node (see the sketch after this function)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID(), chunkCount, lstore)
		if err != nil {
			return err
		}
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// Chunk retrieval check is repeated until all uploaded chunks are retrieved from all nodes
		// or until the timeout is reached.
	REPEAT:
		for {
			for _, id := range nodeIDs {
				// for each expected chunk, check if it is in the local store,
				// i.e. on the node's FileStore (netstore)
				item, ok := sim.NodeItem(id, bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore for node %s", id)
				}
				fileStore := item.(*storage.FileStore)
				// check all chunks; the discarded second return value is the
				// isEncrypted flag
				for _, hash := range conf.hashes {
					reader, _ := fileStore.Retrieve(context.TODO(), hash)
					// check that we can read the chunk size and that it corresponds to the generated chunk size
					if s, err := reader.Size(ctx, nil); err != nil || s != int64(chunkSize) {
						log.Debug("Retrieve error", "err", err, "hash", hash, "nodeId", id, "size", s)
						time.Sleep(500 * time.Millisecond)
						continue REPEAT
					}
					log.Debug(fmt.Sprintf("Chunk with root hash %x successfully retrieved", hash))
				}
			}
			// all nodes and chunks found, exit the loop and return without error
			return nil
		}
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}
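
// uploadFileToSingleNodeStore is defined elsewhere in this package. A minimal
// sketch of what it plausibly does: wrap the pivot node's LocalStore in a
// FileStore and store chunkCount pieces of pseudo-random data of exactly
// chunkSize bytes each (the name uploadFileToSingleNodeStoreSketch and the
// use of testutil.RandomReader are assumptions for illustration):
func uploadFileToSingleNodeStoreSketch(id enode.ID, chunkCount int, lstore *storage.LocalStore) ([]storage.Address, error) {
	log.Debug(fmt.Sprintf("Uploading to node id: %s", id))
	fileStore := storage.NewFileStore(lstore, storage.NewFileStoreParams())
	var rootAddrs []storage.Address
	for i := 0; i < chunkCount; i++ {
		// store one chunk-sized blob and wait for chunking/storage to finish
		ctx := context.TODO()
		rk, wait, err := fileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
		if err != nil {
			return nil, err
		}
		if err := wait(ctx); err != nil {
			return nil, err
		}
		rootAddrs = append(rootAddrs, rk)
	}
	return rootAddrs, nil
}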