github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/common_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	crand "crypto/rand"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"strings"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/p2p/enode"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/pot"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
	colorable "github.com/mattn/go-colorable"
)

var (
	loglevel     = flag.Int("loglevel", 2, "verbosity of logs")
	nodes        = flag.Int("nodes", 0, "number of nodes")
	chunks       = flag.Int("chunks", 0, "number of chunks")
	useMockStore = flag.Bool("mockstore", false, "use mock global store (default: disabled)")
	longrunning  = flag.Bool("longrunning", false, "do run long-running tests")

	bucketKeyDB        = simulation.BucketKey("db")
	bucketKeyStore     = simulation.BucketKey("store")
	bucketKeyFileStore = simulation.BucketKey("filestore")
	bucketKeyNetStore  = simulation.BucketKey("netstore")
	bucketKeyDelivery  = simulation.BucketKey("delivery")
	bucketKeyRegistry  = simulation.BucketKey("registry")

	chunkSize = 4096
	pof       = pot.DefaultPof(256)
)

func init() {
	flag.Parse()
	rand.Seed(time.Now().UnixNano())

	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))
}

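// createGlobalStore creates a temporary directory and a mock global store
// backed by it, returning the directory path so the caller can remove it
// when the test is done.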
func createGlobalStore() (string, *mockdb.GlobalStore, error) {
	var globalStore *mockdb.GlobalStore
	globalStoreDir, err := ioutil.TempDir("", "global.store")
	if err != nil {
		log.Error("Error initiating global store temp directory!", "err", err)
		return "", nil, err
	}
	globalStore, err = mockdb.NewGlobalStore(globalStoreDir)
	if err != nil {
		log.Error("Error initiating global store!", "err", err)
		return "", nil, err
	}
	return globalStoreDir, globalStore, nil
}

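// newStreamerTester creates a protocol tester for a single streamer node
// backed by a temporary local store. It returns a teardown function that
// closes the registry and removes the temporary data directory.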
func newStreamerTester(t *testing.T, registryOptions *RegistryOptions) (*p2ptest.ProtocolTester, *Registry, *storage.LocalStore, func(), error) {
	// setup
	addr := network.RandomAddr() // the tested peer's address
	to := network.NewKademlia(addr.OAddr, network.NewKadParams())

	// temp datadir
	datadir, err := ioutil.TempDir("", "streamer")
	if err != nil {
		return nil, nil, nil, func() {}, err
	}
	removeDataDir := func() {
		os.RemoveAll(datadir)
	}

	params := storage.NewDefaultLocalStoreParams()
	params.Init(datadir)
	params.BaseKey = addr.Over()

	localStore, err := storage.NewTestLocalStoreForAddr(params)
	if err != nil {
		return nil, nil, nil, removeDataDir, err
	}

	netStore, err := storage.NewNetStore(localStore, nil)
	if err != nil {
		return nil, nil, nil, removeDataDir, err
	}

	delivery := NewDelivery(to, netStore)
	netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
	streamer := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), registryOptions)
	teardown := func() {
		streamer.Close()
		removeDataDir()
	}
	protocolTester := p2ptest.NewProtocolTester(t, addr.ID(), 1, streamer.runProtocol)

	err = waitForPeers(streamer, 1*time.Second, 1)
	if err != nil {
		return nil, nil, nil, nil, errors.New("timeout: peer is not created")
	}

	return protocolTester, streamer, localStore, teardown, nil
}

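// waitForPeers polls the registry every 10ms until it has at least
// expectedPeers peers or the timeout elapses.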
func waitForPeers(streamer *Registry, timeout time.Duration, expectedPeers int) error {
	ticker := time.NewTicker(10 * time.Millisecond)
	timeoutTimer := time.NewTimer(timeout)
	for {
		select {
		case <-ticker.C:
			if streamer.peersCount() >= expectedPeers {
				return nil
			}
		case <-timeoutTimer.C:
			return errors.New("timeout")
		}
	}
}

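// roundRobinStore is a ChunkStore that distributes Put calls across its
// underlying stores in round-robin order; Get is not supported.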
type roundRobinStore struct {
	index  uint32
	stores []storage.ChunkStore
}

func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore {
	return &roundRobinStore{
		stores: stores,
	}
}

func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (storage.Chunk, error) {
	return nil, errors.New("get not well defined on round robin store")
}

func (rrs *roundRobinStore) Put(ctx context.Context, chunk storage.Chunk) error {
	i := atomic.AddUint32(&rrs.index, 1)
	idx := int(i) % len(rrs.stores)
	return rrs.stores[idx].Put(ctx, chunk)
}

func (rrs *roundRobinStore) Close() {
	for _, store := range rrs.stores {
		store.Close()
	}
}

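// readAll retrieves the content behind hash from the file store and reads it
// to the end in 1KB steps, returning the total number of bytes read.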
func readAll(fileStore *storage.FileStore, hash []byte) (int64, error) {
	r, _ := fileStore.Retrieve(context.TODO(), hash)
	buf := make([]byte, 1024)
	var n int
	var total int64
	var err error
	for (total == 0 || n > 0) && err == nil {
		n, err = r.ReadAt(buf, total)
		total += int64(n)
	}
	if err != nil && err != io.EOF {
		return total, err
	}
	return total, nil
}

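// uploadFilesToNodes generates a random file for every up node in the
// simulation, stores it via the node's FileStore and returns the root
// addresses together with the generated file contents.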
func uploadFilesToNodes(sim *simulation.Simulation) ([]storage.Address, []string, error) {
	nodes := sim.UpNodeIDs()
	nodeCnt := len(nodes)
	log.Debug(fmt.Sprintf("Uploading %d files to nodes", nodeCnt))
	// array holding generated files
	rfiles := make([]string, nodeCnt)
	// array holding the root hashes of the files
	rootAddrs := make([]storage.Address, nodeCnt)

	var err error
	// for every node, generate a file and upload
	for i, id := range nodes {
		item, ok := sim.NodeItem(id, bucketKeyFileStore)
		if !ok {
			return nil, nil, fmt.Errorf("Error accessing localstore")
		}
		fileStore := item.(*storage.FileStore)
		// generate a file
		rfiles[i], err = generateRandomFile()
		if err != nil {
			return nil, nil, err
		}
		// store it (upload it) on the FileStore
		ctx := context.TODO()
		rk, wait, err := fileStore.Store(ctx, strings.NewReader(rfiles[i]), int64(len(rfiles[i])), false)
		log.Debug("Uploaded random string file to node")
		if err != nil {
			return nil, nil, err
		}
		err = wait(ctx)
		if err != nil {
			return nil, nil, err
		}
		rootAddrs[i] = rk
	}
	return rootAddrs, rfiles, nil
}

// generate a random file (string)
func generateRandomFile() (string, error) {
	// generate a random file size between minFileSize and maxFileSize
	fileSize := rand.Intn(maxFileSize-minFileSize) + minFileSize
	log.Debug(fmt.Sprintf("Generated file with filesize %d kB", fileSize))
	b := make([]byte, fileSize*1024)
	_, err := crand.Read(b)
	if err != nil {
		log.Error("Error generating random file.", "err", err)
		return "", err
	}
	return string(b), nil
}

// create a local store for the given node
func createTestLocalStorageForID(id enode.ID, addr *network.BzzAddr) (storage.ChunkStore, string, error) {
	var datadir string
	var err error
	datadir, err = ioutil.TempDir("", fmt.Sprintf("syncer-test-%s", id.TerminalString()))
	if err != nil {
		return nil, "", err
	}
	var store storage.ChunkStore
	params := storage.NewDefaultLocalStoreParams()
	params.ChunkDbPath = datadir
	params.BaseKey = addr.Over()
	store, err = storage.NewTestLocalStoreForAddr(params)
	if err != nil {
		os.RemoveAll(datadir)
		return nil, "", err
	}
	return store, datadir, nil
}