github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/swarm/network/stream/syncer_test.go (about)

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	mockdb "github.com/ethereum/go-ethereum/swarm/storage/mock/db"
)

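// dataChunkCount is the number of chunks of random data each uploading node
// stores in the sync test.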
const dataChunkCount = 200

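// TestSyncerSimulation runs the sync test over chains of 2, 4, 8 and 16 nodes,
// with one connection per node and syncing checked on proximity order bin 1.
// It can be run on its own with:
//
//	go test -run TestSyncerSimulation ./swarm/network/stream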
func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 4, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 8, 1, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 16, 1, dataChunkCount, true, 1)
}

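// createMockStore creates a LocalStore for the given node that is backed by a
// node-specific view of the shared global mock store, using a fresh temporary
// directory as its datadir. The caller is responsible for removing datadir.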
func createMockStore(globalStore *mockdb.GlobalStore, id discover.NodeID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()

	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, "", err
	}
	params.Init(datadir)
	params.BaseKey = addr.Over()
	lstore, err = storage.NewLocalStore(params, mockStore)
	return lstore, datadir, err
}

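// testSyncBetweenNodes connects the given number of simulated nodes into a
// chain, uploads chunkCount chunks of random data, waits for the network to
// become healthy, and verifies that the chunks collected from proximity order
// bin po can be retrieved from the nodes' stores.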
func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool, po uint8) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var globalStore *mockdb.GlobalStore
			var gDir, datadir string

			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			// hack to put addresses in the same space
			addr.OAddr[0] = byte(0)

			if *useMockStore {
				gDir, globalStore, err = createGlobalStore()
				if err != nil {
					return nil, nil, fmt.Errorf("mock store enabled but creating global store failed: %v", err)
				}
				store, datadir, err = createMockStore(globalStore, id, addr)
			} else {
				store, datadir, err = createTestLocalStorageForID(id, addr)
			}
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
				if *useMockStore {
					err := globalStore.Close()
					if err != nil {
						log.Error("Error closing global store", "err", err)
					}
					os.RemoveAll(gDir)
				}
			}
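			// both store constructors above return a *storage.LocalStore, so
			// the assertion is safe; wrap it with the DB API used by streaming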
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			bucket.Store(bucketKeyDB, db)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)
			bucket.Store(bucketKeyDelivery, delivery)

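			// with SkipCheck set, chunks are delivered to peers without the
			// offered/wanted hashes round trip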
			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck: skipCheck,
			})

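			// a FileStore is needed for uploading the random test data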
			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	// create context for simulation run
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel should come before defer simulation teardown
	defer cancel()

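	// create the nodes and connect them into a chain topology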
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()

		nodeIndex := make(map[discover.NodeID]int)
		for i, id := range nodeIDs {
			nodeIndex[id] = i
		}

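		// watch for unexpected peer disconnections for the duration of the run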
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					// t.Fatal must not be called from a goroutine other than
					// the one running the test; t.Error is safe here
					t.Error(d.Error)
				}
			}
		}()

		// each node subscribes to the SYNC stream (bin 1) of its successor in the chain
		for j := 0; j < nodes-1; j++ {
			id := nodeIDs[j]
			client, err := sim.Net.GetNode(id).Client()
			if err != nil {
				t.Fatal(err)
			}
			sid := nodeIDs[j+1]
			err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
			if j > 0 || nodes == 2 {
				item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("no filestore")
				}
				fileStore := item.(*storage.FileStore)
				size := chunkCount * chunkSize
				_, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
				if err != nil {
					t.Fatal(err.Error())
				}
				wait(ctx)
			}
		}
		// at this point chunks of random files have been distributed into stores 1...nodes
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// collect hashes in the po bin for each node
		hashes := make([][]storage.Address, nodes)
		totalHashes := 0
		hashCounts := make([]int, nodes)
		for i := nodes - 1; i >= 0; i-- {
			if i < nodes-1 {
				hashCounts[i] = hashCounts[i+1]
			}
			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
			if !ok {
				return fmt.Errorf("no DB")
			}
			db := item.(*storage.DBAPI)
			db.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
				hashes[i] = append(hashes[i], addr)
				totalHashes++
				hashCounts[i]++
				return true
			})
		}
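		// verify that all collected hashes can actually be retrieved from the
		// corresponding nodes' stores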
		var total, found int
		for _, node := range nodeIDs {
			i := nodeIndex[node]

			for j := i; j < nodes; j++ {
				total += len(hashes[j])
				for _, key := range hashes[j] {
					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
					if !ok {
						return fmt.Errorf("no DB")
					}
					db := item.(*storage.DBAPI)
					chunk, err := db.Get(ctx, key)
					if err == storage.ErrFetching {
						<-chunk.ReqC
					} else if err != nil {
						continue
					}
					// needed for leveldb not to be closed?
					// chunk.WaitToStore()
					found++
				}
			}
			log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
		}
		if total == found && total > 0 {
			return nil
		}
		return fmt.Errorf("total %d does not equal found %d", total, found)
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}