github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/swarm/network/stream/syncer_test.go

//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 19:16:44</date>
//</624450115922300928>

package stream

import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/storage/mock"
	mockmem "github.com/ethereum/go-ethereum/swarm/storage/mock/mem"
	"github.com/ethereum/go-ethereum/swarm/testutil"
)

const dataChunkCount = 200

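// TestSyncerSimulation runs the sync test over chains of 2, 4, 8 and 16 nodes,
// with SkipCheck enabled and proximity order bin 1.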
func TestSyncerSimulation(t *testing.T) {
	testSyncBetweenNodes(t, 2, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 4, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 8, dataChunkCount, true, 1)
	testSyncBetweenNodes(t, 16, dataChunkCount, true, 1)
}

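// createMockStore builds a LocalStore backed by a per-node store in the given
// global mock store, using a fresh temporary data directory and the node's
// overlay address as base key.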
func createMockStore(globalStore mock.GlobalStorer, id enode.ID, addr *network.BzzAddr) (lstore storage.ChunkStore, datadir string, err error) {
	address := common.BytesToAddress(id.Bytes())
	mockStore := globalStore.NewNodeStore(address)
	params := storage.NewDefaultLocalStoreParams()

	datadir, err = ioutil.TempDir("", "localMockStore-"+id.TerminalString())
	if err != nil {
		return nil, "", err
	}
	params.Init(datadir)
	params.BaseKey = addr.Over()
	lstore, err = storage.NewLocalStore(params, mockStore)
	if err != nil {
		return nil, "", err
	}
	return lstore, datadir, nil
}

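// testSyncBetweenNodes connects the given number of nodes in a chain, has them
// upload random content, and then verifies that the chunks collected in the
// given proximity order bin are retrievable from the nodes' NetStores.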
func testSyncBetweenNodes(t *testing.T, nodes, chunkCount int, skipCheck bool, po uint8) {

	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var datadir string

			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			// hack to put addresses in same space
			addr.OAddr[0] = byte(0)

			if *useMockStore {
				store, datadir, err = createMockStore(mockmem.NewGlobalStore(), node.ID(), addr)
			} else {
				store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
			}
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyDB, netStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			bucket.Store(bucketKeyDelivery, delivery)

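			// register the streamer with retrieval disabled and syncing set to
			// auto-subscribe, honouring the test's SkipCheck flag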
			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
				SkipCheck: skipCheck,
			}, nil)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	// create context for the simulation run
	timeout := 30 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	// defer cancel should come before defer simulation teardown
	defer cancel()

	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
		nodeIDs := sim.UpNodeIDs()

		nodeIndex := make(map[enode.ID]int)
		for i, id := range nodeIDs {
			nodeIndex[id] = i
		}

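		// watch for peer drop events; if the run fails and a drop was
		// recorded, the deferred handler below replaces the returned error
		// with a disconnect error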
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Drop(),
		)

		var disconnected atomic.Value
		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
					disconnected.Store(true)
				}
			}
		}()
		defer func() {
			if err != nil {
				if yes, ok := disconnected.Load().(bool); ok && yes {
					err = errors.New("disconnect events received")
				}
			}
		}()

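		// the loop below sets up syncing along the chain: peers subscribe to
		// each other's SYNC streams, and every intermediate node (plus the
		// first node in the two-node case) stores a random file of chunkCount
		// chunks via its FileStore so there is content to sync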
		// each node subscribes to each other's swarmChunkServerStreamName
		for j := 0; j < nodes-1; j++ {
			id := nodeIDs[j]
			client, err := sim.Net.GetNode(id).Client()
			if err != nil {
				t.Fatal(err)
			}
			sid := nodeIDs[j+1]
			err = client.CallContext(ctx, nil, "stream_subscribeStream", sid, NewStream("SYNC", FormatSyncBinKey(1), false), NewRange(0, 0), Top)
			if err != nil {
				return err
			}
			if j > 0 || nodes == 2 {
				item, ok := sim.NodeItem(nodeIDs[j], bucketKeyFileStore)
				if !ok {
					return fmt.Errorf("No filestore")
				}
				fileStore := item.(*storage.FileStore)
				size := chunkCount * chunkSize
				_, wait, err := fileStore.Store(ctx, testutil.RandomReader(j, size), int64(size), false)
				if err != nil {
					t.Fatal(err.Error())
				}
				wait(ctx)
			}
		}
		// here we distribute chunks of a random file into stores 1...nodes
		if _, err := sim.WaitTillHealthy(ctx); err != nil {
			return err
		}

		// collect hashes in po 1 bin for each node
		hashes := make([][]storage.Address, nodes)
		totalHashes := 0
		hashCounts := make([]int, nodes)
		for i := nodes - 1; i >= 0; i-- {
			if i < nodes-1 {
				hashCounts[i] = hashCounts[i+1]
			}
			item, ok := sim.NodeItem(nodeIDs[i], bucketKeyDB)
			if !ok {
				return fmt.Errorf("No DB")
			}
			netStore := item.(*storage.NetStore)
			netStore.Iterator(0, math.MaxUint64, po, func(addr storage.Address, index uint64) bool {
				hashes[i] = append(hashes[i], addr)
				totalHashes++
				hashCounts[i]++
				return true
			})
		}
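		// verify that the collected hashes are retrievable from the nodes'
		// NetStores; the test only passes if every hash is found and at least
		// one hash was collected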
		var total, found int
		for _, node := range nodeIDs {
			i := nodeIndex[node]

			for j := i; j < nodes; j++ {
				total += len(hashes[j])
				for _, key := range hashes[j] {
					item, ok := sim.NodeItem(nodeIDs[j], bucketKeyDB)
					if !ok {
						return fmt.Errorf("No DB")
					}
					db := item.(*storage.NetStore)
					_, err := db.Get(ctx, key)
					if err == nil {
						found++
					}
				}
			}
			log.Debug("sync check", "node", node, "index", i, "bin", po, "found", found, "total", total)
		}
		if total == found && total > 0 {
			return nil
		}
		return fmt.Errorf("total not equal to found: total is %d", total)
	})

	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

// TestSameVersionID just checks that if the version is not changed,
// then streamer peers see each other
func TestSameVersionID(t *testing.T) {
	// test version ID
	v := uint(1)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var datadir string

			node := ctx.Config.Node()
			addr := network.NewAddr(node)

			store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyDB, netStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			bucket.Store(bucketKeyDelivery, delivery)

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)
			// assign to each node the same version ID
			r.spec.Version = v

			bucket.Store(bucketKeyRegistry, r)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	// connect just two nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("No registry")
		}
		registry := item.(*Registry)

		// the peers should connect, thus getting the peer should not return nil
		if registry.getPeer(nodes[1]) == nil {
			t.Fatal("Expected the peer to not be nil, but it is")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")
}

// TestDifferentVersionID proves that if the streamer protocol version doesn't match,
// then the peers are not connected at streamer level
func TestDifferentVersionID(t *testing.T) {
	// create a variable holding the version ID
	v := uint(0)
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			var store storage.ChunkStore
			var datadir string

			node := ctx.Config.Node()
			addr := network.NewAddr(node)

			store, datadir, err = createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				store.Close()
				os.RemoveAll(datadir)
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyDB, netStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			bucket.Store(bucketKeyDelivery, delivery)

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				Retrieval: RetrievalDisabled,
				Syncing:   SyncingAutoSubscribe,
			}, nil)

			// increment the version ID for each node, so the versions differ
			v++
			r.spec.Version = v

			bucket.Store(bucketKeyRegistry, r)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	// connect the nodes
	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(2)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	// make sure they have time to connect
	time.Sleep(200 * time.Millisecond)
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		// get the pivot node's registry
		nodes := sim.UpNodeIDs()

		item, ok := sim.NodeItem(nodes[0], bucketKeyRegistry)
		if !ok {
			return fmt.Errorf("No registry")
		}
		registry := item.(*Registry)

		// getting the other peer should fail due to the different version numbers
		if registry.getPeer(nodes[1]) != nil {
			t.Fatal("Expected the peer to be nil, but it is not")
		}
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
	log.Info("Simulation ended")

}