github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/swarm/network/stream/delivery_test.go (about)

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 19:16:43</date>
    10  //</624450115230240768>
    11  
    12  
    13  package stream
    14  
    15  import (
    16  	"bytes"
    17  	"context"
    18  	"errors"
    19  	"fmt"
    20  	"os"
    21  	"sync"
    22  	"sync/atomic"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/ethereum/go-ethereum/node"
    27  	"github.com/ethereum/go-ethereum/p2p"
    28  	"github.com/ethereum/go-ethereum/p2p/enode"
    29  	"github.com/ethereum/go-ethereum/p2p/protocols"
    30  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    31  	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
    32  	"github.com/ethereum/go-ethereum/swarm/log"
    33  	"github.com/ethereum/go-ethereum/swarm/network"
    34  	pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue"
    35  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    36  	"github.com/ethereum/go-ethereum/swarm/state"
    37  	"github.com/ethereum/go-ethereum/swarm/storage"
    38  	"github.com/ethereum/go-ethereum/swarm/testutil"
    39  )
    40  
     41  //Tests initializing a retrieve request
    42  func TestStreamerRetrieveRequest(t *testing.T) {
    43  	regOpts := &RegistryOptions{
    44  		Retrieval: RetrievalClientOnly,
    45  		Syncing:   SyncingDisabled,
    46  	}
    47  	tester, streamer, _, teardown, err := newStreamerTester(t, regOpts)
    48  	defer teardown()
    49  	if err != nil {
    50  		t.Fatal(err)
    51  	}
    52  
    53  	node := tester.Nodes[0]
    54  
    55  	ctx := context.Background()
    56  	req := network.NewRequest(
    57  		storage.Address(hash0[:]),
    58  		true,
    59  		&sync.Map{},
    60  	)
    61  	streamer.delivery.RequestFromPeers(ctx, req)
    62  
    63  	stream := NewStream(swarmChunkServerStreamName, "", true)
    64  
    65  	err = tester.TestExchanges(p2ptest.Exchange{
    66  		Label: "RetrieveRequestMsg",
    67  		Expects: []p2ptest.Expect{
     68  			{ //start expecting a subscription for the retrieve request stream, due to 'RetrievalClientOnly'
    69  				Code: 4,
    70  				Msg: &SubscribeMsg{
    71  					Stream:   stream,
    72  					History:  nil,
    73  					Priority: Top,
    74  				},
    75  				Peer: node.ID(),
    76  			},
     77  			{ //expect a retrieve request message for the given hash
    78  				Code: 5,
    79  				Msg: &RetrieveRequestMsg{
    80  					Addr:      hash0[:],
    81  					SkipCheck: true,
    82  				},
    83  				Peer: node.ID(),
    84  			},
    85  		},
    86  	})
    87  
    88  	if err != nil {
    89  		t.Fatalf("Expected no error, got %v", err)
    90  	}
    91  }
    92  
     93  //Tests requesting a chunk from a peer, then issuing an "empty" OfferedHashesMsg (no hashes available yet)
     94  //It should time out, because the peer does not have the chunk (no syncing has happened before)
    95  func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
    96  	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
    97  		Retrieval: RetrievalEnabled,
     98  		Syncing:   SyncingDisabled, //no syncing
    99  	})
   100  	defer teardown()
   101  	if err != nil {
   102  		t.Fatal(err)
   103  	}
   104  
   105  	node := tester.Nodes[0]
   106  
   107  	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)
   108  
   109  	peer := streamer.getPeer(node.ID())
   110  
   111  	stream := NewStream(swarmChunkServerStreamName, "", true)
    112  	//simulate pre-subscription to the retrieve request stream on the peer
   113  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   114  		Stream:   stream,
   115  		History:  nil,
   116  		Priority: Top,
   117  	})
   118  
    119  	//test the exchange
   120  	err = tester.TestExchanges(p2ptest.Exchange{
   121  		Expects: []p2ptest.Expect{
    122  			{ //first expect a subscription to the retrieve request stream
   123  				Code: 4,
   124  				Msg: &SubscribeMsg{
   125  					Stream:   stream,
   126  					History:  nil,
   127  					Priority: Top,
   128  				},
   129  				Peer: node.ID(),
   130  			},
   131  		},
   132  	}, p2ptest.Exchange{
   133  		Label: "RetrieveRequestMsg",
   134  		Triggers: []p2ptest.Trigger{
    135  			{ //...then the actual retrieve request
   136  				Code: 5,
   137  				Msg: &RetrieveRequestMsg{
   138  					Addr: chunk.Address()[:],
   139  				},
   140  				Peer: node.ID(),
   141  			},
   142  		},
   143  		Expects: []p2ptest.Expect{
    144  			{ //to which the peer responds with offered hashes
   145  				Code: 1,
   146  				Msg: &OfferedHashesMsg{
   147  					HandoverProof: nil,
   148  					Hashes:        nil,
   149  					From:          0,
   150  					To:            0,
   151  				},
   152  				Peer: node.ID(),
   153  			},
   154  		},
   155  	})
   156  
    157  	//should fail with a timeout, as the peer we are requesting
    158  	//the chunk from does not have the chunk
   159  	expectedError := `exchange #1 "RetrieveRequestMsg": timed out`
   160  	if err == nil || err.Error() != expectedError {
   161  		t.Fatalf("Expected error %v, got %v", expectedError, err)
   162  	}
   163  }
   164  
    165  //upstream request server receives a retrieve request and responds with
    166  //offered hashes, or with the delivery directly if SkipCheck is set to true
   167  func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
   168  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   169  		Retrieval: RetrievalEnabled,
   170  		Syncing:   SyncingDisabled,
   171  	})
   172  	defer teardown()
   173  	if err != nil {
   174  		t.Fatal(err)
   175  	}
   176  
   177  	node := tester.Nodes[0]
   178  
   179  	peer := streamer.getPeer(node.ID())
   180  
   181  	stream := NewStream(swarmChunkServerStreamName, "", true)
   182  
   183  	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
   184  		Stream:   stream,
   185  		History:  nil,
   186  		Priority: Top,
   187  	})
   188  
   189  	hash := storage.Address(hash0[:])
   190  	chunk := storage.NewChunk(hash, hash)
   191  	err = localStore.Put(context.TODO(), chunk)
   192  	if err != nil {
   193  		t.Fatalf("Expected no err got %v", err)
   194  	}
   195  
   196  	err = tester.TestExchanges(p2ptest.Exchange{
   197  		Expects: []p2ptest.Expect{
   198  			{
   199  				Code: 4,
   200  				Msg: &SubscribeMsg{
   201  					Stream:   stream,
   202  					History:  nil,
   203  					Priority: Top,
   204  				},
   205  				Peer: node.ID(),
   206  			},
   207  		},
   208  	}, p2ptest.Exchange{
   209  		Label: "RetrieveRequestMsg",
   210  		Triggers: []p2ptest.Trigger{
   211  			{
   212  				Code: 5,
   213  				Msg: &RetrieveRequestMsg{
   214  					Addr: hash,
   215  				},
   216  				Peer: node.ID(),
   217  			},
   218  		},
   219  		Expects: []p2ptest.Expect{
   220  			{
   221  				Code: 1,
   222  				Msg: &OfferedHashesMsg{
   223  					HandoverProof: &HandoverProof{
   224  						Handover: &Handover{},
   225  					},
   226  					Hashes: hash,
   227  					From:   0,
    228  					//TODO: why is this 32???
   229  					To:     32,
   230  					Stream: stream,
   231  				},
   232  				Peer: node.ID(),
   233  			},
   234  		},
   235  	})
   236  
   237  	if err != nil {
   238  		t.Fatal(err)
   239  	}
   240  
   241  	hash = storage.Address(hash1[:])
   242  	chunk = storage.NewChunk(hash, hash1[:])
   243  	err = localStore.Put(context.TODO(), chunk)
   244  	if err != nil {
   245  		t.Fatalf("Expected no err got %v", err)
   246  	}
   247  
   248  	err = tester.TestExchanges(p2ptest.Exchange{
   249  		Label: "RetrieveRequestMsg",
   250  		Triggers: []p2ptest.Trigger{
   251  			{
   252  				Code: 5,
   253  				Msg: &RetrieveRequestMsg{
   254  					Addr:      hash,
   255  					SkipCheck: true,
   256  				},
   257  				Peer: node.ID(),
   258  			},
   259  		},
   260  		Expects: []p2ptest.Expect{
   261  			{
   262  				Code: 6,
   263  				Msg: &ChunkDeliveryMsg{
   264  					Addr:  hash,
   265  					SData: hash,
   266  				},
   267  				Peer: node.ID(),
   268  			},
   269  		},
   270  	})
   271  
   272  	if err != nil {
   273  		t.Fatal(err)
   274  	}
   275  }
   276  
    277  //if there is one peer in the Kademlia, RequestFromPeers should return it
   278  func TestRequestFromPeers(t *testing.T) {
   279  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   280  
   281  	addr := network.RandomAddr()
   282  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   283  	delivery := NewDelivery(to, nil)
   284  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
   285  	peer := network.NewPeer(&network.BzzPeer{
   286  		BzzAddr:   network.RandomAddr(),
   287  		LightNode: false,
   288  		Peer:      protocolsPeer,
   289  	}, to)
   290  	to.On(peer)
   291  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
   292  
    293  	//an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
   294  	sp := &Peer{
   295  		Peer:     protocolsPeer,
   296  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   297  		streamer: r,
   298  	}
   299  	r.setPeer(sp)
   300  	req := network.NewRequest(
   301  		storage.Address(hash0[:]),
   302  		true,
   303  		&sync.Map{},
   304  	)
   305  	ctx := context.Background()
   306  	id, _, err := delivery.RequestFromPeers(ctx, req)
   307  
   308  	if err != nil {
   309  		t.Fatal(err)
   310  	}
   311  	if *id != dummyPeerID {
   312  		t.Fatalf("Expected an id, got %v", id)
   313  	}
   314  }
   315  
    316  //RequestFromPeers should not return light nodes
   317  func TestRequestFromPeersWithLightNode(t *testing.T) {
   318  	dummyPeerID := enode.HexID("3431c3939e1ee2a6345e976a8234f9870152d64879f30bc272a074f6859e75e8")
   319  
   320  	addr := network.RandomAddr()
   321  	to := network.NewKademlia(addr.OAddr, network.NewKadParams())
   322  	delivery := NewDelivery(to, nil)
   323  
   324  	protocolsPeer := protocols.NewPeer(p2p.NewPeer(dummyPeerID, "dummy", nil), nil, nil)
    325  	//setting up a light node
   326  	peer := network.NewPeer(&network.BzzPeer{
   327  		BzzAddr:   network.RandomAddr(),
   328  		LightNode: true,
   329  		Peer:      protocolsPeer,
   330  	}, to)
   331  	to.On(peer)
   332  	r := NewRegistry(addr.ID(), delivery, nil, nil, nil, nil)
    333  	//an empty priorityQueue has to be created to prevent a goroutine being called after the test has finished
   334  	sp := &Peer{
   335  		Peer:     protocolsPeer,
   336  		pq:       pq.New(int(PriorityQueue), PriorityQueueCap),
   337  		streamer: r,
   338  	}
   339  	r.setPeer(sp)
   340  
   341  	req := network.NewRequest(
   342  		storage.Address(hash0[:]),
   343  		true,
   344  		&sync.Map{},
   345  	)
   346  
   347  	ctx := context.Background()
    348  	//making a request which should return with "no peer found"
   349  	_, _, err := delivery.RequestFromPeers(ctx, req)
   350  
   351  	expectedError := "no peer found"
   352  	if err.Error() != expectedError {
   353  		t.Fatalf("expected '%v', got %v", expectedError, err)
   354  	}
   355  }
   356  
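         //TestStreamerDownstreamChunkDeliveryMsgExchange registers a client for a custom "foo" stream,
         //subscribes to it and then triggers a ChunkDeliveryMsg from the peer, checking that the
         //delivered chunk ends up in the local store with the expected data.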
   357  func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
   358  	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
   359  		Retrieval: RetrievalDisabled,
   360  		Syncing:   SyncingDisabled,
   361  	})
   362  	defer teardown()
   363  	if err != nil {
   364  		t.Fatal(err)
   365  	}
   366  
   367  	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
   368  		return &testClient{
   369  			t: t,
   370  		}, nil
   371  	})
   372  
   373  	node := tester.Nodes[0]
   374  
    375  	//subscribe to a custom stream
   376  	stream := NewStream("foo", "", true)
   377  	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
   378  	if err != nil {
   379  		t.Fatalf("Expected no error, got %v", err)
   380  	}
   381  
   382  	chunkKey := hash0[:]
   383  	chunkData := hash1[:]
   384  
   385  	err = tester.TestExchanges(p2ptest.Exchange{
   386  		Label: "Subscribe message",
   387  		Expects: []p2ptest.Expect{
    388  			{ //first expect a subscription to the custom stream...
   389  				Code: 4,
   390  				Msg: &SubscribeMsg{
   391  					Stream:   stream,
   392  					History:  NewRange(5, 8),
   393  					Priority: Top,
   394  				},
   395  				Peer: node.ID(),
   396  			},
   397  		},
   398  	},
   399  		p2ptest.Exchange{
   400  			Label: "ChunkDelivery message",
   401  			Triggers: []p2ptest.Trigger{
    402  				{ //...then trigger a chunk delivery of the given chunk from the peer, in order for
    403  				//the local node to get the chunk delivered
   404  					Code: 6,
   405  					Msg: &ChunkDeliveryMsg{
   406  						Addr:  chunkKey,
   407  						SData: chunkData,
   408  					},
   409  					Peer: node.ID(),
   410  				},
   411  			},
   412  		})
   413  
   414  	if err != nil {
   415  		t.Fatalf("Expected no error, got %v", err)
   416  	}
   417  	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
   418  	defer cancel()
   419  
    420  	//wait for the chunk to get stored
   421  	storedChunk, err := localStore.Get(ctx, chunkKey)
   422  	for err != nil {
   423  		select {
   424  		case <-ctx.Done():
   425  			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
   426  		default:
   427  		}
   428  		storedChunk, err = localStore.Get(ctx, chunkKey)
   429  		time.Sleep(50 * time.Millisecond)
   430  	}
   431  
   432  	if err != nil {
   433  		t.Fatalf("Expected no error, got %v", err)
   434  	}
   435  
   436  	if !bytes.Equal(storedChunk.Data(), chunkData) {
   437  		t.Fatal("Retrieved chunk has different data than original")
   438  	}
   439  
   440  }
   441  
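         //TestDeliveryFromNodes runs the delivery simulation for chains of 2, 4, 8 and 16 nodes,
         //once with and once without SkipCheck.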
   442  func TestDeliveryFromNodes(t *testing.T) {
   443  	testDeliveryFromNodes(t, 2, dataChunkCount, true)
   444  	testDeliveryFromNodes(t, 2, dataChunkCount, false)
   445  	testDeliveryFromNodes(t, 4, dataChunkCount, true)
   446  	testDeliveryFromNodes(t, 4, dataChunkCount, false)
   447  	testDeliveryFromNodes(t, 8, dataChunkCount, true)
   448  	testDeliveryFromNodes(t, 8, dataChunkCount, false)
   449  	testDeliveryFromNodes(t, 16, dataChunkCount, true)
   450  	testDeliveryFromNodes(t, 16, dataChunkCount, false)
   451  }
   452  
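         //testDeliveryFromNodes distributes the chunks of a random file over all nodes except the
         //pivot (via a round-robin store) and then verifies that the pivot node can retrieve the
         //whole file through retrieve requests. It is driven from TestDeliveryFromNodes above, e.g.:
         //
         //	testDeliveryFromNodes(t, 2, dataChunkCount, true)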
   453  func testDeliveryFromNodes(t *testing.T, nodes, chunkCount int, skipCheck bool) {
   454  	sim := simulation.New(map[string]simulation.ServiceFunc{
   455  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   456  			node := ctx.Config.Node()
   457  			addr := network.NewAddr(node)
   458  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   459  			if err != nil {
   460  				return nil, nil, err
   461  			}
   462  			bucket.Store(bucketKeyStore, store)
   463  			cleanup = func() {
   464  				os.RemoveAll(datadir)
   465  				store.Close()
   466  			}
   467  			localStore := store.(*storage.LocalStore)
   468  			netStore, err := storage.NewNetStore(localStore, nil)
   469  			if err != nil {
   470  				return nil, nil, err
   471  			}
   472  
   473  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   474  			delivery := NewDelivery(kad, netStore)
   475  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   476  
   477  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   478  				SkipCheck: skipCheck,
   479  				Syncing:   SyncingDisabled,
   480  				Retrieval: RetrievalEnabled,
   481  			}, nil)
   482  			bucket.Store(bucketKeyRegistry, r)
   483  
   484  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   485  			bucket.Store(bucketKeyFileStore, fileStore)
   486  
   487  			return r, cleanup, nil
   488  
   489  		},
   490  	})
   491  	defer sim.Close()
   492  
   493  	log.Info("Adding nodes to simulation")
   494  	_, err := sim.AddNodesAndConnectChain(nodes)
   495  	if err != nil {
   496  		t.Fatal(err)
   497  	}
   498  
   499  	log.Info("Starting simulation")
   500  	ctx := context.Background()
   501  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
   502  		nodeIDs := sim.UpNodeIDs()
    503  		//determine the pivot node to be the first node of the simulation
   504  		pivot := nodeIDs[0]
   505  
    506  		//distribute chunks of a random file into the stores of nodes 1 to nodes
    507  		//we do this by creating a file store with an underlying round-robin store:
    508  		//the file store will create a hash for the uploaded file, but every chunk will be
    509  		//distributed to a different node via round-robin scheduling
   510  		log.Debug("Writing file to round-robin file store")
    511  		//to do this we create an array of chunk stores (length minus one, the pivot node)
   512  		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
    513  		//...then we need to get all stores from the sim...
   514  		lStores := sim.NodesItems(bucketKeyStore)
   515  		i := 0
    516  		//...iterate the buckets...
   517  		for id, bucketVal := range lStores {
    518  			//...and remove the one which is the pivot node
   519  			if id == pivot {
   520  				continue
   521  			}
    522  			//the others are added to the array...
   523  			stores[i] = bucketVal.(storage.ChunkStore)
   524  			i++
   525  		}
    526  		//...which then gets passed to the round-robin file store
   527  		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
    528  		//now we can upload a (random) file to the round-robin store
   529  		size := chunkCount * chunkSize
   530  		log.Debug("Storing data to file store")
   531  		fileHash, wait, err := roundRobinFileStore.Store(ctx, testutil.RandomReader(1, size), int64(size), false)
    532  		//wait until all chunks are stored
   533  		if err != nil {
   534  			return err
   535  		}
   536  		err = wait(ctx)
   537  		if err != nil {
   538  			return err
   539  		}
   540  
   541  		log.Debug("Waiting for kademlia")
    542  		//TODO: this does not seem to be the correct usage of the function, as the simulation may have no Kademlias
   543  		if _, err := sim.WaitTillHealthy(ctx); err != nil {
   544  			return err
   545  		}
   546  
    547  		//get the pivot node's file store
   548  		item, ok := sim.NodeItem(pivot, bucketKeyFileStore)
   549  		if !ok {
   550  			return fmt.Errorf("No filestore")
   551  		}
   552  		pivotFileStore := item.(*storage.FileStore)
   553  		log.Debug("Starting retrieval routine")
   554  		retErrC := make(chan error)
   555  		go func() {
    556  			//start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
    557  			//we must wait for the peer connections to have started before requesting
   558  			n, err := readAll(pivotFileStore, fileHash)
   559  			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
   560  			retErrC <- err
   561  		}()
   562  
   563  		log.Debug("Watching for disconnections")
   564  		disconnections := sim.PeerEvents(
   565  			context.Background(),
   566  			sim.NodeIDs(),
   567  			simulation.NewPeerEventsFilter().Drop(),
   568  		)
   569  
   570  		var disconnected atomic.Value
   571  		go func() {
   572  			for d := range disconnections {
   573  				if d.Error != nil {
   574  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   575  					disconnected.Store(true)
   576  				}
   577  			}
   578  		}()
   579  		defer func() {
   580  			if err != nil {
   581  				if yes, ok := disconnected.Load().(bool); ok && yes {
   582  					err = errors.New("disconnect events received")
   583  				}
   584  			}
   585  		}()
   586  
    587  		//finally check that the pivot node gets all chunks via the root hash
   588  		log.Debug("Check retrieval")
   589  		success := true
   590  		var total int64
   591  		total, err = readAll(pivotFileStore, fileHash)
   592  		if err != nil {
   593  			return err
   594  		}
   595  		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
   596  		if err != nil || total != int64(size) {
   597  			success = false
   598  		}
   599  
   600  		if !success {
   601  			return fmt.Errorf("Test failed, chunks not available on all nodes")
   602  		}
   603  		if err := <-retErrC; err != nil {
   604  			t.Fatalf("requesting chunks: %v", err)
   605  		}
   606  		log.Debug("Test terminated successfully")
   607  		return nil
   608  	})
   609  	if result.Error != nil {
   610  		t.Fatal(result.Error)
   611  	}
   612  }
   613  
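         //BenchmarkDeliveryFromNodesWithoutCheck benchmarks retrieval with SkipCheck enabled, i.e.
         //chunks are delivered directly without the offered hashes round trip, for chains of 2 to 16
         //nodes and 32 to 128 chunks.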
   614  func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
   615  	for chunks := 32; chunks <= 128; chunks *= 2 {
   616  		for i := 2; i < 32; i *= 2 {
   617  			b.Run(
   618  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   619  				func(b *testing.B) {
   620  					benchmarkDeliveryFromNodes(b, i, chunks, true)
   621  				},
   622  			)
   623  		}
   624  	}
   625  }
   626  
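         //BenchmarkDeliveryFromNodesWithCheck benchmarks the same retrieval with SkipCheck disabled,
         //so deliveries first go through the offered hashes exchange (see
         //TestStreamerUpstreamRetrieveRequestMsgExchange above).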
   627  func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
   628  	for chunks := 32; chunks <= 128; chunks *= 2 {
   629  		for i := 2; i < 32; i *= 2 {
   630  			b.Run(
   631  				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
   632  				func(b *testing.B) {
   633  					benchmarkDeliveryFromNodes(b, i, chunks, false)
   634  				},
   635  			)
   636  		}
   637  	}
   638  }
   639  
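         //benchmarkDeliveryFromNodes uploads chunkCount random chunks through the file store of the
         //last node in the chain and then measures how long the first (pivot) node needs to fetch
         //all of them through its NetStore.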
   640  func benchmarkDeliveryFromNodes(b *testing.B, nodes, chunkCount int, skipCheck bool) {
   641  	sim := simulation.New(map[string]simulation.ServiceFunc{
   642  		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
   643  			node := ctx.Config.Node()
   644  			addr := network.NewAddr(node)
   645  			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
   646  			if err != nil {
   647  				return nil, nil, err
   648  			}
   649  			bucket.Store(bucketKeyStore, store)
   650  			cleanup = func() {
   651  				os.RemoveAll(datadir)
   652  				store.Close()
   653  			}
   654  			localStore := store.(*storage.LocalStore)
   655  			netStore, err := storage.NewNetStore(localStore, nil)
   656  			if err != nil {
   657  				return nil, nil, err
   658  			}
   659  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
   660  			delivery := NewDelivery(kad, netStore)
   661  			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New
   662  
   663  			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
   664  				SkipCheck:       skipCheck,
   665  				Syncing:         SyncingDisabled,
   666  				Retrieval:       RetrievalDisabled,
   667  				SyncUpdateDelay: 0,
   668  			}, nil)
   669  
   670  			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
   671  			bucket.Store(bucketKeyFileStore, fileStore)
   672  
   673  			return r, cleanup, nil
   674  
   675  		},
   676  	})
   677  	defer sim.Close()
   678  
   679  	log.Info("Initializing test config")
   680  	_, err := sim.AddNodesAndConnectChain(nodes)
   681  	if err != nil {
   682  		b.Fatal(err)
   683  	}
   684  
   685  	ctx := context.Background()
   686  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) (err error) {
   687  		nodeIDs := sim.UpNodeIDs()
   688  		node := nodeIDs[len(nodeIDs)-1]
   689  
   690  		item, ok := sim.NodeItem(node, bucketKeyFileStore)
   691  		if !ok {
   692  			b.Fatal("No filestore")
   693  		}
   694  		remoteFileStore := item.(*storage.FileStore)
   695  
   696  		pivotNode := nodeIDs[0]
   697  		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
   698  		if !ok {
   699  			b.Fatal("No filestore")
   700  		}
   701  		netStore := item.(*storage.NetStore)
   702  
   703  		if _, err := sim.WaitTillHealthy(ctx); err != nil {
   704  			return err
   705  		}
   706  
   707  		disconnections := sim.PeerEvents(
   708  			context.Background(),
   709  			sim.NodeIDs(),
   710  			simulation.NewPeerEventsFilter().Drop(),
   711  		)
   712  
   713  		var disconnected atomic.Value
   714  		go func() {
   715  			for d := range disconnections {
   716  				if d.Error != nil {
   717  					log.Error("peer drop", "node", d.NodeID, "peer", d.PeerID)
   718  					disconnected.Store(true)
   719  				}
   720  			}
   721  		}()
   722  		defer func() {
   723  			if err != nil {
   724  				if yes, ok := disconnected.Load().(bool); ok && yes {
   725  					err = errors.New("disconnect events received")
   726  				}
   727  			}
   728  		}()
    729  		//benchmark loop
   730  		b.ResetTimer()
   731  		b.StopTimer()
   732  	Loop:
   733  		for i := 0; i < b.N; i++ {
    734  			//upload chunkCount random chunks to the last node
   735  			hashes := make([]storage.Address, chunkCount)
   736  			for i := 0; i < chunkCount; i++ {
    737  				//create actual size real chunks
   738  				ctx := context.TODO()
   739  				hash, wait, err := remoteFileStore.Store(ctx, testutil.RandomReader(i, chunkSize), int64(chunkSize), false)
   740  				if err != nil {
   741  					b.Fatalf("expected no error. got %v", err)
   742  				}
    743  				//wait until all chunks are stored
   744  				err = wait(ctx)
   745  				if err != nil {
   746  					b.Fatalf("expected no error. got %v", err)
   747  				}
    748  				//collect the hashes
   749  				hashes[i] = hash
   750  			}
    751  			//now benchmark the actual retrieval
    752  			//netstore.Get is called for each hash in a go routine and errors are collected
   753  			b.StartTimer()
   754  			errs := make(chan error)
   755  			for _, hash := range hashes {
   756  				go func(h storage.Address) {
   757  					_, err := netStore.Get(ctx, h)
   758  					log.Warn("test check netstore get", "hash", h, "err", err)
   759  					errs <- err
   760  				}(hash)
   761  			}
    762  			//count and report retrieval errors
    763  			//if there are misses then the chunk timeout is too low for the distance and volume (?)
   764  			var total, misses int
   765  			for err := range errs {
   766  				if err != nil {
   767  					log.Warn(err.Error())
   768  					misses++
   769  				}
   770  				total++
   771  				if total == chunkCount {
   772  					break
   773  				}
   774  			}
   775  			b.StopTimer()
   776  
   777  			if misses > 0 {
   778  				err = fmt.Errorf("%v chunk not found out of %v", misses, total)
   779  				break Loop
   780  			}
   781  		}
   782  		if err != nil {
   783  			b.Fatal(err)
   784  		}
   785  		return nil
   786  	})
   787  	if result.Error != nil {
   788  		b.Fatal(result.Error)
   789  	}
   790  
   791  }
   792