github.com/oskarth/go-ethereum@v1.6.8-0.20191013093314-dac24a9d3494/swarm/network/stream/delivery_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package stream

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"fmt"
	"io"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	p2ptest "github.com/ethereum/go-ethereum/p2p/testing"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

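// TestStreamerRetrieveRequest checks that a request issued via
// delivery.RequestFromPeers results in a RetrieveRequestMsg (with SkipCheck set)
// being sent to the connected test peer.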
func TestStreamerRetrieveRequest(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t, nil)
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	node := tester.Nodes[0]

	ctx := context.Background()
	req := network.NewRequest(
		storage.Address(hash0[:]),
		true,
		&sync.Map{},
	)
	streamer.delivery.RequestFromPeers(ctx, req)

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Expects: []p2ptest.Expect{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash0[:],
					SkipCheck: true,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
}

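// TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore verifies that a
// retrieve request for a chunk that is not in the local store produces no
// OfferedHashesMsg response, so the expected exchange times out.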
func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) {
	tester, streamer, _, teardown, err := newStreamerTester(t, &RegistryOptions{
		DoServeRetrieve: true,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	node := tester.Nodes[0]

	chunk := storage.NewChunk(storage.Address(hash0[:]), nil)

	peer := streamer.getPeer(node.ID())

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   NewStream(swarmChunkServerStreamName, "", true),
		History:  nil,
		Priority: Top,
	})

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: chunk.Address()[:],
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: nil,
					Hashes:        nil,
					From:          0,
					To:            0,
				},
				Peer: node.ID(),
			},
		},
	})

	expectedError := `exchange #0 "RetrieveRequestMsg": timed out`
	if err == nil || err.Error() != expectedError {
		t.Fatalf("Expected error %v, got %v", expectedError, err)
	}
}

// The upstream request server receives a retrieve request and responds with
// offered hashes, or with a direct chunk delivery if SkipCheck is set to true.
func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
		DoServeRetrieve: true,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	node := tester.Nodes[0]
	peer := streamer.getPeer(node.ID())

	stream := NewStream(swarmChunkServerStreamName, "", true)

	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{
		Stream:   stream,
		History:  nil,
		Priority: Top,
	})

	hash := storage.Address(hash0[:])
	chunk := storage.NewChunk(hash, hash)
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr: hash,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 1,
				Msg: &OfferedHashesMsg{
					HandoverProof: &HandoverProof{
						Handover: &Handover{},
					},
					Hashes: hash,
					From:   0,
					// TODO: why is this 32???
					To:     32,
					Stream: stream,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}

	hash = storage.Address(hash1[:])
	chunk = storage.NewChunk(hash, hash1[:])
	err = localStore.Put(context.TODO(), chunk)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "RetrieveRequestMsg",
		Triggers: []p2ptest.Trigger{
			{
				Code: 5,
				Msg: &RetrieveRequestMsg{
					Addr:      hash,
					SkipCheck: true,
				},
				Peer: node.ID(),
			},
		},
		Expects: []p2ptest.Expect{
			{
				Code: 6,
				Msg: &ChunkDeliveryMsg{
					Addr:  hash,
					SData: hash,
				},
				Peer: node.ID(),
			},
		},
	})

	if err != nil {
		t.Fatal(err)
	}
}

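// TestStreamerDownstreamChunkDeliveryMsgExchange subscribes to a stream and then
// delivers a chunk via a ChunkDeliveryMsg, checking that the chunk ends up in the
// downstream peer's local store with the expected data.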
func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) {
	tester, streamer, localStore, teardown, err := newStreamerTester(t, &RegistryOptions{
		DoServeRetrieve: true,
	})
	defer teardown()
	if err != nil {
		t.Fatal(err)
	}

	streamer.RegisterClientFunc("foo", func(p *Peer, t string, live bool) (Client, error) {
		return &testClient{
			t: t,
		}, nil
	})

	node := tester.Nodes[0]

	stream := NewStream("foo", "", true)
	err = streamer.Subscribe(node.ID(), stream, NewRange(5, 8), Top)
	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}

	chunkKey := hash0[:]
	chunkData := hash1[:]

	err = tester.TestExchanges(p2ptest.Exchange{
		Label: "Subscribe message",
		Expects: []p2ptest.Expect{
			{
				Code: 4,
				Msg: &SubscribeMsg{
					Stream:   stream,
					History:  NewRange(5, 8),
					Priority: Top,
				},
				Peer: node.ID(),
			},
		},
	},
		p2ptest.Exchange{
			Label: "ChunkDelivery message",
			Triggers: []p2ptest.Trigger{
				{
					Code: 6,
					Msg: &ChunkDeliveryMsg{
						Addr:  chunkKey,
						SData: chunkData,
					},
					Peer: node.ID(),
				},
			},
		})

	if err != nil {
		t.Fatalf("Expected no error, got %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// wait for the chunk to get stored
	storedChunk, err := localStore.Get(ctx, chunkKey)
	for err != nil {
		select {
		case <-ctx.Done():
			t.Fatalf("Chunk is not in localstore after timeout, err: %v", err)
		default:
		}
		storedChunk, err = localStore.Get(ctx, chunkKey)
		time.Sleep(50 * time.Millisecond)
	}

	if !bytes.Equal(storedChunk.Data(), chunkData) {
		t.Fatal("Retrieved chunk has different data than original")
	}
}

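// TestDeliveryFromNodes runs the delivery test for chain networks of 2, 4, 8 and 16
// nodes, both with and without SkipCheck.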
func TestDeliveryFromNodes(t *testing.T) {
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 2, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 4, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 8, 1, dataChunkCount, false)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, true)
	testDeliveryFromNodes(t, 16, 1, dataChunkCount, false)
}

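// testDeliveryFromNodes uploads a random file to a round-robin store spread over all
// nodes except the pivot node, then retrieves it through the pivot node's file store,
// checking that all chunks are delivered to the pivot node via the streamer.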
func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}

			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				DoServeRetrieve: true,
			})
			bucket.Store(bucketKeyRegistry, r)

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Adding nodes to simulation")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		t.Fatal(err)
	}

	log.Info("Starting simulation")
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		// determine the pivot node to be the first node of the simulation
		sim.SetPivotNode(nodeIDs[0])
		// distribute chunks of a random file into the stores of nodes 1 to nodes:
		// we do this by creating a file store with an underlying round-robin store;
		// the file store will create a hash for the uploaded file, but every chunk will be
		// distributed to a different node via round-robin scheduling
		log.Debug("Writing file to round-robin file store")
		// to do this, we create an array of chunk stores (length minus one, excluding the pivot node)
		stores := make([]storage.ChunkStore, len(nodeIDs)-1)
		// we then need to get all stores from the sim...
		lStores := sim.NodesItems(bucketKeyStore)
		i := 0
		// ...iterate the buckets...
		for id, bucketVal := range lStores {
			// ...and skip the one belonging to the pivot node
			if id == *sim.PivotNodeID() {
				continue
			}
			// the other ones are added to the array...
			stores[i] = bucketVal.(storage.ChunkStore)
			i++
		}
		// ...which then gets passed to the round-robin file store
		roundRobinFileStore := storage.NewFileStore(newRoundRobinStore(stores...), storage.NewFileStoreParams())
		// now we can actually upload a (random) file to the round-robin store
		size := chunkCount * chunkSize
		log.Debug("Storing data to file store")
		fileHash, wait, err := roundRobinFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
		if err != nil {
			return err
		}
		// wait until all chunks are stored
		err = wait(ctx)
		if err != nil {
			return err
		}

		log.Debug("Waiting for kademlia")
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		// each of the nodes (except the pivot node) subscribes to the stream of the next node
		for j, node := range nodeIDs[0 : nodes-1] {
			sid := nodeIDs[j+1]
			item, ok := sim.NodeItem(node, bucketKeyRegistry)
			if !ok {
				return fmt.Errorf("No registry")
			}
			registry := item.(*Registry)
			err = registry.Subscribe(sid, NewStream(swarmChunkServerStreamName, "", true), nil, Top)
			if err != nil {
				return err
			}
		}

		// get the pivot node's filestore
		item, ok := sim.NodeItem(*sim.PivotNodeID(), bucketKeyFileStore)
		if !ok {
			return fmt.Errorf("No filestore")
		}
		pivotFileStore := item.(*storage.FileStore)
		log.Debug("Starting retrieval routine")
		go func() {
			// start the retrieval on the pivot node - this will spawn retrieve requests for missing chunks
			// we must wait for the peer connections to have started before requesting
			n, err := readAll(pivotFileStore, fileHash)
			log.Info(fmt.Sprintf("retrieved %v", fileHash), "read", n, "err", err)
			if err != nil {
				// report the failure without calling Fatal from a goroutine other than the test goroutine
				t.Errorf("requesting chunks action error: %v", err)
			}
		}()

		log.Debug("Watching for disconnections")
		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					// report the failure without calling Fatal from a goroutine other than the test goroutine
					t.Error(d.Error)
				}
			}
		}()

		// finally check that the pivot node gets all chunks via the root hash
		log.Debug("Check retrieval")
		success := true
		var total int64
		total, err = readAll(pivotFileStore, fileHash)
		if err != nil {
			return err
		}
		log.Info(fmt.Sprintf("check if %08x is available locally: number of bytes read %v/%v (error: %v)", fileHash, total, size, err))
		if err != nil || total != int64(size) {
			success = false
		}

		if !success {
			return fmt.Errorf("Test failed, chunks not available on all nodes")
		}
		log.Debug("Test terminated successfully")
		return nil
	})
	if result.Error != nil {
		t.Fatal(result.Error)
	}
}

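// BenchmarkDeliveryFromNodesWithoutCheck benchmarks chunk delivery with SkipCheck
// enabled for chain networks of 2 to 16 nodes and 32 to 128 chunks.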
func BenchmarkDeliveryFromNodesWithoutCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, true)
				},
			)
		}
	}
}

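// BenchmarkDeliveryFromNodesWithCheck benchmarks chunk delivery with SkipCheck
// disabled for chain networks of 2 to 16 nodes and 32 to 128 chunks.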
func BenchmarkDeliveryFromNodesWithCheck(b *testing.B) {
	for chunks := 32; chunks <= 128; chunks *= 2 {
		for i := 2; i < 32; i *= 2 {
			b.Run(
				fmt.Sprintf("nodes=%v,chunks=%v", i, chunks),
				func(b *testing.B) {
					benchmarkDeliveryFromNodes(b, i, 1, chunks, false)
				},
			)
		}
	}
}

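// benchmarkDeliveryFromNodes uploads chunkCount random chunks to the last node of a
// chain network and measures how long the pivot node (the first node) takes to fetch
// them all through its netstore; only the retrieval is timed.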
func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skipCheck bool) {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
			node := ctx.Config.Node()
			addr := network.NewAddr(node)
			store, datadir, err := createTestLocalStorageForID(node.ID(), addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			netStore, err := storage.NewNetStore(localStore, nil)
			if err != nil {
				return nil, nil, err
			}
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, netStore)
			netStore.NewNetFetcherFunc = network.NewFetcherFactory(delivery.RequestFromPeers, true).New

			r := NewRegistry(addr.ID(), delivery, netStore, state.NewInmemoryStore(), &RegistryOptions{
				SkipCheck:       skipCheck,
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

			fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil
		},
	})
	defer sim.Close()

	log.Info("Initializing test config")
	_, err := sim.AddNodesAndConnectChain(nodes)
	if err != nil {
		b.Fatal(err)
	}

	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		node := nodeIDs[len(nodeIDs)-1]

		item, ok := sim.NodeItem(node, bucketKeyFileStore)
		if !ok {
			b.Fatal("No filestore")
		}
		remoteFileStore := item.(*storage.FileStore)

		pivotNode := nodeIDs[0]
		item, ok = sim.NodeItem(pivotNode, bucketKeyNetStore)
		if !ok {
			b.Fatal("No netstore")
		}
		netStore := item.(*storage.NetStore)

		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		disconnections := sim.PeerEvents(
			context.Background(),
			sim.NodeIDs(),
			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
		)

		go func() {
			for d := range disconnections {
				if d.Error != nil {
					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
					// report the failure without calling Fatal from a goroutine other than the benchmark goroutine
					b.Error(d.Error)
				}
			}
		}()
		// benchmark loop
		b.ResetTimer()
		b.StopTimer()
	Loop:
		for i := 0; i < b.N; i++ {
			// uploading chunkCount random chunks to the last node
			hashes := make([]storage.Address, chunkCount)
			for i := 0; i < chunkCount; i++ {
				// create real chunks of actual chunkSize with random content
				ctx := context.TODO()
				hash, wait, err := remoteFileStore.Store(ctx, io.LimitReader(crand.Reader, int64(chunkSize)), int64(chunkSize), false)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// wait until all chunks are stored
				err = wait(ctx)
				if err != nil {
					b.Fatalf("expected no error. got %v", err)
				}
				// collect the hashes
				hashes[i] = hash
			}
			// now benchmark the actual retrieval
			// netstore.Get is called for each hash in a goroutine and errors are collected
			b.StartTimer()
			errs := make(chan error)
			for _, hash := range hashes {
				go func(h storage.Address) {
					_, err := netStore.Get(ctx, h)
					log.Warn("test check netstore get", "hash", h, "err", err)
					errs <- err
				}(hash)
			}
			// count and report retrieval errors
			// if there are misses then chunk timeout is too low for the distance and volume (?)
			var total, misses int
			for err := range errs {
				if err != nil {
					log.Warn(err.Error())
					misses++
				}
				total++
				if total == chunkCount {
					break
				}
			}
			b.StopTimer()

			if misses > 0 {
				err = fmt.Errorf("%v chunks not found out of %v", misses, total)
				break Loop
			}
		}
		if err != nil {
			b.Fatal(err)
		}
		return nil
	})
	if result.Error != nil {
		b.Fatal(result.Error)
	}
}