github.com/Ethersocial/go-esn@v0.3.7/swarm/network/simulations/discovery/discovery_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package discovery

import (
	"context"
	"encoding/json"
	"errors"
	"flag"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/ethersocial/go-esn/log"
	"github.com/ethersocial/go-esn/node"
	"github.com/ethersocial/go-esn/p2p"
	"github.com/ethersocial/go-esn/p2p/enode"
	"github.com/ethersocial/go-esn/p2p/simulations"
	"github.com/ethersocial/go-esn/p2p/simulations/adapters"
	"github.com/ethersocial/go-esn/swarm/network"
	"github.com/ethersocial/go-esn/swarm/state"
	colorable "github.com/mattn/go-colorable"
)

// serviceName is used with the exec adapter so the exec'd binary knows which
// service to execute
const serviceName = "discovery"
const testMinProxBinSize = 2
const discoveryPersistenceDatadir = "discovery_persistence_test_store"

var discoveryPersistencePath = path.Join(os.TempDir(), discoveryPersistenceDatadir)
var discoveryEnabled = true
var persistenceEnabled = false

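// services maps the discovery service name to its constructor so the
// simulation adapters can instantiate it for every node.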
var services = adapters.Services{
	serviceName: newService,
}

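// cleanDbStores removes any state store directories left in os.TempDir()
// by previous persistence simulation runs.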
func cleanDbStores() error {
	entries, err := ioutil.ReadDir(os.TempDir())
	if err != nil {
		return err
	}

	for _, f := range entries {
		if strings.HasPrefix(f.Name(), discoveryPersistenceDatadir) {
			os.RemoveAll(path.Join(os.TempDir(), f.Name()))
		}
	}
	return nil
}

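// getDbStore opens (creating the directory if necessary) the on-disk state
// store used to persist a node's kademlia state between restarts.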
func getDbStore(nodeID string) (*state.DBStore, error) {
	if _, err := os.Stat(discoveryPersistencePath + "_" + nodeID); os.IsNotExist(err) {
		log.Info(fmt.Sprintf("directory for nodeID %s does not exist. creating...", nodeID))
		if err := os.MkdirAll(discoveryPersistencePath+"_"+nodeID, 0755); err != nil {
			return nil, err
		}
	}
	log.Info(fmt.Sprintf("opening storage directory for nodeID %s", nodeID))
	store, err := state.NewDBStore(discoveryPersistencePath + "_" + nodeID)
	if err != nil {
		return nil, err
	}
	return store, nil
}

var (
	nodeCount    = flag.Int("nodes", 10, "number of nodes to create (default 10)")
	initCount    = flag.Int("conns", 1, "number of initially connected peers (default 1)")
	snapshotFile = flag.String("snapshot", "", "path to write a network snapshot to")
	loglevel     = flag.Int("loglevel", 3, "verbosity of logs")
	rawlog       = flag.Bool("rawlog", false, "remove terminal formatting from logs")
)

func init() {
	flag.Parse()
	// register the discovery service which will run as a devp2p
	// protocol when using the exec adapter
	adapters.RegisterServices(services)

	log.PrintOrigins(true)
	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(!*rawlog))))
}

// Benchmarks to test the average time it takes for an N-node ring
// to reach a healthy kademlia topology
func BenchmarkDiscovery_8_1(b *testing.B)   { benchmarkDiscovery(b, 8, 1) }
func BenchmarkDiscovery_16_1(b *testing.B)  { benchmarkDiscovery(b, 16, 1) }
func BenchmarkDiscovery_32_1(b *testing.B)  { benchmarkDiscovery(b, 32, 1) }
func BenchmarkDiscovery_64_1(b *testing.B)  { benchmarkDiscovery(b, 64, 1) }
func BenchmarkDiscovery_128_1(b *testing.B) { benchmarkDiscovery(b, 128, 1) }
func BenchmarkDiscovery_256_1(b *testing.B) { benchmarkDiscovery(b, 256, 1) }

func BenchmarkDiscovery_8_2(b *testing.B)   { benchmarkDiscovery(b, 8, 2) }
func BenchmarkDiscovery_16_2(b *testing.B)  { benchmarkDiscovery(b, 16, 2) }
func BenchmarkDiscovery_32_2(b *testing.B)  { benchmarkDiscovery(b, 32, 2) }
func BenchmarkDiscovery_64_2(b *testing.B)  { benchmarkDiscovery(b, 64, 2) }
func BenchmarkDiscovery_128_2(b *testing.B) { benchmarkDiscovery(b, 128, 2) }
func BenchmarkDiscovery_256_2(b *testing.B) { benchmarkDiscovery(b, 256, 2) }

func BenchmarkDiscovery_8_4(b *testing.B)   { benchmarkDiscovery(b, 8, 4) }
func BenchmarkDiscovery_16_4(b *testing.B)  { benchmarkDiscovery(b, 16, 4) }
func BenchmarkDiscovery_32_4(b *testing.B)  { benchmarkDiscovery(b, 32, 4) }
func BenchmarkDiscovery_64_4(b *testing.B)  { benchmarkDiscovery(b, 64, 4) }
func BenchmarkDiscovery_128_4(b *testing.B) { benchmarkDiscovery(b, 128, 4) }
func BenchmarkDiscovery_256_4(b *testing.B) { benchmarkDiscovery(b, 256, 4) }

func TestDiscoverySimulationDockerAdapter(t *testing.T) {
	testDiscoverySimulationDockerAdapter(t, *nodeCount, *initCount)
}

func testDiscoverySimulationDockerAdapter(t *testing.T, nodes, conns int) {
	adapter, err := adapters.NewDockerAdapter()
	if err != nil {
		if err == adapters.ErrLinuxOnly {
			t.Skip(err)
		} else {
			t.Fatal(err)
		}
	}
	testDiscoverySimulation(t, nodes, conns, adapter)
}

func TestDiscoverySimulationExecAdapter(t *testing.T) {
	testDiscoverySimulationExecAdapter(t, *nodeCount, *initCount)
}

func testDiscoverySimulationExecAdapter(t *testing.T, nodes, conns int) {
	baseDir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(baseDir)
	testDiscoverySimulation(t, nodes, conns, adapters.NewExecAdapter(baseDir))
}

func TestDiscoverySimulationSimAdapter(t *testing.T) {
	testDiscoverySimulationSimAdapter(t, *nodeCount, *initCount)
}

func TestDiscoveryPersistenceSimulationSimAdapter(t *testing.T) {
	testDiscoveryPersistenceSimulationSimAdapter(t, *nodeCount, *initCount)
}

func testDiscoveryPersistenceSimulationSimAdapter(t *testing.T, nodes, conns int) {
	testDiscoveryPersistenceSimulation(t, nodes, conns, adapters.NewSimAdapter(services))
}

func testDiscoverySimulationSimAdapter(t *testing.T, nodes, conns int) {
	testDiscoverySimulation(t, nodes, conns, adapters.NewSimAdapter(services))
}

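// testDiscoverySimulation runs the discovery simulation with the given adapter
// and logs how long the network took to become healthy, including the
// min/max/average per-node pass times and the setup/shutdown durations.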
func testDiscoverySimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
	startedAt := time.Now()
	result, err := discoverySimulation(nodes, conns, adapter)
	if err != nil {
		t.Fatalf("Setting up simulation failed: %v", err)
	}
	if result.Error != nil {
		t.Fatalf("Simulation failed: %s", result.Error)
	}
	t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt))
	var min, max time.Duration
	var sum int
	for _, pass := range result.Passes {
		duration := pass.Sub(result.StartedAt)
		if sum == 0 || duration < min {
			min = duration
		}
		if duration > max {
			max = duration
		}
		sum += int(duration.Nanoseconds())
	}
	t.Logf("Min: %s, Max: %s, Average: %s", min, max, time.Duration(sum/len(result.Passes))*time.Nanosecond)
	finishedAt := time.Now()
	t.Logf("Setup: %s, shutdown: %s", result.StartedAt.Sub(startedAt), finishedAt.Sub(result.FinishedAt))
}

func testDiscoveryPersistenceSimulation(t *testing.T, nodes, conns int, adapter adapters.NodeAdapter) {
	persistenceEnabled = true
	discoveryEnabled = true

	result, err := discoveryPersistenceSimulation(nodes, conns, adapter)

	if err != nil {
		t.Fatalf("Setting up simulation failed: %v", err)
	}
	if result.Error != nil {
		t.Fatalf("Simulation failed: %s", result.Error)
	}
	t.Logf("Simulation with %d nodes passed in %s", nodes, result.FinishedAt.Sub(result.StartedAt))
	// set the discovery and persistence flags again to default so other
	// tests will not be affected
	discoveryEnabled = true
	persistenceEnabled = false
}

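// benchmarkDiscovery runs the in-memory discovery simulation b.N times so the
// testing framework can report the average time a network of the given size
// needs to reach a healthy kademlia.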
func benchmarkDiscovery(b *testing.B, nodes, conns int) {
	for i := 0; i < b.N; i++ {
		result, err := discoverySimulation(nodes, conns, adapters.NewSimAdapter(services))
		if err != nil {
			b.Fatalf("setting up simulation failed: %v", err)
		}
		if result.Error != nil {
			b.Logf("simulation failed: %s", result.Error)
		}
	}
}

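// discoverySimulation starts the given number of nodes, connects each node to
// its successor in a ring (plus conns-1 random peers), and waits until every
// node reports a healthy kademlia via the hive_healthy RPC. If the -snapshot
// flag is set, the resulting network is written to that file as JSON.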
func discoverySimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) {
	// create network
	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "0",
		DefaultService: serviceName,
	})
	defer net.Shutdown()
	trigger := make(chan enode.ID)
	ids := make([]enode.ID, nodes)
	for i := 0; i < nodes; i++ {
		conf := adapters.RandomNodeConfig()
		node, err := net.NewNodeWithConfig(conf)
		if err != nil {
			return nil, fmt.Errorf("error starting node: %s", err)
		}
		if err := net.Start(node.ID()); err != nil {
			return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
		}
		if err := triggerChecks(trigger, net, node.ID()); err != nil {
			return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
		}
		ids[i] = node.ID()
	}

	// run a simulation which connects the nodes in a ring and waits
	// for full peer discovery
	var addrs [][]byte
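	// the connections below are made synchronously before the simulation step
	// runs, so the step's action is a no-op; the wait for health is driven by
	// the trigger channel and the check function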
	action := func(ctx context.Context) error {
		return nil
	}
	wg := sync.WaitGroup{}
	for i := range ids {
		// collect the overlay addresses
		addrs = append(addrs, ids[i].Bytes())
		for j := 0; j < conns; j++ {
			var k int
			if j == 0 {
				k = (i + 1) % len(ids)
			} else {
				k = rand.Intn(len(ids))
			}
			wg.Add(1)
			go func(i, k int) {
				defer wg.Done()
				net.Connect(ids[i], ids[k])
			}(i, k)
		}
	}
	wg.Wait()
	log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
	// construct the peer pot, so that kademlia health can be checked
	ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)
	check := func(ctx context.Context, id enode.ID) (bool, error) {
		select {
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}

		node := net.GetNode(id)
		if node == nil {
			return false, fmt.Errorf("unknown node: %s", id)
		}
		client, err := node.Client()
		if err != nil {
			return false, fmt.Errorf("error getting node client: %s", err)
		}
		healthy := &network.Health{}
		if err := client.Call(&healthy, "hive_healthy", ppmap[id.String()]); err != nil {
			return false, fmt.Errorf("error getting node health: %s", err)
		}
		log.Debug(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v\n%v", id, healthy.GotNN, healthy.KnowNN, healthy.Full, healthy.Hive))
		return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
	}

	// 64 nodes ~ 1min
	// 128 nodes ~
	timeout := 300 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
		Action:  action,
		Trigger: trigger,
		Expect: &simulations.Expectation{
			Nodes: ids,
			Check: check,
		},
	})
	if result.Error != nil {
		return result, nil
	}

	if *snapshotFile != "" {
		snap, err := net.Snapshot()
		if err != nil {
			return nil, errors.New("failed to create network snapshot")
		}
		jsonsnapshot, err := json.Marshal(snap)
		if err != nil {
			return nil, fmt.Errorf("corrupt json snapshot: %v", err)
		}
		log.Info("writing snapshot", "file", *snapshotFile)
		err = ioutil.WriteFile(*snapshotFile, jsonsnapshot, 0755)
		if err != nil {
			return nil, err
		}
	}
	return result, nil
}

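// discoveryPersistenceSimulation is the persistence variant of
// discoverySimulation: once the network is healthy it stops every node, flips
// the package-level flags so restarted nodes run with persistence enabled and
// discovery disabled, restarts them, and then expects kademlia health to be
// re-established from the persisted state stores alone.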
func discoveryPersistenceSimulation(nodes, conns int, adapter adapters.NodeAdapter) (*simulations.StepResult, error) {
	cleanDbStores()
	defer cleanDbStores()

	// create network
	net := simulations.NewNetwork(adapter, &simulations.NetworkConfig{
		ID:             "0",
		DefaultService: serviceName,
	})
	defer net.Shutdown()
	trigger := make(chan enode.ID)
	ids := make([]enode.ID, nodes)
	var addrs [][]byte

	for i := 0; i < nodes; i++ {
		conf := adapters.RandomNodeConfig()
		node, err := net.NewNodeWithConfig(conf)
		if err != nil {
			return nil, fmt.Errorf("error starting node: %s", err)
		}
		if err := net.Start(node.ID()); err != nil {
			return nil, fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
		}
		if err := triggerChecks(trigger, net, node.ID()); err != nil {
			return nil, fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
		}
		ids[i] = node.ID()
		a := ids[i].Bytes()

		addrs = append(addrs, a)
	}

	// run a simulation which connects the nodes in a ring and waits
	// for full peer discovery
	ppmap := network.NewPeerPotMap(testMinProxBinSize, addrs)

	var restartTime time.Time

	action := func(ctx context.Context) error {
		ticker := time.NewTicker(500 * time.Millisecond)

		for range ticker.C {
			isHealthy := true
			for _, id := range ids {
				// call Healthy RPC
				node := net.GetNode(id)
				if node == nil {
					return fmt.Errorf("unknown node: %s", id)
				}
				client, err := node.Client()
				if err != nil {
					return fmt.Errorf("error getting node client: %s", err)
				}
				healthy := &network.Health{}
				addr := id.String()
				if err := client.Call(&healthy, "hive_healthy", ppmap[addr]); err != nil {
					return fmt.Errorf("error getting node health: %s", err)
				}

				log.Info(fmt.Sprintf("NODE: %s, IS HEALTHY: %t", addr, healthy.GotNN && healthy.KnowNN && healthy.Full))
				if !healthy.GotNN || !healthy.Full {
					isHealthy = false
					break
				}
			}
			if isHealthy {
				break
			}
		}
		ticker.Stop()

		log.Info("reached healthy kademlia. starting to shutdown nodes.")
		shutdownStarted := time.Now()
		// stop all ids, then start them again
		for _, id := range ids {
			node := net.GetNode(id)

			if err := net.Stop(node.ID()); err != nil {
				return fmt.Errorf("error stopping node %s: %s", node.ID().TerminalString(), err)
			}
		}
		log.Info(fmt.Sprintf("shutting down nodes took: %s", time.Since(shutdownStarted)))
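		// restart every node with persistence enabled and discovery disabled:
		// newService reads these package-level flags, so the restarted nodes
		// must rebuild a healthy kademlia from their persisted peer state
		// rather than via discovery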
		persistenceEnabled = true
		discoveryEnabled = false
		restartTime = time.Now()
		for _, id := range ids {
			node := net.GetNode(id)
			if err := net.Start(node.ID()); err != nil {
				return fmt.Errorf("error starting node %s: %s", node.ID().TerminalString(), err)
			}
			if err := triggerChecks(trigger, net, node.ID()); err != nil {
				return fmt.Errorf("error triggering checks for node %s: %s", node.ID().TerminalString(), err)
			}
		}

		log.Info(fmt.Sprintf("restarting nodes took: %s", time.Since(restartTime)))

		return nil
	}
	// connect the nodes in a ring, each node to its conns successors
	wg := sync.WaitGroup{}
	for i := range ids {
		for j := 1; j <= conns; j++ {
			k := (i + j) % len(ids)
			if k == i {
				k = (k + 1) % len(ids)
			}
			wg.Add(1)
			go func(i, k int) {
				defer wg.Done()
				net.Connect(ids[i], ids[k])
			}(i, k)
		}
	}
	wg.Wait()
	log.Debug(fmt.Sprintf("nodes: %v", len(addrs)))
	// check whether a node's kademlia is healthy, using the peer pot built above
	check := func(ctx context.Context, id enode.ID) (bool, error) {
		select {
		case <-ctx.Done():
			return false, ctx.Err()
		default:
		}

		node := net.GetNode(id)
		if node == nil {
			return false, fmt.Errorf("unknown node: %s", id)
		}
		client, err := node.Client()
		if err != nil {
			return false, fmt.Errorf("error getting node client: %s", err)
		}
		healthy := &network.Health{}
		if err := client.Call(&healthy, "hive_healthy", ppmap[id.String()]); err != nil {
			return false, fmt.Errorf("error getting node health: %s", err)
		}
		log.Info(fmt.Sprintf("node %4s healthy: got nearest neighbours: %v, know nearest neighbours: %v, saturated: %v", id, healthy.GotNN, healthy.KnowNN, healthy.Full))

		return healthy.KnowNN && healthy.GotNN && healthy.Full, nil
	}

	// 64 nodes ~ 1min
	// 128 nodes ~
	timeout := 300 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	result := simulations.NewSimulation(net).Run(ctx, &simulations.Step{
		Action:  action,
		Trigger: trigger,
		Expect: &simulations.Expectation{
			Nodes: ids,
			Check: check,
		},
	})

	return result, nil
}

// triggerChecks triggers a simulation step check whenever a peer is added or
// removed from the given node, and also every second to avoid a race between
// peer events and kademlia becoming healthy
func triggerChecks(trigger chan enode.ID, net *simulations.Network, id enode.ID) error {
	node := net.GetNode(id)
	if node == nil {
		return fmt.Errorf("unknown node: %s", id)
	}
	client, err := node.Client()
	if err != nil {
		return err
	}
	events := make(chan *p2p.PeerEvent)
	sub, err := client.Subscribe(context.Background(), "admin", events, "peerEvents")
	if err != nil {
		return fmt.Errorf("error getting peer events for node %v: %s", id, err)
	}
	go func() {
		defer sub.Unsubscribe()

		tick := time.NewTicker(time.Second)
		defer tick.Stop()

		for {
			select {
			case <-events:
				trigger <- id
			case <-tick.C:
				trigger <- id
			case err := <-sub.Err():
				if err != nil {
					log.Error(fmt.Sprintf("error getting peer events for node %v", id), "err", err)
				}
				return
			}
		}
	}()
	return nil
}

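// newService constructs the swarm bzz service run by every simulation node.
// It honours the package-level discoveryEnabled and persistenceEnabled flags:
// discovery toggles hive peer discovery, and persistence backs the kademlia
// with an on-disk state store so it survives a node restart.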
func newService(ctx *adapters.ServiceContext) (node.Service, error) {
	node := enode.NewV4(&ctx.Config.PrivateKey.PublicKey, adapters.ExternalIP(), int(ctx.Config.Port), int(ctx.Config.Port))
	addr := network.NewAddr(node)

	kp := network.NewKadParams()
	kp.MinProxBinSize = testMinProxBinSize

	if ctx.Config.Reachable != nil {
		kp.Reachable = func(o *network.BzzAddr) bool {
			return ctx.Config.Reachable(o.ID())
		}
	}
	kad := network.NewKademlia(addr.Over(), kp)
	hp := network.NewHiveParams()
	hp.KeepAliveInterval = 200 * time.Millisecond
	hp.Discovery = discoveryEnabled

	log.Info(fmt.Sprintf("discovery for nodeID %s is %t", ctx.Config.ID.String(), hp.Discovery))

	config := &network.BzzConfig{
		OverlayAddr:  addr.Over(),
		UnderlayAddr: addr.Under(),
		HiveParams:   hp,
	}

	if persistenceEnabled {
		log.Info(fmt.Sprintf("persistence enabled for nodeID %s", ctx.Config.ID.String()))
		store, err := getDbStore(ctx.Config.ID.String())
		if err != nil {
			return nil, err
		}
		return network.NewBzz(config, kad, store, nil, nil), nil
	}

	return network.NewBzz(config, kad, nil, nil, nil), nil
}