github.com/daragao/go-ethereum@v1.8.14-0.20180809141559-45eaef243198/cmd/swarm/run_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
)

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	// Boot "swarm". This actually runs the test binary but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}
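
// runSwarmUsageSketch is a minimal sketch, not one of the original tests,
// showing how runSwarm is intended to be used: start the re-exec'd "swarm"
// binary with some arguments, match its output, and wait for it to exit.
// The "version" subcommand and the "Version" banner are assumptions made
// purely for illustration; ExpectRegexp and ExpectExit come from
// cmdtest.TestCmd.
func runSwarmUsageSketch(t *testing.T) {
	cmd := runSwarm(t, "version")
	cmd.ExpectRegexp("Version")
	cmd.ExpectExit()
}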

type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin_addPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}
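
// clusterUsageSketch is a minimal sketch, not one of the original tests, of
// the intended newTestCluster lifecycle: start a small cluster, always shut
// it down, and talk to each node through its RPC client (or over HTTP via
// testNode.URL). The cluster size of 3 is an arbitrary assumption.
func clusterUsageSketch(t *testing.T) {
	cluster := newTestCluster(t, 3)
	defer cluster.Shutdown()

	for _, node := range cluster.Nodes {
		var info swarm.Info
		if err := node.Client.Call(&info, "bzz_info"); err != nil {
			t.Fatal(err)
		}
	}
}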

func (c *testCluster) Shutdown() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
	os.RemoveAll(c.TmpDir)
}

func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}

func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		if err := os.Mkdir(dir, 0700); err != nil {
			t.Fatal(err)
		}

		node := newTestNode(t, dir)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}
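
// clusterRestartSketch is a minimal sketch, not one of the original tests, of
// the stop/restart pattern these helpers support: Stop kills the node
// processes but keeps their data directories, and StartExistingNodes boots
// nodes again from those same directories. The bzzaccount argument is an
// assumption here; in real tests it is taken from the first node's keystore.
func clusterRestartSketch(t *testing.T, bzzaccount string) {
	cluster := newTestCluster(t, 1)
	defer cluster.Shutdown()

	cluster.Stop()
	cluster.StartExistingNodes(t, 1, bzzaccount)
}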

type testNode struct {
	Name    string
	Addr    string
	URL     string
	Enode   string
	Dir     string
	IpcPath string
	Client  *rpc.Client
	Cmd     *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}

func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nodiscover",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", "6",
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)

	return node
}

func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nodiscover",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", "6",
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort)
	node.IpcPath = conf.IPCPath

	return node
}

func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Function assignTCPPort cannot be called in sequence
// and guarantee that the same port will not be returned in
// different calls, as the listener is closed within the function,
// not after all listeners are started and unique available
// ports are selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer close in the loop to be sure the same port will not
		// be selected in the next iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}
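
// reservePortSketch is a minimal sketch, not part of the original file, of
// the naive approach the comment above warns about: the listener is closed
// before the caller binds the port, so two consecutive calls may hand out
// the same port. getAvailableTCPPorts avoids this by deferring every Close
// until all ports have been selected.
func reservePortSketch() (string, error) {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return "", err
	}
	_, port, err := net.SplitHostPort(l.Addr().String())
	// Closing here frees the port immediately, which is exactly why a
	// later call may pick the same port again.
	l.Close()
	return port, err
}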

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs all
// port dialers in parallel, and returns the first
// encountered error.
// See also waitTCPPort.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu guards the err variable, which is assigned
	// from other goroutines
	var mu sync.Mutex

	// cancel stops all goroutines
	// when the first error is encountered,
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on the provided port. It has a maximum timeout of 3 minutes
// to prevent long waiting, which can be shortened with
// the provided context instance. The dialer has a 10 second timeout
// on every iteration, and connection refused errors are
// retried every 100 milliseconds.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}
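
// portHelpersSketch is a minimal sketch, not part of the original file, of how
// the two port helpers fit together: reserve free ports up front, start
// whatever should listen on them (a plain net.Listener stands in here for the
// re-exec'd swarm process), and then block until the ports actually accept
// connections.
func portHelpersSketch(ctx context.Context) error {
	ports, err := getAvailableTCPPorts(1)
	if err != nil {
		return err
	}
	// Stand-in listener; in the real tests the swarm node started by
	// runSwarm is what eventually binds these ports.
	l, err := net.Listen("tcp", "127.0.0.1:"+ports[0])
	if err != nil {
		return err
	}
	defer l.Close()

	return waitTCPPorts(ctx, ports...)
}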