github.com/daeglee/go-ethereum@v0.0.0-20190504220456-cad3e8d18e9b/cmd/swarm/run_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

const clusterSize = 3

var clusteronce sync.Once
var cluster *testCluster

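// initCluster starts the shared test cluster exactly once; repeated calls
// from different tests reuse the same cluster instance.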
func initCluster(t *testing.T) {
	clusteronce.Do(func() {
		cluster = newTestCluster(t, clusterSize)
	})
}

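// serverFunc wraps swarmhttp.NewServer so tests can build an HTTP test server
// directly from a swarm API instance.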
func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}

func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

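// runSwarm runs the test binary re-exec'd as "swarm-test" with the given
// command line arguments. Unless the caller supplies --bootnodes, an empty
// bootnode list is prepended so test nodes do not try to join a public network.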
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	found := false
	for _, v := range args {
		if v == "--bootnodes" {
			found = true
			break
		}
	}

	if !found {
		args = append([]string{"--bootnodes", ""}, args...)
	}

	// Boot "swarm". This actually runs the test binary but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}

type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin_addPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}

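// Shutdown stops all cluster nodes and removes the cluster's temporary directory.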
func (c *testCluster) Shutdown() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
	os.RemoveAll(c.TmpDir)
}

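// Stop stops all cluster nodes but keeps the temporary directory, so the
// cluster can be restarted later with StartExistingNodes.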
func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}

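// StartNewNodes creates a fresh data directory and a new node for each cluster member.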
func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		if err := os.Mkdir(dir, 0700); err != nil {
			t.Fatal(err)
		}

		node := newTestNode(t, dir)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

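// StartExistingNodes restarts the cluster nodes from their existing data
// directories, unlocking the given bzzaccount on each of them.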
func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

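// Cleanup removes the cluster's temporary directory.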
func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}

type testNode struct {
	Name       string
	Addr       string
	URL        string
	Enode      string
	Dir        string
	IpcPath    string
	PrivateKey *ecdsa.PrivateKey
	Client     *rpc.Client
	Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

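// getTestAccount creates a new keystore account inside dir and returns a node
// configuration pointing at that directory together with the created account.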
func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}

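// existingTestNode starts a swarm node that reuses the data directory in dir,
// unlocking the given bzzaccount with the test passphrase.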
func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same ones
	// when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

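// newTestNode creates a fresh account inside dir and starts a swarm node using it.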
func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)

	pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

	node := &testNode{Dir: dir, PrivateKey: pk}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same ones
	// when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

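// Shutdown kills the node's swarm process, if one was started.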
func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Requesting ports one at a time cannot guarantee that different calls
// return different ports, because each listener would be closed before
// the next one is opened. This function therefore keeps every listener
// open (via defer) until all ports have been selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer the close inside the loop to be sure the same port will not
		// be selected in a later iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It dials all
// ports in parallel and returns the first
// encountered error.
// See also waitTCPPort.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu guards the err variable, which is assigned
	// from multiple goroutines
	var mu sync.Mutex

	// cancel stops all remaining goroutines
	// when the first error is encountered,
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on the provided port. It waits at most 3 minutes, but the
// provided context can shorten that. Each dial attempt has a
// 10 second timeout, and connection refused errors are retried
// every 100 milliseconds.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}