github.com/nitinawathare/ethereumassignment3@v0.0.0-20211021213010-f07344c2b868/go-ethereum/cmd/swarm/run_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

const clusterSize = 3

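// serverFunc returns a swarm HTTP test server backed by the given API.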
func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}
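
// TestMain bypasses the test runner when the binary has been re-exec'd
// as "swarm-test" by runSwarm.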
func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

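// runSwarm starts a swarm command by re-executing the current test binary
// as "swarm-test" (see init). An empty --bootnodes flag is injected when
// the caller does not pass one, so test nodes do not try to join the
// public network. A minimal usage sketch (the flag is illustrative):
//
//	tt := runSwarm(t, "--version")
//	tt.ExpectExit()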
func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	found := false
	for _, v := range args {
		if v == "--bootnodes" {
			found = true
			break
		}
	}

	if !found {
		args = append([]string{"--bootnodes", ""}, args...)
	}

	// Boot "swarm". This actually runs the test binary, but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}

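// testCluster is a group of swarm test nodes sharing a single temporary
// data directory.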
type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin_addPeer RPC method.
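//
// A typical use (a sketch using the clusterSize constant defined above):
//
//	cluster := newTestCluster(t, clusterSize)
//	defer cluster.Shutdown()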
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}

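// Shutdown stops all nodes in the cluster and removes its temporary
// data directory.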
func (c *testCluster) Shutdown() {
	c.Stop()
	c.Cleanup()
}

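// Stop terminates all node processes, leaving their data directories
// in place.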
func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}

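// StartNewNodes creates fresh data directories under the cluster's TmpDir
// and starts size new nodes in parallel, failing the test if any of them
// cannot be started.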
func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)

	errors := make(chan error, size)
	nodes := make(chan *testNode, size)
	for i := 0; i < size; i++ {
		go func(nodeIndex int) {
			dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", nodeIndex))
			if err := os.Mkdir(dir, 0700); err != nil {
				errors <- err
				return
			}

			node := newTestNode(t, dir)
			node.Name = fmt.Sprintf("swarm%02d", nodeIndex)
			nodes <- node
		}(i)
	}

	for i := 0; i < size; i++ {
		select {
		case node := <-nodes:
			c.Nodes = append(c.Nodes, node)
		case err := <-errors:
			t.Error(err)
		}
	}

	if t.Failed() {
		c.Shutdown()
		t.FailNow()
	}
}

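// StartExistingNodes restarts size nodes from the data directories already
// present under the cluster's TmpDir, unlocking the given bzzaccount.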
func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

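// Cleanup removes the cluster's temporary data directory.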
func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}

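// testNode describes a single swarm node process under test, together
// with its RPC client and endpoint information.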
type testNode struct {
	Name       string
	Addr       string
	URL        string
	Enode      string
	Dir        string
	IpcPath    string
	PrivateKey *ecdsa.PrivateKey
	Client     *rpc.Client
	Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

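// getTestAccount creates a new keystore account inside dir, protected by
// testPassphrase, and returns a node configuration pointing at that dir.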
func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}

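// existingTestNode restarts a node from an existing data directory,
// unlocking the given bzzaccount, and waits until its HTTP and p2p
// endpoints accept connections.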
func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

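// newTestNode starts a brand new node in dir with a freshly created
// account, keeps the decrypted private key on the returned struct, and
// waits until the node's HTTP and p2p endpoints accept connections.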
func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	ks := keystore.NewKeyStore(filepath.Join(dir, "keystore"), 1<<18, 1)

	pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

	node := &testNode{Dir: dir, PrivateKey: pk}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--bootnodes", "",
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

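// Shutdown kills the node's running command, if any.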
func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// Allocating ports one call at a time cannot guarantee
// unique ports, because each listener is closed before the
// function returns; this function therefore opens listeners
// for all requested ports before closing any of them.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer close in the loop to be sure the same port will not
		// be selected in the next iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}

// waitTCPPorts blocks until TCP connections can be
// established on all provided ports. It dials all
// ports in parallel and returns the first
// encountered error.
// See also waitTCPPort.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu guards the err variable, which is assigned
	// from multiple goroutines
	var mu sync.Mutex

	// cancel stops all remaining dialers as soon as
	// the first error is encountered, to prevent
	// unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a TCP connection can be established
// on the provided port. It waits at most 3 minutes to prevent
// hanging, but the wait can be shortened through the provided
// context. The dialer uses a 10 second timeout on every attempt,
// and "connection refused" errors are retried every 100
// milliseconds.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}