github.com/pslzym/go-ethereum@v1.8.17-0.20180926104442-4b6824e07b1b/cmd/swarm/run_test.go (about) 1 // Copyright 2017 The go-ethereum Authors 2 // This file is part of go-ethereum. 3 // 4 // go-ethereum is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // go-ethereum is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU General Public License for more details. 13 // 14 // You should have received a copy of the GNU General Public License 15 // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. 16 17 package main 18 19 import ( 20 "context" 21 "crypto/ecdsa" 22 "fmt" 23 "io/ioutil" 24 "net" 25 "os" 26 "path" 27 "path/filepath" 28 "runtime" 29 "sync" 30 "syscall" 31 "testing" 32 "time" 33 34 "github.com/docker/docker/pkg/reexec" 35 "github.com/ethereum/go-ethereum/accounts" 36 "github.com/ethereum/go-ethereum/accounts/keystore" 37 "github.com/ethereum/go-ethereum/internal/cmdtest" 38 "github.com/ethereum/go-ethereum/node" 39 "github.com/ethereum/go-ethereum/p2p" 40 "github.com/ethereum/go-ethereum/rpc" 41 "github.com/ethereum/go-ethereum/swarm" 42 ) 43 44 func init() { 45 // Run the app if we've been exec'd as "swarm-test" in runSwarm. 46 reexec.Register("swarm-test", func() { 47 if err := app.Run(os.Args); err != nil { 48 fmt.Fprintln(os.Stderr, err) 49 os.Exit(1) 50 } 51 os.Exit(0) 52 }) 53 } 54 55 func TestMain(m *testing.M) { 56 // check if we have been reexec'd 57 if reexec.Init() { 58 return 59 } 60 os.Exit(m.Run()) 61 } 62 63 func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd { 64 tt := cmdtest.NewTestCmd(t, nil) 65 66 // Boot "swarm". 
This actually runs the test binary but the TestMain 67 // function will prevent any tests from running. 68 tt.Run("swarm-test", args...) 69 70 return tt 71 } 72 73 type testCluster struct { 74 Nodes []*testNode 75 TmpDir string 76 } 77 78 // newTestCluster starts a test swarm cluster of the given size. 79 // 80 // A temporary directory is created and each node gets a data directory inside 81 // it. 82 // 83 // Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p 84 // ports (assigned by first listening on 127.0.0.1:0 and then passing the ports 85 // as flags). 86 // 87 // When starting more than one node, they are connected together using the 88 // admin SetPeer RPC method. 89 90 func newTestCluster(t *testing.T, size int) *testCluster { 91 cluster := &testCluster{} 92 defer func() { 93 if t.Failed() { 94 cluster.Shutdown() 95 } 96 }() 97 98 tmpdir, err := ioutil.TempDir("", "swarm-test") 99 if err != nil { 100 t.Fatal(err) 101 } 102 cluster.TmpDir = tmpdir 103 104 // start the nodes 105 cluster.StartNewNodes(t, size) 106 107 if size == 1 { 108 return cluster 109 } 110 111 // connect the nodes together 112 for _, node := range cluster.Nodes { 113 if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil { 114 t.Fatal(err) 115 } 116 } 117 118 // wait until all nodes have the correct number of peers 119 outer: 120 for _, node := range cluster.Nodes { 121 var peers []*p2p.PeerInfo 122 for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) { 123 if err := node.Client.Call(&peers, "admin_peers"); err != nil { 124 t.Fatal(err) 125 } 126 if len(peers) == len(cluster.Nodes)-1 { 127 continue outer 128 } 129 } 130 t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1) 131 } 132 133 return cluster 134 } 135 136 func (c *testCluster) Shutdown() { 137 for _, node := range c.Nodes { 138 node.Shutdown() 139 } 140 os.RemoveAll(c.TmpDir) 141 } 142 143 func (c 
*testCluster) Stop() { 144 for _, node := range c.Nodes { 145 node.Shutdown() 146 } 147 } 148 149 func (c *testCluster) StartNewNodes(t *testing.T, size int) { 150 c.Nodes = make([]*testNode, 0, size) 151 for i := 0; i < size; i++ { 152 dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) 153 if err := os.Mkdir(dir, 0700); err != nil { 154 t.Fatal(err) 155 } 156 157 node := newTestNode(t, dir) 158 node.Name = fmt.Sprintf("swarm%02d", i) 159 160 c.Nodes = append(c.Nodes, node) 161 } 162 } 163 164 func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) { 165 c.Nodes = make([]*testNode, 0, size) 166 for i := 0; i < size; i++ { 167 dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) 168 node := existingTestNode(t, dir, bzzaccount) 169 node.Name = fmt.Sprintf("swarm%02d", i) 170 171 c.Nodes = append(c.Nodes, node) 172 } 173 } 174 175 func (c *testCluster) Cleanup() { 176 os.RemoveAll(c.TmpDir) 177 } 178 179 type testNode struct { 180 Name string 181 Addr string 182 URL string 183 Enode string 184 Dir string 185 IpcPath string 186 PrivateKey *ecdsa.PrivateKey 187 Client *rpc.Client 188 Cmd *cmdtest.TestCmd 189 } 190 191 const testPassphrase = "swarm-test-passphrase" 192 193 func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) { 194 // create key 195 conf = &node.Config{ 196 DataDir: dir, 197 IPCPath: "bzzd.ipc", 198 NoUSB: true, 199 } 200 n, err := node.New(conf) 201 if err != nil { 202 t.Fatal(err) 203 } 204 account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase) 205 if err != nil { 206 t.Fatal(err) 207 } 208 209 // use a unique IPCPath when running tests on Windows 210 if runtime.GOOS == "windows" { 211 conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String()) 212 } 213 214 return conf, account 215 } 216 217 func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { 218 conf, _ := getTestAccount(t, 
dir) 219 node := &testNode{Dir: dir} 220 221 // use a unique IPCPath when running tests on Windows 222 if runtime.GOOS == "windows" { 223 conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount) 224 } 225 226 // assign ports 227 ports, err := getAvailableTCPPorts(2) 228 if err != nil { 229 t.Fatal(err) 230 } 231 p2pPort := ports[0] 232 httpPort := ports[1] 233 234 // start the node 235 node.Cmd = runSwarm(t, 236 "--port", p2pPort, 237 "--nat", "extip:127.0.0.1", 238 "--nodiscover", 239 "--datadir", dir, 240 "--ipcpath", conf.IPCPath, 241 "--ens-api", "", 242 "--bzzaccount", bzzaccount, 243 "--bzznetworkid", "321", 244 "--bzzport", httpPort, 245 "--verbosity", "3", 246 ) 247 node.Cmd.InputLine(testPassphrase) 248 defer func() { 249 if t.Failed() { 250 node.Shutdown() 251 } 252 }() 253 254 ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 255 defer cancel() 256 257 // ensure that all ports have active listeners 258 // so that the next node will not get the same 259 // when calling getAvailableTCPPorts 260 err = waitTCPPorts(ctx, ports...) 
261 if err != nil { 262 t.Fatal(err) 263 } 264 265 // wait for the node to start 266 for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { 267 node.Client, err = rpc.Dial(conf.IPCEndpoint()) 268 if err == nil { 269 break 270 } 271 } 272 if node.Client == nil { 273 t.Fatal(err) 274 } 275 276 // load info 277 var info swarm.Info 278 if err := node.Client.Call(&info, "bzz_info"); err != nil { 279 t.Fatal(err) 280 } 281 node.Addr = net.JoinHostPort("127.0.0.1", info.Port) 282 node.URL = "http://" + node.Addr 283 284 var nodeInfo p2p.NodeInfo 285 if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { 286 t.Fatal(err) 287 } 288 node.Enode = nodeInfo.Enode 289 node.IpcPath = conf.IPCPath 290 return node 291 } 292 293 func newTestNode(t *testing.T, dir string) *testNode { 294 295 conf, account := getTestAccount(t, dir) 296 ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1) 297 298 pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase}) 299 300 node := &testNode{Dir: dir, PrivateKey: pk} 301 302 // assign ports 303 ports, err := getAvailableTCPPorts(2) 304 if err != nil { 305 t.Fatal(err) 306 } 307 p2pPort := ports[0] 308 httpPort := ports[1] 309 310 // start the node 311 node.Cmd = runSwarm(t, 312 "--port", p2pPort, 313 "--nat", "extip:127.0.0.1", 314 "--nodiscover", 315 "--datadir", dir, 316 "--ipcpath", conf.IPCPath, 317 "--ens-api", "", 318 "--bzzaccount", account.Address.String(), 319 "--bzznetworkid", "321", 320 "--bzzport", httpPort, 321 "--verbosity", "3", 322 ) 323 node.Cmd.InputLine(testPassphrase) 324 defer func() { 325 if t.Failed() { 326 node.Shutdown() 327 } 328 }() 329 330 ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 331 defer cancel() 332 333 // ensure that all ports have active listeners 334 // so that the next node will not get the same 335 // when calling getAvailableTCPPorts 336 err = waitTCPPorts(ctx, ports...) 
337 if err != nil { 338 t.Fatal(err) 339 } 340 341 // wait for the node to start 342 for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { 343 node.Client, err = rpc.Dial(conf.IPCEndpoint()) 344 if err == nil { 345 break 346 } 347 } 348 if node.Client == nil { 349 t.Fatal(err) 350 } 351 352 // load info 353 var info swarm.Info 354 if err := node.Client.Call(&info, "bzz_info"); err != nil { 355 t.Fatal(err) 356 } 357 node.Addr = net.JoinHostPort("127.0.0.1", info.Port) 358 node.URL = "http://" + node.Addr 359 360 var nodeInfo p2p.NodeInfo 361 if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { 362 t.Fatal(err) 363 } 364 node.Enode = nodeInfo.Enode 365 node.IpcPath = conf.IPCPath 366 return node 367 } 368 369 func (n *testNode) Shutdown() { 370 if n.Cmd != nil { 371 n.Cmd.Kill() 372 } 373 } 374 375 // getAvailableTCPPorts returns a set of ports that 376 // nothing is listening on at the time. 377 // 378 // Function assignTCPPort cannot be called in sequence 379 // and guardantee that the same port will be returned in 380 // different calls as the listener is closed within the function, 381 // not after all listeners are started and selected unique 382 // available ports. 383 func getAvailableTCPPorts(count int) (ports []string, err error) { 384 for i := 0; i < count; i++ { 385 l, err := net.Listen("tcp", "127.0.0.1:0") 386 if err != nil { 387 return nil, err 388 } 389 // defer close in the loop to be sure the same port will not 390 // be selected in the next iteration 391 defer l.Close() 392 393 _, port, err := net.SplitHostPort(l.Addr().String()) 394 if err != nil { 395 return nil, err 396 } 397 ports = append(ports, port) 398 } 399 return ports, nil 400 } 401 402 // waitTCPPorts blocks until tcp connections can be 403 // established on all provided ports. It runs all 404 // ports dialers in parallel, and returns the first 405 // encountered error. 406 // See waitTCPPort also. 
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu locks err variable that is assigned in
	// other goroutines
	var mu sync.Mutex

	// cancel is canceling all goroutines
	// when the first error is returned
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			// record only the first error; later goroutines see err != nil
			// and leave it untouched
			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on a provided port. It has a 3 minute timeout as maximum,
// to prevent long waiting, but it can be shortened with
// a provided context instance. Dialer has a 10 second timeout
// in every iteration, and connection refused error will be
// retried in 100 millisecond periods.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					// nothing is listening yet: retry shortly instead of
					// failing; any other error is returned immediately
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}