github.com/XinFinOrg/xdcchain@v1.1.0/cmd/swarm/run_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"context"
	"crypto/ecdsa"
	"flag"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sync"
	"syscall"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"
	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/accounts/keystore"
	"github.com/ethereum/go-ethereum/internal/cmdtest"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm"
	"github.com/ethereum/go-ethereum/swarm/api"
	swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http"
)

var loglevel = flag.Int("loglevel", 3, "verbosity of logs")

func init() {
	// Run the app if we've been exec'd as "swarm-test" in runSwarm.
	reexec.Register("swarm-test", func() {
		if err := app.Run(os.Args); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		os.Exit(0)
	})
}

const clusterSize = 3

var clusteronce sync.Once
var cluster *testCluster

func initCluster(t *testing.T) {
	clusteronce.Do(func() {
		cluster = newTestCluster(t, clusterSize)
	})
}

func serverFunc(api *api.API) swarmhttp.TestServer {
	return swarmhttp.NewServer(api, "")
}

func TestMain(m *testing.M) {
	// check if we have been reexec'd
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

func runSwarm(t *testing.T, args ...string) *cmdtest.TestCmd {
	tt := cmdtest.NewTestCmd(t, nil)

	// Boot "swarm". This actually runs the test binary but the TestMain
	// function will prevent any tests from running.
	tt.Run("swarm-test", args...)

	return tt
}
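
// A hedged usage sketch of the helpers above (hypothetical, not part of the
// original file): it lazily boots the shared cluster via initCluster and then
// drives the swarm CLI against the first node with runSwarm. The "up"
// subcommand, the --bzzapi flag and the file path are only illustrative of
// how the real command tests in this package use these helpers.
func exampleRunSwarmSketch(t *testing.T) {
	initCluster(t) // starts the shared clusterSize-node cluster once per test binary

	// Run the CLI against the first node's HTTP endpoint and wait for the
	// command to exit.
	up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", "testdata/example.txt")
	up.ExpectExit()
}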

type testCluster struct {
	Nodes  []*testNode
	TmpDir string
}

// newTestCluster starts a test swarm cluster of the given size.
//
// A temporary directory is created and each node gets a data directory inside
// it.
//
// Each node listens on 127.0.0.1 with random ports for both the HTTP and p2p
// ports (assigned by first listening on 127.0.0.1:0 and then passing the ports
// as flags).
//
// When starting more than one node, they are connected together using the
// admin_addPeer RPC method.
func newTestCluster(t *testing.T, size int) *testCluster {
	cluster := &testCluster{}
	defer func() {
		if t.Failed() {
			cluster.Shutdown()
		}
	}()

	tmpdir, err := ioutil.TempDir("", "swarm-test")
	if err != nil {
		t.Fatal(err)
	}
	cluster.TmpDir = tmpdir

	// start the nodes
	cluster.StartNewNodes(t, size)

	if size == 1 {
		return cluster
	}

	// connect the nodes together
	for _, node := range cluster.Nodes {
		if err := node.Client.Call(nil, "admin_addPeer", cluster.Nodes[0].Enode); err != nil {
			t.Fatal(err)
		}
	}

	// wait until all nodes have the correct number of peers
outer:
	for _, node := range cluster.Nodes {
		var peers []*p2p.PeerInfo
		for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(50 * time.Millisecond) {
			if err := node.Client.Call(&peers, "admin_peers"); err != nil {
				t.Fatal(err)
			}
			if len(peers) == len(cluster.Nodes)-1 {
				continue outer
			}
		}
		t.Fatalf("%s only has %d / %d peers", node.Name, len(peers), len(cluster.Nodes)-1)
	}

	return cluster
}

func (c *testCluster) Shutdown() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
	os.RemoveAll(c.TmpDir)
}

func (c *testCluster) Stop() {
	for _, node := range c.Nodes {
		node.Shutdown()
	}
}

func (c *testCluster) StartNewNodes(t *testing.T, size int) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		if err := os.Mkdir(dir, 0700); err != nil {
			t.Fatal(err)
		}

		node := newTestNode(t, dir)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) {
	c.Nodes = make([]*testNode, 0, size)
	for i := 0; i < size; i++ {
		dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i))
		node := existingTestNode(t, dir, bzzaccount)
		node.Name = fmt.Sprintf("swarm%02d", i)

		c.Nodes = append(c.Nodes, node)
	}
}

func (c *testCluster) Cleanup() {
	os.RemoveAll(c.TmpDir)
}
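
// A hedged sketch of the cluster lifecycle these methods support
// (hypothetical, not part of the original file): a test that needs its own
// cluster creates one and shuts it down afterwards, while a test that must
// survive a restart stops the node processes but keeps their data
// directories and then boots the same nodes again. bzzAccountHex is assumed
// to hold the hex address of the account the nodes were first started with.
func clusterLifecycleSketch(t *testing.T, bzzAccountHex string) {
	c := newTestCluster(t, clusterSize)
	defer c.Shutdown() // kills the nodes and removes the temporary directory

	// Simulate a restart: stop the processes, keep TmpDir, and start the
	// same nodes again from their existing data directories.
	c.Stop()
	c.StartExistingNodes(t, clusterSize, bzzAccountHex)
}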

type testNode struct {
	Name       string
	Addr       string
	URL        string
	Enode      string
	Dir        string
	IpcPath    string
	PrivateKey *ecdsa.PrivateKey
	Client     *rpc.Client
	Cmd        *cmdtest.TestCmd
}

const testPassphrase = "swarm-test-passphrase"

func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accounts.Account) {
	// create key
	conf = &node.Config{
		DataDir: dir,
		IPCPath: "bzzd.ipc",
		NoUSB:   true,
	}
	n, err := node.New(conf)
	if err != nil {
		t.Fatal(err)
	}
	account, err = n.AccountManager().Backends(keystore.KeyStoreType)[0].(*keystore.KeyStore).NewAccount(testPassphrase)
	if err != nil {
		t.Fatal(err)
	}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", account.Address.String())
	}

	return conf, account
}

func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode {
	conf, _ := getTestAccount(t, dir)
	node := &testNode{Dir: dir}

	// use a unique IPCPath when running tests on Windows
	if runtime.GOOS == "windows" {
		conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount)
	}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", bzzaccount,
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

func newTestNode(t *testing.T, dir string) *testNode {
	conf, account := getTestAccount(t, dir)
	ks := keystore.NewKeyStore(path.Join(dir, "keystore"), 1<<18, 1)

	pk := decryptStoreAccount(ks, account.Address.Hex(), []string{testPassphrase})

	node := &testNode{Dir: dir, PrivateKey: pk}

	// assign ports
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}
	p2pPort := ports[0]
	httpPort := ports[1]

	// start the node
	node.Cmd = runSwarm(t,
		"--port", p2pPort,
		"--nat", "extip:127.0.0.1",
		"--datadir", dir,
		"--ipcpath", conf.IPCPath,
		"--ens-api", "",
		"--bzzaccount", account.Address.String(),
		"--bzznetworkid", "321",
		"--bzzport", httpPort,
		"--verbosity", fmt.Sprint(*loglevel),
	)
	node.Cmd.InputLine(testPassphrase)
	defer func() {
		if t.Failed() {
			node.Shutdown()
		}
	}()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// ensure that all ports have active listeners
	// so that the next node will not get the same
	// ports when calling getAvailableTCPPorts
	err = waitTCPPorts(ctx, ports...)
	if err != nil {
		t.Fatal(err)
	}

	// wait for the node to start
	for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) {
		node.Client, err = rpc.Dial(conf.IPCEndpoint())
		if err == nil {
			break
		}
	}
	if node.Client == nil {
		t.Fatal(err)
	}

	// load info
	var info swarm.Info
	if err := node.Client.Call(&info, "bzz_info"); err != nil {
		t.Fatal(err)
	}
	node.Addr = net.JoinHostPort("127.0.0.1", info.Port)
	node.URL = "http://" + node.Addr

	var nodeInfo p2p.NodeInfo
	if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil {
		t.Fatal(err)
	}
	node.Enode = nodeInfo.Enode
	node.IpcPath = conf.IPCPath
	return node
}

func (n *testNode) Shutdown() {
	if n.Cmd != nil {
		n.Cmd.Kill()
	}
}

// getAvailableTCPPorts returns a set of ports that
// nothing is listening on at the time.
//
// A single-port helper such as assignTCPPort cannot be called
// repeatedly and guarantee that different ports are returned on
// each call, because its listener is closed before it returns.
// This function instead keeps every listener open until all of
// the unique available ports have been selected.
func getAvailableTCPPorts(count int) (ports []string, err error) {
	for i := 0; i < count; i++ {
		l, err := net.Listen("tcp", "127.0.0.1:0")
		if err != nil {
			return nil, err
		}
		// defer close in the loop to be sure the same port will not
		// be selected in the next iteration
		defer l.Close()

		_, port, err := net.SplitHostPort(l.Addr().String())
		if err != nil {
			return nil, err
		}
		ports = append(ports, port)
	}
	return ports, nil
}

// waitTCPPorts blocks until tcp connections can be
// established on all provided ports. It runs all
// port dialers in parallel and returns the first
// error encountered.
// See also waitTCPPort.
func waitTCPPorts(ctx context.Context, ports ...string) error {
	var err error
	// mu protects the err variable that is assigned
	// from other goroutines
	var mu sync.Mutex

	// cancel cancels all goroutines
	// when the first error is returned
	// to prevent unnecessary waiting
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var wg sync.WaitGroup
	for _, port := range ports {
		wg.Add(1)
		go func(port string) {
			defer wg.Done()

			e := waitTCPPort(ctx, port)

			mu.Lock()
			defer mu.Unlock()
			if e != nil && err == nil {
				err = e
				cancel()
			}
		}(port)
	}
	wg.Wait()

	return err
}

// waitTCPPort blocks until a tcp connection can be established
// on the provided port. It has a maximum timeout of 3 minutes
// to prevent long waiting, which can be shortened with the
// provided context instance. The dialer has a 10 second timeout
// in every iteration, and connection refused errors are retried
// every 100 milliseconds.
func waitTCPPort(ctx context.Context, port string) error {
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	for {
		c, err := (&net.Dialer{Timeout: 10 * time.Second}).DialContext(ctx, "tcp", "127.0.0.1:"+port)
		if err != nil {
			if operr, ok := err.(*net.OpError); ok {
				if syserr, ok := operr.Err.(*os.SyscallError); ok && syserr.Err == syscall.ECONNREFUSED {
					time.Sleep(100 * time.Millisecond)
					continue
				}
			}
			return err
		}
		return c.Close()
	}
}
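
// A hedged usage sketch of the port helpers above (hypothetical, not part of
// the original file): reserve two free ports, hand them to a server that is
// started elsewhere, and block until both ports are accepting connections so
// that a subsequent getAvailableTCPPorts call cannot return the same ports.
// startServer is a placeholder for launching the process, for example via
// runSwarm as done in existingTestNode and newTestNode.
func portHelpersSketch(t *testing.T, startServer func(p2pPort, httpPort string)) {
	ports, err := getAvailableTCPPorts(2)
	if err != nil {
		t.Fatal(err)
	}

	// The server is expected to bind both ports.
	startServer(ports[0], ports[1])

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Block until both ports have active listeners.
	if err := waitTCPPorts(ctx, ports...); err != nil {
		t.Fatal(err)
	}
}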