// github.com/NebulousLabs/Sia@v1.3.7/modules/gateway/nodes_test.go

package gateway

import (
	"errors"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/encoding"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/fastrand"
)

const dummyNode = "111.111.111.111:1111"

// TestAddNode tries adding a node to the gateway using the unexported addNode
// function. Edge case trials are also performed.
func TestAddNode(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g := newTestingGateway(t)
	defer g.Close()

	g.mu.Lock()
	defer g.mu.Unlock()
	if err := g.addNode(dummyNode); err != nil {
		t.Fatal("addNode failed:", err)
	}
	if err := g.addNode(dummyNode); err != errNodeExists {
		t.Error("addNode added duplicate node")
	}
	if err := g.addNode("foo"); err == nil {
		t.Error("addNode added unroutable address")
	}
	if err := g.addNode("foo:9981"); err == nil {
		t.Error("addNode added a non-IP address")
	}
	if err := g.addNode("[::]:9981"); err == nil {
		t.Error("addNode added unspecified address")
	}
	if err := g.addNode(g.myAddr); err != errOurAddress {
		t.Error("addNode added our own address")
	}
}

// TestRemoveNode tries removing a node from the gateway.
func TestRemoveNode(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g := newTestingGateway(t)
	defer g.Close()

	g.mu.Lock()
	defer g.mu.Unlock()
	if err := g.addNode(dummyNode); err != nil {
		t.Fatal("addNode failed:", err)
	}
	if err := g.removeNode(dummyNode); err != nil {
		t.Fatal("removeNode failed:", err)
	}
	if err := g.removeNode("bar"); err == nil {
		t.Fatal("removeNode removed nonexistent node")
	}
}
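
// Several tests in this file add batches of nodes while holding the gateway
// lock. The sketch below shows how that pattern could be factored into a
// helper; addNodes is hypothetical and not part of the gateway package.
func addNodes(g *Gateway, addrs ...modules.NetAddress) error {
	g.mu.Lock()
	defer g.mu.Unlock()
	for _, addr := range addrs {
		// Stop at the first address the gateway rejects.
		if err := g.addNode(addr); err != nil {
			return err
		}
	}
	return nil
}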

// TestRandomNode tries pulling random nodes from the gateway using
// g.randomNode() under a variety of conditions.
func TestRandomNode(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g := newTestingGateway(t)
	defer g.Close()

	// Test with 0 nodes.
	g.mu.RLock()
	_, err := g.randomNode()
	g.mu.RUnlock()
	if err != errNoPeers {
		t.Fatal("randomNode should fail when the gateway has 0 nodes")
	}

	// Test with 1 node.
	g.mu.Lock()
	if err = g.addNode(dummyNode); err != nil {
		t.Fatal(err)
	}
	g.mu.Unlock()
	g.mu.RLock()
	addr, err := g.randomNode()
	g.mu.RUnlock()
	if err != nil {
		t.Fatal("randomNode failed:", err)
	} else if addr != dummyNode {
		t.Fatal("randomNode returned wrong address:", addr)
	}

	// Test again with 0 nodes.
	g.mu.Lock()
	err = g.removeNode(dummyNode)
	g.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}
	g.mu.RLock()
	_, err = g.randomNode()
	g.mu.RUnlock()
	if err != errNoPeers {
		t.Fatalf("randomNode returned wrong error: expected %v, got %v", errNoPeers, err)
	}

	// Test with 3 nodes.
	nodes := map[modules.NetAddress]int{
		"111.111.111.111:1111": 0,
		"111.111.111.111:2222": 0,
		"111.111.111.111:3333": 0,
	}
	g.mu.Lock()
	for addr := range nodes {
		err := g.addNode(addr)
		if err != nil {
			t.Error(err)
		}
	}
	g.mu.Unlock()

	for i := 0; i < len(nodes)*10; i++ {
		g.mu.RLock()
		addr, err := g.randomNode()
		g.mu.RUnlock()
		if err != nil {
			t.Fatal("randomNode failed:", err)
		}
		nodes[addr]++
	}
	for node, count := range nodes {
		if count == 0 { // (2/3)^30, a roughly 1-in-200,000 chance of occurring naturally
			t.Errorf("node %v was never selected", node)
		}
	}
}

// TestShareNodes checks that two gateways will share nodes with each other
// following the desired sharing strategy.
func TestShareNodes(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()

	// Add a node to g2.
	g2.mu.Lock()
	err := g2.addNode(dummyNode)
	g2.mu.Unlock()
	if err != nil {
		t.Fatal(err)
	}

	// Connect the gateways.
	err = g1.Connect(g2.Address())
	if err != nil {
		t.Fatal("couldn't connect:", err)
	}

	err = build.Retry(50, 100*time.Millisecond, func() error {
		g1.mu.Lock()
		_, exists := g1.nodes[dummyNode]
		g1.mu.Unlock()
		if !exists {
			return errors.New("node not added")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// g1 should have received the node.
	time.Sleep(100 * time.Millisecond)
	g1.mu.Lock()
	err = g1.addNode(dummyNode)
	g1.mu.Unlock()
	if err == nil {
		t.Fatal("gateway did not receive nodes during Connect:", g1.nodes)
	}

	// Remove all nodes from both peers.
	g1.mu.Lock()
	g1.nodes = map[modules.NetAddress]*node{}
	g1.mu.Unlock()
	g2.mu.Lock()
	g2.nodes = map[modules.NetAddress]*node{}
	g2.mu.Unlock()

	// ShareNodes should now return no nodes.
	var nodes []modules.NetAddress
	err = g1.RPC(g2.Address(), "ShareNodes", func(conn modules.PeerConn) error {
		return encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength)
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(nodes) != 0 {
		t.Fatal("gateway gave non-existent addresses:", nodes)
	}

	// Sharing should be capped at maxSharedNodes.
	for i := 1; i < int(maxSharedNodes)+11; i++ {
		g2.mu.Lock()
		err := g2.addNode(modules.NetAddress("111.111.111.111:" + strconv.Itoa(i)))
		g2.mu.Unlock()
		if err != nil {
			t.Fatal(err)
		}
	}
	err = g1.RPC(g2.Address(), "ShareNodes", func(conn modules.PeerConn) error {
		return encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength)
	})
	if err != nil {
		t.Fatal(err)
	}
	if uint64(len(nodes)) != maxSharedNodes {
		t.Fatalf("gateway gave wrong number of nodes: expected %v, got %v", maxSharedNodes, len(nodes))
	}
}
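
// TestShareNodes invokes the ShareNodes RPC twice with identical boilerplate.
// The sketch below shows how that call could be wrapped in a helper;
// shareNodesFrom is hypothetical and only mirrors the calls made above.
func shareNodesFrom(g *Gateway, addr modules.NetAddress) ([]modules.NetAddress, error) {
	var nodes []modules.NetAddress
	err := g.RPC(addr, "ShareNodes", func(conn modules.PeerConn) error {
		// Bound the read by the maximum number of shareable addresses.
		return encoding.ReadObject(conn, &nodes, maxSharedNodes*modules.MaxEncodedNetAddressLength)
	})
	return nodes, err
}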

// TestNodesAreSharedOnConnect tests that nodes that a gateway has never seen
// before are added to the node list when connecting to another gateway that
// has seen said nodes.
func TestNodesAreSharedOnConnect(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()
	g1 := newNamedTestingGateway(t, "1")
	defer g1.Close()
	g2 := newNamedTestingGateway(t, "2")
	defer g2.Close()
	g3 := newNamedTestingGateway(t, "3")
	defer g3.Close()

	// Connect g2 to g1.
	err := g2.Connect(g1.Address())
	if err != nil {
		t.Fatal("couldn't connect:", err)
	}

	// Connect g3 to g1.
	err = g3.Connect(g1.Address())
	if err != nil {
		t.Fatal("couldn't connect:", err)
	}

	// g3 should have received g2's address from g1.
	time.Sleep(200 * time.Millisecond)
	g3.mu.Lock()
	defer g3.mu.Unlock()
	if _, ok := g3.nodes[g2.Address()]; !ok {
		t.Fatal("node was not relayed:", g3.nodes)
	}
}

// TestPruneNodeThreshold checks that gateways will not purge nodes if they are
// below the purge threshold, even if those nodes are offline.
func TestPruneNodeThreshold(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// The next part of the test expects pruneNodeListLen to be at least
	// maxSharedNodes*2 + 2 in size.
	if uint64(pruneNodeListLen) < (maxSharedNodes*2)+2 {
		t.Fatal("constants do not match test; adjust the constants or refactor this test:", maxSharedNodes, pruneNodeListLen)
	}

	// Create and connect pruneNodeListLen gateways.
	var gs []*Gateway
	for i := 0; i < pruneNodeListLen; i++ {
		gs = append(gs, newNamedTestingGateway(t, strconv.Itoa(i)))

		// Connect this gateway to the previous gateway.
		if i != 0 {
			err := gs[i].Connect(gs[i-1].myAddr)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	// Spin until all gateways have a nearly full node list.
	success := false
	for i := 0; i < 50; i++ {
		success = true
		for _, g := range gs {
			g.mu.RLock()
			gNodeLen := len(g.nodes)
			g.mu.RUnlock()
			if gNodeLen < pruneNodeListLen-2 {
				success = false
				break
			}
		}
		if success {
			break
		}
		time.Sleep(time.Second * 1)
	}
	if !success {
		t.Fatal("peers are not sharing nodes with each other")
	}

	// Gateway node lists have been filled out. Take a bunch of gateways
	// offline and verify that they do not start pruning each other.
	var wg sync.WaitGroup
	for i := 2; i < len(gs); i++ {
		wg.Add(1)
		go func(i int) {
			err := gs[i].Close()
			if err != nil {
				panic(err)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	// Wait for 5 iterations of the node purge loop, then verify that the
	// remaining gateways have not been purging nodes.
	time.Sleep(nodePurgeDelay * 5)

	// Check that the remaining gateways have not purged any nodes.
	gs[0].mu.RLock()
	gs0Nodes := len(gs[0].nodes)
	gs[0].mu.RUnlock()
	gs[1].mu.RLock()
	gs1Nodes := len(gs[1].nodes)
	gs[1].mu.RUnlock()
	if gs0Nodes < pruneNodeListLen-2 {
		t.Error("gateway seems to be pruning nodes below purge threshold")
	}
	if gs1Nodes < pruneNodeListLen-2 {
		t.Error("gateway seems to be pruning nodes below purge threshold")
	}

	// Close the remaining gateways.
	err := gs[0].Close()
	if err != nil {
		t.Error(err)
	}
	err = gs[1].Close()
	if err != nil {
		t.Error(err)
	}
}
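
// TestPruneNodeThreshold and TestHealthyNodeListPruning both read the node
// list length under the gateway's read lock. A minimal sketch of that pattern
// as a helper; nodeCount is hypothetical and not part of the package.
func nodeCount(g *Gateway) int {
	g.mu.RLock()
	defer g.mu.RUnlock()
	return len(g.nodes)
}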

// TestHealthyNodeListPruning checks that gateways will purge nodes if they are
// at a healthy node threshold and the nodes are offline.
func TestHealthyNodeListPruning(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	t.Parallel()

	// Create and connect healthyNodeListLen*2 gateways.
	var gs []*Gateway
	for i := 0; i < healthyNodeListLen*2; i++ {
		gs = append(gs, newNamedTestingGateway(t, strconv.Itoa(i)))

		// Connect this gateway to the previous gateway.
		if i != 0 {
			err := gs[i].Connect(gs[i-1].myAddr)
			if err != nil {
				t.Fatal(err)
			}
		}
		// To help speed the test up, also connect this gateway to the peer two
		// back.
		if i > 1 {
			err := gs[i].Connect(gs[i-2].myAddr)
			if err != nil {
				t.Fatal(err)
			}
		}
		// To help speed the test up, also connect this gateway to a random
		// previous peer.
		if i > 2 {
			err := gs[i].Connect(gs[fastrand.Intn(i-2)].myAddr)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	// Spin until all gateways have a nearly full node list.
	err := build.Retry(1000, 100*time.Millisecond, func() error {
		for _, g := range gs {
			g.mu.RLock()
			gNodeLen := len(g.nodes)
			g.mu.RUnlock()
			if gNodeLen < healthyNodeListLen {
				return errors.New("node is not connected to a sufficient number of peers")
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal("peers are not sharing nodes with each other:", err)
	}

	// Gateway node lists have been filled out. Take a bunch of gateways
	// offline and verify that the remaining gateways begin pruning their
	// node lists.
	var wg sync.WaitGroup
	for i := 2; i < len(gs); i++ {
		wg.Add(1)
		go func(i int) {
			err := gs[i].Close()
			if err != nil {
				panic(err)
			}
			wg.Done()
		}(i)
	}
	wg.Wait()

	// Wait for enough iterations of the node purge loop that over-pruning is
	// possible. (Over-pruning does not need to be guaranteed; causing this
	// test to fail once in a while is sufficient.)
	time.Sleep(nodePurgeDelay * time.Duration(healthyNodeListLen-pruneNodeListLen) * 12)

	// Check that the remaining gateways have pruned nodes.
	gs[0].mu.RLock()
	gs0Nodes := len(gs[0].nodes)
	gs[0].mu.RUnlock()
	gs[1].mu.RLock()
	gs1Nodes := len(gs[1].nodes)
	gs[1].mu.RUnlock()
	if gs0Nodes >= healthyNodeListLen-1 {
		t.Error("gateway is not pruning nodes", healthyNodeListLen, gs0Nodes)
	}
	if gs1Nodes >= healthyNodeListLen-1 {
		t.Error("gateway is not pruning nodes", healthyNodeListLen, gs1Nodes)
	}
	if gs0Nodes < pruneNodeListLen {
		t.Error("gateway is pruning too many nodes", gs0Nodes, pruneNodeListLen)
	}
	if gs1Nodes < pruneNodeListLen {
		t.Error("gateway is pruning too many nodes", gs1Nodes, pruneNodeListLen)
	}

	// Close the remaining gateways.
	err = gs[0].Close()
	if err != nil {
		t.Error(err)
	}
	err = gs[1].Close()
	if err != nil {
		t.Error(err)
	}
}
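
// Both pruning tests take most of their gateways offline concurrently using a
// sync.WaitGroup. The sketch below factors that pattern into a helper;
// closeAll is hypothetical and not part of the package. It reports failures
// with t.Error, which (unlike t.Fatal) is safe to call from spawned
// goroutines.
func closeAll(t *testing.T, gs []*Gateway) {
	var wg sync.WaitGroup
	for _, g := range gs {
		wg.Add(1)
		go func(g *Gateway) {
			defer wg.Done()
			if err := g.Close(); err != nil {
				t.Error(err)
			}
		}(g)
	}
	wg.Wait()
}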