github.com/dmmcquay/sia@v1.3.1-0.20180712220038-9f8d535311b9/siatest/testgroup.go

package siatest

import (
	"math"
	"reflect"
	"strconv"
	"sync"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/node"
	"github.com/NebulousLabs/Sia/node/api/client"
	"github.com/NebulousLabs/Sia/types"
	"github.com/NebulousLabs/errors"
	"github.com/NebulousLabs/fastrand"
)

type (
	// GroupParams is a helper struct to make creating TestGroups easier.
	GroupParams struct {
		Hosts   int // number of hosts to create
		Renters int // number of renters to create
		Miners  int // number of miners to create
	}

	// TestGroup is a group of TestNodes that are funded, synced and ready
	// for upload, download and mining depending on their configuration.
	TestGroup struct {
		nodes   map[*TestNode]struct{}
		hosts   map[*TestNode]struct{}
		renters map[*TestNode]struct{}
		miners  map[*TestNode]struct{}

		dir string
	}
)

var (
	// DefaultAllowance is the allowance used for the group's renters.
	DefaultAllowance = modules.Allowance{
		Funds:       types.SiacoinPrecision.Mul64(1e3),
		Hosts:       5,
		Period:      50,
		RenewWindow: 24,
	}
)

// NewGroup creates a group of TestNodes from node params. All the nodes will
// be connected, synced and funded. Host nodes are also announced.
func NewGroup(nodeParams ...node.NodeParams) (*TestGroup, error) {
	// Create and init group
	tg := &TestGroup{
		nodes:   make(map[*TestNode]struct{}),
		hosts:   make(map[*TestNode]struct{}),
		renters: make(map[*TestNode]struct{}),
		miners:  make(map[*TestNode]struct{}),
	}

	// Create the nodes and add them to the correct groups
	nodes := make([]*TestNode, 0, len(nodeParams))
	for _, np := range nodeParams {
		node, err := NewCleanNode(np)
		if err != nil {
			return nil, errors.AddContext(err, "failed to create clean node")
		}
		// Add node to nodes
		tg.nodes[node] = struct{}{}
		nodes = append(nodes, node)
		// Add node to hosts
		if np.Host != nil || np.CreateHost {
			tg.hosts[node] = struct{}{}
		}
		// Add node to renters
		if np.Renter != nil || np.CreateRenter {
			tg.renters[node] = struct{}{}
		}
		// Add node to miners
		if np.Miner != nil || np.CreateMiner {
			tg.miners[node] = struct{}{}
		}
	}

	// Get a miner and mine some blocks to generate coins
	if len(tg.miners) == 0 {
		return nil, errors.New("cannot fund group without miners")
	}
	miner := tg.Miners()[0]
	for i := types.BlockHeight(0); i <= types.MaturityDelay+types.TaxHardforkHeight; i++ {
		if err := miner.MineBlock(); err != nil {
			return nil, errors.AddContext(err, "failed to mine block for funding")
		}
	}
	// Fully connect nodes
	return tg, tg.setupNodes(tg.hosts, tg.nodes, tg.renters)
}

// NewGroupFromTemplate will create hosts, renters and miners according to the
// settings in groupParams.
func NewGroupFromTemplate(groupParams GroupParams) (*TestGroup, error) {
	var params []node.NodeParams
	// Create host params
	for i := 0; i < groupParams.Hosts; i++ {
		params = append(params, node.Host(randomDir()))
	}
	// Create renter params
	for i := 0; i < groupParams.Renters; i++ {
		params = append(params, node.Renter(randomDir()))
	}
	// Create miner params
	for i := 0; i < groupParams.Miners; i++ {
		params = append(params, Miner(randomDir()))
	}
	return NewGroup(params...)
}
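// Typical usage (a sketch; assumes a *testing.T named t, and the group
// sizes are illustrative):
//
//	groupParams := GroupParams{Hosts: 5, Renters: 1, Miners: 1}
//	tg, err := NewGroupFromTemplate(groupParams)
//	if err != nil {
//		t.Fatal(err)
//	}
//	defer func() {
//		if err := tg.Close(); err != nil {
//			t.Fatal(err)
//		}
//	}()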
// addStorageFolderToHosts adds a single storage folder to each host.
func addStorageFolderToHosts(hosts map[*TestNode]struct{}) error {
	errs := make([]error, len(hosts))
	wg := new(sync.WaitGroup)
	i := 0
	// The following API call is very slow. Using multiple threads speeds that
	// process up a lot.
	for host := range hosts {
		wg.Add(1)
		go func(i int, host *TestNode) {
			errs[i] = host.HostStorageFoldersAddPost(host.Dir, 1048576)
			wg.Done()
		}(i, host)
		i++
	}
	wg.Wait()
	return errors.Compose(errs...)
}

// announceHosts sets each host to accepting contracts and announces it to
// the group.
func announceHosts(hosts map[*TestNode]struct{}) error {
	for host := range hosts {
		if err := host.HostModifySettingPost(client.HostParamAcceptingContracts, true); err != nil {
			return errors.AddContext(err, "failed to set host to accepting contracts")
		}
		if err := host.HostAnnouncePost(); err != nil {
			return errors.AddContext(err, "failed to announce host")
		}
	}
	return nil
}

// fullyConnectNodes takes a list of nodes and connects all their gateways.
func fullyConnectNodes(nodes []*TestNode) error {
	// Fully connect the nodes
	for i, nodeA := range nodes {
		for _, nodeB := range nodes[i+1:] {
			err := build.Retry(100, 100*time.Millisecond, func() error {
				if err := nodeA.GatewayConnectPost(nodeB.GatewayAddress()); err != nil && err != client.ErrPeerExists {
					return errors.AddContext(err, "failed to connect to peer")
				}
				isPeer1, err1 := nodeA.hasPeer(nodeB)
				isPeer2, err2 := nodeB.hasPeer(nodeA)
				if err1 != nil || err2 != nil {
					return build.ExtendErr("couldn't determine if nodeA and nodeB are connected",
						errors.Compose(err1, err2))
				}
				if isPeer1 && isPeer2 {
					return nil
				}
				return errors.New("nodeA and nodeB are not peers of each other")
			})
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// fundNodes uses the funds of a miner node to fund all the nodes of the group.
func fundNodes(miner *TestNode, nodes map[*TestNode]struct{}) error {
	// Get the miner's balance
	wg, err := miner.WalletGet()
	if err != nil {
		return errors.AddContext(err, "failed to get miner's balance")
	}
	// Send txnsPerNode outputs to each node
	txnsPerNode := uint64(25)
	scos := make([]types.SiacoinOutput, 0, uint64(len(nodes))*txnsPerNode)
	funding := wg.ConfirmedSiacoinBalance.Div64(uint64(len(nodes))).Div64(txnsPerNode + 1)
	for node := range nodes {
		wag, err := node.WalletAddressGet()
		if err != nil {
			return errors.AddContext(err, "failed to get wallet address")
		}
		for i := uint64(0); i < txnsPerNode; i++ {
			scos = append(scos, types.SiacoinOutput{
				Value:      funding,
				UnlockHash: wag.Address,
			})
		}
	}
	// Send the transaction
	_, err = miner.WalletSiacoinsMultiPost(scos)
	if err != nil {
		return errors.AddContext(err, "failed to send funding txn")
	}
	// Mine the transactions
	if err := miner.MineBlock(); err != nil {
		return errors.AddContext(err, "failed to mine funding txn")
	}
	// Make sure every node has at least one confirmed transaction
	for node := range nodes {
		err := Retry(100, 100*time.Millisecond, func() error {
			wtg, err := node.WalletTransactionsGet(0, math.MaxInt32)
			if err != nil {
				return err
			}
			if len(wtg.ConfirmedTransactions) == 0 {
				return errors.New("confirmed transactions should be greater than 0")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}
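// The funding split above divides the miner's confirmed balance evenly
// across the nodes and then into txnsPerNode+1 shares, so one share per
// node is never sent, presumably as headroom for the transaction fee. A
// sketch of the arithmetic with hypothetical numbers:
//
//	balance := types.SiacoinPrecision.Mul64(2600) // 2600 SC, illustrative
//	numNodes, txnsPerNode := uint64(4), uint64(25)
//	funding := balance.Div64(numNodes).Div64(txnsPerNode + 1)
//	// funding == 25 SC; each node receives 25 outputs worth 625 SC in
//	// total, and 25 SC per node stays with the miner as headroom.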
// hostsInRenterDBCheck makes sure that all the renters see all hosts in their
// database.
func hostsInRenterDBCheck(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
	for renter := range renters {
		if renter.params.SkipHostDiscovery {
			continue
		}
		for host := range hosts {
			numRetries := 0
			err := Retry(600, 100*time.Millisecond, func() error {
				numRetries++
				if renter == host {
					// We don't care if the renter is also a host.
					return nil
				}
				// Check if the renter has the host in its db.
				err := errors.AddContext(renter.KnowsHost(host), "renter doesn't know host")
				if err != nil && numRetries%10 == 0 {
					return errors.Compose(err, miner.MineBlock())
				}
				if err != nil {
					return err
				}
				return nil
			})
			if err != nil {
				return build.ExtendErr("not all renters can see all hosts", err)
			}
		}
	}
	return nil
}

// mapToSlice converts a map of TestNodes into a slice.
func mapToSlice(m map[*TestNode]struct{}) []*TestNode {
	tns := make([]*TestNode, 0, len(m))
	for tn := range m {
		tns = append(tns, tn)
	}
	return tns
}

// randomDir is a helper function that returns a random directory path.
func randomDir() string {
	dir, err := TestDir(strconv.Itoa(fastrand.Intn(math.MaxInt32)))
	if err != nil {
		panic(errors.AddContext(err, "failed to create testing directory"))
	}
	return dir
}

// setRenterAllowances sets the allowance of each renter.
func setRenterAllowances(renters map[*TestNode]struct{}) error {
	for renter := range renters {
		// Skip renters that don't want an allowance set.
		if renter.params.SkipSetAllowance {
			continue
		}
		allowance := DefaultAllowance
		if !reflect.DeepEqual(renter.params.Allowance, modules.Allowance{}) {
			allowance = renter.params.Allowance
		}
		if err := renter.RenterPostAllowance(allowance); err != nil {
			return err
		}
	}
	return nil
}

// synchronizationCheck makes sure that all the nodes are synced and follow
// the longest chain.
func synchronizationCheck(nodes map[*TestNode]struct{}) error {
	// Get node with longest chain.
	var longestChainNode *TestNode
	var longestChain types.BlockHeight
	for n := range nodes {
		ncg, err := n.ConsensusGet()
		if err != nil {
			return err
		}
		if ncg.Height > longestChain {
			longestChain = ncg.Height
			longestChainNode = n
		}
	}
	lcg, err := longestChainNode.ConsensusGet()
	if err != nil {
		return err
	}
	// Loop until all the nodes have the same CurrentBlock.
	for n := range nodes {
		err := Retry(600, 100*time.Millisecond, func() error {
			ncg, err := n.ConsensusGet()
			if err != nil {
				return err
			}
			// If the CurrentBlocks match, we are done.
			if lcg.CurrentBlock == ncg.CurrentBlock {
				return nil
			}
			// If the heights don't match, the node needs more time to catch
			// up to the longest chain.
			if lcg.Height != ncg.Height {
				return errors.New("blockHeight doesn't match")
			}
			// If the heights match but the block IDs don't, the node is on a
			// different chain and needs more time to reorg.
			if lcg.CurrentBlock != ncg.CurrentBlock {
				return errors.New("ids don't match")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}
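// Usage sketch: tests that mine on a single node typically wait for the
// whole group to converge before asserting on consensus state (assumes a
// *testing.T named t; Sync is defined further below):
//
//	if err := tg.Miners()[0].MineBlock(); err != nil {
//		t.Fatal(err)
//	}
//	if err := tg.Sync(); err != nil {
//		t.Fatal(err)
//	}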
// waitForContracts waits until the renters have formed contracts with the
// hosts in the group.
func waitForContracts(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
	// Create a map for easier public key lookups.
	hostMap := make(map[string]struct{})
	for host := range hosts {
		pk, err := host.HostPublicKey()
		if err != nil {
			return build.ExtendErr("failed to build hostMap", err)
		}
		hostMap[string(pk.Key)] = struct{}{}
	}
	// Each renter is supposed to have at least expectedContracts contracts
	// with hosts from the hosts map.
	for renter := range renters {
		numRetries := 0
		// Get the expected number of contracts for this renter.
		rg, err := renter.RenterGet()
		if err != nil {
			return err
		}
		// If there are fewer hosts in the group than we need, adjust our
		// expectations.
		expectedContracts := rg.Settings.Allowance.Hosts
		if uint64(len(hosts)) < expectedContracts {
			expectedContracts = uint64(len(hosts))
		}
		// Check if the number of contracts is sufficient.
		err = Retry(1000, 100*time.Millisecond, func() error {
			numRetries++
			contracts := uint64(0)
			// Get the renter's contracts.
			rc, err := renter.RenterContractsGet()
			if err != nil {
				return err
			}
			// Count the number of contracts with hosts from the group.
			for _, c := range rc.ActiveContracts {
				if _, exists := hostMap[string(c.HostPublicKey.Key)]; exists {
					contracts++
				}
			}
			// Check if the number is sufficient.
			if contracts < expectedContracts {
				if numRetries%10 == 0 {
					if err := miner.MineBlock(); err != nil {
						return err
					}
				}
				return errors.New("renter hasn't formed enough contracts")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	// Mine 1 final block to ensure the contracts are mined and show up in a
	// block.
	return miner.MineBlock()
}

// AddNodeN adds n nodes of a given template to the group.
func (tg *TestGroup) AddNodeN(np node.NodeParams, n int) ([]*TestNode, error) {
	nps := make([]node.NodeParams, n)
	for i := 0; i < n; i++ {
		nps[i] = np
	}
	return tg.AddNodes(nps...)
}

// AddNodes creates nodes and adds them to the group.
func (tg *TestGroup) AddNodes(nps ...node.NodeParams) ([]*TestNode, error) {
	newNodes := make(map[*TestNode]struct{})
	newHosts := make(map[*TestNode]struct{})
	newRenters := make(map[*TestNode]struct{})
	newMiners := make(map[*TestNode]struct{})
	for _, np := range nps {
		// Create the node and add it to the group.
		if np.Dir == "" {
			np.Dir = randomDir()
		}
		node, err := NewCleanNode(np)
		if err != nil {
			return mapToSlice(newNodes), build.ExtendErr("failed to create node", err)
		}
		// Add node to nodes
		tg.nodes[node] = struct{}{}
		newNodes[node] = struct{}{}
		// Add node to hosts
		if np.Host != nil || np.CreateHost {
			tg.hosts[node] = struct{}{}
			newHosts[node] = struct{}{}
		}
		// Add node to renters
		if np.Renter != nil || np.CreateRenter {
			tg.renters[node] = struct{}{}
			newRenters[node] = struct{}{}
		}
		// Add node to miners
		if np.Miner != nil || np.CreateMiner {
			tg.miners[node] = struct{}{}
			newMiners[node] = struct{}{}
		}
	}

	return mapToSlice(newNodes), tg.setupNodes(newHosts, newNodes, newRenters)
}
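// Usage sketch for growing a group (assumes a *testing.T named t; the
// number of hosts is illustrative): the returned slice holds only the
// newly created, already connected, funded and announced nodes.
//
//	newHosts, err := tg.AddNodes(node.Host(randomDir()), node.Host(randomDir()))
//	if err != nil {
//		t.Fatal(err)
//	}
//	_ = newHosts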
// setupNodes does the setup required for creating a test group and for
// adding nodes to an existing group.
func (tg *TestGroup) setupNodes(setHosts, setNodes, setRenters map[*TestNode]struct{}) error {
	// Find the richest miner.
	var miner *TestNode
	var balance types.Currency
	for m := range tg.miners {
		wg, err := m.WalletGet()
		if err != nil {
			return errors.AddContext(err, "failed to find richest miner")
		}
		if wg.ConfirmedSiacoinBalance.Cmp(balance) > 0 {
			miner = m
			balance = wg.ConfirmedSiacoinBalance
		}
	}
	// Get all the nodes.
	nodes := mapToSlice(tg.nodes)
	if err := fullyConnectNodes(nodes); err != nil {
		return build.ExtendErr("failed to fully connect nodes", err)
	}
	// Make sure the new nodes are synced.
	if err := synchronizationCheck(tg.nodes); err != nil {
		return build.ExtendErr("synchronization check 1 failed", err)
	}
	// Fund the nodes.
	if err := fundNodes(miner, setNodes); err != nil {
		return build.ExtendErr("failed to fund new nodes", err)
	}
	// Add storage to the hosts.
	if err := addStorageFolderToHosts(setHosts); err != nil {
		return build.ExtendErr("failed to add storage to hosts", err)
	}
	// Announce the hosts.
	if err := announceHosts(setHosts); err != nil {
		return build.ExtendErr("failed to announce hosts", err)
	}
	// Mine a block to get the announcements confirmed.
	if err := miner.MineBlock(); err != nil {
		return build.ExtendErr("failed to mine host announcements", err)
	}
	// Block until the hosts show up as active in the renters' hostdbs.
	if err := hostsInRenterDBCheck(miner, tg.renters, tg.hosts); err != nil {
		return build.ExtendErr("renter database check failed", err)
	}
	// Set the renter allowances.
	if err := setRenterAllowances(setRenters); err != nil {
		return build.ExtendErr("failed to set renter allowance", err)
	}
	// Wait for all the renters to form contracts if they haven't got enough
	// contracts already.
	if err := waitForContracts(miner, tg.renters, tg.hosts); err != nil {
		return build.ExtendErr("renters failed to form contracts", err)
	}
	// Make sure all nodes are synced.
	if err := synchronizationCheck(tg.nodes); err != nil {
		return build.ExtendErr("synchronization check 2 failed", err)
	}
	return nil
}
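// Sketch of deferring the allowance (field and helper names as defined in
// this package; assumes a *testing.T named t): a renter created with
// SkipSetAllowance passes through setupNodes without contracts and can be
// given an allowance later via SetRenterAllowance below.
//
//	renterParams := node.Renter(randomDir())
//	renterParams.SkipSetAllowance = true
//	nodes, err := tg.AddNodes(renterParams)
//	if err != nil {
//		t.Fatal(err)
//	}
//	if err := tg.SetRenterAllowance(nodes[0], DefaultAllowance); err != nil {
//		t.Fatal(err)
//	}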
// SetRenterAllowance finishes the setup for the renter test node.
func (tg *TestGroup) SetRenterAllowance(renter *TestNode, allowance modules.Allowance) error {
	if _, ok := tg.renters[renter]; !ok {
		return errors.New("cannot set allowance for renter that's not in the test group")
	}
	miner := mapToSlice(tg.miners)[0]
	r := make(map[*TestNode]struct{})
	r[renter] = struct{}{}
	// Set the renter's allowance.
	renter.params.SkipSetAllowance = false
	if err := setRenterAllowances(r); err != nil {
		return build.ExtendErr("failed to set renter allowance", err)
	}
	// Wait for the renter to form contracts if it hasn't got enough
	// contracts already.
	if err := waitForContracts(miner, r, tg.hosts); err != nil {
		return build.ExtendErr("renters failed to form contracts", err)
	}
	// Make sure all nodes are synced.
	if err := synchronizationCheck(tg.nodes); err != nil {
		return build.ExtendErr("synchronization check 2 failed", err)
	}
	return nil
}

// Close closes the group and all its nodes. Closing a node is usually a slow
// process, but we can speed it up a lot by closing each node in a separate
// goroutine.
func (tg *TestGroup) Close() error {
	wg := new(sync.WaitGroup)
	errs := make([]error, len(tg.nodes))
	i := 0
	for n := range tg.nodes {
		wg.Add(1)
		go func(i int, n *TestNode) {
			errs[i] = n.Close()
			wg.Done()
		}(i, n)
		i++
	}
	wg.Wait()
	return errors.Compose(errs...)
}

// RemoveNode removes a node from the group and shuts it down.
func (tg *TestGroup) RemoveNode(tn *TestNode) error {
	// Remove the node from all data structures.
	delete(tg.nodes, tn)
	delete(tg.hosts, tn)
	delete(tg.renters, tn)
	delete(tg.miners, tn)

	// Close the node.
	return tn.StopNode()
}

// StartNode starts a node from the group that has previously been stopped.
func (tg *TestGroup) StartNode(tn *TestNode) error {
	if _, exists := tg.nodes[tn]; !exists {
		return errors.New("cannot start node that's not part of the group")
	}
	err := tn.StartNode()
	if err != nil {
		return err
	}
	if err := fullyConnectNodes(tg.Nodes()); err != nil {
		return err
	}
	return synchronizationCheck(tg.nodes)
}
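// Usage sketch for a restart cycle (assumes a *testing.T named t): unlike
// RemoveNode, StopNode below keeps the node in the group, so it can later
// be restarted, reconnected and resynced via StartNode.
//
//	host := tg.Hosts()[0]
//	if err := tg.StopNode(host); err != nil {
//		t.Fatal(err)
//	}
//	// ... exercise behavior while the host is offline ...
//	if err := tg.StartNode(host); err != nil {
//		t.Fatal(err)
//	}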
// StopNode stops a node of the group.
func (tg *TestGroup) StopNode(tn *TestNode) error {
	if _, exists := tg.nodes[tn]; !exists {
		return errors.New("cannot stop node that's not part of the group")
	}
	return tn.StopNode()
}

// Sync syncs the nodes of the test group.
func (tg *TestGroup) Sync() error {
	return synchronizationCheck(tg.nodes)
}

// Nodes returns all the nodes of the group.
func (tg *TestGroup) Nodes() []*TestNode {
	return mapToSlice(tg.nodes)
}

// Hosts returns all the hosts of the group.
func (tg *TestGroup) Hosts() []*TestNode {
	return mapToSlice(tg.hosts)
}

// Renters returns all the renters of the group.
func (tg *TestGroup) Renters() []*TestNode {
	return mapToSlice(tg.renters)
}

// Miners returns all the miners of the group.
func (tg *TestGroup) Miners() []*TestNode {
	return mapToSlice(tg.miners)
}