gitlab.com/jokerrs1/Sia@v1.3.2/siatest/testgroup.go

package siatest

import (
	"math"
	"strconv"
	"sync"
	"time"

	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/node"
	"github.com/NebulousLabs/Sia/node/api/client"
	"github.com/NebulousLabs/Sia/types"
	"github.com/NebulousLabs/errors"
	"github.com/NebulousLabs/fastrand"
)

type (
	// GroupParams is a helper struct to make creating TestGroups easier.
	GroupParams struct {
		Hosts   int // number of hosts to create
		Renters int // number of renters to create
		Miners  int // number of miners to create
	}

	// TestGroup is a group of TestNodes that are funded, synced and ready
	// for upload, download and mining depending on their configuration.
	TestGroup struct {
		nodes   map[*TestNode]struct{}
		hosts   map[*TestNode]struct{}
		renters map[*TestNode]struct{}
		miners  map[*TestNode]struct{}

		dir string
	}
)

var (
	// defaultAllowance is the allowance used for the group's renters.
	defaultAllowance = modules.Allowance{
		Funds:       types.SiacoinPrecision.Mul64(1e3),
		Hosts:       5,
		Period:      50,
		RenewWindow: 10,
	}
)

// NewGroup creates a group of TestNodes from node params. All the nodes will
// be connected, synced and funded. Host nodes are also announced.
func NewGroup(nodeParams ...node.NodeParams) (*TestGroup, error) {
	// Create and init group
	tg := &TestGroup{
		nodes:   make(map[*TestNode]struct{}),
		hosts:   make(map[*TestNode]struct{}),
		renters: make(map[*TestNode]struct{}),
		miners:  make(map[*TestNode]struct{}),
	}

	// Create the nodes and add them to the correct groups
	nodes := make([]*TestNode, 0, len(nodeParams))
	for _, np := range nodeParams {
		node, err := NewCleanNode(np)
		if err != nil {
			return nil, errors.AddContext(err, "failed to create clean node")
		}
		// Add node to nodes
		tg.nodes[node] = struct{}{}
		nodes = append(nodes, node)
		// Add node to hosts
		if np.Host != nil || np.CreateHost {
			tg.hosts[node] = struct{}{}
		}
		// Add node to renters
		if np.Renter != nil || np.CreateRenter {
			tg.renters[node] = struct{}{}
		}
		// Add node to miners
		if np.Miner != nil || np.CreateMiner {
			tg.miners[node] = struct{}{}
		}
	}

	// Fully connect nodes
	if err := fullyConnectNodes(nodes); err != nil {
		return nil, errors.AddContext(err, "failed to fully connect nodes")
	}
	// Get a miner and mine blocks until the first block reward has matured,
	// giving the miner spendable coins to fund the group with.
	if len(tg.miners) == 0 {
		return nil, errors.New("cannot fund group without miners")
	}
	miner := tg.Miners()[0]
	for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ {
		if err := miner.MineBlock(); err != nil {
			return nil, errors.AddContext(err, "failed to mine block for funding")
		}
	}
	// Fund nodes
	if err := fundNodes(miner, tg.nodes); err != nil {
		return nil, errors.AddContext(err, "failed to fund nodes")
	}
	// Add storage to hosts
	if err := addStorageFolderToHosts(tg.hosts); err != nil {
		return nil, errors.AddContext(err, "failed to add storage to hosts")
	}
	// Announce hosts
	if err := announceHosts(tg.hosts); err != nil {
		return nil, errors.AddContext(err, "failed to announce hosts")
	}
	// Mine a block to get the announcements confirmed
	if err := miner.MineBlock(); err != nil {
		return nil, errors.AddContext(err, "failed to mine host announcements")
	}
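	// NOTE: the check below keeps mining additional blocks while it waits,
	// so group creation still succeeds if the announcements take a few
	// extra blocks to propagate to every renter.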
	// Block until all hosts show up as active in the renters' hostdbs
	if err := hostsInRenterDBCheck(miner, tg.renters, len(tg.hosts)); err != nil {
		return nil, errors.AddContext(err, "renter database check failed")
	}
	// Set renter allowances
	if err := setRenterAllowances(tg.renters); err != nil {
		return nil, errors.AddContext(err, "failed to set renter allowance")
	}
	// Wait for all the renters to form contracts
	if err := waitForContracts(miner, tg.renters, tg.hosts); err != nil {
		return nil, errors.AddContext(err, "renters failed to form contracts")
	}
	// Make sure all nodes are synced
	if err := synchronizationCheck(miner, tg.nodes); err != nil {
		return nil, errors.AddContext(err, "synchronization check failed")
	}
	return tg, nil
}

// NewGroupFromTemplate will create hosts, renters and miners according to the
// settings in groupParams.
func NewGroupFromTemplate(groupParams GroupParams) (*TestGroup, error) {
	var params []node.NodeParams
	// Create host params
	for i := 0; i < groupParams.Hosts; i++ {
		params = append(params, node.Host(randomDir()))
	}
	// Create renter params
	for i := 0; i < groupParams.Renters; i++ {
		params = append(params, node.Renter(randomDir()))
	}
	// Create miner params
	for i := 0; i < groupParams.Miners; i++ {
		params = append(params, Miner(randomDir()))
	}
	return NewGroup(params...)
}
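
// A minimal usage sketch (hypothetical test code, not part of this file):
// create a small group from a template, grab one of its renters, and make
// sure the group is torn down when the test finishes.
//
//	func TestExample(t *testing.T) {
//		groupParams := GroupParams{Hosts: 5, Renters: 1, Miners: 1}
//		tg, err := NewGroupFromTemplate(groupParams)
//		if err != nil {
//			t.Fatal(err)
//		}
//		defer func() {
//			if err := tg.Close(); err != nil {
//				t.Fatal(err)
//			}
//		}()
//		renter := tg.Renters()[0]
//		_ = renter // ready for uploads and downloads
//	}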

// addStorageFolderToHosts adds a single storage folder to each host.
func addStorageFolderToHosts(hosts map[*TestNode]struct{}) error {
	errs := make([]error, len(hosts))
	wg := new(sync.WaitGroup)
	i := 0
	// The following API call is very slow. Using multiple threads speeds that
	// process up a lot.
	for host := range hosts {
		wg.Add(1)
		go func(i int, host *TestNode) {
			errs[i] = host.HostStorageFoldersAddPost(host.Dir, 1048576) // 1 MiB folder
			wg.Done()
		}(i, host)
		i++
	}
	wg.Wait()
	return errors.Compose(errs...)
}

// announceHosts sets each host to accepting contracts and announces it to the
// group.
func announceHosts(hosts map[*TestNode]struct{}) error {
	for host := range hosts {
		if err := host.HostAcceptingContractsPost(true); err != nil {
			return errors.AddContext(err, "failed to set host to accepting contracts")
		}
		if err := host.HostAnnouncePost(); err != nil {
			return errors.AddContext(err, "failed to announce host")
		}
	}
	return nil
}

// fullyConnectNodes takes a list of nodes and connects all their gateways.
func fullyConnectNodes(nodes []*TestNode) error {
	// Fully connect the nodes
	for i, nodeA := range nodes {
		for _, nodeB := range nodes[i+1:] {
			if err := nodeA.GatewayConnectPost(nodeB.GatewayAddress()); err != nil && err != client.ErrPeerExists {
				return errors.AddContext(err, "failed to connect to peer")
			}
		}
	}
	return nil
}

// fundNodes uses the funds of a miner node to fund all the nodes of the group.
func fundNodes(miner *TestNode, nodes map[*TestNode]struct{}) error {
	// Get the miner's balance
	wg, err := miner.WalletGet()
	if err != nil {
		return errors.AddContext(err, "failed to get miner's balance")
	}
	// Send txnsPerNode outputs to each node
	txnsPerNode := uint64(25)
	scos := make([]types.SiacoinOutput, 0, uint64(len(nodes))*txnsPerNode)
	funding := wg.ConfirmedSiacoinBalance.Div64(uint64(len(nodes))).Div64(txnsPerNode + 1)
	for node := range nodes {
		wag, err := node.WalletAddressGet()
		if err != nil {
			return errors.AddContext(err, "failed to get wallet address")
		}
		for i := uint64(0); i < txnsPerNode; i++ {
			scos = append(scos, types.SiacoinOutput{
				Value:      funding,
				UnlockHash: wag.Address,
			})
		}
	}
	// Send the transaction
	_, err = miner.WalletSiacoinsMultiPost(scos)
	if err != nil {
		return errors.AddContext(err, "failed to send funding txn")
	}
	// Mine the transactions
	if err := miner.MineBlock(); err != nil {
		return errors.AddContext(err, "failed to mine funding txn")
	}
	// Make sure every node has at least one confirmed transaction
	for node := range nodes {
		err := Retry(100, 100*time.Millisecond, func() error {
			wtg, err := node.WalletTransactionsGet(0, math.MaxInt32)
			if err != nil {
				return err
			}
			if len(wtg.ConfirmedTransactions) == 0 {
				return errors.New("node has no confirmed transactions yet")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}
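
// Funding arithmetic, for illustration: with a confirmed miner balance B, n
// nodes, and txnsPerNode = 25, each output above is worth B / n / 26;
// dividing by txnsPerNode+1 rather than txnsPerNode presumably leaves the
// miner a margin for transaction fees. Assuming Retry (defined elsewhere in
// this package) calls its function up to the given number of times with the
// given pause between attempts, the confirmation check above gives each node
// up to ~10 seconds (100 x 100ms) to see a confirmed transaction.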

// hostsInRenterDBCheck makes sure that all the renters see numHosts hosts in
// their hostdb.
func hostsInRenterDBCheck(miner *TestNode, renters map[*TestNode]struct{}, numHosts int) error {
	for renter := range renters {
		err := Retry(100, 100*time.Millisecond, func() error {
			hdag, err := renter.HostDbActiveGet()
			if err != nil {
				return err
			}
			if len(hdag.Hosts) != numHosts {
				if err := miner.MineBlock(); err != nil {
					return err
				}
				return errors.New("renter doesn't have enough active hosts in hostdb")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// mapToSlice converts a map of TestNodes into a slice.
func mapToSlice(m map[*TestNode]struct{}) []*TestNode {
	tns := make([]*TestNode, 0, len(m))
	for tn := range m {
		tns = append(tns, tn)
	}
	return tns
}

// randomDir is a helper function that returns a random directory path.
func randomDir() string {
	dir, err := TestDir(strconv.Itoa(fastrand.Intn(math.MaxInt32)))
	if err != nil {
		panic(errors.AddContext(err, "failed to create testing directory"))
	}
	return dir
}

// setRenterAllowances sets the allowance of each renter.
func setRenterAllowances(renters map[*TestNode]struct{}) error {
	for renter := range renters {
		if err := renter.RenterPost(defaultAllowance); err != nil {
			return err
		}
	}
	return nil
}

// synchronizationCheck makes sure that all the nodes are synced and follow
// the miner's chain.
func synchronizationCheck(miner *TestNode, nodes map[*TestNode]struct{}) error {
	mcg, err := miner.ConsensusGet()
	if err != nil {
		return err
	}
	for node := range nodes {
		err := Retry(100, 100*time.Millisecond, func() error {
			ncg, err := node.ConsensusGet()
			if err != nil {
				return err
			}
			if mcg.CurrentBlock != ncg.CurrentBlock {
				return errors.New("the node's current block doesn't equal the miner's")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// waitForContracts waits until the renters have formed contracts with the
// hosts in the group.
func waitForContracts(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
	// Each renter is expected to form defaultAllowance.Hosts contracts, but
	// never more contracts than there are hosts.
	expectedContracts := defaultAllowance.Hosts
	if uint64(len(hosts)) < expectedContracts {
		expectedContracts = uint64(len(hosts))
	}
	for renter := range renters {
		numRetries := 0
		err := Retry(1000, 100*time.Millisecond, func() error {
			// Mine a block on the first attempt and on every tenth attempt
			// after that to help pending contract transactions confirm.
			if numRetries%10 == 0 {
				if err := miner.MineBlock(); err != nil {
					return err
				}
			}
			numRetries++
			rc, err := renter.RenterContractsGet()
			if err != nil {
				return err
			}
			if uint64(len(rc.Contracts)) < expectedContracts {
				return errors.New("renter hasn't formed enough contracts")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// Close closes the group and all its nodes. Closing a node is usually a slow
// process, but we can speed it up a lot by closing each node in a separate
// goroutine.
func (tg *TestGroup) Close() error {
	wg := new(sync.WaitGroup)
	errs := make([]error, len(tg.nodes))
	i := 0
	for n := range tg.nodes {
		wg.Add(1)
		go func(i int, n *TestNode) {
			errs[i] = n.Close()
			wg.Done()
		}(i, n)
		i++
	}
	wg.Wait()
	return errors.Compose(errs...)
}
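
// Note on the accessors below: mapToSlice iterates Go maps, whose iteration
// order is randomized, so the returned slices are in no particular order and
// e.g. Hosts()[0] need not refer to the same node across calls.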

// Nodes returns all the nodes of the group.
func (tg *TestGroup) Nodes() []*TestNode {
	return mapToSlice(tg.nodes)
}

// Hosts returns all the hosts of the group.
func (tg *TestGroup) Hosts() []*TestNode {
	return mapToSlice(tg.hosts)
}

// Renters returns all the renters of the group.
func (tg *TestGroup) Renters() []*TestNode {
	return mapToSlice(tg.renters)
}

// Miners returns all the miners of the group.
func (tg *TestGroup) Miners() []*TestNode {
	return mapToSlice(tg.miners)
}
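
// For finer-grained control than NewGroupFromTemplate offers, the node
// parameters can be built explicitly and passed to NewGroup. A hypothetical
// sketch using the same helpers this file already uses:
//
//	params := []node.NodeParams{
//		node.Host(randomDir()),
//		node.Renter(randomDir()),
//		Miner(randomDir()),
//	}
//	tg, err := NewGroup(params...)
//	if err != nil {
//		// handle error
//	}
//	defer tg.Close()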