github.com/fozzysec/SiaPrime@v0.0.0-20190612043147-66c8e8d11fe3/siatest/testgroup.go

package siatest

import (
	"fmt"
	"math"
	"path/filepath"
	"reflect"
	"sync"
	"time"

	"SiaPrime/build"
	"SiaPrime/modules"
	"SiaPrime/node"
	"SiaPrime/node/api/client"
	"SiaPrime/persist"
	"SiaPrime/types"
	"gitlab.com/NebulousLabs/errors"
)

type (
	// GroupParams is a helper struct to make creating TestGroups easier.
	GroupParams struct {
		Hosts   int // number of hosts to create
		Renters int // number of renters to create
		Miners  int // number of miners to create
	}

	// TestGroup is a group of TestNodes that are funded, synced and ready
	// for upload, download and mining depending on their configuration.
	TestGroup struct {
		nodes   map[*TestNode]struct{}
		hosts   map[*TestNode]struct{}
		renters map[*TestNode]struct{}
		miners  map[*TestNode]struct{}

		dir string
	}
)

var (
	// DefaultAllowance is the allowance used for the group's renters
	DefaultAllowance = modules.Allowance{
		Funds:       types.SiacoinPrecision.Mul64(1e3),
		Hosts:       5,
		Period:      50,
		RenewWindow: 24,
	}
)

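// A renter in the group uses DefaultAllowance unless its node params carry a
// custom one. A minimal sketch (values are illustrative; NodeParams.Allowance
// and SkipSetAllowance are the fields consulted by setRenterAllowances below):
//
//	renterParams := node.RenterTemplate
//	renterParams.Allowance = modules.Allowance{
//		Funds:       types.SiacoinPrecision.Mul64(5e3),
//		Hosts:       3,
//		Period:      100,
//		RenewWindow: 50,
//	}
//	// Or opt out entirely and set an allowance manually later:
//	// renterParams.SkipSetAllowance = true
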
// NewGroup creates a group of TestNodes from node params. All the nodes will
// be connected, synced and funded. Host nodes are also announced.
func NewGroup(groupDir string, nodeParams ...node.NodeParams) (*TestGroup, error) {
	// Create and init group
	tg := &TestGroup{
		nodes:   make(map[*TestNode]struct{}),
		hosts:   make(map[*TestNode]struct{}),
		renters: make(map[*TestNode]struct{}),
		miners:  make(map[*TestNode]struct{}),

		dir: groupDir,
	}

	// Create the nodes and add them to the correct groups
	nodes := make([]*TestNode, 0, len(nodeParams))
	for _, np := range nodeParams {
		node, err := NewCleanNode(np)
		if err != nil {
			return nil, errors.AddContext(err, "failed to create clean node")
		}
		// Add node to nodes
		tg.nodes[node] = struct{}{}
		nodes = append(nodes, node)
		// Add node to hosts
		if np.Host != nil || np.CreateHost {
			tg.hosts[node] = struct{}{}
		}
		// Add node to renters
		if np.Renter != nil || np.CreateRenter {
			tg.renters[node] = struct{}{}
		}
		// Add node to miners
		if np.Miner != nil || np.CreateMiner {
			tg.miners[node] = struct{}{}
		}
	}

	// Get a miner and mine some blocks to generate coins
	if len(tg.miners) == 0 {
		return nil, errors.New("cannot fund group without miners")
	}
	miner := tg.Miners()[0]
	renewWindow := types.BlockHeight(DefaultAllowance.RenewWindow)
	for i := types.BlockHeight(0); i <= types.MaturityDelay+types.TaxHardforkHeight+renewWindow; i++ {
		if err := miner.MineBlock(); err != nil {
			return nil, errors.AddContext(err, "failed to mine block for funding")
		}
	}
	// Fully connect nodes
	return tg, tg.setupNodes(tg.hosts, tg.nodes, tg.renters)
}

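// A minimal usage sketch for calling NewGroup directly (illustrative; each
// NodeParams needs its Dir set, which randomNodeDir below would otherwise do
// for the template path):
//
//	minerParams := MinerTemplate
//	minerParams.Dir = filepath.Join(groupDir, "miner")
//	hostParams := node.HostTemplate
//	hostParams.Dir = filepath.Join(groupDir, "host")
//	tg, err := NewGroup(groupDir, minerParams, hostParams)
//	if err != nil {
//		// handle error
//	}
//	defer tg.Close()
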
// NewGroupFromTemplate will create hosts, renters and miners according to the
// settings in groupParams.
func NewGroupFromTemplate(groupDir string, groupParams GroupParams) (*TestGroup, error) {
	var params []node.NodeParams
	// Create host params
	for i := 0; i < groupParams.Hosts; i++ {
		params = append(params, node.HostTemplate)
		randomNodeDir(groupDir, &params[len(params)-1])
	}
	// Create renter params
	for i := 0; i < groupParams.Renters; i++ {
		params = append(params, node.RenterTemplate)
		randomNodeDir(groupDir, &params[len(params)-1])
	}
	// Create miner params
	for i := 0; i < groupParams.Miners; i++ {
		params = append(params, MinerTemplate)
		randomNodeDir(groupDir, &params[len(params)-1])
	}
	return NewGroup(groupDir, params...)
}

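// The template path is the usual entry point for tests. A sketch (the group
// directory and counts are illustrative):
//
//	gp := GroupParams{Hosts: 2, Renters: 1, Miners: 1}
//	tg, err := NewGroupFromTemplate("/tmp/siatest-mygroup", gp)
//	if err != nil {
//		// handle error
//	}
//	defer tg.Close()
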
// addStorageFolderToHosts adds a single storage folder to each host.
func addStorageFolderToHosts(hosts map[*TestNode]struct{}) error {
	errs := make([]error, len(hosts))
	wg := new(sync.WaitGroup)
	i := 0
	// The following API call is very slow. Using multiple threads speeds the
	// process up a lot.
	for host := range hosts {
		wg.Add(1)
		go func(i int, host *TestNode) {
			errs[i] = host.HostStorageFoldersAddPost(host.Dir, 1048576) // 1048576 bytes = 1 MiB
			wg.Done()
		}(i, host)
		i++
	}
	wg.Wait()
	return errors.Compose(errs...)
}

// announceHosts adds storage to each host and announces them to the group
func announceHosts(hosts map[*TestNode]struct{}) error {
	for host := range hosts {
		if host.params.SkipHostAnnouncement {
			continue
		}
		if err := host.HostModifySettingPost(client.HostParamAcceptingContracts, true); err != nil {
			return errors.AddContext(err, "failed to set host to accepting contracts")
		}
		if err := host.HostAnnouncePost(); err != nil {
			return errors.AddContext(err, "failed to announce host")
		}
	}
	return nil
}

// fullyConnectNodes takes a list of nodes and connects all their gateways
func fullyConnectNodes(nodes []*TestNode) error {
	// Fully connect the nodes
	for i, nodeA := range nodes {
		for _, nodeB := range nodes[i+1:] {
			err := build.Retry(100, 100*time.Millisecond, func() error {
				if err := nodeA.GatewayConnectPost(nodeB.GatewayAddress()); err != nil && err != client.ErrPeerExists {
					return errors.AddContext(err, "failed to connect to peer")
				}
				isPeer1, err1 := nodeA.hasPeer(nodeB)
				isPeer2, err2 := nodeB.hasPeer(nodeA)
				if err1 != nil || err2 != nil {
					return build.ExtendErr("couldn't determine if nodeA and nodeB are connected",
						errors.Compose(err1, err2))
				}
				if isPeer1 && isPeer2 {
					return nil
				}
				return errors.New("nodeA and nodeB are not peers of each other")
			})
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// fundNodes uses the funds of a miner node to fund all the nodes of the group
func fundNodes(miner *TestNode, nodes map[*TestNode]struct{}) error {
	// Get the miner's balance
	wg, err := miner.WalletGet()
	if err != nil {
		return errors.AddContext(err, "failed to get miner's balance")
	}
	// Send txnsPerNode outputs to each node. Dividing by txnsPerNode+1 rather
	// than txnsPerNode keeps the total amount sent strictly below the miner's
	// balance.
	txnsPerNode := uint64(25)
	scos := make([]types.SiacoinOutput, 0, uint64(len(nodes))*txnsPerNode)
	funding := wg.ConfirmedSiacoinBalance.Div64(uint64(len(nodes))).Div64(txnsPerNode + 1)
	for node := range nodes {
		wag, err := node.WalletAddressGet()
		if err != nil {
			return errors.AddContext(err, "failed to get wallet address")
		}
		for i := uint64(0); i < txnsPerNode; i++ {
			scos = append(scos, types.SiacoinOutput{
				Value:      funding,
				UnlockHash: wag.Address,
			})
		}
	}
	// Send the transaction
	_, err = miner.WalletSiacoinsMultiPost(scos)
	if err != nil {
		return errors.AddContext(err, "failed to send funding txn")
	}
	// Mine the transactions
	if err := miner.MineBlock(); err != nil {
		return errors.AddContext(err, "failed to mine funding txn")
	}
	// Make sure every node has at least one confirmed transaction
	for node := range nodes {
		err := Retry(100, 100*time.Millisecond, func() error {
			wtg, err := node.WalletTransactionsGet(0, math.MaxInt32)
			if err != nil {
				return err
			}
			if len(wtg.ConfirmedTransactions) == 0 {
				return errors.New("confirmed transactions should be greater than 0")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// hostsInRenterDBCheck makes sure that all the renters see all hosts in their
// database.
func hostsInRenterDBCheck(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
	for renter := range renters {
		if renter.params.SkipHostDiscovery {
			continue
		}
		for host := range hosts {
			if host.params.SkipHostAnnouncement {
				continue
			}
			numRetries := 0
			err := Retry(600, 100*time.Millisecond, func() error {
				numRetries++
				if renter == host {
					// We don't care if the renter is also a host.
					return nil
				}
				// Check if the renter has the host in its db.
				err := errors.AddContext(renter.KnowsHost(host), "renter doesn't know host")
				if err != nil && numRetries%100 == 0 {
					// Mine a block from time to time to help the host
					// announcements confirm.
					return errors.Compose(err, miner.MineBlock())
				}
				if err != nil {
					return err
				}
				return nil
			})
			if err != nil {
				return build.ExtendErr("not all renters can see all hosts", err)
			}
		}
	}
	return nil
}

// mapToSlice converts a map of TestNodes into a slice
func mapToSlice(m map[*TestNode]struct{}) []*TestNode {
	tns := make([]*TestNode, 0, len(m))
	for tn := range m {
		tns = append(tns, tn)
	}
	return tns
}

// randomNodeDir generates a random directory for the provided node params if
// Dir wasn't set using the provided parentDir and a randomized suffix.
func randomNodeDir(parentDir string, nodeParams *node.NodeParams) {
	if nodeParams.Dir != "" {
		return
	}
	nodeDir := ""
	if nodeParams.Gateway != nil || nodeParams.CreateGateway {
		nodeDir += "g"
	}
	if nodeParams.ConsensusSet != nil || nodeParams.CreateConsensusSet {
		nodeDir += "c"
	}
	if nodeParams.TransactionPool != nil || nodeParams.CreateTransactionPool {
		nodeDir += "t"
	}
	if nodeParams.Wallet != nil || nodeParams.CreateWallet {
		nodeDir += "w"
	}
	if nodeParams.Renter != nil || nodeParams.CreateRenter {
		nodeDir += "r"
	}
	if nodeParams.Host != nil || nodeParams.CreateHost {
		nodeDir += "h"
	}
	if nodeParams.Miner != nil || nodeParams.CreateMiner {
		nodeDir += "m"
	}
	nodeDir += fmt.Sprintf("-%s", persist.RandomSuffix())
	nodeParams.Dir = filepath.Join(parentDir, nodeDir)
}

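// For illustration: params built from node.RenterTemplate typically enable the
// gateway, consensus set, transaction pool, wallet and renter modules, so the
// generated directory takes the form "gctwr-<random suffix>" under parentDir.
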
// setRenterAllowances sets the allowance of each renter
func setRenterAllowances(renters map[*TestNode]struct{}) error {
	for renter := range renters {
		// Set allowance
		if renter.params.SkipSetAllowance {
			continue
		}
		allowance := DefaultAllowance
		if !reflect.DeepEqual(renter.params.Allowance, modules.Allowance{}) {
			allowance = renter.params.Allowance
		}
		if err := renter.RenterPostAllowance(allowance); err != nil {
			return err
		}
	}
	return nil
}

// synchronizationCheck makes sure that all the nodes are synced and follow the
// longest chain.
func synchronizationCheck(nodes map[*TestNode]struct{}) error {
	// Get node with longest chain.
	var longestChainNode *TestNode
	var longestChain types.BlockHeight
	for n := range nodes {
		ncg, err := n.ConsensusGet()
		if err != nil {
			return err
		}
		if ncg.Height > longestChain {
			longestChain = ncg.Height
			longestChainNode = n
		}
	}
	lcg, err := longestChainNode.ConsensusGet()
	if err != nil {
		return err
	}
	// Loop until all the nodes have the same CurrentBlock.
	for n := range nodes {
		err := Retry(600, 100*time.Millisecond, func() error {
			ncg, err := n.ConsensusGet()
			if err != nil {
				return err
			}
			// If the CurrentBlocks match, we are done.
			if lcg.CurrentBlock == ncg.CurrentBlock {
				return nil
			}
			// If the heights don't match, the node needs a bit longer to
			// sync.
			if lcg.Height != ncg.Height {
				return errors.New("blockHeight doesn't match")
			}
			// If the heights match but the block IDs don't, the nodes are on
			// different forks and need a bit longer to converge.
			if lcg.CurrentBlock != ncg.CurrentBlock {
				return errors.New("ids don't match")
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	return nil
}

// waitForContracts waits until the renters have formed contracts with the
// hosts in the group.
func waitForContracts(miner *TestNode, renters map[*TestNode]struct{}, hosts map[*TestNode]struct{}) error {
	// Create a map for easier public key lookups.
	hostMap := make(map[string]struct{})
	for host := range hosts {
		pk, err := host.HostPublicKey()
		if err != nil {
			return build.ExtendErr("failed to build hostMap", err)
		}
		hostMap[string(pk.Key)] = struct{}{}
	}
	// Each renter is supposed to have at least expectedContracts contracts
	// with hosts from the host map.
	for renter := range renters {
		numRetries := 0
		// Get expected number of contracts for this renter.
		rg, err := renter.RenterGet()
		if err != nil {
			return err
		}
		// If there are fewer hosts in the group than we need, we have to
		// adjust our expectations.
		expectedContracts := rg.Settings.Allowance.Hosts
		if uint64(len(hosts)) < expectedContracts {
			expectedContracts = uint64(len(hosts))
		}
		// Subtract hosts which the renter doesn't know yet because they
		// weren't announced automatically.
		for host := range hosts {
			if host.params.SkipHostAnnouncement && renter.KnowsHost(host) != nil {
				expectedContracts--
			}
		}
		// Check if the number of contracts is sufficient.
		err = Retry(1000, 100*time.Millisecond, func() error {
			numRetries++
			contracts := uint64(0)
			// Get the renter's contracts.
			rc, err := renter.RenterInactiveContractsGet()
			if err != nil {
				return err
			}
			// Count the number of contracts with hosts from the group.
			for _, c := range rc.ActiveContracts {
				if _, exists := hostMap[string(c.HostPublicKey.Key)]; exists {
					contracts++
				}
			}
			for _, c := range rc.InactiveContracts {
				if _, exists := hostMap[string(c.HostPublicKey.Key)]; exists {
					contracts++
				}
			}
			// Check if the number is sufficient.
			if contracts < expectedContracts {
				if numRetries%100 == 0 {
					if err := miner.MineBlock(); err != nil {
						return err
					}
				}
				return fmt.Errorf("renter hasn't formed enough contracts: expected %v got %v",
					expectedContracts, contracts)
			}
			return nil
		})
		if err != nil {
			return err
		}
	}
	// Mine 1 final block to ensure the contracts are mined and show up in a
	// block.
	return miner.MineBlock()
}

// AddNodeN adds n nodes of a given template to the group.
func (tg *TestGroup) AddNodeN(np node.NodeParams, n int) ([]*TestNode, error) {
	nps := make([]node.NodeParams, n)
	for i := 0; i < n; i++ {
		nps[i] = np
	}
	return tg.AddNodes(nps...)
}

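// A sketch of growing a running group (the template and count are
// illustrative; the returned nodes are already connected, funded, synced and,
// for hosts, announced):
//
//	newHosts, err := tg.AddNodeN(node.HostTemplate, 2)
//	if err != nil {
//		// handle error
//	}
//	_ = newHosts
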
// AddNodes creates nodes and adds them to the group.
func (tg *TestGroup) AddNodes(nps ...node.NodeParams) ([]*TestNode, error) {
	newNodes := make(map[*TestNode]struct{})
	newHosts := make(map[*TestNode]struct{})
	newRenters := make(map[*TestNode]struct{})
	newMiners := make(map[*TestNode]struct{})
	for _, np := range nps {
		// Create the nodes and add them to the group.
		randomNodeDir(tg.dir, &np)
		node, err := NewCleanNode(np)
		if err != nil {
			return mapToSlice(newNodes), build.ExtendErr("failed to create node", err)
		}
		// Add node to nodes
		tg.nodes[node] = struct{}{}
		newNodes[node] = struct{}{}
		// Add node to hosts
		if np.Host != nil || np.CreateHost {
			tg.hosts[node] = struct{}{}
			newHosts[node] = struct{}{}
		}
		// Add node to renters
		if np.Renter != nil || np.CreateRenter {
			tg.renters[node] = struct{}{}
			newRenters[node] = struct{}{}
		}
		// Add node to miners
		if np.Miner != nil || np.CreateMiner {
			tg.miners[node] = struct{}{}
			newMiners[node] = struct{}{}
		}
	}

	return mapToSlice(newNodes), tg.setupNodes(newHosts, newNodes, newRenters)
}

// setupNodes does the setup required to create a test group and to add nodes
// to an existing group.
func (tg *TestGroup) setupNodes(setHosts, setNodes, setRenters map[*TestNode]struct{}) error {
	// Find the richest miner.
	var miner *TestNode
	var balance types.Currency
	for m := range tg.miners {
		wg, err := m.WalletGet()
		if err != nil {
			return errors.AddContext(err, "failed to find richest miner")
		}
		if wg.ConfirmedSiacoinBalance.Cmp(balance) > 0 {
			miner = m
			balance = wg.ConfirmedSiacoinBalance
		}
	}
	// Get all the nodes.
	nodes := mapToSlice(tg.nodes)
	if err := fullyConnectNodes(nodes); err != nil {
		return build.ExtendErr("failed to fully connect nodes", err)
	}
	// Make sure the new nodes are synced.
	if err := synchronizationCheck(tg.nodes); err != nil {
		return build.ExtendErr("synchronization check 1 failed", err)
	}
	// Fund the nodes.
	if err := fundNodes(miner, setNodes); err != nil {
		return build.ExtendErr("failed to fund new nodes", err)
	}
	// Add storage to the hosts.
	if err := addStorageFolderToHosts(setHosts); err != nil {
		return build.ExtendErr("failed to add storage to hosts", err)
	}
	// Announce the hosts.
	if err := announceHosts(setHosts); err != nil {
		return build.ExtendErr("failed to announce hosts", err)
	}
	// Mine a block to get the announcements confirmed.
	if err := miner.MineBlock(); err != nil {
		return build.ExtendErr("failed to mine host announcements", err)
	}
	// Block until the hosts show up as active in the renters' hostdbs.
	if err := hostsInRenterDBCheck(miner, tg.renters, tg.hosts); err != nil {
		return build.ExtendErr("renter database check failed", err)
	}
	// Set the renter allowances.
	if err := setRenterAllowances(setRenters); err != nil {
		return build.ExtendErr("failed to set renter allowance", err)
	}
	// Wait for all the renters to form contracts if they haven't got enough
	// contracts already.
	if err := waitForContracts(miner, tg.renters, tg.hosts); err != nil {
		return build.ExtendErr("renters failed to form contracts", err)
	}
	// Make sure all nodes are synced.
	if err := synchronizationCheck(tg.nodes); err != nil {
		return build.ExtendErr("synchronization check 2 failed", err)
	}
	return nil
}

// SetRenterAllowance finishes the setup for the renter test node
func (tg *TestGroup) SetRenterAllowance(renter *TestNode, allowance modules.Allowance) error {
	if _, ok := tg.renters[renter]; !ok {
		return errors.New("cannot set allowance for renter not in test group")
	}
	miner := mapToSlice(tg.miners)[0]
	r := make(map[*TestNode]struct{})
	r[renter] = struct{}{}
	// Set the renter's allowance. Assign the provided allowance to the
	// renter's params so that setRenterAllowances picks it up.
	renter.params.SkipSetAllowance = false
	renter.params.Allowance = allowance
	if err := setRenterAllowances(r); err != nil {
		return build.ExtendErr("failed to set renter allowance", err)
	}
	// Wait for all the renters to form contracts if they haven't got enough
	// contracts already.
	if err := waitForContracts(miner, r, tg.hosts); err != nil {
		return build.ExtendErr("renters failed to form contracts", err)
	}
	// Make sure all nodes are synced.
	if err := synchronizationCheck(tg.nodes); err != nil {
		return build.ExtendErr("synchronization check 2 failed", err)
	}
	return nil
}

// Close closes the group and all its nodes. Closing a node is usually a slow
// process, but we can speed it up a lot by closing each node in a separate
// goroutine.
func (tg *TestGroup) Close() error {
	wg := new(sync.WaitGroup)
	errs := make([]error, len(tg.nodes))
	i := 0
	for n := range tg.nodes {
		wg.Add(1)
		go func(i int, n *TestNode) {
			errs[i] = n.Close()
			wg.Done()
		}(i, n)
		i++
	}
	wg.Wait()
	return errors.Compose(errs...)
}

// RemoveNode removes a node from the group and shuts it down.
func (tg *TestGroup) RemoveNode(tn *TestNode) error {
	// Remove the node from all data structures.
	delete(tg.nodes, tn)
	delete(tg.hosts, tn)
	delete(tg.renters, tn)
	delete(tg.miners, tn)

	// Close the node.
	return tn.StopNode()
}

// StartNode starts a node from the group that has previously been stopped.
func (tg *TestGroup) StartNode(tn *TestNode) error {
	if _, exists := tg.nodes[tn]; !exists {
		return errors.New("cannot start node that's not part of the group")
	}
	err := tn.StartNode()
	if err != nil {
		return err
	}
	if err := fullyConnectNodes(tg.Nodes()); err != nil {
		return err
	}
	return synchronizationCheck(tg.nodes)
}

// StopNode stops a node of a group.
func (tg *TestGroup) StopNode(tn *TestNode) error {
	if _, exists := tg.nodes[tn]; !exists {
		return errors.New("cannot stop node that's not part of the group")
	}
	return tn.StopNode()
}

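// A restart-cycle sketch, e.g. for testing behavior across reboots (the chosen
// node is illustrative):
//
//	host := tg.Hosts()[0]
//	if err := tg.StopNode(host); err != nil {
//		// handle error
//	}
//	// ... exercise the group while the host is offline ...
//	if err := tg.StartNode(host); err != nil {
//		// handle error
//	}
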
// Sync syncs the nodes of the test group
func (tg *TestGroup) Sync() error {
	return synchronizationCheck(tg.nodes)
}

// Nodes returns all the nodes of the group
func (tg *TestGroup) Nodes() []*TestNode {
	return mapToSlice(tg.nodes)
}

// Hosts returns all the hosts of the group
func (tg *TestGroup) Hosts() []*TestNode {
	return mapToSlice(tg.hosts)
}

// Renters returns all the renters of the group
func (tg *TestGroup) Renters() []*TestNode {
	return mapToSlice(tg.renters)
}

// Miners returns all the miners of the group
func (tg *TestGroup) Miners() []*TestNode {
	return mapToSlice(tg.miners)
}