github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/pkg/install/add_node_test.go

package install

import (
	"errors"
	"io"
	"io/ioutil"
	"testing"

	"github.com/apprenda/kismatic/pkg/ansible"
	"github.com/apprenda/kismatic/pkg/install/explain"
	"github.com/apprenda/kismatic/pkg/tls"
)

func mustGetTempDir(t *testing.T) string {
	dir, err := ioutil.TempDir("", "add-node-test")
	if err != nil {
		t.Fatalf("failed to create temp dir: %v", err)
	}
	return dir
}

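// TestAddNodeCertMissingCAMissing verifies that AddNode fails with
// errMissingClusterCA, and does not return an updated plan, when neither the
// node certificate nor the cluster CA exists.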
func TestAddNodeCertMissingCAMissing(t *testing.T) {
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki:                 &fakePKI{},
		certsDir:            mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Worker: NodeGroup{
			Nodes: []Node{},
		},
	}
	newNode := Node{}
	newPlan, err := e.AddNode(originalPlan, newNode, []string{"worker"}, true)
	if newPlan != nil {
		t.Errorf("add worker returned an updated plan")
	}
	if err != errMissingClusterCA {
		t.Errorf("AddNode did not return the expected error. Instead returned: %v", err)
	}
}

// TestAddNodeCertMissingCAExists verifies that the node certificate gets
// generated when the CA already exists, without regenerating the CA.
func TestAddNodeCertMissingCAExists(t *testing.T) {
	pki := &fakePKI{
		caExists: true,
	}
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki:                 pki,
		runnerExplainerFactory: fakeRunnerExplainer(nil),
		certsDir:               mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Worker: NodeGroup{
			Nodes: []Node{},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{}
	_, err := e.AddNode(originalPlan, newNode, []string{"worker"}, true)
	if err != nil {
		t.Errorf("unexpected error while adding worker: %v", err)
	}
	if pki.generateCACalled {
		t.Errorf("CA was generated, even though it already existed")
	}
	if !pki.generateNodeCertCalled {
		t.Error("node certificate was not generated")
	}
}

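// TestAddNodePlanIsUpdated verifies that adding a worker increments the
// worker group's expected count, leaves the ingress and storage counts at
// zero, and includes the new node in the returned plan.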
func TestAddNodePlanIsUpdated(t *testing.T) {
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: fakeRunnerExplainer(nil),
		certsDir:               mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Worker: NodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	updatedPlan, err := e.AddNode(originalPlan, newNode, []string{"worker"}, true)
	if err != nil {
		t.Errorf("unexpected error while adding worker: %v", err)
	}
	if updatedPlan.Worker.ExpectedCount != 2 {
		t.Errorf("expected count was not incremented")
	}
	if updatedPlan.Ingress.ExpectedCount != 0 {
		t.Errorf("expected ingress count was not 0")
	}
	if updatedPlan.Storage.ExpectedCount != 0 {
		t.Errorf("expected storage count was not 0")
	}
	found := false
	for _, w := range updatedPlan.Worker.Nodes {
		if w.Equal(newNode) {
			found = true
		}
	}
	if !found {
		t.Errorf("the updated plan does not include the new node")
	}
}

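// TestAddIngressPlanIsUpdated verifies that adding a node with the ingress
// role updates only the ingress group of the returned plan.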
func TestAddIngressPlanIsUpdated(t *testing.T) {
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: fakeRunnerExplainer(nil),
		certsDir:               mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Ingress: OptionalNodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	updatedPlan, err := e.AddNode(originalPlan, newNode, []string{"ingress"}, true)
	if err != nil {
		t.Errorf("unexpected error while adding ingress node: %v", err)
	}
	if updatedPlan.Ingress.ExpectedCount != 2 {
		t.Errorf("expected count was not incremented")
	}
	if updatedPlan.Worker.ExpectedCount != 0 {
		t.Errorf("expected worker count was not 0")
	}
	if updatedPlan.Storage.ExpectedCount != 0 {
		t.Errorf("expected storage count was not 0")
	}
	found := false
	for _, w := range updatedPlan.Ingress.Nodes {
		if w.Equal(newNode) {
			found = true
		}
	}
	if !found {
		t.Errorf("the updated plan does not include the new node")
	}
}

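// TestAddStoragePlanIsUpdated verifies that adding a node with the storage
// role updates only the storage group of the returned plan.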
func TestAddStoragePlanIsUpdated(t *testing.T) {
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: fakeRunnerExplainer(nil),
		certsDir:               mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Storage: OptionalNodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	updatedPlan, err := e.AddNode(originalPlan, newNode, []string{"storage"}, true)
	if err != nil {
		t.Errorf("unexpected error while adding storage node: %v", err)
	}
	if updatedPlan.Storage.ExpectedCount != 2 {
		t.Errorf("expected count was not incremented")
	}
	if updatedPlan.Worker.ExpectedCount != 0 {
		t.Errorf("expected worker count was not 0")
	}
	if updatedPlan.Ingress.ExpectedCount != 0 {
		t.Errorf("expected ingress count was not 0")
	}
	found := false
	for _, w := range updatedPlan.Storage.Nodes {
		if w.Equal(newNode) {
			found = true
		}
	}
	if !found {
		t.Errorf("the updated plan does not include the new node")
	}
}

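// TestAddAllRolesPlanIsUpdated verifies that adding a single node with the
// worker, ingress, and storage roles updates all three groups of the
// returned plan.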
func TestAddAllRolesPlanIsUpdated(t *testing.T) {
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: fakeRunnerExplainer(nil),
		certsDir:               mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Worker: NodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Ingress: OptionalNodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Storage: OptionalNodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	updatedPlan, err := e.AddNode(originalPlan, newNode, []string{"worker", "ingress", "storage"}, true)
	if err != nil {
		t.Errorf("unexpected error while adding node: %v", err)
	}
	if updatedPlan.Worker.ExpectedCount != 2 {
		t.Errorf("expected worker count was not incremented")
	}
	if updatedPlan.Ingress.ExpectedCount != 2 {
		t.Errorf("expected ingress count was not incremented")
	}
	if updatedPlan.Storage.ExpectedCount != 2 {
		t.Errorf("expected storage count was not incremented")
	}
	found := false
	for _, w := range updatedPlan.Worker.Nodes {
		if w.Equal(newNode) {
			found = true
		}
	}
	if !found {
		t.Errorf("the updated plan does not include the new worker node")
	}
	found = false
	for _, w := range updatedPlan.Ingress.Nodes {
		if w.Equal(newNode) {
			found = true
		}
	}
	if !found {
		t.Errorf("the updated plan does not include the new ingress node")
	}
	found = false
	for _, w := range updatedPlan.Storage.Nodes {
		if w.Equal(newNode) {
			found = true
		}
	}
	if !found {
		t.Errorf("the updated plan does not include the new storage node")
	}
}

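// TestAddNodePlanNotUpdatedAfterFailure verifies that AddNode surfaces the
// playbook execution error and does not return an updated plan.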
func TestAddNodePlanNotUpdatedAfterFailure(t *testing.T) {
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: fakeRunnerExplainer(errors.New("exec error")),
		certsDir:               mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Worker: NodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	updatedPlan, err := e.AddNode(originalPlan, newNode, []string{"worker"}, true)
	if err == nil {
		t.Errorf("expected an error, but didn't get one")
	}
	if updatedPlan != nil {
		t.Error("plan was updated, even though adding worker failed")
	}
}

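// TestAddNodeRestartServicesEnabled verifies that, with the final AddNode
// argument set to true, the force-restart flags for kube-proxy, kubelet,
// calico-node, and docker are set in the catalog passed to the runner.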
func TestAddNodeRestartServicesEnabled(t *testing.T) {
	fakeRunner := fakeRunner{}
	e := ansibleExecutor{
		certsDir:            mustGetTempDir(t),
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: func(explain.AnsibleEventExplainer, io.Writer) (ansible.Runner, *explain.AnsibleEventStreamExplainer, error) {
			return &fakeRunner, &explain.AnsibleEventStreamExplainer{}, nil
		},
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Worker: NodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	_, err := e.AddNode(originalPlan, newNode, []string{"worker"}, true)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if !fakeRunner.incomingCatalog.ForceProxyRestart {
		t.Errorf("missing restart flag for service kube-proxy")
	}

	if !fakeRunner.incomingCatalog.ForceKubeletRestart {
		t.Errorf("missing restart flag for service kubelet")
	}

	if !fakeRunner.incomingCatalog.ForceCalicoNodeRestart {
		t.Errorf("missing restart flag for service calico-node")
	}

	if !fakeRunner.incomingCatalog.ForceDockerRestart {
		t.Errorf("missing restart flag for service docker")
	}
}

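// TestAddNodeHostsFilesDNSEnabled verifies that the hosts.yaml playbook is
// run against all nodes when the plan enables UpdateHostsFiles.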
func TestAddNodeHostsFilesDNSEnabled(t *testing.T) {
	fakeRunner := fakeRunner{}
	e := ansibleExecutor{
		options:             ExecutorOptions{RunsDirectory: mustGetTempDir(t)},
		stdout:              ioutil.Discard,
		consoleOutputFormat: ansible.RawFormat,
		pki: &fakePKI{
			caExists: true,
		},
		runnerExplainerFactory: func(explain.AnsibleEventExplainer, io.Writer) (ansible.Runner, *explain.AnsibleEventStreamExplainer, error) {
			return &fakeRunner, &explain.AnsibleEventStreamExplainer{}, nil
		},
		certsDir: mustGetTempDir(t),
	}
	originalPlan := &Plan{
		Master: MasterNodeGroup{
			Nodes: []Node{{InternalIP: "10.10.2.20"}},
		},
		Worker: NodeGroup{
			ExpectedCount: 1,
			Nodes: []Node{
				{
					Host: "existingWorker",
				},
			},
		},
		Cluster: Cluster{
			Version: "v1.10.3",
			Networking: NetworkConfig{
				ServiceCIDRBlock: "10.0.0.0/16",
				UpdateHostsFiles: true,
			},
		},
	}
	newNode := Node{
		Host: "test",
	}
	_, err := e.AddNode(originalPlan, newNode, []string{"worker"}, false)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	expectedPlaybook := "hosts.yaml"
	found := false
	for _, p := range fakeRunner.allNodesPlaybooks {
		if p == expectedPlaybook {
			found = true
		}
	}
	if !found {
		t.Errorf("expected playbook %s was not run during add-worker. The following plays ran: %v", expectedPlaybook, fakeRunner.allNodesPlaybooks)
	}
}

// Fakes for testing

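// fakePKI is a test double for the executor's pki dependency: it reports the
// configured existence flags and records which generation methods are called.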
type fakePKI struct {
	caExists                    bool
	nodeCertExists              bool
	err                         error
	generateCACalled            bool
	generateProxyClientCACalled bool
	generateNodeCertCalled      bool
}

func (f *fakePKI) CertificateAuthorityExists() (bool, error)     { return f.caExists, f.err }
func (f *fakePKI) NodeCertificateExists(node Node) (bool, error) { return f.nodeCertExists, f.err }
func (f *fakePKI) GenerateNodeCertificate(plan *Plan, node Node, ca *tls.CA) error {
	f.generateNodeCertCalled = true
	return f.err
}
func (f *fakePKI) GetClusterCA() (*tls.CA, error) { return nil, f.err }
func (f *fakePKI) GenerateClusterCA(p *Plan) (*tls.CA, error) {
	f.generateCACalled = true
	return nil, f.err
}
func (f *fakePKI) GetProxyClientCA() (*tls.CA, error) { return nil, f.err }
func (f *fakePKI) GenerateProxyClientCA(p *Plan) (*tls.CA, error) {
	f.generateProxyClientCACalled = true
	return nil, f.err
}
func (f *fakePKI) GenerateClusterCertificates(p *Plan, clusterCA *tls.CA, proxyClientCA *tls.CA) error {
	return f.err
}
func (f *fakePKI) GenerateCertificate(name string, validityPeriod string, commonName string, subjectAlternateNames []string, organizations []string, ca *tls.CA, overwrite bool) (bool, error) {
	return false, f.err
}

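// fakeRunner is a stub ansible.Runner that records the playbooks started via
// StartPlaybook and the last ClusterCatalog passed to StartPlaybookOnNode.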
type fakeRunner struct {
	eventChan         chan ansible.Event
	err               error
	incomingCatalog   ansible.ClusterCatalog
	allNodesPlaybooks []string
}

func (f *fakeRunner) StartPlaybook(playbookFile string, inventory ansible.Inventory, cc ansible.ClusterCatalog) (<-chan ansible.Event, error) {
	f.allNodesPlaybooks = append(f.allNodesPlaybooks, playbookFile)
	return f.eventChan, f.err
}
func (f *fakeRunner) WaitPlaybook() error { return f.err }
func (f *fakeRunner) StartPlaybookOnNode(playbookFile string, inventory ansible.Inventory, cc ansible.ClusterCatalog, node ...string) (<-chan ansible.Event, error) {
	f.incomingCatalog = cc
	return f.eventChan, f.err
}

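// fakeRunnerExplainer returns a runner/explainer factory whose fakeRunner
// fails every playbook operation with execError (or succeeds when nil).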
func fakeRunnerExplainer(execError error) func(explain.AnsibleEventExplainer, io.Writer) (ansible.Runner, *explain.AnsibleEventStreamExplainer, error) {
	return func(explain.AnsibleEventExplainer, io.Writer) (ansible.Runner, *explain.AnsibleEventStreamExplainer, error) {
		return &fakeRunner{err: execError}, &explain.AnsibleEventStreamExplainer{}, nil
	}
}