github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/pkg/cli/add_node.go (about)

     1  package cli
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"io"
     7  	"os"
     8  	"strings"
     9  
    10  	"github.com/apprenda/kismatic/pkg/install"
    11  	"github.com/apprenda/kismatic/pkg/util"
    12  	"github.com/spf13/cobra"
    13  )
    14  
// addNodeOpts holds the command-line flag values for the add-node command.
type addNodeOpts struct {
	// Roles to assign to the new node; validated against validRoles,
	// defaults to "worker" when none are provided.
	Roles                    []string
	// NodeLabels are key=value pairs to set on the new node.
	NodeLabels               []string
	// GeneratedAssetsDirectory is where assets generated during the
	// installation process are stored.
	GeneratedAssetsDirectory string
	// RestartServices forces a restart of cluster services (use with care).
	RestartServices          bool
	// OutputFormat is the installation output format ("simple" or "raw").
	OutputFormat             string
	// Verbose enables verbose logging from the installation.
	Verbose                  bool
	// SkipPreFlight skips the pre-flight checks against the new node.
	SkipPreFlight            bool
}
    24  
    25  var validRoles = []string{"worker", "ingress", "storage"}
    26  
    27  // NewCmdAddNode returns the command for adding node to the cluster
    28  func NewCmdAddNode(out io.Writer, installOpts *installOpts) *cobra.Command {
    29  	opts := &addNodeOpts{}
    30  	cmd := &cobra.Command{
    31  		Use:     "add-node NODE_NAME NODE_IP [NODE_INTERNAL_IP]",
    32  		Short:   "add a new node to an existing Kubernetes cluster",
    33  		Aliases: []string{"add-worker"},
    34  		RunE: func(cmd *cobra.Command, args []string) error {
    35  			if len(args) < 2 || len(args) > 3 {
    36  				return cmd.Usage()
    37  			}
    38  			newNode := install.Node{
    39  				Host: args[0],
    40  				IP:   args[1],
    41  			}
    42  			if len(args) == 3 {
    43  				newNode.InternalIP = args[2]
    44  			}
    45  			// default to 'worker'
    46  			if len(opts.Roles) == 0 {
    47  				opts.Roles = append(opts.Roles, "worker")
    48  			}
    49  			for _, r := range opts.Roles {
    50  				if !util.Contains(r, validRoles) {
    51  					return fmt.Errorf("invalid role %q, options %v", r, validRoles)
    52  				}
    53  			}
    54  			if len(opts.NodeLabels) > 0 {
    55  				newNode.Labels = make(map[string]string)
    56  				for _, l := range opts.NodeLabels {
    57  					pair := strings.Split(l, "=")
    58  					if len(pair) != 2 {
    59  						return fmt.Errorf("invalid label %q provided, must be key=value pair", l)
    60  					}
    61  					newNode.Labels[pair[0]] = pair[1]
    62  				}
    63  			}
    64  			return doAddNode(out, installOpts.planFilename, opts, newNode)
    65  		},
    66  	}
    67  	cmd.Flags().StringSliceVar(&opts.Roles, "roles", []string{}, "roles separated by ',' (options \"worker\"|\"ingress\"|\"storage\")")
    68  	cmd.Flags().StringSliceVarP(&opts.NodeLabels, "labels", "l", []string{}, "key=value pairs separated by ','")
    69  	cmd.Flags().StringVar(&opts.GeneratedAssetsDirectory, "generated-assets-dir", "generated", "path to the directory where assets generated during the installation process will be stored")
    70  	cmd.Flags().BoolVar(&opts.RestartServices, "restart-services", false, "force restart clusters services (Use with care)")
    71  	cmd.Flags().BoolVar(&opts.Verbose, "verbose", false, "enable verbose logging from the installation")
    72  	cmd.Flags().StringVarP(&opts.OutputFormat, "output", "o", "simple", "installation output format (options \"simple\"|\"raw\")")
    73  	cmd.Flags().BoolVar(&opts.SkipPreFlight, "skip-preflight", false, "skip pre-flight checks, useful when rerunning kismatic")
    74  	return cmd
    75  }
    76  
    77  func doAddNode(out io.Writer, planFile string, opts *addNodeOpts, newNode install.Node) error {
    78  	planner := &install.FilePlanner{File: planFile}
    79  	if !planner.PlanExists() {
    80  		return planFileNotFoundErr{filename: planFile}
    81  	}
    82  	execOpts := install.ExecutorOptions{
    83  		GeneratedAssetsDirectory: opts.GeneratedAssetsDirectory,
    84  		OutputFormat:             opts.OutputFormat,
    85  		Verbose:                  opts.Verbose,
    86  	}
    87  	executor, err := install.NewExecutor(out, os.Stderr, execOpts)
    88  	if err != nil {
    89  		return err
    90  	}
    91  	plan, err := planner.Read()
    92  	if err != nil {
    93  		return fmt.Errorf("failed to read plan file: %v", err)
    94  	}
    95  	if _, errs := install.ValidateNode(&newNode); errs != nil {
    96  		util.PrintValidationErrors(out, errs)
    97  		return errors.New("information provided about the new node is invalid")
    98  	}
    99  	// add new node to the plan just for validation
   100  	validatePlan := install.AddNodeToPlan(*plan, newNode, opts.Roles)
   101  	if _, errs := install.ValidatePlan(&validatePlan); errs != nil {
   102  		util.PrintValidationErrors(out, errs)
   103  		return errors.New("the plan file failed validation")
   104  	}
   105  	nodeSSHCon := &install.SSHConnection{
   106  		SSHConfig: &plan.Cluster.SSH,
   107  		Node:      &newNode,
   108  	}
   109  	if _, errs := install.ValidateSSHConnection(nodeSSHCon, "New node"); errs != nil {
   110  		util.PrintValidationErrors(out, errs)
   111  		return errors.New("could not establish SSH connection to the new node")
   112  	}
   113  	if err = ensureNodeIsNew(*plan, newNode); err != nil {
   114  		return err
   115  	}
   116  	if !opts.SkipPreFlight {
   117  		util.PrintHeader(out, "Running Pre-Flight Checks On New Node", '=')
   118  		if err = executor.RunNewNodePreFlightCheck(*plan, newNode); err != nil {
   119  			return err
   120  		}
   121  	}
   122  	updatedPlan, err := executor.AddNode(plan, newNode, opts.Roles, opts.RestartServices)
   123  	if err != nil {
   124  		return err
   125  	}
   126  	if err := planner.Write(updatedPlan); err != nil {
   127  		return fmt.Errorf("error updating plan file to include the new node: %v", err)
   128  	}
   129  	return nil
   130  }
   131  
   132  // returns an error if the plan contains a node that is "equivalent"
   133  // to the new node that is being added
   134  func ensureNodeIsNew(plan install.Plan, newNode install.Node) error {
   135  	for _, n := range plan.Worker.Nodes {
   136  		if n.Host == newNode.Host {
   137  			return fmt.Errorf("according to the plan file, the host name of the new node is already being used by another worker node")
   138  		}
   139  		if n.IP == newNode.IP {
   140  			return fmt.Errorf("according to the plan file, the IP of the new node is already being used by another worker node")
   141  		}
   142  		if newNode.InternalIP != "" && n.InternalIP == newNode.InternalIP {
   143  			return fmt.Errorf("according to the plan file, the internal IP of the new node is already being used by another worker node")
   144  		}
   145  	}
   146  	for _, n := range plan.Ingress.Nodes {
   147  		if n.Host == newNode.Host {
   148  			return fmt.Errorf("according to the plan file, the host name of the new node is already being used by another ingress node")
   149  		}
   150  		if n.IP == newNode.IP {
   151  			return fmt.Errorf("according to the plan file, the IP of the new node is already being used by another ingress node")
   152  		}
   153  		if newNode.InternalIP != "" && n.InternalIP == newNode.InternalIP {
   154  			return fmt.Errorf("according to the plan file, the internal IP of the new node is already being used by another ingress node")
   155  		}
   156  	}
   157  	for _, n := range plan.Storage.Nodes {
   158  		if n.Host == newNode.Host {
   159  			return fmt.Errorf("according to the plan file, the host name of the new node is already being used by another storage node")
   160  		}
   161  		if n.IP == newNode.IP {
   162  			return fmt.Errorf("according to the plan file, the IP of the new node is already being used by another storage node")
   163  		}
   164  		if newNode.InternalIP != "" && n.InternalIP == newNode.InternalIP {
   165  			return fmt.Errorf("according to the plan file, the internal IP of the new node is already being used by another storage node")
   166  		}
   167  	}
   168  	return nil
   169  }