github.com/jlmeeker/kismatic@v1.10.1-0.20180612190640-57f9005a1f1a/pkg/install/plan.go (about)

     1  package install
     2  
     3  import (
     4  	"bufio"
     5  	"bytes"
     6  	"errors"
     7  	"fmt"
     8  	"io/ioutil"
     9  	"os"
    10  	"regexp"
    11  	"strings"
    12  	"sync"
    13  
    14  	"github.com/apprenda/kismatic/pkg/util"
    15  
    16  	yaml "gopkg.in/yaml.v2"
    17  )
    18  
const (
	// ket133PackageManagerProvider is the package manager provider assumed for
	// plan files written by KET v1.3.3, which predate the provider field.
	ket133PackageManagerProvider = "helm"
	// defaultCAExpiry is the default CA certificate expiration period (2 years).
	defaultCAExpiry              = "17520h"
)
    23  
// PlanTemplateOptions contains the options that are desired when generating
// a plan file template.
type PlanTemplateOptions struct {
	// Number of empty node entries to pre-populate for each node role.
	EtcdNodes       int
	MasterNodes     int
	WorkerNodes     int
	IngressNodes    int
	StorageNodes    int
	// AdditionalFiles is the number of empty file-copy entries to generate.
	AdditionalFiles int
	// AdminPassword is written into the generated plan's cluster section.
	AdminPassword   string
}
    35  
// PlanReadWriter is capable of reading/writing a Plan
type PlanReadWriter interface {
	// Read loads a plan from the underlying store.
	Read() (*Plan, error)
	// Write persists the given plan to the underlying store.
	Write(*Plan) error
}
    41  
// Planner is used to plan the installation
type Planner interface {
	PlanReadWriter
	// PlanExists reports whether a plan already exists in the underlying store.
	PlanExists() bool
}
    47  
// FilePlanner is a file-based installation planner
type FilePlanner struct {
	// File is the path to the plan file on disk.
	File string
}
    52  
    53  // Read the plan from the file system
    54  func (fp *FilePlanner) Read() (*Plan, error) {
    55  	d, err := ioutil.ReadFile(fp.File)
    56  	if err != nil {
    57  		return nil, fmt.Errorf("could not read file: %v", err)
    58  	}
    59  
    60  	p := &Plan{}
    61  	if err = yaml.Unmarshal(d, p); err != nil {
    62  		return nil, fmt.Errorf("failed to unmarshal plan: %v", err)
    63  	}
    64  
    65  	// read deprecated fields and set it the new version of the cluster file
    66  	readDeprecatedFields(p)
    67  
    68  	// set nil values to defaults
    69  	setDefaults(p)
    70  
    71  	return p, nil
    72  }
    73  
    74  func readDeprecatedFields(p *Plan) {
    75  	// only set if not already being set by the user
    76  	// package_manager moved from features: to add_ons: after KET v1.3.3
    77  	if p.Features != nil && p.Features.PackageManager != nil {
    78  		p.AddOns.PackageManager.Disable = !p.Features.PackageManager.Enabled
    79  		// KET v1.3.3 did not have a provider field
    80  		p.AddOns.PackageManager.Provider = ket133PackageManagerProvider
    81  	}
    82  	// allow_package_installation renamed to disable_package_installation after KET v1.4.0
    83  	if p.Cluster.AllowPackageInstallation != nil {
    84  		p.Cluster.DisablePackageInstallation = !*p.Cluster.AllowPackageInstallation
    85  	}
    86  
    87  	if p.DockerRegistry.Server == "" && p.DockerRegistry.Address != "" && p.DockerRegistry.Port != 0 {
    88  		p.DockerRegistry.Server = fmt.Sprintf("%s:%d", p.DockerRegistry.Address, p.DockerRegistry.Port)
    89  	}
    90  
    91  	if p.Docker.Storage.DirectLVM != nil && p.Docker.Storage.DirectLVM.Enabled && (p.Docker.Storage.Opts == nil || len(p.Docker.Storage.Opts) == 0) {
    92  		p.Docker.Storage.Driver = "devicemapper"
    93  		p.Docker.Storage.Opts = map[string]string{
    94  			"dm.thinpooldev":           "/dev/mapper/docker-thinpool",
    95  			"dm.use_deferred_removal":  "true",
    96  			"dm.use_deferred_deletion": fmt.Sprintf("%t", p.Docker.Storage.DirectLVM.EnableDeferredDeletion),
    97  		}
    98  		p.Docker.Storage.DirectLVMBlockDevice.Path = p.Docker.Storage.DirectLVM.BlockDevice
    99  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolPercent = "95"
   100  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolMetaPercent = "1"
   101  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendThreshold = "80"
   102  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendPercent = "20"
   103  		p.Docker.Storage.DirectLVM = nil
   104  	}
   105  }
   106  
   107  func setDefaults(p *Plan) {
   108  	// Set to either the latest version or the tested one if an error occurs
   109  	if p.Cluster.Version == "" {
   110  		p.Cluster.Version = kubernetesVersionString
   111  	}
   112  
   113  	if p.Docker.Logs.Driver == "" {
   114  		p.Docker.Logs.Driver = "json-file"
   115  		p.Docker.Logs.Opts = map[string]string{
   116  			"max-size": "50m",
   117  			"max-file": "1",
   118  		}
   119  	}
   120  
   121  	// set options that were previously set in Ansible
   122  	// only set them when creating a block device
   123  	if p.Docker.Storage.Driver == "devicemapper" && p.Docker.Storage.DirectLVMBlockDevice.Path != "" {
   124  		if _, ok := p.Docker.Storage.Opts["dm.thinpooldev"]; !ok {
   125  			p.Docker.Storage.Opts["dm.thinpooldev"] = "/dev/mapper/docker-thinpool"
   126  		}
   127  		if _, ok := p.Docker.Storage.Opts["dm.use_deferred_removal"]; !ok {
   128  			p.Docker.Storage.Opts["dm.use_deferred_removal"] = "true"
   129  		}
   130  		if _, ok := p.Docker.Storage.Opts["dm.use_deferred_deletion"]; !ok {
   131  			p.Docker.Storage.Opts["dm.use_deferred_deletion"] = "false"
   132  		}
   133  	}
   134  	if p.Docker.Storage.DirectLVMBlockDevice.ThinpoolPercent == "" {
   135  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolPercent = "95"
   136  	}
   137  	if p.Docker.Storage.DirectLVMBlockDevice.ThinpoolMetaPercent == "" {
   138  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolMetaPercent = "1"
   139  	}
   140  	if p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendThreshold == "" {
   141  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendThreshold = "80"
   142  	}
   143  	if p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendPercent == "" {
   144  		p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendPercent = "20"
   145  	}
   146  
   147  	if p.AddOns.CNI == nil {
   148  		p.AddOns.CNI = &CNI{}
   149  		p.AddOns.CNI.Provider = cniProviderCalico
   150  		p.AddOns.CNI.Options.Calico.Mode = "overlay"
   151  		p.AddOns.CNI.Options.Calico.LogLevel = "info"
   152  		// read KET <v1.5.0 plan option
   153  		if p.Cluster.Networking.Type != "" {
   154  			p.AddOns.CNI.Options.Calico.Mode = p.Cluster.Networking.Type
   155  		}
   156  	}
   157  	if p.AddOns.CNI.Options.Calico.LogLevel == "" {
   158  		p.AddOns.CNI.Options.Calico.LogLevel = "info"
   159  	}
   160  	if p.AddOns.CNI.Options.Calico.FelixInputMTU == 0 {
   161  		p.AddOns.CNI.Options.Calico.FelixInputMTU = 1440
   162  	}
   163  
   164  	if p.AddOns.CNI.Options.Calico.WorkloadMTU == 0 {
   165  		p.AddOns.CNI.Options.Calico.WorkloadMTU = 1500
   166  	}
   167  
   168  	if p.AddOns.CNI.Options.Calico.IPAutodetectionMethod == "" {
   169  		p.AddOns.CNI.Options.Calico.IPAutodetectionMethod = "first-found"
   170  	}
   171  
   172  	if p.AddOns.DNS.Provider == "" {
   173  		p.AddOns.DNS.Provider = "kubedns"
   174  	}
   175  	if p.AddOns.DNS.Options.Replicas <= 0 {
   176  		p.AddOns.DNS.Options.Replicas = 2
   177  	}
   178  
   179  	if p.AddOns.HeapsterMonitoring == nil {
   180  		p.AddOns.HeapsterMonitoring = &HeapsterMonitoring{}
   181  	}
   182  	if p.AddOns.HeapsterMonitoring.Options.Heapster.Replicas == 0 {
   183  		p.AddOns.HeapsterMonitoring.Options.Heapster.Replicas = 2
   184  	}
   185  	// read field from KET < v1.5.0
   186  	if p.AddOns.HeapsterMonitoring.Options.HeapsterReplicas != 0 {
   187  		p.AddOns.HeapsterMonitoring.Options.Heapster.Replicas = p.AddOns.HeapsterMonitoring.Options.HeapsterReplicas
   188  	}
   189  	if p.AddOns.HeapsterMonitoring.Options.Heapster.Sink == "" {
   190  		p.AddOns.HeapsterMonitoring.Options.Heapster.Sink = "influxdb:http://heapster-influxdb.kube-system.svc:8086"
   191  	}
   192  	if p.AddOns.HeapsterMonitoring.Options.Heapster.ServiceType == "" {
   193  		p.AddOns.HeapsterMonitoring.Options.Heapster.ServiceType = "ClusterIP"
   194  	}
   195  	if p.AddOns.HeapsterMonitoring.Options.InfluxDBPVCName != "" {
   196  		p.AddOns.HeapsterMonitoring.Options.InfluxDB.PVCName = p.AddOns.HeapsterMonitoring.Options.InfluxDBPVCName
   197  	}
   198  
   199  	if p.Cluster.Certificates.CAExpiry == "" {
   200  		p.Cluster.Certificates.CAExpiry = defaultCAExpiry
   201  	}
   202  
   203  	if p.AddOns.Dashboard.Options.ServiceType == "" {
   204  		p.AddOns.Dashboard.Options.ServiceType = "ClusterIP"
   205  	}
   206  
   207  	if p.AddOns.PackageManager.Options.Helm.Namespace == "" {
   208  		p.AddOns.PackageManager.Options.Helm.Namespace = "kube-system"
   209  	}
   210  }
   211  
// yamlKeyRE matches a line containing a YAML key (e.g. "  pod_cidr_block:")
// and captures the key name, skipping any leading non-letter characters
// such as indentation or list markers.
var yamlKeyRE = regexp.MustCompile(`[^a-zA-Z]*([a-z_\-\/A-Z.\d]+)[ ]*:`)
   213  
   214  // Write the plan to the file system
   215  func (fp *FilePlanner) Write(p *Plan) error {
   216  	// make a copy of the global comment map
   217  	oneTimeComments := map[string][]string{}
   218  	for k, v := range commentMap {
   219  		oneTimeComments[k] = v
   220  	}
   221  	bytez, marshalErr := yaml.Marshal(p)
   222  	if marshalErr != nil {
   223  		return fmt.Errorf("error marshalling plan to yaml: %v", marshalErr)
   224  	}
   225  
   226  	f, err := os.Create(fp.File)
   227  	if err != nil {
   228  		return fmt.Errorf("error making plan file: %v", err)
   229  	}
   230  	defer f.Close()
   231  
   232  	// the stack keeps track of the object we are in
   233  	// for example, when we are inside cluster.networking, looking at the key 'foo'
   234  	// the stack will have [cluster, networking, foo]
   235  	s := newStack()
   236  	scanner := bufio.NewScanner(bytes.NewReader(bytez))
   237  	prevIndent := -1
   238  	addNewLineBeforeComment := true
   239  	var etcdBlock bool
   240  	for scanner.Scan() {
   241  		text := scanner.Text()
   242  		matched := yamlKeyRE.FindStringSubmatch(text)
   243  		if matched != nil && len(matched) > 1 {
   244  			indent := strings.Count(matched[0], " ") / 2
   245  
   246  			// Figure out if we are in the etcd block
   247  			if indent == 0 {
   248  				etcdBlock = (text == "etcd:")
   249  			}
   250  			// Don't print labels: {} for etcd group
   251  			if etcdBlock && strings.Contains(text, "labels: {}") {
   252  				continue
   253  			}
   254  			// Don't print taints: [] for etcd group
   255  			if etcdBlock && strings.Contains(text, "taints: []") {
   256  				continue
   257  			}
   258  			// Add a new line if we are leaving a major indentation block
   259  			// (leaving a struct)..
   260  			if indent < prevIndent {
   261  				f.WriteString("\n")
   262  				// suppress the new line that would be added if this
   263  				// field has a comment
   264  				addNewLineBeforeComment = false
   265  			}
   266  			if indent <= prevIndent {
   267  				for i := 0; i <= (prevIndent - indent); i++ {
   268  					// Pop from the stack when we have left an object
   269  					// (we know because the indentation level has decreased)
   270  					if _, err := s.Pop(); err != nil {
   271  						return err
   272  					}
   273  				}
   274  			}
   275  			s.Push(matched[1])
   276  			prevIndent = indent
   277  
   278  			// Full key match (e.g. "cluster.networking.pod_cidr")
   279  			if thiscomment, ok := oneTimeComments[strings.Join(s.s, ".")]; ok {
   280  				if _, err := f.WriteString(getCommentedLine(text, thiscomment, addNewLineBeforeComment)); err != nil {
   281  					return err
   282  				}
   283  				delete(oneTimeComments, matched[1])
   284  				addNewLineBeforeComment = true
   285  				continue
   286  			}
   287  		}
   288  		// we don't want to comment this line... just print it out
   289  		if _, err := f.WriteString(text + "\n"); err != nil {
   290  			return err
   291  		}
   292  		addNewLineBeforeComment = true
   293  	}
   294  
   295  	return nil
   296  }
   297  
   298  func getCommentedLine(line string, commentLines []string, addNewLine bool) string {
   299  	var b bytes.Buffer
   300  	// Print out a new line before each comment block
   301  	if addNewLine {
   302  		b.WriteString("\n")
   303  	}
   304  	// Print out the comment lines
   305  	for _, c := range commentLines {
   306  		// Indent the comment to the same level as the field we are commenting
   307  		b.WriteString(strings.Repeat(" ", countLeadingSpace(line)))
   308  		b.WriteString(fmt.Sprintf("# %s\n", c))
   309  	}
   310  	// Print out the line
   311  	b.WriteString(line + "\n")
   312  	return b.String()
   313  }
   314  
   315  func countLeadingSpace(s string) int {
   316  	var i int
   317  	for _, r := range s {
   318  		if r == ' ' {
   319  			i++
   320  			continue
   321  		}
   322  		break
   323  	}
   324  	return i
   325  }
   326  
   327  // PlanExists return true if the plan exists on the file system
   328  func (fp *FilePlanner) PlanExists() bool {
   329  	_, err := os.Stat(fp.File)
   330  	return !os.IsNotExist(err)
   331  }
   332  
   333  // WritePlanTemplate writes an installation plan with pre-filled defaults.
   334  func WritePlanTemplate(planTemplateOpts PlanTemplateOptions, w PlanReadWriter) error {
   335  	p := buildPlanFromTemplateOptions(planTemplateOpts)
   336  	if err := w.Write(&p); err != nil {
   337  		return fmt.Errorf("error writing installation plan template: %v", err)
   338  	}
   339  	return nil
   340  }
   341  
   342  // fills out a plan with sensible defaults, according to the requested
   343  // template options
   344  func buildPlanFromTemplateOptions(templateOpts PlanTemplateOptions) Plan {
   345  	p := Plan{}
   346  	p.Cluster.Name = "kubernetes"
   347  	p.Cluster.Version = kubernetesVersionString
   348  	p.Cluster.AdminPassword = templateOpts.AdminPassword
   349  	p.Cluster.DisablePackageInstallation = false
   350  	p.Cluster.DisconnectedInstallation = false
   351  
   352  	// Set SSH defaults
   353  	p.Cluster.SSH.User = "kismaticuser"
   354  	p.Cluster.SSH.Key = "kismaticuser.key"
   355  	p.Cluster.SSH.Port = 22
   356  
   357  	// Set Networking defaults
   358  	p.Cluster.Networking.PodCIDRBlock = "172.16.0.0/16"
   359  	p.Cluster.Networking.ServiceCIDRBlock = "172.20.0.0/16"
   360  	p.Cluster.Networking.UpdateHostsFiles = false
   361  
   362  	// Set Certificate defaults
   363  	p.Cluster.Certificates.Expiry = "17520h"
   364  	p.Cluster.Certificates.CAExpiry = defaultCAExpiry
   365  
   366  	// Docker
   367  	p.Docker.Logs = DockerLogs{
   368  		Driver: "json-file",
   369  		Opts: map[string]string{
   370  			"max-size": "50m",
   371  			"max-file": "1",
   372  		},
   373  	}
   374  	p.Docker.Storage.DirectLVMBlockDevice.ThinpoolPercent = "95"
   375  	p.Docker.Storage.DirectLVMBlockDevice.ThinpoolMetaPercent = "1"
   376  	p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendThreshold = "80"
   377  	p.Docker.Storage.DirectLVMBlockDevice.ThinpoolAutoextendPercent = "20"
   378  
   379  	// Add-Ons
   380  	// CNI
   381  	p.AddOns.CNI = &CNI{}
   382  	p.AddOns.CNI.Provider = cniProviderCalico
   383  	p.AddOns.CNI.Options.Calico.Mode = "overlay"
   384  	p.AddOns.CNI.Options.Calico.LogLevel = "info"
   385  	p.AddOns.CNI.Options.Calico.WorkloadMTU = 1500
   386  	p.AddOns.CNI.Options.Calico.FelixInputMTU = 1440
   387  	p.AddOns.CNI.Options.Calico.IPAutodetectionMethod = "first-found"
   388  	// DNS
   389  	p.AddOns.DNS.Provider = "kubedns"
   390  	p.AddOns.DNS.Options.Replicas = 2
   391  	// Heapster
   392  	p.AddOns.HeapsterMonitoring = &HeapsterMonitoring{}
   393  	p.AddOns.HeapsterMonitoring.Options.Heapster.Replicas = 2
   394  	p.AddOns.HeapsterMonitoring.Options.Heapster.ServiceType = "ClusterIP"
   395  	p.AddOns.HeapsterMonitoring.Options.Heapster.Sink = "influxdb:http://heapster-influxdb.kube-system.svc:8086"
   396  
   397  	// Package Manager
   398  	p.AddOns.PackageManager.Provider = "helm"
   399  	p.AddOns.PackageManager.Options.Helm.Namespace = "kube-system"
   400  
   401  	// Dashboard
   402  	p.AddOns.Dashboard.Disable = false
   403  	p.AddOns.Dashboard.Options.ServiceType = "ClusterIP"
   404  
   405  	// Generate entries for all node types
   406  	p.Etcd.ExpectedCount = templateOpts.EtcdNodes
   407  	p.Master.ExpectedCount = templateOpts.MasterNodes
   408  	p.Worker.ExpectedCount = templateOpts.WorkerNodes
   409  	p.Ingress.ExpectedCount = templateOpts.IngressNodes
   410  	p.Storage.ExpectedCount = templateOpts.StorageNodes
   411  
   412  	for i := 0; i < templateOpts.AdditionalFiles; i++ {
   413  		f := AdditionalFile{}
   414  		p.AdditionalFiles = append(p.AdditionalFiles, f)
   415  	}
   416  
   417  	n := Node{}
   418  	for i := 0; i < p.Etcd.ExpectedCount; i++ {
   419  		p.Etcd.Nodes = append(p.Etcd.Nodes, n)
   420  	}
   421  
   422  	for i := 0; i < p.Master.ExpectedCount; i++ {
   423  		p.Master.Nodes = append(p.Master.Nodes, n)
   424  	}
   425  
   426  	for i := 0; i < p.Worker.ExpectedCount; i++ {
   427  		p.Worker.Nodes = append(p.Worker.Nodes, n)
   428  	}
   429  
   430  	if p.Ingress.ExpectedCount > 0 {
   431  		for i := 0; i < p.Ingress.ExpectedCount; i++ {
   432  			p.Ingress.Nodes = append(p.Ingress.Nodes, n)
   433  		}
   434  	}
   435  
   436  	if p.Storage.ExpectedCount > 0 {
   437  		for i := 0; i < p.Storage.ExpectedCount; i++ {
   438  			p.Storage.Nodes = append(p.Storage.Nodes, n)
   439  		}
   440  	}
   441  
   442  	return p
   443  }
   444  
   445  func getKubernetesServiceIP(p *Plan) (string, error) {
   446  	ip, err := util.GetIPFromCIDR(p.Cluster.Networking.ServiceCIDRBlock, 1)
   447  	if err != nil {
   448  		return "", fmt.Errorf("error getting kubernetes service IP: %v", err)
   449  	}
   450  	return ip.To4().String(), nil
   451  }
   452  
   453  func getDNSServiceIP(p *Plan) (string, error) {
   454  	ip, err := util.GetIPFromCIDR(p.Cluster.Networking.ServiceCIDRBlock, 2)
   455  	if err != nil {
   456  		return "", fmt.Errorf("error getting DNS service IP: %v", err)
   457  	}
   458  	return ip.To4().String(), nil
   459  }
   460  
   461  // The comment map contains is keyed by the value that should be commented
   462  // in the plan file. The value of the map contains the comment, split into
   463  // separate lines.
   464  var commentMap = map[string][]string{
   465  	"cluster.admin_password":                             []string{"This password is used to login to the Kubernetes Dashboard and can also be", "used for administration without a security certificate."},
   466  	"cluster.version":                                    []string{fmt.Sprintf("Kubernetes cluster version (supported minor version %q).", kubernetesMinorVersionString)},
   467  	"cluster.disable_package_installation":               []string{"Set to true if the nodes have the required packages installed."},
   468  	"cluster.disconnected_installation":                  []string{"Set to true if you are performing a disconnected installation."},
   469  	"cluster.networking":                                 []string{"Networking configuration of your cluster."},
   470  	"cluster.networking.pod_cidr_block":                  []string{"Kubernetes will assign pods IPs in this range. Do not use a range that is", "already in use on your local network!"},
   471  	"cluster.networking.service_cidr_block":              []string{"Kubernetes will assign services IPs in this range. Do not use a range", "that is already in use by your local network or pod network!"},
   472  	"cluster.networking.update_hosts_files":              []string{"Set to true if your nodes cannot resolve each others' names using DNS."},
   473  	"cluster.networking.http_proxy":                      []string{"Set the proxy server to use for HTTP connections."},
   474  	"cluster.networking.https_proxy":                     []string{"Set the proxy server to use for HTTPs connections."},
   475  	"cluster.networking.no_proxy":                        []string{"List of host names and/or IPs that shouldn't go through any proxy.", "All nodes' 'host' and 'IPs' are always set."},
   476  	"cluster.certificates":                               []string{"Generated certs configuration."},
   477  	"cluster.certificates.expiry":                        []string{"Self-signed certificate expiration period in hours; default is 2 years."},
   478  	"cluster.certificates.ca_expiry":                     []string{"CA certificate expiration period in hours; default is 2 years."},
   479  	"cluster.ssh":                                        []string{"SSH configuration for cluster nodes."},
   480  	"cluster.ssh.user":                                   []string{"This user must be able to sudo without password."},
   481  	"cluster.ssh.ssh_key":                                []string{"Absolute path to the ssh private key we should use to manage nodes."},
   482  	"cluster.kube_apiserver":                             []string{"Override configuration of Kubernetes components."},
   483  	"cluster.cloud_provider":                             []string{"Kubernetes cloud provider integration."},
   484  	"cluster.cloud_provider.provider":                    []string{"Options: 'aws','azure','cloudstack','fake','gce','mesos','openstack',", "'ovirt','photon','rackspace','vsphere'.", "Leave empty for bare metal setups or other unsupported providers."},
   485  	"cluster.cloud_provider.config":                      []string{"Path to the config file, leave empty if provider does not require it."},
   486  	"docker":                                             []string{"Docker daemon configuration of all cluster nodes."},
   487  	"docker.disable":                                     []string{"Set to true if docker is already installed and configured."},
   488  	"docker.storage.driver":                              []string{"Leave empty to have docker automatically select the driver."},
   489  	"docker.storage.direct_lvm_block_device":             []string{"Used for setting up Device Mapper storage driver in direct-lvm mode."},
   490  	"docker.storage.direct_lvm_block_device.path":        []string{"Absolute path to the block device that will be used for direct-lvm mode.", "This device will be wiped and used exclusively by docker."},
   491  	"docker_registry":                                    []string{"If you want to use an internal registry for the installation or upgrade, you", "must provide its information here. You must seed this registry before the", "installation or upgrade of your cluster. This registry must be accessible from", "all nodes on the cluster."},
   492  	"docker_registry.server":                             []string{"IP or hostname and port for your registry."},
   493  	"docker_registry.CA":                                 []string{"Absolute path to the certificate authority that should be trusted when", "connecting to your registry."},
   494  	"docker_registry.username":                           []string{"Leave blank for unauthenticated access."},
   495  	"docker_registry.password":                           []string{"Leave blank for unauthenticated access."},
   496  	"add_ons":                                            []string{"Add-ons are additional components that KET installs on the cluster."},
   497  	"add_ons.cni.provider":                               []string{"Selecting 'custom' will result in a CNI ready cluster, however it is up to", "you to configure a plugin after the install.", "Options: 'calico','weave','contiv','custom'."},
   498  	"add_ons.cni.options.calico.mode":                    []string{"Options: 'overlay','routed'."},
   499  	"add_ons.cni.options.calico.log_level":               []string{"Options: 'warning','info','debug'."},
   500  	"add_ons.cni.options.calico.workload_mtu":            []string{"MTU for the workload interface, configures the CNI config."},
   501  	"add_ons.cni.options.calico.felix_input_mtu":         []string{"MTU for the tunnel device used if IPIP is enabled."},
   502  	"add_ons.cni.options.calico.ip_autodetection_method": []string{"Used to detect the IPv4 address of the host."},
   503  	"add_ons.cni.options.weave.password":                 []string{"Used by Weave for network traffic encryption.", "Should be reasonably strong, with at least 50 bits of entropy."},
   504  	"add_ons.dns.provider":                               []string{"Options: 'kubedns','coredns'."},
   505  	"add_ons.heapster.options.influxdb.pvc_name":         []string{"Provide the name of the persistent volume claim that you will create", "after installation. If not specified, the data will be stored in", "ephemeral storage."},
   506  	"add_ons.heapster.options.heapster.service_type":     []string{"Specify kubernetes ServiceType. Defaults to 'ClusterIP'.", "Options: 'ClusterIP','NodePort','LoadBalancer','ExternalName'."},
   507  	"add_ons.heapster.options.heapster.sink":             []string{"Specify the sink to store heapster data. Defaults to an influxdb pod", "running on the cluster."},
   508  	"add_ons.metrics_server":                             []string{"Metrics Server is a cluster-wide aggregator of resource usage data."},
   509  	"add_ons.package_manager.provider":                   []string{"Options: 'helm'."},
   510  	"add_ons.rescheduler":                                []string{"The rescheduler ensures that critical add-ons remain running on the cluster."},
   511  	"etcd":                                               []string{"Etcd nodes are the ones that run the etcd distributed key-value database."},
   512  	"etcd.nodes":                                         []string{"Provide the hostname and IP of each node. If the node has an IP for internal", "traffic, provide it in the internalip field. Otherwise, that field can be", "left blank."},
   513  	"master":                                             []string{"Master nodes are the ones that run the Kubernetes control plane components."},
   514  	"worker":                                             []string{"Worker nodes are the ones that will run your workloads on the cluster."},
   515  	"ingress":                                            []string{"Ingress nodes will run the ingress controllers."},
   516  	"storage":                                            []string{"Storage nodes will be used to create a distributed storage cluster that can", "be consumed by your workloads."},
   517  	"master.load_balanced_fqdn":                          []string{"If you have set up load balancing for master nodes, enter the FQDN name here.", "Otherwise, use the IP address of a single master node."},
   518  	"master.load_balanced_short_name":                    []string{"If you have set up load balancing for master nodes, enter the short name here.", "Otherwise, use the IP address of a single master node."},
   519  	"additional_files":                                   []string{"A set of files or directories to copy from the local machine to any of the nodes in the cluster."},
   520  }
   521  
   522  type stack struct {
   523  	lock sync.Mutex
   524  	s    []string
   525  }
   526  
   527  func newStack() *stack {
   528  	return &stack{sync.Mutex{}, make([]string, 0)}
   529  }
   530  
   531  func (s *stack) Push(v string) {
   532  	s.lock.Lock()
   533  	defer s.lock.Unlock()
   534  
   535  	s.s = append(s.s, v)
   536  }
   537  
   538  func (s *stack) Pop() (string, error) {
   539  	s.lock.Lock()
   540  	defer s.lock.Unlock()
   541  
   542  	l := len(s.s)
   543  	if l == 0 {
   544  		return "", errors.New("Empty Stack")
   545  	}
   546  
   547  	res := s.s[l-1]
   548  	s.s = s.s[:l-1]
   549  	return res, nil
   550  }