github.com/kubernetes-incubator/kube-aws@v0.16.4/test/integration/maincluster_test.go

     1  package integration
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"os"
     7  	"reflect"
     8  	"strings"
     9  	"testing"
    10  
    11  	"github.com/google/go-cmp/cmp"
    12  	"github.com/kubernetes-incubator/kube-aws/builtin"
    13  	"github.com/kubernetes-incubator/kube-aws/cfnstack"
    14  	"github.com/kubernetes-incubator/kube-aws/core/root"
    15  	"github.com/kubernetes-incubator/kube-aws/core/root/config"
    16  	"github.com/kubernetes-incubator/kube-aws/pkg/api"
    17  	"github.com/kubernetes-incubator/kube-aws/pkg/model"
    18  	"github.com/kubernetes-incubator/kube-aws/test/helper"
    19  )
    20  
    21  type ConfigTester func(c *config.Config, t *testing.T)
    22  type ClusterTester func(c *root.Cluster, t *testing.T)
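
// A ConfigTester inspects the parsed cluster config, while a ClusterTester
// inspects the fully rendered root cluster. As a sketch (hypothetical context
// name and YAML fragment), a table entry below composes them like this:
//
//	{
//		context:    "WithFoo",
//		configYaml: minimalValidConfigYaml + "\nfoo: bar\n",
//		assertConfig: []ConfigTester{
//			func(c *config.Config, t *testing.T) { /* assert on c */ },
//		},
//	}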
    23  
    24  // TestMainClusterConfig is an integration test that exercises real AWS services, including S3, KMS, and CloudFormation.
    25  func TestMainClusterConfig(t *testing.T) {
    26  	kubeAwsSettings := newKubeAwsSettingsFromEnv(t)
    27  
    28  	s3URI, s3URIExists := os.LookupEnv("KUBE_AWS_S3_DIR_URI")
    29  
    30  	if !s3URIExists || s3URI == "" {
    31  		s3URI = "s3://mybucket/mydir"
    32  		t.Logf(`Falling back to the stub s3URI "%s" for stack-template validation tests. No assets will actually be uploaded to S3`, s3URI)
    33  	} else {
    34  		log.Printf("s3URI is %s", s3URI)
    35  	}
    36  
    37  	s3Loc, err := cfnstack.S3URIFromString(s3URI)
    38  	if err != nil {
    39  		t.Errorf("failed to parse s3 uri: %v", err)
    40  		t.FailNow()
    41  	}
    42  	s3Bucket := s3Loc.Bucket()
    43  	s3Dir := s3Loc.KeyComponents()[0]
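	// For reference: with the stub URI "s3://mybucket/mydir" above, S3URIFromString
	// should yield Bucket() == "mybucket" and KeyComponents()[0] == "mydir", so the
	// asset keys asserted below land under "mydir/..." in bucket "mybucket".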
    44  
    45  	firstAz := kubeAwsSettings.region + "c"
    46  
    47  	hasDefaultEtcdSettings := func(c *config.Config, t *testing.T) {
    48  		subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
    49  		subnet1.Name = "Subnet0"
    50  		expected := api.EtcdSettings{
    51  			Etcd: api.Etcd{
    52  				Cluster: api.EtcdCluster{
    53  					Version: "v3.3.17",
    54  				},
    55  				EC2Instance: api.EC2Instance{
    56  					Count:        1,
    57  					InstanceType: "t2.medium",
    58  					Tenancy:      "default",
    59  					RootVolume: api.RootVolume{
    60  						Size: 30,
    61  						Type: "gp2",
    62  						IOPS: 0,
    63  					},
    64  				},
    65  				DataVolume: api.DataVolume{
    66  					Size:      30,
    67  					Type:      "gp2",
    68  					IOPS:      0,
    69  					Ephemeral: false,
    70  				},
    71  				Subnets: api.Subnets{
    72  					subnet1,
    73  				},
    74  				UserSuppliedArgs: api.UserSuppliedArgs{
    75  					QuotaBackendBytes: api.DefaultQuotaBackendBytes,
    76  				},
    77  			},
    78  		}
    79  		actual := c.EtcdSettings
    80  		if diff := cmp.Diff(actual, expected); diff != "" {
    81  			t.Errorf("EtcdSettings didn't match: %s", diff)
    82  		}
    83  	}
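
	// For contrast, a cluster.yaml override along these lines (a sketch; etcd.count
	// is the key for the member count) would change EC2Instance.Count above and make
	// the cmp.Diff assertion report a mismatch:
	//
	//	etcd:
	//	  count: 3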
    84  
    85  	hasDefaultExperimentalFeatures := func(c *config.Config, t *testing.T) {
    86  		expected := api.Experimental{
    87  			Admission: api.Admission{
    88  				AlwaysPullImages: api.AlwaysPullImages{
    89  					Enabled: false,
    90  				},
    91  				EventRateLimit: api.EventRateLimit{
    92  					Enabled: true,
    93  					Limits: `- type: Namespace
    94    qps: 250
    95    burst: 500
    96    cacheSize: 4096
    97  - type: User
    98    qps: 50
    99    burst: 250`,
   100  				},
   101  			},
   102  			AuditLog: api.AuditLog{
   103  				Enabled:   false,
   104  				LogPath:   "/var/log/kube-apiserver-audit.log",
   105  				MaxAge:    30,
   106  				MaxBackup: 1,
   107  				MaxSize:   100,
   108  			},
   109  			Authentication: api.Authentication{
   110  				Webhook: api.Webhook{
   111  					Enabled:  false,
   112  					CacheTTL: "5m0s",
   113  					Config:   "",
   114  				},
   115  			},
   116  			AwsEnvironment: api.AwsEnvironment{
   117  				Enabled: false,
   118  			},
   119  			AwsNodeLabels: api.AwsNodeLabels{
   120  				Enabled: false,
   121  			},
   122  			EphemeralImageStorage: api.EphemeralImageStorage{
   123  				Enabled:    false,
   124  				Disk:       "xvdb",
   125  				Filesystem: "xfs",
   126  			},
   127  			GpuSupport: api.GpuSupport{
   128  				Enabled:      false,
   129  				Version:      "",
   130  				InstallImage: "shelmangroup/coreos-nvidia-driver-installer:latest",
   131  			},
   132  			LoadBalancer: api.LoadBalancer{
   133  				Enabled: false,
   134  			},
   135  			Oidc: api.Oidc{
   136  				Enabled:       false,
   137  				IssuerUrl:     "https://accounts.google.com",
   138  				ClientId:      "kubernetes",
   139  				UsernameClaim: "email",
   140  				GroupsClaim:   "groups",
   141  			},
   142  			CloudControllerManager: api.CloudControllerManager{
   143  				Enabled: false,
   144  			},
   145  			ContainerStorageInterface: api.ContainerStorageInterface{
   146  				Enabled: false,
   147  				CSIProvisioner: api.Image{
   148  					Repo: "quay.io/k8scsi/csi-provisioner",
   149  					Tag:  api.CSIDefaultProvisionerImageTag,
   150  				},
   151  				CSIAttacher: api.Image{
   152  					Repo: "quay.io/k8scsi/csi-attacher",
   153  					Tag:  api.CSIDefaultAttacherImageTag,
   154  				},
   155  				CSILivenessProbe: api.Image{
   156  					Repo: "quay.io/k8scsi/livenessprobe",
   157  					Tag:  api.CSIDefaultLivenessProbeImageTag,
   158  				},
   159  				CSINodeDriverRegistrar: api.Image{
   160  					Repo: "quay.io/k8scsi/csi-node-driver-registrar",
   161  					Tag:  api.CSIDefaultNodeDriverRegistrarTag,
   162  				},
   163  				AmazonEBSDriver: api.Image{
   164  					Repo: "amazon/aws-ebs-csi-driver",
   165  					Tag:  api.CSIDefaultAmazonEBSDriverImageTag,
   166  				},
   167  			},
   168  			NodeDrainer: api.NodeDrainer{
   169  				Enabled:      false,
   170  				DrainTimeout: 5,
   171  			},
   172  		}
   173  
   174  		actual := c.Experimental
   175  
   176  		if !reflect.DeepEqual(expected, actual) {
   177  			t.Errorf("experimental settings didn't match :\nexpected=%v\nactual=%v", expected, actual)
   178  		}
   179  
   180  		if !c.WaitSignal.Enabled() {
   181  			t.Errorf("waitSignal should be enabled but was not: %v", c.WaitSignal)
   182  		}
   183  
   184  		if c.WaitSignal.MaxBatchSize(1) != 1 {
   185  			t.Errorf("waitSignal.maxBatchSize should be 1 but was %d: %v", c.WaitSignal.MaxBatchSize(1), c.WaitSignal)
   186  		}
   187  	}
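
	// A reading of the assertions above: WaitSignal.MaxBatchSize(1) is expected to
	// return its fallback argument when no explicit maxBatchSize is configured. An
	// override would look like this sketch (the same keys appear in the node-pool
	// cases further below):
	//
	//	waitSignal:
	//	  enabled: true
	//	  maxBatchSize: 2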
   188  
   189  	everyPublicSubnetHasRouteToIGW := func(c *config.Config, t *testing.T) {
   190  		for i, s := range c.PublicSubnets() {
   191  			if !s.ManageRouteToInternet() {
   192  				t.Errorf("Public subnet %d should have a route to the IGW but it doesn't: %+v", i, s)
   193  			}
   194  		}
   195  	}
   196  
   197  	hasDefaultLaunchSpecifications := func(c *config.Config, t *testing.T) {
   198  		expected := []api.LaunchSpecification{
   199  			{
   200  				WeightedCapacity: 1,
   201  				InstanceType:     "c4.large",
   202  				SpotPrice:        "",
   203  				RootVolume:       api.NewGp2RootVolume(30),
   204  			},
   205  			{
   206  				WeightedCapacity: 2,
   207  				InstanceType:     "c4.xlarge",
   208  				SpotPrice:        "",
   209  				RootVolume:       api.NewGp2RootVolume(60),
   210  			},
   211  		}
   212  		p := c.NodePools[0]
   213  		actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
   214  		if !reflect.DeepEqual(expected, actual) {
   215  			t.Errorf(
   216  				"LaunchSpecifications didn't match: expected=%v actual=%v",
   217  				expected,
   218  				actual,
   219  			)
   220  		}
   221  
   222  		globalSpotPrice := p.WorkerNodePool.SpotFleet.SpotPrice
   223  		if globalSpotPrice != "0.06" {
   224  			t.Errorf("Default spot price is expected to be 0.06 but was: %s", globalSpotPrice)
   225  		}
   226  	}
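
	// The launch specifications above are the defaults derived for a spot-fleet
	// pool; a pool exercising them could be as small as this sketch (assuming the
	// spotFleet keys in this repo's cluster.yaml schema):
	//
	//	worker:
	//	  nodePools:
	//	  - name: pool1
	//	    spotFleet:
	//	      targetCapacity: 10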
   227  
   228  	spotFleetBasedNodePoolHasWaitSignalDisabled := func(c *config.Config, t *testing.T) {
   229  		p := c.NodePools[0]
   230  
   231  		if !p.SpotFleet.Enabled() {
   232  			t.Errorf("1st node pool is expected to be a spot fleet-based one but was not: %+v", p)
   233  		}
   234  
   235  		if p.WaitSignal.Enabled() {
   236  			t.Errorf(
   237  				"WaitSignal should be disabled for a spot fleet-based node pool but was enabled: %v",
   238  				p.WaitSignal,
   239  			)
   240  		}
   241  	}
   242  
   243  	asgBasedNodePoolHasWaitSignalEnabled := func(c *config.Config, t *testing.T) {
   244  		p := c.NodePools[0]
   245  
   246  		if p.SpotFleet.Enabled() {
   247  			t.Errorf("1st node pool is expected to be an asg-based one but was not: %+v", p)
   248  		}
   249  
   250  		if !p.WaitSignal.Enabled() {
   251  			t.Errorf(
   252  				"WaitSignal should be enabled but was not: %v",
   253  				p.WaitSignal,
   254  			)
   255  		}
   256  	}
   257  
   258  	hasDefaultNodePoolRollingStrategy := func(c *config.Config, t *testing.T) {
   259  		s := c.NodePools[0].NodePoolRollingStrategy
   260  
   261  		if s != "AvailabilityZone" {
   262  			t.Errorf("Default nodePool rolling strategy should be 'AvailabilityZone' but is not: %v", s)
   263  		}
   264  	}
   265  
   266  	hasSpecificNodePoolRollingStrategy := func(expRollingStrategy string) func(c *config.Config, t *testing.T) {
   267  		return func(c *config.Config, t *testing.T) {
   268  			actRollingStrategy := c.NodePools[0].NodePoolRollingStrategy
   269  			if actRollingStrategy != expRollingStrategy {
   270  				t.Errorf("The nodePool Rolling Strategy (%s) does not match the expected one: %s", actRollingStrategy, expRollingStrategy)
   271  			}
   272  		}
   273  	}
   274  
   275  	hasWorkerAndNodePoolStrategy := func(expWorkerStrategy, expNodePoolStrategy string) func(c *config.Config, t *testing.T) {
   276  		return func(c *config.Config, t *testing.T) {
   277  			actWorkerStrategy := c.NodePools[0].NodePoolRollingStrategy
   278  			actNodePoolStrategy := c.NodePools[1].NodePoolRollingStrategy
   279  
   280  			if expWorkerStrategy != actWorkerStrategy {
   281  				t.Errorf("The worker Rolling Strategy (%s) does not match the expected one: %s", actWorkerStrategy, expWorkerStrategy)
   282  			}
   283  			if expNodePoolStrategy != actNodePoolStrategy {
   284  				t.Errorf("The nodePool Rolling Strategy (%s) does not match the expected one: %s", actNodePoolStrategy, expNodePoolStrategy)
   285  			}
   286  		}
   287  	}
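
	// A per-pool override that the strategy helpers above exercise might look like
	// this sketch ("Sequential" and "Parallel" are the other strategy values
	// accepted alongside the "AvailabilityZone" default):
	//
	//	worker:
	//	  nodePools:
	//	  - name: pool1
	//	    nodePoolRollingStrategy: Sequential
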
   288  	hasPrivateSubnetsWithManagedNGWs := func(expectedNum int) func(c *config.Config, t *testing.T) {
   289  		return func(c *config.Config, t *testing.T) {
   290  			for i, s := range c.PrivateSubnets() {
   291  				if !s.ManageNATGateway() {
   292  					t.Errorf("NAT gateway for the existing private subnet #%d should be created by kube-aws but was not", i)
   293  				}
   294  
   295  				if s.ManageRouteToInternet() {
   296  					t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
   297  				}
   298  			}
   299  		}
   300  	}
   301  
   302  	hasSpecificNumOfManagedNGWsWithUnmanagedEIPs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
   303  		return func(c *config.Config, t *testing.T) {
   304  			ngwActualNum := len(c.NATGateways())
   305  			if ngwActualNum != ngwExpectedNum {
   306  				t.Errorf("Number of NAT gateways (%d) doesn't match the expected one: %d", ngwActualNum, ngwExpectedNum)
   307  			}
   308  			for i, n := range c.NATGateways() {
   309  				if !n.ManageNATGateway() {
   310  					t.Errorf("NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
   311  				}
   312  				if n.ManageEIP() {
   313  					t.Errorf("EIP for NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
   314  				}
   315  				if !n.ManageRoute() {
   316  					t.Errorf("Routes for NGW #%d are expected to be managed by kube-aws but were not: %+v", i, n)
   317  				}
   318  			}
   319  		}
   320  	}
   321  
   322  	hasSpecificNumOfManagedNGWsAndEIPs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
   323  		return func(c *config.Config, t *testing.T) {
   324  			ngwActualNum := len(c.NATGateways())
   325  			if ngwActualNum != ngwExpectedNum {
   326  				t.Errorf("Number of NAT gateways (%d) doesn't match the expected one: %d", ngwActualNum, ngwExpectedNum)
   327  			}
   328  			for i, n := range c.NATGateways() {
   329  				if !n.ManageNATGateway() {
   330  					t.Errorf("NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
   331  				}
   332  				if !n.ManageEIP() {
   333  					t.Errorf("EIP for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
   334  				}
   335  				if !n.ManageRoute() {
   336  					t.Errorf("Routes for NGW #%d are expected to be managed by kube-aws but were not: %+v", i, n)
   337  				}
   338  			}
   339  		}
   340  	}
   341  
   342  	hasTwoManagedNGWsAndEIPs := hasSpecificNumOfManagedNGWsAndEIPs(2)
   343  
   344  	hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
   345  		return func(c *config.Config, t *testing.T) {
   346  			ngwActualNum := len(c.NATGateways())
   347  			if ngwActualNum != ngwExpectedNum {
   348  				t.Errorf("Number of NAT gateways (%d) doesn't match the expected one: %d", ngwActualNum, ngwExpectedNum)
   349  			}
   350  			for i, n := range c.NATGateways() {
   351  				if n.ManageNATGateway() {
   352  					t.Errorf("NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
   353  				}
   354  				if n.ManageEIP() {
   355  					t.Errorf("EIP for NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
   356  				}
   357  				if !n.ManageRoute() {
   358  					t.Errorf("Routes for NGW #%d are expected to be managed by kube-aws but were not: %+v", i, n)
   359  				}
   360  			}
   361  		}
   362  	}
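
	// The managed/unmanaged split above corresponds to whether cluster.yaml pins an
	// existing NAT gateway (or EIP) on a private subnet. A sketch, assuming the
	// subnet-level natGateway keys in this repo's schema and a hypothetical NGW id:
	//
	//	subnets:
	//	- name: private1
	//	  availabilityZone: us-west-1c
	//	  instanceCIDR: 10.0.1.0/24
	//	  private: true
	//	  natGateway:
	//	    id: ngw-0123456789abcdef0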
   363  
   364  	hasNoNGWsOrEIPsOrRoutes := func(c *config.Config, t *testing.T) {
   365  		ngwActualNum := len(c.NATGateways())
   366  		ngwExpectedNum := 0
   367  		if ngwActualNum != ngwExpectedNum {
   368  			t.Errorf("Number of NAT gateways (%d) doesn't match the expected one: %d", ngwActualNum, ngwExpectedNum)
   369  		}
   370  	}
   371  
   372  	hasDefaultCluster := func(c *root.Cluster, t *testing.T) {
   373  		assets, err := c.EnsureAllAssetsGenerated()
   374  		if err != nil {
   375  			t.Errorf("failed to list assets: %v", err)
   376  			t.FailNow()
   377  		}
   378  
   379  		t.Run("Assets/RootStackTemplate", func(t *testing.T) {
   380  			cluster := kubeAwsSettings.clusterName
   381  			stack := kubeAwsSettings.clusterName
   382  			file := "stack.json"
   383  			expected := api.Asset{
   384  				Content: "",
   385  				AssetLocation: api.AssetLocation{
   386  					ID:     api.NewAssetID(stack, file),
   387  					Bucket: s3Bucket,
   388  					Key:    s3Dir + "/kube-aws/clusters/" + cluster + "/exported/stacks/" + stack + "/" + file,
   389  					Path:   stack + "/stack.json",
   390  				},
   391  			}
   392  			actual, err := assets.FindAssetByStackAndFileName(stack, file)
   393  			if err != nil {
   394  				t.Errorf("failed to find asset: %v", err)
   395  			}
   396  			if expected.ID != actual.ID {
   397  				t.Errorf(
   398  					"Asset id didn't match: expected=%v actual=%v",
   399  					expected.ID,
   400  					actual.ID,
   401  				)
   402  			}
   403  			if expected.Key != actual.Key {
   404  				t.Errorf(
   405  					"Asset key didn't match: expected=%v actual=%v",
   406  					expected.Key,
   407  					actual.Key,
   408  				)
   409  			}
   410  		})
   411  
   412  		t.Run("Assets/ControlplaneStackTemplate", func(t *testing.T) {
   413  			cluster := kubeAwsSettings.clusterName
   414  			stack := "control-plane"
   415  			file := "stack.json"
   416  			expected := api.Asset{
   417  				Content: builtin.String("stack-templates/control-plane.json.tmpl"),
   418  				AssetLocation: api.AssetLocation{
   419  					ID:     api.NewAssetID(stack, file),
   420  					Bucket: s3Bucket,
   421  					Key:    s3Dir + "/kube-aws/clusters/" + cluster + "/exported/stacks/" + stack + "/" + file,
   422  					Path:   stack + "/stack.json",
   423  				},
   424  			}
   425  			actual, err := assets.FindAssetByStackAndFileName(stack, file)
   426  			if err != nil {
   427  				t.Errorf("failed to find asset: %v", err)
   428  			}
   429  			if expected.ID != actual.ID {
   430  				t.Errorf(
   431  					"Asset id didn't match: expected=%v actual=%v",
   432  					expected.ID,
   433  					actual.ID,
   434  				)
   435  			}
   436  			if expected.Key != actual.Key {
   437  				t.Errorf(
   438  					"Asset key didn't match: expected=%v actual=%v",
   439  					expected.Key,
   440  					actual.Key,
   441  				)
   442  			}
   443  		})
   444  	}
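
	// Worked example of the key layout asserted above: with the stub S3 location and
	// a cluster named "mycluster" (hypothetical), the control-plane template would be
	// uploaded to "mydir/kube-aws/clusters/mycluster/exported/stacks/control-plane/stack.json".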
   445  
   446  	mainClusterYaml := kubeAwsSettings.mainClusterYaml()
   447  	minimalValidConfigYaml := kubeAwsSettings.minimumValidClusterYamlWithAZ("c")
   448  	configYamlWithoutExernalDNSName := kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
   449  availabilityZone: us-west-1c
   450  `
   451  
   452  	validCases := []struct {
   453  		context       string
   454  		configYaml    string
   455  		assertConfig  []ConfigTester
   456  		assertCluster []ClusterTester
   457  	}{
   458  		{
   459  			context: "WithAddons",
   460  			configYaml: minimalValidConfigYaml + `
   461  addons:
   462    rescheduler:
   463      enabled: true
   464    metricsServer:
   465      enabled: true
   466  worker:
   467    nodePools:
   468    - name: pool1
   469  `,
   470  			assertConfig: []ConfigTester{
   471  				hasDefaultEtcdSettings,
   472  				asgBasedNodePoolHasWaitSignalEnabled,
   473  				func(c *config.Config, t *testing.T) {
   474  					expected := api.Addons{
   475  						Rescheduler: api.Rescheduler{
   476  							Enabled: true,
   477  						},
   478  						MetricsServer: api.MetricsServer{
   479  							Enabled: true,
   480  						},
   481  						APIServerAggregator: api.APIServerAggregator{
   482  							Enabled: true,
   483  						},
   484  					}
   485  
   486  					actual := c.Addons
   487  
   488  					if !reflect.DeepEqual(expected, actual) {
   489  						t.Errorf("addons didn't match : expected=%+v actual=%+v", expected, actual)
   490  					}
   491  				},
   492  			},
   493  			assertCluster: []ClusterTester{
   494  				hasDefaultCluster,
   495  			},
   496  		},
   497  		{
   498  			context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsSpecified",
   499  			configYaml: configYamlWithoutExernalDNSName + `
   500  apiEndpoints:
   501  - name: default
   502    dnsName: k8s.example.com
   503    loadBalancer:
   504      apiAccessAllowedSourceCIDRs:
   505      - 1.2.3.255/32
   506      hostedZone:
   507        id: a1b2c4
   508  `,
   509  			assertConfig: []ConfigTester{
   510  				func(c *config.Config, t *testing.T) {
   511  					l := len(c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs)
   512  					if l != 1 {
   513  						t.Errorf("unexpected size of apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs: %d", l)
   514  						t.FailNow()
   515  					}
   516  					actual := c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs[0].String()
   517  					expected := "1.2.3.255/32"
   518  					if actual != expected {
   519  						t.Errorf("unexpected cidr in apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
   520  					}
   521  				},
   522  			},
   523  		},
   524  		{
   525  			context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsOmitted",
   526  			configYaml: configYamlWithoutExernalDNSName + `
   527  apiEndpoints:
   528  - name: default
   529    dnsName: k8s.example.com
   530    loadBalancer:
   531      hostedZone:
   532        id: a1b2c4
   533  `,
   534  			assertConfig: []ConfigTester{
   535  				func(c *config.Config, t *testing.T) {
   536  					l := len(c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs)
   537  					if l != 1 {
   538  						t.Errorf("unexpected size of apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs: %d", l)
   539  						t.FailNow()
   540  					}
   541  					actual := c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs[0].String()
   542  					expected := "0.0.0.0/0"
   543  					if actual != expected {
   544  						t.Errorf("unexpected cidr in apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
   545  					}
   546  				},
   547  			},
   548  		},
   549  		{
   550  			context:    "WithKubeProxyIPVSModeDisabledByDefault",
   551  			configYaml: minimalValidConfigYaml,
   552  			assertConfig: []ConfigTester{
   553  				func(c *config.Config, t *testing.T) {
   554  					if c.KubeProxy.IPVSMode.Enabled {
   555  						t.Errorf("kube-proxy IPVS mode must be disabled by default")
   556  					}
   557  
   558  					expectedScheduler := "rr"
   559  					if c.KubeProxy.IPVSMode.Scheduler != expectedScheduler {
   560  						t.Errorf("IPVS scheduler should default to: %s (actual = %s)", expectedScheduler, c.KubeProxy.IPVSMode.Scheduler)
   561  					}
   562  
   563  					expectedSyncPeriod := "60s"
   564  					if c.KubeProxy.IPVSMode.SyncPeriod != expectedSyncPeriod {
   565  						t.Errorf("Sync period should default to: %s (actual = %s)", expectedSyncPeriod, c.KubeProxy.IPVSMode.SyncPeriod)
   566  					}
   567  
   568  					expectedMinSyncPeriod := "10s"
   569  					if c.KubeProxy.IPVSMode.MinSyncPeriod != expectedMinSyncPeriod {
   570  						t.Errorf("Minimal sync period should default to: %s (actual = %s)", expectedMinSyncPeriod, c.KubeProxy.IPVSMode.MinSyncPeriod)
   571  					}
   572  				},
   573  			},
   574  		},
   575  		{
   576  			context: "WithKubeProxyIPVSModeEnabled",
   577  			configYaml: minimalValidConfigYaml + `
   578  kubeProxy:
   579    ipvsMode:
   580      enabled: true
   581      scheduler: lc
   582      syncPeriod: 90s
   583      minSyncPeriod: 15s
   584  `,
   585  			assertConfig: []ConfigTester{
   586  				func(c *config.Config, t *testing.T) {
   587  					if !c.KubeProxy.IPVSMode.Enabled {
   588  						t.Errorf("kube-proxy IPVS mode must be enabled")
   589  					}
   590  
   591  					expectedScheduler := "lc"
   592  					if c.KubeProxy.IPVSMode.Scheduler != expectedScheduler {
   593  						t.Errorf("IPVS scheduler should be set to: %s (actual = %s)", expectedScheduler, c.KubeProxy.IPVSMode.Scheduler)
   594  					}
   595  
   596  					expectedSyncPeriod := "90s"
   597  					if c.KubeProxy.IPVSMode.SyncPeriod != expectedSyncPeriod {
   598  						t.Errorf("Sync period should be set to: %s (actual = %s)", expectedSyncPeriod, c.KubeProxy.IPVSMode.SyncPeriod)
   599  					}
   600  
   601  					expectedMinSyncPeriod := "15s"
   602  					if c.KubeProxy.IPVSMode.MinSyncPeriod != expectedMinSyncPeriod {
   603  						t.Errorf("Minimal sync period should be set to: %s (actual = %s)", expectedMinSyncPeriod, c.KubeProxy.IPVSMode.MinSyncPeriod)
   604  					}
   605  				},
   606  			},
   607  		},
   608  		{
   609  			// See https://github.com/kubernetes-incubator/kube-aws/issues/365
   610  			context:    "WithClusterNameContainsHyphens",
   611  			configYaml: kubeAwsSettings.withClusterName("my-cluster").minimumValidClusterYaml(),
   612  		},
   613  		{
   614  			context: "WithCustomApiServerSettings",
   615  			configYaml: minimalValidConfigYaml + `
   616  customApiServerSettings:
   617    additionalDnsSans:
   618    - my.host.com
   619    additionalIPAddressSans:
   620    - 0.0.0.0
   621  `,
   622  			assertConfig: []ConfigTester{
   623  				func(c *config.Config, t *testing.T) {
   624  					expectedDnsSans := []string{"my.host.com"}
   625  					actualDnsSans := c.CustomApiServerSettings.AdditionalDnsSANs
   626  					if !reflect.DeepEqual(expectedDnsSans, actualDnsSans) {
   627  						t.Errorf("additionalDnsSans didn't match : expected=%v actual=%v", expectedDnsSans, actualDnsSans)
   628  					}
   629  
   630  					expectedIPSans := []string{"0.0.0.0"}
   631  					actualIPSans := c.CustomApiServerSettings.AdditionalIPAddresses
   632  					if !reflect.DeepEqual(expectedIPSans, actualIPSans) {
   633  						t.Errorf("additionalIPAddressSans didn't match : expected=%v actual=%v", expectedIPSans, actualIPSans)
   634  					}
   635  				},
   636  			},
   637  			assertCluster: []ClusterTester{
   638  				hasDefaultCluster,
   639  			},
   640  		},
   641  		{
   642  			context: "WithCustomSettings",
   643  			configYaml: minimalValidConfigYaml + `
   644  customSettings:
   645    stack-type: control-plane
   646  worker:
   647    nodePools:
   648    - name: pool1
   649      customSettings:
   650        stack-type: node-pool
   651  `,
   652  			assertConfig: []ConfigTester{
   653  				hasDefaultEtcdSettings,
   654  				asgBasedNodePoolHasWaitSignalEnabled,
   655  				func(c *config.Config, t *testing.T) {
   656  					p := c.NodePools[0]
   657  
   658  					{
   659  						expected := map[string]interface{}{
   660  							"stack-type": "control-plane",
   661  						}
   662  						actual := c.CustomSettings
   663  						if !reflect.DeepEqual(expected, actual) {
   664  							t.Errorf("customSettings didn't match : expected=%v actual=%v", expected, actual)
   665  						}
   666  					}
   667  
   668  					{
   669  						expected := map[string]interface{}{
   670  							"stack-type": "node-pool",
   671  						}
   672  						actual := p.CustomSettings
   673  						if !reflect.DeepEqual(expected, actual) {
   674  							t.Errorf("customSettings didn't match : expected=%v actual=%v", expected, actual)
   675  						}
   676  					}
   677  				},
   678  			},
   679  			assertCluster: []ClusterTester{
   680  				hasDefaultCluster,
   681  			},
   682  		},
   683  		{
   684  			context: "WithDifferentReleaseChannels",
   685  			configYaml: minimalValidConfigYaml + `
   686  releaseChannel: stable
   687  worker:
   688    nodePools:
   689    - name: pool1
   690      releaseChannel: alpha
   691  `,
   692  			assertConfig: []ConfigTester{
   693  				hasDefaultEtcdSettings,
   694  				asgBasedNodePoolHasWaitSignalEnabled,
   695  			},
   696  			assertCluster: []ClusterTester{
   697  				func(c *root.Cluster, t *testing.T) {
   698  					cp := c.ControlPlane().Config.AMI
   699  					np := c.NodePools()[0].NodePoolConfig.AMI
   700  
   701  					if cp == "" {
   702  						t.Error("the default AMI ID should not be empty but it was")
   703  					}
   704  
   705  					if np == "" {
   706  						t.Error("the AMI ID for the node pool should not be empty but it was")
   707  					}
   708  
   709  					if cp == np {
   710  						t.Errorf("the default AMI ID and the AMI ID for the node pool should not match but they did: default=%s, nodepool=%s", cp, np)
   711  					}
   712  				},
   713  			},
   714  		},
   715  		{
   716  			context: "WithElasticFileSystemId",
   717  			configYaml: minimalValidConfigYaml + `
   718  elasticFileSystemId: efs-12345
   719  worker:
   720    nodePools:
   721    - name: pool1
   722  `,
   723  			assertConfig: []ConfigTester{
   724  				func(c *config.Config, t *testing.T) {
   725  					if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
   726  					t.Errorf("The value of worker.nodePools[0].elasticFileSystemId should match the top-level elasticFileSystemId, but it didn't: worker.nodePools[0].elasticFileSystemId=%s", c.NodePools[0].ElasticFileSystemID)
   727  					}
   728  				},
   729  			},
   730  		},
   731  		{
   732  			context: "WithElasticFileSystemIdInSpecificNodePool",
   733  			configYaml: mainClusterYaml + `
   734  subnets:
   735  - name: existing1
   736    id: subnet-12345
   737    availabilityZone: us-west-1a
   738  worker:
   739    nodePools:
   740    - name: pool1
   741      subnets:
   742      - name: existing1
   743      elasticFileSystemId: efs-12345
   744    - name: pool2
   745  `,
   746  			assertConfig: []ConfigTester{
   747  				func(c *config.Config, t *testing.T) {
   748  					if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
   749  						t.Errorf("Unexpected worker.nodePools[0].elasticFileSystemId: %s", c.NodePools[0].ElasticFileSystemID)
   750  					}
   751  					if c.NodePools[1].ElasticFileSystemID != "" {
   752  						t.Errorf("Unexpected worker.nodePools[1].elasticFileSystemId: %s", c.NodePools[1].ElasticFileSystemID)
   753  					}
   754  				},
   755  			},
   756  		},
   757  		{
   758  			context: "WithEtcdDataVolumeEncrypted",
   759  			configYaml: minimalValidConfigYaml + `
   760  etcd:
   761    dataVolume:
   762      encrypted: true
   763  `,
   764  			assertConfig: []ConfigTester{
   765  				func(c *config.Config, t *testing.T) {
   766  					if !c.Etcd.DataVolume.Encrypted {
   767  						t.Errorf("Etcd data volume should be encrypted but was not: %v", c.Etcd)
   768  					}
   769  				},
   770  			},
   771  		},
   772  		{
   773  			context: "WithEtcdDataVolumeEncryptedKMSKeyARN",
   774  			configYaml: minimalValidConfigYaml + `
   775  etcd:
   776    dataVolume:
   777      encrypted: true
   778    kmsKeyArn: arn:aws:kms:eu-west-1:XXX:key/XXX
   779  `,
   780  			assertConfig: []ConfigTester{
   781  				func(c *config.Config, t *testing.T) {
   782  					expected := "arn:aws:kms:eu-west-1:XXX:key/XXX"
   783  					if c.Etcd.KMSKeyARN() != expected {
   784  						t.Errorf("Etcd data volume KMS Key ARN didn't match : expected=%v actual=%v", expected, c.Etcd.KMSKeyARN())
   785  					}
   786  					if !c.Etcd.DataVolume.Encrypted {
   787  						t.Error("Etcd data volume should be encrypted but was not")
   788  					}
   789  				},
   790  			},
   791  		},
   792  		{
   793  			context: "WithEtcdMemberIdentityProviderEIP",
   794  			configYaml: minimalValidConfigYaml + `
   795  etcd:
   796    memberIdentityProvider: eip
   797  `,
   798  			assertConfig: []ConfigTester{
   799  				func(c *config.Config, t *testing.T) {
   800  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
   801  					subnet1.Name = "Subnet0"
   802  					expected := api.EtcdSettings{
   803  						Etcd: api.Etcd{
   804  							Cluster: api.EtcdCluster{
   805  								MemberIdentityProvider: "eip",
   806  								Version:                "v3.3.17",
   807  							},
   808  							EC2Instance: api.EC2Instance{
   809  								Count:        1,
   810  								InstanceType: "t2.medium",
   811  								Tenancy:      "default",
   812  								RootVolume: api.RootVolume{
   813  									Size: 30,
   814  									Type: "gp2",
   815  									IOPS: 0,
   816  								},
   817  							},
   818  							DataVolume: api.DataVolume{
   819  								Size:      30,
   820  								Type:      "gp2",
   821  								IOPS:      0,
   822  								Ephemeral: false,
   823  							},
   824  							Subnets: api.Subnets{
   825  								subnet1,
   826  							},
   827  							UserSuppliedArgs: api.UserSuppliedArgs{
   828  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
   829  							},
   830  						},
   831  					}
   832  					actual := c.EtcdSettings
   833  					if diff := cmp.Diff(actual, expected); diff != "" {
   834  						t.Errorf("EtcdSettings didn't match: %s", diff)
   835  					}
   836  
   837  					if !actual.NodeShouldHaveEIP() {
   838  						t.Errorf(
   839  							"NodeShouldHaveEIP returned unexpected value: %v",
   840  							actual.NodeShouldHaveEIP(),
   841  						)
   842  					}
   843  				},
   844  			},
   845  			assertCluster: []ClusterTester{
   846  				hasDefaultCluster,
   847  			},
   848  		},
   849  		{
   850  			context: "WithEtcdMemberIdentityProviderENI",
   851  			configYaml: minimalValidConfigYaml + `
   852  etcd:
   853    memberIdentityProvider: eni
   854  `,
   855  			assertConfig: []ConfigTester{
   856  				func(c *config.Config, t *testing.T) {
   857  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
   858  					subnet1.Name = "Subnet0"
   859  					expected := api.EtcdSettings{
   860  						Etcd: api.Etcd{
   861  							EC2Instance: api.EC2Instance{
   862  								Count:        1,
   863  								InstanceType: "t2.medium",
   864  								RootVolume: api.RootVolume{
   865  									Size: 30,
   866  									Type: "gp2",
   867  									IOPS: 0,
   868  								},
   869  								Tenancy: "default",
   870  							},
   871  							DataVolume: api.DataVolume{
   872  								Size:      30,
   873  								Type:      "gp2",
   874  								IOPS:      0,
   875  								Ephemeral: false,
   876  							},
   877  							Cluster: api.EtcdCluster{
   878  								MemberIdentityProvider: "eni",
   879  								Version:                "v3.3.17",
   880  							},
   881  							Subnets: api.Subnets{
   882  								subnet1,
   883  							},
   884  							UserSuppliedArgs: api.UserSuppliedArgs{
   885  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
   886  							},
   887  						},
   888  					}
   889  					actual := c.EtcdSettings
   890  					if diff := cmp.Diff(actual, expected); diff != "" {
   891  						t.Errorf("EtcdSettings didn't match: %s", diff)
   892  					}
   893  
   894  					if !actual.NodeShouldHaveSecondaryENI() {
   895  						t.Errorf(
   896  							"NodeShouldHaveSecondaryENI returned unexpected value: %v",
   897  							actual.NodeShouldHaveSecondaryENI(),
   898  						)
   899  					}
   900  				},
   901  			},
   902  			assertCluster: []ClusterTester{
   903  				hasDefaultCluster,
   904  			},
   905  		},
   906  		{
   907  			context: "WithEtcdMemberIdentityProviderENIWithCustomDomain",
   908  			configYaml: minimalValidConfigYaml + `
   909  etcd:
   910    memberIdentityProvider: eni
   911    internalDomainName: internal.example.com
   912  `,
   913  			assertConfig: []ConfigTester{
   914  				func(c *config.Config, t *testing.T) {
   915  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
   916  					subnet1.Name = "Subnet0"
   917  					expected := api.EtcdSettings{
   918  						Etcd: api.Etcd{
   919  							Cluster: api.EtcdCluster{
   920  								MemberIdentityProvider: "eni",
   921  								InternalDomainName:     "internal.example.com",
   922  								Version:                "v3.3.17",
   923  							},
   924  							EC2Instance: api.EC2Instance{
   925  								Count:        1,
   926  								InstanceType: "t2.medium",
   927  								RootVolume: api.RootVolume{
   928  									Size: 30,
   929  									Type: "gp2",
   930  									IOPS: 0,
   931  								},
   932  								Tenancy: "default",
   933  							},
   934  							DataVolume: api.DataVolume{
   935  								Size:      30,
   936  								Type:      "gp2",
   937  								IOPS:      0,
   938  								Ephemeral: false,
   939  							},
   940  							Subnets: api.Subnets{
   941  								subnet1,
   942  							},
   943  							UserSuppliedArgs: api.UserSuppliedArgs{
   944  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
   945  							},
   946  						},
   947  					}
   948  					actual := c.EtcdSettings
   949  					if diff := cmp.Diff(actual, expected); diff != "" {
   950  						t.Errorf("EtcdSettings didn't match: %s", diff)
   951  					}
   952  
   953  					if !actual.NodeShouldHaveSecondaryENI() {
   954  						t.Errorf(
   955  							"NodeShouldHaveSecondaryENI returned unexpected value: %v",
   956  							actual.NodeShouldHaveSecondaryENI(),
   957  						)
   958  					}
   959  				},
   960  			},
   961  			assertCluster: []ClusterTester{
   962  				hasDefaultCluster,
   963  			},
   964  		},
   965  		{
   966  			context: "WithEtcdMemberIdentityProviderENIWithCustomFQDNs",
   967  			configYaml: minimalValidConfigYaml + `
   968  etcd:
   969    memberIdentityProvider: eni
   970    internalDomainName: internal.example.com
   971    nodes:
   972    - fqdn: etcd1a.internal.example.com
   973    - fqdn: etcd1b.internal.example.com
   974    - fqdn: etcd1c.internal.example.com
   975  `,
   976  			assertConfig: []ConfigTester{
   977  				func(c *config.Config, t *testing.T) {
   978  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
   979  					subnet1.Name = "Subnet0"
   980  					expected := api.EtcdSettings{
   981  						Etcd: api.Etcd{
   982  							Cluster: api.EtcdCluster{
   983  								MemberIdentityProvider: "eni",
   984  								InternalDomainName:     "internal.example.com",
   985  								Version:                "v3.3.17",
   986  							},
   987  							EC2Instance: api.EC2Instance{
   988  								Count:        1,
   989  								InstanceType: "t2.medium",
   990  								RootVolume: api.RootVolume{
   991  									Size: 30,
   992  									Type: "gp2",
   993  									IOPS: 0,
   994  								},
   995  								Tenancy: "default",
   996  							},
   997  							DataVolume: api.DataVolume{
   998  								Size:      30,
   999  								Type:      "gp2",
  1000  								IOPS:      0,
  1001  								Ephemeral: false,
  1002  							},
  1003  							Nodes: []api.EtcdNode{
  1004  								api.EtcdNode{
  1005  									FQDN: "etcd1a.internal.example.com",
  1006  								},
  1007  								api.EtcdNode{
  1008  									FQDN: "etcd1b.internal.example.com",
  1009  								},
  1010  								api.EtcdNode{
  1011  									FQDN: "etcd1c.internal.example.com",
  1012  								},
  1013  							},
  1014  							Subnets: api.Subnets{
  1015  								subnet1,
  1016  							},
  1017  							UserSuppliedArgs: api.UserSuppliedArgs{
  1018  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
  1019  							},
  1020  						},
  1021  					}
  1022  					actual := c.EtcdSettings
  1023  					if diff := cmp.Diff(actual, expected); diff != "" {
  1024  						t.Errorf("EtcdSettings didn't match: %s", diff)
  1025  					}
  1026  
  1027  					if !actual.NodeShouldHaveSecondaryENI() {
  1028  						t.Errorf(
  1029  							"NodeShouldHaveSecondaryENI returned unexpected value: %v",
  1030  							actual.NodeShouldHaveSecondaryENI(),
  1031  						)
  1032  					}
  1033  				},
  1034  			},
  1035  			assertCluster: []ClusterTester{
  1036  				hasDefaultCluster,
  1037  			},
  1038  		},
  1039  		{
  1040  			context: "WithEtcdMemberIdentityProviderENIWithCustomNames",
  1041  			configYaml: minimalValidConfigYaml + `
  1042  etcd:
  1043    memberIdentityProvider: eni
  1044    internalDomainName: internal.example.com
  1045    nodes:
  1046    - name: etcd1a
  1047    - name: etcd1b
  1048    - name: etcd1c
  1049  `,
  1050  			assertConfig: []ConfigTester{
  1051  				func(c *config.Config, t *testing.T) {
  1052  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
  1053  					subnet1.Name = "Subnet0"
  1054  					expected := api.EtcdSettings{
  1055  						Etcd: api.Etcd{
  1056  							Cluster: api.EtcdCluster{
  1057  								MemberIdentityProvider: "eni",
  1058  								InternalDomainName:     "internal.example.com",
  1059  								Version:                "v3.3.17",
  1060  							},
  1061  							EC2Instance: api.EC2Instance{
  1062  								Count:        1,
  1063  								InstanceType: "t2.medium",
  1064  								RootVolume: api.RootVolume{
  1065  									Size: 30,
  1066  									Type: "gp2",
  1067  									IOPS: 0,
  1068  								},
  1069  								Tenancy: "default",
  1070  							},
  1071  							DataVolume: api.DataVolume{
  1072  								Size:      30,
  1073  								Type:      "gp2",
  1074  								IOPS:      0,
  1075  								Ephemeral: false,
  1076  							},
  1077  							Nodes: []api.EtcdNode{
  1078  								api.EtcdNode{
  1079  									Name: "etcd1a",
  1080  								},
  1081  								api.EtcdNode{
  1082  									Name: "etcd1b",
  1083  								},
  1084  								api.EtcdNode{
  1085  									Name: "etcd1c",
  1086  								},
  1087  							},
  1088  							Subnets: api.Subnets{
  1089  								subnet1,
  1090  							},
  1091  							UserSuppliedArgs: api.UserSuppliedArgs{
  1092  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
  1093  							},
  1094  						},
  1095  					}
  1096  					actual := c.EtcdSettings
  1097  					if diff := cmp.Diff(actual, expected); diff != "" {
  1098  						t.Errorf("EtcdSettings didn't match: %s", diff)
  1099  					}
  1100  
  1101  					if !actual.NodeShouldHaveSecondaryENI() {
  1102  						t.Errorf(
  1103  							"NodeShouldHaveSecondaryENI returned unexpected value: %v",
  1104  							actual.NodeShouldHaveSecondaryENI(),
  1105  						)
  1106  					}
  1107  				},
  1108  			},
  1109  			assertCluster: []ClusterTester{
  1110  				hasDefaultCluster,
  1111  			},
  1112  		},
  1113  		{
  1114  			context: "WithEtcdMemberIdentityProviderENIWithoutRecordSets",
  1115  			configYaml: minimalValidConfigYaml + `
  1116  etcd:
  1117    memberIdentityProvider: eni
  1118    internalDomainName: internal.example.com
  1119    manageRecordSets: false
  1120    nodes:
  1121    - name: etcd1a
  1122    - name: etcd1b
  1123    - name: etcd1c
  1124  `,
  1125  			assertConfig: []ConfigTester{
  1126  				func(c *config.Config, t *testing.T) {
  1127  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
  1128  					subnet1.Name = "Subnet0"
  1129  					manageRecordSets := false
  1130  					expected := api.EtcdSettings{
  1131  						Etcd: api.Etcd{
  1132  							Cluster: api.EtcdCluster{
  1133  								ManageRecordSets:       &manageRecordSets,
  1134  								MemberIdentityProvider: "eni",
  1135  								InternalDomainName:     "internal.example.com",
  1136  								Version:                "v3.3.17",
  1137  							},
  1138  							EC2Instance: api.EC2Instance{
  1139  								Count:        1,
  1140  								InstanceType: "t2.medium",
  1141  								RootVolume: api.RootVolume{
  1142  									Size: 30,
  1143  									Type: "gp2",
  1144  									IOPS: 0,
  1145  								},
  1146  								Tenancy: "default",
  1147  							},
  1148  							DataVolume: api.DataVolume{
  1149  								Size:      30,
  1150  								Type:      "gp2",
  1151  								IOPS:      0,
  1152  								Ephemeral: false,
  1153  							},
  1154  							Nodes: []api.EtcdNode{
  1155  								api.EtcdNode{
  1156  									Name: "etcd1a",
  1157  								},
  1158  								api.EtcdNode{
  1159  									Name: "etcd1b",
  1160  								},
  1161  								api.EtcdNode{
  1162  									Name: "etcd1c",
  1163  								},
  1164  							},
  1165  							Subnets: api.Subnets{
  1166  								subnet1,
  1167  							},
  1168  							UserSuppliedArgs: api.UserSuppliedArgs{
  1169  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
  1170  							},
  1171  						},
  1172  					}
  1173  					actual := c.EtcdSettings
  1174  					if diff := cmp.Diff(actual, expected); diff != "" {
  1175  						t.Errorf("EtcdSettings didn't match: %s", diff)
  1176  					}
  1177  
  1178  					if !actual.NodeShouldHaveSecondaryENI() {
  1179  						t.Errorf(
  1180  							"NodeShouldHaveSecondaryENI returned unexpected value: %v",
  1181  							actual.NodeShouldHaveSecondaryENI(),
  1182  						)
  1183  					}
  1184  				},
  1185  			},
  1186  			assertCluster: []ClusterTester{
  1187  				hasDefaultCluster,
  1188  			},
  1189  		},
  1190  		{
  1191  			context: "WithEtcdMemberIdentityProviderENIWithHostedZoneID",
  1192  			configYaml: minimalValidConfigYaml + `
  1193  etcd:
  1194    memberIdentityProvider: eni
  1195    internalDomainName: internal.example.com
  1196    hostedZone:
  1197      id: hostedzone-abcdefg
  1198    nodes:
  1199    - name: etcd1a
  1200    - name: etcd1b
  1201    - name: etcd1c
  1202  `,
  1203  			assertConfig: []ConfigTester{
  1204  				func(c *config.Config, t *testing.T) {
  1205  					subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
  1206  					subnet1.Name = "Subnet0"
  1207  					expected := api.EtcdSettings{
  1208  						Etcd: api.Etcd{
  1209  							Cluster: api.EtcdCluster{
  1210  								HostedZone:             api.Identifier{ID: "hostedzone-abcdefg"},
  1211  								MemberIdentityProvider: "eni",
  1212  								InternalDomainName:     "internal.example.com",
  1213  								Version:                "v3.3.17",
  1214  							},
  1215  							EC2Instance: api.EC2Instance{
  1216  								Count:        1,
  1217  								InstanceType: "t2.medium",
  1218  								RootVolume: api.RootVolume{
  1219  									Size: 30,
  1220  									Type: "gp2",
  1221  									IOPS: 0,
  1222  								},
  1223  								Tenancy: "default",
  1224  							},
  1225  							DataVolume: api.DataVolume{
  1226  								Size:      30,
  1227  								Type:      "gp2",
  1228  								IOPS:      0,
  1229  								Ephemeral: false,
  1230  							},
  1231  							Nodes: []api.EtcdNode{
  1232  								api.EtcdNode{
  1233  									Name: "etcd1a",
  1234  								},
  1235  								api.EtcdNode{
  1236  									Name: "etcd1b",
  1237  								},
  1238  								api.EtcdNode{
  1239  									Name: "etcd1c",
  1240  								},
  1241  							},
  1242  							Subnets: api.Subnets{
  1243  								subnet1,
  1244  							},
  1245  							UserSuppliedArgs: api.UserSuppliedArgs{
  1246  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
  1247  							},
  1248  						},
  1249  					}
  1250  					actual := c.EtcdSettings
  1251  					if diff := cmp.Diff(actual, expected); diff != "" {
  1252  						t.Errorf("EtcdSettings didn't match: %s", diff)
  1253  					}
  1254  
  1255  					if !actual.NodeShouldHaveSecondaryENI() {
  1256  						t.Errorf(
  1257  							"NodeShouldHaveSecondaryENI returned unexpected value: %v",
  1258  							actual.NodeShouldHaveSecondaryENI(),
  1259  						)
  1260  					}
  1261  				},
  1262  			},
  1263  			assertCluster: []ClusterTester{
  1264  				hasDefaultCluster,
  1265  			},
  1266  		},
  1267  		{
  1268  			context: "WithExperimentalFeatures",
  1269  			configYaml: minimalValidConfigYaml + `
  1270  experimental:
  1271    admission:
  1272      alwaysPullImages:
  1273        enabled: true
  1274    auditLog:
  1275      enabled: true
  1276      logPath: "/var/log/audit.log"
  1277      maxAge: 100
  1278      maxBackup: 10
  1279      maxSize: 5
  1280    authentication:
  1281      webhook:
  1282        enabled: true
  1283        cacheTTL: "1234s"
  1284        configBase64: "e30k"
  1285    awsEnvironment:
  1286      enabled: true
  1287      environment:
  1288        CFNSTACK: '{ "Ref" : "AWS::StackId" }'
  1289    awsNodeLabels:
  1290      enabled: true
  1291    ephemeralImageStorage:
  1292      enabled: true
  1293    gpuSupport:
  1294      enabled: true
  1295      version: "375.66"
  1296      installImage: "shelmangroup/coreos-nvidia-driver-installer:latest"
  1297    kubeletOpts: '--image-gc-low-threshold 60 --image-gc-high-threshold 70'
  1298    loadBalancer:
  1299      enabled: true
  1300      names:
  1301        - manuallymanagedlb
  1302      securityGroupIds:
  1303        - sg-12345678
  1304    targetGroup:
  1305      enabled: true
  1306      arns:
  1307        - arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx
  1308      securityGroupIds:
  1309        - sg-12345678
  1310    oidc:
  1311      enabled: true
  1312      oidc-issuer-url: "https://accounts.google.com"
  1313      oidc-client-id: "kubernetes"
  1314      oidc-username-claim: "email"
  1315      oidc-groups-claim: "groups"
  1316    nodeDrainer:
  1317      enabled: true
  1318      drainTimeout: 3
  1319  cloudWatchLogging:
  1320    enabled: true
  1321  amazonSsmAgent:
  1322    enabled: true
  1323  worker:
  1324    nodePools:
  1325    - name: pool1
  1326  `,
  1327  			assertConfig: []ConfigTester{
  1328  				hasDefaultEtcdSettings,
  1329  				asgBasedNodePoolHasWaitSignalEnabled,
  1330  				func(c *config.Config, t *testing.T) {
  1331  					expected := api.Experimental{
  1332  						Admission: api.Admission{
  1333  							AlwaysPullImages: api.AlwaysPullImages{
  1334  								Enabled: true,
  1335  							},
  1336  							EventRateLimit: api.EventRateLimit{
  1337  								Enabled: true,
  1338  								Limits: `- type: Namespace
  1339    qps: 250
  1340    burst: 500
  1341    cacheSize: 4096
  1342  - type: User
  1343    qps: 50
  1344    burst: 250`,
  1345  							},
  1346  						},
  1347  						AuditLog: api.AuditLog{
  1348  							Enabled:   true,
  1349  							LogPath:   "/var/log/audit.log",
  1350  							MaxAge:    100,
  1351  							MaxBackup: 10,
  1352  							MaxSize:   5,
  1353  						},
  1354  						Authentication: api.Authentication{
  1355  							Webhook: api.Webhook{
  1356  								Enabled:  true,
  1357  								CacheTTL: "1234s",
  1358  								Config:   "e30k",
  1359  							},
  1360  						},
  1361  						AwsEnvironment: api.AwsEnvironment{
  1362  							Enabled: true,
  1363  							Environment: map[string]string{
  1364  								"CFNSTACK": `{ "Ref" : "AWS::StackId" }`,
  1365  							},
  1366  						},
  1367  						AwsNodeLabels: api.AwsNodeLabels{
  1368  							Enabled: true,
  1369  						},
  1370  						EphemeralImageStorage: api.EphemeralImageStorage{
  1371  							Enabled:    true,
  1372  							Disk:       "xvdb",
  1373  							Filesystem: "xfs",
  1374  						},
  1375  						GpuSupport: api.GpuSupport{
  1376  							Enabled:      true,
  1377  							Version:      "375.66",
  1378  							InstallImage: "shelmangroup/coreos-nvidia-driver-installer:latest",
  1379  						},
  1380  						KubeletOpts: "--image-gc-low-threshold 60 --image-gc-high-threshold 70",
  1381  						LoadBalancer: api.LoadBalancer{
  1382  							Enabled:          true,
  1383  							Names:            []string{"manuallymanagedlb"},
  1384  							SecurityGroupIds: []string{"sg-12345678"},
  1385  						},
  1386  						TargetGroup: api.TargetGroup{
  1387  							Enabled:          true,
  1388  							Arns:             []string{"arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx"},
  1389  							SecurityGroupIds: []string{"sg-12345678"},
  1390  						},
  1391  						Oidc: api.Oidc{
  1392  							Enabled:       true,
  1393  							IssuerUrl:     "https://accounts.google.com",
  1394  							ClientId:      "kubernetes",
  1395  							UsernameClaim: "email",
  1396  							GroupsClaim:   "groups",
  1397  						},
  1398  						CloudControllerManager: api.CloudControllerManager{
  1399  							Enabled: false,
  1400  						},
  1401  						ContainerStorageInterface: api.ContainerStorageInterface{
  1402  							Enabled: false,
  1403  							CSIProvisioner: api.Image{
  1404  								Repo: "quay.io/k8scsi/csi-provisioner",
  1405  								Tag:  api.CSIDefaultProvisionerImageTag,
  1406  							},
  1407  							CSIAttacher: api.Image{
  1408  								Repo: "quay.io/k8scsi/csi-attacher",
  1409  								Tag:  api.CSIDefaultAttacherImageTag,
  1410  							},
  1411  							CSILivenessProbe: api.Image{
  1412  								Repo: "quay.io/k8scsi/livenessprobe",
  1413  								Tag:  api.CSIDefaultLivenessProbeImageTag,
  1414  							},
  1415  							CSINodeDriverRegistrar: api.Image{
  1416  								Repo: "quay.io/k8scsi/csi-node-driver-registrar",
  1417  								Tag:  api.CSIDefaultNodeDriverRegistrarTag,
  1418  							},
  1419  							AmazonEBSDriver: api.Image{
  1420  								Repo: "amazon/aws-ebs-csi-driver",
  1421  								Tag:  api.CSIDefaultAmazonEBSDriverImageTag,
  1422  							},
  1423  						},
  1424  						NodeDrainer: api.NodeDrainer{
  1425  							Enabled:      true,
  1426  							DrainTimeout: 3,
  1427  						},
  1428  					}
  1429  
  1430  					actual := c.Experimental
  1431  
  1432  					if !reflect.DeepEqual(expected, actual) {
  1433  						t.Errorf("experimental settings didn't match : expected=%+v actual=%+v", expected, actual)
  1434  					}
  1435  
  1436  					p := c.NodePools[0]
  1437  					if reflect.DeepEqual(expected, p.Experimental) {
  1438  					t.Errorf("experimental settings shouldn't be inherited by a node pool but they were: toplevel=%v nodepool=%v", expected, p.Experimental)
  1439  					}
  1440  
  1441  				},
  1442  			},
  1443  		},
  1444  		{
  1445  			context: "WithExperimentalFeaturesForWorkerNodePool",
  1446  			configYaml: minimalValidConfigYaml + `
  1447  worker:
  1448    nodePools:
  1449    - name: pool1
  1450      auditLog:
  1451        enabled: true
  1452        maxAge: 100
  1453        logPath: "/var/log/audit.log"
  1454      awsEnvironment:
  1455        enabled: true
  1456        environment:
  1457          CFNSTACK: '{ "Ref" : "AWS::StackId" }'
  1458      awsNodeLabels:
  1459        enabled: true
  1460      ephemeralImageStorage:
  1461        enabled: true
  1462      loadBalancer:
  1463        enabled: true
  1464        names:
  1465          - manuallymanagedlb
  1466        securityGroupIds:
  1467          - sg-12345678
  1468      targetGroup:
  1469        enabled: true
  1470        arns:
  1471          - arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx
  1472        securityGroupIds:
  1473          - sg-12345678
  1474      # Ignored, uses global setting
  1475      nodeDrainer:
  1476        enabled: true
  1477        drainTimeout: 5
  1478      nodeLabels:
  1479        kube-aws.coreos.com/role: worker
  1480      taints:
  1481        - key: reservation
  1482          value: spot
  1483          effect: NoSchedule
  1484  `,
  1485  			assertConfig: []ConfigTester{
  1486  				hasDefaultEtcdSettings,
  1487  				asgBasedNodePoolHasWaitSignalEnabled,
  1488  				func(c *config.Config, t *testing.T) {
  1489  					expected := api.Experimental{
  1490  						AwsEnvironment: api.AwsEnvironment{
  1491  							Enabled: true,
  1492  							Environment: map[string]string{
  1493  								"CFNSTACK": `{ "Ref" : "AWS::StackId" }`,
  1494  							},
  1495  						},
  1496  						AwsNodeLabels: api.AwsNodeLabels{
  1497  							Enabled: true,
  1498  						},
  1499  						EphemeralImageStorage: api.EphemeralImageStorage{
  1500  							Enabled:    true,
  1501  							Disk:       "xvdb",
  1502  							Filesystem: "xfs",
  1503  						},
  1504  						LoadBalancer: api.LoadBalancer{
  1505  							Enabled:          true,
  1506  							Names:            []string{"manuallymanagedlb"},
  1507  							SecurityGroupIds: []string{"sg-12345678"},
  1508  						},
  1509  						TargetGroup: api.TargetGroup{
  1510  							Enabled:          true,
  1511  							Arns:             []string{"arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx"},
  1512  							SecurityGroupIds: []string{"sg-12345678"},
  1513  						},
  1514  						NodeDrainer: api.NodeDrainer{
  1515  							Enabled:      false,
  1516  							DrainTimeout: 0,
  1517  						},
  1518  					}
  1519  					p := c.NodePools[0]
  1520  				if !reflect.DeepEqual(expected, p.Experimental) {
  1521  						t.Errorf("experimental settings for node pool didn't match : expected=%v actual=%v", expected, p.Experimental)
  1522  					}
  1523  
  1524  					expectedNodeLabels := api.NodeLabels{
  1525  						"kube-aws.coreos.com/role": "worker",
  1526  					}
  1527  					actualNodeLabels := c.NodePools[0].NodeLabels()
  1528  					if !reflect.DeepEqual(expectedNodeLabels, actualNodeLabels) {
  1529  						t.Errorf("worker node labels didn't match: expected=%v, actual=%v", expectedNodeLabels, actualNodeLabels)
  1530  					}
  1531  
  1532  					expectedTaints := api.Taints{
  1533  						{Key: "reservation", Value: "spot", Effect: "NoSchedule"},
  1534  					}
  1535  					actualTaints := c.NodePools[0].Taints
  1536  					if !reflect.DeepEqual(expectedTaints, actualTaints) {
  1537  						t.Errorf("worker node taints didn't match: expected=%v, actual=%v", expectedTaints, actualTaints)
  1538  					}
  1539  				},
  1540  			},
  1541  		},
  1542  		{
  1543  			context:    "WithControllerIAMDefaultManageExternally",
  1544  			configYaml: minimalValidConfigYaml,
  1545  			assertConfig: []ConfigTester{
  1546  				func(c *config.Config, t *testing.T) {
  1547  					expectedValue := false
  1548  
  1549  					if c.Controller.IAMConfig.Role.ManageExternally != expectedValue {
  1550  						t.Errorf("controller's iam.role.manageExternally didn't match : expected=%v actual=%v", expectedValue, c.Controller.IAMConfig.Role.ManageExternally)
  1551  					}
  1552  				},
  1553  			},
  1554  		},
  1555  		{
  1556  			context: "WithControllerIAMEnabledManageExternally",
  1557  			configYaml: minimalValidConfigYaml + `
  1558  controller:
  1559    iam:
  1560     role:
  1561       name: myrole1
  1562       manageExternally: true
  1563  `,
  1564  			assertConfig: []ConfigTester{
  1565  				func(c *config.Config, t *testing.T) {
  1566  					expectedManageExternally := true
  1567  					expectedRoleName := "myrole1"
  1568  
  1569  					if expectedRoleName != c.Controller.IAMConfig.Role.Name {
  1570  						t.Errorf("controller's iam.role.name didn't match : expected=%v actual=%v", expectedRoleName, c.Controller.IAMConfig.Role.Name)
  1571  					}
  1572  
  1573  					if expectedManageExternally != c.Controller.IAMConfig.Role.ManageExternally {
  1574  						t.Errorf("controller's iam.role.manageExternally didn't match : expected=%v actual=%v", expectedManageExternally, c.Controller.IAMConfig.Role.ManageExternally)
  1575  					}
  1576  				},
  1577  			},
  1578  		},
  1579  		{
  1580  			context: "WithControllerIAMEnabledStrictName",
  1581  			configYaml: minimalValidConfigYaml + `
  1582  controller:
  1583    iam:
  1584     role:
  1585       name: myrole1
  1586       strictName: true
  1587  `,
  1588  			assertConfig: []ConfigTester{
  1589  				func(c *config.Config, t *testing.T) {
  1590  					expectedRoleName := "myrole1"
  1591  					if expectedRoleName != c.Controller.IAMConfig.Role.Name {
  1592  						t.Errorf("controller's iam.role.name didn't match : expected=%v actual=%v", expectedRoleName, c.Controller.IAMConfig.Role.Name)
  1593  					}
  1594  				},
  1595  			},
  1596  		},
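        		// The following cases toggle the cluster-wide waitSignal flag, which controls whether the
        		// stack waits for creation/update signals from nodes.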
  1597  		{
  1598  			context: "WithWaitSignalDisabled",
  1599  			configYaml: minimalValidConfigYaml + `
  1600  waitSignal:
  1601    enabled: false
  1602  `,
  1603  			assertConfig: []ConfigTester{
  1604  				hasDefaultEtcdSettings,
  1605  				func(c *config.Config, t *testing.T) {
  1606  					if c.WaitSignal.Enabled() {
  1607  						t.Errorf("waitSignal should be disabled but was not: %v", c.WaitSignal)
  1608  					}
  1609  				},
  1610  			},
  1611  		},
  1612  		{
  1613  			context: "WithWaitSignalEnabled",
  1614  			configYaml: minimalValidConfigYaml + `
  1615  waitSignal:
  1616    enabled: true
  1617  `,
  1618  			assertConfig: []ConfigTester{
  1619  				hasDefaultEtcdSettings,
  1620  				func(c *config.Config, t *testing.T) {
  1621  					if !c.WaitSignal.Enabled() {
  1622  						t.Errorf("waitSignal should be enabled but was not: %v", c.WaitSignal)
  1623  					}
  1624  				},
  1625  			},
  1626  		},
  1627  		{
  1628  			context: "WithNodePoolWithWaitSignalDisabled",
  1629  			configYaml: minimalValidConfigYaml + `
  1630  worker:
  1631    nodePools:
  1632    - name: pool1
  1633      waitSignal:
  1634        enabled: false
  1635    - name: pool2
  1636      waitSignal:
  1637        enabled: false
  1638        maxBatchSize: 2
  1639  `,
  1640  			assertConfig: []ConfigTester{
  1641  				hasDefaultEtcdSettings,
  1642  				func(c *config.Config, t *testing.T) {
  1643  					if c.NodePools[0].WaitSignal.Enabled() {
  1644  						t.Errorf("waitSignal should be disabled for node pool at index %d but was not", 0)
  1645  					}
  1646  					if c.NodePools[1].WaitSignal.Enabled() {
  1647  						t.Errorf("waitSignal should be disabled for node pool at index %d but was not", 1)
  1648  					}
  1649  				},
  1650  			},
  1651  		},
  1652  		{
  1653  			context: "WithNodePoolWithWaitSignalEnabled",
  1654  			configYaml: minimalValidConfigYaml + `
  1655  worker:
  1656    nodePools:
  1657    - name: pool1
  1658      waitSignal:
  1659        enabled: true
  1660    - name: pool2
  1661      waitSignal:
  1662        enabled: true
  1663        maxBatchSize: 2
  1664  `,
  1665  			assertConfig: []ConfigTester{
  1666  				hasDefaultEtcdSettings,
  1667  				func(c *config.Config, t *testing.T) {
  1668  					if !c.NodePools[0].WaitSignal.Enabled() {
  1669  						t.Errorf("waitSignal should be enabled for node pool at index %d but was not", 0)
  1670  					}
  1671  					if c.NodePools[0].WaitSignal.MaxBatchSize(1) != 1 {
  1672  						t.Errorf("waitSignal.maxBatchSize should be 1 for node pool at index %d but was %d", 0, c.NodePools[0].WaitSignal.MaxBatchSize(1))
  1673  					}
  1674  					if !c.NodePools[1].WaitSignal.Enabled() {
  1675  						t.Errorf("waitSignal should be enabled for node pool at index %d but was not", 1)
  1676  					}
  1677  					if c.NodePools[1].WaitSignal.MaxBatchSize(1) != 2 {
  1678  						t.Errorf("waitSignal.maxBatchSize should be 2 for node pool at index %d but was %d", 1, c.NodePools[1].WaitSignal.MaxBatchSize(1))
  1679  					}
  1680  				},
  1681  			},
  1682  		},
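        		// nodePoolRollingStrategy may be set for the whole worker section or per pool; a pool-level
        		// value overrides the worker-level one, as the last of these cases verifies.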
  1683  		{
  1684  			context: "WithDefaultNodePoolRollingStrategy",
  1685  			configYaml: minimalValidConfigYaml + `
  1686  worker:
  1687    nodePools:
  1688    - name: pool1
  1689  `,
  1690  			assertConfig: []ConfigTester{
  1691  				hasDefaultNodePoolRollingStrategy,
  1692  			},
  1693  		},
  1694  		{
  1695  			context: "WithSpecificNodePoolRollingStrategy",
  1696  			configYaml: minimalValidConfigYaml + `
  1697  worker:
  1698    nodePools:
  1699    - name: pool1
  1700      nodePoolRollingStrategy: Sequential`,
  1701  			assertConfig: []ConfigTester{
  1702  				hasSpecificNodePoolRollingStrategy("Sequential"),
  1703  			},
  1704  		},
  1705  		{
  1706  			context: "WithSpecificWorkerRollingStrategy",
  1707  			configYaml: minimalValidConfigYaml + `
  1708  worker:
  1709    nodePoolRollingStrategy: Sequential
  1710    nodePools:
  1711    - name: pool1`,
  1712  			assertConfig: []ConfigTester{
  1713  				hasSpecificNodePoolRollingStrategy("Sequential"),
  1714  			},
  1715  		},
  1716  		{
  1717  			context: "WithWorkerAndNodePoolStrategy",
  1718  			configYaml: minimalValidConfigYaml + `
  1719  worker:
  1720    nodePoolRollingStrategy: Sequential
  1721    nodePools:
  1722    - name: pool1
  1723    - name: pool2
  1724      nodePoolRollingStrategy: Parallel
  1725  `,
  1726  			assertConfig: []ConfigTester{
  1727  				hasWorkerAndNodePoolStrategy("Sequential", "Parallel"),
  1728  			},
  1729  		},
  1730  		{
  1731  			context:    "WithMinimalValidConfig",
  1732  			configYaml: minimalValidConfigYaml,
  1733  			assertConfig: []ConfigTester{
  1734  				hasDefaultEtcdSettings,
  1735  				hasDefaultExperimentalFeatures,
  1736  			},
  1737  		},
  1738  		{
  1739  			context: "WithVaryingWorkerCountPerNodePool",
  1740  			configYaml: minimalValidConfigYaml + `
  1741  worker:
  1742    nodePools:
  1743    - name: pool1
  1744    - name: pool2
  1745      count: 2
  1746    - name: pool3
  1747      count: 0
  1748  `,
  1749  			assertConfig: []ConfigTester{
  1750  				hasDefaultEtcdSettings,
  1751  				hasDefaultExperimentalFeatures,
  1752  				func(c *config.Config, t *testing.T) {
  1753  					if c.NodePools[0].Count != 1 {
  1754  						t.Errorf("default worker count should be 1 but was: %d", c.NodePools[0].Count)
  1755  					}
  1756  					if c.NodePools[1].Count != 2 {
  1757  						t.Errorf("worker count should be set to 2 but was: %d", c.NodePools[1].Count)
  1758  					}
  1759  					if c.NodePools[2].Count != 0 {
  1760  						t.Errorf("worker count should be set to 0 but was: %d", c.NodePools[2].Count)
  1761  					}
  1762  				},
  1763  			},
  1764  		},
  1765  		{
  1766  			context: "WithVaryingWorkerASGSizePerNodePool",
  1767  			configYaml: minimalValidConfigYaml + `
  1768  worker:
  1769    nodePools:
  1770    - name: pool1
  1771    - name: pool2
  1772      count: 2
  1773    - name: pool3
  1774      autoScalingGroup:
  1775        minSize: 0
  1776        maxSize: 10
  1777  `,
  1778  			assertConfig: []ConfigTester{
  1779  				hasDefaultEtcdSettings,
  1780  				hasDefaultExperimentalFeatures,
  1781  				func(c *config.Config, t *testing.T) {
  1782  					if c.NodePools[0].MaxCount() != 1 {
  1783  						t.Errorf("worker max count should be 1 but was: %d", c.NodePools[0].MaxCount())
  1784  					}
  1785  					if c.NodePools[0].MinCount() != 1 {
  1786  						t.Errorf("worker min count should be 1 but was: %d", c.NodePools[0].MinCount())
  1787  					}
  1788  					if c.NodePools[1].MaxCount() != 2 {
  1789  						t.Errorf("worker max count should be 2 but was: %d", c.NodePools[1].MaxCount())
  1790  					}
  1791  					if c.NodePools[1].MinCount() != 2 {
  1792  						t.Errorf("worker min count should be 2 but was: %d", c.NodePools[1].MinCount())
  1793  					}
  1794  					if c.NodePools[2].MaxCount() != 10 {
  1795  						t.Errorf("worker max count should be 10 but was: %d", c.NodePools[2].MaxCount())
  1796  					}
  1797  					if c.NodePools[2].MinCount() != 0 {
  1798  						t.Errorf("worker min count should be 0 but was: %d", c.NodePools[2].MinCount())
  1799  					}
  1800  				},
  1801  			},
  1802  		},
  1803  		{
  1804  			context: "WithMultiAPIEndpoints",
  1805  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  1806  vpc:
  1807    id: vpc-1a2b3c4d
  1808  internetGateway:
  1809    id: igw-1a2b3c4d
  1810  
  1811  subnets:
  1812  - name: privateSubnet1
  1813    availabilityZone: us-west-1a
  1814    instanceCIDR: "10.0.1.0/24"
  1815    private: true
  1816  - name: privateSubnet2
  1817    availabilityZone: us-west-1b
  1818    instanceCIDR: "10.0.2.0/24"
  1819    private: true
  1820  - name: publicSubnet1
  1821    availabilityZone: us-west-1a
  1822    instanceCIDR: "10.0.3.0/24"
  1823  - name: publicSubnet2
  1824    availabilityZone: us-west-1b
  1825    instanceCIDR: "10.0.4.0/24"
  1826  
  1827  worker:
  1828    # this can't possibly be the "unversioned" endpoint backed by an existing elb, because doing so would give a worker kubelet
  1829    # the chance to connect to masters from multiple different clusters!
  1830    apiEndpointName: versionedPrivate
  1831    # btw apiEndpointName could be defaulted to a private/public managed (hence unstable/possibly versioned, but not stable/unversioned)
  1832    # elb/round-robin if and only if there is exactly one. However, we don't do such complex defaulting for now.
  1833  
  1834  adminAPIEndpointName: versionedPublic
  1835  
  1836  apiEndpoints:
  1837  - name: unversionedPublic
  1838    dnsName: api.example.com
  1839    loadBalancer:
  1840      id: elb-internet-facing
  1841      ## you can't configure an existing elb like below
  1842      #private: true
  1843      #subnets:
  1844      #- name: privateSubnet1
  1845      ## hostedZone must be omitted when an elb id is specified.
  1846      ## in other words, it is your responsibility to create an alias record for the elb
  1847      #hostedZone:
  1848      #  id: hostedzone-private
  1849  - name: unversionedPrivate
  1850    dnsName: api.internal.example.com
  1851    loadBalancer:
  1852      id: elb-internal
  1853  - name: versionedPublic
  1854    dnsName: v1api.example.com
  1855    loadBalancer:
  1856      subnets:
  1857      - name: publicSubnet1
  1858      hostedZone:
  1859        id: hostedzone-public
  1860  - name: versionedPrivate
  1861    dnsName: v1api.internal.example.com
  1862    loadBalancer:
  1863      private: true
  1864      subnets:
  1865      - name: privateSubnet1
  1866      hostedZone:
  1867        id: hostedzone-private
  1868  - name: versionedPublicAlt
  1869    dnsName: v1apialt.example.com
  1870    loadBalancer:
  1871      # "private: false" implies all the public subnets defined in the top-level "subnets"
  1872      #subnets:
  1873      #- name: publicSubnet1
  1874      #- name: publicSubnet2
  1875      hostedZone:
  1876        id: hostedzone-public
  1877  - name: versionedPrivateAlt
  1878    dnsName: v1apialt.internal.example.com
  1879    loadBalancer:
  1880      private: true
  1881      # "private: true" implies all the private subnets defined in the top-level "subnets"
  1882      #subnets:
  1883      #- name: privateSubnet1
  1884      #- name: privateSubnet2
  1885      hostedZone:
  1886        id: hostedzone-private
  1887  - name: addedToCertCommonNames
  1888    dnsName: api-alt.example.com
  1889    loadBalancer:
  1890      managed: false
  1891  - name: elbOnly
  1892    dnsName: registerme.example.com
  1893    loadBalancer:
  1894      recordSetManaged: false
  1895  `,
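        			// These assertions run against the rendered root cluster, i.e. after each apiEndpoint's
        			// loadBalancer subnets have been resolved against the top-level subnets.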
  1896  			assertCluster: []ClusterTester{
  1897  				func(rootCluster *root.Cluster, t *testing.T) {
  1898  					c := rootCluster.ControlPlane().Config
  1899  
  1900  					private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
  1901  					private1.Name = "privateSubnet1"
  1902  
  1903  					private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
  1904  					private2.Name = "privateSubnet2"
  1905  
  1906  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  1907  					public1.Name = "publicSubnet1"
  1908  
  1909  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  1910  					public2.Name = "publicSubnet2"
  1911  					//private1 := api.NewPrivateSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PrivateSubnet1"}}`)
  1912  					//private1.Name = "privateSubnet1"
  1913  					//
  1914  					//private2 := api.NewPrivateSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PrivateSubnet2"}}`)
  1915  					//private2.Name = "privateSubnet2"
  1916  					//
  1917  					//public1 := api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PublicSubnet1"}}`)
  1918  					//public1.Name = "publicSubnet1"
  1919  					//
  1920  					//public2 := api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PublicSubnet2"}}`)
  1921  					//public2.Name = "publicSubnet2"
  1922  
  1923  					subnets := api.Subnets{
  1924  						private1,
  1925  						private2,
  1926  						public1,
  1927  						public2,
  1928  					}
  1929  					if !reflect.DeepEqual(c.AllSubnets(), subnets) {
  1930  						t.Errorf("Managed subnets didn't match: expected=%+v actual=%+v", subnets, c.AllSubnets())
  1931  					}
  1932  
  1933  					publicSubnets := api.Subnets{
  1934  						public1,
  1935  						public2,
  1936  					}
  1937  
  1938  					privateSubnets := api.Subnets{
  1939  						private1,
  1940  						private2,
  1941  					}
  1942  
  1943  					unversionedPublic := c.APIEndpoints["unversionedPublic"]
  1944  					unversionedPrivate := c.APIEndpoints["unversionedPrivate"]
  1945  					versionedPublic := c.APIEndpoints["versionedPublic"]
  1946  					versionedPrivate := c.APIEndpoints["versionedPrivate"]
  1947  					versionedPublicAlt := c.APIEndpoints["versionedPublicAlt"]
  1948  					versionedPrivateAlt := c.APIEndpoints["versionedPrivateAlt"]
  1949  					addedToCertCommonNames := c.APIEndpoints["addedToCertCommonNames"]
  1950  					elbOnly := c.APIEndpoints["elbOnly"]
  1951  
  1952  					if len(unversionedPublic.LoadBalancer.Subnets) != 0 {
  1953  						t.Errorf("unversionedPublic: subnets should be empty but was not: actual=%+v", unversionedPublic.LoadBalancer.Subnets)
  1954  					}
  1955  					if !unversionedPublic.LoadBalancer.Enabled() {
  1956  						t.Errorf("unversionedPublic: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", unversionedPublic.LoadBalancer)
  1957  					}
  1958  
  1959  					if len(unversionedPrivate.LoadBalancer.Subnets) != 0 {
  1960  						t.Errorf("unversionedPrivate: subnets should be empty but was not: actual=%+v", unversionedPrivate.LoadBalancer.Subnets)
  1961  					}
  1962  					if !unversionedPrivate.LoadBalancer.Enabled() {
  1963  						t.Errorf("unversionedPrivate: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", unversionedPrivate.LoadBalancer)
  1964  					}
  1965  
  1966  					if diff := cmp.Diff(versionedPublic.LoadBalancer.Subnets, api.Subnets{public1}); diff != "" {
  1967  						t.Errorf("versionedPublic: subnets didn't match: %s", diff)
  1968  					}
  1969  					if !versionedPublic.LoadBalancer.Enabled() {
  1970  						t.Errorf("versionedPublic: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPublic.LoadBalancer)
  1971  					}
  1972  
  1973  					if diff := cmp.Diff(versionedPrivate.LoadBalancer.Subnets, api.Subnets{private1}); diff != "" {
  1974  						t.Errorf("versionedPrivate: subnets didn't match: %s", diff)
  1975  					}
  1976  					if !versionedPrivate.LoadBalancer.Enabled() {
  1977  						t.Errorf("versionedPrivate: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPrivate.LoadBalancer)
  1978  					}
  1979  
  1980  					if diff := cmp.Diff(versionedPublicAlt.LoadBalancer.Subnets, publicSubnets); diff != "" {
  1981  						t.Errorf("versionedPublicAlt: subnets didn't match: %s", diff)
  1982  					}
  1983  					if !versionedPublicAlt.LoadBalancer.Enabled() {
  1984  						t.Errorf("versionedPublicAlt: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPublicAlt.LoadBalancer)
  1985  					}
  1986  
  1987  					if diff := cmp.Diff(versionedPrivateAlt.LoadBalancer.Subnets, privateSubnets); diff != "" {
  1988  						t.Errorf("versionedPrivateAlt: subnets didn't match: %s", diff)
  1989  					}
  1990  					if !versionedPrivateAlt.LoadBalancer.Enabled() {
  1991  						t.Errorf("versionedPrivateAlt: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPrivateAlt.LoadBalancer)
  1992  					}
  1993  
  1994  					if len(addedToCertCommonNames.LoadBalancer.Subnets) != 0 {
  1995  						t.Errorf("addedToCertCommonNames: subnets should be empty but was not: actual=%+v", addedToCertCommonNames.LoadBalancer.Subnets)
  1996  					}
  1997  					if addedToCertCommonNames.LoadBalancer.Enabled() {
  1998  						t.Errorf("addedToCertCommonNames: it should not be enabled as the lb to which controller nodes are added, but it was: loadBalancer=%+v", addedToCertCommonNames.LoadBalancer)
  1999  					}
  2000  
  2001  					if diff := cmp.Diff(elbOnly.LoadBalancer.Subnets, publicSubnets); diff != "" {
  2002  						t.Errorf("elbOnly: subnets didn't match: %s", diff)
  2003  					}
  2004  					if !elbOnly.LoadBalancer.Enabled() {
  2005  						t.Errorf("elbOnly: it should be enabled but it was not: loadBalancer=%+v", elbOnly.LoadBalancer)
  2006  					}
  2007  					if elbOnly.LoadBalancer.ManageELBRecordSet() {
  2008  						t.Errorf("elbOnly: record set should not be managed but it was: loadBalancer=%+v", elbOnly.LoadBalancer)
  2009  					}
  2010  
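        					// ExternalDNSNames is expected to contain every endpoint's dnsName, in lexicographic order.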
  2011  					if diff := cmp.Diff(c.ExternalDNSNames(), []string{"api-alt.example.com", "api.example.com", "api.internal.example.com", "registerme.example.com", "v1api.example.com", "v1api.internal.example.com", "v1apialt.example.com", "v1apialt.internal.example.com"}); diff != "" {
  2012  						t.Errorf("unexpected external DNS names: %s", diff)
  2013  					}
  2014  
  2015  					if !reflect.DeepEqual(c.APIEndpoints.ManagedELBLogicalNames(), []string{"APIEndpointElbOnlyELB", "APIEndpointVersionedPrivateAltELB", "APIEndpointVersionedPrivateELB", "APIEndpointVersionedPublicAltELB", "APIEndpointVersionedPublicELB"}) {
  2016  						t.Errorf("unexpected managed ELB logical names: %s", strings.Join(c.APIEndpoints.ManagedELBLogicalNames(), ", "))
  2017  					}
  2018  				},
  2019  			},
  2020  		},
  2021  		{
  2022  			context: "WithNetworkTopologyExplicitSubnets",
  2023  			configYaml: mainClusterYaml + `
  2024  vpc:
  2025    id: vpc-1a2b3c4d
  2026  internetGateway:
  2027    id: igw-1a2b3c4d
  2028  # routeTableId must be omitted
  2029  # See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
  2030  # routeTableId: rtb-1a2b3c4d
  2031  subnets:
  2032  - name: private1
  2033    availabilityZone: us-west-1a
  2034    instanceCIDR: "10.0.1.0/24"
  2035    private: true
  2036  - name: private2
  2037    availabilityZone: us-west-1b
  2038    instanceCIDR: "10.0.2.0/24"
  2039    private: true
  2040  - name: public1
  2041    availabilityZone: us-west-1a
  2042    instanceCIDR: "10.0.3.0/24"
  2043  - name: public2
  2044    availabilityZone: us-west-1b
  2045    instanceCIDR: "10.0.4.0/24"
  2046  controller:
  2047    subnets:
  2048    - name: private1
  2049    - name: private2
  2050    loadBalancer:
  2051      subnets:
  2052      - name: public1
  2053      - name: public2
  2054      private: false
  2055  etcd:
  2056    subnets:
  2057    - name: private1
  2058    - name: private2
  2059  worker:
  2060    nodePools:
  2061    - name: pool1
  2062      subnets:
  2063      - name: public1
  2064    - name: pool2
  2065      subnets:
  2066      - name: public2
  2067  `,
  2068  			assertConfig: []ConfigTester{
  2069  				hasDefaultExperimentalFeatures,
  2070  				everyPublicSubnetHasRouteToIGW,
  2071  				hasTwoManagedNGWsAndEIPs,
  2072  				func(c *config.Config, t *testing.T) {
  2073  					private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
  2074  					private1.Name = "private1"
  2075  
  2076  					private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
  2077  					private2.Name = "private2"
  2078  
  2079  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  2080  					public1.Name = "public1"
  2081  
  2082  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  2083  					public2.Name = "public2"
  2084  
  2085  					subnets := api.Subnets{
  2086  						private1,
  2087  						private2,
  2088  						public1,
  2089  						public2,
  2090  					}
  2091  					if !reflect.DeepEqual(c.AllSubnets(), subnets) {
  2092  						t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
  2093  					}
  2094  
  2095  					publicSubnets := api.Subnets{
  2096  						public1,
  2097  						public2,
  2098  					}
  2099  					importedPublicSubnets := api.Subnets{
  2100  						api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
  2101  					}
  2102  
  2103  					p := c.NodePools[0]
  2104  					if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
  2105  						t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
  2106  					}
  2107  
  2108  					privateSubnets := api.Subnets{
  2109  						private1,
  2110  						private2,
  2111  					}
  2112  					if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) {
  2113  						t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
  2114  					}
  2115  					if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
  2116  						t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
  2117  					}
  2118  					if diff := cmp.Diff(c.Etcd.Subnets, privateSubnets); diff != "" {
  2119  						t.Errorf("Etcd subnets didn't match: %s", diff)
  2120  					}
  2121  
  2122  					for i, s := range c.PrivateSubnets() {
  2123  						if !s.ManageNATGateway() {
  2124  							t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
  2125  						}
  2126  
  2127  						if s.ManageRouteToInternet() {
  2128  							t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
  2129  						}
  2130  					}
  2131  				},
  2132  			},
  2133  		},
  2134  		{
  2135  			context: "WithNetworkTopologyImplicitSubnets",
  2136  			configYaml: mainClusterYaml + `
  2137  vpc:
  2138    id: vpc-1a2b3c4d
  2139  internetGateway:
  2140    id: igw-1a2b3c4d
  2141  # routeTableId must be omitted
  2142  # See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
  2143  # routeTableId: rtb-1a2b3c4d
  2144  subnets:
  2145  - name: private1
  2146    availabilityZone: us-west-1a
  2147    instanceCIDR: "10.0.1.0/24"
  2148    private: true
  2149  - name: private2
  2150    availabilityZone: us-west-1b
  2151    instanceCIDR: "10.0.2.0/24"
  2152    private: true
  2153  - name: public1
  2154    availabilityZone: us-west-1a
  2155    instanceCIDR: "10.0.3.0/24"
  2156  - name: public2
  2157    availabilityZone: us-west-1b
  2158    instanceCIDR: "10.0.4.0/24"
  2159  `,
  2160  			assertConfig: []ConfigTester{
  2161  				hasDefaultExperimentalFeatures,
  2162  				everyPublicSubnetHasRouteToIGW,
  2163  				hasTwoManagedNGWsAndEIPs,
  2164  				func(c *config.Config, t *testing.T) {
  2165  					private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
  2166  					private1.Name = "private1"
  2167  
  2168  					private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
  2169  					private2.Name = "private2"
  2170  
  2171  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  2172  					public1.Name = "public1"
  2173  
  2174  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  2175  					public2.Name = "public2"
  2176  
  2177  					subnets := api.Subnets{
  2178  						private1,
  2179  						private2,
  2180  						public1,
  2181  						public2,
  2182  					}
  2183  					if !reflect.DeepEqual(c.AllSubnets(), subnets) {
  2184  						t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
  2185  					}
  2186  
  2187  					publicSubnets := api.Subnets{
  2188  						public1,
  2189  						public2,
  2190  					}
  2191  
  2192  					if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
  2193  						t.Errorf("Controller subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.Subnets)
  2194  					}
  2195  					if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
  2196  						t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
  2197  					}
  2198  					if !reflect.DeepEqual(c.Etcd.Subnets, publicSubnets) {
  2199  						t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", publicSubnets, c.Etcd.Subnets)
  2200  					}
  2201  
  2202  					for i, s := range c.PrivateSubnets() {
  2203  						if !s.ManageNATGateway() {
  2204  							t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
  2205  						}
  2206  
  2207  						if s.ManageRouteToInternet() {
  2208  							t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
  2209  						}
  2210  					}
  2211  				},
  2212  			},
  2213  		},
  2214  		{
  2215  			context: "WithNetworkTopologyControllerPrivateLB",
  2216  			configYaml: mainClusterYaml + `
  2217  vpc:
  2218    id: vpc-1a2b3c4d
  2219  internetGateway:
  2220    id: igw-1a2b3c4d
  2221  # routeTableId must be omitted
  2222  # See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
  2223  # routeTableId: rtb-1a2b3c4d
  2224  subnets:
  2225  - name: private1
  2226    availabilityZone: us-west-1a
  2227    instanceCIDR: "10.0.1.0/24"
  2228    private: true
  2229  - name: private2
  2230    availabilityZone: us-west-1b
  2231    instanceCIDR: "10.0.2.0/24"
  2232    private: true
  2233  - name: public1
  2234    availabilityZone: us-west-1a
  2235    instanceCIDR: "10.0.3.0/24"
  2236  - name: public2
  2237    availabilityZone: us-west-1b
  2238    instanceCIDR: "10.0.4.0/24"
  2239  controller:
  2240    subnets:
  2241    - name: private1
  2242    - name: private2
  2243    loadBalancer:
  2244      private: true
  2245  etcd:
  2246    subnets:
  2247    - name: private1
  2248    - name: private2
  2249  worker:
  2250    nodePools:
  2251    - name: pool1
  2252      subnets:
  2253      - name: public1
  2254    - name: pool2
  2255      subnets:
  2256      - name: public2
  2257  `,
  2258  			assertConfig: []ConfigTester{
  2259  				hasDefaultExperimentalFeatures,
  2260  				everyPublicSubnetHasRouteToIGW,
  2261  				hasTwoManagedNGWsAndEIPs,
  2262  				func(c *config.Config, t *testing.T) {
  2263  					private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
  2264  					private1.Name = "private1"
  2265  
  2266  					private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
  2267  					private2.Name = "private2"
  2268  
  2269  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  2270  					public1.Name = "public1"
  2271  
  2272  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  2273  					public2.Name = "public2"
  2274  
  2275  					subnets := api.Subnets{
  2276  						private1,
  2277  						private2,
  2278  						public1,
  2279  						public2,
  2280  					}
  2281  					if !reflect.DeepEqual(c.AllSubnets(), subnets) {
  2282  						t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
  2283  					}
  2284  
  2285  					importedPublicSubnets := api.Subnets{
  2286  						api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
  2287  					}
  2288  					p := c.NodePools[0]
  2289  					if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
  2290  						t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
  2291  					}
  2292  
  2293  					privateSubnets := api.Subnets{
  2294  						private1,
  2295  						private2,
  2296  					}
  2297  					if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) {
  2298  						t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
  2299  					}
  2300  					if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, privateSubnets) {
  2301  						t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
  2302  					}
  2303  					if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
  2304  						t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
  2305  					}
  2306  
  2307  					for i, s := range c.PrivateSubnets() {
  2308  						if !s.ManageNATGateway() {
  2309  							t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
  2310  						}
  2311  
  2312  						if s.ManageRouteToInternet() {
  2313  							t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
  2314  						}
  2315  					}
  2316  				},
  2317  			},
  2318  		},
  2319  		{
  2320  			context: "WithNetworkTopologyControllerPublicLB",
  2321  			configYaml: mainClusterYaml + `
  2322  vpc:
  2323    id: vpc-1a2b3c4d
  2324  internetGateway:
  2325    id: igw-1a2b3c4d
  2326  # routeTableId must be omitted
  2327  # See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
  2328  # routeTableId: rtb-1a2b3c4d
  2329  subnets:
  2330  - name: private1
  2331    availabilityZone: us-west-1a
  2332    instanceCIDR: "10.0.1.0/24"
  2333    private: true
  2334  - name: private2
  2335    availabilityZone: us-west-1b
  2336    instanceCIDR: "10.0.2.0/24"
  2337    private: true
  2338  - name: public1
  2339    availabilityZone: us-west-1a
  2340    instanceCIDR: "10.0.3.0/24"
  2341  - name: public2
  2342    availabilityZone: us-west-1b
  2343    instanceCIDR: "10.0.4.0/24"
  2344  controller:
  2345    loadBalancer:
  2346      private: false
  2347  etcd:
  2348    subnets:
  2349    - name: private1
  2350    - name: private2
  2351  worker:
  2352    nodePools:
  2353    - name: pool1
  2354      subnets:
  2355      - name: public1
  2356  `,
  2357  			assertConfig: []ConfigTester{
  2358  				hasDefaultExperimentalFeatures,
  2359  				everyPublicSubnetHasRouteToIGW,
  2360  				hasTwoManagedNGWsAndEIPs,
  2361  				func(c *config.Config, t *testing.T) {
  2362  					private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
  2363  					private1.Name = "private1"
  2364  
  2365  					private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
  2366  					private2.Name = "private2"
  2367  
  2368  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  2369  					public1.Name = "public1"
  2370  
  2371  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  2372  					public2.Name = "public2"
  2373  
  2374  					subnets := api.Subnets{
  2375  						private1,
  2376  						private2,
  2377  						public1,
  2378  						public2,
  2379  					}
  2380  					publicSubnets := api.Subnets{
  2381  						public1,
  2382  						public2,
  2383  					}
  2384  					privateSubnets := api.Subnets{
  2385  						private1,
  2386  						private2,
  2387  					}
  2388  					importedPublicSubnets := api.Subnets{
  2389  						api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
  2390  					}
  2391  
  2392  					if !reflect.DeepEqual(c.AllSubnets(), subnets) {
  2393  						t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
  2394  					}
  2395  					p := c.NodePools[0]
  2396  					if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
  2397  						t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
  2398  					}
  2399  					if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
  2400  						t.Errorf("Controller subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.Subnets)
  2401  					}
  2402  					if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
  2403  						t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
  2404  					}
  2405  					if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
  2406  						t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
  2407  					}
  2408  
  2409  					for i, s := range c.PrivateSubnets() {
  2410  						if !s.ManageNATGateway() {
  2411  							t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
  2412  						}
  2413  
  2414  						if s.ManageRouteToInternet() {
  2415  							t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
  2416  						}
  2417  					}
  2418  				},
  2419  			},
  2420  		},
  2421  		{
  2422  			context: "WithNetworkTopologyExistingVaryingSubnets",
  2423  			configYaml: mainClusterYaml + `
  2424  vpc:
  2425    id: vpc-1a2b3c4d
  2426  subnets:
  2427  - name: private1
  2428    availabilityZone: us-west-1a
  2429    id: subnet-1
  2430    private: true
  2431  - name: private2
  2432    availabilityZone: us-west-1b
  2433    idFromStackOutput: mycluster-private-subnet-1
  2434    private: true
  2435  - name: public1
  2436    availabilityZone: us-west-1a
  2437    id: subnet-2
  2438  - name: public2
  2439    availabilityZone: us-west-1b
  2440    idFromStackOutput: mycluster-public-subnet-1
  2441  controller:
  2442    loadBalancer:
  2443      private: false
  2444  etcd:
  2445    subnets:
  2446    - name: private1
  2447    - name: private2
  2448  worker:
  2449    nodePools:
  2450    - name: pool1
  2451      subnets:
  2452      - name: public1
  2453    - name: pool2
  2454      subnets:
  2455      - name: public2
  2456  `,
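        			// "id" references an existing subnet directly, while "idFromStackOutput" imports the id
        			// from another stack's output; the assertions below expect the corresponding subnet types.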
  2457  			assertConfig: []ConfigTester{
  2458  				hasDefaultExperimentalFeatures,
  2459  				hasNoNGWsOrEIPsOrRoutes,
  2460  				func(c *config.Config, t *testing.T) {
  2461  					private1 := api.NewExistingPrivateSubnet("us-west-1a", "subnet-1")
  2462  					private1.Name = "private1"
  2463  
  2464  					private2 := api.NewImportedPrivateSubnet("us-west-1b", "mycluster-private-subnet-1")
  2465  					private2.Name = "private2"
  2466  
  2467  					public1 := api.NewExistingPublicSubnet("us-west-1a", "subnet-2")
  2468  					public1.Name = "public1"
  2469  
  2470  					public2 := api.NewImportedPublicSubnet("us-west-1b", "mycluster-public-subnet-1")
  2471  					public2.Name = "public2"
  2472  
  2473  					subnets := api.Subnets{
  2474  						private1,
  2475  						private2,
  2476  						public1,
  2477  						public2,
  2478  					}
  2479  					controllerPublicSubnets := api.Subnets{
  2480  						public1,
  2481  						public2,
  2482  					}
  2483  					nodepoolPublicSubnets := api.Subnets{
  2484  						public1,
  2485  					}
  2486  					privateSubnets := api.Subnets{
  2487  						private1,
  2488  						private2,
  2489  					}
  2490  
  2491  					if !reflect.DeepEqual(c.AllSubnets(), subnets) {
  2492  						t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
  2493  					}
  2494  					p := c.NodePools[0]
  2495  					if !reflect.DeepEqual(p.Subnets, nodepoolPublicSubnets) {
  2496  						t.Errorf("Worker subnets didn't match: expected=%v actual=%v", nodepoolPublicSubnets, p.Subnets)
  2497  					}
  2498  					if !reflect.DeepEqual(c.Controller.Subnets, controllerPublicSubnets) {
  2499  						t.Errorf("Controller subnets didn't match: expected=%v actual=%v", controllerPublicSubnets, c.Controller.Subnets)
  2500  					}
  2501  					if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, controllerPublicSubnets) {
  2502  						t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", controllerPublicSubnets, c.Controller.LoadBalancer.Subnets)
  2503  					}
  2504  					if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
  2505  						t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
  2506  					}
  2507  
  2508  					for i, s := range c.PrivateSubnets() {
  2509  						if s.ManageNATGateway() {
  2510  							t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i)
  2511  						}
  2512  
  2513  						if s.ManageRouteToInternet() {
  2514  							t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
  2515  						}
  2516  					}
  2517  				},
  2518  			},
  2519  		},
  2520  		{
  2521  			context: "WithNetworkTopologyAllExistingPrivateSubnets",
  2522  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + fmt.Sprintf(`
  2523  vpc:
  2524    id: vpc-1a2b3c4d
  2525  subnets:
  2526  - name: private1
  2527    availabilityZone: us-west-1a
  2528    id: subnet-1
  2529    private: true
  2530  - name: private2
  2531    availabilityZone: us-west-1b
  2532    idFromStackOutput: mycluster-private-subnet-1
  2533    private: true
  2534  controller:
  2535    subnets:
  2536    - name: private1
  2537    - name: private2
  2538  etcd:
  2539    subnets:
  2540    - name: private1
  2541    - name: private2
  2542  worker:
  2543    nodePools:
  2544    - name: pool1
  2545      subnets:
  2546      - name: private1
  2547    - name: pool2
  2548      subnets:
  2549      - name: private2
  2550  apiEndpoints:
  2551  - name: public
  2552    dnsName: "%s"
  2553    loadBalancer:
  2554      hostedZone:
  2555        id: hostedzone-xxxx
  2556      private: true
  2557  `, kubeAwsSettings.externalDNSName),
  2558  			assertConfig: []ConfigTester{
  2559  				hasDefaultExperimentalFeatures,
  2560  				hasNoNGWsOrEIPsOrRoutes,
  2561  			},
  2562  		},
  2563  		{
  2564  			context: "WithNetworkTopologyAllExistingPublicSubnets",
  2565  			configYaml: mainClusterYaml + `
  2566  vpc:
  2567    id: vpc-1a2b3c4d
  2568  subnets:
  2569  - name: public1
  2570    availabilityZone: us-west-1a
  2571    id: subnet-2
  2572  - name: public2
  2573    availabilityZone: us-west-1b
  2574    idFromStackOutput: mycluster-public-subnet-1
  2575  etcd:
  2576    subnets:
  2577    - name: public1
  2578    - name: public2
  2579  worker:
  2580    nodePools:
  2581    - name: pool1
  2582      subnets:
  2583      - name: public1
  2584    - name: pool2
  2585      subnets:
  2586      - name: public2
  2587  `,
  2588  			assertConfig: []ConfigTester{
  2589  				hasDefaultExperimentalFeatures,
  2590  				hasNoNGWsOrEIPsOrRoutes,
  2591  			},
  2592  		},
  2593  		{
  2594  			context: "WithNetworkTopologyExistingNATGateways",
  2595  			configYaml: mainClusterYaml + `
  2596  vpc:
  2597    id: vpc-1a2b3c4d
  2598  internetGateway:
  2599    id: igw-1a2b3c4d
  2600  subnets:
  2601  - name: private1
  2602    availabilityZone: us-west-1a
  2603    instanceCIDR: "10.0.1.0/24"
  2604    private: true
  2605    natGateway:
  2606      id: ngw-11111111
  2607  - name: private2
  2608    availabilityZone: us-west-1b
  2609    instanceCIDR: "10.0.2.0/24"
  2610    private: true
  2611    natGateway:
  2612      id: ngw-22222222
  2613  - name: public1
  2614    availabilityZone: us-west-1a
  2615    instanceCIDR: "10.0.3.0/24"
  2616  - name: public2
  2617    availabilityZone: us-west-1b
  2618    instanceCIDR: "10.0.4.0/24"
  2619  etcd:
  2620    subnets:
  2621    - name: private1
  2622    - name: private2
  2623  worker:
  2624    nodePools:
  2625    - name: pool1
  2626      subnets:
  2627      - name: public1
  2628    - name: pool2
  2629      subnets:
  2630      - name: public2
  2631  `,
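        			// With natGateway.id preconfigured, kube-aws should create no NAT gateways of its own,
        			// only the two routes to the existing gateways.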
  2632  			assertConfig: []ConfigTester{
  2633  				hasDefaultExperimentalFeatures,
  2634  				hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs(2),
  2635  				func(c *config.Config, t *testing.T) {
  2636  					private1 := api.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "10.0.1.0/24", "ngw-11111111")
  2637  					private1.Name = "private1"
  2638  
  2639  					private2 := api.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "10.0.2.0/24", "ngw-22222222")
  2640  					private2.Name = "private2"
  2641  
  2642  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  2643  					public1.Name = "public1"
  2644  
  2645  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  2646  					public2.Name = "public2"
  2647  
  2648  					subnets := api.Subnets{
  2649  						private1,
  2650  						private2,
  2651  						public1,
  2652  						public2,
  2653  					}
  2654  					publicSubnets := api.Subnets{
  2655  						public1,
  2656  						public2,
  2657  					}
  2658  					privateSubnets := api.Subnets{
  2659  						private1,
  2660  						private2,
  2661  					}
  2662  					importedPublicSubnets := api.Subnets{
  2663  						api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
  2664  					}
  2665  
  2666  					if diff := cmp.Diff(c.AllSubnets(), subnets); diff != "" {
  2667  						t.Errorf("Managed subnets didn't match: %s", diff)
  2668  					}
  2669  					p := c.NodePools[0]
  2670  					if diff := cmp.Diff(p.Subnets, importedPublicSubnets); diff != "" {
  2671  						t.Errorf("Worker subnets didn't match: %s", diff)
  2672  					}
  2673  					if diff := cmp.Diff(c.Controller.Subnets, publicSubnets); diff != "" {
  2674  						t.Errorf("Controller subnets didn't match: %s", diff)
  2675  					}
  2676  					if diff := cmp.Diff(c.Controller.LoadBalancer.Subnets, publicSubnets); diff != "" {
  2677  						t.Errorf("Controller loadbalancer subnets didn't match: %s", diff)
  2678  					}
  2679  					if diff := cmp.Diff(c.Etcd.Subnets, privateSubnets); diff != "" {
  2680  						t.Errorf("Etcd subnets didn't match: %s", diff)
  2681  					}
  2682  
  2683  					for i, s := range c.PrivateSubnets() {
  2684  						if s.ManageNATGateway() {
  2685  							t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i)
  2686  						}
  2687  
  2688  						if s.ManageRouteToInternet() {
  2689  							t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
  2690  						}
  2691  					}
  2692  				},
  2693  			},
  2694  		},
  2695  		{
  2696  			context: "WithNetworkTopologyExistingNATGatewayEIPs",
  2697  			configYaml: mainClusterYaml + `
  2698  vpc:
  2699    id: vpc-1a2b3c4d
  2700  internetGateway:
  2701    id: igw-1a2b3c4d
  2702  subnets:
  2703  - name: private1
  2704    availabilityZone: us-west-1a
  2705    instanceCIDR: "10.0.1.0/24"
  2706    private: true
  2707    natGateway:
  2708      eipAllocationId: eipalloc-11111111
  2709  - name: private2
  2710    availabilityZone: us-west-1b
  2711    instanceCIDR: "10.0.2.0/24"
  2712    private: true
  2713    natGateway:
  2714      eipAllocationId: eipalloc-22222222
  2715  - name: public1
  2716    availabilityZone: us-west-1a
  2717    instanceCIDR: "10.0.3.0/24"
  2718  - name: public2
  2719    availabilityZone: us-west-1b
  2720    instanceCIDR: "10.0.4.0/24"
  2721  etcd:
  2722    subnets:
  2723    - name: private1
  2724    - name: private2
  2725  worker:
  2726    nodePools:
  2727    - name: pool1
  2728      subnets:
  2729      - name: public1
  2730    - name: pool2
  2731      subnets:
  2732      - name: public2
  2733  `,
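        			// With only eipAllocationId preconfigured, kube-aws still manages the NAT gateways
        			// themselves but associates the two existing EIPs instead of allocating new ones.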
  2734  			assertConfig: []ConfigTester{
  2735  				hasDefaultExperimentalFeatures,
  2736  				hasSpecificNumOfManagedNGWsWithUnmanagedEIPs(2),
  2737  				hasPrivateSubnetsWithManagedNGWs(2),
  2738  				func(c *config.Config, t *testing.T) {
  2739  					private1 := api.NewPrivateSubnetWithPreconfiguredNATGatewayEIP("us-west-1a", "10.0.1.0/24", "eipalloc-11111111")
  2740  					private1.Name = "private1"
  2741  
  2742  					private2 := api.NewPrivateSubnetWithPreconfiguredNATGatewayEIP("us-west-1b", "10.0.2.0/24", "eipalloc-22222222")
  2743  					private2.Name = "private2"
  2744  
  2745  					public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
  2746  					public1.Name = "public1"
  2747  
  2748  					public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
  2749  					public2.Name = "public2"
  2750  
  2751  					subnets := api.Subnets{
  2752  						private1,
  2753  						private2,
  2754  						public1,
  2755  						public2,
  2756  					}
  2757  					publicSubnets := api.Subnets{
  2758  						public1,
  2759  						public2,
  2760  					}
  2761  					privateSubnets := api.Subnets{
  2762  						private1,
  2763  						private2,
  2764  					}
  2765  					importedPublicSubnets := api.Subnets{
  2766  						api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
  2767  					}
  2768  
  2769  					if diff := cmp.Diff(c.AllSubnets(), subnets); diff != "" {
  2770  						t.Errorf("Managed subnets didn't match: %s", diff)
  2771  					}
  2772  					p := c.NodePools[0]
  2773  					if diff := cmp.Diff(p.Subnets, importedPublicSubnets); diff != "" {
  2774  						t.Errorf("Worker subnets didn't match: %s", diff)
  2775  					}
  2776  					if diff := cmp.Diff(c.Controller.Subnets, publicSubnets); diff != "" {
  2777  						t.Errorf("Controller subnets didn't match: %s", diff)
  2778  					}
  2779  					if diff := cmp.Diff(c.Controller.LoadBalancer.Subnets, publicSubnets); diff != "" {
  2780  						t.Errorf("Controller loadbalancer subnets didn't match: %s", diff)
  2781  					}
  2782  					if diff := cmp.Diff(c.Etcd.Subnets, privateSubnets); diff != "" {
  2783  						t.Errorf("Etcd subnets didn't match: %s", diff)
  2784  					}
  2785  				},
  2786  			},
  2787  		},
  2788  		{
  2789  			context: "WithNetworkTopologyVaryingPublicSubnets",
  2790  			configYaml: mainClusterYaml + `
  2791  vpc:
  2792    id: vpc-1a2b3c4d
  2793  # required only for the managed subnet "public1"
  2794  # "public2" is assumed to already have an existing route table and an igw associated with it
  2795  internetGateway:
  2796    id: igw-1a2b3c4d
  2797  subnets:
  2798  - name: public1
  2799    availabilityZone: us-west-1a
  2800    instanceCIDR: "10.0.1.0/24"
  2801  - name: public2
  2802    availabilityZone: us-west-1b
  2803    id: subnet-2
  2804  controller:
  2805    loadBalancer:
  2806      private: false
  2807  etcd:
  2808    subnets:
  2809    - name: public1
  2810    - name: public2
  2811  worker:
  2812    nodePools:
  2813    - name: pool1
  2814      subnets:
  2815      - name: public1
  2816    - name: pool2
  2817      subnets:
  2818      - name: public2
  2819  `,
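        			// No per-field assertions here; this case only verifies that mixing a managed public
        			// subnet with an existing one parses and validates successfully.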
  2820  			assertConfig: []ConfigTester{},
  2821  		},
  2822  		{
  2823  			context: "WithSpotFleetEnabled",
  2824  			configYaml: minimalValidConfigYaml + `
  2825  worker:
  2826    nodePools:
  2827    - name: pool1
  2828      spotFleet:
  2829        targetCapacity: 10
  2830  `,
  2831  			assertConfig: []ConfigTester{
  2832  				hasDefaultExperimentalFeatures,
  2833  				hasDefaultLaunchSpecifications,
  2834  				spotFleetBasedNodePoolHasWaitSignalDisabled,
  2835  			},
  2836  		},
  2837  		{
  2838  			context: "WithSpotFleetEnabledWithCustomIamRole",
  2839  			configYaml: minimalValidConfigYaml + `
  2840  worker:
  2841    nodePools:
  2842    - name: pool1
  2843      spotFleet:
  2844        targetCapacity: 10
  2845        iamFleetRoleArn: custom-iam-role
  2846  `,
  2847  			assertConfig: []ConfigTester{
  2848  				hasDefaultExperimentalFeatures,
  2849  				hasDefaultLaunchSpecifications,
  2850  				spotFleetBasedNodePoolHasWaitSignalDisabled,
  2851  			},
  2852  		},
  2853  		{
  2854  			context: "WithSpotFleetWithCustomGp2RootVolumeSettings",
  2855  			configYaml: minimalValidConfigYaml + `
  2856  worker:
  2857    nodePools:
  2858    - name: pool1
  2859      spotFleet:
  2860        targetCapacity: 10
  2861        unitRootVolumeSize: 40
  2862        launchSpecifications:
  2863        - weightedCapacity: 1
  2864          instanceType: c4.large
  2865        - weightedCapacity: 2
  2866          instanceType: c4.xlarge
  2867          rootVolume:
  2868            size: 100
  2869  `,
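        			// unitRootVolumeSize is scaled by each launch specification's weightedCapacity unless
        			// the spec overrides rootVolume explicitly, as the expectations below spell out.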
  2870  			assertConfig: []ConfigTester{
  2871  				hasDefaultExperimentalFeatures,
  2872  				spotFleetBasedNodePoolHasWaitSignalDisabled,
  2873  				func(c *config.Config, t *testing.T) {
  2874  					expected := []api.LaunchSpecification{
  2875  						{
  2876  							WeightedCapacity: 1,
  2877  							InstanceType:     "c4.large",
  2878  							SpotPrice:        "",
  2879  							// RootVolumeSize was not specified in the configYaml but should default to workerRootVolumeSize * weightedCapacity
  2880  							// RootVolumeType was not specified in the configYaml but should default to "gp2"
  2881  							RootVolume: api.NewGp2RootVolume(40),
  2882  						},
  2883  						{
  2884  							WeightedCapacity: 2,
  2885  							InstanceType:     "c4.xlarge",
  2886  							SpotPrice:        "",
  2887  							RootVolume:       api.NewGp2RootVolume(100),
  2888  						},
  2889  					}
  2890  					p := c.NodePools[0]
  2891  					actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
  2892  					if !reflect.DeepEqual(expected, actual) {
  2893  						t.Errorf(
  2894  							"LaunchSpecifications didn't match: expected=%v actual=%v",
  2895  							expected,
  2896  							actual,
  2897  						)
  2898  					}
  2899  				},
  2900  			},
  2901  		},
  2902  		{
  2903  			context: "WithSpotFleetWithCustomInstanceTypes",
  2904  			configYaml: minimalValidConfigYaml + `
  2905  worker:
  2906    nodePools:
  2907    - name: pool1
  2908      spotFleet:
  2909        targetCapacity: 10
  2910        unitRootVolumeSize: 40
  2911        launchSpecifications:
  2912        - weightedCapacity: 1
  2913          instanceType: m4.large
  2914        - weightedCapacity: 2
  2915          instanceType: m4.xlarge
  2916  `,
  2917  			assertConfig: []ConfigTester{
  2918  				hasDefaultExperimentalFeatures,
  2919  				spotFleetBasedNodePoolHasWaitSignalDisabled,
  2920  				func(c *config.Config, t *testing.T) {
  2921  					expected := []api.LaunchSpecification{
  2922  						{
  2923  							WeightedCapacity: 1,
  2924  							InstanceType:     "m4.large",
  2925  							SpotPrice:        "",
  2926  							// RootVolumeType was not specified in the configYaml but should default to "gp2"
  2927  							RootVolume: api.NewGp2RootVolume(40),
  2928  						},
  2929  						{
  2930  							WeightedCapacity: 2,
  2931  							InstanceType:     "m4.xlarge",
  2932  							SpotPrice:        "",
  2933  							RootVolume:       api.NewGp2RootVolume(80),
  2934  						},
  2935  					}
  2936  					p := c.NodePools[0]
  2937  					actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
  2938  					if !reflect.DeepEqual(expected, actual) {
  2939  						t.Errorf(
  2940  							"LaunchSpecifications didn't match: expected=%v actual=%v",
  2941  							expected,
  2942  							actual,
  2943  						)
  2944  					}
  2945  				},
  2946  			},
  2947  		},
  2948  		{
  2949  			context: "WithSpotFleetWithCustomIo1RootVolumeSettings",
  2950  			configYaml: minimalValidConfigYaml + `
  2951  worker:
  2952    nodePools:
  2953    - name: pool1
  2954      spotFleet:
  2955        targetCapacity: 10
  2956        rootVolumeType: io1
  2957        unitRootVolumeSize: 40
  2958        unitRootVolumeIOPS: 100
  2959        launchSpecifications:
  2960        - weightedCapacity: 1
  2961          instanceType: c4.large
  2962        - weightedCapacity: 2
  2963          instanceType: c4.xlarge
  2964          rootVolume:
  2965            iops: 500
  2966  `,
  2967  			assertConfig: []ConfigTester{
  2968  				hasDefaultExperimentalFeatures,
  2969  				spotFleetBasedNodePoolHasWaitSignalDisabled,
  2970  				func(c *config.Config, t *testing.T) {
  2971  					expected := []api.LaunchSpecification{
  2972  						{
  2973  							WeightedCapacity: 1,
  2974  							InstanceType:     "c4.large",
  2975  							SpotPrice:        "",
  2976  							// RootVolumeSize was not specified in the configYaml but should default to workerRootVolumeSize * weightedCapacity
  2977  							// RootVolumeIOPS was not specified in the configYaml but should default to workerRootVolumeIOPS * weightedCapacity
  2978  							// RootVolumeType was not specified in the configYaml but should default to "io1"
  2979  							RootVolume: api.NewIo1RootVolume(40, 100),
  2980  						},
  2981  						{
  2982  							WeightedCapacity: 2,
  2983  							InstanceType:     "c4.xlarge",
  2984  							SpotPrice:        "",
  2985  							// RootVolumeType was not specified in the configYaml but should default to "io1"
  2986  							RootVolume: api.NewIo1RootVolume(80, 500),
  2987  						},
  2988  					}
  2989  					p := c.NodePools[0]
  2990  					actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
  2991  					if !reflect.DeepEqual(expected, actual) {
  2992  						t.Errorf(
  2993  							"LaunchSpecifications didn't match: expected=%v actual=%v",
  2994  							expected,
  2995  							actual,
  2996  						)
  2997  					}
  2998  				},
  2999  			},
  3000  		},
  3001  		{
  3002  			context: "WithVpcIdSpecified",
  3003  			configYaml: minimalValidConfigYaml + `
  3004  vpc:
  3005    id: vpc-1a2b3c4d
  3006  internetGateway:
  3007    id: igw-1a2b3c4d
  3008  `,
  3009  			assertConfig: []ConfigTester{
  3010  				hasDefaultEtcdSettings,
  3011  				hasDefaultExperimentalFeatures,
  3012  				func(c *config.Config, t *testing.T) {
  3013  					vpcId := "vpc-1a2b3c4d"
  3014  					if c.VPC.ID != vpcId {
  3015  						t.Errorf("vpc id didn't match: expected=%v actual=%v", vpcId, c.VPC.ID)
  3016  					}
  3017  					igwId := "igw-1a2b3c4d"
  3018  					if c.InternetGateway.ID != igwId {
  3019  						t.Errorf("internet gateway id didn't match: expected=%v actual=%v", igwId, c.InternetGateway.ID)
  3020  					}
  3021  				},
  3022  			},
  3023  		},
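        		// The legacy top-level vpcId/internetGatewayId keys should populate the same fields as the
        		// nested vpc.id/internetGateway.id form used in the previous case.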
  3024  		{
  3025  			context: "WithLegacyVpcAndIGWIdSpecified",
  3026  			configYaml: minimalValidConfigYaml + `
  3027  vpcId: vpc-1a2b3c4d
  3028  internetGatewayId: igw-1a2b3c4d
  3029  `,
  3030  			assertConfig: []ConfigTester{
  3031  				hasDefaultEtcdSettings,
  3032  				hasDefaultExperimentalFeatures,
  3033  				func(c *config.Config, t *testing.T) {
  3034  					vpcId := "vpc-1a2b3c4d"
  3035  					if c.VPC.ID != vpcId {
  3036  						t.Errorf("vpc id didn't match: expected=%v actual=%v", vpcId, c.VPC.ID)
  3037  					}
  3038  					igwId := "igw-1a2b3c4d"
  3039  					if c.InternetGateway.ID != igwId {
  3040  						t.Errorf("internet gateway id didn't match: expected=%v actual=%v", igwId, c.InternetGateway.ID)
  3041  					}
  3042  				},
  3043  			},
  3044  		},
  3045  		{
  3046  			context: "WithVpcIdAndRouteTableIdSpecified",
  3047  			configYaml: mainClusterYaml + `
  3048  vpc:
  3049    id: vpc-1a2b3c4d
  3050  subnets:
  3051  - name: Subnet0
  3052    availabilityZone: ` + firstAz + `
  3053    instanceCIDR: "10.0.0.0/24"
  3054    routeTable:
  3055      id: rtb-1a2b3c4d
  3056  `,
  3057  			assertConfig: []ConfigTester{
  3058  				hasDefaultExperimentalFeatures,
  3059  				func(c *config.Config, t *testing.T) {
  3060  					subnet1 := api.NewPublicSubnetWithPreconfiguredRouteTable(firstAz, "10.0.0.0/24", "rtb-1a2b3c4d")
  3061  					subnet1.Name = "Subnet0"
  3062  					subnets := api.Subnets{
  3063  						subnet1,
  3064  					}
  3065  					expected := api.EtcdSettings{
  3066  						Etcd: api.Etcd{
  3067  							Cluster: api.EtcdCluster{
  3068  								Version: "v3.3.17",
  3069  							},
  3070  							EC2Instance: api.EC2Instance{
  3071  								Count:        1,
  3072  								InstanceType: "t2.medium",
  3073  								RootVolume: api.RootVolume{
  3074  									Size: 30,
  3075  									Type: "gp2",
  3076  									IOPS: 0,
  3077  								},
  3078  								Tenancy: "default",
  3079  							},
  3080  							DataVolume: api.DataVolume{
  3081  								Size:      30,
  3082  								Type:      "gp2",
  3083  								IOPS:      0,
  3084  								Ephemeral: false,
  3085  							},
  3086  							Subnets: subnets,
  3087  							UserSuppliedArgs: api.UserSuppliedArgs{
  3088  								QuotaBackendBytes: api.DefaultQuotaBackendBytes,
  3089  							},
  3090  						},
  3091  					}
  3092  					actual := c.EtcdSettings
  3093  					if !reflect.DeepEqual(expected, actual) {
  3094  						t.Errorf(
  3095  							"EtcdSettings didn't match: expected=%v actual=%v",
  3096  							expected,
  3097  							actual,
  3098  						)
  3099  					}
  3100  				},
  3101  			},
  3102  		},
  3103  		{
  3104  			context: "WithWorkerManagedIamRoleName",
  3105  			configYaml: minimalValidConfigYaml + `
  3106  worker:
  3107    nodePools:
  3108    - name: pool1
  3109      iam:
  3110        role:
  3111          name: "myManagedRole"
  3112  `,
  3113  			assertConfig: []ConfigTester{
  3114  				hasDefaultEtcdSettings,
  3115  				hasDefaultExperimentalFeatures,
  3116  				func(c *config.Config, t *testing.T) {
  3117  					if c.NodePools[0].IAMConfig.Role.Name != "myManagedRole" {
  3118  						t.Errorf("iam.role.name: expected=myManagedRole actual=%s", c.NodePools[0].IAMConfig.Role.Name)
  3119  					}
  3120  				},
  3121  			},
  3122  		},
  3123  		{
  3124  			context: "WithWorkerManagedPolicies",
  3125  			configYaml: minimalValidConfigYaml + `
  3126  worker:
  3127    nodePools:
  3128    - name: pool1
  3129      iam:
  3130        role:
  3131          managedPolicies:
  3132           - arn: "arn:aws:iam::aws:policy/AdministratorAccess"
  3133           - arn: "arn:aws:iam::000000000000:policy/myManagedPolicy"
  3134  `,
  3135  			assertConfig: []ConfigTester{
  3136  				hasDefaultEtcdSettings,
  3137  				hasDefaultExperimentalFeatures,
  3138  				func(c *config.Config, t *testing.T) {
  3139  					if len(c.NodePools[0].IAMConfig.Role.ManagedPolicies) < 2 {
  3140  						t.Errorf("iam.role.managedPolicies: incorrect number of policies expected=2 actual=%d", len(c.NodePools[0].IAMConfig.Role.ManagedPolicies))
  3141  					}
  3142  					if c.NodePools[0].IAMConfig.Role.ManagedPolicies[0].Arn != "arn:aws:iam::aws:policy/AdministratorAccess" {
  3143  						t.Errorf("iam.role.managedPolicies: expected=arn:aws:iam::aws:policy/AdministratorAccess actual=%s", c.NodePools[0].IAMConfig.Role.ManagedPolicies[0].Arn)
  3144  					}
  3145  					if c.NodePools[0].IAMConfig.Role.ManagedPolicies[1].Arn != "arn:aws:iam::000000000000:policy/myManagedPolicy" {
  3146  						t.Errorf("iam.role.managedPolicies: expected=arn:aws:iam::000000000000:policy/myManagedPolicy actual=%s", c.NodePools[0].IAMConfig.Role.ManagedPolicies[1].Arn)
  3147  					}
  3148  				},
  3149  			},
  3150  		},
  3151  		{
  3152  			context: "WithWorkerExistingInstanceProfile",
  3153  			configYaml: minimalValidConfigYaml + `
  3154  worker:
  3155    nodePools:
  3156    - name: pool1
  3157      iam:
  3158        instanceProfile:
  3159          arn: "arn:aws:iam::000000000000:instance-profile/myInstanceProfile"
  3160  `,
  3161  			assertConfig: []ConfigTester{
  3162  				hasDefaultEtcdSettings,
  3163  				hasDefaultExperimentalFeatures,
  3164  				func(c *config.Config, t *testing.T) {
  3165  					if c.NodePools[0].IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myInstanceProfile" {
  3166  						t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myInstanceProfile actual=%s", c.NodePools[0].IAMConfig.InstanceProfile.Arn)
  3167  					}
  3168  				},
  3169  			},
  3170  		},
  3171  		{
  3172  			context: "WithWorkerAndControllerExistingInstanceProfile",
  3173  			configYaml: minimalValidConfigYaml + `
  3174  controller:
  3175    iam:
  3176      instanceProfile:
  3177        arn: "arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile"
  3178  worker:
  3179    nodePools:
  3180    - name: pool1
  3181      iam:
  3182        instanceProfile:
  3183          arn: "arn:aws:iam::000000000000:instance-profile/myInstanceProfile"
  3184  `,
  3185  			assertConfig: []ConfigTester{
  3186  				hasDefaultEtcdSettings,
  3187  				hasDefaultExperimentalFeatures,
  3188  				func(c *config.Config, t *testing.T) {
  3189  					if c.Controller.IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile" {
  3190  						t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile actual=%s", c.Controller.IAMConfig.InstanceProfile.Arn)
  3191  					}
  3192  					if c.NodePools[0].IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myInstanceProfile" {
  3193  						t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myInstanceProfile actual=%s", c.NodePools[0].IAMConfig.InstanceProfile.Arn)
  3194  					}
  3195  				},
  3196  			},
  3197  		},
  3198  		{
  3199  			context: "WithWorkerSecurityGroupIds",
  3200  			configYaml: minimalValidConfigYaml + `
  3201  worker:
  3202    nodePools:
  3203    - name: pool1
  3204      securityGroupIds:
  3205      - sg-12345678
  3206      - sg-abcdefab
  3207      - sg-23456789
  3208      - sg-bcdefabc
  3209  `,
  3210  			assertConfig: []ConfigTester{
  3211  				hasDefaultEtcdSettings,
  3212  				hasDefaultExperimentalFeatures,
  3213  				func(c *config.Config, t *testing.T) {
  3214  					p := c.NodePools[0]
  3215  					expectedWorkerSecurityGroupIds := []string{
  3216  						`sg-12345678`, `sg-abcdefab`, `sg-23456789`, `sg-bcdefabc`,
  3217  					}
  3218  					if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
  3219  						t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
  3220  					}
  3221  
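        					// SecurityGroupRefs is expected to append the worker security group
        					// exported by the network stack (via Fn::ImportValue) after the
        					// user-supplied IDs.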
  3222  					expectedWorkerSecurityGroupRefs := []string{
  3223  						`"sg-12345678"`, `"sg-abcdefab"`, `"sg-23456789"`, `"sg-bcdefabc"`,
  3224  						`{"Fn::ImportValue" : {"Fn::Sub" : "${NetworkStackName}-WorkerSecurityGroup"}}`,
  3225  					}
  3226  					if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
  3227  						t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
  3228  					}
  3229  				},
  3230  			},
  3231  		},
  3232  		{
  3233  			context: "WithWorkerAndLBSecurityGroupIds",
  3234  			configYaml: minimalValidConfigYaml + `
  3235  worker:
  3236    nodePools:
  3237    - name: pool1
  3238      securityGroupIds:
  3239      - sg-12345678
  3240      - sg-abcdefab
  3241      loadBalancer:
  3242        enabled: true
  3243        securityGroupIds:
  3244          - sg-23456789
  3245          - sg-bcdefabc
  3246  `,
  3247  			assertConfig: []ConfigTester{
  3248  				hasDefaultEtcdSettings,
  3249  				func(c *config.Config, t *testing.T) {
  3250  					p := c.NodePools[0]
  3251  					expectedWorkerSecurityGroupIds := []string{
  3252  						`sg-12345678`, `sg-abcdefab`,
  3253  					}
  3254  					if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
  3255  						t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
  3256  					}
  3257  
  3258  					expectedLBSecurityGroupIds := []string{
  3259  						`sg-23456789`, `sg-bcdefabc`,
  3260  					}
  3261  					if !reflect.DeepEqual(p.LoadBalancer.SecurityGroupIds, expectedLBSecurityGroupIds) {
  3262  						t.Errorf("LBSecurityGroupIds didn't match: expected=%v actual=%v", expectedLBSecurityGroupIds, p.LoadBalancer.SecurityGroupIds)
  3263  					}
  3264  
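        					// Note the expected ordering: the load balancer's security group IDs
        					// precede the pool's own IDs in the rendered refs.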
  3265  					expectedWorkerSecurityGroupRefs := []string{
  3266  						`"sg-23456789"`, `"sg-bcdefabc"`, `"sg-12345678"`, `"sg-abcdefab"`,
  3267  						`{"Fn::ImportValue" : {"Fn::Sub" : "${NetworkStackName}-WorkerSecurityGroup"}}`,
  3268  					}
  3269  					if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
  3270  						t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
  3271  					}
  3272  				},
  3273  			},
  3274  		},
  3275  		{
  3276  			context: "WithWorkerAndALBSecurityGroupIds",
  3277  			configYaml: minimalValidConfigYaml + `
  3278  worker:
  3279    nodePools:
  3280    - name: pool1
  3281      securityGroupIds:
  3282      - sg-12345678
  3283      - sg-abcdefab
  3284      targetGroup:
  3285        enabled: true
  3286        securityGroupIds:
  3287          - sg-23456789
  3288          - sg-bcdefabc
  3289  `,
  3290  			assertConfig: []ConfigTester{
  3291  				hasDefaultEtcdSettings,
  3292  				func(c *config.Config, t *testing.T) {
  3293  					p := c.NodePools[0]
  3294  					expectedWorkerSecurityGroupIds := []string{
  3295  						`sg-12345678`, `sg-abcdefab`,
  3296  					}
  3297  					if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
  3298  						t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
  3299  					}
  3300  
  3301  					expectedALBSecurityGroupIds := []string{
  3302  						`sg-23456789`, `sg-bcdefabc`,
  3303  					}
  3304  					if !reflect.DeepEqual(p.TargetGroup.SecurityGroupIds, expectedALBSecurityGroupIds) {
  3305  						t.Errorf("ALBSecurityGroupIds didn't match: expected=%v actual=%v", expectedALBSecurityGroupIds, p.TargetGroup.SecurityGroupIds)
  3306  					}
  3307  
  3308  					expectedWorkerSecurityGroupRefs := []string{
  3309  						`"sg-23456789"`, `"sg-bcdefabc"`, `"sg-12345678"`, `"sg-abcdefab"`,
  3310  						`{"Fn::ImportValue" : {"Fn::Sub" : "${NetworkStackName}-WorkerSecurityGroup"}}`,
  3311  					}
  3312  					if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
  3313  						t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
  3314  					}
  3315  				},
  3316  			},
  3317  		},
  3318  		{
  3319  			context: "WithDedicatedInstanceTenancy",
  3320  			configYaml: minimalValidConfigYaml + `
  3321  workerTenancy: dedicated
  3322  controller:
  3323    tenancy: dedicated
  3324  etcd:
  3325    tenancy: dedicated
  3326  `,
  3327  			assertConfig: []ConfigTester{
  3328  				func(c *config.Config, t *testing.T) {
  3329  					if c.Etcd.Tenancy != "dedicated" {
  3330  						t.Errorf("Etcd.Tenancy didn't match: expected=dedicated actual=%s", c.Etcd.Tenancy)
  3331  					}
  3332  					if c.WorkerTenancy != "dedicated" {
  3333  						t.Errorf("WorkerTenancy didn't match: expected=dedicated actual=%s", c.WorkerTenancy)
  3334  					}
  3335  					if c.Controller.Tenancy != "dedicated" {
  3336  						t.Errorf("Controller.Tenancy didn't match: expected=dedicated actual=%s", c.Controller.Tenancy)
  3337  					}
  3338  				},
  3339  			},
  3340  		},
  3341  		{
  3342  			context: "WithControllerNodeLabels",
  3343  			configYaml: minimalValidConfigYaml + `
  3344  controller:
  3345    nodeLabels:
  3346      kube-aws.coreos.com/role: controller
  3347  `,
  3348  			assertConfig: []ConfigTester{
  3349  				hasDefaultExperimentalFeatures,
  3350  				func(c *config.Config, t *testing.T) {
  3351  					expected := api.NodeLabels{"kube-aws.coreos.com/role": "controller"}
  3352  					actual := c.NodeLabels()
  3353  					if !reflect.DeepEqual(expected, actual) {
  3354  						t.Errorf("unexpected controller node labels: expected=%v, actual=%v", expected, actual)
  3355  					}
  3356  				},
  3357  			},
  3358  		},
  3359  		{
  3360  			context: "WithSSHAccessAllowedSourceCIDRsSpecified",
  3361  			configYaml: minimalValidConfigYaml + `
  3362  sshAccessAllowedSourceCIDRs:
  3363  - 1.2.3.255/32
  3364  `,
  3365  			assertConfig: []ConfigTester{
  3366  				func(c *config.Config, t *testing.T) {
  3367  					l := len(c.SSHAccessAllowedSourceCIDRs)
  3368  					if l != 1 {
  3369  						t.Errorf("unexpected size of sshAccessAllowedSourceCIDRs: %d", l)
  3370  						t.FailNow()
  3371  					}
  3372  					actual := c.SSHAccessAllowedSourceCIDRs[0].String()
  3373  					expected := "1.2.3.255/32"
  3374  					if actual != expected {
  3375  						t.Errorf("unexpected cidr in sshAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
  3376  					}
  3377  				},
  3378  			},
  3379  		},
  3380  		{
  3381  			context:    "WithSSHAccessAllowedSourceCIDRsOmitted",
  3382  			configYaml: minimalValidConfigYaml,
  3383  			assertConfig: []ConfigTester{
  3384  				func(c *config.Config, t *testing.T) {
  3385  					l := len(c.SSHAccessAllowedSourceCIDRs)
  3386  					if l != 1 {
  3387  						t.Errorf("unexpected size of sshAccessAllowedSourceCIDRs: %d", l)
  3388  						t.FailNow()
  3389  					}
  3390  					actual := c.SSHAccessAllowedSourceCIDRs[0].String()
  3391  					expected := "0.0.0.0/0"
  3392  					if actual != expected {
  3393  						t.Errorf("unexpected cidr in sshAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
  3394  					}
  3395  				},
  3396  			},
  3397  		},
  3398  		{
  3399  			context: "WithSSHAccessAllowedSourceCIDRsEmptied",
  3400  			configYaml: minimalValidConfigYaml + `
  3401  sshAccessAllowedSourceCIDRs:
  3402  `,
  3403  			assertConfig: []ConfigTester{
  3404  				func(c *config.Config, t *testing.T) {
  3405  					l := len(c.SSHAccessAllowedSourceCIDRs)
  3406  					if l != 0 {
  3407  						t.Errorf("unexpected size of sshAccessAllowedSourceCIDRs: %d", l)
  3408  						t.FailNow()
  3409  					}
  3410  				},
  3411  			},
  3412  		},
  3413  		{
  3414  			context: "WithWorkerWithoutGPUSettings",
  3415  			configYaml: minimalValidConfigYaml + `
  3416  worker:
  3417    nodePools:
  3418    - name: pool1
  3419  `,
  3420  			assertConfig: []ConfigTester{
  3421  				func(c *config.Config, t *testing.T) {
  3422  					enabled := c.NodePools[0].Gpu.Nvidia.Enabled
  3423  					if enabled {
  3424  						t.Errorf("unexpected value of gpu.nvidia.enabled: %v. Its default value should be false", enabled)
  3425  						t.FailNow()
  3426  					}
  3427  				},
  3428  			},
  3429  		},
  3430  		{
  3431  			context: "WithGPUEnabledWorker",
  3432  			configYaml: minimalValidConfigYaml + `
  3433  worker:
  3434    nodePools:
  3435    - name: pool1
  3436      instanceType: p2.xlarge
  3437      gpu:
  3438        nvidia:
  3439          enabled: true
  3440          version: "123.45"
  3441  `,
  3442  			assertConfig: []ConfigTester{
  3443  				func(c *config.Config, t *testing.T) {
  3444  					enabled := c.NodePools[0].Gpu.Nvidia.Enabled
  3445  					version := c.NodePools[0].Gpu.Nvidia.Version
  3446  					if !enabled {
  3447  						t.Errorf("unexpected value of gpu.nvidia.enabled: %v", enabled)
  3448  						t.FailNow()
  3449  					}
  3450  					if version != "123.45" {
  3451  						t.Errorf("unexpected value of gpu.nvidia.version: %v", version)
  3452  						t.FailNow()
  3453  					}
  3454  				},
  3455  			},
  3456  		},
  3457  		{
  3458  			context: "WithGPUDisabledWorker",
  3459  			configYaml: minimalValidConfigYaml + `
  3460  worker:
  3461    nodePools:
  3462    - name: pool1
  3463      gpu:
  3464        nvidia:
  3465          enabled: false
  3466          version: "123.45"
  3467  `,
  3468  			assertConfig: []ConfigTester{
  3469  				func(c *config.Config, t *testing.T) {
  3470  					enabled := c.NodePools[0].Gpu.Nvidia.Enabled
  3471  					version := c.NodePools[0].Gpu.Nvidia.Version
  3472  					if enabled {
  3473  						t.Errorf("unexpected value of gpu.nvidia.enabled: %v", enabled)
  3474  						t.FailNow()
  3475  					}
  3476  					if version != "123.45" {
  3477  						t.Errorf("unexpected value of gpu.nvidia.version: %v", version)
  3478  						t.FailNow()
  3479  					}
  3480  				},
  3481  			},
  3482  		},
  3483  	}
  3484  
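        	// Every valid case goes through the same pipeline below: the YAML is parsed
        	// into a config, the per-case config assertions run, and the cluster is then
        	// compiled with dummy credentials so each stack template can be rendered and
        	// validated without touching AWS. As a rough, hedged example (the bucket and
        	// directory are placeholders), the full suite can be run against real AWS with:
        	//
        	//   KUBE_AWS_INTEGRATION_TEST=1 KUBE_AWS_S3_DIR_URI=s3://yourbucket/dir go test ./test/integration/...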
  3485  	for _, validCase := range validCases {
  3486  		t.Run(validCase.context, func(t *testing.T) {
  3487  			configBytes := validCase.configYaml
  3488  			// TODO Allow including plugins in test data?
  3489  			plugins := []*api.Plugin{}
  3490  			providedConfig, err := config.ConfigFromBytes([]byte(configBytes), plugins)
  3491  			if err != nil {
  3492  				t.Errorf("failed to parse config %s: %+v", configBytes, err)
  3493  				t.FailNow()
  3494  			}
  3495  
  3496  			t.Run("AssertConfig", func(t *testing.T) {
  3497  				for _, assertion := range validCase.assertConfig {
  3498  					assertion(providedConfig, t)
  3499  				}
  3500  			})
  3501  
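        			// WithDummyCredentials (from test/helper) is assumed to provision a temporary
        			// assets directory pre-populated with dummy credential assets, so the
        			// rendering below needs no real keys or certificates.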
  3502  			helper.WithDummyCredentials(func(dummyAssetsDir string) {
  3503  				var stackTemplateOptions = root.NewOptions(false, false)
  3504  				stackTemplateOptions.AssetsDir = dummyAssetsDir
  3505  				stackTemplateOptions.ControllerTmplFile = "../../builtin/files/userdata/cloud-config-controller"
  3506  				stackTemplateOptions.WorkerTmplFile = "../../builtin/files/userdata/cloud-config-worker"
  3507  				stackTemplateOptions.EtcdTmplFile = "../../builtin/files/userdata/cloud-config-etcd"
  3508  				stackTemplateOptions.RootStackTemplateTmplFile = "../../builtin/files/stack-templates/root.json.tmpl"
  3509  				stackTemplateOptions.NodePoolStackTemplateTmplFile = "../../builtin/files/stack-templates/node-pool.json.tmpl"
  3510  				stackTemplateOptions.ControlPlaneStackTemplateTmplFile = "../../builtin/files/stack-templates/control-plane.json.tmpl"
  3511  				stackTemplateOptions.NetworkStackTemplateTmplFile = "../../builtin/files/stack-templates/network.json.tmpl"
  3512  				stackTemplateOptions.EtcdStackTemplateTmplFile = "../../builtin/files/stack-templates/etcd.json.tmpl"
  3513  
  3514  				cl, err := root.CompileClusterFromConfig(providedConfig, stackTemplateOptions, false)
  3515  				if err != nil {
  3516  					t.Errorf("failed to create cluster driver: %v", err)
  3517  					t.FailNow()
  3518  				}
  3519  
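        				// Stub out all AWS interactions (KMS encryption, CloudFormation and EC2
        				// queries, stack template retrieval) so asset generation runs offline.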
  3520  				cl.Context = &model.Context{
  3521  					ProvidedEncryptService:  helper.DummyEncryptService{},
  3522  					ProvidedCFInterrogator:  helper.DummyCFInterrogator{},
  3523  					ProvidedEC2Interrogator: helper.DummyEC2Interrogator{},
  3524  					StackTemplateGetter:     helper.DummyStackTemplateGetter{},
  3525  				}
  3526  
  3527  				_, err = cl.EnsureAllAssetsGenerated()
  3528  				if err != nil {
  3529  					t.Errorf("%v", err)
  3530  					t.FailNow()
  3531  				}
  3532  
  3533  				t.Run("AssertCluster", func(t *testing.T) {
  3534  					for _, assertion := range validCase.assertCluster {
  3535  						assertion(cl, t)
  3536  					}
  3537  				})
  3538  
  3539  				t.Run("ValidateTemplates", func(t *testing.T) {
  3540  					if err := cl.ValidateTemplates(); err != nil {
  3541  						t.Errorf("failed to render stack template: %v", err)
  3542  					}
  3543  				})
  3544  
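        				// Validating the rendered stacks against the real CloudFormation API is
        				// opt-in; without the environment variable the rest of the case is skipped.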
  3545  				if os.Getenv("KUBE_AWS_INTEGRATION_TEST") == "" {
  3546  					t.Skipf("`export KUBE_AWS_INTEGRATION_TEST=1` is required to run integration tests. Skipping.")
  3548  				} else {
  3549  					t.Run("ValidateStack", func(t *testing.T) {
  3550  						if !s3URIExists {
  3551  							t.Errorf("failed to obtain value for KUBE_AWS_S3_DIR_URI")
  3552  							t.FailNow()
  3553  						}
  3554  
  3555  						report, err := cl.ValidateStack()
  3556  
  3557  						if err != nil {
  3558  							t.Errorf("failed to validate stack: %s %v", report, err)
  3559  						}
  3560  					})
  3561  				}
  3562  			})
  3563  		})
  3564  	}
  3565  
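        	// Each case below is expected to fail parsing or validation;
        	// expectedErrorMessage is matched as a substring of the resulting error.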
  3566  	parseErrorCases := []struct {
  3567  		context              string
  3568  		configYaml           string
  3569  		expectedErrorMessage string
  3570  	}{
  3571  		{
  3572  			context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsEmptied",
  3573  			configYaml: configYamlWithoutExernalDNSName + `
  3574  apiEndpoints:
  3575  - name: default
  3576    dnsName: k8s.example.com
  3577    loadBalancer:
  3578      apiAccessAllowedSourceCIDRs:
  3579      hostedZone:
  3580        id: a1b2c4
  3581  `,
  3582  			expectedErrorMessage: `invalid cluster: invalid apiEndpoint "default" at index 0: invalid loadBalancer: either apiAccessAllowedSourceCIDRs or securityGroupIds must be present. Try not to explicitly empty apiAccessAllowedSourceCIDRs or set one or more securityGroupIDs`,
  3583  		},
  3584  		{
  3585  			// See https://github.com/kubernetes-incubator/kube-aws/issues/365
  3586  			context:              "WithClusterNameContainsDots",
  3587  			configYaml:           kubeAwsSettings.withClusterName("my.cluster").minimumValidClusterYaml(),
  3588  			expectedErrorMessage: "clusterName(=my.cluster) is malformed. It must consist only of alphanumeric characters, colons, or hyphens",
  3589  		},
  3590  		{
  3591  			context: "WithControllerTaint",
  3592  			configYaml: minimalValidConfigYaml + `
  3593  controller:
  3594    taints:
  3595    - key: foo
  3596      value: bar
  3597      effect: NoSchedule
  3598  `,
  3599  			expectedErrorMessage: "`controller.taints` must not be specified because tainting controller nodes breaks the cluster",
  3600  		},
  3601  		{
  3602  			context: "WithElasticFileSystemIdInSpecificNodePoolWithManagedSubnets",
  3603  			configYaml: mainClusterYaml + `
  3604  subnets:
  3605  - name: managed1
  3606    availabilityZone: us-west-1a
  3607    instanceCIDR: 10.0.1.0/24
  3608  worker:
  3609    nodePools:
  3610    - name: pool1
  3611      subnets:
  3612      - name: managed1
  3613      elasticFileSystemId: efs-12345
  3614    - name: pool2
  3615  `,
  3616  			expectedErrorMessage: "invalid node pool at index 0: elasticFileSystemId cannot be specified for a node pool in managed subnet(s), but was: efs-12345",
  3617  		},
  3618  		{
  3619  			context: "WithEtcdAutomatedDisasterRecoveryRequiresAutomatedSnapshot",
  3620  			configYaml: minimalValidConfigYaml + `
  3621  etcd:
  3622    version: 3
  3623    snapshot:
  3624      automated: false
  3625    disasterRecovery:
  3626      automated: true
  3627  `,
  3628  			expectedErrorMessage: "`etcd.disasterRecovery.automated` is set to true but `etcd.snapshot.automated` is not - automated disaster recovery requires snapshot to be also automated",
  3629  		},
  3630  		{
  3631  			context: "WithInvalidNodeDrainTimeout",
  3632  			configYaml: minimalValidConfigYaml + `
  3633  experimental:
  3634    nodeDrainer:
  3635      enabled: true
  3636      drainTimeout: 100
  3637  `,
  3638  			expectedErrorMessage: "Drain timeout must be an integer between 1 and 60, but was 100",
  3639  		},
  3640  		{
  3641  			context: "WithInvalidTaint",
  3642  			configYaml: minimalValidConfigYaml + `
  3643  worker:
  3644    nodePools:
  3645    - name: pool1
  3646      taints:
  3647      - key: foo
  3648        value: bar
  3649        effect: UnknownEffect
  3650  `,
  3651  			expectedErrorMessage: "invalid taint effect: UnknownEffect",
  3652  		},
  3653  		{
  3654  			context: "WithLegacyControllerSettingKeys",
  3655  			configYaml: minimalValidConfigYaml + `
  3656  vpc:
  3657    id: vpc-1a2b3c4d
  3658  internetGateway:
  3659    id: igw-1a2b3c4d
  3660  routeTableId: rtb-1a2b3c4d
  3661  controllerCount: 2
  3662  controllerCreateTimeout: PT10M
  3663  controllerInstanceType: t2.large
  3664  controllerRootVolumeSize: 101
  3665  controllerRootVolumeType: io1
  3666  controllerRootVolumeIOPS: 102
  3667  controllerTenancy: dedicated
  3668  `,
  3669  			expectedErrorMessage: "unknown keys found: controllerCount, controllerCreateTimeout, controllerInstanceType, controllerRootVolumeIOPS, controllerRootVolumeSize, controllerRootVolumeType, controllerTenancy",
  3670  		},
  3671  		{
  3672  			context: "WithLegacyEtcdSettingKeys",
  3673  			configYaml: minimalValidConfigYaml + `
  3674  vpc:
  3675    id: vpc-1a2b3c4d
  3676  internetGateway:
  3677    id: igw-1a2b3c4d
  3678  routeTableId: rtb-1a2b3c4d
  3679  etcdCount: 2
  3680  etcdTenancy: dedicated
  3681  etcdInstanceType: t2.large
  3682  etcdRootVolumeSize: 101
  3683  etcdRootVolumeType: io1
  3684  etcdRootVolumeIOPS: 102
  3685  etcdDataVolumeSize: 103
  3686  etcdDataVolumeType: io1
  3687  etcdDataVolumeIOPS: 104
  3688  etcdDataVolumeEncrypted: true
  3689  `,
  3690  			expectedErrorMessage: "unknown keys found: etcdCount, etcdDataVolumeEncrypted, etcdDataVolumeIOPS, etcdDataVolumeSize, etcdDataVolumeType, etcdInstanceType, etcdRootVolumeIOPS, etcdRootVolumeSize, etcdRootVolumeType, etcdTenancy",
  3691  		},
  3692  		{
  3693  			context: "WithAwsNodeLabelEnabledForTooLongClusterNameAndPoolName",
  3694  			configYaml: minimalValidConfigYaml + `
  3695  # clusterName + nodePools[].name must be at most 25 characters; otherwise the launch configuration name,
  3696  # e.g. "mykubeawsclustername-mynestedstackname-1N2C4K3LLBEDZ-WorkersLC-BC2S9P3JG2QD", would exceed the limit of 63 characters
  3697  # See https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
  3698  clusterName: my-cluster1 # 11 characters
  3699  worker:
  3700    nodePools:
  3701    - name: workernodepool1 # 15 characters
  3702      awsNodeLabels:
  3703        enabled: true
  3704  `,
  3705  			expectedErrorMessage: "awsNodeLabels can't be enabled for node pool because the total number of characters in clusterName(=\"my-cluster1\") + node pool's name(=\"workernodepool1\") exceeds the limit of 25",
  3706  		},
  3707  		{
  3708  			context: "WithAwsNodeLabelEnabledForTooLongClusterName",
  3709  			configYaml: minimalValidConfigYaml + `
  3710  # clusterName must be at most 21 characters; otherwise the launch configuration name,
  3711  # e.g. "mykubeawsclustername-mynestedstackname-1N2C4K3LLBEDZ-ControllersLC-BC2S9P3JG2QD", would exceed the limit of 63 characters
  3712  # See https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
  3713  clusterName: mycluster # 9
  3714  experimental:
  3715    awsNodeLabels:
  3716       enabled: true
  3717  `,
  3718  			expectedErrorMessage: "awsNodeLabels can't be enabled for controllers because the total number of characters in clusterName(=\"mycluster\") exceeds the limit of 8",
  3719  		},
  3720  		{
  3721  			context: "WithMultiAPIEndpointsInvalidLB",
  3722  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3723  vpc:
  3724    id: vpc-1a2b3c4d
  3725  internetGateway:
  3726    id: igw-1a2b3c4d
  3727  
  3728  subnets:
  3729  - name: publicSubnet1
  3730    availabilityZone: us-west-1a
  3731    instanceCIDR: "10.0.1.0/24"
  3732  
  3733  worker:
  3734    apiEndpointName: unversionedPublic
  3735  
  3736  apiEndpoints:
  3737  - name: unversionedPublic
  3738    dnsName: api.example.com
  3739    loadBalancer:
  3740      id: elb-internet-facing
  3741      private: true
  3742      subnets:
  3743      - name: publicSubnet1
  3744      hostedZone:
  3745        id: hostedzone-public
  3746  `,
  3747  			expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: type, private, subnets, hostedZone must be omitted when id is specified to reuse an existing ELB",
  3748  		},
  3749  		{
  3750  			context: "WithMultiAPIEndpointsInvalidWorkerAPIEndpointName",
  3751  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3752  vpc:
  3753    id: vpc-1a2b3c4d
  3754  internetGateway:
  3755    id: igw-1a2b3c4d
  3756  
  3757  subnets:
  3758  - name: publicSubnet1
  3759    availabilityZone: us-west-1a
  3760    instanceCIDR: "10.0.1.0/24"
  3761  
  3762  worker:
  3763    # no api endpoint named like that exists!
  3764    apiEndpointName: unknownEndpoint
  3765  
  3766  adminAPIEndpointName: versionedPublic
  3767  
  3768  apiEndpoints:
  3769  - name: unversionedPublic
  3770    dnsName: api.example.com
  3771    loadBalancer:
  3772      subnets:
  3773      - name: publicSubnet1
  3774      hostedZone:
  3775        id: hostedzone-public
  3776  - name: versionedPublic
  3777    dnsName: apiv1.example.com
  3778    loadBalancer:
  3779      subnets:
  3780      - name: publicSubnet1
  3781      hostedZone:
  3782        id: hostedzone-public
  3783  `,
  3784  			expectedErrorMessage: "invalid value for worker.apiEndpointName: no API endpoint named \"unknownEndpoint\" found",
  3785  		},
  3786  		{
  3787  			context: "WithMultiAPIEndpointsInvalidWorkerNodePoolAPIEndpointName",
  3788  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3789  vpc:
  3790    id: vpc-1a2b3c4d
  3791  internetGateway:
  3792    id: igw-1a2b3c4d
  3793  
  3794  subnets:
  3795  - name: publicSubnet1
  3796    availabilityZone: us-west-1a
  3797    instanceCIDR: "10.0.1.0/24"
  3798  
  3799  worker:
  3800    # this one is ok but...
  3801    apiEndpointName: versionedPublic
  3802    nodePools:
  3803    - name: pool1
  3804      # this one is no good; no api endpoint named like this exists!
  3805      apiEndpointName: unknownEndpoint
  3806  
  3807  adminAPIEndpointName: versionedPublic
  3808  
  3809  apiEndpoints:
  3810  - name: unversionedPublic
  3811    dnsName: api.example.com
  3812    loadBalancer:
  3813      subnets:
  3814      - name: publicSubnet1
  3815      hostedZone:
  3816        id: hostedzone-public
  3817  - name: versionedPublic
  3818    dnsName: apiv1.example.com
  3819    loadBalancer:
  3820      subnets:
  3821      - name: publicSubnet1
  3822      hostedZone:
  3823        id: hostedzone-public
  3824  `,
  3825  			expectedErrorMessage: "invalid node pool at index 0: failed to find an API endpoint named \"unknownEndpoint\": no API endpoint named \"unknownEndpoint\" defined under the `apiEndpoints[]`",
  3826  		},
  3827  		{
  3828  			context: "WithMultiAPIEndpointsMissingDNSName",
  3829  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3830  vpc:
  3831    id: vpc-1a2b3c4d
  3832  internetGateway:
  3833    id: igw-1a2b3c4d
  3834  
  3835  subnets:
  3836  - name: publicSubnet1
  3837    availabilityZone: us-west-1a
  3838    instanceCIDR: "10.0.1.0/24"
  3839  
  3840  apiEndpoints:
  3841  - name: unversionedPublic
  3842    dnsName:
  3843    loadBalancer:
  3844      hostedZone:
  3845        id: hostedzone-public
  3846  `,
  3847  			expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: dnsName must be set",
  3848  		},
  3849  		{
  3850  			context: "WithMultiAPIEndpointsMissingGlobalAPIEndpointName",
  3851  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3852  vpc:
  3853    id: vpc-1a2b3c4d
  3854  internetGateway:
  3855    id: igw-1a2b3c4d
  3856  
  3857  subnets:
  3858  - name: publicSubnet1
  3859    availabilityZone: us-west-1a
  3860    instanceCIDR: "10.0.1.0/24"
  3861  
  3862  worker:
  3863    nodePools:
  3864    - name: pool1
  3865      # this one is no good; no api endpoint named like this exists!
  3866      apiEndpointName: unknownEndpoint
  3867    - name: pool1
  3868      # this one is ng; missing apiEndpointName
  3869  
  3870  adminAPIEndpointName: versionedPublic
  3871  
  3872  apiEndpoints:
  3873  - name: unversionedPublic
  3874    dnsName: api.example.com
  3875    loadBalancer:
  3876      subnets:
  3877      - name: publicSubnet1
  3878      hostedZone:
  3879        id: hostedzone-public
  3880  - name: versionedPublic
  3881    dnsName: apiv1.example.com
  3882    loadBalancer:
  3883      subnets:
  3884      - name: publicSubnet1
  3885      hostedZone:
  3886        id: hostedzone-public
  3887  `,
  3888  			expectedErrorMessage: "worker.apiEndpointName must not be empty when there're 2 or more API endpoints under the key `apiEndpoints` and one of worker.nodePools[] are missing apiEndpointName",
  3889  		},
  3890  		{
  3891  			context: "WithMultiAPIEndpointsRecordSetImpliedBySubnetsMissingHostedZoneID",
  3892  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3893  vpc:
  3894    id: vpc-1a2b3c4d
  3895  internetGateway:
  3896    id: igw-1a2b3c4d
  3897  
  3898  subnets:
  3899  - name: publicSubnet1
  3900    availabilityZone: us-west-1a
  3901    instanceCIDR: "10.0.1.0/24"
  3902  
  3903  worker:
  3904    apiEndpointName: unversionedPublic
  3905  
  3906  apiEndpoints:
  3907  - name: unversionedPublic
  3908    dnsName: api.example.com
  3909    loadBalancer:
  3910      # an internet-facing(which is the default) lb in the public subnet is going to be created with a corresponding record set
  3911      # however no hosted zone for the record set is provided!
  3912      subnets:
  3913      - name: publicSubnet1
  3914      # missing hosted zone id here!
  3915  `,
  3916  			expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
  3917  		},
  3918  		{
  3919  			context: "WithMultiAPIEndpointsRecordSetImpliedByExplicitPublicMissingHostedZoneID",
  3920  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3921  vpc:
  3922    id: vpc-1a2b3c4d
  3923  internetGateway:
  3924    id: igw-1a2b3c4d
  3925  
  3926  subnets:
  3927  - name: publicSubnet1
  3928    availabilityZone: us-west-1a
  3929    instanceCIDR: "10.0.1.0/24"
  3930  
  3931  worker:
  3932    apiEndpointName: unversionedPublic
  3933  
  3934  apiEndpoints:
  3935  - name: unversionedPublic
  3936    dnsName: api.example.com
  3937    loadBalancer:
  3938      # an internet-facing lb is going to be created with a corresponding record set
  3939      # however no hosted zone for the record set is provided!
  3940      private: false
  3941      # missing hosted zone id here!
  3942  `,
  3943  			expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
  3944  		},
  3945  		{
  3946  			context: "WithMultiAPIEndpointsRecordSetImpliedByExplicitPrivateMissingHostedZoneID",
  3947  			configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
  3948  vpc:
  3949    id: vpc-1a2b3c4d
  3950  internetGateway:
  3951    id: igw-1a2b3c4d
  3952  
  3953  subnets:
  3954  - name: publicSubnet1
  3955    availabilityZone: us-west-1a
  3956    instanceCIDR: "10.0.1.0/24"
  3957  - name: privateSubnet1
  3958    availabilityZone: us-west-1a
  3959    instanceCIDR: "10.0.2.0/24"
  3960  
  3961  worker:
  3962    apiEndpointName: unversionedPublic
  3963  
  3964  apiEndpoints:
  3965  - name: unversionedPublic
  3966    dnsName: api.example.com
  3967    loadBalancer:
  3968      # an internal lb is going to be created with a corresponding record set
  3969      # however no hosted zone for the record set is provided!
  3970      private: true
  3971      # missing hosted zone id here!
  3972  `,
  3973  			expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
  3974  		},
  3975  		{
  3976  			context: "WithNetworkTopologyAllExistingPrivateSubnetsRejectingExistingIGW",
  3977  			configYaml: mainClusterYaml + `
  3978  vpc:
  3979    id: vpc-1a2b3c4d
  3980  internetGateway:
  3981    id: igw-1a2b3c4d
  3982  subnets:
  3983  - name: private1
  3984    availabilityZone: us-west-1a
  3985    id: subnet-1
  3986    private: true
  3987  controller:
  3988    loadBalancer:
  3989      private: true
  3990  etcd:
  3991    subnets:
  3992    - name: private1
  3993  worker:
  3994    nodePools:
  3995    - name: pool1
  3996      subnets:
  3997      - name: private1
  3998  `,
  3999  			expectedErrorMessage: `internet gateway id can't be specified when all the subnets are existing private subnets`,
  4000  		},
  4001  		{
  4002  			context: "WithNetworkTopologyAllExistingPublicSubnetsRejectingExistingIGW",
  4003  			configYaml: mainClusterYaml + `
  4004  vpc:
  4005    id: vpc-1a2b3c4d
  4006  internetGateway:
  4007    id: igw-1a2b3c4d
  4008  subnets:
  4009  - name: public1
  4010    availabilityZone: us-west-1a
  4011    id: subnet-1
  4012  controller:
  4013    loadBalancer:
  4014      private: false
  4015  etcd:
  4016    subnets:
  4017    - name: public1
  4018  worker:
  4019    nodePools:
  4020    - name: pool1
  4021      subnets:
  4022      - name: public1
  4023  `,
  4024  			expectedErrorMessage: `internet gateway id can't be specified when all the public subnets have existing route tables associated. kube-aws doesn't try to modify an exisinting route table to include a route to the internet gateway`,
  4025  		},
  4026  		{
  4027  			context: "WithNetworkTopologyAllManagedPublicSubnetsWithExistingRouteTableRejectingExistingIGW",
  4028  			configYaml: mainClusterYaml + `
  4029  vpc:
  4030    id: vpc-1a2b3c4d
  4031  internetGateway:
  4032    id: igw-1a2b3c4d
  4033  subnets:
  4034  - name: public1
  4035    availabilityZone: us-west-1a
  4036    instanceCIDR: 10.0.1.0/24
  4037    routeTable:
  4038      id: subnet-1
  4039  controller:
  4040    loadBalancer:
  4041      private: false
  4042  etcd:
  4043    subnets:
  4044    - name: public1
  4045  worker:
  4046    nodePools:
  4047    - name: pool1
  4048      subnets:
  4049      - name: public1
  4050  `,
  4051  			expectedErrorMessage: `internet gateway id can't be specified when all the public subnets have existing route tables associated. kube-aws doesn't try to modify an exisinting route table to include a route to the internet gateway`,
  4052  		},
  4053  		{
  4054  			context: "WithNetworkTopologyAllManagedPublicSubnetsMissingExistingIGW",
  4055  			configYaml: mainClusterYaml + `
  4056  vpc:
  4057    id: vpc-1a2b3c4d
  4058  # intentionally omits this:
  4059  #internetGateway:
  4060  #  id: igw-1a2b3c4d
  4061  subnets:
  4062  - name: public1
  4063    availabilityZone: us-west-1a
  4064    instanceCIDR: "10.0.1.0/24"
  4065  controller:
  4066    loadBalancer:
  4067      private: false
  4068  etcd:
  4069    subnets:
  4070    - name: public1
  4071  worker:
  4072    nodePools:
  4073    - name: pool1
  4074      subnets:
  4075      - name: public1
  4076  `,
  4077  			expectedErrorMessage: `internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC`,
  4078  		},
  4079  		{
  4080  			context: "WithNetworkTopologyAllPreconfiguredPrivateDeprecatedAndThenRemoved",
  4081  			configYaml: mainClusterYaml + `
  4082  vpc:
  4083    id: vpc-1a2b3c4d
  4084  # This, in combination with mapPublicIPs=false, had been implying that the route table contains a route to a preconfigured NAT gateway
  4085  # See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-276008202
  4086  routeTableId: rtb-1a2b3c4d
  4087  # This had been implied that all the subnets created by kube-aws should be private
  4088  mapPublicIPs: false
  4089  subnets:
  4090  - availabilityZone: us-west-1a
  4091    instanceCIDR: "10.0.1.0/24"
  4092    # implies
  4093    # private: true
  4094    # routeTable
  4095    #   id: rtb-1a2b3c4d
  4096  - availabilityZone: us-west-1b
  4097    instanceCIDR: "10.0.2.0/24"
  4098    # implies
  4099    # private: true
  4100    # routeTable
  4101    #   id: rtb-1a2b3c4d
  4102  `,
  4103  			expectedErrorMessage: "internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC",
  4104  		},
  4105  		{
  4106  			context: "WithNetworkTopologyAllPreconfiguredPublicDeprecatedAndThenRemoved",
  4107  			configYaml: mainClusterYaml + `
  4108  vpc:
  4109    id: vpc-1a2b3c4d
  4110  # This, in combination with mapPublicIPs=true, had been implying that the route table contains a route to a preconfigured internet gateway
  4111  # See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-276008202
  4112  routeTableId: rtb-1a2b3c4d
  4113  # This had been implied that all the subnets created by kube-aws should be public
  4114  mapPublicIPs: true
  4115  # internetGateway.id should be omitted as we assume that the route table specified by routeTableId already contain a route to one
  4116  #internetGateway:
  4117  #  id:
  4118  subnets:
  4119  - availabilityZone: us-west-1a
  4120    instanceCIDR: "10.0.1.0/24"
  4121    # implies
  4122    # private: false
  4123    # routeTable
  4124    #   id: rtb-1a2b3c4d
  4125  - availabilityZone: us-west-1b
  4126    instanceCIDR: "10.0.2.0/24"
  4127    # implies
  4128    # private: false
  4129    # routeTable
  4130    #   id: rtb-1a2b3c4d
  4131  `,
  4132  			expectedErrorMessage: "internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC",
  4133  		},
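        		// The next two cases omit expectedErrorMessage: strings.Contains(msg, "") is
        		// always true, so they assert only that parsing fails, not what the error says.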
  4134  		{
  4135  			context: "WithVpcIdAndVPCCIDRSpecified",
  4136  			configYaml: minimalValidConfigYaml + `
  4137  vpc:
  4138    id: vpc-1a2b3c4d
  4139  internetGateway:
  4140    id: igw-1a2b3c4d
  4141  # vpcCIDR (10.1.0.0/16) does not contain instanceCIDR (10.0.1.0/24)
  4142  vpcCIDR: "10.1.0.0/16"
  4143  `,
  4144  		},
  4145  		{
  4146  			context: "WithRouteTableIdSpecified",
  4147  			configYaml: minimalValidConfigYaml + `
  4148  # vpc.id must be specified if routeTableId is specified
  4149  routeTableId: rtb-1a2b3c4d
  4150  `,
  4151  		},
  4152  		{
  4153  			context: "WithWorkerSecurityGroupIds",
  4154  			configYaml: minimalValidConfigYaml + `
  4155  worker:
  4156    nodePools:
  4157    - name: pool1
  4158      securityGroupIds:
  4159      - sg-12345678
  4160      - sg-abcdefab
  4161      - sg-23456789
  4162      - sg-bcdefabc
  4163      - sg-34567890
  4164  `,
  4165  			expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
  4166  		},
  4167  		{
  4168  			context: "WithWorkerAndLBSecurityGroupIds",
  4169  			configYaml: minimalValidConfigYaml + `
  4170  worker:
  4171    nodePools:
  4172    - name: pool1
  4173      securityGroupIds:
  4174      - sg-12345678
  4175      - sg-abcdefab
  4176      - sg-23456789
  4177      loadBalancer:
  4178        enabled: true
  4179        securityGroupIds:
  4180          - sg-bcdefabc
  4181          - sg-34567890
  4182  `,
  4183  			expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
  4184  		},
  4185  		{
  4186  			context: "WithWorkerAndALBSecurityGroupIds",
  4187  			configYaml: minimalValidConfigYaml + `
  4188  worker:
  4189    nodePools:
  4190    - name: pool1
  4191      securityGroupIds:
  4192      - sg-12345678
  4193      - sg-abcdefab
  4194      - sg-23456789
  4195      targetGroup:
  4196        enabled: true
  4197        securityGroupIds:
  4198          - sg-bcdefabc
  4199          - sg-34567890
  4200  `,
  4201  			expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
  4202  		},
  4203  		{
  4204  			context: "WithUnknownKeyInRoot",
  4205  			configYaml: minimalValidConfigYaml + `
  4206  foo: bar
  4207  `,
  4208  			expectedErrorMessage: "unknown keys found: foo",
  4209  		},
  4210  		{
  4211  			context: "WithUnknownKeyInController",
  4212  			configYaml: minimalValidConfigYaml + `
  4213  controller:
  4214    foo: 1
  4215  `,
  4216  			expectedErrorMessage: "unknown keys found in controller: foo",
  4217  		},
  4218  		{
  4219  			context: "WithUnknownKeyInControllerASG",
  4220  			configYaml: minimalValidConfigYaml + `
  4221  controller:
  4222    autoScalingGroup:
  4223      foo: 1
  4224  `,
  4225  			expectedErrorMessage: "unknown keys found in controller.autoScalingGroup: foo",
  4226  		},
  4227  		{
  4228  			context: "WithUnknownKeyInEtcd",
  4229  			configYaml: minimalValidConfigYaml + `
  4230  etcd:
  4231    foo: 1
  4232  `,
  4233  			expectedErrorMessage: "unknown keys found in etcd: foo",
  4234  		},
  4235  		{
  4236  			context: "WithUnknownKeyInWorkerNodePool",
  4237  			configYaml: minimalValidConfigYaml + `
  4238  worker:
  4239    nodePools:
  4240    - name: pool1
  4241      clusterAutoscaler:
  4242        enabled: true
  4243  `,
  4244  			expectedErrorMessage: "unknown keys found in worker.nodePools[0]: clusterAutoscaler",
  4245  		},
  4246  		{
  4247  			context: "WithUnknownKeyInWorkerNodePoolASG",
  4248  			configYaml: minimalValidConfigYaml + `
  4249  worker:
  4250    nodePools:
  4251    - name: pool1
  4252      autoScalingGroup:
  4253        foo: 1
  4254  `,
  4255  			expectedErrorMessage: "unknown keys found in worker.nodePools[0].autoScalingGroup: foo",
  4256  		},
  4257  		{
  4258  			context: "WithUnknownKeyInWorkerNodePoolSpotFleet",
  4259  			configYaml: minimalValidConfigYaml + `
  4260  worker:
  4261    nodePools:
  4262    - name: pool1
  4263      spotFleet:
  4264        bar: 1
  4265  `,
  4266  			expectedErrorMessage: "unknown keys found in worker.nodePools[0].spotFleet: bar",
  4267  		},
  4268  		{
  4269  			context: "WithUnknownKeyInAddons",
  4270  			configYaml: minimalValidConfigYaml + `
  4271  addons:
  4272    blah: 5
  4273  `,
  4274  			expectedErrorMessage: "unknown keys found in addons: blah",
  4275  		},
  4276  		{
  4277  			context: "WithUnknownKeyInReschedulerAddon",
  4278  			configYaml: minimalValidConfigYaml + `
  4279  addons:
  4280    rescheduler:
  4281      foo: yeah
  4282  `,
  4283  			expectedErrorMessage: "unknown keys found in addons.rescheduler: foo",
  4284  		},
  4285  		{
  4286  			context: "WithTooLongControllerIAMRoleName",
  4287  			configYaml: kubeAwsSettings.withClusterName("kubeaws-it-main").withRegion("ap-northeast-1").minimumValidClusterYaml() + `
  4288  controller:
  4289    iam:
  4290      role:
  4291        name: foobarba-foobarba-foobarba-foobarba-foobarba-foobarba
  4292  `,
  4293  			expectedErrorMessage: "IAM role name(=kubeaws-it-main-ap-northeast-1-foobarba-foobarba-foobarba-foobarba-foobarba-foobarba) will be 84 characters long. It exceeds the AWS limit of 64 characters: clusterName(=kubeaws-it-main) + region name(=ap-northeast-1) + managed iam role name(=foobarba-foobarba-foobarba-foobarba-foobarba-foobarba) should be less than or equal to 33",
  4294  		},
  4295  		{
  4296  			context: "WithTooLongWorkerIAMRoleName",
  4297  			configYaml: kubeAwsSettings.withClusterName("kubeaws-it-main").withRegion("ap-northeast-1").minimumValidClusterYaml() + `
  4298  worker:
  4299    nodePools:
  4300    - name: pool1
  4301      iam:
  4302        role:
  4303          name: foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz
  4304  `,
  4305  			expectedErrorMessage: "IAM role name(=kubeaws-it-main-ap-northeast-1-foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz) will be 87 characters long. It exceeds the AWS limit of 64 characters: clusterName(=kubeaws-it-main) + region name(=ap-northeast-1) + managed iam role name(=foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz) should be less than or equal to 33",
  4306  		},
  4307  		{
  4308  			context: "WithInvalidEtcdInstanceProfileArn",
  4309  			configYaml: minimalValidConfigYaml + `
  4310  etcd:
  4311    iam:
  4312      instanceProfile:
  4313        arn: "badArn"
  4314  `,
  4315  			expectedErrorMessage: "invalid etcd settings: invalid instance profile, your instance profile must match (=arn:aws:iam::YOURACCOUNTID:instance-profile/INSTANCEPROFILENAME), provided (badArn)",
  4316  		},
  4317  		{
  4318  			context: "WithInvalidEtcdManagedPolicyArn",
  4319  			configYaml: minimalValidConfigYaml + `
  4320  etcd:
  4321    iam:
  4322      role:
  4323        managedPolicies:
  4324        - arn: "badArn"
  4325  `,
  4326  			expectedErrorMessage: "invalid etcd settings: invalid managed policy arn, your managed policy must match this (=arn:aws:iam::(YOURACCOUNTID|aws):policy/POLICYNAME), provided this (badArn)",
  4327  		},
  4328  		{
  4329  			context: "WithInvalidWorkerInstanceProfileArn",
  4330  			configYaml: minimalValidConfigYaml + `
  4331  worker:
  4332    nodePools:
  4333    - name: pool1
  4334      iam:
  4335        instanceProfile:
  4336          arn: "badArn"
  4337  `,
  4338  			expectedErrorMessage: "invalid instance profile, your instance profile must match (=arn:aws:iam::YOURACCOUNTID:instance-profile/INSTANCEPROFILENAME), provided (badArn)",
  4339  		},
  4340  		{
  4341  			context: "WithInvalidWorkerManagedPolicyArn",
  4342  			configYaml: minimalValidConfigYaml + `
  4343  worker:
  4344    nodePools:
  4345    - name: pool1
  4346      iam:
  4347        role:
  4348          managedPolicies:
  4349            - arn: "badArn"
  4350  `,
  4351  			expectedErrorMessage: "invalid managed policy arn, your managed policy must match this (=arn:aws:iam::(YOURACCOUNTID|aws):policy/POLICYNAME), provided this (badArn)",
  4352  		},
  4353  		{
  4354  			context: "WithGPUEnabledWorkerButEmptyVersion",
  4355  			configYaml: minimalValidConfigYaml + `
  4356  worker:
  4357    nodePools:
  4358    - name: pool1
  4359      instanceType: p2.xlarge
  4360      gpu:
  4361        nvidia:
  4362          enabled: true
  4363          version: ""
  4364  `,
  4365  			expectedErrorMessage: `gpu.nvidia.version must not be empty when gpu.nvidia is enabled.`,
  4366  		},
  4367  		{
  4368  			context: "WithGPUDisabledWorkerButInstallationSupportEnabled",
  4369  			configYaml: minimalValidConfigYaml + `
  4370  worker:
  4371    nodePools:
  4372    - name: pool1
  4373      instanceType: t2.medium
  4374      gpu:
  4375        nvidia:
  4376          enabled: true
  4377          version: ""
  4378  `,
  4379  			expectedErrorMessage: `instance type t2.medium doesn't support GPU. You can enable Nvidia driver intallation support only when use [p2 p3 g2 g3] instance family.`,
  4380  		},
  4381  	}
  4382  
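        	// Parsing each invalid case must fail, and the resulting error message
        	// must contain the case's expected substring.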
  4383  	for _, invalidCase := range parseErrorCases {
  4384  		t.Run(invalidCase.context, func(t *testing.T) {
  4385  			configBytes := invalidCase.configYaml
  4386  			// TODO Allow including plugins in test data?
  4387  			plugins := []*api.Plugin{}
  4388  			providedConfig, err := config.ConfigFromBytes([]byte(configBytes), plugins)
  4389  			if err == nil {
  4390  				t.Errorf("expected to fail parsing config %s but it parsed successfully as: %+v", configBytes, *providedConfig)
  4391  				t.FailNow()
  4392  			}
  4393  
  4394  			errorMsg := fmt.Sprintf("%v", err)
  4395  			if !strings.Contains(errorMsg, invalidCase.expectedErrorMessage) {
  4396  				t.Errorf(`expected "%s" to be contained in the error message: %s`, invalidCase.expectedErrorMessage, errorMsg)
  4397  			}
  4398  		})
  4399  	}
  4400  }