sigs.k8s.io/cluster-api-provider-aws@v1.5.5/pkg/cloud/services/eks/nodegroup.go (about)

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package eks

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	"github.com/aws/aws-sdk-go/service/eks"
	"github.com/aws/aws-sdk-go/service/iam"
	"github.com/google/go-cmp/cmp"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/util/version"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1beta1"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1beta1"
	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/awserrors"
	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/converters"
	"sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/wait"
	"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

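// describeNodegroup looks up the EKS managed nodegroup for this scope and
// returns nil (with no error) when the nodegroup does not exist.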
func (s *NodegroupService) describeNodegroup() (*eks.Nodegroup, error) {
	eksClusterName := s.scope.KubernetesClusterName()
	nodegroupName := s.scope.NodegroupName()
	s.scope.V(2).Info("describing eks node group", "cluster", eksClusterName, "nodegroup", nodegroupName)
	input := &eks.DescribeNodegroupInput{
		ClusterName:   aws.String(eksClusterName),
		NodegroupName: aws.String(nodegroupName),
	}

	out, err := s.EKSClient.DescribeNodegroup(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case eks.ErrCodeResourceNotFoundException:
				return nil, nil
			default:
				return nil, errors.Wrap(err, "failed to describe nodegroup")
			}
		} else {
			return nil, errors.Wrap(err, "failed to describe nodegroup")
		}
	}

	return out.Nodegroup, nil
}

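// describeASGs returns the first Auto Scaling group backing the given
// nodegroup, or nil if the nodegroup does not list any ASGs yet.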
func (s *NodegroupService) describeASGs(ng *eks.Nodegroup) (*autoscaling.Group, error) {
	eksClusterName := s.scope.KubernetesClusterName()
	nodegroupName := s.scope.NodegroupName()
	s.scope.V(2).Info("describing node group ASG", "cluster", eksClusterName, "nodegroup", nodegroupName)

	if len(ng.Resources.AutoScalingGroups) == 0 {
		return nil, nil
	}

	input := &autoscaling.DescribeAutoScalingGroupsInput{
		AutoScalingGroupNames: []*string{
			ng.Resources.AutoScalingGroups[0].Name,
		},
	}

	out, err := s.AutoscalingClient.DescribeAutoScalingGroups(input)
	switch {
	case awserrors.IsNotFound(err):
		return nil, nil
	case err != nil:
		return nil, errors.Wrap(err, "failed to describe ASGs")
	case len(out.AutoScalingGroups) == 0:
		// err is nil in this branch, so errors.Wrap would return nil; return a real error instead.
		return nil, errors.New("no ASG found")
	}

	return out.AutoScalingGroups[0], nil
}

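// scalingConfig builds the nodegroup scaling configuration: the desired size
// comes from the MachinePool replicas (defaulting to 1) and min/max from the
// ManagedMachinePool scaling spec.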
func (s *NodegroupService) scalingConfig() *eks.NodegroupScalingConfig {
	var replicas int32 = 1
	if s.scope.MachinePool.Spec.Replicas != nil {
		replicas = *s.scope.MachinePool.Spec.Replicas
	}
	cfg := eks.NodegroupScalingConfig{
		DesiredSize: aws.Int64(int64(replicas)),
	}
	scaling := s.scope.ManagedMachinePool.Spec.Scaling
	if scaling == nil {
		return &cfg
	}
	if scaling.MaxSize != nil {
		cfg.MaxSize = aws.Int64(int64(*scaling.MaxSize))
	}
	if scaling.MinSize != nil {
		cfg.MinSize = aws.Int64(int64(*scaling.MinSize))
	}
	return &cfg
}

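// updateConfig converts the ManagedMachinePool update configuration into its
// EKS SDK representation.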
func (s *NodegroupService) updateConfig() *eks.NodegroupUpdateConfig {
	updateConfig := s.scope.ManagedMachinePool.Spec.UpdateConfig

	return converters.NodegroupUpdateconfigToSDK(updateConfig)
}

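// roleArn looks up the IAM role configured for the nodegroup and returns its ARN.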
func (s *NodegroupService) roleArn() (*string, error) {
	var role *iam.Role
	if s.scope.RoleName() != "" {
		var err error
		role, err = s.GetIAMRole(s.scope.RoleName())
		if err != nil {
			return nil, errors.Wrapf(err, "error getting node group IAM role: %s", s.scope.RoleName())
		}
	}
	if role == nil {
		// Avoid a nil pointer dereference when no role name is set on the scope.
		return nil, errors.New("no IAM role name set for the nodegroup")
	}
	return role.Arn, nil
}

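// ngTags returns the AWS tags to apply to the nodegroup: the additional tags
// plus an ownership tag for the given cluster.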
func ngTags(key string, additionalTags infrav1.Tags) map[string]string {
	tags := additionalTags.DeepCopy()
	tags[infrav1.ClusterAWSCloudProviderTagKey(key)] = string(infrav1.ResourceLifecycleOwned)
	return tags
}

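// remoteAccess builds the EKS remote access configuration (SSH key and source
// security groups) from the ManagedMachinePool spec, or returns nil when
// remote access is not configured.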
func (s *NodegroupService) remoteAccess() (*eks.RemoteAccessConfig, error) {
	pool := s.scope.ManagedMachinePool.Spec
	if pool.RemoteAccess == nil {
		return nil, nil
	}

	controlPlane := s.scope.ControlPlane

	// SourceSecurityGroups is validated to be empty if PublicAccess is true,
	// but just in case, we use an empty list to take advantage of the
	// documented API behavior.
	var sSGs = []string{}

	if !pool.RemoteAccess.Public {
		sSGs = pool.RemoteAccess.SourceSecurityGroups
		// We add the EKS created cluster security group to the allowed security
		// groups by default to prevent the API default of 0.0.0.0/0 from taking effect
		// in case SourceSecurityGroups is empty
		clusterSG, ok := controlPlane.Status.Network.SecurityGroups[ekscontrolplanev1.SecurityGroupCluster]
		if !ok {
			return nil, errors.Errorf("%s security group not found on control plane", ekscontrolplanev1.SecurityGroupCluster)
		}
		sSGs = append(sSGs, clusterSG.ID)

		if controlPlane.Spec.Bastion.Enabled {
			bastionSG, ok := controlPlane.Status.Network.SecurityGroups[infrav1.SecurityGroupBastion]
			if !ok {
				return nil, errors.Errorf("%s security group not found on control plane", infrav1.SecurityGroupBastion)
			}
			sSGs = append(
				sSGs,
				bastionSG.ID,
			)
		}
	}

	sshKeyName := pool.RemoteAccess.SSHKeyName
	if sshKeyName == nil {
		sshKeyName = controlPlane.Spec.SSHKeyName
	}

	return &eks.RemoteAccessConfig{
		SourceSecurityGroups: aws.StringSlice(sSGs),
		Ec2SshKey:            sshKeyName,
	}, nil
}

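// createNodegroup creates the EKS managed nodegroup described by the
// ManagedMachinePool spec and returns the AWS representation of it.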
func (s *NodegroupService) createNodegroup() (*eks.Nodegroup, error) {
	eksClusterName := s.scope.KubernetesClusterName()
	nodegroupName := s.scope.NodegroupName()
	additionalTags := s.scope.AdditionalTags()
	roleArn, err := s.roleArn()
	if err != nil {
		return nil, err
	}
	managedPool := s.scope.ManagedMachinePool.Spec
	tags := ngTags(s.scope.ClusterName(), additionalTags)

	remoteAccess, err := s.remoteAccess()
	if err != nil {
		return nil, errors.Wrap(err, "failed to create remote access configuration")
	}

	subnets, err := s.scope.SubnetIDs()
	if err != nil {
		return nil, fmt.Errorf("failed getting nodegroup subnets: %w", err)
	}

	input := &eks.CreateNodegroupInput{
		ScalingConfig: s.scalingConfig(),
		ClusterName:   aws.String(eksClusterName),
		NodegroupName: aws.String(nodegroupName),
		Subnets:       aws.StringSlice(subnets),
		NodeRole:      roleArn,
		Labels:        aws.StringMap(managedPool.Labels),
		Tags:          aws.StringMap(tags),
		RemoteAccess:  remoteAccess,
		UpdateConfig:  s.updateConfig(),
	}
	if managedPool.AMIType != nil {
		input.AmiType = aws.String(string(*managedPool.AMIType))
	}
	if managedPool.DiskSize != nil {
		input.DiskSize = aws.Int64(int64(*managedPool.DiskSize))
	}
	if managedPool.InstanceType != nil {
		input.InstanceTypes = []*string{managedPool.InstanceType}
	}
	if len(managedPool.Taints) > 0 {
		s.Info("adding taints to nodegroup", "nodegroup", nodegroupName)
		taints, err := converters.TaintsToSDK(managedPool.Taints)
		if err != nil {
			return nil, fmt.Errorf("converting taints: %w", err)
		}
		input.Taints = taints
	}
	if managedPool.CapacityType != nil {
		capacityType, err := converters.CapacityTypeToSDK(*managedPool.CapacityType)
		if err != nil {
			return nil, fmt.Errorf("converting capacity type: %w", err)
		}
		input.CapacityType = aws.String(capacityType)
	}

	if err := input.Validate(); err != nil {
		return nil, errors.Wrap(err, "created invalid CreateNodegroupInput")
	}

	out, err := s.EKSClient.CreateNodegroup(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			// TODO
			case eks.ErrCodeResourceNotFoundException:
				return nil, nil
			default:
				return nil, errors.Wrap(err, "failed to create nodegroup")
			}
		} else {
			return nil, errors.Wrap(err, "failed to create nodegroup")
		}
	}

	return out.Nodegroup, nil
}

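// deleteNodegroupAndWait deletes the EKS nodegroup and blocks until AWS
// reports the deletion as complete, recording the outcome on the nodegroup
// ready condition.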
func (s *NodegroupService) deleteNodegroupAndWait() (reterr error) {
	eksClusterName := s.scope.KubernetesClusterName()
	nodegroupName := s.scope.NodegroupName()
	if err := s.scope.NodegroupReadyFalse(clusterv1.DeletingReason, ""); err != nil {
		return err
	}
	defer func() {
		if reterr != nil {
			record.Warnf(
				s.scope.ManagedMachinePool, "FailedDeleteEKSNodegroup", "Failed to delete EKS nodegroup %s: %v", s.scope.NodegroupName(), reterr,
			)
			if err := s.scope.NodegroupReadyFalse("DeletingFailed", reterr.Error()); err != nil {
				reterr = err
			}
		} else if err := s.scope.NodegroupReadyFalse(clusterv1.DeletedReason, ""); err != nil {
			reterr = err
		}
	}()
	input := &eks.DeleteNodegroupInput{
		ClusterName:   aws.String(eksClusterName),
		NodegroupName: aws.String(nodegroupName),
	}
	if err := input.Validate(); err != nil {
		return errors.Wrap(err, "created invalid DeleteNodegroupInput")
	}

	_, err := s.EKSClient.DeleteNodegroup(input)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			// TODO
			case eks.ErrCodeResourceNotFoundException:
				return nil
			default:
				return errors.Wrap(err, "failed to delete nodegroup")
			}
		} else {
			return errors.Wrap(err, "failed to delete nodegroup")
		}
	}

	waitInput := &eks.DescribeNodegroupInput{
		ClusterName:   aws.String(eksClusterName),
		NodegroupName: aws.String(nodegroupName),
	}
	err = s.EKSClient.WaitUntilNodegroupDeleted(waitInput)
	if err != nil {
		return errors.Wrapf(err, "failed waiting for EKS nodegroup %s to delete", nodegroupName)
	}

	return nil
}

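// reconcileNodegroupVersion upgrades the nodegroup Kubernetes version (one
// minor version at a time) or its AMI release version when the spec is ahead
// of what is currently deployed.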
func (s *NodegroupService) reconcileNodegroupVersion(ng *eks.Nodegroup) error {
	var specVersion *version.Version
	if s.scope.Version() != nil {
		specVersion = parseEKSVersion(*s.scope.Version())
	}
	ngVersion := version.MustParseGeneric(*ng.Version)
	specAMI := s.scope.ManagedMachinePool.Spec.AMIVersion
	ngAMI := *ng.ReleaseVersion

	eksClusterName := s.scope.KubernetesClusterName()
	if (specVersion != nil && ngVersion.LessThan(specVersion)) || (specAMI != nil && *specAMI != ngAMI) {
		input := &eks.UpdateNodegroupVersionInput{
			ClusterName:   aws.String(eksClusterName),
			NodegroupName: aws.String(s.scope.NodegroupName()),
		}

		var updateMsg string
		// Either update k8s version or AMI version
		if specVersion != nil && ngVersion.LessThan(specVersion) {
			// NOTE: you can only upgrade by one minor version at a time. To upgrade from 1.14 to 1.16
			// we need to go 1.14 -> 1.15 and then 1.15 -> 1.16.
			input.Version = aws.String(versionToEKS(ngVersion.WithMinor(ngVersion.Minor() + 1)))
			updateMsg = fmt.Sprintf("to version %s", *input.Version)
		} else if specAMI != nil && *specAMI != ngAMI {
			input.ReleaseVersion = specAMI
			updateMsg = fmt.Sprintf("to AMI version %s", *input.ReleaseVersion)
		}

		if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) {
			if _, err := s.EKSClient.UpdateNodegroupVersion(input); err != nil {
				if aerr, ok := err.(awserr.Error); ok {
					return false, aerr
				}
				return false, err
			}
			record.Eventf(s.scope.ManagedMachinePool, "SuccessfulUpdateEKSNodegroup", "Updated EKS nodegroup %s %s", eksClusterName, updateMsg)
			return true, nil
		}); err != nil {
			record.Warnf(s.scope.ManagedMachinePool, "FailedUpdateEKSNodegroup", "failed to update the EKS nodegroup %s %s: %v", eksClusterName, updateMsg, err)
			return errors.Wrapf(err, "failed to update EKS nodegroup")
		}
	}
	return nil
}

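// createLabelUpdate computes the label additions and removals needed to bring
// the nodegroup labels in line with the spec, returning nil when no change is
// required.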
func createLabelUpdate(specLabels map[string]string, ng *eks.Nodegroup) *eks.UpdateLabelsPayload {
	current := ng.Labels
	payload := eks.UpdateLabelsPayload{
		AddOrUpdateLabels: map[string]*string{},
	}
	for k, v := range specLabels {
		if currentV, ok := current[k]; !ok || currentV == nil || v != *currentV {
			payload.AddOrUpdateLabels[k] = aws.String(v)
		}
	}
	for k := range current {
		if _, ok := specLabels[k]; !ok {
			payload.RemoveLabels = append(payload.RemoveLabels, aws.String(k))
		}
	}
	if len(payload.AddOrUpdateLabels) > 0 || len(payload.RemoveLabels) > 0 {
		return &payload
	}
	return nil
}

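// createTaintsUpdate computes the taint additions and removals needed to bring
// the nodegroup taints in line with the spec, returning nil when no change is
// required.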
func (s *NodegroupService) createTaintsUpdate(specTaints expinfrav1.Taints, ng *eks.Nodegroup) (*eks.UpdateTaintsPayload, error) {
	s.V(2).Info("Creating taints update for node group", "name", *ng.NodegroupName, "num_current", len(ng.Taints), "num_required", len(specTaints))
	current, err := converters.TaintsFromSDK(ng.Taints)
	if err != nil {
		return nil, fmt.Errorf("converting taints: %w", err)
	}
	payload := eks.UpdateTaintsPayload{}
	for _, specTaint := range specTaints {
		st := specTaint.DeepCopy()
		if !current.Contains(st) {
			sdkTaint, err := converters.TaintToSDK(*st)
			if err != nil {
				return nil, fmt.Errorf("converting taint to sdk: %w", err)
			}
			payload.AddOrUpdateTaints = append(payload.AddOrUpdateTaints, sdkTaint)
		}
	}
	for _, currentTaint := range current {
		ct := currentTaint.DeepCopy()
		if !specTaints.Contains(ct) {
			sdkTaint, err := converters.TaintToSDK(*ct)
			if err != nil {
				return nil, fmt.Errorf("converting taint to sdk: %w", err)
			}
			payload.RemoveTaints = append(payload.RemoveTaints, sdkTaint)
		}
	}
	if len(payload.AddOrUpdateTaints) > 0 || len(payload.RemoveTaints) > 0 {
		s.V(2).Info("Node group taints update required", "name", *ng.NodegroupName, "addupdate", len(payload.AddOrUpdateTaints), "remove", len(payload.RemoveTaints))
		return &payload, nil
	}

	s.V(2).Info("No updates required for node group taints", "name", *ng.NodegroupName)
	return nil, nil
}

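// reconcileNodegroupConfig updates the nodegroup labels, taints, scaling
// configuration and update configuration when they drift from the spec.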
func (s *NodegroupService) reconcileNodegroupConfig(ng *eks.Nodegroup) error {
	eksClusterName := s.scope.KubernetesClusterName()
	s.V(2).Info("reconciling node group config", "cluster", eksClusterName, "name", *ng.NodegroupName)

	managedPool := s.scope.ManagedMachinePool.Spec
	input := &eks.UpdateNodegroupConfigInput{
		ClusterName:   aws.String(eksClusterName),
		NodegroupName: aws.String(managedPool.EKSNodegroupName),
	}
	var needsUpdate bool
	if labelPayload := createLabelUpdate(managedPool.Labels, ng); labelPayload != nil {
		s.V(2).Info("Nodegroup labels need an update", "nodegroup", ng.NodegroupName)
		input.Labels = labelPayload
		needsUpdate = true
	}
	taintsPayload, err := s.createTaintsUpdate(managedPool.Taints, ng)
	if err != nil {
		return fmt.Errorf("creating taints update payload: %w", err)
	}
	if taintsPayload != nil {
		s.V(2).Info("nodegroup taints need updating")
		input.Taints = taintsPayload
		needsUpdate = true
	}
	if machinePool := s.scope.MachinePool.Spec; machinePool.Replicas == nil {
		if ng.ScalingConfig.DesiredSize != nil && *ng.ScalingConfig.DesiredSize != 1 {
			s.V(2).Info("Nodegroup desired size differs from spec, updating scaling configuration", "nodegroup", ng.NodegroupName)
			input.ScalingConfig = s.scalingConfig()
			needsUpdate = true
		}
	} else if ng.ScalingConfig.DesiredSize == nil || int64(*machinePool.Replicas) != *ng.ScalingConfig.DesiredSize {
		s.V(2).Info("Nodegroup has no desired size or differs from replicas, updating scaling configuration", "nodegroup", ng.NodegroupName)
		input.ScalingConfig = s.scalingConfig()
		needsUpdate = true
	}
	if managedPool.Scaling != nil && ((aws.Int64Value(ng.ScalingConfig.MaxSize) != int64(aws.Int32Value(managedPool.Scaling.MaxSize))) ||
		(aws.Int64Value(ng.ScalingConfig.MinSize) != int64(aws.Int32Value(managedPool.Scaling.MinSize)))) {
		s.V(2).Info("Nodegroup min/max differ from spec, updating scaling configuration", "nodegroup", ng.NodegroupName)
		input.ScalingConfig = s.scalingConfig()
		needsUpdate = true
	}
	currentUpdateConfig := converters.NodegroupUpdateconfigFromSDK(ng.UpdateConfig)
	if !cmp.Equal(managedPool.UpdateConfig, currentUpdateConfig) {
		s.V(2).Info("Nodegroup update configuration differs from spec, updating the nodegroup update config", "nodegroup", ng.NodegroupName)
		input.UpdateConfig = s.updateConfig()
		needsUpdate = true
	}
	if !needsUpdate {
		s.V(2).Info("node group config update not needed", "cluster", eksClusterName, "name", *ng.NodegroupName)
		return nil
	}
	if err := input.Validate(); err != nil {
		return errors.Wrap(err, "created invalid UpdateNodegroupConfigInput")
	}

	_, err = s.EKSClient.UpdateNodegroupConfig(input)
	if err != nil {
		return errors.Wrap(err, "failed to update nodegroup config")
	}

	return nil
}

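// reconcileNodegroup ensures the nodegroup exists and is owned by this
// cluster, waits for it to become active, and then reconciles its version,
// configuration and tags.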
func (s *NodegroupService) reconcileNodegroup() error {
	ng, err := s.describeNodegroup()
	if err != nil {
		return errors.Wrap(err, "failed to describe nodegroup")
	}

	if eksClusterName, eksNodegroupName := s.scope.KubernetesClusterName(), s.scope.NodegroupName(); ng == nil {
		ng, err = s.createNodegroup()
		if err != nil {
			return errors.Wrap(err, "failed to create nodegroup")
		}
		s.scope.Info("Created EKS nodegroup in AWS", "cluster-name", eksClusterName, "nodegroup-name", eksNodegroupName)
	} else {
		tagKey := infrav1.ClusterAWSCloudProviderTagKey(s.scope.ClusterName())
		ownedTag := ng.Tags[tagKey]
		if ownedTag == nil {
			return errors.Errorf("owner of %s mismatch: %s", eksNodegroupName, s.scope.ClusterName())
		}
		s.scope.V(2).Info("Found owned EKS nodegroup in AWS", "cluster-name", eksClusterName, "nodegroup-name", eksNodegroupName)
	}

	if err := s.setStatus(ng); err != nil {
		return errors.Wrap(err, "failed to set status")
	}

	switch *ng.Status {
	case eks.NodegroupStatusCreating, eks.NodegroupStatusUpdating:
		ng, err = s.waitForNodegroupActive()
	default:
		break
	}

	if err != nil {
		return errors.Wrap(err, "failed to wait for nodegroup to be active")
	}

	if err := s.reconcileNodegroupVersion(ng); err != nil {
		return errors.Wrap(err, "failed to reconcile nodegroup version")
	}

	if err := s.reconcileNodegroupConfig(ng); err != nil {
		return errors.Wrap(err, "failed to reconcile nodegroup config")
	}

	if err := s.reconcileTags(ng); err != nil {
		return errors.Wrapf(err, "failed to reconcile nodegroup tags")
	}

	if err := s.reconcileASGTags(ng); err != nil {
		return errors.Wrapf(err, "failed to reconcile asg tags")
	}

	return nil
}

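// setStatus mirrors the AWS nodegroup state onto the ManagedMachinePool
// status, including readiness, failure message, replica count and provider
// IDs, and patches the object.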
func (s *NodegroupService) setStatus(ng *eks.Nodegroup) error {
	managedPool := s.scope.ManagedMachinePool
	switch *ng.Status {
	case eks.NodegroupStatusDeleting:
		managedPool.Status.Ready = false
	case eks.NodegroupStatusCreateFailed, eks.NodegroupStatusDeleteFailed:
		managedPool.Status.Ready = false
		// TODO FailureReason
		failureMsg := fmt.Sprintf("EKS nodegroup in failed %s status", *ng.Status)
		managedPool.Status.FailureMessage = &failureMsg
	case eks.NodegroupStatusActive:
		managedPool.Status.Ready = true
		managedPool.Status.FailureMessage = nil
		// TODO FailureReason
	case eks.NodegroupStatusCreating:
		managedPool.Status.Ready = false
	case eks.NodegroupStatusUpdating:
		managedPool.Status.Ready = true
	default:
		return errors.Errorf("unexpected EKS nodegroup status %s", *ng.Status)
	}
	if managedPool.Status.Ready && ng.Resources != nil && len(ng.Resources.AutoScalingGroups) > 0 {
		req := autoscaling.DescribeAutoScalingGroupsInput{}
		for _, asg := range ng.Resources.AutoScalingGroups {
			req.AutoScalingGroupNames = append(req.AutoScalingGroupNames, asg.Name)
		}
		groups, err := s.AutoscalingClient.DescribeAutoScalingGroups(&req)
		if err != nil {
			return errors.Wrap(err, "failed to describe AutoScalingGroup for nodegroup")
		}

		var replicas int32
		var providerIDList []string
		for _, group := range groups.AutoScalingGroups {
			replicas += int32(len(group.Instances))
			for _, instance := range group.Instances {
				providerIDList = append(providerIDList, fmt.Sprintf("aws:///%s/%s", *instance.AvailabilityZone, *instance.InstanceId))
			}
		}
		managedPool.Spec.ProviderIDList = providerIDList
		managedPool.Status.Replicas = replicas
	}
	if err := s.scope.PatchObject(); err != nil {
		return errors.Wrap(err, "failed to update nodegroup")
	}
	return nil
}

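// waitForNodegroupActive blocks until the EKS nodegroup reports an ACTIVE
// status, then refreshes its description and status.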
func (s *NodegroupService) waitForNodegroupActive() (*eks.Nodegroup, error) {
	eksClusterName := s.scope.KubernetesClusterName()
	eksNodegroupName := s.scope.NodegroupName()
	req := eks.DescribeNodegroupInput{
		ClusterName:   aws.String(eksClusterName),
		NodegroupName: aws.String(eksNodegroupName),
	}
	if err := s.EKSClient.WaitUntilNodegroupActive(&req); err != nil {
		return nil, errors.Wrapf(err, "failed to wait for EKS nodegroup %q", *req.NodegroupName)
	}

	s.scope.Info("EKS nodegroup is now available", "nodegroup-name", eksNodegroupName)

	ng, err := s.describeNodegroup()
	if err != nil {
		return nil, errors.Wrap(err, "failed to describe EKS nodegroup")
	}
	if err := s.setStatus(ng); err != nil {
		return nil, errors.Wrap(err, "failed to set status")
	}

	return ng, nil
}