sigs.k8s.io/cluster-api-provider-azure@v1.14.3/azure/scope/machinepoolmachine.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package scope
    18  
    19  import (
    20  	"context"
    21  	"reflect"
    22  	"strings"
    23  
    24  	"github.com/pkg/errors"
    25  	corev1 "k8s.io/api/core/v1"
    26  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    27  	"k8s.io/utils/ptr"
    28  	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
    29  	"sigs.k8s.io/cluster-api-provider-azure/azure"
    30  	"sigs.k8s.io/cluster-api-provider-azure/azure/converters"
    31  	"sigs.k8s.io/cluster-api-provider-azure/azure/services/scalesetvms"
    32  	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
    33  	azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure"
    34  	"sigs.k8s.io/cluster-api-provider-azure/util/futures"
    35  	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
    36  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    37  	"sigs.k8s.io/cluster-api/controllers/noderefutil"
    38  	"sigs.k8s.io/cluster-api/controllers/remote"
    39  	capierrors "sigs.k8s.io/cluster-api/errors"
    40  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    41  	"sigs.k8s.io/cluster-api/util/conditions"
    42  	"sigs.k8s.io/cluster-api/util/patch"
    43  	"sigs.k8s.io/controller-runtime/pkg/client"
    44  )
    45  
    46  const (
     47  	// MachinePoolMachineScopeName is the sourceName, or more specifically the UserAgent, of the client used in cordon and drain.
    48  	MachinePoolMachineScopeName = "azuremachinepoolmachine-scope"
    49  )
    50  
    51  type (
    52  	nodeGetter interface {
    53  		GetNodeByProviderID(ctx context.Context, providerID string) (*corev1.Node, error)
    54  		GetNodeByObjectReference(ctx context.Context, nodeRef corev1.ObjectReference) (*corev1.Node, error)
    55  	}
    56  
    57  	workloadClusterProxy struct {
    58  		Client  client.Client
    59  		Cluster client.ObjectKey
    60  	}
    61  
     62  	// MachinePoolMachineScopeParams defines the input parameters used to create a new MachinePoolMachineScope.
    63  	MachinePoolMachineScopeParams struct {
    64  		AzureMachinePool        *infrav1exp.AzureMachinePool
    65  		AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine
    66  		Client                  client.Client
    67  		ClusterScope            azure.ClusterScoper
    68  		MachinePool             *expv1.MachinePool
    69  		Machine                 *clusterv1.Machine
    70  
     71  		// workloadNodeGetter is only used for testing purposes and provides a way to mock requests to the workload cluster.
    72  		workloadNodeGetter nodeGetter
    73  	}
    74  
     75  	// MachinePoolMachineScope defines the scope for a machine pool machine.
    76  	MachinePoolMachineScope struct {
    77  		azure.ClusterScoper
    78  		AzureMachinePoolMachine *infrav1exp.AzureMachinePoolMachine
    79  		AzureMachinePool        *infrav1exp.AzureMachinePool
    80  		MachinePool             *expv1.MachinePool
    81  		Machine                 *clusterv1.Machine
    82  		MachinePoolScope        *MachinePoolScope
    83  		client                  client.Client
    84  		patchHelper             *patch.Helper
    85  		instance                *azure.VMSSVM
    86  
     87  		// workloadNodeGetter is only used for testing purposes and provides a way to mock requests to the workload cluster.
    88  		workloadNodeGetter nodeGetter
    89  	}
    90  )
    91  
    92  // NewMachinePoolMachineScope creates a new MachinePoolMachineScope from the supplied parameters.
    93  // This is meant to be called for each reconcile iteration.
    94  func NewMachinePoolMachineScope(params MachinePoolMachineScopeParams) (*MachinePoolMachineScope, error) {
    95  	if params.Client == nil {
     96  		return nil, errors.New("client is required when creating a MachinePoolMachineScope")
    97  	}
    98  
    99  	if params.ClusterScope == nil {
    100  		return nil, errors.New("cluster scope is required when creating a MachinePoolMachineScope")
   101  	}
   102  
   103  	if params.MachinePool == nil {
    104  		return nil, errors.New("machine pool is required when creating a MachinePoolMachineScope")
   105  	}
   106  
   107  	if params.AzureMachinePool == nil {
    108  		return nil, errors.New("azure machine pool is required when creating a MachinePoolMachineScope")
   109  	}
   110  
   111  	if params.AzureMachinePoolMachine == nil {
    112  		return nil, errors.New("azure machine pool machine is required when creating a MachinePoolMachineScope")
   113  	}
   114  
   115  	if params.Machine == nil {
    116  		return nil, errors.New("machine is required when creating a MachinePoolMachineScope")
   117  	}
   118  
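         	// If no nodeGetter was injected (tests inject a mock here), default to a workload cluster proxy that reads nodes from the workload cluster.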
   119  	if params.workloadNodeGetter == nil {
   120  		params.workloadNodeGetter = newWorkloadClusterProxy(
   121  			params.Client,
   122  			client.ObjectKey{
   123  				Namespace: params.MachinePool.Namespace,
   124  				Name:      params.ClusterScope.ClusterName(),
   125  			},
   126  		)
   127  	}
   128  
   129  	mpScope, err := NewMachinePoolScope(MachinePoolScopeParams{
   130  		Client:           params.Client,
   131  		MachinePool:      params.MachinePool,
   132  		AzureMachinePool: params.AzureMachinePool,
   133  		ClusterScope:     params.ClusterScope,
   134  	})
   135  	if err != nil {
   136  		return nil, errors.Wrap(err, "failed to build machine pool scope")
   137  	}
   138  
   139  	helper, err := patch.NewHelper(params.AzureMachinePoolMachine, params.Client)
   140  	if err != nil {
   141  		return nil, errors.Wrap(err, "failed to init patch helper")
   142  	}
   143  
   144  	return &MachinePoolMachineScope{
   145  		AzureMachinePool:        params.AzureMachinePool,
   146  		AzureMachinePoolMachine: params.AzureMachinePoolMachine,
   147  		ClusterScoper:           params.ClusterScope,
   148  		MachinePool:             params.MachinePool,
   149  		Machine:                 params.Machine,
   150  		MachinePoolScope:        mpScope,
   151  		client:                  params.Client,
   152  		patchHelper:             helper,
   153  		workloadNodeGetter:      params.workloadNodeGetter,
   154  	}, nil
   155  }
   156  
   157  // ScaleSetVMSpec returns the VMSS VM spec.
   158  func (s *MachinePoolMachineScope) ScaleSetVMSpec() azure.ResourceSpecGetter {
   159  	spec := &scalesetvms.ScaleSetVMSpec{
   160  		Name:          s.Name(),
   161  		InstanceID:    s.InstanceID(),
   162  		ResourceGroup: s.NodeResourceGroup(),
   163  		ScaleSetName:  s.ScaleSetName(),
   164  		ProviderID:    s.ProviderID(),
   165  		IsFlex:        s.OrchestrationMode() == infrav1.FlexibleOrchestrationMode,
   166  	}
   167  
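         	// VMSS Flex instances are addressed by their VM resource ID rather than an instance ID, so derive it by trimming the provider ID prefix.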
   168  	if spec.IsFlex {
   169  		spec.ResourceID = strings.TrimPrefix(spec.ProviderID, azureutil.ProviderIDPrefix)
   170  	}
   171  
   172  	return spec
   173  }
   174  
   175  // Name is the name of the Machine Pool Machine.
   176  func (s *MachinePoolMachineScope) Name() string {
   177  	return s.AzureMachinePoolMachine.Name
   178  }
   179  
   180  // InstanceID is the unique ID of the machine within the Machine Pool.
   181  func (s *MachinePoolMachineScope) InstanceID() string {
   182  	return s.AzureMachinePoolMachine.Spec.InstanceID
   183  }
   184  
   185  // ScaleSetName is the name of the VMSS.
   186  func (s *MachinePoolMachineScope) ScaleSetName() string {
   187  	return s.MachinePoolScope.Name()
   188  }
   189  
   190  // OrchestrationMode is the VMSS orchestration mode, either Uniform or Flexible.
   191  func (s *MachinePoolMachineScope) OrchestrationMode() infrav1.OrchestrationModeType {
   192  	return s.AzureMachinePool.Spec.OrchestrationMode
   193  }
   194  
   195  // SetLongRunningOperationState will set the future on the AzureMachinePoolMachine status to allow the resource to continue
   196  // in the next reconciliation.
   197  func (s *MachinePoolMachineScope) SetLongRunningOperationState(future *infrav1.Future) {
   198  	futures.Set(s.AzureMachinePoolMachine, future)
   199  }
   200  
   201  // GetLongRunningOperationState will get the future on the AzureMachinePoolMachine status.
   202  func (s *MachinePoolMachineScope) GetLongRunningOperationState(name, service, futureType string) *infrav1.Future {
   203  	return futures.Get(s.AzureMachinePoolMachine, name, service, futureType)
   204  }
   205  
   206  // DeleteLongRunningOperationState will delete the future from the AzureMachinePoolMachine status.
   207  func (s *MachinePoolMachineScope) DeleteLongRunningOperationState(name, service, futureType string) {
   208  	futures.Delete(s.AzureMachinePoolMachine, name, service, futureType)
   209  }
   210  
   211  // UpdateDeleteStatus updates a condition on the AzureMachinePoolMachine status after a DELETE operation.
   212  func (s *MachinePoolMachineScope) UpdateDeleteStatus(condition clusterv1.ConditionType, service string, err error) {
   213  	switch {
   214  	case err == nil:
   215  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletedReason, clusterv1.ConditionSeverityInfo, "%s successfully deleted", service)
   216  	case azure.IsOperationNotDoneError(err):
   217  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletingReason, clusterv1.ConditionSeverityInfo, "%s deleting", service)
   218  	default:
   219  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.DeletionFailedReason, clusterv1.ConditionSeverityError, "%s failed to delete. err: %s", service, err.Error())
   220  	}
   221  }
   222  
   223  // UpdatePutStatus updates a condition on the AzureMachinePoolMachine status after a PUT operation.
   224  func (s *MachinePoolMachineScope) UpdatePutStatus(condition clusterv1.ConditionType, service string, err error) {
   225  	switch {
   226  	case err == nil:
   227  		conditions.MarkTrue(s.AzureMachinePoolMachine, condition)
   228  	case azure.IsOperationNotDoneError(err):
   229  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.CreatingReason, clusterv1.ConditionSeverityInfo, "%s creating or updating", service)
   230  	default:
   231  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to create or update. err: %s", service, err.Error())
   232  	}
   233  }
   234  
   235  // UpdatePatchStatus updates a condition on the AzureMachinePoolMachine status after a PATCH operation.
   236  func (s *MachinePoolMachineScope) UpdatePatchStatus(condition clusterv1.ConditionType, service string, err error) {
   237  	switch {
   238  	case err == nil:
   239  		conditions.MarkTrue(s.AzureMachinePoolMachine, condition)
   240  	case azure.IsOperationNotDoneError(err):
   241  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.UpdatingReason, clusterv1.ConditionSeverityInfo, "%s updating", service)
   242  	default:
   243  		conditions.MarkFalse(s.AzureMachinePoolMachine, condition, infrav1.FailedReason, clusterv1.ConditionSeverityError, "%s failed to update. err: %s", service, err.Error())
   244  	}
   245  }
   246  
    247  // SetVMSSVM updates the scope with the current state of the VMSS VM.
   248  func (s *MachinePoolMachineScope) SetVMSSVM(instance *azure.VMSSVM) {
   249  	s.instance = instance
   250  }
   251  
    252  // SetVMSSVMState updates the scope with the current provisioning state of the VMSS VM.
   253  func (s *MachinePoolMachineScope) SetVMSSVMState(state infrav1.ProvisioningState) {
   254  	if s.instance != nil {
   255  		s.instance.State = state
   256  	}
   257  }
   258  
   259  // ProvisioningState returns the AzureMachinePoolMachine provisioning state.
   260  func (s *MachinePoolMachineScope) ProvisioningState() infrav1.ProvisioningState {
   261  	if s.AzureMachinePoolMachine.Status.ProvisioningState != nil {
   262  		return *s.AzureMachinePoolMachine.Status.ProvisioningState
   263  	}
   264  	return ""
   265  }
   266  
    267  // IsReady indicates that the machine has been successfully provisioned and has a node ref associated.
   268  func (s *MachinePoolMachineScope) IsReady() bool {
   269  	state := s.AzureMachinePoolMachine.Status.ProvisioningState
   270  	return s.AzureMachinePoolMachine.Status.Ready && state != nil && *state == infrav1.Succeeded
   271  }
   272  
   273  // SetFailureMessage sets the AzureMachinePoolMachine status failure message.
   274  func (s *MachinePoolMachineScope) SetFailureMessage(v error) {
   275  	s.AzureMachinePoolMachine.Status.FailureMessage = ptr.To(v.Error())
   276  }
   277  
   278  // SetFailureReason sets the AzureMachinePoolMachine status failure reason.
   279  func (s *MachinePoolMachineScope) SetFailureReason(v capierrors.MachineStatusError) {
   280  	s.AzureMachinePoolMachine.Status.FailureReason = &v
   281  }
   282  
    283  // ProviderID returns the providerID of the AzureMachinePoolMachine from Spec.ProviderID.
   284  func (s *MachinePoolMachineScope) ProviderID() string {
   285  	return s.AzureMachinePoolMachine.Spec.ProviderID
   286  }
   287  
   288  // updateDeleteMachineAnnotation sets the clusterv1.DeleteMachineAnnotation on the AzureMachinePoolMachine if it exists on the owner Machine.
   289  func (s *MachinePoolMachineScope) updateDeleteMachineAnnotation() {
   290  	if s.Machine.Annotations != nil {
   291  		if _, ok := s.Machine.Annotations[clusterv1.DeleteMachineAnnotation]; ok {
   292  			if s.AzureMachinePoolMachine.Annotations == nil {
   293  				s.AzureMachinePoolMachine.Annotations = map[string]string{}
   294  			}
   295  
   296  			s.AzureMachinePoolMachine.Annotations[clusterv1.DeleteMachineAnnotation] = "true"
   297  		}
   298  	}
   299  }
   300  
    301  // PatchObject persists the AzureMachinePoolMachine spec and status.
   302  func (s *MachinePoolMachineScope) PatchObject(ctx context.Context) error {
   303  	conditions.SetSummary(s.AzureMachinePoolMachine)
   304  
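         	// Declare the conditions owned by this controller so the patch helper can resolve conflicts on them when patching.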
   305  	return s.patchHelper.Patch(
   306  		ctx,
   307  		s.AzureMachinePoolMachine,
   308  		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
   309  			clusterv1.ReadyCondition,
   310  			clusterv1.MachineNodeHealthyCondition,
   311  		}})
   312  }
   313  
    314  // Close updates and persists the state of the AzureMachinePoolMachine.
   315  func (s *MachinePoolMachineScope) Close(ctx context.Context) error {
   316  	ctx, _, done := tele.StartSpanWithLogger(
   317  		ctx,
   318  		"scope.MachinePoolMachineScope.Close",
   319  	)
   320  	defer done()
   321  
   322  	s.updateDeleteMachineAnnotation()
   323  
   324  	return s.PatchObject(ctx)
   325  }
   326  
   327  // UpdateNodeStatus updates AzureMachinePoolMachine conditions and ready status. It will also update the node ref and the Kubernetes
   328  // version of the VM instance if the node is found.
   329  // Note: This func should be called at the end of a reconcile request and after updating the scope with the most recent Azure data.
   330  func (s *MachinePoolMachineScope) UpdateNodeStatus(ctx context.Context) error {
   331  	ctx, log, done := tele.StartSpanWithLogger(
   332  		ctx,
   333  		"scope.MachinePoolMachineScope.UpdateNodeStatus",
   334  	)
   335  	defer done()
   336  
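         	// Reflect the VM's bootstrapping state, when known, on the BootstrapSucceeded condition.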
   337  	if s.instance != nil {
   338  		switch s.instance.BootstrappingState {
   339  		case infrav1.Creating:
   340  			conditions.MarkFalse(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapInProgressReason, clusterv1.ConditionSeverityInfo, "VM bootstrapping")
   341  		case infrav1.Failed:
   342  			log.Info("VM bootstrapping failed")
   343  			conditions.MarkFalse(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityInfo, "VM bootstrapping failed")
   344  		case infrav1.Succeeded:
   345  			log.Info("VM bootstrapping succeeded")
   346  			conditions.MarkTrue(s.AzureMachinePoolMachine, infrav1.BootstrapSucceededCondition)
   347  		}
   348  	}
   349  
   350  	var node *corev1.Node
   351  	nodeRef := s.AzureMachinePoolMachine.Status.NodeRef
   352  
   353  	// See if we can fetch a node using either the providerID or the nodeRef
   354  	node, found, err := s.GetNode(ctx)
   355  	switch {
   356  	case err != nil && apierrors.IsNotFound(err) && nodeRef != nil && nodeRef.Name != "":
   357  		// Node was not found due to 404 when finding by ObjectReference.
   358  		conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityError, "")
   359  	case err != nil:
   360  		// Failed due to an unexpected error
   361  		return err
   362  	case !found && s.ProviderID() == "":
   363  		// Node was not found due to not having a providerID set
   364  		conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.WaitingForNodeRefReason, clusterv1.ConditionSeverityInfo, "")
   365  	case !found && s.ProviderID() != "":
   366  		// Node was not found due to not finding a matching node by providerID
   367  		conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeProvisioningReason, clusterv1.ConditionSeverityInfo, "")
   368  	default:
   369  		// Node was found. Check if it is ready.
   370  		nodeReady := noderefutil.IsNodeReady(node)
   371  		s.AzureMachinePoolMachine.Status.Ready = nodeReady
   372  		if nodeReady {
   373  			conditions.MarkTrue(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition)
   374  		} else {
   375  			conditions.MarkFalse(s.AzureMachinePoolMachine, clusterv1.MachineNodeHealthyCondition, clusterv1.NodeConditionsFailedReason, clusterv1.ConditionSeverityWarning, "")
   376  		}
   377  
   378  		s.AzureMachinePoolMachine.Status.NodeRef = &corev1.ObjectReference{
   379  			Kind:       node.Kind,
   380  			Namespace:  node.Namespace,
   381  			Name:       node.Name,
   382  			UID:        node.UID,
   383  			APIVersion: node.APIVersion,
   384  		}
   385  
   386  		s.AzureMachinePoolMachine.Status.Version = node.Status.NodeInfo.KubeletVersion
   387  	}
   388  
   389  	return nil
   390  }
   391  
    392  // UpdateInstanceStatus updates the provisioning state of the AzureMachinePoolMachine, and whether it has the latest
    393  // model applied, using the VMSS VM instance.
   394  // Note: This func should be called at the end of a reconcile request and after updating the scope with the most recent Azure data.
   395  func (s *MachinePoolMachineScope) UpdateInstanceStatus(ctx context.Context) error {
   396  	ctx, _, done := tele.StartSpanWithLogger(
   397  		ctx,
   398  		"scope.MachinePoolMachineScope.UpdateInstanceStatus",
   399  	)
   400  	defer done()
   401  
   402  	if s.instance != nil {
   403  		s.AzureMachinePoolMachine.Status.ProvisioningState = &s.instance.State
   404  		hasLatestModel, err := s.hasLatestModelApplied(ctx)
   405  		if err != nil {
   406  			return errors.Wrap(err, "failed to determine if the VMSS instance has the latest model")
   407  		}
   408  
   409  		s.AzureMachinePoolMachine.Status.LatestModelApplied = hasLatestModel
   410  	}
   411  
   412  	return nil
   413  }
   414  
   415  func (s *MachinePoolMachineScope) hasLatestModelApplied(ctx context.Context) (bool, error) {
   416  	ctx, _, done := tele.StartSpanWithLogger(
   417  		ctx,
   418  		"scope.MachinePoolMachineScope.hasLatestModelApplied",
   419  	)
   420  	defer done()
   421  
   422  	if s.instance == nil {
   423  		return false, errors.New("instance must not be nil")
   424  	}
   425  
   426  	image, err := s.MachinePoolScope.GetVMImage(ctx)
   427  	if err != nil {
   428  		return false, errors.Wrap(err, "unable to build vm image information from MachinePoolScope")
   429  	}
   430  
    431  	// This should never happen, as GetVMImage only returns a nil image when err != nil. Guard just in case.
   432  	if image == nil {
   433  		return false, errors.New("machinepoolscope image must not be nil")
   434  	}
   435  
   436  	// check if image.ID is actually a compute gallery image
   437  	if s.instance.Image.ComputeGallery != nil && image.ID != nil {
   438  		newImage := converters.IDImageRefToImage(*image.ID)
   439  
   440  		// this means the ID was a compute gallery image ID
   441  		if newImage.ComputeGallery != nil {
   442  			return reflect.DeepEqual(s.instance.Image, newImage), nil
   443  		}
   444  	}
   445  
   446  	// if the images match, then the VM is of the same model
   447  	return reflect.DeepEqual(s.instance.Image, *image), nil
   448  }
   449  
   450  func newWorkloadClusterProxy(c client.Client, cluster client.ObjectKey) *workloadClusterProxy {
   451  	return &workloadClusterProxy{
   452  		Client:  c,
   453  		Cluster: cluster,
   454  	}
   455  }
   456  
    457  // GetNode returns the node associated with the AzureMachinePoolMachine, a boolean indicating whether the node was
    458  // found, and an error if one occurred.
   459  func (s *MachinePoolMachineScope) GetNode(ctx context.Context) (*corev1.Node, bool, error) {
   460  	ctx, _, done := tele.StartSpanWithLogger(
   461  		ctx,
   462  		"scope.MachinePoolMachineScope.GetNode",
   463  	)
   464  	defer done()
   465  
   466  	var (
   467  		nodeRef = s.AzureMachinePoolMachine.Status.NodeRef
   468  		node    *corev1.Node
   469  		err     error
   470  	)
   471  
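         	// Look the node up by providerID until a nodeRef has been recorded on the status; after that, use the nodeRef directly.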
   472  	if nodeRef == nil || nodeRef.Name == "" {
   473  		node, err = s.workloadNodeGetter.GetNodeByProviderID(ctx, s.ProviderID())
   474  		if err != nil {
   475  			return nil, false, errors.Wrap(err, "failed to get node by providerID")
   476  		}
   477  	} else {
   478  		node, err = s.workloadNodeGetter.GetNodeByObjectReference(ctx, *nodeRef)
   479  		if err != nil {
   480  			return nil, false, errors.Wrap(err, "failed to get node by object reference")
   481  		}
   482  	}
   483  
   484  	if node == nil {
   485  		return nil, false, nil
   486  	}
   487  
   488  	return node, true, nil
   489  }
   490  
   491  // GetNodeByObjectReference will fetch a *corev1.Node via a node object reference.
   492  func (np *workloadClusterProxy) GetNodeByObjectReference(ctx context.Context, nodeRef corev1.ObjectReference) (*corev1.Node, error) {
   493  	workloadClient, err := getWorkloadClient(ctx, np.Client, np.Cluster)
   494  	if err != nil {
   495  		return nil, errors.Wrap(err, "failed to create the workload cluster client")
   496  	}
   497  
   498  	var node corev1.Node
   499  	err = workloadClient.Get(ctx, client.ObjectKey{
   500  		Namespace: nodeRef.Namespace,
   501  		Name:      nodeRef.Name,
   502  	}, &node)
   503  
   504  	return &node, err
   505  }
   506  
    507  // GetNodeByProviderID will fetch a node from the workload cluster by its providerID.
   508  func (np *workloadClusterProxy) GetNodeByProviderID(ctx context.Context, providerID string) (*corev1.Node, error) {
   509  	ctx, _, done := tele.StartSpanWithLogger(
   510  		ctx,
   511  		"scope.MachinePoolMachineScope.getNode",
   512  	)
   513  	defer done()
   514  
   515  	workloadClient, err := getWorkloadClient(ctx, np.Client, np.Cluster)
   516  	if err != nil {
   517  		return nil, errors.Wrap(err, "failed to create the workload cluster client")
   518  	}
   519  
   520  	return getNodeByProviderID(ctx, workloadClient, providerID)
   521  }
   522  
   523  func getNodeByProviderID(ctx context.Context, workloadClient client.Client, providerID string) (*corev1.Node, error) {
   524  	ctx, _, done := tele.StartSpanWithLogger(
   525  		ctx,
   526  		"scope.MachinePoolMachineScope.getNodeRefForProviderID",
   527  	)
   528  	defer done()
   529  
   530  	nodeList := corev1.NodeList{}
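         	// Page through the node list with the continue token until a node with a matching providerID is found.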
   531  	for {
   532  		if err := workloadClient.List(ctx, &nodeList, client.Continue(nodeList.Continue)); err != nil {
    533  			return nil, errors.Wrap(err, "failed to list nodes")
   534  		}
   535  
   536  		for _, node := range nodeList.Items {
   537  			if node.Spec.ProviderID == providerID {
   538  				return &node, nil
   539  			}
   540  		}
   541  
   542  		if nodeList.Continue == "" {
   543  			break
   544  		}
   545  	}
   546  
   547  	return nil, nil
   548  }
   549  
   550  func getWorkloadClient(ctx context.Context, c client.Client, cluster client.ObjectKey) (client.Client, error) {
   551  	ctx, _, done := tele.StartSpanWithLogger(
   552  		ctx,
   553  		"scope.MachinePoolMachineScope.getWorkloadClient",
   554  	)
   555  	defer done()
   556  
   557  	return remote.NewClusterClient(ctx, MachinePoolMachineScopeName, c, cluster)
   558  }