sigs.k8s.io/cluster-api@v1.7.1/exp/internal/controllers/machinepool_controller_phases.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"fmt"
	"reflect"
	"time"

	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/external"
	capierrors "sigs.k8s.io/cluster-api/errors"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	utilexp "sigs.k8s.io/cluster-api/exp/util"
	"sigs.k8s.io/cluster-api/internal/util/ssa"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/annotations"
	"sigs.k8s.io/cluster-api/util/conditions"
	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
	"sigs.k8s.io/cluster-api/util/labels"
	"sigs.k8s.io/cluster-api/util/labels/format"
	"sigs.k8s.io/cluster-api/util/patch"
)

func (r *MachinePoolReconciler) reconcilePhase(mp *expv1.MachinePool) {
	// Set the phase to "pending" if nil.
	if mp.Status.Phase == "" {
		mp.Status.SetTypedPhase(expv1.MachinePoolPhasePending)
	}

	// Set the phase to "provisioning" if bootstrap is ready and the infrastructure isn't.
	if mp.Status.BootstrapReady && !mp.Status.InfrastructureReady {
		mp.Status.SetTypedPhase(expv1.MachinePoolPhaseProvisioning)
	}

	// Set the phase to "provisioned" if the infrastructure is ready.
	if len(mp.Status.NodeRefs) != 0 {
		mp.Status.SetTypedPhase(expv1.MachinePoolPhaseProvisioned)
	}

	// Set the phase to "running" if the number of ready replicas is equal to desired replicas.
	if mp.Status.InfrastructureReady && *mp.Spec.Replicas == mp.Status.ReadyReplicas {
		mp.Status.SetTypedPhase(expv1.MachinePoolPhaseRunning)
	}

	// Set the appropriate phase in response to the MachinePool replica count being greater than the observed infrastructure replicas.
	if mp.Status.InfrastructureReady && *mp.Spec.Replicas > mp.Status.ReadyReplicas {
		// If we are being managed by an external autoscaler and can't predict scaling direction, set to "Scaling".
		if annotations.ReplicasManagedByExternalAutoscaler(mp) {
			mp.Status.SetTypedPhase(expv1.MachinePoolPhaseScaling)
		} else {
			// Set the phase to "ScalingUp" if we are actively scaling the infrastructure out.
			mp.Status.SetTypedPhase(expv1.MachinePoolPhaseScalingUp)
		}
	}

	// Set the appropriate phase in response to the MachinePool replica count being less than the observed infrastructure replicas.
	if mp.Status.InfrastructureReady && *mp.Spec.Replicas < mp.Status.ReadyReplicas {
		// If we are being managed by an external autoscaler and can't predict scaling direction, set to "Scaling".
		if annotations.ReplicasManagedByExternalAutoscaler(mp) {
			mp.Status.SetTypedPhase(expv1.MachinePoolPhaseScaling)
		} else {
			// Set the phase to "ScalingDown" if we are actively scaling the infrastructure in.
			mp.Status.SetTypedPhase(expv1.MachinePoolPhaseScalingDown)
		}
	}

	// Set the phase to "failed" if either Status.FailureReason or Status.FailureMessage is not nil.
	if mp.Status.FailureReason != nil || mp.Status.FailureMessage != nil {
		mp.Status.SetTypedPhase(expv1.MachinePoolPhaseFailed)
	}

	// Set the phase to "deleting" if the deletion timestamp is set.
	if !mp.DeletionTimestamp.IsZero() {
		mp.Status.SetTypedPhase(expv1.MachinePoolPhaseDeleting)
	}
}

// reconcileExternal handles generic unstructured objects referenced by a MachinePool.
func (r *MachinePoolReconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool, ref *corev1.ObjectReference) (external.ReconcileOutput, error) {
	log := ctrl.LoggerFrom(ctx)

	if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, ref); err != nil {
		return external.ReconcileOutput{}, err
	}

	obj, err := external.Get(ctx, r.Client, ref, m.Namespace)
	if err != nil {
		if apierrors.IsNotFound(errors.Cause(err)) {
			return external.ReconcileOutput{}, errors.Wrapf(err, "could not find %v %q for MachinePool %q in namespace %q, requeuing",
				ref.GroupVersionKind(), ref.Name, m.Name, m.Namespace)
		}
		return external.ReconcileOutput{}, err
	}

	// Ensure we add a watch to the external object, if there isn't one already.
	if err := r.externalTracker.Watch(log, obj, handler.EnqueueRequestForOwner(r.Client.Scheme(), r.Client.RESTMapper(), &expv1.MachinePool{})); err != nil {
		return external.ReconcileOutput{}, err
	}

	// If the external object is paused, return without any further processing.
	if annotations.IsPaused(cluster, obj) {
		log.V(3).Info("External object referenced is paused")
		return external.ReconcileOutput{Paused: true}, nil
	}

	// Initialize the patch helper.
	patchHelper, err := patch.NewHelper(obj, r.Client)
	if err != nil {
		return external.ReconcileOutput{}, err
	}

	// Set external object ControllerReference to the MachinePool.
	if err := controllerutil.SetControllerReference(m, obj, r.Client.Scheme()); err != nil {
		return external.ReconcileOutput{}, err
	}

	// Set the Cluster label.
	labels := obj.GetLabels()
	if labels == nil {
		labels = make(map[string]string)
	}
	labels[clusterv1.ClusterNameLabel] = m.Spec.ClusterName
	obj.SetLabels(labels)

	// Always attempt to Patch the external object.
	if err := patchHelper.Patch(ctx, obj); err != nil {
		return external.ReconcileOutput{}, err
	}

	// Set failure reason and message, if any.
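	// Note: failureReason and failureMessage are read from the referenced object's status
	// (status.failureReason / status.failureMessage); a non-empty value is copied onto the
	// MachinePool and is treated as a terminal problem rather than a transient error.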
	failureReason, failureMessage, err := external.FailuresFrom(obj)
	if err != nil {
		return external.ReconcileOutput{}, err
	}
	if failureReason != "" {
		machineStatusFailure := capierrors.MachinePoolStatusFailure(failureReason)
		m.Status.FailureReason = &machineStatusFailure
	}
	if failureMessage != "" {
		m.Status.FailureMessage = ptr.To(
			fmt.Sprintf("Failure detected from referenced resource %v with name %q: %s",
				obj.GroupVersionKind(), obj.GetName(), failureMessage),
		)
	}

	return external.ReconcileOutput{Result: obj}, nil
}

// reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *expv1.MachinePool) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// Call generic external reconciler if we have an external reference.
	var bootstrapConfig *unstructured.Unstructured
	if m.Spec.Template.Spec.Bootstrap.ConfigRef != nil {
		bootstrapReconcileResult, err := r.reconcileExternal(ctx, cluster, m, m.Spec.Template.Spec.Bootstrap.ConfigRef)
		if err != nil {
			return ctrl.Result{}, err
		}
		// If the external object is paused, return without any further processing.
		if bootstrapReconcileResult.Paused {
			return ctrl.Result{}, nil
		}
		bootstrapConfig = bootstrapReconcileResult.Result

		// If the bootstrap config is being deleted, return early.
		if !bootstrapConfig.GetDeletionTimestamp().IsZero() {
			return ctrl.Result{}, nil
		}

		// Determine if the bootstrap provider is ready.
		ready, err := external.IsReady(bootstrapConfig)
		if err != nil {
			return ctrl.Result{}, err
		}

		// Report a summary of current status of the bootstrap object defined for this machine pool.
		conditions.SetMirror(m, clusterv1.BootstrapReadyCondition,
			conditions.UnstructuredGetter(bootstrapConfig),
			conditions.WithFallbackValue(ready, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""),
		)

		if !ready {
			log.Info("Waiting for bootstrap provider to generate data secret and report status.ready", bootstrapConfig.GetKind(), klog.KObj(bootstrapConfig))
			m.Status.BootstrapReady = ready
			return ctrl.Result{}, nil
		}

		// Get and set the name of the secret containing the bootstrap data.
		secretName, _, err := unstructured.NestedString(bootstrapConfig.Object, "status", "dataSecretName")
		if err != nil {
			return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve dataSecretName from bootstrap provider for MachinePool %q in namespace %q", m.Name, m.Namespace)
		} else if secretName == "" {
			return ctrl.Result{}, errors.Errorf("retrieved empty dataSecretName from bootstrap provider for MachinePool %q in namespace %q", m.Name, m.Namespace)
		}

		m.Spec.Template.Spec.Bootstrap.DataSecretName = ptr.To(secretName)
		m.Status.BootstrapReady = true
		return ctrl.Result{}, nil
	}

	// If dataSecretName is set without a ConfigRef, this means the user brought their own bootstrap data.
	if m.Spec.Template.Spec.Bootstrap.DataSecretName != nil {
		m.Status.BootstrapReady = true
		conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition)
		return ctrl.Result{}, nil
	}

	// This should never happen because the MachinePool webhook rejects objects where neither ConfigRef nor DataSecretName is set.
	return ctrl.Result{}, errors.Errorf("neither .spec.bootstrap.configRef nor .spec.bootstrap.dataSecretName are set for MachinePool %q in namespace %q", m.Name, m.Namespace)
}

// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a MachinePool.
func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, mp *expv1.MachinePool) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// Call generic external reconciler.
	infraReconcileResult, err := r.reconcileExternal(ctx, cluster, mp, &mp.Spec.Template.Spec.InfrastructureRef)
	if err != nil {
		if apierrors.IsNotFound(errors.Cause(err)) {
			log.Error(err, "infrastructure reference could not be found")
			if mp.Status.InfrastructureReady {
				// Infra object went missing after the machine pool was up and running.
				log.Error(err, "infrastructure reference has been deleted after being ready, setting failure state")
				mp.Status.FailureReason = ptr.To(capierrors.InvalidConfigurationMachinePoolError)
				mp.Status.FailureMessage = ptr.To(fmt.Sprintf("MachinePool infrastructure resource %v with name %q has been deleted after being ready",
					mp.Spec.Template.Spec.InfrastructureRef.GroupVersionKind(), mp.Spec.Template.Spec.InfrastructureRef.Name))
			}
			conditions.MarkFalse(mp, clusterv1.InfrastructureReadyCondition, clusterv1.IncorrectExternalRefReason, clusterv1.ConditionSeverityError, fmt.Sprintf("could not find infra reference of kind %s with name %s", mp.Spec.Template.Spec.InfrastructureRef.Kind, mp.Spec.Template.Spec.InfrastructureRef.Name))
		}
		return ctrl.Result{}, err
	}
	// If the external object is paused, return without any further processing.
	if infraReconcileResult.Paused {
		return ctrl.Result{}, nil
	}
	infraConfig := infraReconcileResult.Result

	if !infraConfig.GetDeletionTimestamp().IsZero() {
		return ctrl.Result{}, nil
	}

	ready, err := external.IsReady(infraConfig)
	if err != nil {
		return ctrl.Result{}, err
	}

	mp.Status.InfrastructureReady = ready

	// Report a summary of current status of the infrastructure object defined for this machine pool.
	conditions.SetMirror(mp, clusterv1.InfrastructureReadyCondition,
		conditions.UnstructuredGetter(infraConfig),
		conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
	)

	if err := r.reconcileMachines(ctx, mp, infraConfig); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile Machines for MachinePool %s", klog.KObj(mp))
	}

	if !mp.Status.InfrastructureReady {
		log.Info("Infrastructure provider is not yet ready", infraConfig.GetKind(), klog.KObj(infraConfig))
		return ctrl.Result{}, nil
	}

	var providerIDList []string
	// Get Spec.ProviderIDList from the infrastructure provider.
	if err := util.UnstructuredUnmarshalField(infraConfig, &providerIDList, "spec", "providerIDList"); err != nil && !errors.Is(err, util.ErrUnstructuredFieldNotFound) {
		return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve data from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
	}

	// Get and set Status.Replicas from the infrastructure provider.
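	// If the provider has not (yet) reported status.replicas, the field-not-found error is
	// swallowed below and the previously observed value of Status.Replicas is kept.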
	err = util.UnstructuredUnmarshalField(infraConfig, &mp.Status.Replicas, "status", "replicas")
	if err != nil {
		if !errors.Is(err, util.ErrUnstructuredFieldNotFound) {
			return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve replicas from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
		}
	}

	if len(providerIDList) == 0 && mp.Status.Replicas != 0 {
		log.Info("Retrieved empty spec.providerIDList from infrastructure provider but status.replicas is not zero.", "replicas", mp.Status.Replicas)
		return ctrl.Result{}, nil
	}

	if !reflect.DeepEqual(mp.Spec.ProviderIDList, providerIDList) {
		mp.Spec.ProviderIDList = providerIDList
		mp.Status.ReadyReplicas = 0
		mp.Status.AvailableReplicas = 0
		mp.Status.UnavailableReplicas = mp.Status.Replicas
	}

	return ctrl.Result{}, nil
}

// reconcileMachines reconciles Machines associated with a MachinePool.
//
// Note: In the case of MachinePools, Machines are created to surface in CAPI what already exists in the
// infrastructure, whereas for MachineDeployments Machines are created in CAPI first and the
// infrastructure is then created accordingly.
// Note: When supported by the cloud provider implementation of the MachinePool, machines will provide a means to interact
// with the corresponding infrastructure (e.g. delete a specific machine in case MachineHealthCheck detects it is unhealthy).
func (r *MachinePoolReconciler) reconcileMachines(ctx context.Context, mp *expv1.MachinePool, infraMachinePool *unstructured.Unstructured) error {
	log := ctrl.LoggerFrom(ctx)

	var infraMachineKind string
	if err := util.UnstructuredUnmarshalField(infraMachinePool, &infraMachineKind, "status", "infrastructureMachineKind"); err != nil {
		if errors.Is(err, util.ErrUnstructuredFieldNotFound) {
			log.V(4).Info("MachinePool Machines not supported, no infraMachineKind found")
			return nil
		}

		return errors.Wrapf(err, "failed to retrieve infraMachineKind from infrastructure provider for MachinePool %s", klog.KObj(mp))
	}

	infraMachineSelector := metav1.LabelSelector{
		MatchLabels: map[string]string{
			clusterv1.MachinePoolNameLabel: format.MustFormatValue(mp.Name),
			clusterv1.ClusterNameLabel:     mp.Spec.ClusterName,
		},
	}

	log.V(4).Info("Reconciling MachinePool Machines", "infrastructureMachineKind", infraMachineKind, "infrastructureMachineSelector", infraMachineSelector)
	var infraMachineList unstructured.UnstructuredList

	// Get the list of infraMachines, which are maintained by the InfraMachinePool controller.
	infraMachineList.SetAPIVersion(infraMachinePool.GetAPIVersion())
	infraMachineList.SetKind(infraMachineKind + "List")
	if err := r.Client.List(ctx, &infraMachineList, client.InNamespace(mp.Namespace), client.MatchingLabels(infraMachineSelector.MatchLabels)); err != nil {
		return errors.Wrapf(err, "failed to list infra machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
	}

	// Add a watcher for infraMachine, if there isn't one already; this allows this controller to reconcile
	// changes made by the InfraMachinePool controller immediately.
	sampleInfraMachine := &unstructured.Unstructured{}
	sampleInfraMachine.SetAPIVersion(infraMachinePool.GetAPIVersion())
	sampleInfraMachine.SetKind(infraMachineKind)

	// Add watcher for infraMachine, if there isn't one already.
	if err := r.externalTracker.Watch(log, sampleInfraMachine, handler.EnqueueRequestsFromMapFunc(r.infraMachineToMachinePoolMapper)); err != nil {
		return err
	}

	// Get the list of machines managed by this controller, and align it with the infra machines managed by
	// the InfraMachinePool controller.
	machineList := &clusterv1.MachineList{}
	if err := r.Client.List(ctx, machineList, client.InNamespace(mp.Namespace), client.MatchingLabels(infraMachineSelector.MatchLabels)); err != nil {
		return err
	}

	if err := r.createOrUpdateMachines(ctx, mp, machineList.Items, infraMachineList.Items); err != nil {
		return errors.Wrapf(err, "failed to create machines for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
	}

	return nil
}

// createOrUpdateMachines creates a MachinePool Machine for each infraMachine if it doesn't already exist, and sets the owner reference and infraRef.
func (r *MachinePoolReconciler) createOrUpdateMachines(ctx context.Context, mp *expv1.MachinePool, machines []clusterv1.Machine, infraMachines []unstructured.Unstructured) error {
	log := ctrl.LoggerFrom(ctx)

	// Construct a set of names of infraMachines that already have a Machine.
	infraMachineToMachine := map[string]clusterv1.Machine{}
	for _, machine := range machines {
		infraRef := machine.Spec.InfrastructureRef
		infraMachineToMachine[infraRef.Name] = machine
	}

	createdMachines := []clusterv1.Machine{}
	var errs []error
	for i := range infraMachines {
		infraMachine := &infraMachines[i]
		// If infraMachine already has a Machine, update it if needed.
		if existingMachine, ok := infraMachineToMachine[infraMachine.GetName()]; ok {
			log.V(2).Info("Patching existing Machine for infraMachine", "infraMachine", klog.KObj(infraMachine), "machine", klog.KObj(&existingMachine))

			desiredMachine := computeDesiredMachine(mp, infraMachine, &existingMachine)
			if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, desiredMachine, ssa.WithCachingProxy{Cache: r.ssaCache, Original: &existingMachine}); err != nil {
				log.Error(err, "failed to update Machine", "Machine", klog.KObj(desiredMachine))
				errs = append(errs, errors.Wrapf(err, "failed to update Machine %q", klog.KObj(desiredMachine)))
			}
		} else {
			// Otherwise create a new Machine for the infraMachine.
			log.Info("Creating new Machine for infraMachine", "infraMachine", klog.KObj(infraMachine))
			machine := computeDesiredMachine(mp, infraMachine, nil)

			if err := ssa.Patch(ctx, r.Client, MachinePoolControllerName, machine); err != nil {
				errs = append(errs, errors.Wrapf(err, "failed to create new Machine for infraMachine %q in namespace %q", infraMachine.GetName(), infraMachine.GetNamespace()))
				continue
			}

			createdMachines = append(createdMachines, *machine)
		}
	}
	if err := r.waitForMachineCreation(ctx, createdMachines); err != nil {
		errs = append(errs, errors.Wrapf(err, "failed to wait for machines to be created"))
	}
	if len(errs) > 0 {
		return kerrors.NewAggregate(errs)
	}

	return nil
}

// computeDesiredMachine constructs the desired Machine for an infraMachine.
// If the Machine already exists, it ensures that the Machine is always owned by the MachinePool.
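// Note: for an existing Machine only the name and UID are carried over; labels and annotations are
// recomputed from the MachinePool's template so that the server-side apply patch corrects any drift.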
func computeDesiredMachine(mp *expv1.MachinePool, infraMachine *unstructured.Unstructured, existingMachine *clusterv1.Machine) *clusterv1.Machine {
	infraRef := corev1.ObjectReference{
		APIVersion: infraMachine.GetAPIVersion(),
		Kind:       infraMachine.GetKind(),
		Name:       infraMachine.GetName(),
		Namespace:  infraMachine.GetNamespace(),
	}

	machine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name: infraMachine.GetName(),
			// Note: by setting the ownerRef on creation we signal to the Machine controller that this is not a stand-alone Machine.
			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(mp, machinePoolKind)},
			Namespace:       mp.Namespace,
			Labels:          make(map[string]string),
			Annotations:     make(map[string]string),
		},
		Spec: clusterv1.MachineSpec{
			ClusterName:       mp.Spec.ClusterName,
			InfrastructureRef: infraRef,
		},
	}

	if existingMachine != nil {
		machine.SetName(existingMachine.Name)
		machine.SetUID(existingMachine.UID)
	}

	for k, v := range mp.Spec.Template.Annotations {
		machine.Annotations[k] = v
	}

	// Set the labels from machinePool.Spec.Template.Labels as labels for the new Machine.
	// Note: We can't just set `machinePool.Spec.Template.Labels` directly and thus "share" the labels
	// map between the Machine and machinePool.Spec.Template.Labels. This would mean that adding the
	// MachinePoolNameLabel later on the Machine would also add the label to machinePool.Spec.Template.Labels
	// and thus modify the labels of the MachinePool.
	for k, v := range mp.Spec.Template.Labels {
		machine.Labels[k] = v
	}

	// Enforce that the MachinePoolNameLabel and ClusterNameLabel are present on the Machine.
	machine.Labels[clusterv1.MachinePoolNameLabel] = format.MustFormatValue(mp.Name)
	machine.Labels[clusterv1.ClusterNameLabel] = mp.Spec.ClusterName

	return machine
}

// infraMachineToMachinePoolMapper is a mapper function that maps an InfraMachine to the MachinePool that owns it.
// This is used to trigger an update of the MachinePool when an InfraMachine is changed.
func (r *MachinePoolReconciler) infraMachineToMachinePoolMapper(ctx context.Context, o client.Object) []ctrl.Request {
	log := ctrl.LoggerFrom(ctx)

	if labels.IsMachinePoolOwned(o) {
		machinePool, err := utilexp.GetMachinePoolByLabels(ctx, r.Client, o.GetNamespace(), o.GetLabels())
		if err != nil {
			log.Error(err, "failed to get MachinePool for InfraMachine", "infraMachine", klog.KObj(o), "labels", o.GetLabels())
			return nil
		}
		if machinePool != nil {
			return []ctrl.Request{
				{
					NamespacedName: client.ObjectKey{
						Namespace: machinePool.Namespace,
						Name:      machinePool.Name,
					},
				},
			}
		}
	}

	return nil
}

func (r *MachinePoolReconciler) waitForMachineCreation(ctx context.Context, machineList []clusterv1.Machine) error {
	_ = ctrl.LoggerFrom(ctx)

	// waitForCacheUpdateTimeout is the amount of time allowed to wait for desired state.
	const waitForCacheUpdateTimeout = 10 * time.Second

	// waitForCacheUpdateInterval is the amount of time between polling for the desired state.
	// The polling is against a local memory cache.
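	// Note: waiting here keeps the controller's cached client in sync, so that subsequent
	// reconciles reading from the cache can observe the newly created Machines.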
	const waitForCacheUpdateInterval = 100 * time.Millisecond

	for i := 0; i < len(machineList); i++ {
		machine := machineList[i]
		pollErr := wait.PollUntilContextTimeout(ctx, waitForCacheUpdateInterval, waitForCacheUpdateTimeout, true, func(ctx context.Context) (bool, error) {
			key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name}
			if err := r.Client.Get(ctx, key, &clusterv1.Machine{}); err != nil {
				if apierrors.IsNotFound(err) {
					return false, nil
				}
				return false, err
			}

			return true, nil
		})

		if pollErr != nil {
			return errors.Wrapf(pollErr, "failed waiting for machine object %v to be created", klog.KObj(&machine))
		}
	}

	return nil
}