sigs.k8s.io/cluster-api@v1.7.1/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
	"context"
	"fmt"
	"strconv"
	"time"

	"github.com/blang/semver/v4"
	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	kerrors "k8s.io/apimachinery/pkg/util/errors"
	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
	bootstrapsecretutil "k8s.io/cluster-bootstrap/util/secrets"
	"k8s.io/klog/v2"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/cloudinit"
	"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/ignition"
	"sigs.k8s.io/cluster-api/bootstrap/kubeadm/internal/locking"
	kubeadmtypes "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types"
	bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
	"sigs.k8s.io/cluster-api/controllers/remote"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/cluster-api/feature"
	"sigs.k8s.io/cluster-api/internal/util/taints"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/annotations"
	"sigs.k8s.io/cluster-api/util/conditions"
	clog "sigs.k8s.io/cluster-api/util/log"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/cluster-api/util/predicates"
	"sigs.k8s.io/cluster-api/util/secret"
)

const (
	// DefaultTokenTTL is the default TTL used for tokens.
	DefaultTokenTTL = 15 * time.Minute
)

// InitLocker is a lock that is used around kubeadm init.
type InitLocker interface {
	Lock(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool
	Unlock(ctx context.Context, cluster *clusterv1.Cluster) bool
}

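// noopInitLocker is a minimal illustrative InitLocker (an assumption for the sake of
// example; it is not part of the upstream controller, which defaults to
// locking.NewControlPlaneInitMutex): it always grants the lock, which can be handy
// in unit tests where serializing kubeadm init is irrelevant.
type noopInitLocker struct{}

// Lock always reports the lock as acquired.
func (noopInitLocker) Lock(_ context.Context, _ *clusterv1.Cluster, _ *clusterv1.Machine) bool {
	return true
}

// Unlock always reports the lock as released.
func (noopInitLocker) Unlock(_ context.Context, _ *clusterv1.Cluster) bool {
	return true
}
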
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kubeadmconfigs;kubeadmconfigs/status;kubeadmconfigs/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machinesets;machines;machines/status;machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete

// KubeadmConfigReconciler reconciles a KubeadmConfig object.
type KubeadmConfigReconciler struct {
	Client              client.Client
	SecretCachingClient client.Client
	Tracker             *remote.ClusterCacheTracker
	KubeadmInitLock     InitLocker

	// WatchFilterValue is the label value used to filter events prior to reconciliation.
	WatchFilterValue string

	// TokenTTL is the amount of time a bootstrap token (and therefore a KubeadmConfig) will be valid.
	TokenTTL time.Duration
}

// Scope is a scoped struct used during reconciliation.
type Scope struct {
	logr.Logger
	Config      *bootstrapv1.KubeadmConfig
	ConfigOwner *bsutil.ConfigOwner
	Cluster     *clusterv1.Cluster
}

// SetupWithManager sets up the reconciler with the Manager.
func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
	if r.KubeadmInitLock == nil {
		r.KubeadmInitLock = locking.NewControlPlaneInitMutex(mgr.GetClient())
	}
	if r.TokenTTL == 0 {
		r.TokenTTL = DefaultTokenTTL
	}

	b := ctrl.NewControllerManagedBy(mgr).
		For(&bootstrapv1.KubeadmConfig{}).
		WithOptions(options).
		Watches(
			&clusterv1.Machine{},
			handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
		).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue))

	if feature.Gates.Enabled(feature.MachinePool) {
		b = b.Watches(
			&expv1.MachinePool{},
			handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc),
		)
	}

	b = b.Watches(
		&clusterv1.Cluster{},
		handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs),
		builder.WithPredicates(
			predicates.All(ctrl.LoggerFrom(ctx),
				predicates.ClusterUnpausedAndInfrastructureReady(ctrl.LoggerFrom(ctx)),
				predicates.ResourceHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue),
			),
		),
	)

	if err := b.Complete(r); err != nil {
		return errors.Wrap(err, "failed setting up with a controller manager")
	}
	return nil
}

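// exampleSetup is an illustrative sketch (the function name and the tracker wiring
// are assumptions, not part of the upstream file) of how a manager entrypoint might
// register this reconciler. In the real bootstrap provider the tracker and a
// dedicated secret-caching client are built during manager setup.
func exampleSetup(ctx context.Context, mgr ctrl.Manager, tracker *remote.ClusterCacheTracker) error {
	r := &KubeadmConfigReconciler{
		Client:              mgr.GetClient(),
		SecretCachingClient: mgr.GetClient(), // assumption: production code uses a secret-scoped cached client instead
		Tracker:             tracker,
		TokenTTL:            DefaultTokenTTL,
	}
	return r.SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 2})
}
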
// Reconcile handles KubeadmConfig events.
func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, rerr error) {
	log := ctrl.LoggerFrom(ctx)

	// Look up the kubeadm config.
	config := &bootstrapv1.KubeadmConfig{}
	if err := r.Client.Get(ctx, req.NamespacedName, config); err != nil {
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}

	// Look up the owner of this kubeadm config, if there is one.
	configOwner, err := bsutil.GetTypedConfigOwner(ctx, r.Client, config)
	if apierrors.IsNotFound(err) {
		// Could not find the owner yet; this is not an error and we will re-reconcile when the owner gets set.
		return ctrl.Result{}, nil
	}
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to get owner")
	}
	if configOwner == nil {
		return ctrl.Result{}, nil
	}
	log = log.WithValues(configOwner.GetKind(), klog.KRef(configOwner.GetNamespace(), configOwner.GetName()), "resourceVersion", configOwner.GetResourceVersion())
	ctx = ctrl.LoggerInto(ctx, log)

	if configOwner.GetKind() == "Machine" {
		// AddOwners adds the owners of Machine as k/v pairs to the logger.
		// Specifically, it will add KubeadmControlPlane, MachineSet and MachineDeployment.
		ctx, log, err = clog.AddOwners(ctx, r.Client, configOwner)
		if err != nil {
			return ctrl.Result{}, err
		}
	}

	log = log.WithValues("Cluster", klog.KRef(configOwner.GetNamespace(), configOwner.ClusterName()))
	ctx = ctrl.LoggerInto(ctx, log)

	// Look up the cluster the config owner is associated with.
	cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
	if err != nil {
		if errors.Cause(err) == util.ErrNoCluster {
			log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind()))
			return ctrl.Result{}, nil
		}

		if apierrors.IsNotFound(err) {
			log.Info("Cluster does not exist yet, waiting until it is created")
			return ctrl.Result{}, nil
		}
		log.Error(err, "Could not get cluster with metadata")
		return ctrl.Result{}, err
	}

	if annotations.IsPaused(cluster, config) {
		log.Info("Reconciliation is paused for this object")
		return ctrl.Result{}, nil
	}

	scope := &Scope{
		Logger:      log,
		Config:      config,
		ConfigOwner: configOwner,
		Cluster:     cluster,
	}

	// Initialize the patch helper.
	patchHelper, err := patch.NewHelper(config, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Attempt to patch the KubeadmConfig object and status after each reconciliation if no error occurs.
	defer func() {
		// Always update the readyCondition; the summary is represented using the "1 of x completed" notation.
		conditions.SetSummary(config,
			conditions.WithConditions(
				bootstrapv1.DataSecretAvailableCondition,
				bootstrapv1.CertificatesAvailableCondition,
			),
		)
		// Patch ObservedGeneration only if the reconciliation completed successfully.
		patchOpts := []patch.Option{}
		if rerr == nil {
			patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
		}
		if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil {
			rerr = kerrors.NewAggregate([]error{rerr, err})
		}
	}()

	// Ignore deleted KubeadmConfigs.
	if !config.DeletionTimestamp.IsZero() {
		return ctrl.Result{}, nil
	}

	res, err := r.reconcile(ctx, scope, cluster, config, configOwner)
	if err != nil && errors.Is(err, remote.ErrClusterLocked) {
		// Requeue if the reconcile failed because the ClusterCacheTracker was locked for
		// the current cluster due to concurrent access.
		log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker")
		return ctrl.Result{RequeueAfter: time.Minute}, nil
	}
	return res, err
}

func (r *KubeadmConfigReconciler) reconcile(ctx context.Context, scope *Scope, cluster *clusterv1.Cluster, config *bootstrapv1.KubeadmConfig, configOwner *bsutil.ConfigOwner) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// Ensure the bootstrap secret associated with this KubeadmConfig has the correct ownerReference.
	if err := r.ensureBootstrapSecretOwnersRef(ctx, scope); err != nil {
		return ctrl.Result{}, err
	}
	switch {
	// Wait for the infrastructure to be ready.
	case !cluster.Status.InfrastructureReady:
		log.Info("Cluster infrastructure is not ready, waiting")
		conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
		return ctrl.Result{}, nil
	// Reconcile status for machines that already have a secret reference, but our status isn't up to date.
	// This case solves the pivoting scenario (or a backup restore) which doesn't preserve the status subresource on objects.
	case configOwner.DataSecretName() != nil && (!config.Status.Ready || config.Status.DataSecretName == nil):
		config.Status.Ready = true
		config.Status.DataSecretName = configOwner.DataSecretName()
		conditions.MarkTrue(config, bootstrapv1.DataSecretAvailableCondition)
		return ctrl.Result{}, nil
	// Status being ready means a config has already been generated.
	case config.Status.Ready:
		if config.Spec.JoinConfiguration != nil && config.Spec.JoinConfiguration.Discovery.BootstrapToken != nil {
			if !configOwner.HasNodeRefs() {
				// If the BootstrapToken has been generated for a join but the config owner has no nodeRefs,
				// this indicates that the node has not yet joined and the token in the join config has not
				// been consumed, so it may need a refresh.
				return r.refreshBootstrapTokenIfNeeded(ctx, config, cluster)
			}
			if configOwner.IsMachinePool() {
				// If the BootstrapToken has been generated and infrastructure is ready but the configOwner is a MachinePool,
				// we rotate the token to keep it fresh for future scale-ups.
				return r.rotateMachinePoolBootstrapToken(ctx, config, cluster, scope)
			}
		}
		// In any other case just return, as the config is already generated and need not be generated again.
		return ctrl.Result{}, nil
	}

	// Note: can't use IsFalse here because we need to handle the absence of the condition as well as false.
	if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) {
		return r.handleClusterNotInitialized(ctx, scope)
	}

	// In every other case it's a join scenario.
	// Nb. in this case ClusterConfiguration and InitConfiguration should not be defined by users, but in case of misconfigurations, CABPK simply ignores them.

	// Unlock any locks that might have been set during the init process.
	r.KubeadmInitLock.Unlock(ctx, cluster)

	// If the JoinConfiguration is missing, create a default one.
	if config.Spec.JoinConfiguration == nil {
		log.Info("Creating default JoinConfiguration")
		config.Spec.JoinConfiguration = &bootstrapv1.JoinConfiguration{}
	}

	// It's a control plane join.
	if configOwner.IsControlPlaneMachine() {
		return r.joinControlplane(ctx, scope)
	}

	// It's a worker join.
	return r.joinWorker(ctx, scope)
}

func (r *KubeadmConfigReconciler) refreshBootstrapTokenIfNeeded(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	token := config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token

	remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
	if err != nil {
		return ctrl.Result{}, err
	}

	secret, err := getToken(ctx, remoteClient, token)
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to get bootstrap token secret in order to refresh it")
	}
	log = log.WithValues("Secret", klog.KObj(secret))

	secretExpiration := bootstrapsecretutil.GetData(secret, bootstrapapi.BootstrapTokenExpirationKey)
	if secretExpiration == "" {
		log.Info(fmt.Sprintf("Token has no valid value for %s, writing new expiration timestamp", bootstrapapi.BootstrapTokenExpirationKey))
	} else {
		// Assuming UTC, since we create the label value with that timezone
		expiration, err := time.Parse(time.RFC3339, secretExpiration)
		if err != nil {
			return ctrl.Result{}, errors.Wrapf(err, "can't parse expiration time of bootstrap token")
		}

		now := time.Now().UTC()
		skipTokenRefreshIfExpiringAfter := now.Add(r.skipTokenRefreshIfExpiringAfter())
		if expiration.After(skipTokenRefreshIfExpiringAfter) {
			log.V(3).Info("Token needs no refresh", "tokenExpiresInSeconds", expiration.Sub(now).Seconds())
			return ctrl.Result{
				RequeueAfter: r.tokenCheckRefreshOrRotationInterval(),
			}, nil
		}
	}

	// Extend TTL for existing token
	newExpiration := time.Now().UTC().Add(r.TokenTTL).Format(time.RFC3339)
	secret.Data[bootstrapapi.BootstrapTokenExpirationKey] = []byte(newExpiration)
	log.Info("Refreshing token until the infrastructure has a chance to consume it", "oldExpiration", secretExpiration, "newExpiration", newExpiration)
	err = remoteClient.Update(ctx, secret)
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to refresh bootstrap token")
	}
	return ctrl.Result{
		RequeueAfter: r.tokenCheckRefreshOrRotationInterval(),
	}, nil
}

func (r *KubeadmConfigReconciler) rotateMachinePoolBootstrapToken(ctx context.Context, config *bootstrapv1.KubeadmConfig, cluster *clusterv1.Cluster, scope *Scope) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.V(2).Info("Config is owned by a MachinePool, checking if token should be rotated")
	remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
	if err != nil {
		return ctrl.Result{}, err
	}

	token := config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token
	shouldRotate, err := shouldRotate(ctx, remoteClient, token, r.TokenTTL)
	if err != nil {
		return ctrl.Result{}, err
	}
	if shouldRotate {
		log.Info("Creating new bootstrap token, the existing one should be rotated")
		token, err := createToken(ctx, remoteClient, r.TokenTTL)
		if err != nil {
			return ctrl.Result{}, errors.Wrapf(err, "failed to create new bootstrap token")
		}

		config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")

		// update the bootstrap data
		return r.joinWorker(ctx, scope)
	}
	return ctrl.Result{
		RequeueAfter: r.tokenCheckRefreshOrRotationInterval(),
	}, nil
}

func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Context, scope *Scope) (_ ctrl.Result, reterr error) {
	// Initialize the DataSecretAvailableCondition if missing.
	// This is required to avoid the condition's LastTransitionTime flickering in case of errors surfacing
	// via the DataSecretGenerationFailedReason.
	if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
	}

	// If it's NOT a control plane machine, requeue.
	if !scope.ConfigOwner.IsControlPlaneMachine() {
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}

	// If the machine has neither ClusterConfiguration nor InitConfiguration, requeue.
	if scope.Config.Spec.InitConfiguration == nil && scope.Config.Spec.ClusterConfiguration == nil {
		scope.Info("Control plane is not ready, requeuing joining control planes until ready.")
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}

	machine := &clusterv1.Machine{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(scope.ConfigOwner.Object, machine); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "cannot convert %s to Machine", scope.ConfigOwner.GetKind())
	}

	// Acquire the init lock so that only the first machine configured
	// as a control plane gets processed here.
	// If not the first, requeue.
	if !r.KubeadmInitLock.Lock(ctx, scope.Cluster, machine) {
		scope.Info("A control plane is already being initialized, requeuing until control plane is ready")
		return ctrl.Result{RequeueAfter: 30 * time.Second}, nil
	}

	defer func() {
		if reterr != nil {
			if !r.KubeadmInitLock.Unlock(ctx, scope.Cluster) {
				reterr = kerrors.NewAggregate([]error{reterr, errors.New("failed to unlock the kubeadm init lock")})
			}
		}
	}()

	scope.Info("Creating BootstrapData for the first control plane")

	// Nb. in this case JoinConfiguration should not be defined by users, but in case of misconfigurations, CABPK simply ignores it.

	// Get both the ClusterConfiguration and InitConfiguration strings to pass to the cloud-init control plane generator.
	// kubeadm allows one of these values to be empty; CABPK replaces missing values with an empty config, so the cloud-init
	// generation does not need to handle special cases.

	kubernetesVersion := scope.ConfigOwner.KubernetesVersion()
	parsedVersion, err := semver.ParseTolerant(kubernetesVersion)
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion)
	}

	if scope.Config.Spec.InitConfiguration == nil {
		scope.Config.Spec.InitConfiguration = &bootstrapv1.InitConfiguration{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "kubeadm.k8s.io/v1beta3",
				Kind:       "InitConfiguration",
			},
		}
	}

	initdata, err := kubeadmtypes.MarshalInitConfigurationForVersion(scope.Config.Spec.InitConfiguration, parsedVersion)
	if err != nil {
		scope.Error(err, "Failed to marshal init configuration")
		return ctrl.Result{}, err
	}

	if scope.Config.Spec.ClusterConfiguration == nil {
		scope.Config.Spec.ClusterConfiguration = &bootstrapv1.ClusterConfiguration{
			TypeMeta: metav1.TypeMeta{
				APIVersion: "kubeadm.k8s.io/v1beta3",
				Kind:       "ClusterConfiguration",
			},
		}
	}

	// Inject into config.ClusterConfiguration values from top-level objects.
	r.reconcileTopLevelObjectSettings(ctx, scope.Cluster, machine, scope.Config)

	clusterdata, err := kubeadmtypes.MarshalClusterConfigurationForVersion(scope.Config.Spec.ClusterConfiguration, parsedVersion)
	if err != nil {
		scope.Error(err, "Failed to marshal cluster configuration")
		return ctrl.Result{}, err
	}

	certificates := secret.NewCertificatesForInitialControlPlane(scope.Config.Spec.ClusterConfiguration)

	// If the Cluster does not have a ControlPlane reference, look up and generate the certificates.
	// Otherwise rely on certificates generated by the ControlPlane controller.
	// Note: A cluster does not have a ControlPlane reference when using standalone CP machines.
	if scope.Cluster.Spec.ControlPlaneRef == nil {
		err = certificates.LookupOrGenerateCached(
			ctx,
			r.SecretCachingClient,
			r.Client,
			util.ObjectKey(scope.Cluster),
			*metav1.NewControllerRef(scope.Config, bootstrapv1.GroupVersion.WithKind("KubeadmConfig")))
	} else {
		err = certificates.LookupCached(ctx,
			r.SecretCachingClient,
			r.Client,
			util.ObjectKey(scope.Cluster))
	}
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition)

	verbosityFlag := ""
	if scope.Config.Spec.Verbosity != nil {
		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
	}

	files, err := r.resolveFiles(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	users, err := r.resolveUsers(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	controlPlaneInput := &cloudinit.ControlPlaneInput{
		BaseUserData: cloudinit.BaseUserData{
			AdditionalFiles:     files,
			NTP:                 scope.Config.Spec.NTP,
			PreKubeadmCommands:  scope.Config.Spec.PreKubeadmCommands,
			PostKubeadmCommands: scope.Config.Spec.PostKubeadmCommands,
			Users:               users,
			Mounts:              scope.Config.Spec.Mounts,
			DiskSetup:           scope.Config.Spec.DiskSetup,
			KubeadmVerbosity:    verbosityFlag,
		},
		InitConfiguration:    initdata,
		ClusterConfiguration: clusterdata,
		Certificates:         certificates,
	}

	var bootstrapInitData []byte
	switch scope.Config.Spec.Format {
	case bootstrapv1.Ignition:
		bootstrapInitData, _, err = ignition.NewInitControlPlane(&ignition.ControlPlaneInput{
			ControlPlaneInput: controlPlaneInput,
			Ignition:          scope.Config.Spec.Ignition,
		})
	default:
		bootstrapInitData, err = cloudinit.NewInitControlPlane(controlPlaneInput)
	}

	if err != nil {
		scope.Error(err, "Failed to generate user data for bootstrap control plane")
		return ctrl.Result{}, err
	}

	if err := r.storeBootstrapData(ctx, scope, bootstrapInitData); err != nil {
		scope.Error(err, "Failed to store bootstrap data")
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}

func (r *KubeadmConfigReconciler) joinWorker(ctx context.Context, scope *Scope) (ctrl.Result, error) {
	scope.Info("Creating BootstrapData for the worker node")

	certificates := secret.NewCertificatesForWorker(scope.Config.Spec.JoinConfiguration.CACertPath)
	err := certificates.LookupCached(
		ctx,
		r.SecretCachingClient,
		r.Client,
		util.ObjectKey(scope.Cluster),
	)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error())
		return ctrl.Result{}, err
	}
	if err := certificates.EnsureAllExist(); err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error())
		return ctrl.Result{}, err
	}
	conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition)

	// Ensure that joinConfiguration.Discovery is properly set for the joining node on the current cluster.
	if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config, certificates); err != nil {
		return ctrl.Result{}, err
	} else if !res.IsZero() {
		return res, nil
	}

	kubernetesVersion := scope.ConfigOwner.KubernetesVersion()
	parsedVersion, err := semver.ParseTolerant(kubernetesVersion)
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion)
	}

	// Add the node uninitialized taint to the list of taints.
	// DeepCopy the JoinConfiguration to prevent updating the actual KubeadmConfig.
	// Do not modify the KubeadmConfig in etcd as this is a temporary taint that will be dropped after the node
	// is initialized by ClusterAPI.
	joinConfiguration := scope.Config.Spec.JoinConfiguration.DeepCopy()
	if !taints.HasTaint(joinConfiguration.NodeRegistration.Taints, clusterv1.NodeUninitializedTaint) {
		joinConfiguration.NodeRegistration.Taints = append(joinConfiguration.NodeRegistration.Taints, clusterv1.NodeUninitializedTaint)
	}

	joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(joinConfiguration, parsedVersion)
	if err != nil {
		scope.Error(err, "Failed to marshal join configuration")
		return ctrl.Result{}, err
	}

	if scope.Config.Spec.JoinConfiguration.ControlPlane != nil {
		return ctrl.Result{}, errors.New("Machine is a Worker, but JoinConfiguration.ControlPlane is set in the KubeadmConfig object")
	}

	verbosityFlag := ""
	if scope.Config.Spec.Verbosity != nil {
		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
	}

	files, err := r.resolveFiles(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	users, err := r.resolveUsers(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	nodeInput := &cloudinit.NodeInput{
		BaseUserData: cloudinit.BaseUserData{
			AdditionalFiles:      files,
			NTP:                  scope.Config.Spec.NTP,
			PreKubeadmCommands:   scope.Config.Spec.PreKubeadmCommands,
			PostKubeadmCommands:  scope.Config.Spec.PostKubeadmCommands,
			Users:                users,
			Mounts:               scope.Config.Spec.Mounts,
			DiskSetup:            scope.Config.Spec.DiskSetup,
			KubeadmVerbosity:     verbosityFlag,
			UseExperimentalRetry: scope.Config.Spec.UseExperimentalRetryJoin,
		},
		JoinConfiguration: joinData,
	}

	var bootstrapJoinData []byte
	switch scope.Config.Spec.Format {
	case bootstrapv1.Ignition:
		bootstrapJoinData, _, err = ignition.NewNode(&ignition.NodeInput{
			NodeInput: nodeInput,
			Ignition:  scope.Config.Spec.Ignition,
		})
	default:
		bootstrapJoinData, err = cloudinit.NewNode(nodeInput)
	}

	if err != nil {
		scope.Error(err, "Failed to create a worker join configuration")
		return ctrl.Result{}, err
	}

	if err := r.storeBootstrapData(ctx, scope, bootstrapJoinData); err != nil {
		scope.Error(err, "Failed to store bootstrap data")
		return ctrl.Result{}, err
	}

	// Ensure this object is reconciled again so we keep refreshing the bootstrap token until it is consumed.
	return ctrl.Result{RequeueAfter: r.tokenCheckRefreshOrRotationInterval()}, nil
}

func (r *KubeadmConfigReconciler) joinControlplane(ctx context.Context, scope *Scope) (ctrl.Result, error) {
	scope.Info("Creating BootstrapData for the joining control plane")

	if !scope.ConfigOwner.IsControlPlaneMachine() {
		return ctrl.Result{}, fmt.Errorf("%s is not a valid control plane kind, only Machine is supported", scope.ConfigOwner.GetKind())
	}

	if scope.Config.Spec.JoinConfiguration.ControlPlane == nil {
		scope.Config.Spec.JoinConfiguration.ControlPlane = &bootstrapv1.JoinControlPlane{}
	}

	certificates := secret.NewControlPlaneJoinCerts(scope.Config.Spec.ClusterConfiguration)
	err := certificates.LookupCached(
		ctx,
		r.SecretCachingClient,
		r.Client,
		util.ObjectKey(scope.Cluster),
	)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error())
		return ctrl.Result{}, err
	}
	if err := certificates.EnsureAllExist(); err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.CertificatesAvailableCondition, bootstrapv1.CertificatesCorruptedReason, clusterv1.ConditionSeverityError, err.Error())
		return ctrl.Result{}, err
	}

	conditions.MarkTrue(scope.Config, bootstrapv1.CertificatesAvailableCondition)

	// Ensure that joinConfiguration.Discovery is properly set for the joining node on the current cluster.
	if res, err := r.reconcileDiscovery(ctx, scope.Cluster, scope.Config, certificates); err != nil {
		return ctrl.Result{}, err
	} else if !res.IsZero() {
		return res, nil
	}

	kubernetesVersion := scope.ConfigOwner.KubernetesVersion()
	parsedVersion, err := semver.ParseTolerant(kubernetesVersion)
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to parse kubernetes version %q", kubernetesVersion)
	}

	joinData, err := kubeadmtypes.MarshalJoinConfigurationForVersion(scope.Config.Spec.JoinConfiguration, parsedVersion)
	if err != nil {
		scope.Error(err, "Failed to marshal join configuration")
		return ctrl.Result{}, err
	}

	verbosityFlag := ""
	if scope.Config.Spec.Verbosity != nil {
		verbosityFlag = fmt.Sprintf("--v %s", strconv.Itoa(int(*scope.Config.Spec.Verbosity)))
	}

	files, err := r.resolveFiles(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	users, err := r.resolveUsers(ctx, scope.Config)
	if err != nil {
		conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
		return ctrl.Result{}, err
	}

	controlPlaneJoinInput := &cloudinit.ControlPlaneJoinInput{
		JoinConfiguration: joinData,
		Certificates:      certificates,
		BaseUserData: cloudinit.BaseUserData{
			AdditionalFiles:      files,
			NTP:                  scope.Config.Spec.NTP,
			PreKubeadmCommands:   scope.Config.Spec.PreKubeadmCommands,
			PostKubeadmCommands:  scope.Config.Spec.PostKubeadmCommands,
			Users:                users,
			Mounts:               scope.Config.Spec.Mounts,
			DiskSetup:            scope.Config.Spec.DiskSetup,
			KubeadmVerbosity:     verbosityFlag,
			UseExperimentalRetry: scope.Config.Spec.UseExperimentalRetryJoin,
		},
	}

	var bootstrapJoinData []byte
	switch scope.Config.Spec.Format {
	case bootstrapv1.Ignition:
		bootstrapJoinData, _, err = ignition.NewJoinControlPlane(&ignition.ControlPlaneJoinInput{
			ControlPlaneJoinInput: controlPlaneJoinInput,
			Ignition:              scope.Config.Spec.Ignition,
		})
	default:
		bootstrapJoinData, err = cloudinit.NewJoinControlPlane(controlPlaneJoinInput)
	}

	if err != nil {
		scope.Error(err, "Failed to create a control plane join configuration")
		return ctrl.Result{}, err
	}

	if err := r.storeBootstrapData(ctx, scope, bootstrapJoinData); err != nil {
		scope.Error(err, "Failed to store bootstrap data")
		return ctrl.Result{}, err
	}

	// Ensure this object is reconciled again so we keep refreshing the bootstrap token until it is consumed.
	return ctrl.Result{RequeueAfter: r.tokenCheckRefreshOrRotationInterval()}, nil
}

// resolveFiles maps .Spec.Files into cloudinit.Files, resolving any object references
// along the way.
func (r *KubeadmConfigReconciler) resolveFiles(ctx context.Context, cfg *bootstrapv1.KubeadmConfig) ([]bootstrapv1.File, error) {
	collected := make([]bootstrapv1.File, 0, len(cfg.Spec.Files))

	for i := range cfg.Spec.Files {
		in := cfg.Spec.Files[i]
		if in.ContentFrom != nil {
			data, err := r.resolveSecretFileContent(ctx, cfg.Namespace, in)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to resolve file source")
			}
			in.ContentFrom = nil
			in.Content = string(data)
		}
		collected = append(collected, in)
	}

	return collected, nil
}

// resolveSecretFileContent returns file content fetched from a referenced secret object.
func (r *KubeadmConfigReconciler) resolveSecretFileContent(ctx context.Context, ns string, source bootstrapv1.File) ([]byte, error) {
	secret := &corev1.Secret{}
	key := types.NamespacedName{Namespace: ns, Name: source.ContentFrom.Secret.Name}
	if err := r.Client.Get(ctx, key, secret); err != nil {
		if apierrors.IsNotFound(err) {
			return nil, errors.Wrapf(err, "secret not found: %s", key)
		}
		return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key)
	}
	data, ok := secret.Data[source.ContentFrom.Secret.Key]
	if !ok {
		return nil, errors.Errorf("secret references non-existent secret key: %q", source.ContentFrom.Secret.Key)
	}
	return data, nil
}

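// As an illustration of what resolveFiles and resolveSecretFileContent operate on,
// a KubeadmConfig spec entry of the following shape (the path, secret name, and key
// are assumptions made up for this example) has its contentFrom reference replaced
// by the referenced secret's decoded content before user data is generated:
//
//	files:
//	- path: /etc/kubernetes/audit-policy.yaml
//	  owner: root:root
//	  permissions: "0600"
//	  contentFrom:
//	    secret:
//	      name: audit-policy
//	      key: policy.yaml
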
// resolveUsers maps .Spec.Users into cloudinit.Users, resolving any object references
// along the way.
func (r *KubeadmConfigReconciler) resolveUsers(ctx context.Context, cfg *bootstrapv1.KubeadmConfig) ([]bootstrapv1.User, error) {
	collected := make([]bootstrapv1.User, 0, len(cfg.Spec.Users))

	for i := range cfg.Spec.Users {
		in := cfg.Spec.Users[i]
		if in.PasswdFrom != nil {
			data, err := r.resolveSecretPasswordContent(ctx, cfg.Namespace, in)
			if err != nil {
				return nil, errors.Wrapf(err, "failed to resolve passwd source")
			}
			in.PasswdFrom = nil
			passwdContent := string(data)
			in.Passwd = &passwdContent
		}
		collected = append(collected, in)
	}

	return collected, nil
}

// resolveSecretPasswordContent returns the passwd content fetched from a referenced secret object.
func (r *KubeadmConfigReconciler) resolveSecretPasswordContent(ctx context.Context, ns string, source bootstrapv1.User) ([]byte, error) {
	secret := &corev1.Secret{}
	key := types.NamespacedName{Namespace: ns, Name: source.PasswdFrom.Secret.Name}
	if err := r.Client.Get(ctx, key, secret); err != nil {
		if apierrors.IsNotFound(err) {
			return nil, errors.Wrapf(err, "secret not found: %s", key)
		}
		return nil, errors.Wrapf(err, "failed to retrieve Secret %q", key)
	}
	data, ok := secret.Data[source.PasswdFrom.Secret.Key]
	if !ok {
		return nil, errors.Errorf("secret references non-existent secret key: %q", source.PasswdFrom.Secret.Key)
	}
	return data, nil
}

// skipTokenRefreshIfExpiringAfter returns a duration. If the token's expiry timestamp is after
// `now + skipTokenRefreshIfExpiringAfter()`, it does not yet need a refresh.
func (r *KubeadmConfigReconciler) skipTokenRefreshIfExpiringAfter() time.Duration {
	// This is chosen according to how often reconciliation is "woken up" by `tokenCheckRefreshOrRotationInterval`.
	// Reconciliation should get triggered at least twice while the token still needs a refresh, i.e. have
	// two chances to refresh it (tolerating one temporary failure).
	return r.TokenTTL * 5 / 6
}

// tokenCheckRefreshOrRotationInterval defines when to trigger a reconciliation loop again to refresh or rotate a token.
func (r *KubeadmConfigReconciler) tokenCheckRefreshOrRotationInterval() time.Duration {
	// This interval defines how often the reconciler should get triggered.
	//
	// `r.TokenTTL / 3` means reconciliation gets triggered at least 3 times within the expiry time of the token. The
	// third call may be too late, so the first and second calls have a chance to extend the expiry (refresh/rotate),
	// allowing for one temporary failure.
	//
	// Related to `skipTokenRefreshIfExpiringAfter` and also to token rotation (which is different from refreshing).
	return r.TokenTTL / 3
}

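// Worked example of the two intervals above, using the default TokenTTL of 15 minutes:
//
//	skipTokenRefreshIfExpiringAfter() = 15m * 5/6 = 12m30s
//	tokenCheckRefreshOrRotationInterval() = 15m / 3 = 5m
//
// The token is therefore re-checked every 5 minutes and refreshed as soon as less than
// 12m30s of validity remains, leaving at least two reconciliations (at the 5m and 10m
// marks) before an unrefreshed token would expire.
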
// ClusterToKubeadmConfigs is a handler.ToRequestsFunc to be used to enqueue
// requests for reconciliation of KubeadmConfigs.
func (r *KubeadmConfigReconciler) ClusterToKubeadmConfigs(ctx context.Context, o client.Object) []ctrl.Request {
	result := []ctrl.Request{}

	c, ok := o.(*clusterv1.Cluster)
	if !ok {
		panic(fmt.Sprintf("Expected a Cluster but got a %T", o))
	}

	selectors := []client.ListOption{
		client.InNamespace(c.Namespace),
		client.MatchingLabels{
			clusterv1.ClusterNameLabel: c.Name,
		},
	}

	machineList := &clusterv1.MachineList{}
	if err := r.Client.List(ctx, machineList, selectors...); err != nil {
		return nil
	}

	for _, m := range machineList.Items {
		if m.Spec.Bootstrap.ConfigRef != nil &&
			m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("KubeadmConfig").GroupKind() {
			name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
			result = append(result, ctrl.Request{NamespacedName: name})
		}
	}

	if feature.Gates.Enabled(feature.MachinePool) {
		machinePoolList := &expv1.MachinePoolList{}
		if err := r.Client.List(ctx, machinePoolList, selectors...); err != nil {
			return nil
		}

		for _, mp := range machinePoolList.Items {
			if mp.Spec.Template.Spec.Bootstrap.ConfigRef != nil &&
				mp.Spec.Template.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("KubeadmConfig").GroupKind() {
				name := client.ObjectKey{Namespace: mp.Namespace, Name: mp.Spec.Template.Spec.Bootstrap.ConfigRef.Name}
				result = append(result, ctrl.Request{NamespacedName: name})
			}
		}
	}

	return result
}

// MachineToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue
// requests for reconciliation of KubeadmConfigs.
func (r *KubeadmConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
	m, ok := o.(*clusterv1.Machine)
	if !ok {
		panic(fmt.Sprintf("Expected a Machine but got a %T", o))
	}

	result := []ctrl.Request{}
	if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == bootstrapv1.GroupVersion.WithKind("KubeadmConfig") {
		name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name}
		result = append(result, ctrl.Request{NamespacedName: name})
	}
	return result
}

// MachinePoolToBootstrapMapFunc is a handler.ToRequestsFunc to be used to enqueue
// requests for reconciliation of KubeadmConfigs.
func (r *KubeadmConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request {
	m, ok := o.(*expv1.MachinePool)
	if !ok {
		panic(fmt.Sprintf("Expected a MachinePool but got a %T", o))
	}

	result := []ctrl.Request{}
	configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef
	if configRef != nil && configRef.GroupVersionKind().GroupKind() == bootstrapv1.GroupVersion.WithKind("KubeadmConfig").GroupKind() {
		name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name}
		result = append(result, ctrl.Request{NamespacedName: name})
	}
	return result
}

// reconcileDiscovery ensures that config.JoinConfiguration.Discovery is properly set for the joining node.
// The implementation respects user-provided discovery configurations, but in case some of them are missing, a valid BootstrapToken object
// is automatically injected into config.JoinConfiguration.Discovery.
// This simplifies the configuration UX by providing the option of delegating the kubeadm join discovery configuration to CABPK.
func (r *KubeadmConfigReconciler) reconcileDiscovery(ctx context.Context, cluster *clusterv1.Cluster, config *bootstrapv1.KubeadmConfig, certificates secret.Certificates) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// If the config already contains a file discovery configuration, respect it without further validations.
	if config.Spec.JoinConfiguration.Discovery.File != nil {
		return ctrl.Result{}, nil
	}

	// Otherwise it is necessary to ensure token discovery is properly configured.
	if config.Spec.JoinConfiguration.Discovery.BootstrapToken == nil {
		config.Spec.JoinConfiguration.Discovery.BootstrapToken = &bootstrapv1.BootstrapTokenDiscovery{}
	}

	// Calculate the CA cert hashes if they are not already set.
	if len(config.Spec.JoinConfiguration.Discovery.BootstrapToken.CACertHashes) == 0 {
		hashes, err := certificates.GetByPurpose(secret.ClusterCA).Hashes()
		if err != nil {
			log.Error(err, "Unable to generate Cluster CA certificate hashes")
			return ctrl.Result{}, err
		}
		config.Spec.JoinConfiguration.Discovery.BootstrapToken.CACertHashes = hashes
	}

	// If the BootstrapToken already contains an APIServerEndpoint, respect it; otherwise inject the endpoint defined in the Cluster's ControlPlaneEndpoint.
	apiServerEndpoint := config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint
	if apiServerEndpoint == "" {
		if !cluster.Spec.ControlPlaneEndpoint.IsValid() {
			log.V(1).Info("Waiting for Cluster Controller to set Cluster.Spec.ControlPlaneEndpoint")
			return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
		}

		apiServerEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
		config.Spec.JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint = apiServerEndpoint
		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.APIServerEndpoint", "APIServerEndpoint", apiServerEndpoint)
	}

	// If the BootstrapToken already contains a token, respect it; otherwise create a new bootstrap token for the node to join.
	if config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token == "" {
		remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(cluster))
		if err != nil {
			return ctrl.Result{}, err
		}

		token, err := createToken(ctx, remoteClient, r.TokenTTL)
		if err != nil {
			return ctrl.Result{}, errors.Wrapf(err, "failed to create new bootstrap token")
		}

		config.Spec.JoinConfiguration.Discovery.BootstrapToken.Token = token
		log.V(3).Info("Altering JoinConfiguration.Discovery.BootstrapToken.Token")
	}

	// If the BootstrapToken does not contain any CACertHashes then force-skip CA verification.
	if len(config.Spec.JoinConfiguration.Discovery.BootstrapToken.CACertHashes) == 0 {
		log.Info("No CAs were provided. Falling back to insecure discovery method by skipping CA Cert validation")
		config.Spec.JoinConfiguration.Discovery.BootstrapToken.UnsafeSkipCAVerification = true
	}

	return ctrl.Result{}, nil
}

// reconcileTopLevelObjectSettings injects into config.ClusterConfiguration values from top-level objects like the cluster and the machine.
// The implementation respects user-provided config values, but in case some of them are missing, values from top-level objects are used.
func (r *KubeadmConfigReconciler) reconcileTopLevelObjectSettings(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, config *bootstrapv1.KubeadmConfig) {
	log := ctrl.LoggerFrom(ctx)

	// If there is no ControlPlaneEndpoint defined in ClusterConfiguration but
	// there is a ControlPlaneEndpoint defined at Cluster level (e.g. the load balancer endpoint),
	// then use the Cluster's ControlPlaneEndpoint as the control plane endpoint for the Kubernetes cluster.
	if config.Spec.ClusterConfiguration.ControlPlaneEndpoint == "" && cluster.Spec.ControlPlaneEndpoint.IsValid() {
		config.Spec.ClusterConfiguration.ControlPlaneEndpoint = cluster.Spec.ControlPlaneEndpoint.String()
		log.V(3).Info("Altering ClusterConfiguration.ControlPlaneEndpoint", "ControlPlaneEndpoint", config.Spec.ClusterConfiguration.ControlPlaneEndpoint)
	}

	// If there is no ClusterName defined in ClusterConfiguration, use Cluster.Name.
	if config.Spec.ClusterConfiguration.ClusterName == "" {
		config.Spec.ClusterConfiguration.ClusterName = cluster.Name
		log.V(3).Info("Altering ClusterConfiguration.ClusterName", "ClusterName", config.Spec.ClusterConfiguration.ClusterName)
	}

	// If there are no Network settings defined in ClusterConfiguration, use the ClusterNetwork settings, if defined.
	if cluster.Spec.ClusterNetwork != nil {
		if config.Spec.ClusterConfiguration.Networking.DNSDomain == "" && cluster.Spec.ClusterNetwork.ServiceDomain != "" {
			config.Spec.ClusterConfiguration.Networking.DNSDomain = cluster.Spec.ClusterNetwork.ServiceDomain
			log.V(3).Info("Altering ClusterConfiguration.Networking.DNSDomain", "DNSDomain", config.Spec.ClusterConfiguration.Networking.DNSDomain)
		}
		if config.Spec.ClusterConfiguration.Networking.ServiceSubnet == "" &&
			cluster.Spec.ClusterNetwork.Services != nil &&
			len(cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 {
			config.Spec.ClusterConfiguration.Networking.ServiceSubnet = cluster.Spec.ClusterNetwork.Services.String()
			log.V(3).Info("Altering ClusterConfiguration.Networking.ServiceSubnet", "ServiceSubnet", config.Spec.ClusterConfiguration.Networking.ServiceSubnet)
		}
		if config.Spec.ClusterConfiguration.Networking.PodSubnet == "" &&
			cluster.Spec.ClusterNetwork.Pods != nil &&
			len(cluster.Spec.ClusterNetwork.Pods.CIDRBlocks) > 0 {
			config.Spec.ClusterConfiguration.Networking.PodSubnet = cluster.Spec.ClusterNetwork.Pods.String()
			log.V(3).Info("Altering ClusterConfiguration.Networking.PodSubnet", "PodSubnet", config.Spec.ClusterConfiguration.Networking.PodSubnet)
		}
	}

	// If there is no KubernetesVersion defined in ClusterConfiguration, use the Version from the machine, if defined.
	if config.Spec.ClusterConfiguration.KubernetesVersion == "" && machine.Spec.Version != nil {
		config.Spec.ClusterConfiguration.KubernetesVersion = *machine.Spec.Version
		log.V(3).Info("Altering ClusterConfiguration.KubernetesVersion", "KubernetesVersion", config.Spec.ClusterConfiguration.KubernetesVersion)
	}
}

// storeBootstrapData creates a new secret with the data passed in as input,
// sets the reference in the configuration status, and marks the configuration as ready.
func (r *KubeadmConfigReconciler) storeBootstrapData(ctx context.Context, scope *Scope, data []byte) error {
	log := ctrl.LoggerFrom(ctx)

	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      scope.Config.Name,
			Namespace: scope.Config.Namespace,
			Labels: map[string]string{
				clusterv1.ClusterNameLabel: scope.Cluster.Name,
			},
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: bootstrapv1.GroupVersion.String(),
					Kind:       "KubeadmConfig",
					Name:       scope.Config.Name,
					UID:        scope.Config.UID,
					Controller: ptr.To(true),
				},
			},
		},
		Data: map[string][]byte{
			"value":  data,
			"format": []byte(scope.Config.Spec.Format),
		},
		Type: clusterv1.ClusterSecretType,
	}

	// As secret creation and the scope.Config status patch are not atomic operations,
	// it is possible that the secret is created but the config.Status patches are not applied.
	if err := r.Client.Create(ctx, secret); err != nil {
		if !apierrors.IsAlreadyExists(err) {
			return errors.Wrapf(err, "failed to create bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
		}
		log.Info("Bootstrap data secret for KubeadmConfig already exists, updating", "Secret", klog.KObj(secret))
		if err := r.Client.Update(ctx, secret); err != nil {
			return errors.Wrapf(err, "failed to update bootstrap data secret for KubeadmConfig %s/%s", scope.Config.Namespace, scope.Config.Name)
		}
	}
	scope.Config.Status.DataSecretName = ptr.To(secret.Name)
	scope.Config.Status.Ready = true
	conditions.MarkTrue(scope.Config, bootstrapv1.DataSecretAvailableCondition)
	return nil
}

// ensureBootstrapSecretOwnersRef ensures the bootstrap secret has the KubeadmConfig as a controller OwnerReference.
func (r *KubeadmConfigReconciler) ensureBootstrapSecretOwnersRef(ctx context.Context, scope *Scope) error {
	secret := &corev1.Secret{}
	err := r.SecretCachingClient.Get(ctx, client.ObjectKey{Namespace: scope.Config.Namespace, Name: scope.Config.Name}, secret)
	if err != nil {
		// If the secret has not been created yet, return early.
		if apierrors.IsNotFound(err) {
			return nil
		}
		return errors.Wrapf(err, "failed to add KubeadmConfig %s as ownerReference to bootstrap Secret %s", scope.ConfigOwner.GetName(), secret.GetName())
	}
	patchHelper, err := patch.NewHelper(secret, r.Client)
	if err != nil {
		return errors.Wrapf(err, "failed to add KubeadmConfig %s as ownerReference to bootstrap Secret %s", scope.ConfigOwner.GetName(), secret.GetName())
	}
	if c := metav1.GetControllerOf(secret); c != nil && c.Kind != "KubeadmConfig" {
		secret.SetOwnerReferences(util.RemoveOwnerRef(secret.GetOwnerReferences(), *c))
	}
	secret.SetOwnerReferences(util.EnsureOwnerRef(secret.GetOwnerReferences(), metav1.OwnerReference{
		APIVersion: bootstrapv1.GroupVersion.String(),
		Kind:       "KubeadmConfig",
		UID:        scope.Config.UID,
		Name:       scope.Config.Name,
		Controller: ptr.To(true),
	}))
	err = patchHelper.Patch(ctx, secret)
	if err != nil {
		return errors.Wrapf(err, "could not add KubeadmConfig %s as ownerReference to bootstrap Secret %s", scope.ConfigOwner.GetName(), secret.GetName())
	}
	return nil
}