sigs.k8s.io/cluster-api@v1.7.1/cmd/clusterctl/client/cluster/mover.go

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"time"
    25  
    26  	"github.com/pkg/errors"
    27  	corev1 "k8s.io/api/core/v1"
    28  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    29  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    30  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    31  	"k8s.io/apimachinery/pkg/runtime"
    32  	"k8s.io/apimachinery/pkg/types"
    33  	kerrors "k8s.io/apimachinery/pkg/util/errors"
    34  	"k8s.io/apimachinery/pkg/util/sets"
    35  	"k8s.io/apimachinery/pkg/util/version"
    36  	"k8s.io/apimachinery/pkg/util/wait"
    37  	"k8s.io/klog/v2"
    38  	"sigs.k8s.io/controller-runtime/pkg/client"
    39  
    40  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    41  	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
    42  	logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
    43  	"sigs.k8s.io/cluster-api/util/conditions"
    44  	"sigs.k8s.io/cluster-api/util/patch"
    45  	"sigs.k8s.io/cluster-api/util/yaml"
    46  )
    47  
    48  // ResourceMutatorFunc holds the type for mutators to be applied on resources during a move operation.
    49  type ResourceMutatorFunc func(u *unstructured.Unstructured) error
    50  
    51  // ObjectMover defines methods for moving Cluster API objects to another management cluster.
    52  type ObjectMover interface {
    53  	// Move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster.
    54  	Move(ctx context.Context, namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error
    55  
    56  	// ToDirectory writes all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target directory.
    57  	ToDirectory(ctx context.Context, namespace string, directory string) error
    58  
    59  	// FromDirectory reads all the Cluster API objects existing in a configured directory to a target management cluster.
    60  	FromDirectory(ctx context.Context, toCluster Client, directory string) error
    61  }
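
         // As an illustrative sketch only (not part of the upstream API surface), a
         // caller could combine Move with a ResourceMutatorFunc that relocates the
         // moved objects into a different namespace on the target cluster; the
         // mover and targetCluster variables below are hypothetical:
         //
         //	err := mover.Move(ctx, "ns-source", targetCluster, false,
         //		func(u *unstructured.Unstructured) error {
         //			if u.GetNamespace() != "" {
         //				u.SetNamespace("ns-target")
         //			}
         //			return nil
         //		})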
    62  
    63  // objectMover implements the ObjectMover interface.
    64  type objectMover struct {
    65  	fromProxy             Proxy
    66  	fromProviderInventory InventoryClient
    67  	dryRun                bool
    68  }
    69  
    70  // ensure objectMover implements the ObjectMover interface.
    71  var _ ObjectMover = &objectMover{}
    72  
    73  func (o *objectMover) Move(ctx context.Context, namespace string, toCluster Client, dryRun bool, mutators ...ResourceMutatorFunc) error {
    74  	log := logf.Log
    75  	log.Info("Performing move...")
    76  	o.dryRun = dryRun
    77  	if o.dryRun {
    78  		log.Info("********************************************************")
    79  		log.Info("This is a dry-run move, will not perform any real action")
    80  		log.Info("********************************************************")
    81  	}
    82  
     83  	// Checks that all the required providers are in place in the target cluster.
    84  	if !o.dryRun {
    85  		if err := o.checkTargetProviders(ctx, toCluster.ProviderInventory()); err != nil {
    86  			return errors.Wrap(err, "failed to check providers in target cluster")
    87  		}
    88  	}
    89  
    90  	objectGraph, err := o.getObjectGraph(ctx, namespace)
    91  	if err != nil {
    92  		return errors.Wrap(err, "failed to get object graph")
    93  	}
    94  
    95  	// Move the objects to the target cluster.
    96  	var proxy Proxy
    97  	if !o.dryRun {
    98  		proxy = toCluster.Proxy()
    99  	}
   100  
   101  	return o.move(ctx, objectGraph, proxy, mutators...)
   102  }
   103  
   104  func (o *objectMover) ToDirectory(ctx context.Context, namespace string, directory string) error {
   105  	log := logf.Log
   106  	log.Info("Moving to directory...")
   107  
   108  	objectGraph, err := o.getObjectGraph(ctx, namespace)
   109  	if err != nil {
   110  		return errors.Wrap(err, "failed to get object graph")
   111  	}
   112  
   113  	return o.toDirectory(ctx, objectGraph, directory)
   114  }
   115  
   116  func (o *objectMover) FromDirectory(ctx context.Context, toCluster Client, directory string) error {
   117  	log := logf.Log
   118  	log.Info("Moving from directory...")
   119  
    120  	// Build an empty object graph used for the fromDirectory sequence; it is not tied to a specific namespace.
   121  	objectGraph := newObjectGraph(o.fromProxy, o.fromProviderInventory)
   122  
   123  	// Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types.
   124  	err := objectGraph.getDiscoveryTypes(ctx)
   125  	if err != nil {
   126  		return errors.Wrap(err, "failed to retrieve discovery types")
   127  	}
   128  
   129  	objs, err := o.filesToObjs(directory)
   130  	if err != nil {
   131  		return errors.Wrap(err, "failed to process object files")
   132  	}
   133  
   134  	for i := range objs {
   135  		if err = objectGraph.addRestoredObj(&objs[i]); err != nil {
   136  			return err
   137  		}
   138  	}
   139  
   140  	// Completes rebuilding the graph from file by searching for soft ownership relations such as secrets linked to the cluster
   141  	// by a naming convention (without any explicit OwnerReference).
   142  	objectGraph.setSoftOwnership()
   143  
   144  	// Completes the graph by setting for each node the list of tenants the node belongs to.
   145  	objectGraph.setTenants()
   146  
    147  	// Check whether any nodes are not included in the GVKs considered for fromDirectory.
   148  	objectGraph.checkVirtualNode()
   149  
   150  	// Restore the objects to the target cluster.
   151  	proxy := toCluster.Proxy()
   152  
   153  	return o.fromDirectory(ctx, objectGraph, proxy)
   154  }
   155  
   156  func (o *objectMover) filesToObjs(dir string) ([]unstructured.Unstructured, error) {
   157  	log := logf.Log
   158  	log.Info(fmt.Sprintf("Restoring files from %s", dir))
   159  
   160  	files, err := os.ReadDir(dir)
   161  	if err != nil {
   162  		return nil, err
   163  	}
   164  
   165  	rawYAMLs := make([][]byte, 0)
   166  	for i := range files {
   167  		path := filepath.Clean(filepath.Join(dir, files[i].Name()))
   168  
   169  		byObj, err := os.ReadFile(path)
   170  		if err != nil {
   171  			return nil, err
   172  		}
   173  
   174  		rawYAMLs = append(rawYAMLs, byObj)
   175  	}
   176  
   177  	processedYAMLs := yaml.JoinYaml(rawYAMLs...)
   178  
   179  	objs, err := yaml.ToUnstructured(processedYAMLs)
   180  	if err != nil {
   181  		return nil, err
   182  	}
   183  
   184  	return objs, nil
   185  }
   186  
   187  func (o *objectMover) getObjectGraph(ctx context.Context, namespace string) (*objectGraph, error) {
   188  	objectGraph := newObjectGraph(o.fromProxy, o.fromProviderInventory)
   189  
   190  	// Gets all the types defined by the CRDs installed by clusterctl plus the ConfigMap/Secret core types.
   191  	err := objectGraph.getDiscoveryTypes(ctx)
   192  	if err != nil {
   193  		return nil, errors.Wrap(err, "failed to retrieve discovery types")
   194  	}
   195  
    196  	// Discover the object graph for the selected types:
    197  	// - Nodes are the Kubernetes objects (Clusters, Machines etc.) identified during the discovery process.
    198  	// - Edges are derived from the OwnerReferences between nodes.
   199  	if err := objectGraph.Discovery(ctx, namespace); err != nil {
   200  		return nil, errors.Wrap(err, "failed to discover the object graph")
   201  	}
   202  
   203  	// Checks if Cluster API has already completed the provisioning of the infrastructure for the objects involved in the move/toDirectory operation.
   204  	// This is required because if the infrastructure is provisioned, then we can reasonably assume that the objects we are moving/backing up are
   205  	// not currently waiting for long-running reconciliation loops, and so we can safely rely on the pause field on the Cluster object
   206  	// for blocking any further object reconciliation on the source objects.
   207  	if err := o.checkProvisioningCompleted(ctx, objectGraph); err != nil {
   208  		return nil, errors.Wrap(err, "failed to check for provisioned infrastructure")
   209  	}
   210  
    211  	// Check whether any nodes are not included in the GVKs considered for move.
   212  	objectGraph.checkVirtualNode()
   213  
   214  	return objectGraph, nil
   215  }
   216  
   217  func newObjectMover(fromProxy Proxy, fromProviderInventory InventoryClient) *objectMover {
   218  	return &objectMover{
   219  		fromProxy:             fromProxy,
   220  		fromProviderInventory: fromProviderInventory,
   221  	}
   222  }
   223  
   224  // checkProvisioningCompleted checks if Cluster API has already completed the provisioning of the infrastructure for the objects involved in the move operation.
   225  func (o *objectMover) checkProvisioningCompleted(ctx context.Context, graph *objectGraph) error {
   226  	if o.dryRun {
   227  		return nil
   228  	}
   229  	errList := []error{}
   230  
    231  	// Check that the infrastructure is ready for all the clusters.
   232  	readClusterBackoff := newReadBackoff()
   233  	clusters := graph.getClusters()
   234  	for i := range clusters {
   235  		cluster := clusters[i]
   236  		clusterObj := &clusterv1.Cluster{}
   237  		if err := retryWithExponentialBackoff(ctx, readClusterBackoff, func(ctx context.Context) error {
   238  			return getClusterObj(ctx, o.fromProxy, cluster, clusterObj)
   239  		}); err != nil {
   240  			return err
   241  		}
   242  
   243  		if !clusterObj.Status.InfrastructureReady {
   244  			errList = append(errList, errors.Errorf("cannot start the move operation while %q %s/%s is still provisioning the infrastructure", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName()))
   245  			continue
   246  		}
   247  
   248  		// Note: can't use IsFalse here because we need to handle the absence of the condition as well as false.
   249  		if !conditions.IsTrue(clusterObj, clusterv1.ControlPlaneInitializedCondition) {
   250  			errList = append(errList, errors.Errorf("cannot start the move operation while the control plane for %q %s/%s is not yet initialized", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName()))
   251  			continue
   252  		}
   253  
   254  		if clusterObj.Spec.ControlPlaneRef != nil && !clusterObj.Status.ControlPlaneReady {
   255  			errList = append(errList, errors.Errorf("cannot start the move operation while the control plane for %q %s/%s is not yet ready", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName()))
   256  			continue
   257  		}
   258  	}
   259  
    260  	// Check that all the machines have a NodeRef.
   261  	// Nb. NodeRef is considered a better signal than InfrastructureReady, because it ensures the node in the workload cluster is up and running.
   262  	readMachinesBackoff := newReadBackoff()
   263  	machines := graph.getMachines()
   264  	for i := range machines {
   265  		machine := machines[i]
   266  		machineObj := &clusterv1.Machine{}
   267  		if err := retryWithExponentialBackoff(ctx, readMachinesBackoff, func(ctx context.Context) error {
   268  			return getMachineObj(ctx, o.fromProxy, machine, machineObj)
   269  		}); err != nil {
   270  			return err
   271  		}
   272  
   273  		if machineObj.Status.NodeRef == nil {
   274  			errList = append(errList, errors.Errorf("cannot start the move operation while %q %s/%s is still provisioning the node", machineObj.GroupVersionKind(), machineObj.GetNamespace(), machineObj.GetName()))
   275  		}
   276  	}
   277  
   278  	return kerrors.NewAggregate(errList)
   279  }
   280  
   281  // getClusterObj retrieves the clusterObj corresponding to a node with type Cluster.
   282  func getClusterObj(ctx context.Context, proxy Proxy, cluster *node, clusterObj *clusterv1.Cluster) error {
   283  	c, err := proxy.NewClient(ctx)
   284  	if err != nil {
   285  		return err
   286  	}
   287  	clusterObjKey := client.ObjectKey{
   288  		Namespace: cluster.identity.Namespace,
   289  		Name:      cluster.identity.Name,
   290  	}
   291  
   292  	if err := c.Get(ctx, clusterObjKey, clusterObj); err != nil {
   293  		return errors.Wrapf(err, "error reading Cluster %s/%s",
   294  			clusterObj.GetNamespace(), clusterObj.GetName())
   295  	}
   296  	return nil
   297  }
   298  
   299  // getMachineObj retrieves the machineObj corresponding to a node with type Machine.
   300  func getMachineObj(ctx context.Context, proxy Proxy, machine *node, machineObj *clusterv1.Machine) error {
   301  	c, err := proxy.NewClient(ctx)
   302  	if err != nil {
   303  		return err
   304  	}
   305  	machineObjKey := client.ObjectKey{
   306  		Namespace: machine.identity.Namespace,
   307  		Name:      machine.identity.Name,
   308  	}
   309  
   310  	if err := c.Get(ctx, machineObjKey, machineObj); err != nil {
   311  		return errors.Wrapf(err, "error reading Machine %s/%s",
   312  			machineObj.GetNamespace(), machineObj.GetName())
   313  	}
   314  	return nil
   315  }
   316  
    317  // move moves all the Cluster API objects existing in a namespace (or from all the namespaces if empty) to a target management cluster.
   318  func (o *objectMover) move(ctx context.Context, graph *objectGraph, toProxy Proxy, mutators ...ResourceMutatorFunc) error {
   319  	log := logf.Log
   320  
   321  	clusters := graph.getClusters()
   322  	log.Info("Moving Cluster API objects", "Clusters", len(clusters))
   323  
   324  	clusterClasses := graph.getClusterClasses()
   325  	log.Info("Moving Cluster API objects", "ClusterClasses", len(clusterClasses))
   326  
   327  	// Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it.
   328  	log.V(1).Info("Pausing the source cluster")
   329  	if err := setClusterPause(ctx, o.fromProxy, clusters, true, o.dryRun); err != nil {
   330  		return err
   331  	}
   332  
   333  	log.V(1).Info("Pausing the source ClusterClasses")
   334  	if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, true, o.dryRun); err != nil {
   335  		return errors.Wrap(err, "error pausing ClusterClasses")
   336  	}
   337  
   338  	log.Info("Waiting for all resources to be ready to move")
    339  	// exponential backoff configuration which returns durations for a total time of ~6m.
    340  	// Example: 0, 5s, 8s, 11s, 17s, 26s, 38s, 57s, 86s, 128s
   341  	waitForMoveUnblockedBackoff := wait.Backoff{
   342  		Duration: 5 * time.Second,
   343  		Factor:   1.5,
   344  		Steps:    10,
   345  		Jitter:   0.1,
   346  	}
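         	// For reference (a sketch of client-go semantics, not code from this file):
         	// wait.Backoff.Step returns the current Duration plus up to Jitter*Duration
         	// of random jitter, then multiplies Duration by Factor, which is what
         	// produces the 5s, 8s, 11s, ... sequence above.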
   347  	if err := waitReadyForMove(ctx, o.fromProxy, graph.getMoveNodes(), o.dryRun, waitForMoveUnblockedBackoff); err != nil {
   348  		return errors.Wrap(err, "error waiting for resources to be ready to move")
   349  	}
   350  
   351  	// Nb. DO NOT call ensureNamespaces at this point because:
    352  	// - the namespace will be ensured to exist right before creating each resource.
    353  	// - If it's done here, we might create a namespace that ends up unused on the target cluster (due to mutators).
   354  
   355  	// Define the move sequence by processing the ownerReference chain, so we ensure that a Kubernetes object is moved only after its owners.
    356  	// The sequence is based on object graph nodes, each one representing a Kubernetes object; nodes are grouped so that batches of nodes can be moved in parallel, e.g.
    357  	// - All the Clusters should be moved first (group 1, processed in parallel)
    358  	// - All the MachineDeployments should be moved second (group 2, processed in parallel)
   359  	// - then all the MachineSets, then all the Machines, etc.
   360  	moveSequence := getMoveSequence(graph)
   361  
   362  	// Create all objects group by group, ensuring all the ownerReferences are re-created.
   363  	log.Info("Creating objects in the target cluster")
   364  	for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ {
   365  		if err := o.createGroup(ctx, moveSequence.getGroup(groupIndex), toProxy, mutators...); err != nil {
   366  			return err
   367  		}
   368  	}
   369  
    370  	// Nb. Mutators used after this point (after creating the resources on the target cluster) are mainly intended for
    371  	// using the right namespace to fetch the resource from the target cluster.
    372  	// Mutators affecting non-metadata fields are no-ops after this point.
   373  
   374  	// Delete all objects group by group in reverse order.
   375  	log.Info("Deleting objects from the source cluster")
   376  	for groupIndex := len(moveSequence.groups) - 1; groupIndex >= 0; groupIndex-- {
   377  		if err := o.deleteGroup(ctx, moveSequence.getGroup(groupIndex)); err != nil {
   378  			return err
   379  		}
   380  	}
   381  
    382  	// Resume the ClusterClasses in the target management cluster, so the controllers start reconciling them.
   383  	log.V(1).Info("Resuming the target ClusterClasses")
   384  	if err := setClusterClassPause(ctx, toProxy, clusterClasses, false, o.dryRun, mutators...); err != nil {
   385  		return errors.Wrap(err, "error resuming ClusterClasses")
   386  	}
   387  
   388  	// Reset the pause field on the Cluster object in the target management cluster, so the controllers start reconciling it.
   389  	log.V(1).Info("Resuming the target cluster")
   390  	return setClusterPause(ctx, toProxy, clusters, false, o.dryRun, mutators...)
   391  }
   392  
   393  func (o *objectMover) toDirectory(ctx context.Context, graph *objectGraph, directory string) error {
   394  	log := logf.Log
   395  
   396  	clusters := graph.getClusters()
   397  	log.Info("Starting move of Cluster API objects", "Clusters", len(clusters))
   398  
   399  	clusterClasses := graph.getClusterClasses()
   400  	log.Info("Moving Cluster API objects", "ClusterClasses", len(clusterClasses))
   401  
   402  	// Sets the pause field on the Cluster object in the source management cluster, so the controllers stop reconciling it.
   403  	log.V(1).Info("Pausing the source cluster")
   404  	if err := setClusterPause(ctx, o.fromProxy, clusters, true, o.dryRun); err != nil {
   405  		return err
   406  	}
   407  
   408  	log.V(1).Info("Pausing the source ClusterClasses")
   409  	if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, true, o.dryRun); err != nil {
   410  		return errors.Wrap(err, "error pausing ClusterClasses")
   411  	}
   412  
   413  	// Define the move sequence by processing the ownerReference chain, so we ensure that a Kubernetes object is moved only after its owners.
    414  	// The sequence is based on object graph nodes, each one representing a Kubernetes object; nodes are grouped so that batches of nodes can be moved in parallel, e.g.
    415  	// - All the Clusters should be moved first (group 1, processed in parallel)
    416  	// - All the MachineDeployments should be moved second (group 2, processed in parallel)
   417  	// - then all the MachineSets, then all the Machines, etc.
   418  	moveSequence := getMoveSequence(graph)
   419  
   420  	// Save all objects group by group
   421  	log.Info(fmt.Sprintf("Saving files to %s", directory))
   422  	for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ {
   423  		if err := o.backupGroup(ctx, moveSequence.getGroup(groupIndex), directory); err != nil {
   424  			return err
   425  		}
   426  	}
   427  
    428  	// Resume the ClusterClasses in the source management cluster, so the controllers start reconciling them.
    429  	log.V(1).Info("Resuming the source ClusterClasses")
   430  	if err := setClusterClassPause(ctx, o.fromProxy, clusterClasses, false, o.dryRun); err != nil {
   431  		return errors.Wrap(err, "error resuming ClusterClasses")
   432  	}
   433  
    434  	// Reset the pause field on the Cluster object in the source management cluster, so the controllers start reconciling it.
   435  	log.V(1).Info("Resuming the source cluster")
   436  	return setClusterPause(ctx, o.fromProxy, clusters, false, o.dryRun)
   437  }
   438  
   439  func (o *objectMover) fromDirectory(ctx context.Context, graph *objectGraph, toProxy Proxy) error {
   440  	log := logf.Log
   441  
   442  	// Get clusters from graph
   443  	clusters := graph.getClusters()
   444  	// Get clusterclasses from graph
   445  	clusterClasses := graph.getClusterClasses()
   446  
   447  	// Ensure all the expected target namespaces are in place before creating objects.
   448  	log.V(1).Info("Creating target namespaces, if missing")
   449  	if err := o.ensureNamespaces(ctx, graph, toProxy); err != nil {
   450  		return err
   451  	}
   452  
   453  	// Define the move sequence by processing the ownerReference chain, so we ensure that a Kubernetes object is moved only after its owners.
    454  	// The sequence is based on object graph nodes, each one representing a Kubernetes object; nodes are grouped so that batches of nodes can be moved in parallel, e.g.
    455  	// - All the Clusters should be moved first (group 1, processed in parallel)
    456  	// - All the MachineDeployments should be moved second (group 2, processed in parallel)
   457  	// - then all the MachineSets, then all the Machines, etc.
   458  	moveSequence := getMoveSequence(graph)
   459  
   460  	// Create all objects group by group, ensuring all the ownerReferences are re-created.
   461  	log.Info("Restoring objects into the target cluster")
   462  	for groupIndex := 0; groupIndex < len(moveSequence.groups); groupIndex++ {
   463  		if err := o.restoreGroup(ctx, moveSequence.getGroup(groupIndex), toProxy); err != nil {
   464  			return err
   465  		}
   466  	}
   467  
    468  	// Resume reconciling the ClusterClasses after they have been restored from a backup.
    469  	// By default, during backup, ClusterClasses are paused, so they must be unpaused to be used again.
   470  	log.V(1).Info("Resuming the target ClusterClasses")
   471  	if err := setClusterClassPause(ctx, toProxy, clusterClasses, false, o.dryRun); err != nil {
   472  		return errors.Wrap(err, "error resuming ClusterClasses")
   473  	}
   474  
   475  	// Resume reconciling the Clusters after being restored from a directory.
   476  	// By default, when moved to a directory, Clusters are paused, so they must be unpaused to be used again.
   477  	log.V(1).Info("Resuming the target cluster")
   478  	return setClusterPause(ctx, toProxy, clusters, false, o.dryRun)
   479  }
   480  
    481  // moveSequence defines an ordered list of moveGroups.
   482  type moveSequence struct {
   483  	groups   []moveGroup
   484  	nodesMap map[*node]empty
   485  }
   486  
    487  // moveGroup is a list of nodes read from the object graph that can be moved in parallel.
   488  type moveGroup []*node
   489  
   490  func (s *moveSequence) addGroup(group moveGroup) {
   491  	// Add the group
   492  	s.groups = append(s.groups, group)
    493  	// Add all the nodes in the group to the nodesMap so we can check whether a node is already in the move sequence.
   494  	for _, n := range group {
   495  		s.nodesMap[n] = empty{}
   496  	}
   497  }
   498  
   499  func (s *moveSequence) hasNode(n *node) bool {
   500  	_, ok := s.nodesMap[n]
   501  	return ok
   502  }
   503  
   504  func (s *moveSequence) getGroup(i int) moveGroup {
   505  	return s.groups[i]
   506  }
   507  
   508  // Define the move sequence by processing the ownerReference chain.
   509  func getMoveSequence(graph *objectGraph) *moveSequence {
   510  	moveSequence := &moveSequence{
   511  		groups:   []moveGroup{},
   512  		nodesMap: make(map[*node]empty),
   513  	}
   514  
   515  	for {
   516  		// Determine the next move group by processing all the nodes in the graph that belong to a Cluster.
    517  		// NB. It is necessary to filter out nodes not belonging to a cluster because e.g. discovery reads all the secrets,
    518  		// but only a few of them are related to Clusters/Machines etc.
   519  		moveGroup := moveGroup{}
   520  
   521  		for _, n := range graph.getMoveNodes() {
   522  			// If the node was already included in the moveSequence, skip it.
   523  			if moveSequence.hasNode(n) {
   524  				continue
   525  			}
   526  
   527  			// Check if all the ownerReferences are already included in the move sequence; if yes, add the node to move group,
   528  			// otherwise skip it (the node will be re-processed in the next group).
   529  			ownersInPlace := true
   530  			for owner := range n.owners {
   531  				if !moveSequence.hasNode(owner) {
   532  					ownersInPlace = false
   533  					break
   534  				}
   535  			}
   536  			for owner := range n.softOwners {
   537  				if !moveSequence.hasNode(owner) {
   538  					ownersInPlace = false
   539  					break
   540  				}
   541  			}
   542  			if ownersInPlace {
   543  				moveGroup = append(moveGroup, n)
   544  			}
   545  		}
   546  
    547  		// If the resulting move group is empty, it means that all the nodes are already in the sequence, so exit.
   548  		if len(moveGroup) == 0 {
   549  			break
   550  		}
   551  		moveSequence.addGroup(moveGroup)
   552  	}
   553  	return moveSequence
   554  }
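
         // As a worked example (illustrative only): given a graph in which a Cluster
         // owns a MachineDeployment, which owns a MachineSet, which owns a Machine,
         // getMoveSequence yields:
         //
         //	group 1: [Cluster]            // no owners, moved first
         //	group 2: [MachineDeployment]  // its owner (the Cluster) is already in the sequence
         //	group 3: [MachineSet]
         //	group 4: [Machine]
         //
         // All the owners of a node sit in earlier groups, so the nodes within a
         // single group can safely be processed in parallel.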
   555  
   556  // setClusterPause sets the paused field on nodes referring to Cluster objects.
   557  func setClusterPause(ctx context.Context, proxy Proxy, clusters []*node, value bool, dryRun bool, mutators ...ResourceMutatorFunc) error {
   558  	if dryRun {
   559  		return nil
   560  	}
   561  
   562  	log := logf.Log
   563  	patchValue := "true"
   564  	if !value {
    565  		// If `value` is false, drop the field entirely.
    566  		// This makes sure that clusterctl does not own the field, avoiding any ownership conflicts.
   567  		patchValue = "null"
   568  	}
   569  	patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%s}}", patchValue)))
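         	// For reference, the rendered merge patch is one of the following (the
         	// "null" variant removes the field, so clusterctl stops owning it):
         	//
         	//	{"spec":{"paused":true}}
         	//	{"spec":{"paused":null}}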
   570  
   571  	setClusterPauseBackoff := newWriteBackoff()
   572  	for i := range clusters {
   573  		cluster := clusters[i]
   574  		log.V(5).Info("Set Cluster.Spec.Paused", "paused", value, "Cluster", klog.KRef(cluster.identity.Namespace, cluster.identity.Name))
   575  
   576  		// Nb. The operation is wrapped in a retry loop to make setClusterPause more resilient to unexpected conditions.
   577  		if err := retryWithExponentialBackoff(ctx, setClusterPauseBackoff, func(ctx context.Context) error {
   578  			return patchCluster(ctx, proxy, cluster, patch, mutators...)
   579  		}); err != nil {
   580  			return errors.Wrapf(err, "error setting Cluster.Spec.Paused=%t", value)
   581  		}
   582  	}
   583  	return nil
   584  }
   585  
   586  // setClusterClassPause sets the paused annotation on nodes referring to ClusterClass objects.
   587  func setClusterClassPause(ctx context.Context, proxy Proxy, clusterclasses []*node, pause bool, dryRun bool, mutators ...ResourceMutatorFunc) error {
   588  	if dryRun {
   589  		return nil
   590  	}
   591  
   592  	log := logf.Log
   593  
   594  	setClusterClassPauseBackoff := newWriteBackoff()
   595  	for i := range clusterclasses {
   596  		clusterclass := clusterclasses[i]
   597  		if pause {
   598  			log.V(5).Info("Set Paused annotation", "ClusterClass", clusterclass.identity.Name, "Namespace", clusterclass.identity.Namespace)
   599  		} else {
   600  			log.V(5).Info("Remove Paused annotation", "ClusterClass", clusterclass.identity.Name, "Namespace", clusterclass.identity.Namespace)
   601  		}
   602  
   603  		// Nb. The operation is wrapped in a retry loop to make setClusterClassPause more resilient to unexpected conditions.
   604  		if err := retryWithExponentialBackoff(ctx, setClusterClassPauseBackoff, func(ctx context.Context) error {
   605  			return pauseClusterClass(ctx, proxy, clusterclass, pause, mutators...)
   606  		}); err != nil {
   607  			return errors.Wrapf(err, "error updating ClusterClass %s/%s", clusterclass.identity.Namespace, clusterclass.identity.Name)
   608  		}
   609  	}
   610  	return nil
   611  }
   612  
   613  func waitReadyForMove(ctx context.Context, proxy Proxy, nodes []*node, dryRun bool, backoff wait.Backoff) error {
   614  	if dryRun {
   615  		return nil
   616  	}
   617  
   618  	log := logf.Log
   619  
   620  	c, err := proxy.NewClient(ctx)
   621  	if err != nil {
   622  		return errors.Wrap(err, "error creating client")
   623  	}
   624  
   625  	for _, n := range nodes {
   626  		log := log.WithValues(
   627  			"apiVersion", n.identity.GroupVersionKind(),
   628  			"resource", klog.ObjectRef{
   629  				Name:      n.identity.Name,
   630  				Namespace: n.identity.Namespace,
   631  			},
   632  		)
   633  		if !n.blockingMove {
   634  			log.V(5).Info("Resource not blocking move")
   635  			continue
   636  		}
   637  
   638  		obj := &metav1.PartialObjectMetadata{
   639  			ObjectMeta: metav1.ObjectMeta{
   640  				Name:      n.identity.Name,
   641  				Namespace: n.identity.Namespace,
   642  			},
   643  			TypeMeta: metav1.TypeMeta{
   644  				APIVersion: n.identity.APIVersion,
   645  				Kind:       n.identity.Kind,
   646  			},
   647  		}
   648  		key := client.ObjectKeyFromObject(obj)
   649  
   650  		blockLogged := false
   651  		if err := retryWithExponentialBackoff(ctx, backoff, func(ctx context.Context) error {
   652  			if err := c.Get(ctx, key, obj); err != nil {
   653  				return errors.Wrapf(err, "error getting %s/%s", obj.GroupVersionKind(), key)
   654  			}
   655  
   656  			if _, exists := obj.GetAnnotations()[clusterctlv1.BlockMoveAnnotation]; exists {
   657  				if !blockLogged {
   658  					log.Info(fmt.Sprintf("Move blocked by %s annotation, waiting for it to be removed", clusterctlv1.BlockMoveAnnotation))
   659  					blockLogged = true
   660  				}
   661  				return errors.Errorf("resource is not ready to move: %s/%s", obj.GroupVersionKind(), key)
   662  			}
   663  			log.V(5).Info("Resource is ready to move")
   664  			return nil
   665  		}); err != nil {
   666  			return err
   667  		}
   668  	}
   669  
   670  	return nil
   671  }
   672  
   673  // patchCluster applies a patch to a node referring to a Cluster object.
   674  func patchCluster(ctx context.Context, proxy Proxy, n *node, patch client.Patch, mutators ...ResourceMutatorFunc) error {
   675  	cFrom, err := proxy.NewClient(ctx)
   676  	if err != nil {
   677  		return err
   678  	}
   679  
    680  	// Since the patch has already been generated in the caller of this function, the ONLY effect that mutators can have
    681  	// here is on the namespace of the resource.
   682  	clusterObj, err := applyMutators(&clusterv1.Cluster{
   683  		TypeMeta: metav1.TypeMeta{
   684  			Kind:       clusterv1.ClusterKind,
   685  			APIVersion: clusterv1.GroupVersion.String(),
   686  		},
   687  		ObjectMeta: metav1.ObjectMeta{
   688  			Name:      n.identity.Name,
   689  			Namespace: n.identity.Namespace,
   690  		},
   691  	}, mutators...)
   692  	if err != nil {
   693  		return err
   694  	}
   695  
   696  	if err := cFrom.Get(ctx, client.ObjectKeyFromObject(clusterObj), clusterObj); err != nil {
   697  		return errors.Wrapf(err, "error reading Cluster %s/%s",
   698  			clusterObj.GetNamespace(), clusterObj.GetName())
   699  	}
   700  
   701  	if err := cFrom.Patch(ctx, clusterObj, patch); err != nil {
   702  		return errors.Wrapf(err, "error patching Cluster %s/%s",
   703  			clusterObj.GetNamespace(), clusterObj.GetName())
   704  	}
   705  
   706  	return nil
   707  }
   708  
   709  func pauseClusterClass(ctx context.Context, proxy Proxy, n *node, pause bool, mutators ...ResourceMutatorFunc) error {
   710  	cFrom, err := proxy.NewClient(ctx)
   711  	if err != nil {
   712  		return errors.Wrap(err, "error creating client")
   713  	}
   714  
   715  	// Get a mutated copy of the ClusterClass to identify the target namespace.
    716  	// The ClusterClass may end up in a different namespace after the move.
   717  	mutatedClusterClass, err := applyMutators(&clusterv1.ClusterClass{
   718  		TypeMeta: metav1.TypeMeta{
   719  			Kind:       clusterv1.ClusterClassKind,
   720  			APIVersion: clusterv1.GroupVersion.String(),
   721  		},
   722  		ObjectMeta: metav1.ObjectMeta{
   723  			Name:      n.identity.Name,
   724  			Namespace: n.identity.Namespace,
   725  		}}, mutators...)
   726  	if err != nil {
   727  		return err
   728  	}
   729  
   730  	clusterClass := &clusterv1.ClusterClass{}
   731  	// Construct an object key using the mutatedClusterClass reflecting any changes to the namespace.
   732  	clusterClassObjKey := client.ObjectKey{
   733  		Name:      mutatedClusterClass.GetName(),
   734  		Namespace: mutatedClusterClass.GetNamespace(),
   735  	}
   736  	// Get a copy of the ClusterClass.
    737  	// This will ensure that any other changes from the mutator are ignored here as we work with a fresh copy of the ClusterClass.
   738  	if err := cFrom.Get(ctx, clusterClassObjKey, clusterClass); err != nil {
   739  		return errors.Wrapf(err, "error reading ClusterClass %s/%s", n.identity.Namespace, n.identity.Name)
   740  	}
   741  
   742  	patchHelper, err := patch.NewHelper(clusterClass, cFrom)
   743  	if err != nil {
   744  		return err
   745  	}
   746  
   747  	// Update the annotation to the desired state
   748  	ccAnnotations := clusterClass.GetAnnotations()
   749  	if ccAnnotations == nil {
   750  		ccAnnotations = map[string]string{}
   751  	}
   752  	if pause {
   753  		// Set the pause annotation.
   754  		ccAnnotations[clusterv1.PausedAnnotation] = ""
   755  	} else {
   756  		// Delete the pause annotation.
   757  		delete(ccAnnotations, clusterv1.PausedAnnotation)
   758  	}
   759  
   760  	// Update the ClusterClass with the new annotations.
   761  	clusterClass.SetAnnotations(ccAnnotations)
   762  
   763  	return patchHelper.Patch(ctx, clusterClass)
   764  }
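
         // For reference, pausing through pauseClusterClass leaves the ClusterClass
         // carrying the clusterv1.PausedAnnotation with an empty value, i.e. (YAML
         // shown for illustration):
         //
         //	metadata:
         //	  annotations:
         //	    cluster.x-k8s.io/paused: ""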
   765  
   766  // ensureNamespaces ensures all the expected target namespaces are in place before creating objects.
   767  func (o *objectMover) ensureNamespaces(ctx context.Context, graph *objectGraph, toProxy Proxy) error {
   768  	if o.dryRun {
   769  		return nil
   770  	}
   771  
   772  	ensureNamespaceBackoff := newWriteBackoff()
   773  	namespaces := sets.Set[string]{}
   774  	for _, node := range graph.getMoveNodes() {
   775  		// ignore global/cluster-wide objects
   776  		if node.isGlobal {
   777  			continue
   778  		}
   779  
   780  		namespace := node.identity.Namespace
   781  
   782  		// If the namespace was already processed, skip it.
   783  		if namespaces.Has(namespace) {
   784  			continue
   785  		}
   786  		namespaces.Insert(namespace)
   787  
   788  		if err := retryWithExponentialBackoff(ctx, ensureNamespaceBackoff, func(ctx context.Context) error {
   789  			return o.ensureNamespace(ctx, toProxy, namespace)
   790  		}); err != nil {
   791  			return err
   792  		}
   793  	}
   794  
   795  	return nil
   796  }
   797  
    798  // ensureNamespace ensures a target namespace is in place before creating objects.
   799  func (o *objectMover) ensureNamespace(ctx context.Context, toProxy Proxy, namespace string) error {
   800  	log := logf.Log
   801  
   802  	cs, err := toProxy.NewClient(ctx)
   803  	if err != nil {
   804  		return err
   805  	}
   806  
    807  	// Check if the namespace exists (also dealing with RBAC restrictions).
   808  	ns := &corev1.Namespace{}
   809  	key := client.ObjectKey{
   810  		Name: namespace,
   811  	}
   812  
   813  	err = cs.Get(ctx, key, ns)
   814  	if err == nil {
   815  		return nil
   816  	}
   817  	if apierrors.IsForbidden(err) {
   818  		namespaces := &corev1.NamespaceList{}
   819  		namespaceExists := false
   820  		for {
   821  			if err := cs.List(ctx, namespaces, client.Continue(namespaces.Continue)); err != nil {
   822  				return err
   823  			}
   824  
   825  			for _, ns := range namespaces.Items {
   826  				if ns.Name == namespace {
   827  					namespaceExists = true
   828  					break
   829  				}
   830  			}
   831  
   832  			if namespaces.Continue == "" {
   833  				break
   834  			}
   835  		}
   836  		if namespaceExists {
   837  			return nil
   838  		}
   839  	}
   840  	if !apierrors.IsNotFound(err) {
   841  		return err
   842  	}
   843  
   844  	// If the namespace does not exist, create it.
   845  	ns = &corev1.Namespace{
   846  		TypeMeta: metav1.TypeMeta{
   847  			APIVersion: "v1",
   848  			Kind:       "Namespace",
   849  		},
   850  		ObjectMeta: metav1.ObjectMeta{
   851  			Name: namespace,
   852  		},
   853  	}
   854  	log.V(1).Info("Creating", ns.Kind, ns.Name)
   855  	if err := cs.Create(ctx, ns); err != nil && !apierrors.IsAlreadyExists(err) {
   856  		return err
   857  	}
   858  	return nil
   859  }
   860  
   861  // createGroup creates all the Kubernetes objects into the target management cluster corresponding to the object graph nodes in a moveGroup.
   862  func (o *objectMover) createGroup(ctx context.Context, group moveGroup, toProxy Proxy, mutators ...ResourceMutatorFunc) error {
   863  	createTargetObjectBackoff := newWriteBackoff()
   864  	errList := []error{}
   865  
   866  	// Maintain a cache of namespaces that have been verified to already exist.
    867  	// Nb. This prevents us from making repetitive (and expensive) calls listing all namespaces just to ensure a namespace exists before creating a resource.
   868  	existingNamespaces := sets.New[string]()
   869  	for _, nodeToCreate := range group {
   870  		// Creates the Kubernetes object corresponding to the nodeToCreate.
   871  		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
   872  		err := retryWithExponentialBackoff(ctx, createTargetObjectBackoff, func(ctx context.Context) error {
   873  			return o.createTargetObject(ctx, nodeToCreate, toProxy, mutators, existingNamespaces)
   874  		})
   875  		if err != nil {
   876  			errList = append(errList, err)
   877  		}
   878  	}
   879  
   880  	if len(errList) > 0 {
   881  		return kerrors.NewAggregate(errList)
   882  	}
   883  
   884  	return nil
   885  }
   886  
   887  func (o *objectMover) backupGroup(ctx context.Context, group moveGroup, directory string) error {
   888  	backupTargetObjectBackoff := newWriteBackoff()
   889  	errList := []error{}
   890  
   891  	for _, nodeToBackup := range group {
    892  		// Backs up the Kubernetes object corresponding to the nodeToBackup.
   893  		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
   894  		err := retryWithExponentialBackoff(ctx, backupTargetObjectBackoff, func(ctx context.Context) error {
   895  			return o.backupTargetObject(ctx, nodeToBackup, directory)
   896  		})
   897  		if err != nil {
   898  			errList = append(errList, err)
   899  		}
   900  	}
   901  
   902  	if len(errList) > 0 {
   903  		return kerrors.NewAggregate(errList)
   904  	}
   905  
   906  	return nil
   907  }
   908  
   909  func (o *objectMover) restoreGroup(ctx context.Context, group moveGroup, toProxy Proxy) error {
   910  	restoreTargetObjectBackoff := newWriteBackoff()
   911  	errList := []error{}
   912  
   913  	for _, nodeToRestore := range group {
   914  		// Creates the Kubernetes object corresponding to the nodeToRestore.
   915  		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
   916  		err := retryWithExponentialBackoff(ctx, restoreTargetObjectBackoff, func(ctx context.Context) error {
   917  			return o.restoreTargetObject(ctx, nodeToRestore, toProxy)
   918  		})
   919  		if err != nil {
   920  			errList = append(errList, err)
   921  		}
   922  	}
   923  
   924  	if len(errList) > 0 {
   925  		return kerrors.NewAggregate(errList)
   926  	}
   927  
   928  	return nil
   929  }
   930  
    931  // createTargetObject creates the Kubernetes object in the target management cluster corresponding to the object graph node, taking care of restoring the OwnerReferences to the owner nodes, if any.
   932  func (o *objectMover) createTargetObject(ctx context.Context, nodeToCreate *node, toProxy Proxy, mutators []ResourceMutatorFunc, existingNamespaces sets.Set[string]) error {
   933  	log := logf.Log
   934  	log.V(1).Info("Creating", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)
   935  
   936  	if o.dryRun {
   937  		return nil
   938  	}
   939  
   940  	cFrom, err := o.fromProxy.NewClient(ctx)
   941  	if err != nil {
   942  		return err
   943  	}
   944  
   945  	// Get the source object
   946  	obj := &unstructured.Unstructured{}
   947  	obj.SetAPIVersion(nodeToCreate.identity.APIVersion)
   948  	obj.SetKind(nodeToCreate.identity.Kind)
   949  	objKey := client.ObjectKey{
   950  		Namespace: nodeToCreate.identity.Namespace,
   951  		Name:      nodeToCreate.identity.Name,
   952  	}
   953  
   954  	if err := cFrom.Get(ctx, objKey, obj); err != nil {
   955  		return errors.Wrapf(err, "error reading %q %s/%s",
   956  			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
   957  	}
   958  
   959  	// New objects cannot have a specified resource version. Clear it out.
   960  	obj.SetResourceVersion("")
   961  
   962  	// Removes current OwnerReferences
   963  	obj.SetOwnerReferences(nil)
   964  
   965  	// Rebuild the owner reference chain
   966  	o.buildOwnerChain(obj, nodeToCreate)
   967  
   968  	// FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed.
   969  	// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
   970  	if obj.GetName() != "" && obj.GetGenerateName() != "" {
   971  		obj.SetGenerateName("")
   972  	}
   973  
   974  	// Creates the targetObj into the target management cluster.
   975  	cTo, err := toProxy.NewClient(ctx)
   976  	if err != nil {
   977  		return err
   978  	}
   979  
   980  	obj, err = applyMutators(obj, mutators...)
   981  	if err != nil {
   982  		return err
   983  	}
   984  	// Applying mutators MAY change the namespace, so ensure the namespace exists before creating the resource.
   985  	if !nodeToCreate.isGlobal && !existingNamespaces.Has(obj.GetNamespace()) {
   986  		if err = o.ensureNamespace(ctx, toProxy, obj.GetNamespace()); err != nil {
   987  			return err
   988  		}
   989  		existingNamespaces.Insert(obj.GetNamespace())
   990  	}
   991  	oldManagedFields := obj.GetManagedFields()
   992  	if err := cTo.Create(ctx, obj); err != nil {
   993  		if !apierrors.IsAlreadyExists(err) {
   994  			return errors.Wrapf(err, "error creating %q %s/%s",
   995  				obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
   996  		}
   997  
    998  		// If the object already exists, try to update it if it is not a global object / something belonging to a global object hierarchy (e.g. a secret owned by a global identity object).
   999  		if nodeToCreate.isGlobal || nodeToCreate.isGlobalHierarchy {
   1000  			log.V(5).Info("Object already exists, skipping update because it is global/it is owned by a global object", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)
  1001  		} else {
   1002  			// Nb. This should not happen, but it is supported to make move more resilient to unexpected interrupts/restarts of the move process.
  1003  			log.V(5).Info("Object already exists, updating", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)
  1004  
  1005  			// Retrieve the UID and the resource version for the update.
  1006  			existingTargetObj := &unstructured.Unstructured{}
  1007  			existingTargetObj.SetAPIVersion(obj.GetAPIVersion())
  1008  			existingTargetObj.SetKind(obj.GetKind())
  1009  			if err := cTo.Get(ctx, objKey, existingTargetObj); err != nil {
  1010  				return errors.Wrapf(err, "error reading resource for %q %s/%s",
  1011  					existingTargetObj.GroupVersionKind(), existingTargetObj.GetNamespace(), existingTargetObj.GetName())
  1012  			}
  1013  
  1014  			obj.SetUID(existingTargetObj.GetUID())
  1015  			obj.SetResourceVersion(existingTargetObj.GetResourceVersion())
  1016  			if err := cTo.Update(ctx, obj); err != nil {
  1017  				return errors.Wrapf(err, "error updating %q %s/%s",
  1018  					obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
  1019  			}
  1020  		}
  1021  	}
  1022  
  1023  	// Stores the newUID assigned to the newly created object.
  1024  	nodeToCreate.newUID = obj.GetUID()
  1025  
  1026  	if err := patchTopologyManagedFields(ctx, oldManagedFields, obj, cTo); err != nil {
  1027  		return errors.Wrap(err, "error patching the managed fields")
  1028  	}
  1029  
  1030  	return nil
  1031  }
  1032  
  1033  func (o *objectMover) backupTargetObject(ctx context.Context, nodeToCreate *node, directory string) error {
  1034  	log := logf.Log
  1035  	log.V(1).Info("Saving", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)
  1036  
  1037  	cFrom, err := o.fromProxy.NewClient(ctx)
  1038  	if err != nil {
  1039  		return err
  1040  	}
  1041  
  1042  	// Get the source object
  1043  	obj := &unstructured.Unstructured{}
  1044  	obj.SetAPIVersion(nodeToCreate.identity.APIVersion)
  1045  	obj.SetKind(nodeToCreate.identity.Kind)
  1046  	objKey := client.ObjectKey{
  1047  		Namespace: nodeToCreate.identity.Namespace,
  1048  		Name:      nodeToCreate.identity.Name,
  1049  	}
  1050  
  1051  	if err := cFrom.Get(ctx, objKey, obj); err != nil {
  1052  		return errors.Wrapf(err, "error reading %q %s/%s",
  1053  			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
  1054  	}
  1055  
  1056  	// Get JSON for object and write it into the configured directory
  1057  	byObj, err := obj.MarshalJSON()
  1058  	if err != nil {
  1059  		return err
  1060  	}
  1061  
  1062  	filenameObj := nodeToCreate.getFilename()
  1063  	objectFile := filepath.Join(directory, filenameObj)
  1064  
   1065  	// If the file exists, remove it so it can be written again.
  1066  	_, err = os.Stat(objectFile)
  1067  	if err != nil && !os.IsNotExist(err) {
  1068  		return err
  1069  	}
  1070  	if err == nil {
  1071  		if err := os.Remove(objectFile); err != nil {
  1072  			return err
  1073  		}
  1074  	}
  1075  
  1076  	err = os.WriteFile(objectFile, byObj, 0600)
  1077  	if err != nil {
  1078  		return err
  1079  	}
  1080  
  1081  	return nil
  1082  }
  1083  
  1084  func (o *objectMover) restoreTargetObject(ctx context.Context, nodeToCreate *node, toProxy Proxy) error {
  1085  	log := logf.Log
  1086  	log.V(1).Info("Restoring", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)
  1087  
  1088  	// Creates the targetObj into the target management cluster.
  1089  	cTo, err := toProxy.NewClient(ctx)
  1090  	if err != nil {
  1091  		return err
  1092  	}
  1093  
   1094  	// Attempt to retrieve an existing object. If it exists, update the UID to rebuild the owner chain.
  1095  	objKey := client.ObjectKey{
  1096  		Namespace: nodeToCreate.identity.Namespace,
  1097  		Name:      nodeToCreate.identity.Name,
  1098  	}
  1099  
  1100  	existingTargetObj := &unstructured.Unstructured{}
  1101  	existingTargetObj.SetAPIVersion(nodeToCreate.restoreObject.GetAPIVersion())
  1102  	existingTargetObj.SetKind(nodeToCreate.restoreObject.GetKind())
  1103  	if err := cTo.Get(ctx, objKey, existingTargetObj); err == nil {
  1104  		log.V(5).Info("Object already exists, skipping moving from directory", nodeToCreate.identity.Kind, nodeToCreate.identity.Name, "Namespace", nodeToCreate.identity.Namespace)
  1105  
   1106  		// Update the node's UID since the object already exists. Any nodes owned by this existing node will be updated when the owner chain is rebuilt.
  1107  		nodeToCreate.newUID = existingTargetObj.GetUID()
  1108  
  1109  		// Return early since the object already exists
  1110  		return nil
  1111  	}
  1112  
  1113  	// Rebuild the source object
  1114  	obj := nodeToCreate.restoreObject
  1115  
  1116  	obj.SetAPIVersion(nodeToCreate.identity.APIVersion)
  1117  	obj.SetKind(nodeToCreate.identity.Kind)
  1118  
  1119  	// New objects cannot have a specified resource version. Clear it out.
  1120  	obj.SetResourceVersion("")
  1121  
  1122  	// Removes current OwnerReferences
  1123  	obj.SetOwnerReferences(nil)
  1124  
  1125  	// Rebuild the owner reference chain
  1126  	o.buildOwnerChain(obj, nodeToCreate)
  1127  
  1128  	if err := cTo.Create(ctx, obj); err != nil {
  1129  		if !apierrors.IsAlreadyExists(err) {
  1130  			return errors.Wrapf(err, "error creating %q %s/%s",
  1131  				obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
  1132  		}
  1133  	}
  1134  
  1135  	// Stores the newUID assigned to the newly created object.
  1136  	nodeToCreate.newUID = obj.GetUID()
  1137  
  1138  	return nil
  1139  }
  1140  
  1141  // Recreate all the OwnerReferences using the newUID of the owner nodes.
  1142  func (o *objectMover) buildOwnerChain(obj *unstructured.Unstructured, n *node) {
  1143  	if len(n.owners) > 0 {
  1144  		ownerRefs := []metav1.OwnerReference{}
  1145  		for ownerNode := range n.owners {
  1146  			ownerRef := metav1.OwnerReference{
  1147  				APIVersion: ownerNode.identity.APIVersion,
  1148  				Kind:       ownerNode.identity.Kind,
  1149  				Name:       ownerNode.identity.Name,
  1150  				UID:        ownerNode.newUID, // Use the owner's newUID read from the target management cluster (instead of the UID read during discovery).
  1151  			}
  1152  
  1153  			// Restores the attributes of the OwnerReference.
  1154  			if attributes, ok := n.owners[ownerNode]; ok {
  1155  				ownerRef.Controller = attributes.Controller
  1156  				ownerRef.BlockOwnerDeletion = attributes.BlockOwnerDeletion
  1157  			}
  1158  
  1159  			ownerRefs = append(ownerRefs, ownerRef)
  1160  		}
  1161  		obj.SetOwnerReferences(ownerRefs)
  1162  	}
  1163  }
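
         // For example (illustrative only): if a MachineSet owned a Machine in the
         // source cluster, the Machine's rebuilt OwnerReference keeps the same
         // APIVersion/Kind/Name but carries the UID the MachineSet was assigned when
         // it was created in the target cluster (ownerNode.newUID), instead of the
         // UID read during discovery.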
  1164  
  1165  // deleteGroup deletes all the Kubernetes objects from the source management cluster corresponding to the object graph nodes in a moveGroup.
  1166  func (o *objectMover) deleteGroup(ctx context.Context, group moveGroup) error {
  1167  	deleteSourceObjectBackoff := newWriteBackoff()
  1168  	errList := []error{}
  1169  	for i := range group {
  1170  		nodeToDelete := group[i]
  1171  
  1172  		// Delete the Kubernetes object corresponding to the current node.
  1173  		// Nb. The operation is wrapped in a retry loop to make move more resilient to unexpected conditions.
  1174  		err := retryWithExponentialBackoff(ctx, deleteSourceObjectBackoff, func(ctx context.Context) error {
  1175  			return o.deleteSourceObject(ctx, nodeToDelete)
  1176  		})
  1177  
  1178  		if err != nil {
  1179  			errList = append(errList, err)
  1180  		}
  1181  	}
  1182  
  1183  	return kerrors.NewAggregate(errList)
  1184  }
  1185  
  1186  var (
  1187  	removeFinalizersPatch           = client.RawPatch(types.MergePatchType, []byte("{\"metadata\":{\"finalizers\":[]}}"))
  1188  	addDeleteForMoveAnnotationPatch = client.RawPatch(types.JSONPatchType, []byte(fmt.Sprintf("[{\"op\": \"add\", \"path\":\"/metadata/annotations\", \"value\":{%q:\"\"}}]", clusterctlv1.DeleteForMoveAnnotation)))
  1189  )
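
         // For reference, with the clusterctl v1alpha3 annotation constant
         // substituted, addDeleteForMoveAnnotationPatch renders to something like:
         //
         //	[{"op": "add", "path": "/metadata/annotations", "value": {"clusterctl.cluster.x-k8s.io/delete-for-move": ""}}]
         //
         // Note that a JSON-patch "add" on /metadata/annotations replaces the whole
         // annotations map, which is acceptable here because the object is about to
         // be force-deleted anyway.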
  1190  
  1191  // deleteSourceObject deletes the Kubernetes object corresponding to the node from the source management cluster, taking care of removing all the finalizers so
   1192  // the object gets immediately deleted (force delete).
  1193  func (o *objectMover) deleteSourceObject(ctx context.Context, nodeToDelete *node) error {
   1194  	// Don't delete cluster-wide nodes or nodes that are below a hierarchy that starts with a global object (e.g. a secret owned by a global identity object).
  1195  	if nodeToDelete.isGlobal || nodeToDelete.isGlobalHierarchy {
  1196  		return nil
  1197  	}
  1198  
  1199  	log := logf.Log
  1200  	log.V(1).Info("Deleting", nodeToDelete.identity.Kind, nodeToDelete.identity.Name, "Namespace", nodeToDelete.identity.Namespace)
  1201  
  1202  	if o.dryRun {
  1203  		return nil
  1204  	}
  1205  
  1206  	cFrom, err := o.fromProxy.NewClient(ctx)
  1207  	if err != nil {
  1208  		return err
  1209  	}
  1210  
  1211  	// Get the source object
  1212  	sourceObj := &unstructured.Unstructured{}
  1213  	sourceObj.SetAPIVersion(nodeToDelete.identity.APIVersion)
  1214  	sourceObj.SetKind(nodeToDelete.identity.Kind)
  1215  	sourceObjKey := client.ObjectKey{
  1216  		Namespace: nodeToDelete.identity.Namespace,
  1217  		Name:      nodeToDelete.identity.Name,
  1218  	}
  1219  
  1220  	if err := cFrom.Get(ctx, sourceObjKey, sourceObj); err != nil {
  1221  		if apierrors.IsNotFound(err) {
  1222  			// If the object is already deleted, move on.
  1223  			log.V(5).Info("Object already deleted, skipping delete for", nodeToDelete.identity.Kind, nodeToDelete.identity.Name, "Namespace", nodeToDelete.identity.Namespace)
  1224  			return nil
  1225  		}
  1226  		return errors.Wrapf(err, "error reading %q %s/%s",
  1227  			sourceObj.GroupVersionKind(), sourceObj.GetNamespace(), sourceObj.GetName())
  1228  	}
  1229  
  1230  	if err := cFrom.Patch(ctx, sourceObj, addDeleteForMoveAnnotationPatch); err != nil {
   1231  		return errors.Wrapf(err, "error adding delete-for-move annotation to %q %s/%s",
  1232  			sourceObj.GroupVersionKind(), sourceObj.GetNamespace(), sourceObj.GetName())
  1233  	}
  1234  
  1235  	if len(sourceObj.GetFinalizers()) > 0 {
  1236  		if err := cFrom.Patch(ctx, sourceObj, removeFinalizersPatch); err != nil {
  1237  			return errors.Wrapf(err, "error removing finalizers from %q %s/%s",
  1238  				sourceObj.GroupVersionKind(), sourceObj.GetNamespace(), sourceObj.GetName())
  1239  		}
  1240  	}
  1241  
  1242  	if err := cFrom.Delete(ctx, sourceObj); err != nil {
  1243  		return errors.Wrapf(err, "error deleting %q %s/%s",
  1244  			sourceObj.GroupVersionKind(), sourceObj.GetNamespace(), sourceObj.GetName())
  1245  	}
  1246  
  1247  	return nil
  1248  }
  1249  
   1250  // checkTargetProviders checks that all the providers installed in the source cluster exist in the target cluster as well (with a version >= the version in the source cluster).
  1251  func (o *objectMover) checkTargetProviders(ctx context.Context, toInventory InventoryClient) error {
  1252  	if o.dryRun {
  1253  		return nil
  1254  	}
  1255  
  1256  	// Gets the list of providers in the source/target cluster.
  1257  	fromProviders, err := o.fromProviderInventory.List(ctx)
  1258  	if err != nil {
  1259  		return errors.Wrapf(err, "failed to get provider list from the source cluster")
  1260  	}
  1261  
  1262  	toProviders, err := toInventory.List(ctx)
  1263  	if err != nil {
  1264  		return errors.Wrapf(err, "failed to get provider list from the target cluster")
  1265  	}
  1266  
  1267  	// Checks all the providers installed in the source cluster
  1268  	errList := []error{}
  1269  	for _, sourceProvider := range fromProviders.Items {
  1270  		sourceVersion, err := version.ParseSemantic(sourceProvider.Version)
  1271  		if err != nil {
  1272  			return errors.Wrapf(err, "unable to parse version %q for the %s provider in the source cluster", sourceProvider.Version, sourceProvider.InstanceName())
  1273  		}
  1274  
   1275  		// Check the corresponding providers in the target cluster and get the latest version installed.
  1276  		var maxTargetVersion *version.Version
  1277  		for _, targetProvider := range toProviders.Items {
  1278  			// Skips other providers.
  1279  			if !sourceProvider.SameAs(targetProvider) {
  1280  				continue
  1281  			}
  1282  
  1283  			targetVersion, err := version.ParseSemantic(targetProvider.Version)
  1284  			if err != nil {
  1285  				return errors.Wrapf(err, "unable to parse version %q for the %s provider in the target cluster", targetProvider.Version, targetProvider.InstanceName())
  1286  			}
  1287  			if maxTargetVersion == nil || maxTargetVersion.LessThan(targetVersion) {
  1288  				maxTargetVersion = targetVersion
  1289  			}
  1290  		}
  1291  		if maxTargetVersion == nil {
  1292  			errList = append(errList, errors.Errorf("provider %s not found in the target cluster", sourceProvider.Name))
  1293  			continue
  1294  		}
  1295  
  1296  		if !maxTargetVersion.AtLeast(sourceVersion) {
  1297  			errList = append(errList, errors.Errorf("provider %s in the target cluster is older than in the source cluster (source: %s, target: %s)", sourceProvider.Name, sourceVersion.String(), maxTargetVersion.String()))
  1298  		}
  1299  	}
  1300  
  1301  	return kerrors.NewAggregate(errList)
  1302  }
  1303  
  1304  // patchTopologyManagedFields patches the managed fields of obj.
  1305  // Without patching the managed fields, clusterctl would be the owner of the fields
   1306  // which would lead to co-ownership and prevent other controllers using SSA from deleting fields.
  1307  func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.ManagedFieldsEntry, obj *unstructured.Unstructured, cTo client.Client) error {
  1308  	base := obj.DeepCopy()
  1309  	obj.SetManagedFields(oldManagedFields)
  1310  
  1311  	if err := cTo.Patch(ctx, obj, client.MergeFrom(base)); err != nil {
  1312  		return errors.Wrapf(err, "error patching managed fields %q %s/%s",
  1313  			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
  1314  	}
  1315  	return nil
  1316  }
  1317  
  1318  // applyMutators applies mutators to an object.
  1319  // Note: TypeMeta must always be set in the object because otherwise after conversion the
  1320  // resulting Unstructured would have an empty GVK.
  1321  func applyMutators(object client.Object, mutators ...ResourceMutatorFunc) (*unstructured.Unstructured, error) {
  1322  	if object == nil {
  1323  		return nil, nil
  1324  	}
  1325  	u := &unstructured.Unstructured{}
  1326  	to, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
  1327  	if err != nil {
  1328  		return nil, err
  1329  	}
  1330  	u.SetUnstructuredContent(to)
  1331  	for _, mutator := range mutators {
  1332  		if err := mutator(u); err != nil {
  1333  			return nil, errors.Wrapf(err, "error applying resource mutator to %q %s/%s",
  1334  				u.GroupVersionKind(), object.GetNamespace(), object.GetName())
  1335  		}
  1336  	}
  1337  	return u, nil
  1338  }
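
         // A minimal usage sketch for applyMutators (the mutator below is
         // hypothetical and for illustration only):
         //
         //	u, err := applyMutators(&clusterv1.Cluster{
         //		TypeMeta:   metav1.TypeMeta{Kind: clusterv1.ClusterKind, APIVersion: clusterv1.GroupVersion.String()},
         //		ObjectMeta: metav1.ObjectMeta{Name: "my-cluster", Namespace: "default"},
         //	}, func(u *unstructured.Unstructured) error {
         //		u.SetNamespace("moved")
         //		return nil
         //	})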