sigs.k8s.io/cluster-api@v1.7.1/cmd/clusterctl/client/cluster/mover_test.go (about)

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"strings"
    25  	"testing"
    26  	"time"
    27  
    28  	. "github.com/onsi/gomega"
    29  	corev1 "k8s.io/api/core/v1"
    30  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    33  	"k8s.io/apimachinery/pkg/types"
    34  	"k8s.io/apimachinery/pkg/util/sets"
    35  	"k8s.io/apimachinery/pkg/util/wait"
    36  	"k8s.io/utils/ptr"
    37  	"sigs.k8s.io/controller-runtime/pkg/client"
    38  
    39  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    40  	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
    41  	"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test"
    42  	"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test/providers/infrastructure"
    43  	"sigs.k8s.io/cluster-api/util/conditions"
    44  )
    45  
// moveTestsFields holds the input for a single move/backup test case: the set
// of fake Cluster API objects (built via the internal test package) that the
// object graph under test is populated with.
type moveTestsFields struct {
	// objs are the objects to feed to the object graph / mover under test.
	objs []client.Object
}
    49  
    50  var moveTests = []struct {
    51  	name           string
    52  	fields         moveTestsFields
    53  	wantMoveGroups [][]string
    54  	wantErr        bool
    55  }{
    56  	{
    57  		name: "Cluster with ClusterClass",
    58  		fields: moveTestsFields{
    59  			objs: func() []client.Object {
    60  				objs := test.NewFakeClusterClass("ns1", "class1").Objs()
    61  				objs = append(objs, test.NewFakeCluster("ns1", "foo").WithTopologyClass("class1").Objs()...)
    62  				return deduplicateObjects(objs)
    63  			}(),
    64  		},
    65  		wantMoveGroups: [][]string{
    66  			{ // group 1
    67  				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1",
    68  			},
    69  			{ // group 2
    70  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1",
    71  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1",
    72  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
    73  			},
    74  			{ // group 3
    75  				"/v1, Kind=Secret, ns1/foo-ca",
    76  				"/v1, Kind=Secret, ns1/foo-kubeconfig",
    77  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
    78  			},
    79  		},
    80  		wantErr: false,
    81  	},
    82  	{
    83  		name: "Cluster",
    84  		fields: moveTestsFields{
    85  			objs: test.NewFakeCluster("ns1", "foo").Objs(),
    86  		},
    87  		wantMoveGroups: [][]string{
    88  			{ // group 1
    89  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
    90  			},
    91  			{ // group 2 (objects with ownerReferences in group 1)
    92  				// owned by Clusters
    93  				"/v1, Kind=Secret, ns1/foo-ca",
    94  				"/v1, Kind=Secret, ns1/foo-kubeconfig",
    95  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
    96  			},
    97  		},
    98  		wantErr: false,
    99  	},
   100  	{
   101  		name: "Cluster with cloud config secret with the force move label",
   102  		fields: moveTestsFields{
   103  			objs: test.NewFakeCluster("ns1", "foo").WithCloudConfigSecret().Objs(),
   104  		},
   105  		wantMoveGroups: [][]string{
   106  			{ // group 1
   107  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
   108  				// objects with force move flag
   109  				"/v1, Kind=Secret, ns1/foo-cloud-config",
   110  			},
   111  			{ // group 2 (objects with ownerReferences in group 1)
   112  				// owned by Clusters
   113  				"/v1, Kind=Secret, ns1/foo-ca",
   114  				"/v1, Kind=Secret, ns1/foo-kubeconfig",
   115  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
   116  			},
   117  		},
   118  		wantErr: false,
   119  	},
   120  	{
   121  		name: "Cluster with machine",
   122  		fields: moveTestsFields{
   123  			objs: test.NewFakeCluster("ns1", "cluster1").
   124  				WithMachines(
   125  					test.NewFakeMachine("m1"),
   126  					test.NewFakeMachine("m2"),
   127  				).Objs(),
   128  		},
   129  		wantMoveGroups: [][]string{
   130  			{ // group 1
   131  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   132  			},
   133  			{ // group 2 (objects with ownerReferences in group 1)
   134  				// owned by Clusters
   135  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   136  				"/v1, Kind=Secret, ns1/cluster1-ca",
   137  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1",
   138  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m2",
   139  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   140  			},
   141  			{ // group 3 (objects with ownerReferences in group 1,2)
   142  				// owned by Machines
   143  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m1",
   144  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m2",
   145  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1",
   146  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m2",
   147  			},
   148  			{ // group 4 (objects with ownerReferences in group 1,2,3)
   149  				// owned by GenericBootstrapConfigs
   150  				"/v1, Kind=Secret, ns1/cluster1-sa",
   151  				"/v1, Kind=Secret, ns1/m1",
   152  				"/v1, Kind=Secret, ns1/m2",
   153  			},
   154  		},
   155  		wantErr: false,
   156  	},
   157  	{
   158  		name: "Cluster with MachineSet",
   159  		fields: moveTestsFields{
   160  			objs: test.NewFakeCluster("ns1", "cluster1").
   161  				WithMachineSets(
   162  					test.NewFakeMachineSet("ms1").
   163  						WithMachines(
   164  							test.NewFakeMachine("m1"),
   165  							test.NewFakeMachine("m2"),
   166  						),
   167  				).Objs(),
   168  		},
   169  		wantMoveGroups: [][]string{
   170  			{ // group 1
   171  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   172  			},
   173  			{ // group 2 (objects with ownerReferences in group 1)
   174  				// owned by Clusters
   175  				"/v1, Kind=Secret, ns1/cluster1-ca",
   176  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   177  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/ms1",
   178  				"cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/ms1",
   179  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   180  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/ms1",
   181  			},
   182  			{ // group 3 (objects with ownerReferences in group 1,2)
   183  				// owned by MachineSets
   184  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1",
   185  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m2",
   186  			},
   187  			{ // group 4 (objects with ownerReferences in group 1,2,3)
   188  				// owned by Machines
   189  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m1",
   190  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m2",
   191  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1",
   192  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m2",
   193  			},
   194  			{ // group 5 (objects with ownerReferences in group 1,2,3,4)
   195  				// owned by GenericBootstrapConfigs
   196  				"/v1, Kind=Secret, ns1/m1",
   197  				"/v1, Kind=Secret, ns1/m2",
   198  			},
   199  		},
   200  		wantErr: false,
   201  	},
   202  	{
   203  		name: "Cluster with MachineDeployment",
   204  		fields: moveTestsFields{
   205  			objs: test.NewFakeCluster("ns1", "cluster1").
   206  				WithMachineDeployments(
   207  					test.NewFakeMachineDeployment("md1").
   208  						WithMachineSets(
   209  							test.NewFakeMachineSet("ms1").
   210  								WithMachines(
   211  									test.NewFakeMachine("m1"),
   212  									test.NewFakeMachine("m2"),
   213  								),
   214  						),
   215  				).Objs(),
   216  		},
   217  		wantMoveGroups: [][]string{
   218  			{ // group 1
   219  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   220  			},
   221  			{ // group 2 (objects with ownerReferences in group 1)
   222  				// owned by Clusters
   223  				"/v1, Kind=Secret, ns1/cluster1-ca",
   224  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   225  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/md1",
   226  				"cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1",
   227  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   228  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md1",
   229  			},
   230  			{ // group 3 (objects with ownerReferences in group 1,2)
   231  				// owned by MachineDeployments
   232  				"cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/ms1",
   233  			},
   234  			{ // group 4 (objects with ownerReferences in group 1,2,3)
   235  				// owned by MachineSets
   236  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1",
   237  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m2",
   238  			},
   239  			{ // group 5 (objects with ownerReferences in group 1,2,3,4)
   240  				// owned by Machines
   241  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m1",
   242  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m2",
   243  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1",
   244  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m2",
   245  			},
   246  			{ // group 6 (objects with ownerReferences in group 1,2,3,5,6)
   247  				// owned by GenericBootstrapConfigs
   248  				"/v1, Kind=Secret, ns1/m1",
   249  				"/v1, Kind=Secret, ns1/m2",
   250  			},
   251  		},
   252  		wantErr: false,
   253  	},
   254  	{
   255  		name: "Cluster with MachineDeployment with a static bootstrap config",
   256  		fields: moveTestsFields{
   257  			objs: test.NewFakeCluster("ns1", "cluster1").
   258  				WithMachineDeployments(
   259  					test.NewFakeMachineDeployment("md1").
   260  						WithStaticBootstrapConfig().
   261  						WithMachineSets(
   262  							test.NewFakeMachineSet("ms1").
   263  								WithStaticBootstrapConfig().
   264  								WithMachines(
   265  									test.NewFakeMachine("m1").
   266  										WithStaticBootstrapConfig(),
   267  									test.NewFakeMachine("m2").
   268  										WithStaticBootstrapConfig(),
   269  								),
   270  						),
   271  				).Objs(),
   272  		},
   273  		wantMoveGroups: [][]string{
   274  			{ // group 1
   275  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   276  			},
   277  			{ // group 2 (objects with ownerReferences in group 1)
   278  				// owned by Clusters
   279  				"/v1, Kind=Secret, ns1/cluster1-ca",
   280  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   281  				"cluster.x-k8s.io/v1beta1, Kind=MachineDeployment, ns1/md1",
   282  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   283  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/md1",
   284  			},
   285  			{ // group 3 (objects with ownerReferences in group 1,2)
   286  				// owned by MachineDeployments
   287  				"cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/ms1",
   288  			},
   289  			{ // group 4 (objects with ownerReferences in group 1,2,3)
   290  				// owned by MachineSets
   291  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1",
   292  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m2",
   293  			},
   294  			{ // group 5 (objects with ownerReferences in group 1,2,3,4)
   295  				// owned by Machines
   296  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1",
   297  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m2",
   298  			},
   299  		},
   300  		wantErr: false,
   301  	},
   302  	{
   303  		name: "Cluster with Control Plane",
   304  		fields: moveTestsFields{
   305  			objs: test.NewFakeCluster("ns1", "cluster1").
   306  				WithControlPlane(
   307  					test.NewFakeControlPlane("cp1").
   308  						WithMachines(
   309  							test.NewFakeMachine("m1"),
   310  							test.NewFakeMachine("m2"),
   311  						),
   312  				).Objs(),
   313  		},
   314  		wantMoveGroups: [][]string{
   315  			{ // group 1
   316  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   317  			},
   318  			{ // group 2 (objects with ownerReferences in group 1)
   319  				// owned by Clusters
   320  				"/v1, Kind=Secret, ns1/cluster1-ca",
   321  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlane, ns1/cp1",
   322  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   323  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/cp1",
   324  			},
   325  			{ // group 3 (objects with ownerReferences in group 1,2)
   326  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   327  				"/v1, Kind=Secret, ns1/cluster1-sa",
   328  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m1",
   329  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/m2",
   330  			},
   331  			{ // group 4 (objects with ownerReferences in group 1,2,3)
   332  				// owned by Machines
   333  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m1",
   334  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/m2",
   335  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m1",
   336  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/m2",
   337  			},
   338  			{ // group 5 (objects with ownerReferences in group 1,2,3,4)
   339  				// owned by GenericBootstrapConfigs
   340  				"/v1, Kind=Secret, ns1/m1",
   341  				"/v1, Kind=Secret, ns1/m2",
   342  			},
   343  		},
   344  		wantErr: false,
   345  	},
   346  	{
   347  		name: "Cluster with MachinePool",
   348  		fields: moveTestsFields{
   349  			objs: test.NewFakeCluster("ns1", "cluster1").
   350  				WithMachinePools(
   351  					test.NewFakeMachinePool("mp1"),
   352  				).Objs(),
   353  		},
   354  		wantMoveGroups: [][]string{
   355  			{ // group 1
   356  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   357  			},
   358  			{ // group 2 (objects with ownerReferences in group 1)
   359  				// owned by Clusters
   360  				"/v1, Kind=Secret, ns1/cluster1-ca",
   361  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   362  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/mp1",
   363  				"cluster.x-k8s.io/v1beta1, Kind=MachinePool, ns1/mp1",
   364  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   365  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/mp1",
   366  			},
   367  		},
   368  		wantErr: false,
   369  	},
   370  	{
   371  		name: "Two clusters",
   372  		fields: moveTestsFields{
   373  			objs: func() []client.Object {
   374  				objs := []client.Object{}
   375  				objs = append(objs, test.NewFakeCluster("ns1", "foo").Objs()...)
   376  				objs = append(objs, test.NewFakeCluster("ns1", "bar").Objs()...)
   377  				return objs
   378  			}(),
   379  		},
   380  		wantMoveGroups: [][]string{
   381  			{ // group 1
   382  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
   383  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/bar",
   384  			},
   385  			{ // group 2 (objects with ownerReferences in group 1)
   386  				// owned by Clusters
   387  				"/v1, Kind=Secret, ns1/foo-ca",
   388  				"/v1, Kind=Secret, ns1/foo-kubeconfig",
   389  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
   390  				"/v1, Kind=Secret, ns1/bar-ca",
   391  				"/v1, Kind=Secret, ns1/bar-kubeconfig",
   392  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/bar",
   393  			},
   394  		},
   395  	},
   396  	{
   397  		name: "Two clusters with a shared object",
   398  		fields: moveTestsFields{
   399  			objs: func() []client.Object {
   400  				sharedInfrastructureTemplate := test.NewFakeInfrastructureTemplate("shared")
   401  
   402  				objs := []client.Object{
   403  					sharedInfrastructureTemplate,
   404  				}
   405  
   406  				objs = append(objs, test.NewFakeCluster("ns1", "cluster1").
   407  					WithMachineSets(
   408  						test.NewFakeMachineSet("cluster1-ms1").
   409  							WithInfrastructureTemplate(sharedInfrastructureTemplate).
   410  							WithMachines(
   411  								test.NewFakeMachine("cluster1-m1"),
   412  							),
   413  					).Objs()...)
   414  
   415  				objs = append(objs, test.NewFakeCluster("ns1", "cluster2").
   416  					WithMachineSets(
   417  						test.NewFakeMachineSet("cluster2-ms1").
   418  							WithInfrastructureTemplate(sharedInfrastructureTemplate).
   419  							WithMachines(
   420  								test.NewFakeMachine("cluster2-m1"),
   421  							),
   422  					).Objs()...)
   423  
   424  				return objs
   425  			}(),
   426  		},
   427  		wantMoveGroups: [][]string{
   428  			{ // group 1
   429  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   430  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster2",
   431  			},
   432  			{ // group 2 (objects with ownerReferences in group 1)
   433  				// owned by Clusters
   434  				"/v1, Kind=Secret, ns1/cluster1-ca",
   435  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   436  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/cluster1-ms1",
   437  				"cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/cluster1-ms1",
   438  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   439  				"/v1, Kind=Secret, ns1/cluster2-ca",
   440  				"/v1, Kind=Secret, ns1/cluster2-kubeconfig",
   441  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfigTemplate, ns1/cluster2-ms1",
   442  				"cluster.x-k8s.io/v1beta1, Kind=MachineSet, ns1/cluster2-ms1",
   443  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster2",
   444  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachineTemplate, ns1/shared", // shared object
   445  			},
   446  			{ // group 3 (objects with ownerReferences in group 1,2)
   447  				// owned by MachineSets
   448  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/cluster1-m1",
   449  				"cluster.x-k8s.io/v1beta1, Kind=Machine, ns1/cluster2-m1",
   450  			},
   451  			{ // group 4 (objects with ownerReferences in group 1,2,3)
   452  				// owned by Machines
   453  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/cluster1-m1",
   454  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/cluster1-m1",
   455  				"bootstrap.cluster.x-k8s.io/v1beta1, Kind=GenericBootstrapConfig, ns1/cluster2-m1",
   456  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureMachine, ns1/cluster2-m1",
   457  			},
   458  			{ // group 5 (objects with ownerReferences in group 1,2,3,4)
   459  				// owned by GenericBootstrapConfigs
   460  				"/v1, Kind=Secret, ns1/cluster1-m1",
   461  				"/v1, Kind=Secret, ns1/cluster2-m1",
   462  			},
   463  		},
   464  	},
   465  	{
   466  		name: "A ClusterResourceSet applied to a cluster",
   467  		fields: moveTestsFields{
   468  			objs: func() []client.Object {
   469  				objs := []client.Object{}
   470  				objs = append(objs, test.NewFakeCluster("ns1", "cluster1").Objs()...)
   471  
   472  				objs = append(objs, test.NewFakeClusterResourceSet("ns1", "crs1").
   473  					WithSecret("resource-s1").
   474  					WithConfigMap("resource-c1").
   475  					ApplyToCluster(test.SelectClusterObj(objs, "ns1", "cluster1")).
   476  					Objs()...)
   477  
   478  				return objs
   479  			}(),
   480  		},
   481  		wantMoveGroups: [][]string{
   482  			{ // group 1
   483  				// Cluster
   484  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/cluster1",
   485  				// ClusterResourceSet
   486  				"addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSet, ns1/crs1",
   487  			},
   488  			{ // group 2 (objects with ownerReferences in group 1)
   489  				// owned by Clusters
   490  				"/v1, Kind=Secret, ns1/cluster1-ca",
   491  				"/v1, Kind=Secret, ns1/cluster1-kubeconfig",
   492  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/cluster1",
   493  				// owned by ClusterResourceSet
   494  				"/v1, Kind=Secret, ns1/resource-s1",
   495  				"/v1, Kind=ConfigMap, ns1/resource-c1",
   496  				// owned by ClusterResourceSet & Cluster
   497  				"addons.cluster.x-k8s.io/v1beta1, Kind=ClusterResourceSetBinding, ns1/cluster1",
   498  			},
   499  		},
   500  	},
   501  	{
   502  		name: "Cluster with ClusterClass",
   503  		fields: moveTestsFields{
   504  			objs: func() []client.Object {
   505  				objs := test.NewFakeClusterClass("ns1", "class1").Objs()
   506  				objs = append(objs, test.NewFakeCluster("ns1", "foo").WithTopologyClass("class1").Objs()...)
   507  				return deduplicateObjects(objs)
   508  			}(),
   509  		},
   510  		wantMoveGroups: [][]string{
   511  			{ // group 1
   512  				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1",
   513  			},
   514  			{ // group 2
   515  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1",
   516  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1",
   517  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
   518  			},
   519  			{ // group 3
   520  				"/v1, Kind=Secret, ns1/foo-ca",
   521  				"/v1, Kind=Secret, ns1/foo-kubeconfig",
   522  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
   523  			},
   524  		},
   525  		wantErr: false,
   526  	},
   527  	{
   528  		name: "Two Clusters with two ClusterClasses",
   529  		fields: moveTestsFields{
   530  			objs: func() []client.Object {
   531  				objs := test.NewFakeClusterClass("ns1", "class1").Objs()
   532  				objs = append(objs, test.NewFakeClusterClass("ns1", "class2").Objs()...)
   533  				objs = append(objs, test.NewFakeCluster("ns1", "foo1").WithTopologyClass("class1").Objs()...)
   534  				objs = append(objs, test.NewFakeCluster("ns1", "foo2").WithTopologyClass("class2").Objs()...)
   535  				return deduplicateObjects(objs)
   536  			}(),
   537  		},
   538  		wantMoveGroups: [][]string{
   539  			{ // group 1
   540  				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1",
   541  				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class2",
   542  			},
   543  			{ // group 2
   544  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1",
   545  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1",
   546  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo1",
   547  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class2",
   548  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class2",
   549  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo2",
   550  			},
   551  			{ // group 3
   552  				"/v1, Kind=Secret, ns1/foo1-ca",
   553  				"/v1, Kind=Secret, ns1/foo1-kubeconfig",
   554  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo1",
   555  				"/v1, Kind=Secret, ns1/foo2-ca",
   556  				"/v1, Kind=Secret, ns1/foo2-kubeconfig",
   557  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo2",
   558  			},
   559  		},
   560  		wantErr: false,
   561  	},
   562  	{
   563  		name: "Two Clusters sharing one ClusterClass",
   564  		fields: moveTestsFields{
   565  			objs: func() []client.Object {
   566  				objs := test.NewFakeClusterClass("ns1", "class1").Objs()
   567  				objs = append(objs, test.NewFakeCluster("ns1", "foo1").WithTopologyClass("class1").Objs()...)
   568  				objs = append(objs, test.NewFakeCluster("ns1", "foo2").WithTopologyClass("class1").Objs()...)
   569  				return deduplicateObjects(objs)
   570  			}(),
   571  		},
   572  		wantMoveGroups: [][]string{
   573  			{ // group 1
   574  				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1",
   575  			},
   576  			{ // group 2
   577  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1",
   578  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1",
   579  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo1",
   580  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo2",
   581  			},
   582  			{ // group 3
   583  				"/v1, Kind=Secret, ns1/foo1-ca",
   584  				"/v1, Kind=Secret, ns1/foo1-kubeconfig",
   585  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo1",
   586  				"/v1, Kind=Secret, ns1/foo2-ca",
   587  				"/v1, Kind=Secret, ns1/foo2-kubeconfig",
   588  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo2",
   589  			},
   590  		},
   591  		wantErr: false,
   592  	},
   593  	{
   594  		name: "Cluster with unused ClusterClass",
   595  		fields: moveTestsFields{
   596  			objs: func() []client.Object {
   597  				objs := test.NewFakeClusterClass("ns1", "class1").Objs()
   598  				objs = append(objs, test.NewFakeCluster("ns1", "foo1").Objs()...)
   599  				return deduplicateObjects(objs)
   600  			}(),
   601  		},
   602  		wantMoveGroups: [][]string{
   603  			{ // group 1
   604  				"cluster.x-k8s.io/v1beta1, Kind=ClusterClass, ns1/class1",
   605  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo1",
   606  			},
   607  			{ // group 2
   608  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureClusterTemplate, ns1/class1",
   609  				"controlplane.cluster.x-k8s.io/v1beta1, Kind=GenericControlPlaneTemplate, ns1/class1",
   610  				"/v1, Kind=Secret, ns1/foo1-ca",
   611  				"/v1, Kind=Secret, ns1/foo1-kubeconfig",
   612  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo1",
   613  			},
   614  		},
   615  		wantErr: false,
   616  	},
   617  	{
   618  		// NOTE: External objects are CRD types installed by clusterctl, but not directly related with the CAPI hierarchy of objects. e.g. IPAM claims.
   619  		name: "Namespaced External Objects with force move label",
   620  		fields: moveTestsFields{
   621  			objs: test.NewFakeExternalObject("ns1", "externalObject1").Objs(),
   622  		},
   623  		wantMoveGroups: [][]string{
   624  			{ // group1
   625  				"external.cluster.x-k8s.io/v1beta1, Kind=GenericExternalObject, ns1/externalObject1",
   626  			},
   627  		},
   628  		wantErr: false,
   629  	},
   630  	{
   631  		// NOTE: External objects are CRD types installed by clusterctl, but not directly related with the CAPI hierarchy of objects. e.g. IPAM claims.
   632  		name: "Global External Objects with force move label",
   633  		fields: moveTestsFields{
   634  			objs: test.NewFakeClusterExternalObject("externalObject1").Objs(),
   635  		},
   636  		wantMoveGroups: [][]string{
   637  			{ // group1
   638  				"external.cluster.x-k8s.io/v1beta1, Kind=GenericClusterExternalObject, externalObject1",
   639  			},
   640  		},
   641  		wantErr: false,
   642  	},
   643  	{
   644  		name: "Cluster owning a secret with infrastructure credentials",
   645  		fields: moveTestsFields{
   646  			objs: test.NewFakeCluster("ns1", "foo").
   647  				WithCredentialSecret().Objs(),
   648  		},
   649  		wantMoveGroups: [][]string{
   650  			{ // group 1
   651  				"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo",
   652  			},
   653  			{ // group 2 (objects with ownerReferences in group 1)
   654  				// owned by Clusters
   655  				"/v1, Kind=Secret, ns1/foo-ca",
   656  				"/v1, Kind=Secret, ns1/foo-credentials",
   657  				"/v1, Kind=Secret, ns1/foo-kubeconfig",
   658  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo",
   659  			},
   660  		},
   661  		wantErr: false,
   662  	},
   663  	{
   664  		name: "A global identity for an infrastructure provider owning a Secret with credentials in the provider's namespace",
   665  		fields: moveTestsFields{
   666  			objs: test.NewFakeClusterInfrastructureIdentity("infra1-identity").
   667  				WithSecretIn("infra1-system"). // a secret in infra1-system namespace, where an infrastructure provider is installed
   668  				Objs(),
   669  		},
   670  		wantMoveGroups: [][]string{
   671  			{ // group 1
   672  				"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericClusterInfrastructureIdentity, infra1-identity",
   673  			},
   674  			{ // group 2 (objects with ownerReferences in group 1)
   675  				// owned by Clusters
   676  				"/v1, Kind=Secret, infra1-system/infra1-identity-credentials",
   677  			},
   678  		},
   679  		wantErr: false,
   680  	},
   681  }
   682  
// backupRestoreTests lists the fixtures shared by the backup/restore tests below
// (backupTargetObject, restoreTargetObject, toDirectory, filesToObjs, fromDirectory).
// For each case:
//   - fields holds the objects to seed into the fake source cluster;
//   - files maps the filename produced by node.getFilename() to the exact
//     serialized content expected on disk for that object;
//   - wantErr reports whether the operation under test is expected to fail.
var backupRestoreTests = []struct {
	name    string
	fields  moveTestsFields
	files   map[string]string
	wantErr bool
}{
	{
		// Single cluster: the Cluster object plus its kubeconfig/CA secrets and
		// the GenericInfrastructureCluster it references.
		name: "Cluster",
		fields: moveTestsFields{
			objs: test.NewFakeCluster("ns1", "foo").Objs(),
		},
		files: map[string]string{
			"Cluster_ns1_foo.yaml":                      `{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","metadata":{"creationTimestamp":null,"name":"foo","namespace":"ns1","resourceVersion":"999","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo"},"spec":{"controlPlaneEndpoint":{"host":"","port":0},"infrastructureRef":{"apiVersion":"infrastructure.cluster.x-k8s.io/v1beta1","kind":"GenericInfrastructureCluster","name":"foo","namespace":"ns1"}},"status":{"controlPlaneReady":false,"infrastructureReady":false}}` + "\n",
			"Secret_ns1_foo-kubeconfig.yaml":            `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-kubeconfig","namespace":"ns1","ownerReferences":[{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","name":"foo","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-kubeconfig"}}` + "\n",
			"Secret_ns1_foo-ca.yaml":                    `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-ca","namespace":"ns1","resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-ca"}}` + "\n",
			"GenericInfrastructureCluster_ns1_foo.yaml": `{"apiVersion":"infrastructure.cluster.x-k8s.io/v1beta1","kind":"GenericInfrastructureCluster","metadata":{"creationTimestamp":null,"labels":{"cluster.x-k8s.io/cluster-name":"foo"},"name":"foo","namespace":"ns1","ownerReferences":[{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","name":"foo","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo"}}` + "\n",
		},
		wantErr: false,
	},
	{
		// Two clusters in different namespaces: verifies the namespace is encoded
		// in both the filenames and the serialized contents.
		name: "Many namespace cluster",
		fields: moveTestsFields{
			// Build the union of the objects of two independent fake clusters.
			objs: func() []client.Object {
				objs := []client.Object{}
				objs = append(objs, test.NewFakeCluster("ns1", "foo").Objs()...)
				objs = append(objs, test.NewFakeCluster("ns2", "bar").Objs()...)
				return objs
			}(),
		},
		files: map[string]string{
			"Cluster_ns1_foo.yaml":                      `{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","metadata":{"creationTimestamp":null,"name":"foo","namespace":"ns1","resourceVersion":"999","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo"},"spec":{"controlPlaneEndpoint":{"host":"","port":0},"infrastructureRef":{"apiVersion":"infrastructure.cluster.x-k8s.io/v1beta1","kind":"GenericInfrastructureCluster","name":"foo","namespace":"ns1"}},"status":{"controlPlaneReady":false,"infrastructureReady":false}}` + "\n",
			"Secret_ns1_foo-kubeconfig.yaml":            `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-kubeconfig","namespace":"ns1","ownerReferences":[{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","name":"foo","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-kubeconfig"}}` + "\n",
			"Secret_ns1_foo-ca.yaml":                    `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"foo-ca","namespace":"ns1","resourceVersion":"999","uid":"/v1, Kind=Secret, ns1/foo-ca"}}` + "\n",
			"GenericInfrastructureCluster_ns1_foo.yaml": `{"apiVersion":"infrastructure.cluster.x-k8s.io/v1beta1","kind":"GenericInfrastructureCluster","metadata":{"creationTimestamp":null,"labels":{"cluster.x-k8s.io/cluster-name":"foo"},"name":"foo","namespace":"ns1","ownerReferences":[{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","name":"foo","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns1/foo"}],"resourceVersion":"999","uid":"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns1/foo"}}` + "\n",
			"Cluster_ns2_bar.yaml":                      `{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","metadata":{"creationTimestamp":null,"name":"bar","namespace":"ns2","resourceVersion":"999","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns2/bar"},"spec":{"controlPlaneEndpoint":{"host":"","port":0},"infrastructureRef":{"apiVersion":"infrastructure.cluster.x-k8s.io/v1beta1","kind":"GenericInfrastructureCluster","name":"bar","namespace":"ns2"}},"status":{"controlPlaneReady":false,"infrastructureReady":false}}` + "\n",
			"Secret_ns2_bar-kubeconfig.yaml":            `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"bar-kubeconfig","namespace":"ns2","ownerReferences":[{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","name":"bar","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns2/bar"}],"resourceVersion":"999","uid":"/v1, Kind=Secret, ns2/bar-kubeconfig"}}` + "\n",
			"Secret_ns2_bar-ca.yaml":                    `{"apiVersion":"v1","kind":"Secret","metadata":{"creationTimestamp":null,"name":"bar-ca","namespace":"ns2","resourceVersion":"999","uid":"/v1, Kind=Secret, ns2/bar-ca"}}` + "\n",
			"GenericInfrastructureCluster_ns2_bar.yaml": `{"apiVersion":"infrastructure.cluster.x-k8s.io/v1beta1","kind":"GenericInfrastructureCluster","metadata":{"creationTimestamp":null,"labels":{"cluster.x-k8s.io/cluster-name":"bar"},"name":"bar","namespace":"ns2","ownerReferences":[{"apiVersion":"cluster.x-k8s.io/v1beta1","kind":"Cluster","name":"bar","uid":"cluster.x-k8s.io/v1beta1, Kind=Cluster, ns2/bar"}],"resourceVersion":"999","uid":"infrastructure.cluster.x-k8s.io/v1beta1, Kind=GenericInfrastructureCluster, ns2/bar"}}` + "\n",
		},
		wantErr: false,
	},
}
   725  
   726  func Test_objectMover_backupTargetObject(t *testing.T) {
   727  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
   728  	for _, tt := range backupRestoreTests {
   729  		t.Run(tt.name, func(t *testing.T) {
   730  			g := NewWithT(t)
   731  
   732  			ctx := context.Background()
   733  
   734  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
   735  			graph := getObjectGraphWithObjs(tt.fields.objs)
   736  
   737  			// Get all the types to be considered for discovery
   738  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
   739  
   740  			// trigger discovery the content of the source cluster
   741  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
   742  
   743  			// Run backupTargetObject on nodes in graph
   744  			mover := objectMover{
   745  				fromProxy: graph.proxy,
   746  			}
   747  
   748  			dir, err := os.MkdirTemp("/tmp", "cluster-api")
   749  			if err != nil {
   750  				t.Error(err)
   751  			}
   752  			defer os.RemoveAll(dir)
   753  
   754  			for _, node := range graph.uidToNode {
   755  				err = mover.backupTargetObject(ctx, node, dir)
   756  				if tt.wantErr {
   757  					g.Expect(err).To(HaveOccurred())
   758  					return
   759  				}
   760  
   761  				g.Expect(err).ToNot(HaveOccurred())
   762  
   763  				// objects are stored and serialized correctly in the temporary directory
   764  				expectedFilename := node.getFilename()
   765  				expectedFileContents, ok := tt.files[expectedFilename]
   766  				if !ok {
   767  					t.Errorf("Could not access file map: %v\n", expectedFilename)
   768  				}
   769  
   770  				path := filepath.Join(dir, expectedFilename)
   771  				fileContents, err := os.ReadFile(path) //nolint:gosec
   772  				if err != nil {
   773  					g.Expect(err).ToNot(HaveOccurred())
   774  					return
   775  				}
   776  
   777  				firstFileStat, err := os.Stat(path)
   778  				if err != nil {
   779  					g.Expect(err).ToNot(HaveOccurred())
   780  					return
   781  				}
   782  
   783  				fmt.Printf("Actual file content %v\n", string(fileContents))
   784  				g.Expect(string(fileContents)).To(Equal(expectedFileContents))
   785  
   786  				// Add delay so we ensure the file ModTime of updated files is different from old ones in the original files
   787  				time.Sleep(time.Millisecond * 50)
   788  
   789  				// Running backupTargetObject should override any existing files since it represents a new toDirectory
   790  				err = mover.backupTargetObject(ctx, node, dir)
   791  				if tt.wantErr {
   792  					g.Expect(err).To(HaveOccurred())
   793  					return
   794  				}
   795  
   796  				g.Expect(err).ToNot(HaveOccurred())
   797  
   798  				secondFileStat, err := os.Stat(path)
   799  				if err != nil {
   800  					g.Expect(err).ToNot(HaveOccurred())
   801  					return
   802  				}
   803  
   804  				g.Expect(firstFileStat.ModTime()).To(BeTemporally("<", secondFileStat.ModTime()))
   805  			}
   806  		})
   807  	}
   808  }
   809  
   810  func Test_objectMover_restoreTargetObject(t *testing.T) {
   811  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
   812  	for _, tt := range backupRestoreTests {
   813  		t.Run(tt.name, func(t *testing.T) {
   814  			g := NewWithT(t)
   815  
   816  			ctx := context.Background()
   817  
   818  			// temporary directory
   819  			dir, err := os.MkdirTemp("/tmp", "cluster-api")
   820  			if err != nil {
   821  				g.Expect(err).ToNot(HaveOccurred())
   822  			}
   823  			defer os.RemoveAll(dir)
   824  
   825  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
   826  			graph := getObjectGraph()
   827  
   828  			// Get all the types to be considered for discovery
   829  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
   830  
   831  			// trigger discovery the content of the source cluster
   832  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
   833  
   834  			// gets a fakeProxy to an empty cluster with all the required CRDs
   835  			toProxy := getFakeProxyWithCRDs()
   836  
   837  			// Run restoreTargetObject
   838  			mover := objectMover{
   839  				fromProxy: graph.proxy,
   840  			}
   841  
   842  			// Write go string slice to directory
   843  			for _, file := range tt.files {
   844  				tempFile, err := os.CreateTemp(dir, "obj")
   845  				g.Expect(err).ToNot(HaveOccurred())
   846  
   847  				_, err = tempFile.WriteString(file)
   848  				g.Expect(err).ToNot(HaveOccurred())
   849  				g.Expect(tempFile.Close()).To(Succeed())
   850  			}
   851  
   852  			objs, err := mover.filesToObjs(dir)
   853  			g.Expect(err).ToNot(HaveOccurred())
   854  
   855  			for i := range objs {
   856  				g.Expect(graph.addRestoredObj(&objs[i])).ToNot(HaveOccurred())
   857  			}
   858  
   859  			for _, node := range graph.uidToNode {
   860  				err = mover.restoreTargetObject(ctx, node, toProxy)
   861  				if tt.wantErr {
   862  					g.Expect(err).To(HaveOccurred())
   863  					return
   864  				}
   865  
   866  				g.Expect(err).ToNot(HaveOccurred())
   867  
   868  				// Check objects are in new restored cluster
   869  				csTo, err := toProxy.NewClient(ctx)
   870  				g.Expect(err).ToNot(HaveOccurred())
   871  
   872  				key := client.ObjectKey{
   873  					Namespace: node.identity.Namespace,
   874  					Name:      node.identity.Name,
   875  				}
   876  
   877  				// objects are created in the target cluster
   878  				oTo := &unstructured.Unstructured{}
   879  				oTo.SetAPIVersion(node.identity.APIVersion)
   880  				oTo.SetKind(node.identity.Kind)
   881  
   882  				if err := csTo.Get(ctx, key, oTo); err != nil {
   883  					t.Errorf("error = %v when checking for %s %v created in target cluster", err, oTo.GetKind(), key)
   884  					continue
   885  				}
   886  
   887  				// Re-running restoreTargetObjects won't override existing objects
   888  				err = mover.restoreTargetObject(ctx, node, toProxy)
   889  				if tt.wantErr {
   890  					g.Expect(err).To(HaveOccurred())
   891  					return
   892  				}
   893  
   894  				g.Expect(err).ToNot(HaveOccurred())
   895  
   896  				// Check objects are in new restored cluster
   897  				csAfter, err := toProxy.NewClient(ctx)
   898  				g.Expect(err).ToNot(HaveOccurred())
   899  
   900  				keyAfter := client.ObjectKey{
   901  					Namespace: node.identity.Namespace,
   902  					Name:      node.identity.Name,
   903  				}
   904  
   905  				// objects are created in the target cluster
   906  				oAfter := &unstructured.Unstructured{}
   907  				oAfter.SetAPIVersion(node.identity.APIVersion)
   908  				oAfter.SetKind(node.identity.Kind)
   909  
   910  				if err := csAfter.Get(ctx, keyAfter, oAfter); err != nil {
   911  					t.Errorf("error = %v when checking for %s %v created in target cluster", err, oAfter.GetKind(), key)
   912  					continue
   913  				}
   914  
   915  				g.Expect(oAfter.GetAPIVersion()).Should(Equal(oTo.GetAPIVersion()))
   916  				g.Expect(oAfter.GetName()).Should(Equal(oTo.GetName()))
   917  				g.Expect(oAfter.GetCreationTimestamp()).Should(Equal(oTo.GetCreationTimestamp()))
   918  				g.Expect(oAfter.GetUID()).Should(Equal(oTo.GetUID()))
   919  				g.Expect(oAfter.GetOwnerReferences()).Should(BeComparableTo(oTo.GetOwnerReferences()))
   920  			}
   921  		})
   922  	}
   923  }
   924  
   925  func Test_objectMover_toDirectory(t *testing.T) {
   926  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
   927  	for _, tt := range backupRestoreTests {
   928  		t.Run(tt.name, func(t *testing.T) {
   929  			g := NewWithT(t)
   930  
   931  			ctx := context.Background()
   932  
   933  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
   934  			graph := getObjectGraphWithObjs(tt.fields.objs)
   935  
   936  			// Get all the types to be considered for discovery
   937  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
   938  
   939  			// trigger discovery the content of the source cluster
   940  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
   941  
   942  			// Run toDirectory
   943  			mover := objectMover{
   944  				fromProxy: graph.proxy,
   945  			}
   946  
   947  			dir, err := os.MkdirTemp("/tmp", "cluster-api")
   948  			if err != nil {
   949  				t.Error(err)
   950  			}
   951  			defer os.RemoveAll(dir)
   952  
   953  			err = mover.toDirectory(ctx, graph, dir)
   954  			if tt.wantErr {
   955  				g.Expect(err).To(HaveOccurred())
   956  				return
   957  			}
   958  
   959  			g.Expect(err).ToNot(HaveOccurred())
   960  
   961  			// check that the objects are stored in the temporary directory but not deleted from the source cluster
   962  			csFrom, err := graph.proxy.NewClient(ctx)
   963  			g.Expect(err).ToNot(HaveOccurred())
   964  
   965  			missingFiles := []string{}
   966  			for _, node := range graph.uidToNode {
   967  				key := client.ObjectKey{
   968  					Namespace: node.identity.Namespace,
   969  					Name:      node.identity.Name,
   970  				}
   971  
   972  				// objects are not deleted from the source cluster
   973  				oFrom := &unstructured.Unstructured{}
   974  				oFrom.SetAPIVersion(node.identity.APIVersion)
   975  				oFrom.SetKind(node.identity.Kind)
   976  
   977  				err := csFrom.Get(ctx, key, oFrom)
   978  				g.Expect(err).ToNot(HaveOccurred())
   979  
   980  				// objects are stored in the temporary directory with the expected filename
   981  				files, err := os.ReadDir(dir)
   982  				g.Expect(err).ToNot(HaveOccurred())
   983  
   984  				expectedFilename := node.getFilename()
   985  				found := false
   986  				for _, f := range files {
   987  					if strings.Contains(f.Name(), expectedFilename) {
   988  						found = true
   989  					}
   990  				}
   991  
   992  				if !found {
   993  					missingFiles = append(missingFiles, expectedFilename)
   994  				}
   995  			}
   996  
   997  			g.Expect(missingFiles).To(BeEmpty())
   998  		})
   999  	}
  1000  }
  1001  
  1002  func Test_objectMover_filesToObjs(t *testing.T) {
  1003  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
  1004  	for _, tt := range backupRestoreTests {
  1005  		t.Run(tt.name, func(t *testing.T) {
  1006  			g := NewWithT(t)
  1007  
  1008  			dir, err := os.MkdirTemp("/tmp", "cluster-api")
  1009  			if err != nil {
  1010  				t.Error(err)
  1011  			}
  1012  			defer os.RemoveAll(dir)
  1013  
  1014  			for _, fileName := range tt.files {
  1015  				path := filepath.Join(dir, fileName)
  1016  				file, err := os.Create(path) //nolint:gosec // No security issue: unit test.
  1017  				if err != nil {
  1018  					return
  1019  				}
  1020  
  1021  				_, err = file.WriteString(tt.files[fileName])
  1022  				g.Expect(err).ToNot(HaveOccurred())
  1023  				g.Expect(file.Close()).To(Succeed())
  1024  			}
  1025  
  1026  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
  1027  			graph := getObjectGraphWithObjs(tt.fields.objs)
  1028  
  1029  			// Run filesToObjs
  1030  			mover := objectMover{
  1031  				fromProxy: graph.proxy,
  1032  			}
  1033  
  1034  			objs, err := mover.filesToObjs(dir)
  1035  			if tt.wantErr {
  1036  				g.Expect(err).To(HaveOccurred())
  1037  				return
  1038  			}
  1039  
  1040  			g.Expect(err).ToNot(HaveOccurred())
  1041  
  1042  			missingObjs := []unstructured.Unstructured{}
  1043  			for _, obj := range objs {
  1044  				found := false
  1045  				for _, expected := range tt.fields.objs {
  1046  					if expected.GetName() == obj.GetName() && expected.GetObjectKind() == obj.GetObjectKind() {
  1047  						found = true
  1048  					}
  1049  				}
  1050  
  1051  				if !found {
  1052  					missingObjs = append(missingObjs, obj)
  1053  				}
  1054  			}
  1055  
  1056  			g.Expect(missingObjs).To(BeEmpty())
  1057  		})
  1058  	}
  1059  }
  1060  
  1061  func Test_objectMover_fromDirectory(t *testing.T) {
  1062  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
  1063  	for _, tt := range backupRestoreTests {
  1064  		t.Run(tt.name, func(t *testing.T) {
  1065  			g := NewWithT(t)
  1066  
  1067  			ctx := context.Background()
  1068  
  1069  			// temporary directory
  1070  			dir, err := os.MkdirTemp("/tmp", "cluster-api")
  1071  			if err != nil {
  1072  				g.Expect(err).ToNot(HaveOccurred())
  1073  			}
  1074  			defer os.RemoveAll(dir)
  1075  
  1076  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
  1077  			graph := getObjectGraph()
  1078  
  1079  			// Get all the types to be considered for discovery
  1080  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
  1081  
  1082  			// gets a fakeProxy to an empty cluster with all the required CRDs
  1083  			toProxy := getFakeProxyWithCRDs()
  1084  
  1085  			// Run fromDirectory
  1086  			mover := objectMover{
  1087  				fromProxy: graph.proxy,
  1088  			}
  1089  
  1090  			// Write go string slice to directory
  1091  			for _, file := range tt.files {
  1092  				tempFile, err := os.CreateTemp(dir, "obj")
  1093  				g.Expect(err).ToNot(HaveOccurred())
  1094  
  1095  				_, err = tempFile.WriteString(file)
  1096  				g.Expect(err).ToNot(HaveOccurred())
  1097  				g.Expect(tempFile.Close()).To(Succeed())
  1098  			}
  1099  
  1100  			objs, err := mover.filesToObjs(dir)
  1101  			g.Expect(err).ToNot(HaveOccurred())
  1102  
  1103  			for i := range objs {
  1104  				g.Expect(graph.addRestoredObj(&objs[i])).ToNot(HaveOccurred())
  1105  			}
  1106  
  1107  			// fromDirectory works on the target cluster which does not yet have objs to discover
  1108  			// instead set the owners and tenants correctly on object graph like how ObjectMover.Restore does
  1109  			// https://github.com/kubernetes-sigs/cluster-api/blob/main/cmd/clusterctl/client/cluster/mover.go#L129-L132
  1110  			graph.setSoftOwnership()
  1111  			graph.setTenants()
  1112  			graph.checkVirtualNode()
  1113  
  1114  			err = mover.fromDirectory(ctx, graph, toProxy)
  1115  			if tt.wantErr {
  1116  				g.Expect(err).To(HaveOccurred())
  1117  				return
  1118  			}
  1119  
  1120  			g.Expect(err).ToNot(HaveOccurred())
  1121  
  1122  			// Check objects are in new restored cluster
  1123  			csTo, err := toProxy.NewClient(ctx)
  1124  			g.Expect(err).ToNot(HaveOccurred())
  1125  
  1126  			for _, node := range graph.uidToNode {
  1127  				key := client.ObjectKey{
  1128  					Namespace: node.identity.Namespace,
  1129  					Name:      node.identity.Name,
  1130  				}
  1131  
  1132  				// objects are created in the target cluster
  1133  				oTo := &unstructured.Unstructured{}
  1134  				oTo.SetAPIVersion(node.identity.APIVersion)
  1135  				oTo.SetKind(node.identity.Kind)
  1136  
  1137  				if err := csTo.Get(ctx, key, oTo); err != nil {
  1138  					t.Errorf("error = %v when checking for %s %v created in target cluster", err, oTo.GetKind(), key)
  1139  					continue
  1140  				}
  1141  			}
  1142  		})
  1143  	}
  1144  }
  1145  
  1146  func Test_getMoveSequence(t *testing.T) {
  1147  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
  1148  	for _, tt := range moveTests {
  1149  		t.Run(tt.name, func(t *testing.T) {
  1150  			g := NewWithT(t)
  1151  
  1152  			ctx := context.Background()
  1153  
  1154  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
  1155  			graph := getObjectGraphWithObjs(tt.fields.objs)
  1156  
  1157  			// Get all the types to be considered for discovery
  1158  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
  1159  
  1160  			// trigger discovery the content of the source cluster
  1161  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
  1162  
  1163  			moveSequence := getMoveSequence(graph)
  1164  			g.Expect(moveSequence.groups).To(HaveLen(len(tt.wantMoveGroups)))
  1165  
  1166  			for i, gotGroup := range moveSequence.groups {
  1167  				wantGroup := tt.wantMoveGroups[i]
  1168  				gotNodes := []string{}
  1169  				for _, node := range gotGroup {
  1170  					gotNodes = append(gotNodes, string(node.identity.UID))
  1171  				}
  1172  
  1173  				g.Expect(gotNodes).To(ConsistOf(wantGroup))
  1174  			}
  1175  		})
  1176  	}
  1177  }
  1178  
  1179  func Test_objectMover_move_dryRun(t *testing.T) {
  1180  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
  1181  	for _, tt := range moveTests {
  1182  		t.Run(tt.name, func(t *testing.T) {
  1183  			g := NewWithT(t)
  1184  
  1185  			ctx := context.Background()
  1186  
  1187  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
  1188  			graph := getObjectGraphWithObjs(tt.fields.objs)
  1189  
  1190  			// Get all the types to be considered for discovery
  1191  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
  1192  
  1193  			// trigger discovery the content of the source cluster
  1194  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
  1195  
  1196  			// gets a fakeProxy to an empty cluster with all the required CRDs
  1197  			toProxy := getFakeProxyWithCRDs()
  1198  
  1199  			// Run move
  1200  			mover := objectMover{
  1201  				fromProxy: graph.proxy,
  1202  				dryRun:    true,
  1203  			}
  1204  
  1205  			err := mover.move(ctx, graph, toProxy, nil)
  1206  			if tt.wantErr {
  1207  				g.Expect(err).To(HaveOccurred())
  1208  				return
  1209  			}
  1210  
  1211  			g.Expect(err).ToNot(HaveOccurred())
  1212  
  1213  			// check that the objects are kept in the source cluster and are not created in the target cluster
  1214  			csFrom, err := graph.proxy.NewClient(ctx)
  1215  			g.Expect(err).ToNot(HaveOccurred())
  1216  
  1217  			csTo, err := toProxy.NewClient(ctx)
  1218  			g.Expect(err).ToNot(HaveOccurred())
  1219  			for _, node := range graph.uidToNode {
  1220  				key := client.ObjectKey{
  1221  					Namespace: node.identity.Namespace,
  1222  					Name:      node.identity.Name,
  1223  				}
  1224  				// objects are kept in source cluster as it's dry run
  1225  				oFrom := &unstructured.Unstructured{}
  1226  				oFrom.SetAPIVersion(node.identity.APIVersion)
  1227  				oFrom.SetKind(node.identity.Kind)
  1228  
  1229  				if err := csFrom.Get(ctx, key, oFrom); err != nil {
  1230  					t.Errorf("error = %v when checking for %s %v kept in source cluster", err, oFrom.GetKind(), key)
  1231  					continue
  1232  				}
  1233  
  1234  				// objects are not created in target cluster as it's dry run
  1235  				oTo := &unstructured.Unstructured{}
  1236  				oTo.SetAPIVersion(node.identity.APIVersion)
  1237  				oTo.SetKind(node.identity.Kind)
  1238  
  1239  				err := csTo.Get(ctx, key, oTo)
  1240  				if err == nil {
  1241  					if oFrom.GetNamespace() != "" {
  1242  						t.Errorf("%s %v created in target cluster which should not", oFrom.GetKind(), key)
  1243  						continue
  1244  					}
  1245  				} else if !apierrors.IsNotFound(err) {
  1246  					t.Errorf("error = %v when checking for %s %v should not created ojects in target cluster", err, oFrom.GetKind(), key)
  1247  					continue
  1248  				}
  1249  			}
  1250  		})
  1251  	}
  1252  }
  1253  
  1254  func Test_objectMover_move(t *testing.T) {
  1255  	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
  1256  	for _, tt := range moveTests {
  1257  		t.Run(tt.name, func(t *testing.T) {
  1258  			g := NewWithT(t)
  1259  
  1260  			ctx := context.Background()
  1261  
  1262  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
  1263  			graph := getObjectGraphWithObjs(tt.fields.objs)
  1264  
  1265  			// Get all the types to be considered for discovery
  1266  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
  1267  
  1268  			// trigger discovery the content of the source cluster
  1269  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
  1270  
  1271  			// gets a fakeProxy to an empty cluster with all the required CRDs
  1272  			toProxy := getFakeProxyWithCRDs()
  1273  
  1274  			// Run move
  1275  			mover := objectMover{
  1276  				fromProxy: graph.proxy,
  1277  			}
  1278  			err := mover.move(ctx, graph, toProxy)
  1279  
  1280  			if tt.wantErr {
  1281  				g.Expect(err).To(HaveOccurred())
  1282  				return
  1283  			}
  1284  
  1285  			g.Expect(err).ToNot(HaveOccurred())
  1286  
  1287  			// check that the objects are removed from the source cluster and are created in the target cluster
  1288  			csFrom, err := graph.proxy.NewClient(ctx)
  1289  			g.Expect(err).ToNot(HaveOccurred())
  1290  
  1291  			csTo, err := toProxy.NewClient(ctx)
  1292  			g.Expect(err).ToNot(HaveOccurred())
  1293  
  1294  			for _, node := range graph.uidToNode {
  1295  				key := client.ObjectKey{
  1296  					Namespace: node.identity.Namespace,
  1297  					Name:      node.identity.Name,
  1298  				}
  1299  
  1300  				// objects are deleted from the source cluster
  1301  				oFrom := &unstructured.Unstructured{}
  1302  				oFrom.SetAPIVersion(node.identity.APIVersion)
  1303  				oFrom.SetKind(node.identity.Kind)
  1304  
  1305  				err := csFrom.Get(ctx, key, oFrom)
  1306  				if err == nil {
  1307  					if !node.isGlobal && !node.isGlobalHierarchy {
  1308  						t.Errorf("%s %v not deleted in source cluster", oFrom.GetKind(), key)
  1309  						continue
  1310  					}
  1311  				} else if !apierrors.IsNotFound(err) {
  1312  					t.Errorf("error = %v when checking for %s %v deleted in source cluster", err, oFrom.GetKind(), key)
  1313  					continue
  1314  				}
  1315  
  1316  				// objects are created in the target cluster
  1317  				oTo := &unstructured.Unstructured{}
  1318  				oTo.SetAPIVersion(node.identity.APIVersion)
  1319  				oTo.SetKind(node.identity.Kind)
  1320  
  1321  				if err := csTo.Get(ctx, key, oTo); err != nil {
  1322  					t.Errorf("error = %v when checking for %s %v created in target cluster", err, oFrom.GetKind(), key)
  1323  					continue
  1324  				}
  1325  			}
  1326  		})
  1327  	}
  1328  }
  1329  
// Test_objectMover_move_with_Mutator verifies that move applies a ResourceMutatorFunc
// to every object: here the mutator rewrites the namespace (and known namespace-bearing
// fields) to a fixed value, and the test checks the mutated objects land in the target.
func Test_objectMover_move_with_Mutator(t *testing.T) {
	// NB. we are testing the move and move sequence using the same set of moveTests, but checking the results at different stages of the move process
	// we use same mutator function for all tests and validate outcome based on input.
	for _, tt := range moveTests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			ctx := context.Background()

			toNamespace := "foobar"
			// Per-kind field paths whose value is a namespace and must be rewritten
			// by the mutator; the last Cluster entry deliberately names a field that
			// does not exist to show unknown paths are harmless.
			updateKnownKinds := map[string][][]string{
				"Cluster": {
					{"metadata", "namespace"},
					{"spec", "controlPlaneRef", "namespace"},
					{"spec", "infrastructureRef", "namespace"},
					{"unknown", "field", "does", "not", "cause", "errors"},
				},
				"KubeadmControlPlane": {
					{"spec", "machineTemplate", "infrastructureRef", "namespace"},
				},
				"Machine": {
					{"spec", "bootstrap", "configRef", "namespace"},
					{"spec", "infrastructureRef", "namespace"},
				},
			}
			// The mutator runs inside move for each object; note it asserts via the
			// enclosing gomega instance, so mutator failures fail this subtest.
			var namespaceMutator ResourceMutatorFunc = func(u *unstructured.Unstructured) error {
				if u == nil || u.Object == nil {
					return nil
				}
				// Only namespaced objects get their namespace rewritten.
				if u.GetNamespace() != "" {
					u.SetNamespace(toNamespace)
				}
				if fields, knownKind := updateKnownKinds[u.GetKind()]; knownKind {
					for _, nsField := range fields {
						_, exists, err := unstructured.NestedFieldNoCopy(u.Object, nsField...)
						g.Expect(err).ToNot(HaveOccurred())
						if exists {
							g.Expect(unstructured.SetNestedField(u.Object, toNamespace, nsField...)).To(Succeed())
						}
					}
				}
				return nil
			}

			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
			graph := getObjectGraphWithObjs(tt.fields.objs)

			// Get all the types to be considered for discovery
			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())

			// trigger discovery the content of the source cluster
			g.Expect(graph.Discovery(ctx, "")).To(Succeed())

			// gets a fakeProxy to an empty cluster with all the required CRDs
			toProxy := getFakeProxyWithCRDs()

			// Run move with mutators
			mover := objectMover{
				fromProxy: graph.proxy,
			}

			err := mover.move(ctx, graph, toProxy, namespaceMutator)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}

			g.Expect(err).ToNot(HaveOccurred())

			// check that the objects are removed from the source cluster and are created in the target cluster
			csFrom, err := graph.proxy.NewClient(ctx)
			g.Expect(err).ToNot(HaveOccurred())

			csTo, err := toProxy.NewClient(ctx)
			g.Expect(err).ToNot(HaveOccurred())

			for _, node := range graph.uidToNode {
				key := client.ObjectKey{
					Namespace: node.identity.Namespace,
					Name:      node.identity.Name,
				}

				// objects are deleted from the source cluster
				oFrom := &unstructured.Unstructured{}
				oFrom.SetAPIVersion(node.identity.APIVersion)
				oFrom.SetKind(node.identity.Kind)

				err := csFrom.Get(ctx, key, oFrom)
				if err == nil {
					// Global (and global-hierarchy) nodes are intentionally kept in the source.
					if !node.isGlobal && !node.isGlobalHierarchy {
						t.Errorf("%s %v not deleted in source cluster", oFrom.GetKind(), key)
						continue
					}
				} else if !apierrors.IsNotFound(err) {
					t.Errorf("error = %v when checking for %s %v deleted in source cluster", err, oFrom.GetKind(), key)
					continue
				}

				// objects are created in the target cluster
				oTo := &unstructured.Unstructured{}
				oTo.SetAPIVersion(node.identity.APIVersion)
				oTo.SetKind(node.identity.Kind)
				// Namespaced objects were moved into the mutated namespace, so look them up there.
				if !node.isGlobal {
					key.Namespace = toNamespace
				}

				if err := csTo.Get(ctx, key, oTo); err != nil {
					t.Errorf("error = %v when checking for %s %v created in target cluster", err, oFrom.GetKind(), key)
					continue
				}
				// Every known namespace-bearing field that survived the move must carry the mutated value.
				if fields, knownKind := updateKnownKinds[oTo.GetKind()]; knownKind {
					for _, nsField := range fields {
						value, exists, err := unstructured.NestedFieldNoCopy(oTo.Object, nsField...)
						g.Expect(err).ToNot(HaveOccurred())
						if exists {
							g.Expect(value).To(Equal(toNamespace))
						}
					}
				}
			}
		})
	}
}
  1453  
// Test_objectMover_checkProvisioningCompleted verifies that checkProvisioningCompleted
// blocks the move until the source Cluster is fully provisioned: InfrastructureReady set,
// the ControlPlaneInitialized condition true, ControlPlaneReady set when a control plane
// ref exists, and every owned Machine carrying a NodeRef.
func Test_objectMover_checkProvisioningCompleted(t *testing.T) {
	type fields struct {
		// objs is the content of the fake source cluster the check runs against.
		objs []client.Object
	}
	tests := []struct {
		name    string
		fields  fields
		wantErr bool
	}{
		{
			name: "Blocks with a cluster without InfrastructureReady",
			fields: fields{
				objs: []client.Object{
					&clusterv1.Cluster{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Cluster",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "cluster1",
						},
						Status: clusterv1.ClusterStatus{
							InfrastructureReady: false,
							Conditions: clusterv1.Conditions{
								*conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition),
							},
						},
					},
				},
			},
			wantErr: true,
		},
		{
			// No ControlPlaneInitialized condition at all on the Cluster.
			name: "Blocks with a cluster without ControlPlaneInitialized",
			fields: fields{
				objs: []client.Object{
					&clusterv1.Cluster{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Cluster",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "cluster1",
						},
						Status: clusterv1.ClusterStatus{
							InfrastructureReady: true,
						},
					},
				},
			},
			wantErr: true,
		},
		{
			name: "Blocks with a cluster with ControlPlaneInitialized=False",
			fields: fields{
				objs: []client.Object{
					&clusterv1.Cluster{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Cluster",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "cluster1",
						},
						Status: clusterv1.ClusterStatus{
							InfrastructureReady: true,
							Conditions: clusterv1.Conditions{
								*conditions.FalseCondition(clusterv1.ControlPlaneInitializedCondition, "", clusterv1.ConditionSeverityInfo, ""),
							},
						},
					},
				},
			},
			wantErr: true,
		},
		{
			// ControlPlaneReady is only required when Spec.ControlPlaneRef is set.
			name: "Blocks with a cluster without ControlPlaneReady",
			fields: fields{
				objs: []client.Object{
					&clusterv1.Cluster{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Cluster",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "cluster1",
						},
						Spec: clusterv1.ClusterSpec{
							ControlPlaneRef: &corev1.ObjectReference{},
						},
						Status: clusterv1.ClusterStatus{
							InfrastructureReady: true,
							Conditions: clusterv1.Conditions{
								*conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition),
							},
							ControlPlaneReady: false,
						},
					},
				},
			},
			wantErr: true,
		},
		{
			name: "Blocks with a Machine Without NodeRef",
			fields: fields{
				objs: []client.Object{
					&clusterv1.Cluster{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Cluster",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "cluster1",
							UID:       "cluster1",
						},
						Status: clusterv1.ClusterStatus{
							InfrastructureReady: true,
							Conditions: clusterv1.Conditions{
								*conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition),
							},
						},
					},
					&clusterv1.Machine{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Machine",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "machine1",
							// Owner reference links the Machine to cluster1 so it is
							// included in the provisioning check for that cluster.
							OwnerReferences: []metav1.OwnerReference{
								{
									APIVersion: clusterv1.GroupVersion.String(),
									Kind:       "Cluster",
									Name:       "cluster1",
									UID:        "cluster1",
								},
							},
						},
						Status: clusterv1.MachineStatus{
							NodeRef: nil,
						},
					},
				},
			},
			wantErr: true,
		},
		{
			// Fully provisioned cluster and machine: the check must pass.
			name: "Pass",
			fields: fields{
				objs: []client.Object{
					&clusterv1.Cluster{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Cluster",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "cluster1",
							UID:       "cluster1",
						},
						Status: clusterv1.ClusterStatus{
							InfrastructureReady: true,
							Conditions: clusterv1.Conditions{
								*conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition),
							},
						},
					},
					&clusterv1.Machine{
						TypeMeta: metav1.TypeMeta{
							Kind:       "Machine",
							APIVersion: clusterv1.GroupVersion.String(),
						},
						ObjectMeta: metav1.ObjectMeta{
							Namespace: "ns1",
							Name:      "machine1",
							OwnerReferences: []metav1.OwnerReference{
								{
									APIVersion: clusterv1.GroupVersion.String(),
									Kind:       "Cluster",
									Name:       "cluster1",
									UID:        "cluster1",
								},
							},
						},
						Status: clusterv1.MachineStatus{
							NodeRef: &corev1.ObjectReference{},
						},
					},
				},
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			ctx := context.Background()

			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
			graph := getObjectGraphWithObjs(tt.fields.objs)

			// Get all the types to be considered for discovery
			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())

			// trigger discovery the content of the source cluster
			g.Expect(graph.Discovery(ctx, "")).To(Succeed())

			// Run the provisioning check against the discovered object graph.
			o := &objectMover{
				fromProxy: graph.proxy,
			}
			err := o.checkProvisioningCompleted(ctx, graph)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}
		})
	}
}
  1680  
  1681  func Test_objectsMoverService_checkTargetProviders(t *testing.T) {
  1682  	type fields struct {
  1683  		fromProxy Proxy
  1684  	}
  1685  	type args struct {
  1686  		toProxy Proxy
  1687  	}
  1688  	tests := []struct {
  1689  		name    string
  1690  		fields  fields
  1691  		args    args
  1692  		wantErr bool
  1693  	}{
  1694  		{
  1695  			name: "all the providers in place (lazy matching)",
  1696  			fields: fields{
  1697  				fromProxy: test.NewFakeProxy().
  1698  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v1.0.0", "capi-system").
  1699  					WithProviderInventory("kubeadm", clusterctlv1.BootstrapProviderType, "v1.0.0", "cabpk-system").
  1700  					WithProviderInventory("capa", clusterctlv1.InfrastructureProviderType, "v1.0.0", "capa-system"),
  1701  			},
  1702  			args: args{
  1703  				toProxy: test.NewFakeProxy().
  1704  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v1.0.0", "capi-system").
  1705  					WithProviderInventory("kubeadm", clusterctlv1.BootstrapProviderType, "v1.0.0", "cabpk-system").
  1706  					WithProviderInventory("capa", clusterctlv1.InfrastructureProviderType, "v1.0.0", "capa-system"),
  1707  			},
  1708  			wantErr: false,
  1709  		},
  1710  		{
  1711  			name: "all the providers in place but with a newer version (lazy matching)",
  1712  			fields: fields{
  1713  				fromProxy: test.NewFakeProxy().
  1714  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v2.0.0", "capi-system"),
  1715  			},
  1716  			args: args{
  1717  				toProxy: test.NewFakeProxy().
  1718  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v2.1.0", "capi-system"), // Lazy matching
  1719  			},
  1720  			wantErr: false,
  1721  		},
  1722  		{
  1723  			name: "fails if a provider is missing",
  1724  			fields: fields{
  1725  				fromProxy: test.NewFakeProxy().
  1726  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v2.0.0", "capi-system"),
  1727  			},
  1728  			args: args{
  1729  				toProxy: test.NewFakeProxy(),
  1730  			},
  1731  			wantErr: true,
  1732  		},
  1733  		{
  1734  			name: "fails if a provider version is older than expected",
  1735  			fields: fields{
  1736  				fromProxy: test.NewFakeProxy().
  1737  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v2.0.0", "capi-system"),
  1738  			},
  1739  			args: args{
  1740  				toProxy: test.NewFakeProxy().
  1741  					WithProviderInventory("capi", clusterctlv1.CoreProviderType, "v1.0.0", "capi1-system"),
  1742  			},
  1743  			wantErr: true,
  1744  		},
  1745  	}
  1746  	for _, tt := range tests {
  1747  		t.Run(tt.name, func(t *testing.T) {
  1748  			g := NewWithT(t)
  1749  
  1750  			ctx := context.Background()
  1751  
  1752  			o := &objectMover{
  1753  				fromProviderInventory: newInventoryClient(tt.fields.fromProxy, nil),
  1754  			}
  1755  			err := o.checkTargetProviders(ctx, newInventoryClient(tt.args.toProxy, nil))
  1756  			if tt.wantErr {
  1757  				g.Expect(err).To(HaveOccurred())
  1758  			} else {
  1759  				g.Expect(err).ToNot(HaveOccurred())
  1760  			}
  1761  		})
  1762  	}
  1763  }
  1764  
  1765  func Test_objectMoverService_ensureNamespace(t *testing.T) {
  1766  	type args struct {
  1767  		toProxy   Proxy
  1768  		namespace string
  1769  	}
  1770  
  1771  	namespace1 := &corev1.Namespace{
  1772  		ObjectMeta: metav1.ObjectMeta{
  1773  			Name: "namespace-1",
  1774  		},
  1775  	}
  1776  
  1777  	tests := []struct {
  1778  		name string
  1779  		args args
  1780  	}{
  1781  		{
  1782  			name: "ensureNamespace doesn't fail given an existing namespace",
  1783  			args: args{
  1784  				// Create a fake cluster target with namespace-1 already existing
  1785  				toProxy: test.NewFakeProxy().WithObjs(namespace1),
  1786  				// Ensure namespace-1 gets created
  1787  				namespace: "namespace-1",
  1788  			},
  1789  		},
  1790  		{
  1791  			name: "ensureNamespace doesn't fail if the namespace does not already exist in the target",
  1792  			args: args{
  1793  				// Create a fake empty client
  1794  				toProxy: test.NewFakeProxy(),
  1795  				// Ensure namespace-2 gets created
  1796  				namespace: "namespace-2",
  1797  			},
  1798  		},
  1799  	}
  1800  
  1801  	for _, tt := range tests {
  1802  		t.Run(tt.name, func(t *testing.T) {
  1803  			g := NewWithT(t)
  1804  
  1805  			ctx := context.Background()
  1806  
  1807  			mover := objectMover{
  1808  				fromProxy: test.NewFakeProxy(),
  1809  			}
  1810  
  1811  			err := mover.ensureNamespace(ctx, tt.args.toProxy, tt.args.namespace)
  1812  			g.Expect(err).ToNot(HaveOccurred())
  1813  
  1814  			// Check that the namespaces either existed or were created in the
  1815  			// target.
  1816  			csTo, err := tt.args.toProxy.NewClient(ctx)
  1817  			g.Expect(err).ToNot(HaveOccurred())
  1818  
  1819  			ns := &corev1.Namespace{}
  1820  			key := client.ObjectKey{
  1821  				// Search for this namespace
  1822  				Name: tt.args.namespace,
  1823  			}
  1824  
  1825  			err = csTo.Get(ctx, key, ns)
  1826  			g.Expect(err).ToNot(HaveOccurred())
  1827  		})
  1828  	}
  1829  }
  1830  
  1831  func Test_objectMoverService_ensureNamespaces(t *testing.T) {
  1832  	type args struct {
  1833  		toProxy Proxy
  1834  	}
  1835  	type fields struct {
  1836  		objs []client.Object
  1837  	}
  1838  
  1839  	// Create some test runtime objects to be used in the tests
  1840  	namespace1 := &corev1.Namespace{
  1841  		ObjectMeta: metav1.ObjectMeta{
  1842  			Name: "namespace-1",
  1843  		},
  1844  	}
  1845  
  1846  	cluster1 := test.NewFakeCluster("namespace-1", "cluster-1")
  1847  	cluster2 := test.NewFakeCluster("namespace-2", "cluster-2")
  1848  	globalObj := test.NewFakeClusterExternalObject("eo-1")
  1849  
  1850  	clustersObjs := append(cluster1.Objs(), cluster2.Objs()...)
  1851  
  1852  	tests := []struct {
  1853  		name               string
  1854  		fields             fields
  1855  		args               args
  1856  		expectedNamespaces []string
  1857  	}{
  1858  		{
  1859  			name: "ensureNamespaces doesn't fail given an existing namespace",
  1860  			fields: fields{
  1861  				objs: cluster1.Objs(),
  1862  			},
  1863  			args: args{
  1864  				toProxy: test.NewFakeProxy(),
  1865  			},
  1866  			expectedNamespaces: []string{"namespace-1"},
  1867  		},
  1868  		{
  1869  			name: "ensureNamespaces moves namespace-1 and namespace-2 to target",
  1870  			fields: fields{
  1871  				objs: clustersObjs,
  1872  			},
  1873  			args: args{
  1874  				toProxy: test.NewFakeProxy(),
  1875  			},
  1876  			expectedNamespaces: []string{"namespace-1", "namespace-2"},
  1877  		},
  1878  		{
  1879  
  1880  			name: "ensureNamespaces moves namespace-2 to target which already has namespace-1",
  1881  			fields: fields{
  1882  				objs: cluster2.Objs(),
  1883  			},
  1884  			args: args{
  1885  				toProxy: test.NewFakeProxy().WithObjs(namespace1),
  1886  			},
  1887  			expectedNamespaces: []string{"namespace-1", "namespace-2"},
  1888  		},
  1889  		{
  1890  			name: "ensureNamespaces doesn't fail if no namespace is specified (cluster-wide)",
  1891  			fields: fields{
  1892  				objs: globalObj.Objs(),
  1893  			},
  1894  			args: args{
  1895  				toProxy: test.NewFakeProxy(),
  1896  			},
  1897  		},
  1898  	}
  1899  
  1900  	for _, tt := range tests {
  1901  		t.Run(tt.name, func(t *testing.T) {
  1902  			g := NewWithT(t)
  1903  
  1904  			ctx := context.Background()
  1905  
  1906  			graph := getObjectGraphWithObjs(tt.fields.objs)
  1907  
  1908  			// Get all the types to be considered for discovery
  1909  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
  1910  
  1911  			// Trigger discovery the content of the source cluster
  1912  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
  1913  
  1914  			mover := objectMover{
  1915  				fromProxy: graph.proxy,
  1916  			}
  1917  
  1918  			err := mover.ensureNamespaces(ctx, graph, tt.args.toProxy)
  1919  			g.Expect(err).ToNot(HaveOccurred())
  1920  
  1921  			// Check that the namespaces either existed or were created in the
  1922  			// target.
  1923  			csTo, err := tt.args.toProxy.NewClient(ctx)
  1924  			g.Expect(err).ToNot(HaveOccurred())
  1925  
  1926  			namespaces := &corev1.NamespaceList{}
  1927  
  1928  			err = csTo.List(ctx, namespaces, client.Continue(namespaces.Continue))
  1929  			g.Expect(err).ToNot(HaveOccurred())
  1930  
  1931  			// Ensure length of namespaces matches what's expected to ensure we're handling
  1932  			// cluster-wide (namespace of "") objects
  1933  			g.Expect(namespaces.Items).To(HaveLen(len(tt.expectedNamespaces)))
  1934  
  1935  			// Loop through each expected result to ensure that it is found in
  1936  			// the actual results.
  1937  			for _, expected := range tt.expectedNamespaces {
  1938  				exists := false
  1939  				for _, item := range namespaces.Items {
  1940  					if item.Name == expected {
  1941  						exists = true
  1942  					}
  1943  				}
  1944  				// If at any point a namespace was not found, it must have not
  1945  				// been moved to the target successfully.
  1946  				if !exists {
  1947  					t.Errorf("namespace: %v not found in target cluster", expected)
  1948  				}
  1949  			}
  1950  		})
  1951  	}
  1952  }
  1953  
// Test_createTargetObject verifies createTargetObject behavior: it fails if the object
// is missing from the source cluster, creates the object (with its owner references,
// and the hosting namespace) in the target when missing, updates an object that already
// exists in the target, but leaves pre-existing Global and Global-Hierarchy objects
// in the target untouched.
func Test_createTargetObject(t *testing.T) {
	type args struct {
		fromProxy Proxy // fake source cluster
		toProxy   Proxy // fake target cluster
		node      *node // graph node identifying the object to be created in the target
	}

	tests := []struct {
		name string
		args args
		// want asserts the state of the target cluster after createTargetObject returns.
		want    func(*WithT, client.Client)
		wantErr bool
	}{
		{
			name: "fails if the object is missing from the source",
			args: args{
				fromProxy: test.NewFakeProxy(),
				toProxy: test.NewFakeProxy().WithObjs(
					&clusterv1.Cluster{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "foo",
							Namespace: "ns1",
						},
					},
				),
				node: &node{
					identity: corev1.ObjectReference{
						Kind:       "Cluster",
						Namespace:  "ns1",
						Name:       "foo",
						APIVersion: "cluster.x-k8s.io/v1beta1",
					},
				},
			},
			wantErr: true,
		},
		{
			name: "creates the object with owner references if not exists",
			args: args{
				fromProxy: test.NewFakeProxy().WithObjs(
					&clusterv1.Cluster{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "foo",
							Namespace: "ns1",
						},
					},
				),
				toProxy: test.NewFakeProxy(),
				node: &node{
					identity: corev1.ObjectReference{
						Kind:       "Cluster",
						Namespace:  "ns1",
						Name:       "foo",
						APIVersion: "cluster.x-k8s.io/v1beta1",
					},
					// The node carries an owner with Controller=true; the created object
					// must get a matching ownerReference in the target.
					owners: map[*node]ownerReferenceAttributes{
						{
							identity: corev1.ObjectReference{
								Kind:       "Something",
								Namespace:  "ns1",
								Name:       "bar",
								APIVersion: "cluster.x-k8s.io/v1beta1",
							},
						}: {
							Controller: ptr.To(true),
						},
					},
				},
			},
			want: func(g *WithT, toClient client.Client) {
				// The hosting namespace must have been created in the target too.
				ns := &corev1.Namespace{}
				nsKey := client.ObjectKey{
					Name: "ns1",
				}
				g.Expect(toClient.Get(context.Background(), nsKey, ns)).To(Succeed())
				c := &clusterv1.Cluster{}
				key := client.ObjectKey{
					Namespace: "ns1",
					Name:      "foo",
				}
				g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred())
				g.Expect(c.OwnerReferences).To(HaveLen(1))
				g.Expect(c.OwnerReferences[0].Controller).To(Equal(ptr.To(true)))
			},
		},
		{
			name: "updates the object if it already exists and the object is not Global/GlobalHierarchy",
			args: args{
				fromProxy: test.NewFakeProxy().WithObjs(
					&clusterv1.Cluster{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "foo",
							Namespace: "ns1",
						},
					},
				),
				// The target already has the object, with an extra annotation that must
				// be overwritten by the (annotation-less) source version.
				toProxy: test.NewFakeProxy().WithObjs(
					&clusterv1.Cluster{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "foo",
							Namespace:   "ns1",
							Annotations: map[string]string{"foo": "bar"},
						},
					},
				),
				node: &node{
					identity: corev1.ObjectReference{
						Kind:       "Cluster",
						Namespace:  "ns1",
						Name:       "foo",
						APIVersion: "cluster.x-k8s.io/v1beta1",
					},
				},
			},
			want: func(g *WithT, toClient client.Client) {
				c := &clusterv1.Cluster{}
				key := client.ObjectKey{
					Namespace: "ns1",
					Name:      "foo",
				}
				g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred())
				// Annotations gone: the source version replaced the target version.
				g.Expect(c.Annotations).To(BeEmpty())
			},
		},
		{
			name: "should not update Global objects",
			args: args{
				fromProxy: test.NewFakeProxy().WithObjs(
					&infrastructure.GenericClusterInfrastructureIdentity{
						ObjectMeta: metav1.ObjectMeta{
							Name: "foo",
						},
					},
				),
				toProxy: test.NewFakeProxy().WithObjs(
					&infrastructure.GenericClusterInfrastructureIdentity{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "foo",
							Annotations: map[string]string{"foo": "bar"},
						},
					},
				),
				node: &node{
					identity: corev1.ObjectReference{
						Kind:       "GenericClusterInfrastructureIdentity",
						Name:       "foo",
						APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
					},
					isGlobal: true,
				},
			},
			want: func(g *WithT, toClient client.Client) {
				c := &infrastructure.GenericClusterInfrastructureIdentity{}
				key := client.ObjectKey{
					Name: "foo",
				}
				g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred())
				// Annotations still present: the pre-existing Global object was not updated.
				g.Expect(c.Annotations).ToNot(BeEmpty())
			},
		},
		{
			name: "should not update Global Hierarchy objects",
			args: args{
				fromProxy: test.NewFakeProxy().WithObjs(
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "foo",
							Namespace: "ns1",
						},
					},
				),
				toProxy: test.NewFakeProxy().WithObjs(
					&corev1.Secret{
						ObjectMeta: metav1.ObjectMeta{
							Name:        "foo",
							Namespace:   "ns1",
							Annotations: map[string]string{"foo": "bar"},
						},
					},
				),
				node: &node{
					identity: corev1.ObjectReference{
						Kind:       "Secret",
						Namespace:  "ns1",
						Name:       "foo",
						APIVersion: "v1",
					},
					isGlobalHierarchy: true,
				},
			},
			want: func(g *WithT, toClient client.Client) {
				c := &corev1.Secret{}
				key := client.ObjectKey{
					Namespace: "ns1",
					Name:      "foo",
				}
				g.Expect(toClient.Get(context.Background(), key, c)).ToNot(HaveOccurred())
				// Annotations still present: the pre-existing Global-Hierarchy object was not updated.
				g.Expect(c.Annotations).ToNot(BeEmpty())
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			ctx := context.Background()

			mover := objectMover{
				fromProxy: tt.args.fromProxy,
			}

			err := mover.createTargetObject(ctx, tt.args.node, tt.args.toProxy, nil, sets.New[string]())
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())

			toClient, err := tt.args.toProxy.NewClient(ctx)
			g.Expect(err).ToNot(HaveOccurred())

			tt.want(g, toClient)
		})
	}
}
  2180  
  2181  func Test_deleteSourceObject(t *testing.T) {
  2182  	type args struct {
  2183  		fromProxy Proxy
  2184  		node      *node
  2185  	}
  2186  
  2187  	tests := []struct {
  2188  		name string
  2189  		args args
  2190  		want func(*WithT, client.Client)
  2191  	}{
  2192  		{
  2193  			name: "no op if the object is already deleted from source",
  2194  			args: args{
  2195  				fromProxy: test.NewFakeProxy(),
  2196  				node: &node{
  2197  					identity: corev1.ObjectReference{
  2198  						Kind:       "Cluster",
  2199  						Namespace:  "ns1",
  2200  						Name:       "foo",
  2201  						APIVersion: "cluster.x-k8s.io/v1beta1",
  2202  					},
  2203  				},
  2204  			},
  2205  			want: func(g *WithT, toClient client.Client) {
  2206  				c := &clusterv1.Cluster{}
  2207  				key := client.ObjectKey{
  2208  					Namespace: "ns1",
  2209  					Name:      "foo",
  2210  				}
  2211  				g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue())
  2212  			},
  2213  		},
  2214  		{
  2215  			name: "deletes from source if the object is not is not Global/GlobalHierarchy",
  2216  			args: args{
  2217  				fromProxy: test.NewFakeProxy().WithObjs(
  2218  					&clusterv1.Cluster{
  2219  						ObjectMeta: metav1.ObjectMeta{
  2220  							Name:      "foo",
  2221  							Namespace: "ns1",
  2222  						},
  2223  					},
  2224  				),
  2225  				node: &node{
  2226  					identity: corev1.ObjectReference{
  2227  						Kind:       "Cluster",
  2228  						Namespace:  "ns1",
  2229  						Name:       "foo",
  2230  						APIVersion: "cluster.x-k8s.io/v1beta1",
  2231  					},
  2232  				},
  2233  			},
  2234  			want: func(g *WithT, toClient client.Client) {
  2235  				c := &clusterv1.Cluster{}
  2236  				key := client.ObjectKey{
  2237  					Namespace: "ns1",
  2238  					Name:      "foo",
  2239  				}
  2240  				g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue())
  2241  			},
  2242  		},
  2243  		{
  2244  			name: "does not delete from source if the object is not is Global",
  2245  			args: args{
  2246  				fromProxy: test.NewFakeProxy().WithObjs(
  2247  					&infrastructure.GenericClusterInfrastructureIdentity{
  2248  						ObjectMeta: metav1.ObjectMeta{
  2249  							Name: "foo",
  2250  						},
  2251  					},
  2252  				),
  2253  				node: &node{
  2254  					identity: corev1.ObjectReference{
  2255  						Kind:       "GenericClusterInfrastructureIdentity",
  2256  						Name:       "foo",
  2257  						APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2258  					},
  2259  					isGlobal: true,
  2260  				},
  2261  			},
  2262  			want: func(g *WithT, toClient client.Client) {
  2263  				c := &clusterv1.Cluster{}
  2264  				key := client.ObjectKey{
  2265  					Namespace: "ns1",
  2266  					Name:      "foo",
  2267  				}
  2268  				g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue())
  2269  			},
  2270  		},
  2271  		{
  2272  			name: "does not delete from source if the object is not is Global Hierarchy",
  2273  			args: args{
  2274  				fromProxy: test.NewFakeProxy().WithObjs(
  2275  					&corev1.Secret{
  2276  						ObjectMeta: metav1.ObjectMeta{
  2277  							Name:      "foo",
  2278  							Namespace: "ns1",
  2279  						},
  2280  					},
  2281  				),
  2282  				node: &node{
  2283  					identity: corev1.ObjectReference{
  2284  						Kind:       "Secret",
  2285  						Namespace:  "ns1",
  2286  						Name:       "foo",
  2287  						APIVersion: "v1",
  2288  					},
  2289  					isGlobalHierarchy: true,
  2290  				},
  2291  			},
  2292  			want: func(g *WithT, toClient client.Client) {
  2293  				c := &clusterv1.Cluster{}
  2294  				key := client.ObjectKey{
  2295  					Namespace: "ns1",
  2296  					Name:      "foo",
  2297  				}
  2298  				g.Expect(apierrors.IsNotFound(toClient.Get(context.Background(), key, c))).To(BeTrue())
  2299  			},
  2300  		},
  2301  	}
  2302  
  2303  	for _, tt := range tests {
  2304  		t.Run(tt.name, func(t *testing.T) {
  2305  			g := NewWithT(t)
  2306  
  2307  			ctx := context.Background()
  2308  
  2309  			mover := objectMover{
  2310  				fromProxy: tt.args.fromProxy,
  2311  			}
  2312  
  2313  			err := mover.deleteSourceObject(ctx, tt.args.node)
  2314  			g.Expect(err).ToNot(HaveOccurred())
  2315  
  2316  			fromClient, err := tt.args.fromProxy.NewClient(ctx)
  2317  			g.Expect(err).ToNot(HaveOccurred())
  2318  
  2319  			tt.want(g, fromClient)
  2320  		})
  2321  	}
  2322  }
  2323  
  2324  func TestWaitReadyForMove(t *testing.T) {
  2325  	tests := []struct {
  2326  		name        string
  2327  		moveBlocked bool
  2328  		doUnblock   bool
  2329  		wantErr     bool
  2330  	}{
  2331  		{
  2332  			name:        "moving blocked cluster should fail",
  2333  			moveBlocked: true,
  2334  			wantErr:     true,
  2335  		},
  2336  		{
  2337  			name:        "moving unblocked cluster should succeed",
  2338  			moveBlocked: false,
  2339  			wantErr:     false,
  2340  		},
  2341  		{
  2342  			name:        "moving blocked cluster that is eventually unblocked should succeed",
  2343  			moveBlocked: true,
  2344  			doUnblock:   true,
  2345  			wantErr:     false,
  2346  		},
  2347  	}
  2348  
  2349  	for _, tt := range tests {
  2350  		t.Run(tt.name, func(t *testing.T) {
  2351  			g := NewWithT(t)
  2352  
  2353  			clusterName := "foo"
  2354  			clusterNamespace := "ns1"
  2355  			objs := test.NewFakeCluster(clusterNamespace, clusterName).Objs()
  2356  
  2357  			ctx := context.Background()
  2358  
  2359  			// Create an objectGraph bound a source cluster with all the CRDs for the types involved in the test.
  2360  			graph := getObjectGraphWithObjs(objs)
  2361  
  2362  			if tt.moveBlocked {
  2363  				c, err := graph.proxy.NewClient(ctx)
  2364  				g.Expect(err).NotTo(HaveOccurred())
  2365  
  2366  				cluster := &clusterv1.Cluster{}
  2367  				err = c.Get(ctx, types.NamespacedName{Namespace: clusterNamespace, Name: clusterName}, cluster)
  2368  				g.Expect(err).NotTo(HaveOccurred())
  2369  				anns := cluster.GetAnnotations()
  2370  				if anns == nil {
  2371  					anns = make(map[string]string)
  2372  				}
  2373  				anns[clusterctlv1.BlockMoveAnnotation] = "anything"
  2374  				cluster.SetAnnotations(anns)
  2375  
  2376  				g.Expect(c.Update(ctx, cluster)).To(Succeed())
  2377  
  2378  				if tt.doUnblock {
  2379  					go func() {
  2380  						time.Sleep(50 * time.Millisecond)
  2381  						delete(cluster.Annotations, clusterctlv1.BlockMoveAnnotation)
  2382  						g.Expect(c.Update(ctx, cluster)).To(Succeed())
  2383  					}()
  2384  				}
  2385  			}
  2386  
  2387  			// Get all the types to be considered for discovery
  2388  			g.Expect(graph.getDiscoveryTypes(ctx)).To(Succeed())
  2389  
  2390  			// trigger discovery the content of the source cluster
  2391  			g.Expect(graph.Discovery(ctx, "")).To(Succeed())
  2392  
  2393  			backoff := wait.Backoff{
  2394  				Steps: 1,
  2395  			}
  2396  			if tt.doUnblock {
  2397  				backoff = wait.Backoff{
  2398  					Duration: 20 * time.Millisecond,
  2399  					Steps:    10,
  2400  				}
  2401  			}
  2402  			err := waitReadyForMove(ctx, graph.proxy, graph.getMoveNodes(), false, backoff)
  2403  			if tt.wantErr {
  2404  				g.Expect(err).To(HaveOccurred())
  2405  			} else {
  2406  				g.Expect(err).NotTo(HaveOccurred())
  2407  			}
  2408  		})
  2409  	}
  2410  }