sigs.k8s.io/cluster-api-provider-azure@v1.14.3/controllers/helpers.go

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
    "context"
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "fmt"

    "github.com/go-logr/logr"
    "github.com/pkg/errors"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/equality"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/klog/v2"
    infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
    "sigs.k8s.io/cluster-api-provider-azure/azure"
    "sigs.k8s.io/cluster-api-provider-azure/azure/scope"
    "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups"
    infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
    "sigs.k8s.io/cluster-api-provider-azure/feature"
    "sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
    "sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
    "sigs.k8s.io/cluster-api-provider-azure/util/tele"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    capifeature "sigs.k8s.io/cluster-api/feature"
    "sigs.k8s.io/cluster-api/util"
    "sigs.k8s.io/cluster-api/util/conditions"
    "sigs.k8s.io/cluster-api/util/patch"
    "sigs.k8s.io/cluster-api/util/predicates"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
)

const (
    spIdentityWarning = "You are using Service Principal authentication for Cloud Provider Azure which is less secure than Managed Identity. " +
        "Your Service Principal credentials will be written to a file on the disk of each VM in order to be accessible by Cloud Provider. " +
        "To learn more, see https://capz.sigs.k8s.io/topics/identities-use-cases.html#azure-host-identity "
    deprecatedManagerCredsWarning = "You're using deprecated functionality: " +
        "Using Azure credentials from the manager environment is deprecated and will be removed in future releases. " +
        "Please specify an AzureClusterIdentity for the AzureCluster instead, see: https://capz.sigs.k8s.io/topics/multitenancy.html "
)

type (
    // Options are controller options extended with a coalescing reconcile cache.
    Options struct {
        controller.Options
        Cache *coalescing.ReconcileCache
    }

    // ClusterScoper is an interface used by AzureMachinePools that can be owned by either an AzureManagedCluster or an AzureCluster.
    ClusterScoper interface {
        azure.ClusterScoper
        groups.GroupScope
    }
)

// AzureClusterToAzureMachinesMapper creates a mapping handler to transform AzureClusters into AzureMachines. The transform
// maps an AzureCluster to its owning Cluster, collects the Machines belonging to that Cluster, and finally projects each
// Machine's infrastructure reference to an AzureMachine.
func AzureClusterToAzureMachinesMapper(ctx context.Context, c client.Client, obj runtime.Object, scheme *runtime.Scheme, log logr.Logger) (handler.MapFunc, error) {
    gvk, err := apiutil.GVKForObject(obj, scheme)
    if err != nil {
        return nil, errors.Wrap(err, "failed to find GVK for AzureMachine")
    }

    return func(ctx context.Context, o client.Object) []ctrl.Request {
        ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
        defer cancel()

        azCluster, ok := o.(*infrav1.AzureCluster)
        if !ok {
            log.Error(errors.Errorf("expected an AzureCluster, got %T instead", o), "failed to map AzureCluster")
            return nil
        }

        log := log.WithValues("AzureCluster", azCluster.Name, "Namespace", azCluster.Namespace)

        // Don't handle deleted AzureClusters
        if !azCluster.ObjectMeta.DeletionTimestamp.IsZero() {
            log.V(4).Info("AzureCluster has a deletion timestamp, skipping mapping.")
            return nil
        }

        clusterName, ok := GetOwnerClusterName(azCluster.ObjectMeta)
        if !ok {
            log.Info("unable to get the owner cluster")
            return nil
        }

        machineList := &clusterv1.MachineList{}
        machineList.SetGroupVersionKind(gvk)
        // list all of the requested objects within the cluster namespace with the cluster name label
        if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil {
            return nil
        }

        mapFunc := util.MachineToInfrastructureMapFunc(gvk)
        var results []ctrl.Request
        for _, machine := range machineList.Items {
            m := machine
            azureMachines := mapFunc(ctx, &m)
            results = append(results, azureMachines...)
        }

        return results
    }, nil
}

// GetOwnerClusterName returns the name of the owning Cluster by finding a clusterv1.Cluster in the ownership references.
func GetOwnerClusterName(obj metav1.ObjectMeta) (string, bool) {
    for _, ref := range obj.OwnerReferences {
        if ref.Kind != "Cluster" {
            continue
        }
        gv, err := schema.ParseGroupVersion(ref.APIVersion)
        if err != nil {
            return "", false
        }
        if gv.Group == clusterv1.GroupVersion.Group {
            return ref.Name, true
        }
    }
    return "", false
}
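// Illustrative usage sketch for GetOwnerClusterName (editor's addition, not in the original
// source; azCluster is a hypothetical *infrav1.AzureCluster already in scope):
//
//    if clusterName, ok := GetOwnerClusterName(azCluster.ObjectMeta); ok {
//        log.Info("owned by cluster", "cluster", clusterName)
//    }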
// GetObjectsToRequestsByNamespaceAndClusterName returns the slice of ctrl.Requests built from the items contained in the unstructured list.
func GetObjectsToRequestsByNamespaceAndClusterName(ctx context.Context, c client.Client, clusterKey client.ObjectKey, list *unstructured.UnstructuredList) []ctrl.Request {
    // list all of the requested objects within the cluster namespace with the cluster name label
    if err := c.List(ctx, list, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}); err != nil {
        return nil
    }

    results := make([]ctrl.Request, len(list.Items))
    for i, obj := range list.Items {
        results[i] = ctrl.Request{
            NamespacedName: client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()},
        }
    }
    return results
}

// referSameObject returns true if a and b point to the same object.
func referSameObject(a, b metav1.OwnerReference) bool {
    aGV, err := schema.ParseGroupVersion(a.APIVersion)
    if err != nil {
        return false
    }

    bGV, err := schema.ParseGroupVersion(b.APIVersion)
    if err != nil {
        return false
    }

    return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name
}

// GetCloudProviderSecret returns the required azure json secret for the provided parameters.
func GetCloudProviderSecret(d azure.ClusterScoper, namespace, name string, owner metav1.OwnerReference, identityType infrav1.VMIdentity, userIdentityID string) (*corev1.Secret, error) {
    secret := &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: namespace,
            Name:      fmt.Sprintf("%s-azure-json", name),
            Labels: map[string]string{
                d.ClusterName(): string(infrav1.ResourceLifecycleOwned),
            },
            OwnerReferences: []metav1.OwnerReference{owner},
        },
    }

    var controlPlaneConfig, workerNodeConfig *CloudProviderConfig

    switch identityType {
    case infrav1.VMIdentitySystemAssigned:
        controlPlaneConfig, workerNodeConfig = systemAssignedIdentityCloudProviderConfig(d)
    case infrav1.VMIdentityUserAssigned:
        if len(userIdentityID) < 1 {
            return nil, errors.New("expected a non-empty userIdentityID")
        }
        controlPlaneConfig, workerNodeConfig = userAssignedIdentityCloudProviderConfig(d, userIdentityID)
    case infrav1.VMIdentityNone:
        controlPlaneConfig, workerNodeConfig = newCloudProviderConfig(d)
    }

    // Enable VMSS Flexible nodes if MachinePools are enabled
    if feature.Gates.Enabled(capifeature.MachinePool) {
        if controlPlaneConfig != nil && controlPlaneConfig.VMType == "vmss" {
            controlPlaneConfig.EnableVmssFlexNodes = true
        }
        if workerNodeConfig != nil && workerNodeConfig.VMType == "vmss" {
            workerNodeConfig.EnableVmssFlexNodes = true
        }
    }

    controlPlaneData, err := json.MarshalIndent(controlPlaneConfig, "", " ")
    if err != nil {
        return nil, errors.Wrap(err, "failed control plane json marshal")
    }
    workerNodeData, err := json.MarshalIndent(workerNodeConfig, "", " ")
    if err != nil {
        return nil, errors.Wrap(err, "failed worker node json marshal")
    }

    secret.Data = map[string][]byte{
        "control-plane-azure.json": controlPlaneData,
        "worker-node-azure.json":   workerNodeData,
        // added for backwards compatibility
        "azure.json": controlPlaneData,
    }

    return secret, nil
}
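// Illustrative usage sketch for GetCloudProviderSecret (editor's addition; clusterScope, owner,
// cluster, machine, and r.Client are hypothetical values supplied by the calling reconciler):
//
//    secret, err := GetCloudProviderSecret(clusterScope, cluster.Namespace, machine.Name, owner, infrav1.VMIdentitySystemAssigned, "")
//    if err == nil {
//        err = reconcileAzureSecret(ctx, r.Client, owner, secret, clusterScope.ClusterName())
//    }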
func systemAssignedIdentityCloudProviderConfig(d azure.ClusterScoper) (cpConfig *CloudProviderConfig, wkConfig *CloudProviderConfig) {
    controlPlaneConfig, workerConfig := newCloudProviderConfig(d)
    controlPlaneConfig.AadClientID = ""
    controlPlaneConfig.AadClientSecret = ""
    controlPlaneConfig.UseManagedIdentityExtension = true
    workerConfig.AadClientID = ""
    workerConfig.AadClientSecret = ""
    workerConfig.UseManagedIdentityExtension = true
    return controlPlaneConfig, workerConfig
}

func userAssignedIdentityCloudProviderConfig(d azure.ClusterScoper, identityID string) (cpConfig *CloudProviderConfig, wkConfig *CloudProviderConfig) {
    controlPlaneConfig, workerConfig := newCloudProviderConfig(d)
    controlPlaneConfig.AadClientID = ""
    controlPlaneConfig.AadClientSecret = ""
    controlPlaneConfig.UseManagedIdentityExtension = true
    controlPlaneConfig.UserAssignedIdentityID = identityID
    workerConfig.AadClientID = ""
    workerConfig.AadClientSecret = ""
    workerConfig.UseManagedIdentityExtension = true
    workerConfig.UserAssignedIdentityID = identityID
    return controlPlaneConfig, workerConfig
}

func newCloudProviderConfig(d azure.ClusterScoper) (controlPlaneConfig *CloudProviderConfig, workerConfig *CloudProviderConfig) {
    subnet := getOneNodeSubnet(d)
    return (&CloudProviderConfig{
            Cloud:                        d.CloudEnvironment(),
            AadClientID:                  d.ClientID(),
            AadClientSecret:              d.ClientSecret(),
            TenantID:                     d.TenantID(),
            SubscriptionID:               d.SubscriptionID(),
            ResourceGroup:                d.ResourceGroup(),
            SecurityGroupName:            subnet.SecurityGroup.Name,
            SecurityGroupResourceGroup:   d.Vnet().ResourceGroup,
            Location:                     d.Location(),
            ExtendedLocationType:         d.ExtendedLocationType(),
            ExtendedLocationName:         d.ExtendedLocationName(),
            VMType:                       "vmss",
            VnetName:                     d.Vnet().Name,
            VnetResourceGroup:            d.Vnet().ResourceGroup,
            SubnetName:                   subnet.Name,
            RouteTableName:               subnet.RouteTable.Name,
            LoadBalancerSku:              "Standard",
            LoadBalancerName:             d.OutboundLBName(infrav1.Node),
            MaximumLoadBalancerRuleCount: 250,
            UseManagedIdentityExtension:  false,
            UseInstanceMetadata:          true,
        }).overrideFromSpec(d),
        (&CloudProviderConfig{
            Cloud:                        d.CloudEnvironment(),
            AadClientID:                  d.ClientID(),
            AadClientSecret:              d.ClientSecret(),
            TenantID:                     d.TenantID(),
            SubscriptionID:               d.SubscriptionID(),
            ResourceGroup:                d.ResourceGroup(),
            SecurityGroupName:            subnet.SecurityGroup.Name,
            SecurityGroupResourceGroup:   d.Vnet().ResourceGroup,
            Location:                     d.Location(),
            ExtendedLocationType:         d.ExtendedLocationType(),
            ExtendedLocationName:         d.ExtendedLocationName(),
            VMType:                       "vmss",
            VnetName:                     d.Vnet().Name,
            VnetResourceGroup:            d.Vnet().ResourceGroup,
            SubnetName:                   subnet.Name,
            RouteTableName:               subnet.RouteTable.Name,
            LoadBalancerSku:              "Standard",
            LoadBalancerName:             d.OutboundLBName(infrav1.Node),
            MaximumLoadBalancerRuleCount: 250,
            UseManagedIdentityExtension:  false,
            UseInstanceMetadata:          true,
        }).overrideFromSpec(d)
}

// getOneNodeSubnet returns one of the subnets for the node role.
func getOneNodeSubnet(d azure.ClusterScoper) infrav1.SubnetSpec {
    for _, subnet := range d.Subnets() {
        if subnet.Role == infrav1.SubnetNode || subnet.Role == infrav1.SubnetCluster {
            return subnet
        }
    }
    return infrav1.SubnetSpec{}
}
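// For orientation (editor's addition): a trimmed control-plane azure.json rendered from the
// config above looks roughly like the following. Every value is a placeholder, not a real
// default; the full output carries one key per json tag on CloudProviderConfig below.
//
//    {
//        "cloud": "AzurePublicCloud",
//        "tenantId": "<tenant-id>",
//        "subscriptionId": "<subscription-id>",
//        "resourceGroup": "my-rg",
//        "vmType": "vmss",
//        "loadBalancerSku": "Standard",
//        "maximumLoadBalancerRuleCount": 250,
//        "useManagedIdentityExtension": false,
//        "useInstanceMetadata": true
//    }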
// CloudProviderConfig is an abbreviated version of the same struct in k/k.
type CloudProviderConfig struct {
    Cloud                        string `json:"cloud"`
    TenantID                     string `json:"tenantId"`
    SubscriptionID               string `json:"subscriptionId"`
    AadClientID                  string `json:"aadClientId,omitempty"`
    AadClientSecret              string `json:"aadClientSecret,omitempty"`
    ResourceGroup                string `json:"resourceGroup"`
    SecurityGroupName            string `json:"securityGroupName"`
    SecurityGroupResourceGroup   string `json:"securityGroupResourceGroup"`
    Location                     string `json:"location"`
    ExtendedLocationType         string `json:"extendedLocationType,omitempty"`
    ExtendedLocationName         string `json:"extendedLocationName,omitempty"`
    VMType                       string `json:"vmType"`
    VnetName                     string `json:"vnetName"`
    VnetResourceGroup            string `json:"vnetResourceGroup"`
    SubnetName                   string `json:"subnetName"`
    RouteTableName               string `json:"routeTableName"`
    LoadBalancerSku              string `json:"loadBalancerSku"`
    LoadBalancerName             string `json:"loadBalancerName"`
    MaximumLoadBalancerRuleCount int    `json:"maximumLoadBalancerRuleCount"`
    UseManagedIdentityExtension  bool   `json:"useManagedIdentityExtension"`
    UseInstanceMetadata          bool   `json:"useInstanceMetadata"`
    EnableVmssFlexNodes          bool   `json:"enableVmssFlexNodes,omitempty"`
    UserAssignedIdentityID       string `json:"userAssignedIdentityID,omitempty"`
    CloudProviderRateLimitConfig
    BackOffConfig
}

// overrideFromSpec overrides cloud provider config with the values provided in the cluster spec.
func (cpc *CloudProviderConfig) overrideFromSpec(d azure.ClusterScoper) *CloudProviderConfig {
    if d.CloudProviderConfigOverrides() == nil {
        return cpc
    }

    for _, rateLimit := range d.CloudProviderConfigOverrides().RateLimits {
        switch rateLimit.Name {
        case infrav1.DefaultRateLimit:
            cpc.RateLimitConfig = *toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.RouteRateLimit:
            cpc.RouteRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.SubnetsRateLimit:
            cpc.SubnetsRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.InterfaceRateLimit:
            cpc.InterfaceRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.RouteTableRateLimit:
            cpc.RouteTableRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.LoadBalancerRateLimit:
            cpc.LoadBalancerRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.PublicIPAddressRateLimit:
            cpc.PublicIPAddressRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.SecurityGroupRateLimit:
            cpc.SecurityGroupRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.VirtualMachineRateLimit:
            cpc.VirtualMachineRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.StorageAccountRateLimit:
            cpc.StorageAccountRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.DiskRateLimit:
            cpc.DiskRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.SnapshotRateLimit:
            cpc.SnapshotRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.VirtualMachineScaleSetRateLimit:
            cpc.VirtualMachineScaleSetRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.VirtualMachineSizesRateLimit:
            cpc.VirtualMachineSizeRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        case infrav1.AvailabilitySetRateLimit:
            cpc.AvailabilitySetRateLimit = toCloudProviderRateLimitConfig(rateLimit.Config)
        }
    }

    cpc.BackOffConfig = toCloudProviderBackOffConfig(d.CloudProviderConfigOverrides().BackOffs)
    return cpc
}

// toCloudProviderRateLimitConfig converts an infrav1.RateLimitConfig to the RateLimitConfig required by the cloud provider.
func toCloudProviderRateLimitConfig(source infrav1.RateLimitConfig) *RateLimitConfig {
    rateLimitConfig := RateLimitConfig{}
    rateLimitConfig.CloudProviderRateLimit = source.CloudProviderRateLimit
    if source.CloudProviderRateLimitQPS != nil {
        rateLimitConfig.CloudProviderRateLimitQPS = float32(source.CloudProviderRateLimitQPS.AsApproximateFloat64())
    }
    rateLimitConfig.CloudProviderRateLimitBucket = source.CloudProviderRateLimitBucket
    if source.CloudProviderRateLimitQPSWrite != nil {
        rateLimitConfig.CloudProviderRateLimitQPSWrite = float32(source.CloudProviderRateLimitQPSWrite.AsApproximateFloat64())
    }
    rateLimitConfig.CloudProviderRateLimitBucketWrite = source.CloudProviderRateLimitBucketWrite
    return &rateLimitConfig
}

// CloudProviderRateLimitConfig represents the rate limiting configurations in azure cloud provider config.
// See: https://cloud-provider-azure.sigs.k8s.io/install/configs/#per-client-rate-limiting.
// This is a copy of the struct used in cloud-provider-azure: https://github.com/kubernetes-sigs/cloud-provider-azure/blob/d585c2031925b39c925624302f22f8856e29e352/pkg/provider/azure_ratelimit.go#L25
type CloudProviderRateLimitConfig struct {
    RateLimitConfig

    RouteRateLimit                  *RateLimitConfig `json:"routeRateLimit,omitempty"`
    SubnetsRateLimit                *RateLimitConfig `json:"subnetsRateLimit,omitempty"`
    InterfaceRateLimit              *RateLimitConfig `json:"interfaceRateLimit,omitempty"`
    RouteTableRateLimit             *RateLimitConfig `json:"routeTableRateLimit,omitempty"`
    LoadBalancerRateLimit           *RateLimitConfig `json:"loadBalancerRateLimit,omitempty"`
    PublicIPAddressRateLimit        *RateLimitConfig `json:"publicIPAddressRateLimit,omitempty"`
    SecurityGroupRateLimit          *RateLimitConfig `json:"securityGroupRateLimit,omitempty"`
    VirtualMachineRateLimit         *RateLimitConfig `json:"virtualMachineRateLimit,omitempty"`
    StorageAccountRateLimit         *RateLimitConfig `json:"storageAccountRateLimit,omitempty"`
    DiskRateLimit                   *RateLimitConfig `json:"diskRateLimit,omitempty"`
    SnapshotRateLimit               *RateLimitConfig `json:"snapshotRateLimit,omitempty"`
    VirtualMachineScaleSetRateLimit *RateLimitConfig `json:"virtualMachineScaleSetRateLimit,omitempty"`
    VirtualMachineSizeRateLimit     *RateLimitConfig `json:"virtualMachineSizesRateLimit,omitempty"`
    AvailabilitySetRateLimit        *RateLimitConfig `json:"availabilitySetRateLimit,omitempty"`
}

// RateLimitConfig indicates the rate limit config options.
// This is a copy of the struct used in cloud-provider-azure: https://github.com/kubernetes-sigs/cloud-provider-azure/blob/d585c2031925b39c925624302f22f8856e29e352/pkg/azureclients/azure_client_config.go#L48
type RateLimitConfig struct {
    CloudProviderRateLimit            bool    `json:"cloudProviderRateLimit,omitempty"`
    CloudProviderRateLimitQPS         float32 `json:"cloudProviderRateLimitQPS,omitempty"`
    CloudProviderRateLimitBucket      int     `json:"cloudProviderRateLimitBucket,omitempty"`
    CloudProviderRateLimitQPSWrite    float32 `json:"cloudProviderRateLimitQPSWrite,omitempty"`
    CloudProviderRateLimitBucketWrite int     `json:"cloudProviderRateLimitBucketWrite,omitempty"`
}
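// Illustrative conversion sketch (editor's addition, assuming the infrav1 QPS fields are
// *resource.Quantity, which is what the AsApproximateFloat64 calls above imply):
//
//    qps := resource.MustParse("1500m") // 1.5 QPS
//    rl := toCloudProviderRateLimitConfig(infrav1.RateLimitConfig{
//        CloudProviderRateLimit:       true,
//        CloudProviderRateLimitQPS:    &qps,
//        CloudProviderRateLimitBucket: 10,
//    })
//    // rl.CloudProviderRateLimitQPS is approximately 1.5; rl.CloudProviderRateLimitBucket is 10.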
// BackOffConfig indicates the back-off config options.
// This is a copy of the struct used in cloud-provider-azure: https://github.com/kubernetes-sigs/cloud-provider-azure/blob/d585c2031925b39c925624302f22f8856e29e352/pkg/azureclients/azure_client_config.go#L48
type BackOffConfig struct {
    CloudProviderBackoff         bool    `json:"cloudProviderBackoff,omitempty"`
    CloudProviderBackoffRetries  int     `json:"cloudProviderBackoffRetries,omitempty"`
    CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent,omitempty"`
    CloudProviderBackoffDuration int     `json:"cloudProviderBackoffDuration,omitempty"`
    CloudProviderBackoffJitter   float64 `json:"cloudProviderBackoffJitter,omitempty"`
}

// toCloudProviderBackOffConfig converts an infrav1.BackOffConfig to the BackOffConfig required by the cloud provider.
func toCloudProviderBackOffConfig(source infrav1.BackOffConfig) BackOffConfig {
    backOffConfig := BackOffConfig{}
    backOffConfig.CloudProviderBackoff = source.CloudProviderBackoff
    if source.CloudProviderBackoffExponent != nil {
        backOffConfig.CloudProviderBackoffExponent = source.CloudProviderBackoffExponent.AsApproximateFloat64()
    }
    backOffConfig.CloudProviderBackoffRetries = source.CloudProviderBackoffRetries
    if source.CloudProviderBackoffJitter != nil {
        backOffConfig.CloudProviderBackoffJitter = source.CloudProviderBackoffJitter.AsApproximateFloat64()
    }
    backOffConfig.CloudProviderBackoffDuration = source.CloudProviderBackoffDuration
    return backOffConfig
}
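// Illustrative conversion sketch (editor's addition, same *resource.Quantity assumption as
// the rate-limit example above):
//
//    exp := resource.MustParse("1.5")
//    bo := toCloudProviderBackOffConfig(infrav1.BackOffConfig{
//        CloudProviderBackoff:         true,
//        CloudProviderBackoffRetries:  6,
//        CloudProviderBackoffExponent: &exp,
//        CloudProviderBackoffDuration: 60,
//    })
//    // bo.CloudProviderBackoffExponent is approximately 1.5.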
func reconcileAzureSecret(ctx context.Context, kubeclient client.Client, owner metav1.OwnerReference, newSecret *corev1.Secret, clusterName string) error {
    ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.reconcileAzureSecret")
    defer done()

    // Fetch previous secret, if it exists
    key := types.NamespacedName{
        Namespace: newSecret.Namespace,
        Name:      newSecret.Name,
    }
    old := &corev1.Secret{}
    err := kubeclient.Get(ctx, key, old)
    if err != nil && !apierrors.IsNotFound(err) {
        return errors.Wrap(err, "failed to fetch existing secret")
    }

    // Create if it wasn't found
    if apierrors.IsNotFound(err) {
        if err := kubeclient.Create(ctx, newSecret); err != nil && !apierrors.IsAlreadyExists(err) {
            return errors.Wrap(err, "failed to create secret")
        }
        return nil
    }

    tag, exists := old.Labels[clusterName]

    if !exists || tag != string(infrav1.ResourceLifecycleOwned) {
        log.V(2).Info("returning early from secret reconcile, user provided secret already exists")
        return nil
    }

    // Otherwise, check ownership and data freshness. Update as necessary.
    hasOwner := false
    for _, ownerRef := range old.OwnerReferences {
        if referSameObject(ownerRef, owner) {
            hasOwner = true
            break
        }
    }

    hasData := equality.Semantic.DeepEqual(old.Data, newSecret.Data)
    if hasData && hasOwner {
        // no update required
        log.V(2).Info("returning early from secret reconcile, no update needed")
        return nil
    }

    if !hasOwner {
        old.OwnerReferences = append(old.OwnerReferences, owner)
    }

    if !hasData {
        old.Data = newSecret.Data
    }

    log.V(2).Info("updating azure secret")
    if err := kubeclient.Update(ctx, old); err != nil {
        return errors.Wrap(err, "failed to update secret when diff was required")
    }

    log.V(2).Info("done updating secret")

    return nil
}

// GetOwnerMachinePool returns the MachinePool object owning the current resource.
func GetOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expv1.MachinePool, error) {
    ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.GetOwnerMachinePool")
    defer done()

    for _, ref := range obj.OwnerReferences {
        if ref.Kind != "MachinePool" {
            continue
        }
        gv, err := schema.ParseGroupVersion(ref.APIVersion)
        if err != nil {
            return nil, errors.WithStack(err)
        }

        if gv.Group == expv1.GroupVersion.Group {
            return GetMachinePoolByName(ctx, c, obj.Namespace, ref.Name)
        }
    }
    return nil, nil
}

// GetOwnerAzureMachinePool returns the AzureMachinePool object owning the current resource.
func GetOwnerAzureMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*infrav1exp.AzureMachinePool, error) {
    ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.GetOwnerAzureMachinePool")
    defer done()

    for _, ref := range obj.OwnerReferences {
        if ref.Kind != infrav1.AzureMachinePoolKind {
            continue
        }

        gv, err := schema.ParseGroupVersion(ref.APIVersion)
        if err != nil {
            return nil, errors.WithStack(err)
        }

        if gv.Group == infrav1exp.GroupVersion.Group {
            return GetAzureMachinePoolByName(ctx, c, obj.Namespace, ref.Name)
        }
    }
    return nil, nil
}

// GetMachinePoolByName finds and returns a MachinePool object using the specified params.
func GetMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expv1.MachinePool, error) {
    ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.GetMachinePoolByName")
    defer done()

    m := &expv1.MachinePool{}
    key := client.ObjectKey{Name: name, Namespace: namespace}
    if err := c.Get(ctx, key, m); err != nil {
        return nil, err
    }
    return m, nil
}

// GetAzureMachinePoolByName finds and returns an AzureMachinePool object using the specified params.
func GetAzureMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*infrav1exp.AzureMachinePool, error) {
    ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.GetAzureMachinePoolByName")
    defer done()

    m := &infrav1exp.AzureMachinePool{}
    key := client.ObjectKey{Name: name, Namespace: namespace}
    if err := c.Get(ctx, key, m); err != nil {
        return nil, err
    }
    return m, nil
}

// ShouldDeleteIndividualResources returns false if the resource group is managed and the whole cluster is being deleted,
// meaning that we can rely on a single resource group delete operation as opposed to deleting every individual VM resource.
func ShouldDeleteIndividualResources(ctx context.Context, cluster ClusterScoper) bool {
    ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.ShouldDeleteIndividualResources")
    defer done()

    if cluster.GetDeletionTimestamp().IsZero() {
        return true
    }

    managed, err := groups.New(cluster).IsManaged(ctx)
    // Since this is a best effort attempt to speed up delete, we don't fail the delete if we can't get the RG status.
    // Instead, take the long way and delete all resources one by one.
    return err != nil || !managed
}

// GetClusterIdentityFromRef returns the AzureClusterIdentity referenced by the AzureCluster.
func GetClusterIdentityFromRef(ctx context.Context, c client.Client, azureClusterNamespace string, ref *corev1.ObjectReference) (*infrav1.AzureClusterIdentity, error) {
    identity := &infrav1.AzureClusterIdentity{}
    if ref != nil {
        namespace := ref.Namespace
        if namespace == "" {
            namespace = azureClusterNamespace
        }
        key := client.ObjectKey{Name: ref.Name, Namespace: namespace}
        if err := c.Get(ctx, key, identity); err != nil {
            return nil, err
        }
        return identity, nil
    }
    return nil, nil
}

// deprecatedClusterIdentityFinalizer was briefly used to compute a finalizer without a hash in releases v1.5.1 and v1.4.4.
// It is kept here to ensure that we can remove it from existing clusters for backwards compatibility.
// This function should be removed in a future release.
func deprecatedClusterIdentityFinalizer(prefix, clusterNamespace, clusterName string) string {
    return fmt.Sprintf("%s/%s-%s", prefix, clusterNamespace, clusterName)
}

// clusterIdentityFinalizer generates a finalizer key.
// The finalizer key is a combination of the prefix and a hash of the cluster name and namespace.
// We use a hash to ensure that the finalizer key name is not longer than 63 characters.
func clusterIdentityFinalizer(prefix, clusterNamespace, clusterName string) string {
    hash := sha256.Sum224([]byte(fmt.Sprintf("%s-%s", clusterNamespace, clusterName)))
    return fmt.Sprintf("%s/%s", prefix, hex.EncodeToString(hash[:]))
}
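// Worked example for the two finalizer forms (editor's addition; the prefix and names are hypothetical):
//
//    deprecatedClusterIdentityFinalizer("infrastructure.cluster.x-k8s.io", "default", "my-cluster")
//    // => "infrastructure.cluster.x-k8s.io/default-my-cluster" (grows with the input names)
//    clusterIdentityFinalizer("infrastructure.cluster.x-k8s.io", "default", "my-cluster")
//    // => "infrastructure.cluster.x-k8s.io/" + 56 hex characters (SHA-224 of "default-my-cluster"),
//    //    so the name segment stays within the 63-character limit regardless of input length.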
// EnsureClusterIdentity ensures that the identity ref is allowed in the namespace and sets a finalizer.
func EnsureClusterIdentity(ctx context.Context, c client.Client, object conditions.Setter, identityRef *corev1.ObjectReference, finalizerPrefix string) error {
    name := object.GetName()
    namespace := object.GetNamespace()
    identity, err := GetClusterIdentityFromRef(ctx, c, namespace, identityRef)
    if err != nil {
        return err
    }

    if !scope.IsClusterNamespaceAllowed(ctx, c, identity.Spec.AllowedNamespaces, namespace) {
        conditions.MarkFalse(object, infrav1.NetworkInfrastructureReadyCondition, infrav1.NamespaceNotAllowedByIdentity, clusterv1.ConditionSeverityError, "")
        return errors.New("AzureClusterIdentity list of allowed namespaces doesn't include current cluster namespace")
    }

    // Remove the deprecated finalizer if it exists, and register the current finalizer immediately
    // to avoid orphaning Azure resources on delete.
    needsPatch := controllerutil.RemoveFinalizer(identity, deprecatedClusterIdentityFinalizer(finalizerPrefix, namespace, name))
    needsPatch = controllerutil.AddFinalizer(identity, clusterIdentityFinalizer(finalizerPrefix, namespace, name)) || needsPatch
    if needsPatch {
        // finalizers were added or removed, so patch the object
        identityHelper, err := patch.NewHelper(identity, c)
        if err != nil {
            return errors.Wrap(err, "failed to init patch helper")
        }
        return identityHelper.Patch(ctx, identity)
    }

    return nil
}

// RemoveClusterIdentityFinalizer removes the finalizer on an AzureClusterIdentity.
func RemoveClusterIdentityFinalizer(ctx context.Context, c client.Client, object client.Object, identityRef *corev1.ObjectReference, finalizerPrefix string) error {
    name := object.GetName()
    namespace := object.GetNamespace()
    identity, err := GetClusterIdentityFromRef(ctx, c, namespace, identityRef)
    if err != nil {
        return err
    }
    identityHelper, err := patch.NewHelper(identity, c)
    if err != nil {
        return errors.Wrap(err, "failed to init patch helper")
    }
    controllerutil.RemoveFinalizer(identity, clusterIdentityFinalizer(finalizerPrefix, namespace, name))
    err = identityHelper.Patch(ctx, identity)
    if err != nil {
        return errors.Wrap(err, "failed to patch AzureClusterIdentity")
    }
    return nil
}

// MachinePoolToInfrastructureMapFunc returns a handler.MapFunc that watches for
// MachinePool events and returns reconciliation requests for an infrastructure provider object.
func MachinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc {
    return func(ctx context.Context, o client.Object) []reconcile.Request {
        m, ok := o.(*expv1.MachinePool)
        if !ok {
            log.V(4).Info("attempt to map incorrect type", "type", fmt.Sprintf("%T", o))
            return nil
        }

        gk := gvk.GroupKind()
        ref := m.Spec.Template.Spec.InfrastructureRef
        // Return early if the GroupKind doesn't match what we expect.
        infraGK := ref.GroupVersionKind().GroupKind()
        if gk != infraGK {
            log.V(4).Info("gk does not match", "gk", gk, "infraGK", infraGK)
            return nil
        }

        return []reconcile.Request{
            {
                NamespacedName: client.ObjectKey{
                    Namespace: m.Namespace,
                    Name:      ref.Name,
                },
            },
        }
    }
}
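// Illustrative wiring sketch for MachinePoolToInfrastructureMapFunc (editor's addition; mgr, r,
// and log are hypothetical, and the builder calls assume the current controller-runtime API):
//
//    err := ctrl.NewControllerManagedBy(mgr).
//        For(&infrav1exp.AzureMachinePool{}).
//        Watches(&expv1.MachinePool{},
//            handler.EnqueueRequestsFromMapFunc(
//                MachinePoolToInfrastructureMapFunc(infrav1exp.GroupVersion.WithKind("AzureMachinePool"), log))).
//        Complete(r)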
// AzureManagedClusterToAzureManagedMachinePoolsMapper creates a mapping handler to transform AzureManagedClusters into
// AzureManagedMachinePools. The transform maps an AzureManagedCluster to its owning Cluster, collects the MachinePools
// belonging to that Cluster, and finally projects each MachinePool's infrastructure reference to an AzureManagedMachinePool.
func AzureManagedClusterToAzureManagedMachinePoolsMapper(ctx context.Context, c client.Client, scheme *runtime.Scheme, log logr.Logger) (handler.MapFunc, error) {
    gvk, err := apiutil.GVKForObject(new(infrav1.AzureManagedMachinePool), scheme)
    if err != nil {
        return nil, errors.Wrap(err, "failed to find GVK for AzureManagedMachinePool")
    }

    return func(ctx context.Context, o client.Object) []ctrl.Request {
        ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
        defer cancel()

        azCluster, ok := o.(*infrav1.AzureManagedCluster)
        if !ok {
            log.Error(errors.Errorf("expected an AzureManagedCluster, got %T instead", o), "failed to map AzureManagedCluster")
            return nil
        }

        log := log.WithValues("AzureManagedCluster", azCluster.Name, "Namespace", azCluster.Namespace)

        // Don't handle deleted AzureManagedClusters
        if !azCluster.ObjectMeta.DeletionTimestamp.IsZero() {
            log.V(4).Info("AzureManagedCluster has a deletion timestamp, skipping mapping.")
            return nil
        }

        clusterName, ok := GetOwnerClusterName(azCluster.ObjectMeta)
        if !ok {
            log.V(4).Info("unable to get the owner cluster")
            return nil
        }

        machineList := &expv1.MachinePoolList{}
        machineList.SetGroupVersionKind(gvk)
        // list all of the requested objects within the cluster namespace with the cluster name label
        if err := c.List(ctx, machineList, client.InNamespace(azCluster.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil {
            return nil
        }

        mapFunc := MachinePoolToInfrastructureMapFunc(gvk, log)
        var results []ctrl.Request
        for _, machine := range machineList.Items {
            m := machine
            azureMachines := mapFunc(ctx, &m)
            results = append(results, azureMachines...)
        }

        return results
    }, nil
}

// AzureManagedControlPlaneToAzureManagedMachinePoolsMapper creates a mapping handler to transform AzureManagedControlPlanes into
// AzureManagedMachinePools. The transform maps an AzureManagedControlPlane to its owning Cluster, collects the MachinePools
// belonging to that Cluster, and finally projects each MachinePool's infrastructure reference to an AzureManagedMachinePool.
func AzureManagedControlPlaneToAzureManagedMachinePoolsMapper(ctx context.Context, c client.Client, scheme *runtime.Scheme, log logr.Logger) (handler.MapFunc, error) {
    gvk, err := apiutil.GVKForObject(new(infrav1.AzureManagedMachinePool), scheme)
    if err != nil {
        return nil, errors.Wrap(err, "failed to find GVK for AzureManagedMachinePool")
    }

    return func(ctx context.Context, o client.Object) []ctrl.Request {
        ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
        defer cancel()

        azControlPlane, ok := o.(*infrav1.AzureManagedControlPlane)
        if !ok {
            log.Error(errors.Errorf("expected an AzureManagedControlPlane, got %T instead", o), "failed to map AzureManagedControlPlane")
            return nil
        }

        log := log.WithValues("AzureManagedControlPlane", azControlPlane.Name, "Namespace", azControlPlane.Namespace)

        // Don't handle deleted AzureManagedControlPlanes
        if !azControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
            log.V(4).Info("AzureManagedControlPlane has a deletion timestamp, skipping mapping.")
            return nil
        }

        clusterName, ok := GetOwnerClusterName(azControlPlane.ObjectMeta)
        if !ok {
            log.Info("unable to get the owner cluster")
            return nil
        }

        machineList := &expv1.MachinePoolList{}
        machineList.SetGroupVersionKind(gvk)
        // list all of the requested objects within the cluster namespace with the cluster name label
        if err := c.List(ctx, machineList, client.InNamespace(azControlPlane.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName}); err != nil {
            return nil
        }

        mapFunc := MachinePoolToInfrastructureMapFunc(gvk, log)
        var results []ctrl.Request
        for _, machine := range machineList.Items {
            m := machine
            azureMachines := mapFunc(ctx, &m)
            results = append(results, azureMachines...)
        }

        return results
    }, nil
}

// AzureManagedClusterToAzureManagedControlPlaneMapper creates a mapping handler to transform AzureManagedClusters into
// AzureManagedControlPlanes. The transform maps an AzureManagedCluster to its owning Cluster, then collects the
// Cluster's control plane infrastructure reference.
func AzureManagedClusterToAzureManagedControlPlaneMapper(ctx context.Context, c client.Client, log logr.Logger) (handler.MapFunc, error) {
    return func(ctx context.Context, o client.Object) []ctrl.Request {
        ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
        defer cancel()

        azCluster, ok := o.(*infrav1.AzureManagedCluster)
        if !ok {
            log.Error(errors.Errorf("expected an AzureManagedCluster, got %T instead", o), "failed to map AzureManagedCluster")
            return nil
        }

        log := log.WithValues("AzureManagedCluster", azCluster.Name, "Namespace", azCluster.Namespace)

        // Don't handle deleted AzureManagedClusters
        if !azCluster.ObjectMeta.DeletionTimestamp.IsZero() {
            log.V(4).Info("AzureManagedCluster has a deletion timestamp, skipping mapping.")
            return nil
        }

        cluster, err := util.GetOwnerCluster(ctx, c, azCluster.ObjectMeta)
        if err != nil {
            log.Error(err, "failed to get the owning cluster")
            return nil
        }

        if cluster == nil {
            log.Info("cluster has not set owner ref yet")
            return nil
        }

        ref := cluster.Spec.ControlPlaneRef
        if ref == nil || ref.Name == "" {
            return nil
        }

        return []ctrl.Request{
            {
                NamespacedName: types.NamespacedName{
                    Namespace: ref.Namespace,
                    Name:      ref.Name,
                },
            },
        }
    }, nil
}

// AzureManagedControlPlaneToAzureManagedClusterMapper creates a mapping handler to transform AzureManagedControlPlanes into
// AzureManagedClusters. The transform maps an AzureManagedControlPlane to its owning Cluster, then collects the
// Cluster's infrastructure reference.
func AzureManagedControlPlaneToAzureManagedClusterMapper(ctx context.Context, c client.Client, log logr.Logger) (handler.MapFunc, error) {
    return func(ctx context.Context, o client.Object) []ctrl.Request {
        ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
        defer cancel()

        azManagedControlPlane, ok := o.(*infrav1.AzureManagedControlPlane)
        if !ok {
            log.Error(errors.Errorf("expected an AzureManagedControlPlane, got %T instead", o), "failed to map AzureManagedControlPlane")
            return nil
        }

        log := log.WithValues("AzureManagedControlPlane", azManagedControlPlane.Name, "Namespace", azManagedControlPlane.Namespace)

        // Don't handle deleted AzureManagedControlPlanes
        if !azManagedControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
            log.V(4).Info("AzureManagedControlPlane has a deletion timestamp, skipping mapping.")
            return nil
        }

        cluster, err := util.GetOwnerCluster(ctx, c, azManagedControlPlane.ObjectMeta)
        if err != nil {
            log.Error(err, "failed to get the owning cluster")
            return nil
        }

        if cluster == nil {
            log.Info("cluster has not set owner ref yet")
            return nil
        }

        ref := cluster.Spec.InfrastructureRef
        if ref == nil || ref.Name == "" {
            return nil
        }

        return []ctrl.Request{
            {
                NamespacedName: types.NamespacedName{
                    Namespace: ref.Namespace,
                    Name:      ref.Name,
                },
            },
        }
    }, nil
}

// MachinePoolToAzureManagedControlPlaneMapFunc returns a handler.MapFunc that watches for
// MachinePool events and returns reconciliation requests for a control plane object.
func MachinePoolToAzureManagedControlPlaneMapFunc(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, log logr.Logger) handler.MapFunc {
    return func(ctx context.Context, o client.Object) []reconcile.Request {
        ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultMappingTimeout)
        defer cancel()

        machinePool, ok := o.(*expv1.MachinePool)
        if !ok {
            log.Info("expected a MachinePool, got wrong type", "type", fmt.Sprintf("%T", o))
            return nil
        }

        cluster, err := util.GetClusterByName(ctx, c, machinePool.ObjectMeta.Namespace, machinePool.Spec.ClusterName)
        if err != nil {
            log.Error(err, "failed to get the owning cluster")
            return nil
        }

        gk := gvk.GroupKind()
        ref := cluster.Spec.ControlPlaneRef
        if ref == nil || ref.Name == "" {
            log.Info("control plane ref is nil or empty: control plane ref not found")
            return nil
        }
        // Return early if the GroupKind doesn't match what we expect.
        controlPlaneGK := ref.GroupVersionKind().GroupKind()
        if gk != controlPlaneGK {
            // MachinePool does not correlate to an AzureManagedControlPlane, nothing to do
            return nil
        }

        controlPlaneKey := client.ObjectKey{
            Name:      ref.Name,
            Namespace: ref.Namespace,
        }
        controlPlane := &infrav1.AzureManagedControlPlane{}
        if err := c.Get(ctx, controlPlaneKey, controlPlane); err != nil {
            log.Error(err, "failed to fetch default pool reference")
            // If we get here, we might want to reconcile but aren't sure.
            // Do it anyway to be safe. Worst case we reconcile a few extra times with no-ops.
            return []reconcile.Request{
                {
                    NamespacedName: client.ObjectKey{
                        Namespace: ref.Namespace,
                        Name:      ref.Name,
                    },
                },
            }
        }

        infraMachinePoolRef := machinePool.Spec.Template.Spec.InfrastructureRef

        gv, err := schema.ParseGroupVersion(infraMachinePoolRef.APIVersion)
        if err != nil {
            log.Error(err, "failed to parse group version")
            // If we get here, we might want to reconcile but aren't sure.
            // Do it anyway to be safe. Worst case we reconcile a few extra times with no-ops.
            return []reconcile.Request{
                {
                    NamespacedName: client.ObjectKey{
                        Namespace: ref.Namespace,
                        Name:      ref.Name,
                    },
                },
            }
        }

        kindMatches := infraMachinePoolRef.Kind == "AzureManagedMachinePool"
        groupMatches := controlPlaneGK.Group == gv.Group

        ammp := &infrav1.AzureManagedMachinePool{}
        key := types.NamespacedName{Namespace: infraMachinePoolRef.Namespace, Name: infraMachinePoolRef.Name}
        if err := c.Get(ctx, key, ammp); err != nil {
            log.Error(err, fmt.Sprintf("failed to fetch AzureManagedMachinePool for MachinePool: %s", infraMachinePoolRef.Name))
            // If we get here, we might want to reconcile but aren't sure.
            // Do it anyway to be safe. Worst case we reconcile a few extra times with no-ops.
            return []reconcile.Request{
                {
                    NamespacedName: client.ObjectKey{
                        Namespace: ref.Namespace,
                        Name:      ref.Name,
                    },
                },
            }
        }

        isSystemNodePool := ammp.Spec.Mode == string(infrav1.NodePoolModeSystem)

        if groupMatches && kindMatches && isSystemNodePool {
            return []reconcile.Request{
                {
                    NamespacedName: client.ObjectKey{
                        Namespace: ref.Namespace,
                        Name:      ref.Name,
                    },
                },
            }
        }

        // By default, return nothing for a machine pool which is not a system node pool for a control plane.
        return nil
    }
}

// ClusterUpdatePauseChange returns a predicate that returns true for an update event when a cluster's
// Spec.Paused changes between any two distinct values.
func ClusterUpdatePauseChange(logger logr.Logger) predicate.Funcs {
    return predicate.Funcs{
        UpdateFunc: func(e event.UpdateEvent) bool {
            log := logger.WithValues("predicate", "ClusterUpdatePauseChange", "eventType", "update")

            oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster)
            if !ok {
                log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld))
                return false
            }
            log = log.WithValues("Cluster", klog.KObj(oldCluster))

            newCluster, ok := e.ObjectNew.(*clusterv1.Cluster)
            if !ok {
                log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectNew))
                return false
            }

            if oldCluster.Spec.Paused != newCluster.Spec.Paused {
                log.V(4).Info("Cluster paused status changed, allowing further processing")
                return true
            }

            log.V(6).Info("Cluster paused status remained the same, blocking further processing")
            return false
        },
        CreateFunc:  func(e event.CreateEvent) bool { return false },
        DeleteFunc:  func(e event.DeleteEvent) bool { return false },
        GenericFunc: func(e event.GenericEvent) bool { return false },
    }
}

// ClusterPauseChangeAndInfrastructureReady is based on ClusterUnpausedAndInfrastructureReady, but
// additionally accepts Cluster pause events.
func ClusterPauseChangeAndInfrastructureReady(log logr.Logger) predicate.Funcs {
    return predicates.Any(log, predicates.ClusterCreateInfraReady(log), predicates.ClusterUpdateInfraReady(log), ClusterUpdatePauseChange(log))
}
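// Illustrative usage sketch (editor's addition; mgr, r, mapFunc, and log are hypothetical, and
// builder refers to sigs.k8s.io/controller-runtime/pkg/builder):
//
//    err := ctrl.NewControllerManagedBy(mgr).
//        For(&infrav1.AzureCluster{}).
//        Watches(&clusterv1.Cluster{},
//            handler.EnqueueRequestsFromMapFunc(mapFunc),
//            builder.WithPredicates(ClusterPauseChangeAndInfrastructureReady(log))).
//        Complete(r)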
// GetClusterScoper returns a ClusterScoper for the given cluster using the infra ref pointing to either an AzureCluster or an AzureManagedCluster.
func GetClusterScoper(ctx context.Context, logger logr.Logger, c client.Client, cluster *clusterv1.Cluster, timeouts reconciler.Timeouts) (ClusterScoper, error) {
    infraRef := cluster.Spec.InfrastructureRef
    switch infraRef.Kind {
    case "AzureCluster":
        logger = logger.WithValues("AzureCluster", infraRef.Name)
        azureClusterName := client.ObjectKey{
            Namespace: infraRef.Namespace,
            Name:      infraRef.Name,
        }
        azureCluster := &infrav1.AzureCluster{}
        if err := c.Get(ctx, azureClusterName, azureCluster); err != nil {
            logger.V(2).Info("AzureCluster is not available yet")
            return nil, err
        }

        // Create the cluster scope
        return scope.NewClusterScope(ctx, scope.ClusterScopeParams{
            Client:       c,
            Cluster:      cluster,
            AzureCluster: azureCluster,
            Timeouts:     timeouts,
        })

    case "AzureManagedCluster":
        logger = logger.WithValues("AzureManagedCluster", infraRef.Name)
        azureManagedControlPlaneName := client.ObjectKey{
            Namespace: infraRef.Namespace,
            Name:      cluster.Spec.ControlPlaneRef.Name,
        }
        azureManagedControlPlane := &infrav1.AzureManagedControlPlane{}
        if err := c.Get(ctx, azureManagedControlPlaneName, azureManagedControlPlane); err != nil {
            logger.V(2).Info("AzureManagedControlPlane is not available yet")
            return nil, err
        }

        // Create the control plane scope
        return scope.NewManagedControlPlaneScope(ctx, scope.ManagedControlPlaneScopeParams{
            Client:       c,
            Cluster:      cluster,
            ControlPlane: azureManagedControlPlane,
            Timeouts:     timeouts,
        })
    }

    return nil, errors.Errorf("unsupported infrastructure type %q, should be AzureCluster or AzureManagedCluster", cluster.Spec.InfrastructureRef.Kind)
}

// AddBlockMoveAnnotation adds CAPI's block-move annotation and returns whether or not the annotation was added.
func AddBlockMoveAnnotation(obj metav1.Object) bool {
    annotations := obj.GetAnnotations()

    if _, exists := annotations[clusterctlv1.BlockMoveAnnotation]; exists {
        return false
    }

    if annotations == nil {
        annotations = make(map[string]string)
    }

    // this value doesn't mean anything, only the presence of the annotation matters.
    annotations[clusterctlv1.BlockMoveAnnotation] = "true"
    obj.SetAnnotations(annotations)

    return true
}

// RemoveBlockMoveAnnotation removes CAPI's block-move annotation from the object.
func RemoveBlockMoveAnnotation(obj metav1.Object) {
    azClusterAnnotations := obj.GetAnnotations()
    delete(azClusterAnnotations, clusterctlv1.BlockMoveAnnotation)
    obj.SetAnnotations(azClusterAnnotations)
}
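// Illustrative pairing sketch for the two annotation helpers above (editor's addition;
// patchHelper and azCluster are hypothetical, and the annotation only takes effect once
// the object is persisted):
//
//    if AddBlockMoveAnnotation(azCluster) {
//        _ = patchHelper.Patch(ctx, azCluster) // persist before creating dependent resources
//    }
//    // ... once the blocking work is done:
//    RemoveBlockMoveAnnotation(azCluster)
//    _ = patchHelper.Patch(ctx, azCluster)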