sigs.k8s.io/cluster-api-provider-azure@v1.17.0/main.go

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package main
    18  
    19  import (
    20  	"context"
    21  	"flag"
    22  	"fmt"
    23  	"os"
    24  	"time"
    25  
    26  	// +kubebuilder:scaffold:imports
    27  	asocontainerservicev1api20210501 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20210501"
    28  	asocontainerservicev1api20230201 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230201"
    29  	asocontainerservicev1api20230202preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
    30  	asocontainerservicev1api20230315preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230315preview"
    31  	asocontainerservicev1api20231001 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
    32  	asocontainerservicev1api20231102preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
    33  	asocontainerservicev1api20240402preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20240402preview"
    34  	asokubernetesconfigurationv1 "github.com/Azure/azure-service-operator/v2/api/kubernetesconfiguration/v1api20230501"
    35  	asonetworkv1api20201101 "github.com/Azure/azure-service-operator/v2/api/network/v1api20201101"
    36  	asonetworkv1api20220701 "github.com/Azure/azure-service-operator/v2/api/network/v1api20220701"
    37  	asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601"
    38  	"github.com/spf13/pflag"
    39  	corev1 "k8s.io/api/core/v1"
    40  	"k8s.io/apimachinery/pkg/runtime"
    41  	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    42  	"k8s.io/client-go/tools/leaderelection/resourcelock"
    43  	cgrecord "k8s.io/client-go/tools/record"
    44  	"k8s.io/klog/v2"
    45  	infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1"
    46  	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
    47  	"sigs.k8s.io/cluster-api-provider-azure/controllers"
    48  	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
    49  	infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers"
    50  	"sigs.k8s.io/cluster-api-provider-azure/feature"
    51  	"sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
    52  	"sigs.k8s.io/cluster-api-provider-azure/pkg/ot"
    53  	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
    54  	"sigs.k8s.io/cluster-api-provider-azure/version"
    55  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    56  	kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
    57  	"sigs.k8s.io/cluster-api/controllers/remote"
    58  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    59  	capifeature "sigs.k8s.io/cluster-api/feature"
    60  	"sigs.k8s.io/cluster-api/util/flags"
    61  	"sigs.k8s.io/cluster-api/util/record"
    62  	ctrl "sigs.k8s.io/controller-runtime"
    63  	"sigs.k8s.io/controller-runtime/pkg/cache"
    64  	"sigs.k8s.io/controller-runtime/pkg/client"
    65  	"sigs.k8s.io/controller-runtime/pkg/controller"
    66  	"sigs.k8s.io/controller-runtime/pkg/manager"
    67  	"sigs.k8s.io/controller-runtime/pkg/webhook"
    68  )
    69  
    70  var (
    71  	scheme   = runtime.NewScheme()
    72  	setupLog = ctrl.Log.WithName("setup")
    73  )
    74  
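         // init registers every API type the manager reads or writes into the shared
         // scheme: core Kubernetes types, CAPZ and Cluster API types, and the Azure
         // Service Operator (ASO) resource versions used for AKS managed clusters.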
    75  func init() {
    76  	_ = clientgoscheme.AddToScheme(scheme)
    77  	_ = infrav1.AddToScheme(scheme)
    78  	_ = infrav1exp.AddToScheme(scheme)
    79  	_ = infrav1alpha.AddToScheme(scheme)
    80  	_ = clusterv1.AddToScheme(scheme)
    81  	_ = expv1.AddToScheme(scheme)
    82  	_ = kubeadmv1.AddToScheme(scheme)
    83  	_ = asoresourcesv1.AddToScheme(scheme)
    84  	_ = asocontainerservicev1api20210501.AddToScheme(scheme)
    85  	_ = asocontainerservicev1api20230201.AddToScheme(scheme)
    86  	_ = asocontainerservicev1api20231001.AddToScheme(scheme)
    87  	_ = asonetworkv1api20220701.AddToScheme(scheme)
    88  	_ = asonetworkv1api20201101.AddToScheme(scheme)
    89  	_ = asocontainerservicev1api20230202preview.AddToScheme(scheme)
    90  	_ = asocontainerservicev1api20230315preview.AddToScheme(scheme)
    91  	_ = asocontainerservicev1api20231102preview.AddToScheme(scheme)
    92  	_ = asocontainerservicev1api20240402preview.AddToScheme(scheme)
    93  	_ = asokubernetesconfigurationv1.AddToScheme(scheme)
    94  	// +kubebuilder:scaffold:scheme
    95  }
    96  
    97  var (
    98  	enableLeaderElection               bool
    99  	leaderElectionNamespace            string
   100  	leaderElectionLeaseDuration        time.Duration
   101  	leaderElectionRenewDeadline        time.Duration
   102  	leaderElectionRetryPeriod          time.Duration
   103  	watchNamespace                     string
   104  	watchFilterValue                   string
   105  	profilerAddress                    string
   106  	azureClusterConcurrency            int
   107  	azureMachineConcurrency            int
   108  	azureMachinePoolConcurrency        int
   109  	azureMachinePoolMachineConcurrency int
    110  	azureBootstrapConfigGVK            string
   111  	debouncingTimer                    time.Duration
   112  	syncPeriod                         time.Duration
   113  	healthAddr                         string
   114  	webhookPort                        int
   115  	webhookCertDir                     string
   116  	managerOptions                     = flags.ManagerOptions{}
   117  	timeouts                           reconciler.Timeouts
   118  	enableTracing                      bool
   119  )
   120  
   121  // InitFlags initializes all command-line flags.
   122  func InitFlags(fs *pflag.FlagSet) {
   123  	fs.BoolVar(
   124  		&enableLeaderElection,
   125  		"leader-elect",
   126  		false,
   127  		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.",
   128  	)
   129  
    130  	fs.StringVar(
   131  		&leaderElectionNamespace,
   132  		"leader-election-namespace",
   133  		"",
   134  		"Namespace that the controller performs leader election in. If unspecified, the controller will discover which namespace it is running in.",
   135  	)
   136  
   137  	fs.DurationVar(
   138  		&leaderElectionLeaseDuration,
   139  		"leader-elect-lease-duration",
   140  		15*time.Second,
    141  		"Duration that non-leader candidates will wait before forcing acquisition of leadership (duration string)",
   142  	)
   143  
   144  	fs.DurationVar(
   145  		&leaderElectionRenewDeadline,
   146  		"leader-elect-renew-deadline",
   147  		10*time.Second,
   148  		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)",
   149  	)
   150  
   151  	fs.DurationVar(
   152  		&leaderElectionRetryPeriod,
   153  		"leader-elect-retry-period",
   154  		2*time.Second,
   155  		"Duration the LeaderElector clients should wait between tries of actions (duration string)",
   156  	)
   157  
   158  	fs.StringVar(
   159  		&watchNamespace,
   160  		"namespace",
   161  		"",
   162  		"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.",
   163  	)
   164  
   165  	fs.StringVar(
   166  		&watchFilterValue,
   167  		"watch-filter",
   168  		"",
   169  		fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel),
   170  	)
   171  
   172  	fs.StringVar(
   173  		&profilerAddress,
   174  		"profiler-address",
   175  		"",
   176  		"Bind address to expose the pprof profiler (e.g. localhost:6060)",
   177  	)
   178  
   179  	fs.IntVar(&azureClusterConcurrency,
   180  		"azurecluster-concurrency",
   181  		10,
   182  		"Number of AzureClusters to process simultaneously",
   183  	)
   184  
   185  	fs.IntVar(&azureMachineConcurrency,
   186  		"azuremachine-concurrency",
   187  		10,
   188  		"Number of AzureMachines to process simultaneously",
   189  	)
   190  
   191  	fs.IntVar(&azureMachinePoolConcurrency,
   192  		"azuremachinepool-concurrency",
   193  		10,
   194  		"Number of AzureMachinePools to process simultaneously")
   195  
   196  	fs.IntVar(&azureMachinePoolMachineConcurrency,
   197  		"azuremachinepoolmachine-concurrency",
   198  		10,
   199  		"Number of AzureMachinePoolMachines to process simultaneously")
   200  
   201  	fs.DurationVar(&debouncingTimer,
   202  		"debouncing-timer",
   203  		10*time.Second,
   204  		"The minimum interval the controller should wait after a successful reconciliation of a particular object before reconciling it again",
   205  	)
   206  
   207  	fs.DurationVar(&syncPeriod,
   208  		"sync-period",
   209  		10*time.Minute,
   210  		"The minimum interval at which watched resources are reconciled (e.g. 15m)",
   211  	)
   212  
   213  	fs.StringVar(&healthAddr,
   214  		"health-addr",
   215  		":9440",
   216  		"The address the health endpoint binds to.",
   217  	)
   218  
   219  	fs.IntVar(&webhookPort,
   220  		"webhook-port",
   221  		9443,
   222  		"The webhook server port the manager will listen on.",
   223  	)
   224  
   225  	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
   226  		"The webhook certificate directory, where the server should find the TLS certificate and key.")
   227  
   228  	fs.DurationVar(&timeouts.Loop,
   229  		"reconcile-timeout",
   230  		reconciler.DefaultLoopTimeout,
   231  		"The maximum duration a reconcile loop can run (e.g. 10m)",
   232  	)
   233  
   234  	fs.DurationVar(&timeouts.AzureServiceReconcile,
   235  		"service-reconcile-timeout",
   236  		reconciler.DefaultAzureServiceReconcileTimeout,
   237  		"The maximum duration each Azure service reconcile can run (e.g. 90m)",
   238  	)
   239  
   240  	fs.DurationVar(&timeouts.AzureCall,
   241  		"api-call-timeout",
   242  		reconciler.DefaultAzureCallTimeout,
    243  		"The maximum duration CAPZ will wait for each Azure API request before it is considered long running and performed asynchronously (e.g. 10s)",
   244  	)
   245  
   246  	fs.DurationVar(&timeouts.Requeue,
   247  		"reconciler-requeue",
   248  		reconciler.DefaultReconcilerRequeue,
   249  		"The duration to wait before retrying after a transient reconcile error occurs (e.g. 15s)",
   250  	)
   251  
   252  	fs.BoolVar(
   253  		&enableTracing,
   254  		"enable-tracing",
   255  		false,
   256  		"Enable tracing to the opentelemetry-collector service in the same namespace.",
   257  	)
   258  
    259  	fs.StringVar(&azureBootstrapConfigGVK,
    260  		"bootstrap-config-gvk",
    261  		"",
    262  		"Fully qualified GVK string that overrides the default kubeadm bootstrap config watch source, in the form Kind.version.group (default: KubeadmConfig.v1beta1.bootstrap.cluster.x-k8s.io)",
    263  	)
   264  
   265  	flags.AddManagerOptions(fs, &managerOptions)
   266  
   267  	feature.MutableGates.AddFlag(fs)
   268  }
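
         // An illustrative invocation of the manager with a few of the flags defined
         // above; the binary name and all values are examples, not recommendations:
         //
         //	manager --leader-elect \
         //	  --namespace=capz-system \
         //	  --azurecluster-concurrency=5 \
         //	  --sync-period=15m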
   269  
   270  // Add RBAC for the authorized diagnostics endpoint.
   271  // +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create
   272  // +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create
   273  
   274  func main() {
   275  	InitFlags(pflag.CommandLine)
   276  	klog.InitFlags(flag.CommandLine)
   277  	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
   278  	pflag.Parse()
   279  
   280  	// klog.Background will automatically use the right logger.
   281  	ctrl.SetLogger(klog.Background())
   282  
    283  	// Machine and cluster operations can create enough events to trigger the event recorder spam filter.
    284  	// Setting the burst size higher ensures all events will be recorded and submitted to the API.
   285  	broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{
   286  		BurstSize: 100,
   287  	})
   288  
   289  	tlsOptions, metricsOptions, err := flags.GetManagerOptions(managerOptions)
   290  	if err != nil {
    291  		setupLog.Error(err, "unable to start manager: invalid flags")
   292  		os.Exit(1)
   293  	}
   294  
   295  	var watchNamespaces map[string]cache.Config
   296  	if watchNamespace != "" {
    297  		setupLog.Info("Watching cluster-api objects only in the specified namespace for reconciliation", "namespace", watchNamespace)
   298  		watchNamespaces = map[string]cache.Config{
   299  			watchNamespace: {},
   300  		}
   301  	}
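
         	// When watchNamespaces is left nil, the manager's cache defaults to watching
         	// objects in all namespaces.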
   302  
   303  	restConfig := ctrl.GetConfigOrDie()
   304  	restConfig.UserAgent = "cluster-api-provider-azure-manager"
   305  	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
   306  		Scheme:                     scheme,
   307  		LeaderElection:             enableLeaderElection,
   308  		LeaderElectionID:           "controller-leader-election-capz",
   309  		LeaderElectionNamespace:    leaderElectionNamespace,
   310  		LeaseDuration:              &leaderElectionLeaseDuration,
   311  		RenewDeadline:              &leaderElectionRenewDeadline,
   312  		RetryPeriod:                &leaderElectionRetryPeriod,
   313  		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
   314  		HealthProbeBindAddress:     healthAddr,
   315  		PprofBindAddress:           profilerAddress,
   316  		Metrics:                    *metricsOptions,
   317  		Cache: cache.Options{
   318  			DefaultNamespaces: watchNamespaces,
   319  			SyncPeriod:        &syncPeriod,
   320  		},
   321  		Client: client.Options{
   322  			Cache: &client.CacheOptions{
   323  				DisableFor: []client.Object{
   324  					&corev1.ConfigMap{},
   325  					&corev1.Secret{},
   326  				},
   327  			},
   328  		},
   329  		WebhookServer: webhook.NewServer(webhook.Options{
   330  			Port:    webhookPort,
   331  			CertDir: webhookCertDir,
   332  			TLSOpts: tlsOptions,
   333  		}),
   334  		EventBroadcaster: broadcaster,
   335  	})
   336  
   337  	if err != nil {
   338  		setupLog.Error(err, "unable to start manager")
   339  		os.Exit(1)
   340  	}
   341  
   342  	// Initialize event recorder.
   343  	record.InitFromRecorder(mgr.GetEventRecorderFor("azure-controller"))
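         	// Controllers can then emit events through this package-level recorder, for
         	// example (illustrative only; "cluster" stands in for a reconciled object):
         	//
         	//	record.Eventf(cluster, "SuccessfulReconcile", "reconciled cluster %s", cluster.Name)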
   344  
    345  	// Set up the context that will be used by the controllers and the manager.
   346  	ctx := ctrl.SetupSignalHandler()
   347  
   348  	if enableTracing {
   349  		if err := ot.RegisterTracing(ctx, setupLog); err != nil {
   350  			setupLog.Error(err, "unable to initialize tracing")
   351  			os.Exit(1)
   352  		}
   353  	}
   354  
   355  	if err := ot.RegisterMetrics(); err != nil {
   356  		setupLog.Error(err, "unable to initialize metrics")
   357  		os.Exit(1)
   358  	}
   359  
   360  	registerControllers(ctx, mgr)
   361  
   362  	registerWebhooks(mgr)
   363  
   364  	// +kubebuilder:scaffold:builder
   365  	setupLog.Info("starting manager", "version", version.Get().String())
   366  	if err := mgr.Start(ctx); err != nil {
   367  		setupLog.Error(err, "problem running manager")
   368  		os.Exit(1)
   369  	}
   370  }
   371  
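         // registerControllers wires the CAPZ reconcilers into the manager, gating the
         // machine pool controllers behind the MachinePool feature flag and the ASO-based
         // managed cluster controllers behind the ASOAPI feature flag.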
   372  func registerControllers(ctx context.Context, mgr manager.Manager) {
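         	// Each coalescing request cache debounces reconciles: once an object has been
         	// reconciled successfully, further requests for it are dropped until the
         	// --debouncing-timer interval has elapsed.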
   373  	machineCache, err := coalescing.NewRequestCache(debouncingTimer)
   374  	if err != nil {
   375  		setupLog.Error(err, "failed to build machineCache ReconcileCache")
   376  	}
   377  	if err := controllers.NewAzureMachineReconciler(mgr.GetClient(),
   378  		mgr.GetEventRecorderFor("azuremachine-reconciler"),
   379  		timeouts,
   380  		watchFilterValue,
   381  	).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}, Cache: machineCache}); err != nil {
   382  		setupLog.Error(err, "unable to create controller", "controller", "AzureMachine")
   383  		os.Exit(1)
   384  	}
   385  
   386  	clusterCache, err := coalescing.NewRequestCache(debouncingTimer)
   387  	if err != nil {
   388  		setupLog.Error(err, "failed to build clusterCache ReconcileCache")
   389  	}
   390  	if err := controllers.NewAzureClusterReconciler(
   391  		mgr.GetClient(),
   392  		mgr.GetEventRecorderFor("azurecluster-reconciler"),
   393  		timeouts,
   394  		watchFilterValue,
   395  	).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: clusterCache}); err != nil {
   396  		setupLog.Error(err, "unable to create controller", "controller", "AzureCluster")
   397  		os.Exit(1)
   398  	}
   399  
   400  	if err := (&controllers.AzureJSONTemplateReconciler{
   401  		Client:           mgr.GetClient(),
   402  		Recorder:         mgr.GetEventRecorderFor("azurejsontemplate-reconciler"),
   403  		Timeouts:         timeouts,
   404  		WatchFilterValue: watchFilterValue,
   405  	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil {
   406  		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONTemplate")
   407  		os.Exit(1)
   408  	}
   409  
   410  	if err := (&controllers.AzureJSONMachineReconciler{
   411  		Client:           mgr.GetClient(),
   412  		Recorder:         mgr.GetEventRecorderFor("azurejsonmachine-reconciler"),
   413  		Timeouts:         timeouts,
   414  		WatchFilterValue: watchFilterValue,
   415  	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil {
   416  		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachine")
   417  		os.Exit(1)
   418  	}
   419  
   420  	if err := (&controllers.ASOSecretReconciler{
   421  		Client:           mgr.GetClient(),
   422  		Recorder:         mgr.GetEventRecorderFor("asosecret-reconciler"),
   423  		Timeouts:         timeouts,
   424  		WatchFilterValue: watchFilterValue,
   425  	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil {
   426  		setupLog.Error(err, "unable to create controller", "controller", "ASOSecret")
   427  		os.Exit(1)
   428  	}
   429  
    430  	// Reuse the CAPI MachinePool feature flag rather than creating a new one.
   431  	setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates))
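         	// MachinePool is a Cluster API feature gate; it is typically enabled through
         	// the feature-gates flag registered by feature.MutableGates, e.g.
         	// --feature-gates=MachinePool=true (illustrative).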
   432  	if feature.Gates.Enabled(capifeature.MachinePool) {
   433  		mpCache, err := coalescing.NewRequestCache(debouncingTimer)
   434  		if err != nil {
   435  			setupLog.Error(err, "failed to build mpCache ReconcileCache")
   436  		}
   437  
   438  		if err := infrav1controllersexp.NewAzureMachinePoolReconciler(
   439  			mgr.GetClient(),
   440  			mgr.GetEventRecorderFor("azuremachinepool-reconciler"),
   441  			timeouts,
   442  			watchFilterValue,
    443  			azureBootstrapConfigGVK,
   444  		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil {
   445  			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
   446  			os.Exit(1)
   447  		}
   448  
   449  		mpmCache, err := coalescing.NewRequestCache(debouncingTimer)
   450  		if err != nil {
   451  			setupLog.Error(err, "failed to build mpmCache ReconcileCache")
   452  		}
   453  
   454  		if err := infrav1controllersexp.NewAzureMachinePoolMachineController(
   455  			mgr.GetClient(),
   456  			mgr.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
   457  			timeouts,
   458  			watchFilterValue,
   459  		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolMachineConcurrency}, Cache: mpmCache}); err != nil {
   460  			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePoolMachine")
   461  			os.Exit(1)
   462  		}
   463  
   464  		if err := (&controllers.AzureJSONMachinePoolReconciler{
   465  			Client:           mgr.GetClient(),
   466  			Recorder:         mgr.GetEventRecorderFor("azurejsonmachinepool-reconciler"),
   467  			Timeouts:         timeouts,
   468  			WatchFilterValue: watchFilterValue,
   469  		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
   470  			setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachinePool")
   471  			os.Exit(1)
   472  		}
   473  
   474  		mmpmCache, err := coalescing.NewRequestCache(debouncingTimer)
   475  		if err != nil {
   476  			setupLog.Error(err, "failed to build mmpmCache ReconcileCache")
   477  		}
   478  
   479  		if err := controllers.NewAzureManagedMachinePoolReconciler(
   480  			mgr.GetClient(),
   481  			mgr.GetEventRecorderFor("azuremanagedmachinepoolmachine-reconciler"),
   482  			timeouts,
   483  			watchFilterValue,
   484  		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mmpmCache}); err != nil {
   485  			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool")
   486  			os.Exit(1)
   487  		}
   488  
   489  		mcCache, err := coalescing.NewRequestCache(debouncingTimer)
   490  		if err != nil {
   491  			setupLog.Error(err, "failed to build mcCache ReconcileCache")
   492  		}
   493  
   494  		if err := (&controllers.AzureManagedClusterReconciler{
   495  			Client:           mgr.GetClient(),
   496  			Recorder:         mgr.GetEventRecorderFor("azuremanagedcluster-reconciler"),
   497  			Timeouts:         timeouts,
   498  			WatchFilterValue: watchFilterValue,
   499  		}).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: mcCache}); err != nil {
   500  			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedCluster")
   501  			os.Exit(1)
   502  		}
   503  
   504  		mcpCache, err := coalescing.NewRequestCache(debouncingTimer)
   505  		if err != nil {
   506  			setupLog.Error(err, "failed to build mcpCache ReconcileCache")
   507  		}
   508  
   509  		if err := (&controllers.AzureManagedControlPlaneReconciler{
   510  			Client:           mgr.GetClient(),
   511  			Recorder:         mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"),
   512  			Timeouts:         timeouts,
   513  			WatchFilterValue: watchFilterValue,
   514  		}).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: mcpCache}); err != nil {
   515  			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedControlPlane")
   516  			os.Exit(1)
   517  		}
   518  	}
   519  
   520  	if feature.Gates.Enabled(feature.ASOAPI) {
   521  		if err := (&controllers.AzureASOManagedClusterReconciler{
   522  			Client:           mgr.GetClient(),
   523  			WatchFilterValue: watchFilterValue,
   524  		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil {
   525  			setupLog.Error(err, "unable to create controller", "controller", "AzureASOManagedCluster")
   526  			os.Exit(1)
   527  		}
   528  
   529  		if err := (&controllers.AzureASOManagedControlPlaneReconciler{
   530  			Client:           mgr.GetClient(),
   531  			WatchFilterValue: watchFilterValue,
   532  		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil {
   533  			setupLog.Error(err, "unable to create controller", "controller", "AzureASOManagedControlPlane")
   534  			os.Exit(1)
   535  		}
   536  
   537  		// The AzureASOManagedMachinePool controller reads the nodes in clusters to set provider IDs.
   538  		secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{
   539  			HTTPClient: mgr.GetHTTPClient(),
   540  			Cache: &client.CacheOptions{
   541  				Reader: mgr.GetCache(),
   542  			},
   543  		})
   544  		if err != nil {
   545  			setupLog.Error(err, "unable to create secret caching client")
   546  			os.Exit(1)
   547  		}
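         		// The cluster cache tracker maintains cached clients for workload clusters;
         		// the NodeProviderIDIndex allows looking up workload cluster Nodes by their
         		// provider ID, which the machine pool controller uses to set provider IDs.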
   548  		tracker, err := remote.NewClusterCacheTracker(
   549  			mgr,
   550  			remote.ClusterCacheTrackerOptions{
   551  				SecretCachingClient: secretCachingClient,
   552  				Log:                 &ctrl.Log,
   553  				Indexes:             []remote.Index{remote.NodeProviderIDIndex},
   554  			},
   555  		)
   556  		if err != nil {
   557  			setupLog.Error(err, "unable to create cluster cache tracker")
   558  			os.Exit(1)
   559  		}
   560  
   561  		if err := (&controllers.AzureASOManagedMachinePoolReconciler{
   562  			Client:           mgr.GetClient(),
   563  			WatchFilterValue: watchFilterValue,
   564  			Tracker:          tracker,
   565  		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
   566  			setupLog.Error(err, "unable to create controller", "controller", "AzureASOManagedMachinePool")
   567  			os.Exit(1)
   568  		}
   569  
   570  		if err := (&controllers.ManagedClusterAdoptReconciler{
   571  			Client: mgr.GetClient(),
   572  		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil {
   573  			setupLog.Error(err, "unable to create controller", "controller", "ManagedCluster")
   574  			os.Exit(1)
   575  		}
   576  
   577  		if err := (&controllers.AgentPoolAdoptReconciler{
   578  			Client: mgr.GetClient(),
   579  		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
   580  			setupLog.Error(err, "unable to create controller", "controller", "AgentPool")
   581  			os.Exit(1)
   582  		}
   583  	}
   584  }
   585  
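         // registerWebhooks registers the CAPZ defaulting and validating webhooks with
         // the manager and adds readiness and health checks for the webhook server.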
   586  func registerWebhooks(mgr manager.Manager) {
   587  	if err := (&infrav1.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil {
   588  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster")
   589  		os.Exit(1)
   590  	}
   591  
   592  	if err := (&infrav1.AzureClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
   593  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureClusterTemplate")
   594  		os.Exit(1)
   595  	}
   596  
   597  	if err := (&infrav1.AzureMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
   598  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachineTemplate")
   599  		os.Exit(1)
   600  	}
   601  
   602  	if err := (&infrav1.AzureClusterIdentity{}).SetupWebhookWithManager(mgr); err != nil {
   603  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureClusterIdentity")
   604  		os.Exit(1)
   605  	}
   606  
   607  	if err := (&infrav1exp.AzureMachinePoolMachine{}).SetupWebhookWithManager(mgr); err != nil {
   608  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePoolMachine")
   609  		os.Exit(1)
   610  	}
   611  
    612  	// NOTE: AzureManagedCluster is behind the AKS feature gate flag; the webhook
    613  	// prevents creating or updating these objects when the feature flag is disabled.
   614  	if err := (&infrav1.AzureManagedCluster{}).SetupWebhookWithManager(mgr); err != nil {
   615  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedCluster")
   616  		os.Exit(1)
   617  	}
   618  
   619  	if err := (&infrav1.AzureManagedClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
   620  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedClusterTemplate")
   621  		os.Exit(1)
   622  	}
   623  
   624  	if err := infrav1exp.SetupAzureMachinePoolWebhookWithManager(mgr); err != nil {
   625  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool")
   626  		os.Exit(1)
   627  	}
   628  
   629  	if err := infrav1.SetupAzureMachineWebhookWithManager(mgr); err != nil {
   630  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachine")
   631  		os.Exit(1)
   632  	}
   633  
   634  	if err := infrav1.SetupAzureManagedMachinePoolWebhookWithManager(mgr); err != nil {
   635  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedMachinePool")
   636  		os.Exit(1)
   637  	}
   638  
   639  	if err := infrav1.SetupAzureManagedMachinePoolTemplateWebhookWithManager(mgr); err != nil {
   640  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedMachinePoolTemplate")
   641  		os.Exit(1)
   642  	}
   643  
   644  	if err := infrav1.SetupAzureManagedControlPlaneWebhookWithManager(mgr); err != nil {
   645  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlane")
   646  		os.Exit(1)
   647  	}
   648  
   649  	if err := infrav1.SetupAzureManagedControlPlaneTemplateWebhookWithManager(mgr); err != nil {
   650  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlaneTemplate")
   651  		os.Exit(1)
   652  	}
   653  
   654  	if err := infrav1alpha.SetupAzureASOManagedClusterWebhookWithManager(mgr); err != nil {
   655  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureASOManagedCluster")
   656  		os.Exit(1)
   657  	}
   658  
   659  	if err := infrav1alpha.SetupAzureASOManagedControlPlaneWebhookWithManager(mgr); err != nil {
   660  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureASOManagedControlPlane")
   661  		os.Exit(1)
   662  	}
   663  
   664  	if err := infrav1alpha.SetupAzureASOManagedMachinePoolWebhookWithManager(mgr); err != nil {
   665  		setupLog.Error(err, "unable to create webhook", "webhook", "AzureASOManagedMachinePool")
   666  		os.Exit(1)
   667  	}
   668  
   669  	if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
   670  		setupLog.Error(err, "unable to create ready check")
   671  		os.Exit(1)
   672  	}
   673  
   674  	if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
   675  		setupLog.Error(err, "unable to create health check")
   676  		os.Exit(1)
   677  	}
   678  }