sigs.k8s.io/cluster-api-provider-azure@v1.14.3/main.go

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"context"
	"flag"
	"fmt"
	"net/http"
	"net/http/pprof"
	"os"
	"time"

	// +kubebuilder:scaffold:imports
	asocontainerservicev1api20230202preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
	asocontainerservicev1api20230315preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230315preview"
	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
	asokubernetesconfigurationv1 "github.com/Azure/azure-service-operator/v2/api/kubernetesconfiguration/v1api20230501"
	asonetworkv1api20201101 "github.com/Azure/azure-service-operator/v2/api/network/v1api20201101"
	asonetworkv1api20220701 "github.com/Azure/azure-service-operator/v2/api/network/v1api20220701"
	asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601"
	"github.com/spf13/pflag"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/server/routes"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	cgrecord "k8s.io/client-go/tools/record"
	"k8s.io/component-base/logs"
	"k8s.io/klog/v2"
	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
	"sigs.k8s.io/cluster-api-provider-azure/controllers"
	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
	infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers"
	"sigs.k8s.io/cluster-api-provider-azure/feature"
	"sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
	"sigs.k8s.io/cluster-api-provider-azure/pkg/ot"
	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
	"sigs.k8s.io/cluster-api-provider-azure/version"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	kubeadmv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	capifeature "sigs.k8s.io/cluster-api/feature"
	"sigs.k8s.io/cluster-api/util/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"
)

var (
	scheme   = runtime.NewScheme()
	setupLog = ctrl.Log.WithName("setup")
)
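
// init registers the client-go, Cluster API, CAPZ, and Azure Service Operator (ASO)
// API types with the scheme so the manager's clients, caches, and webhooks can
// encode and decode them.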
func init() {
	_ = clientgoscheme.AddToScheme(scheme)
	_ = infrav1.AddToScheme(scheme)
	_ = infrav1exp.AddToScheme(scheme)
	_ = clusterv1.AddToScheme(scheme)
	_ = expv1.AddToScheme(scheme)
	_ = kubeadmv1.AddToScheme(scheme)
	_ = asoresourcesv1.AddToScheme(scheme)
	_ = asocontainerservicev1.AddToScheme(scheme)
	_ = asonetworkv1api20220701.AddToScheme(scheme)
	_ = asonetworkv1api20201101.AddToScheme(scheme)
	_ = asocontainerservicev1api20230202preview.AddToScheme(scheme)
	_ = asocontainerservicev1api20230315preview.AddToScheme(scheme)
	_ = asokubernetesconfigurationv1.AddToScheme(scheme)
	// +kubebuilder:scaffold:scheme
}
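
// Command-line flag values, bound by InitFlags and consumed in main.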
var (
	enableLeaderElection               bool
	leaderElectionNamespace            string
	leaderElectionLeaseDuration        time.Duration
	leaderElectionRenewDeadline        time.Duration
	leaderElectionRetryPeriod          time.Duration
	watchNamespace                     string
	watchFilterValue                   string
	profilerAddress                    string
	azureClusterConcurrency            int
	azureMachineConcurrency            int
	azureMachinePoolConcurrency        int
	azureMachinePoolMachineConcurrency int
	debouncingTimer                    time.Duration
	syncPeriod                         time.Duration
	healthAddr                         string
	webhookPort                        int
	webhookCertDir                     string
	diagnosticsOptions                 = DiagnosticsOptions{}
	timeouts                           reconciler.Timeouts
	enableTracing                      bool
)

// InitFlags initializes all command-line flags.
func InitFlags(fs *pflag.FlagSet) {
	fs.BoolVar(
		&enableLeaderElection,
		"leader-elect",
		false,
		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.",
	)

	flag.StringVar(
		&leaderElectionNamespace,
		"leader-election-namespace",
		"",
		"Namespace that the controller performs leader election in. If unspecified, the controller will discover which namespace it is running in.",
	)

	fs.DurationVar(
		&leaderElectionLeaseDuration,
		"leader-elect-lease-duration",
		15*time.Second,
		"Interval at which non-leader candidates will wait to force acquire leadership (duration string)",
	)

	fs.DurationVar(
		&leaderElectionRenewDeadline,
		"leader-elect-renew-deadline",
		10*time.Second,
		"Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)",
	)

	fs.DurationVar(
		&leaderElectionRetryPeriod,
		"leader-elect-retry-period",
		2*time.Second,
		"Duration the LeaderElector clients should wait between tries of actions (duration string)",
	)

	fs.StringVar(
		&watchNamespace,
		"namespace",
		"",
		"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.",
	)

	fs.StringVar(
		&watchFilterValue,
		"watch-filter",
		"",
		fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel),
	)

	fs.StringVar(
		&profilerAddress,
		"profiler-address",
		"",
		"Bind address to expose the pprof profiler (e.g. localhost:6060)",
	)

	fs.IntVar(&azureClusterConcurrency,
		"azurecluster-concurrency",
		10,
		"Number of AzureClusters to process simultaneously",
	)

	fs.IntVar(&azureMachineConcurrency,
		"azuremachine-concurrency",
		10,
		"Number of AzureMachines to process simultaneously",
	)

	fs.IntVar(&azureMachinePoolConcurrency,
		"azuremachinepool-concurrency",
		10,
		"Number of AzureMachinePools to process simultaneously")

	fs.IntVar(&azureMachinePoolMachineConcurrency,
		"azuremachinepoolmachine-concurrency",
		10,
		"Number of AzureMachinePoolMachines to process simultaneously")

	fs.DurationVar(&debouncingTimer,
		"debouncing-timer",
		10*time.Second,
		"The minimum interval the controller should wait after a successful reconciliation of a particular object before reconciling it again",
	)

	fs.DurationVar(&syncPeriod,
		"sync-period",
		10*time.Minute,
		"The minimum interval at which watched resources are reconciled (e.g. 15m)",
	)

	fs.StringVar(&healthAddr,
		"health-addr",
		":9440",
		"The address the health endpoint binds to.",
	)

	fs.IntVar(&webhookPort,
		"webhook-port",
		9443,
		"The webhook server port the manager will listen on.",
	)

	fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/",
		"The webhook certificate directory, where the server should find the TLS certificate and key.")

	fs.DurationVar(&timeouts.Loop,
		"reconcile-timeout",
		reconciler.DefaultLoopTimeout,
		"The maximum duration a reconcile loop can run (e.g. 10m)",
	)

	fs.DurationVar(&timeouts.AzureServiceReconcile,
		"service-reconcile-timeout",
		reconciler.DefaultAzureServiceReconcileTimeout,
		"The maximum duration each Azure service reconcile can run (e.g. 90m)",
	)

	fs.DurationVar(&timeouts.AzureCall,
		"api-call-timeout",
		reconciler.DefaultAzureCallTimeout,
		"The maximum duration CAPZ will wait for each Azure API request before it is considered long running and performed async (e.g. 10s)",
	)

	fs.DurationVar(&timeouts.Requeue,
		"reconciler-requeue",
		reconciler.DefaultReconcilerRequeue,
		"The duration to wait before retrying after a transient reconcile error occurs (e.g. 15s)",
	)

	fs.BoolVar(
		&enableTracing,
		"enable-tracing",
		false,
		"Enable tracing to the opentelemetry-collector service in the same namespace.",
	)

	AddDiagnosticsOptions(fs, &diagnosticsOptions)

	feature.MutableGates.AddFlag(fs)
}
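
// An illustrative invocation of the manager (the binary name and flag values below
// are examples only; each flag shown is registered in InitFlags above):
//
//	manager --leader-elect \
//		--namespace=my-namespace \
//		--watch-filter=capz \
//		--sync-period=15m \
//		--enable-tracing
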
// Add RBAC for the authorized diagnostics endpoint.
// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create
// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create

func main() {
	InitFlags(pflag.CommandLine)
	klog.InitFlags(flag.CommandLine)
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()

	// klog.Background will automatically use the right logger.
	ctrl.SetLogger(klog.Background())

	// Machine and cluster operations can create enough events to trigger the event recorder spam filter.
	// Setting the burst size higher ensures all events will be recorded and submitted to the API.
	broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{
		BurstSize: 100,
	})

	diagnosticsOpts := GetDiagnosticsOptions(diagnosticsOptions)

	var watchNamespaces map[string]cache.Config
	if watchNamespace != "" {
		setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
		watchNamespaces = map[string]cache.Config{
			watchNamespace: {},
		}
	}

	restConfig := ctrl.GetConfigOrDie()
	restConfig.UserAgent = "cluster-api-provider-azure-manager"
	mgr, err := ctrl.NewManager(restConfig, ctrl.Options{
		Scheme:                     scheme,
		LeaderElection:             enableLeaderElection,
		LeaderElectionID:           "controller-leader-election-capz",
		LeaderElectionNamespace:    leaderElectionNamespace,
		LeaseDuration:              &leaderElectionLeaseDuration,
		RenewDeadline:              &leaderElectionRenewDeadline,
		RetryPeriod:                &leaderElectionRetryPeriod,
		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
		HealthProbeBindAddress:     healthAddr,
		PprofBindAddress:           profilerAddress,
		Metrics:                    diagnosticsOpts,
		Cache: cache.Options{
			DefaultNamespaces: watchNamespaces,
			SyncPeriod:        &syncPeriod,
		},
		Client: client.Options{
			Cache: &client.CacheOptions{
				DisableFor: []client.Object{
					&corev1.ConfigMap{},
					&corev1.Secret{},
				},
			},
		},
		WebhookServer: webhook.NewServer(webhook.Options{
			Port:    webhookPort,
			CertDir: webhookCertDir,
		}),
		EventBroadcaster: broadcaster,
	})

	if err != nil {
		setupLog.Error(err, "unable to start manager")
		os.Exit(1)
	}

	// Initialize event recorder.
	record.InitFromRecorder(mgr.GetEventRecorderFor("azure-controller"))

	// Setup the context that's going to be used in controllers and for the manager.
	ctx := ctrl.SetupSignalHandler()

	if enableTracing {
		if err := ot.RegisterTracing(ctx, setupLog); err != nil {
			setupLog.Error(err, "unable to initialize tracing")
			os.Exit(1)
		}
	}

	if err := ot.RegisterMetrics(); err != nil {
		setupLog.Error(err, "unable to initialize metrics")
		os.Exit(1)
	}

	registerControllers(ctx, mgr)

	registerWebhooks(mgr)

	// +kubebuilder:scaffold:builder
	setupLog.Info("starting manager", "version", version.Get().String())
	if err := mgr.Start(ctx); err != nil {
		setupLog.Error(err, "problem running manager")
		os.Exit(1)
	}
}
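
// registerControllers wires the CAPZ reconcilers into the manager. Controllers for
// MachinePool-based resources are only registered when the CAPI MachinePool feature
// gate is enabled.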
func registerControllers(ctx context.Context, mgr manager.Manager) {
	machineCache, err := coalescing.NewRequestCache(debouncingTimer)
	if err != nil {
		setupLog.Error(err, "failed to build machineCache ReconcileCache")
	}
	if err := controllers.NewAzureMachineReconciler(mgr.GetClient(),
		mgr.GetEventRecorderFor("azuremachine-reconciler"),
		timeouts,
		watchFilterValue,
	).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}, Cache: machineCache}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "AzureMachine")
		os.Exit(1)
	}

	clusterCache, err := coalescing.NewRequestCache(debouncingTimer)
	if err != nil {
		setupLog.Error(err, "failed to build clusterCache ReconcileCache")
	}
	if err := controllers.NewAzureClusterReconciler(
		mgr.GetClient(),
		mgr.GetEventRecorderFor("azurecluster-reconciler"),
		timeouts,
		watchFilterValue,
	).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: clusterCache}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "AzureCluster")
		os.Exit(1)
	}

	if err := (&controllers.AzureJSONTemplateReconciler{
		Client:           mgr.GetClient(),
		Recorder:         mgr.GetEventRecorderFor("azurejsontemplate-reconciler"),
		Timeouts:         timeouts,
		WatchFilterValue: watchFilterValue,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONTemplate")
		os.Exit(1)
	}

	if err := (&controllers.AzureJSONMachineReconciler{
		Client:           mgr.GetClient(),
		Recorder:         mgr.GetEventRecorderFor("azurejsonmachine-reconciler"),
		Timeouts:         timeouts,
		WatchFilterValue: watchFilterValue,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachineConcurrency}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachine")
		os.Exit(1)
	}

	if err := (&controllers.ASOSecretReconciler{
		Client:           mgr.GetClient(),
		Recorder:         mgr.GetEventRecorderFor("asosecret-reconciler"),
		Timeouts:         timeouts,
		WatchFilterValue: watchFilterValue,
	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "ASOSecret")
		os.Exit(1)
	}

	// just use CAPI MachinePool feature flag rather than create a new one
	setupLog.V(1).Info(fmt.Sprintf("%+v\n", feature.Gates))
	if feature.Gates.Enabled(capifeature.MachinePool) {
		mpCache, err := coalescing.NewRequestCache(debouncingTimer)
		if err != nil {
			setupLog.Error(err, "failed to build mpCache ReconcileCache")
		}

		if err := infrav1controllersexp.NewAzureMachinePoolReconciler(
			mgr.GetClient(),
			mgr.GetEventRecorderFor("azuremachinepool-reconciler"),
			timeouts,
			watchFilterValue,
		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mpCache}); err != nil {
			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePool")
			os.Exit(1)
		}

		mpmCache, err := coalescing.NewRequestCache(debouncingTimer)
		if err != nil {
			setupLog.Error(err, "failed to build mpmCache ReconcileCache")
		}

		if err := infrav1controllersexp.NewAzureMachinePoolMachineController(
			mgr.GetClient(),
			mgr.GetEventRecorderFor("azuremachinepoolmachine-reconciler"),
			timeouts,
			watchFilterValue,
		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolMachineConcurrency}, Cache: mpmCache}); err != nil {
			setupLog.Error(err, "unable to create controller", "controller", "AzureMachinePoolMachine")
			os.Exit(1)
		}

		if err := (&controllers.AzureJSONMachinePoolReconciler{
			Client:           mgr.GetClient(),
			Recorder:         mgr.GetEventRecorderFor("azurejsonmachinepool-reconciler"),
			Timeouts:         timeouts,
			WatchFilterValue: watchFilterValue,
		}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}); err != nil {
			setupLog.Error(err, "unable to create controller", "controller", "AzureJSONMachinePool")
			os.Exit(1)
		}

		mmpmCache, err := coalescing.NewRequestCache(debouncingTimer)
		if err != nil {
			setupLog.Error(err, "failed to build mmpmCache ReconcileCache")
		}

		if err := controllers.NewAzureManagedMachinePoolReconciler(
			mgr.GetClient(),
			mgr.GetEventRecorderFor("azuremanagedmachinepoolmachine-reconciler"),
			timeouts,
			watchFilterValue,
		).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureMachinePoolConcurrency}, Cache: mmpmCache}); err != nil {
			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedMachinePool")
			os.Exit(1)
		}

		mcCache, err := coalescing.NewRequestCache(debouncingTimer)
		if err != nil {
			setupLog.Error(err, "failed to build mcCache ReconcileCache")
		}

		if err := (&controllers.AzureManagedClusterReconciler{
			Client:           mgr.GetClient(),
			Recorder:         mgr.GetEventRecorderFor("azuremanagedcluster-reconciler"),
			Timeouts:         timeouts,
			WatchFilterValue: watchFilterValue,
		}).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: mcCache}); err != nil {
			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedCluster")
			os.Exit(1)
		}

		mcpCache, err := coalescing.NewRequestCache(debouncingTimer)
		if err != nil {
			setupLog.Error(err, "failed to build mcpCache ReconcileCache")
		}

		if err := (&controllers.AzureManagedControlPlaneReconciler{
			Client:           mgr.GetClient(),
			Recorder:         mgr.GetEventRecorderFor("azuremanagedcontrolplane-reconciler"),
			Timeouts:         timeouts,
			WatchFilterValue: watchFilterValue,
		}).SetupWithManager(ctx, mgr, controllers.Options{Options: controller.Options{MaxConcurrentReconciles: azureClusterConcurrency}, Cache: mcpCache}); err != nil {
			setupLog.Error(err, "unable to create controller", "controller", "AzureManagedControlPlane")
			os.Exit(1)
		}
	}
}
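
// registerWebhooks registers the CAPZ admission webhooks with the manager and adds
// readiness and liveness checks for the webhook server.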
func registerWebhooks(mgr manager.Manager) {
	if err := (&infrav1.AzureCluster{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureCluster")
		os.Exit(1)
	}

	if err := (&infrav1.AzureClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureClusterTemplate")
		os.Exit(1)
	}

	if err := (&infrav1.AzureMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachineTemplate")
		os.Exit(1)
	}

	if err := (&infrav1.AzureClusterIdentity{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureClusterIdentity")
		os.Exit(1)
	}

	if err := (&infrav1exp.AzureMachinePoolMachine{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePoolMachine")
		os.Exit(1)
	}

	// NOTE: AzureManagedCluster is behind AKS feature gate flag; the webhook
	// is going to prevent creating or updating new objects in case the feature flag is disabled
	if err := (&infrav1.AzureManagedCluster{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedCluster")
		os.Exit(1)
	}

	if err := (&infrav1.AzureManagedClusterTemplate{}).SetupWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedClusterTemplate")
		os.Exit(1)
	}

	if err := infrav1exp.SetupAzureMachinePoolWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachinePool")
		os.Exit(1)
	}

	if err := infrav1.SetupAzureMachineWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureMachine")
		os.Exit(1)
	}

	if err := infrav1.SetupAzureManagedMachinePoolWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedMachinePool")
		os.Exit(1)
	}

	if err := infrav1.SetupAzureManagedMachinePoolTemplateWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedMachinePoolTemplate")
		os.Exit(1)
	}

	if err := infrav1.SetupAzureManagedControlPlaneWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlane")
		os.Exit(1)
	}

	if err := infrav1.SetupAzureManagedControlPlaneTemplateWebhookWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create webhook", "webhook", "AzureManagedControlPlaneTemplate")
		os.Exit(1)
	}

	if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
		setupLog.Error(err, "unable to create ready check")
		os.Exit(1)
	}

	if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
		setupLog.Error(err, "unable to create health check")
		os.Exit(1)
	}
}

// DiagnosticsOptions is CAPI 1.6's (util/flags).DiagnosticsOptions.
type DiagnosticsOptions struct {
	// MetricsBindAddr
	//
	// Deprecated: This field will be removed in an upcoming release.
	MetricsBindAddr     string
	DiagnosticsAddress  string
	InsecureDiagnostics bool
}

// AddDiagnosticsOptions is CAPI 1.6's (util/flags).AddDiagnosticsOptions.
func AddDiagnosticsOptions(fs *pflag.FlagSet, options *DiagnosticsOptions) {
	fs.StringVar(&options.MetricsBindAddr, "metrics-bind-addr", "",
		"The address the metrics endpoint binds to.")
	_ = fs.MarkDeprecated("metrics-bind-addr", "Please use --diagnostics-address instead. To continue to serve "+
		"metrics via http and without authentication/authorization set --insecure-diagnostics as well.")

	fs.StringVar(&options.DiagnosticsAddress, "diagnostics-address", ":8443",
		"The address the diagnostics endpoint binds to. Per default metrics are served via https and with "+
			"authentication/authorization. To serve via http and without authentication/authorization set --insecure-diagnostics. "+
			"If --insecure-diagnostics is not set the diagnostics endpoint also serves pprof endpoints and an endpoint to change the log level.")

	fs.BoolVar(&options.InsecureDiagnostics, "insecure-diagnostics", false,
		"Enable insecure diagnostics serving. For more details see the description of --diagnostics-address.")
}

// GetDiagnosticsOptions is CAPI 1.6's (util/flags).GetDiagnosticsOptions.
func GetDiagnosticsOptions(options DiagnosticsOptions) metricsserver.Options {
	// If the deprecated "--metrics-bind-addr" flag is set, continue to serve metrics via http
	// and without authentication/authorization.
	if options.MetricsBindAddr != "" {
		return metricsserver.Options{
			BindAddress: options.MetricsBindAddr,
		}
	}

	// If "--insecure-diagnostics" is set, serve metrics via http
	// and without authentication/authorization.
	if options.InsecureDiagnostics {
		return metricsserver.Options{
			BindAddress:   options.DiagnosticsAddress,
			SecureServing: false,
		}
	}

	// If "--insecure-diagnostics" is not set, serve metrics via https
	// and with authentication/authorization. As the endpoint is protected,
	// we also serve pprof endpoints and an endpoint to change the log level.
	return metricsserver.Options{
		BindAddress:    options.DiagnosticsAddress,
		SecureServing:  true,
		FilterProvider: filters.WithAuthenticationAndAuthorization,
		ExtraHandlers: map[string]http.Handler{
			// Add handler to dynamically change log level.
			"/debug/flags/v": routes.StringFlagPutHandler(logs.GlogSetter),
			// Add pprof handler.
			"/debug/pprof/":        http.HandlerFunc(pprof.Index),
			"/debug/pprof/cmdline": http.HandlerFunc(pprof.Cmdline),
			"/debug/pprof/profile": http.HandlerFunc(pprof.Profile),
			"/debug/pprof/symbol":  http.HandlerFunc(pprof.Symbol),
			"/debug/pprof/trace":   http.HandlerFunc(pprof.Trace),
		},
	}
}