open-cluster-management.io/governance-policy-propagator@v0.13.0/main.go

// Copyright (c) 2021 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project

package main

import (
	"context"
	"crypto/tls"
	"errors"
	"flag"
	"fmt"
	"net"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"

	"github.com/go-logr/zapr"
	"github.com/spf13/pflag"
	"github.com/stolostron/go-log-utils/zaputil"
	templates "github.com/stolostron/go-template-utils/v4/pkg/templates"
	k8sdepwatches "github.com/stolostron/kubernetes-dependency-watches/client"
	corev1 "k8s.io/api/core/v1"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/fields"
	k8sruntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	_ "k8s.io/client-go/plugin/pkg/client/auth"
	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
	appsv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/source"

	//+kubebuilder:scaffold:imports
	policyv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
	policyv1beta1 "open-cluster-management.io/governance-policy-propagator/api/v1beta1"
	automationctrl "open-cluster-management.io/governance-policy-propagator/controllers/automation"
	"open-cluster-management.io/governance-policy-propagator/controllers/complianceeventsapi"
	encryptionkeysctrl "open-cluster-management.io/governance-policy-propagator/controllers/encryptionkeys"
	metricsctrl "open-cluster-management.io/governance-policy-propagator/controllers/policymetrics"
	policysetctrl "open-cluster-management.io/governance-policy-propagator/controllers/policyset"
	propagatorctrl "open-cluster-management.io/governance-policy-propagator/controllers/propagator"
	rootpolicystatusctrl "open-cluster-management.io/governance-policy-propagator/controllers/rootpolicystatus"
	"open-cluster-management.io/governance-policy-propagator/version"
)

var (
	scheme          = k8sruntime.NewScheme()
	log             = ctrl.Log.WithName("setup")
	clusterClaimGVR = schema.GroupVersionResource{
		Group:    "cluster.open-cluster-management.io",
		Version:  "v1alpha1",
		Resource: "clusterclaims",
	}
)

func printVersion() {
	log.Info(
		"Using",
		"OperatorVersion", version.Version,
		"GoVersion", runtime.Version(),
		"GOOS", runtime.GOOS,
		"GOARCH", runtime.GOARCH,
	)
}

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	utilruntime.Must(clusterv1.AddToScheme(scheme))
	utilruntime.Must(clusterv1beta1.AddToScheme(scheme))
	utilruntime.Must(appsv1.AddToScheme(scheme))

	//+kubebuilder:scaffold:scheme
	utilruntime.Must(policyv1.AddToScheme(scheme))
	utilruntime.Must(policyv1beta1.AddToScheme(scheme))
}

func main() {
	klog.InitFlags(nil)

	zflags := zaputil.FlagConfig{
		LevelName:   "log-level",
		EncoderName: "log-encoder",
	}

	zflags.Bind(flag.CommandLine)

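	// Merge the standard library flag set (where klog.InitFlags and
	// zflags.Bind registered their flags) into pflag so that a single
	// pflag.Parse call handles all command-line options.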
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)

	var (
		metricsAddr                 string
		enableLeaderElection        bool
		probeAddr                   string
		keyRotationDays             uint
		keyRotationMaxConcurrency   uint
		policyMetricsMaxConcurrency uint
		policyStatusMaxConcurrency  uint
		rootPolicyMaxConcurrency    uint
		replPolicyMaxConcurrency    uint
		enableWebhooks              bool
		complianceAPIHost           string
		complianceAPIPort           string
		complianceAPICert           string
		complianceAPIKey            string
	)

	pflag.StringVar(&metricsAddr, "metrics-bind-address", ":8383", "The address the metric endpoint binds to.")
	pflag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	pflag.BoolVar(&enableLeaderElection, "leader-elect", true,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	pflag.BoolVar(&enableWebhooks, "enable-webhooks", true,
		"Enable the policy validating webhook")
	pflag.UintVar(
		&keyRotationDays,
		"encryption-key-rotation",
		30,
		"The number of days until the policy encryption key is rotated",
	)
	pflag.UintVar(
		&keyRotationMaxConcurrency,
		"key-rotation-max-concurrency",
		10,
		"The maximum number of concurrent reconciles for the policy-encryption-keys controller",
	)
	pflag.UintVar(
		&policyMetricsMaxConcurrency,
		"policy-metrics-max-concurrency",
		5,
		"The maximum number of concurrent reconciles for the policy-metrics controller",
	)
	pflag.UintVar(
		&policyStatusMaxConcurrency,
		"policy-status-max-concurrency",
		5,
		"The maximum number of concurrent reconciles for the policy-status controller",
	)
	pflag.UintVar(
		&rootPolicyMaxConcurrency,
		"root-policy-max-concurrency",
		2,
		"The maximum number of concurrent reconciles for the root-policy controller",
	)
	pflag.UintVar(
		&replPolicyMaxConcurrency,
		"replicated-policy-max-concurrency",
		10,
		"The maximum number of concurrent reconciles for the replicated-policy controller",
	)
	pflag.StringVar(
		&complianceAPIHost, "compliance-history-api-host", "localhost",
		"The hostname that the compliance history API will listen on",
	)
	pflag.StringVar(
		&complianceAPIPort, "compliance-history-api-port", "8384",
		"The port that the compliance history API will listen on",
	)
	pflag.StringVar(
		&complianceAPICert, "compliance-history-api-cert", "",
		"The path to the certificate the compliance history API will use for HTTPS (CA cert, if any, concatenated "+
			"after server cert). If not set, HTTP will be used.",
	)
	pflag.StringVar(
		&complianceAPIKey, "compliance-history-api-key", "",
		"The path to the private key the compliance history API will use for HTTPS. If not set, HTTP will be used.",
	)

	pflag.Parse()

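	// Build one zap logger for controller-runtime and a second one for klog,
	// so output from client-go and other klog users is formatted consistently.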
	ctrlZap, err := zflags.BuildForCtrl()
	if err != nil {
		panic(fmt.Sprintf("Failed to build zap logger for controller: %v", err))
	}

	ctrl.SetLogger(zapr.NewLogger(ctrlZap))

	klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(klogFlags)

	err = zaputil.SyncWithGlogFlags(klogFlags)
	if err != nil {
		log.Error(err, "Failed to synchronize klog and glog flags, continuing with what succeeded")
	}

	klogZap, err := zaputil.BuildForKlog(zflags.GetConfig(), klogFlags)
	if err != nil {
		log.Error(err, "Failed to build zap logger for klog, those logs will not go through zap")
	} else {
		klog.SetLogger(zapr.NewLogger(klogZap).WithName("klog"))
	}

	printVersion()

	if keyRotationDays < 1 {
		log.Info("the encryption-key-rotation flag must be greater than 0")
		os.Exit(1)
	}

	if keyRotationMaxConcurrency < 1 {
		log.Info("the key-rotation-max-concurrency flag must be greater than 0")
		os.Exit(1)
	}

	namespace, err := getWatchNamespace()
	if err != nil {
		log.Error(err, "Failed to get watch namespace")
		os.Exit(1)
	}

	// Get a config to talk to the apiserver
	cfg := config.GetConfigOrDie()

	// Some default tuned values here, but can be overridden via env vars
	cfg.QPS = 200.0
	cfg.Burst = 400

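	// CONTROLLER_CONFIG_QPS and CONTROLLER_CONFIG_BURST override the client
	// rate limits above; values that fail to parse are silently ignored.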
	qpsOverride, found := os.LookupEnv("CONTROLLER_CONFIG_QPS")
	if found {
		qpsVal, err := strconv.ParseFloat(qpsOverride, 32)
		if err == nil {
			cfg.QPS = float32(qpsVal)
			log.Info("Using QPS override", "value", cfg.QPS)
		}
	}

	burstOverride, found := os.LookupEnv("CONTROLLER_CONFIG_BURST")
	if found {
		burstVal, err := strconv.Atoi(burstOverride)
		if err == nil {
			cfg.Burst = burstVal
			log.Info("Using Burst override", "value", cfg.Burst)
		}
	}

	// Set default manager options
	options := ctrl.Options{
		Namespace:                  namespace,
		Scheme:                     scheme,
		MetricsBindAddress:         metricsAddr,
		HealthProbeBindAddress:     probeAddr,
		LeaderElection:             enableLeaderElection,
		LeaderElectionID:           "policy-propagator.open-cluster-management.io",
		LeaderElectionResourceLock: "leases",
		Cache: cache.Options{
			ByObject: map[client.Object]cache.ByObject{
				// Set a field selector so that a watch on secrets will be limited to just the secret with
				// the policy template encryption key.
				&corev1.Secret{}: {
					Field: fields.SelectorFromSet(fields.Set{"metadata.name": propagatorctrl.EncryptionKeySecret}),
				},
				&clusterv1.ManagedCluster{}: {
					Transform: func(obj interface{}) (interface{}, error) {
						cluster := obj.(*clusterv1.ManagedCluster)
						// ManagedCluster objects are only used to check for their existence, to
						// determine whether a namespace is a cluster namespace.
						guttedCluster := &clusterv1.ManagedCluster{}
						guttedCluster.SetName(cluster.Name)

						return guttedCluster, nil
					},
				},
				&policyv1.Policy{}: {
					Transform: func(obj interface{}) (interface{}, error) {
						policy := obj.(*policyv1.Policy)
						// Remove unused large fields
						delete(policy.Annotations, "kubectl.kubernetes.io/last-applied-configuration")
						policy.ManagedFields = nil

						return policy, nil
					},
				},
			},
		},
	}

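	// WATCH_NAMESPACE may be a comma-separated list; in that case, restrict
	// the cache to the listed namespaces instead of a single one.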
	if strings.Contains(namespace, ",") {
		options.Cache.Namespaces = strings.Split(namespace, ",")
	}

	mgr, err := ctrl.NewManager(cfg, options)
	if err != nil {
		log.Error(err, "Unable to start manager")
		os.Exit(1)
	}

	log.Info("Registering components")

	controllerCtx := ctrl.SetupSignalHandler()

	// This is used to trigger reconciles of a policy when a policy set it depends on changes.
	dynamicWatcherReconciler, dynamicWatcherSource := k8sdepwatches.NewControllerRuntimeSource()

	dynamicWatcher, err := k8sdepwatches.New(cfg, dynamicWatcherReconciler, nil)
	if err != nil {
		log.Error(err, "Unable to create the dynamic watcher", "controller", propagatorctrl.ControllerName)
		os.Exit(1)
	}

	go func() {
		err := dynamicWatcher.Start(controllerCtx)
		if err != nil {
			log.Error(err, "Unable to start the dynamic watcher", "controller", propagatorctrl.ControllerName)
			os.Exit(1)
		}
	}()

	policiesLock := &sync.Map{}
	replicatedResourceVersions := &sync.Map{}

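	// replicatedPolicyUpdates carries events that trigger replicated-policy
	// reconciles; it is fed both by the propagator and by the compliance
	// events API (passed as reconcileRequests to startComplianceEventsAPI).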
	bufferSize := 1024

	replicatedPolicyUpdates := make(chan event.GenericEvent, bufferSize)
	replicatedUpdatesSource := &source.Channel{
		Source:         replicatedPolicyUpdates,
		DestBufferSize: bufferSize,
	}

	propagator := propagatorctrl.Propagator{
		Client:                  mgr.GetClient(),
		Scheme:                  mgr.GetScheme(),
		Recorder:                mgr.GetEventRecorderFor(propagatorctrl.ControllerName),
		RootPolicyLocks:         policiesLock,
		ReplicatedPolicyUpdates: replicatedPolicyUpdates,
	}

	if err = (&propagatorctrl.RootPolicyReconciler{
		Propagator: propagator,
	}).SetupWithManager(mgr, rootPolicyMaxConcurrency); err != nil {
		log.Error(err, "Unable to create the controller", "controller", "root-policy-spec")
		os.Exit(1)
	}

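	// The caching template resolver also returns templatesSource, which is
	// handed to the replicated-policy controller below so that (with caching
	// enabled) changes to objects referenced in templates can re-trigger
	// reconciles.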
	templateResolver, templatesSource, err := templates.NewResolverWithCaching(
		controllerCtx,
		cfg,
		templates.Config{
			AdditionalIndentation: 8,
			DisabledFunctions:     []string{},
			StartDelim:            propagatorctrl.TemplateStartDelim,
			StopDelim:             propagatorctrl.TemplateStopDelim,
		},
	)
	if err != nil {
		log.Error(err, "Unable to set up the template resolver for the controller", "controller", "replicated-policy")
		os.Exit(1)
	}

	if reportMetrics() {
		if err = (&metricsctrl.MetricReconciler{
			Client: mgr.GetClient(),
			Scheme: mgr.GetScheme(),
		}).SetupWithManager(mgr, policyMetricsMaxConcurrency); err != nil {
			log.Error(err, "Unable to create the controller", "controller", metricsctrl.ControllerName)
			os.Exit(1)
		}
	}

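	// Determine this hub's cluster ID from the id.k8s.io ClusterClaim; fall
	// back to "unknown" when the claim is missing or empty.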
	dynamicClient := dynamic.NewForConfigOrDie(mgr.GetConfig())

	var clusterID string

	idClusterClaim, err := dynamicClient.Resource(clusterClaimGVR).Get(controllerCtx, "id.k8s.io", metav1.GetOptions{})
	if err != nil && !k8serrors.IsNotFound(err) {
		log.Error(err, "Failed to find the cluster ID")

		os.Exit(1)
	}

	if err == nil {
		clusterID, _, _ = unstructured.NestedString(idClusterClaim.Object, "spec", "value")
	}

	if clusterID == "" {
		log.Info("The id.k8s.io cluster claim is not set. Using the cluster ID of unknown.")

		clusterID = "unknown"
	}

	if err = (&automationctrl.PolicyAutomationReconciler{
		Client:        mgr.GetClient(),
		DynamicClient: dynamicClient,
		Scheme:        mgr.GetScheme(),
		Recorder:      mgr.GetEventRecorderFor(automationctrl.ControllerName),
	}).SetupWithManager(mgr); err != nil {
		log.Error(err, "Unable to create the controller", "controller", automationctrl.ControllerName)
		os.Exit(1)
	}

	if err = (&policysetctrl.PolicySetReconciler{
		Client:   mgr.GetClient(),
		Scheme:   mgr.GetScheme(),
		Recorder: mgr.GetEventRecorderFor(policysetctrl.ControllerName),
	}).SetupWithManager(mgr); err != nil {
		log.Error(err, "Unable to create controller", "controller", policysetctrl.ControllerName)
		os.Exit(1)
	}

	if err = (&encryptionkeysctrl.EncryptionKeysReconciler{
		Client:          mgr.GetClient(),
		KeyRotationDays: keyRotationDays,
		Scheme:          mgr.GetScheme(),
	}).SetupWithManager(mgr, keyRotationMaxConcurrency); err != nil {
		log.Error(err, "Unable to create controller", "controller", encryptionkeysctrl.ControllerName)
		os.Exit(1)
	}

	if err = (&rootpolicystatusctrl.RootPolicyStatusReconciler{
		Client:          mgr.GetClient(),
		RootPolicyLocks: policiesLock,
		Scheme:          mgr.GetScheme(),
	}).SetupWithManager(mgr, policyStatusMaxConcurrency); err != nil {
		log.Error(err, "Unable to create controller", "controller", rootpolicystatusctrl.ControllerName)
		os.Exit(1)
	}

	if enableWebhooks {
		if err = (&policyv1.Policy{}).SetupWebhookWithManager(mgr); err != nil {
			log.Error(err, "unable to create webhook", "webhook", "Policy")
			os.Exit(1)
		}
	}

	//+kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		log.Error(err, "Unable to set up health check")
		os.Exit(1)
	}

	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		log.Error(err, "Unable to set up ready check")
		os.Exit(1)
	}

	cache := mgr.GetCache()

	// The following index for the PlacementRef Name is being added to the
	// client cache to improve the performance of querying PlacementBindings
	indexFunc := func(obj client.Object) []string {
		return []string{obj.(*policyv1.PlacementBinding).PlacementRef.Name}
	}

	if err := cache.IndexField(
		context.TODO(), &policyv1.PlacementBinding{}, "placementRef.name", indexFunc,
	); err != nil {
		panic(err)
	}

	log.Info("Waiting for the dynamic watcher to start")
	// This is important to avoid adding watches before the dynamic watcher is ready
	<-dynamicWatcher.Started()

	log.V(1).Info("Starting the compliance events API and controller")

	client := kubernetes.NewForConfigOrDie(mgr.GetConfig())

	tempDir, err := os.MkdirTemp("", "compliance-events-store")
	if err != nil {
		log.Error(err, "Failed to create a temporary directory")
		os.Exit(1)
	}

	defer func() {
		err := os.RemoveAll(tempDir)
		if err != nil {
			log.Error(err, "Failed to clean up the temporary directory", "path", tempDir)
		}
	}()

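	// Resolve the controller's own namespace: prefer the environment
	// variable, then the in-cluster service account namespace file, and
	// finally default to open-cluster-management.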
	complianceEventsNamespace, _ := os.LookupEnv(complianceeventsapi.WatchNamespaceEnvVar)
	if complianceEventsNamespace == "" {
		namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
		if err == nil {
			complianceEventsNamespace = string(namespace)
		} else {
			log.Info("Could not detect the controller namespace. Assuming open-cluster-management.")

			complianceEventsNamespace = "open-cluster-management"
		}
	}

	wg := sync.WaitGroup{}

	log.Info("Starting the compliance events API")

	complianceServerCtx := startComplianceEventsAPI(
		controllerCtx,
		cfg,
		client,
		clusterID,
		complianceEventsNamespace,
		net.JoinHostPort(complianceAPIHost, complianceAPIPort),
		complianceAPICert,
		complianceAPIKey,
		&wg,
		tempDir,
		replicatedPolicyUpdates,
	)

	replicatedPolicyCtrler := &propagatorctrl.ReplicatedPolicyReconciler{
		Propagator:          propagator,
		ResourceVersions:    replicatedResourceVersions,
		DynamicWatcher:      dynamicWatcher,
		TemplateResolver:    templateResolver,
		ComplianceServerCtx: complianceServerCtx,
	}

	if err = (replicatedPolicyCtrler).SetupWithManager(
		mgr, replPolicyMaxConcurrency, dynamicWatcherSource, replicatedUpdatesSource, templatesSource,
	); err != nil {
		log.Error(err, "Unable to create the controller", "controller", "replicated-policy")
		os.Exit(1)
	}

	log.Info("Starting manager")

	wg.Add(1)

	go func() {
		if err := mgr.Start(controllerCtx); err != nil {
			log.Error(err, "Problem running manager")
			os.Exit(1)
		}

		wg.Done()
	}()

	wg.Wait()
}

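// startComplianceEventsAPI reads the Postgres connection secret, runs any
// pending database migrations, starts the compliance events HTTP(S) server
// and a dynamic watcher on the database secret, and returns the shared server
// context used by the replicated-policy controller.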
func startComplianceEventsAPI(
	ctx context.Context,
	cfg *rest.Config,
	client *kubernetes.Clientset,
	clusterID string,
	controllerNamespace string,
	complianceAPIAddr string,
	complianceAPICert string,
	complianceAPIKey string,
	wg *sync.WaitGroup,
	tempDir string,
	reconcileRequests chan<- event.GenericEvent,
) *complianceeventsapi.ComplianceServerCtx {
	var dbConnectionURL string

	dbSecret, err := client.CoreV1().Secrets(controllerNamespace).Get(
		ctx, complianceeventsapi.DBSecretName, metav1.GetOptions{},
	)
	if k8serrors.IsNotFound(err) {
		log.Info(
			"Could not start the compliance events API. To enable this functionality, ensure the Postgres "+
				"connection secret is valid in the controller namespace.",
			"secretName", complianceeventsapi.DBSecretName,
			"namespace", controllerNamespace,
		)
	} else if err != nil {
		log.Error(
			err,
			"Failed to determine if the secret was defined",
			"secretName", complianceeventsapi.DBSecretName,
			"namespace", controllerNamespace,
		)

		os.Exit(1)
	} else {
		var err error

		dbConnectionURL, err = complianceeventsapi.ParseDBSecret(dbSecret, tempDir)
		if err != nil {
			log.Error(
				err,
				"Fix the connection details to enable the compliance events API feature",
				"secret", complianceeventsapi.DBSecretName,
				"namespace", controllerNamespace,
			)
		}
	}

	complianceServerCtx, err := complianceeventsapi.NewComplianceServerCtx(dbConnectionURL, clusterID)
	if err == nil {
		// If the migration failed, MigrateDB will log it and MonitorDatabaseConnection will fix it.
		err := complianceServerCtx.MigrateDB(ctx, client, controllerNamespace)
		if err != nil {
			log.Info("Will periodically retry the migration until it is successful")
		}
	} else if !errors.Is(err, complianceeventsapi.ErrInvalidConnectionURL) {
		log.Error(err, "Unexpected error")

		os.Exit(1)
	}

	reconciler := complianceeventsapi.ComplianceDBSecretReconciler{
		Client: client, ComplianceServerCtx: complianceServerCtx, TempDir: tempDir, ConnectionURL: dbConnectionURL,
	}

	dbSecretDynamicWatcher, err := k8sdepwatches.New(
		cfg, &reconciler, &k8sdepwatches.Options{EnableCache: true},
	)
	if err != nil {
		log.Error(err, "Failed to instantiate the dynamic watcher for the compliance events database secret reconciler")
		os.Exit(1)
	}

	reconciler.DynamicWatcher = dbSecretDynamicWatcher

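	// Serve over HTTPS only when both the certificate and key paths are
	// provided; otherwise fall back to plain HTTP.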
	var cert *tls.Certificate

	if complianceAPICert != "" && complianceAPIKey != "" {
		certTemp, err := tls.LoadX509KeyPair(complianceAPICert, complianceAPIKey)
		if err != nil {
			log.Error(
				err,
				"Failed to parse the provided TLS certificate and key",
				"cert", complianceAPICert,
				"key", complianceAPIKey,
			)
			os.Exit(1)
		}

		cert = &certTemp
	} else {
		log.Info("The compliance events history API will listen on HTTP since no certificate was provided")
	}

	complianceAPI := complianceeventsapi.NewComplianceAPIServer(complianceAPIAddr, cfg, cert)

	wg.Add(1)

	go func() {
		if err := complianceAPI.Start(ctx, complianceServerCtx); err != nil {
			log.Error(err, "Failed to start the compliance API server")

			os.Exit(1)
		}

		wg.Done()
	}()

	wg.Add(1)

	go func() {
		err := dbSecretDynamicWatcher.Start(ctx)
		if err != nil {
			log.Error(
				err,
				"Unable to start the compliance events database secret watcher",
				"controller", complianceeventsapi.ControllerName,
			)
			os.Exit(1)
		}

		wg.Done()
	}()

	<-dbSecretDynamicWatcher.Started()

	go complianceeventsapi.MonitorDatabaseConnection(
		ctx, complianceServerCtx, client, controllerNamespace, reconcileRequests,
	)

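	// Watch the database connection secret so that credential changes are
	// picked up by the reconciler without restarting the controller.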
	watcherSecret := k8sdepwatches.ObjectIdentifier{
		Version:   "v1",
		Kind:      "Secret",
		Namespace: controllerNamespace,
		Name:      complianceeventsapi.DBSecretName,
	}
	if err := dbSecretDynamicWatcher.AddWatcher(watcherSecret, watcherSecret); err != nil {
		log.Error(
			err,
			"Unable to start the compliance events database secret watcher",
			"controller", complianceeventsapi.ControllerName,
		)
		os.Exit(1)
	}

	return complianceServerCtx
}

// reportMetrics returns whether the propagator should report GRC metrics.
func reportMetrics() bool {
	metrics, _ := os.LookupEnv("DISABLE_REPORT_METRICS")

	return !strings.EqualFold(metrics, "true")
}

// getWatchNamespace returns the Namespace the operator should be watching for changes.
func getWatchNamespace() (string, error) {
	// WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE
	// which specifies the Namespace to watch.
	// An empty value means the operator is running with cluster scope.
	watchNamespaceEnvVar := "WATCH_NAMESPACE"

	ns, found := os.LookupEnv(watchNamespaceEnvVar)
	if !found {
		return "", fmt.Errorf("%s must be set", watchNamespaceEnvVar)
	}

	return ns, nil
}