open-cluster-management.io/governance-policy-propagator@v0.13.0/controllers/rootpolicystatus/root_policy_status_controller.go

// Copyright Contributors to the Open Cluster Management project

package policystatus

import (
	"context"
	"sync"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
	appsv1 "open-cluster-management.io/multicloud-operators-subscription/pkg/apis/apps/placementrule/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	policiesv1 "open-cluster-management.io/governance-policy-propagator/api/v1"
	"open-cluster-management.io/governance-policy-propagator/controllers/common"
)

const ControllerName string = "root-policy-status"

var log = ctrl.Log.WithName(ControllerName)

//+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=policies,verbs=get;list;watch
//+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=policies/status,verbs=get;update;patch

// SetupWithManager sets up the controller with the Manager.
func (r *RootPolicyStatusReconciler) SetupWithManager(mgr ctrl.Manager, maxConcurrentReconciles uint) error {
	return ctrl.NewControllerManagedBy(mgr).
		WithOptions(controller.Options{MaxConcurrentReconciles: int(maxConcurrentReconciles)}).
		Named(ControllerName).
		For(
			&policiesv1.Policy{},
			builder.WithPredicates(common.NeverEnqueue),
		).
		Watches(
			&policiesv1.PlacementBinding{},
			handler.EnqueueRequestsFromMapFunc(mapBindingToPolicies(mgr.GetClient())),
		).
		Watches(
			&appsv1.PlacementRule{},
			handler.EnqueueRequestsFromMapFunc(mapRuleToPolicies(mgr.GetClient())),
		).
		Watches(
			&clusterv1beta1.PlacementDecision{},
			handler.EnqueueRequestsFromMapFunc(mapDecisionToPolicies(mgr.GetClient())),
		).
		// This is a workaround: controller-runtime requires a "For" clause but does not allow its
		// event handler to be customized. Requests for Policies need to be enqueued in a very
		// particular way, so that handling is defined in a separate "Watches" instead.
		Watches(
			&policiesv1.Policy{},
			handler.EnqueueRequestsFromMapFunc(common.MapToRootPolicy(mgr.GetClient())),
			builder.WithPredicates(policyStatusPredicate()),
		).
		Complete(r)
}
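
// A minimal sketch of wiring this reconciler into a controller manager (for example, from
// main.go). The manager options, the `scheme` variable, and the concurrency value of 5 are
// illustrative assumptions, not values prescribed by this package. The same *sync.Map instance
// should be shared with the main policy controller so that both serialize on the same
// per-policy mutexes:
//
//	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
//	if err != nil {
//		// handle the error
//	}
//
//	reconciler := &RootPolicyStatusReconciler{
//		Client:          mgr.GetClient(),
//		RootPolicyLocks: &sync.Map{}, // share this instance with the main policy controller
//		Scheme:          mgr.GetScheme(),
//	}
//
//	if err := reconciler.SetupWithManager(mgr, 5); err != nil {
//		// handle the error
//	}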

// blank assignment to verify that RootPolicyStatusReconciler implements reconcile.Reconciler
var _ reconcile.Reconciler = &RootPolicyStatusReconciler{}

// RootPolicyStatusReconciler handles replicated policy status updates and updates the root policy status.
type RootPolicyStatusReconciler struct {
	client.Client
	// Use a lock shared with the main policy controller to avoid conflicting updates.
	RootPolicyLocks *sync.Map
	Scheme          *runtime.Scheme
}

// Reconcile will update the root policy status based on the current state whenever a root or replicated policy
// status is updated. The reconcile request is always for the root policy. This approach is taken rather than
// handling a single replicated policy status per reconcile so that status update requests can be "batched" when
// there are bursts of replicated policy status updates, which lowers resource utilization on the controller and
// the Kubernetes API server.
func (r *RootPolicyStatusReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
	log := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	log.V(1).Info("Reconciling the root policy status")

	log.V(3).Info("Acquiring the lock for the root policy")

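	// LoadOrStore returns the mutex already stored for this root policy if one exists; otherwise
	// it stores the new mutex and returns it. Locking it serializes status updates per root
	// policy, including with the main policy controller that shares the same map.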
	lock, _ := r.RootPolicyLocks.LoadOrStore(request.NamespacedName, &sync.Mutex{})

	lock.(*sync.Mutex).Lock()
	defer lock.(*sync.Mutex).Unlock()

	rootPolicy := &policiesv1.Policy{}

	err := r.Get(ctx, types.NamespacedName{Namespace: request.Namespace, Name: request.Name}, rootPolicy)
	if err != nil {
		if k8serrors.IsNotFound(err) {
			log.V(2).Info("The root policy has been deleted. Doing nothing.")

			return reconcile.Result{}, nil
		}

		log.Error(err, "Failed to get the root policy")

		return reconcile.Result{}, err
	}

	// The root-policy label is only set on replicated policies, so its presence means this request
	// is for a replicated policy and there is no root policy status to update here.
	if _, ok := rootPolicy.Labels["policy.open-cluster-management.io/root-policy"]; ok {
		return reconcile.Result{}, nil
	}
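	// Recalculate the root policy status from the current replicated policies and update it on
	// the cluster if it changed.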
	_, err = common.RootStatusUpdate(ctx, r.Client, rootPolicy)
	if err != nil {
		return reconcile.Result{}, err
	}

	return reconcile.Result{}, nil
}
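
// The map* functions and policyStatusPredicate used in SetupWithManager are defined elsewhere in
// this package. Purely as an illustration (not necessarily the actual implementation), a
// status-only predicate for the final Watches clause could look like the sketch below; any
// predicate.Funcs fields left unset default to returning true. This assumes the
// sigs.k8s.io/controller-runtime/pkg/event, sigs.k8s.io/controller-runtime/pkg/predicate, and
// k8s.io/apimachinery/pkg/api/equality imports:
//
//	func policyStatusPredicate() predicate.Funcs {
//		return predicate.Funcs{
//			UpdateFunc: func(e event.UpdateEvent) bool {
//				oldPolicy := e.ObjectOld.(*policiesv1.Policy)
//				newPolicy := e.ObjectNew.(*policiesv1.Policy)
//
//				// Only enqueue the root policy when the status actually changed.
//				return !equality.Semantic.DeepEqual(oldPolicy.Status, newPolicy.Status)
//			},
//		}
//	}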