sigs.k8s.io/kubebuilder/v3@v3.14.0/pkg/plugins/golang/deploy-image/v1alpha1/scaffolds/internal/templates/controllers/controller.go (about)

     1  /*
     2  Copyright 2022 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package controllers
    18  
    19  import (
    20  	"path/filepath"
    21  
    22  	log "github.com/sirupsen/logrus"
    23  
    24  	"sigs.k8s.io/kubebuilder/v3/pkg/machinery"
    25  )
    26  
    27  var _ machinery.Template = &Controller{}
    28  
    29  // Controller scaffolds the file that defines the controller for a CRD or a builtin resource
    30  // nolint:maligned
    31  type Controller struct {
    32  	machinery.TemplateMixin
    33  	machinery.MultiGroupMixin
    34  	machinery.BoilerplateMixin
    35  	machinery.ResourceMixin
    36  	machinery.ProjectNameMixin
    37  
    38  	ControllerRuntimeVersion string
    39  
    40  	// IsLegacyLayout is added to ensure backwards compatibility and should
    41  	// be removed when we remove the go/v3 plugin
    42  	IsLegacyLayout bool
    43  	PackageName    string
    44  }
    45  
    46  // SetTemplateDefaults implements file.Template
    47  func (f *Controller) SetTemplateDefaults() error {
    48  	if f.Path == "" {
    49  		if f.MultiGroup && f.Resource.Group != "" {
    50  			if f.IsLegacyLayout {
    51  				f.Path = filepath.Join("controllers", "%[group]", "%[kind]_controller.go")
    52  			} else {
    53  				f.Path = filepath.Join("internal", "controller", "%[group]", "%[kind]_controller.go")
    54  			}
    55  		} else {
    56  			if f.IsLegacyLayout {
    57  				f.Path = filepath.Join("controllers", "%[kind]_controller.go")
    58  			} else {
    59  				f.Path = filepath.Join("internal", "controller", "%[kind]_controller.go")
    60  			}
    61  		}
    62  	}
    63  	f.Path = f.Resource.Replacer().Replace(f.Path)
    64  	log.Println(f.Path)
    65  
    66  	f.PackageName = "controller"
    67  	if f.IsLegacyLayout {
    68  		f.PackageName = "controllers"
    69  	}
    70  
    71  	log.Println("creating import for %", f.Resource.Path)
    72  	f.TemplateBody = controllerTemplate
    73  
    74  	// This one is to overwrite the controller if it exist
    75  	f.IfExistsAction = machinery.OverwriteFile
    76  
    77  	return nil
    78  }
    79  
    80  //nolint:lll
    81  const controllerTemplate = `{{ .Boilerplate }}
    82  
    83  package {{ if and .MultiGroup .Resource.Group }}{{ .Resource.PackageName }}{{ else }}{{ .PackageName }}{{ end }}
    84  
    85  import (
    86  	"context"
    87  	"strings"
    88  	"time"
    89  	"fmt"
    90  	"os"
    91  
    92  	appsv1 "k8s.io/api/apps/v1"
    93  	corev1 "k8s.io/api/core/v1"
    94  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    95  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    96  	"k8s.io/apimachinery/pkg/types"
    97  	"k8s.io/apimachinery/pkg/runtime"
    98  	"k8s.io/apimachinery/pkg/api/meta"
    99  	"k8s.io/client-go/tools/record"
   100  	ctrl "sigs.k8s.io/controller-runtime"
   101  	"sigs.k8s.io/controller-runtime/pkg/client"
   102  	"sigs.k8s.io/controller-runtime/pkg/log"
   103  	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
   104  	
   105  	{{ if not (isEmptyStr .Resource.Path) -}}
   106  	{{ .Resource.ImportAlias }} "{{ .Resource.Path }}"
   107  	{{- end }}
   108  )
   109  
   110  const {{ lower .Resource.Kind }}Finalizer = "{{ .Resource.Group }}.{{ .Resource.Domain }}/finalizer"
   111  
   112  // Definitions to manage status conditions
   113  const (
   114  	// typeAvailable{{ .Resource.Kind }} represents the status of the Deployment reconciliation
   115  	typeAvailable{{ .Resource.Kind }} = "Available"
   116  	// typeDegraded{{ .Resource.Kind }} represents the status used when the custom resource is deleted and the finalizer operations are must to occur. 
   117  	typeDegraded{{ .Resource.Kind }} = "Degraded"
   118  )
   119  
   120  // {{ .Resource.Kind }}Reconciler reconciles a {{ .Resource.Kind }} object
   121  type {{ .Resource.Kind }}Reconciler struct {
   122  	client.Client
   123  	Scheme *runtime.Scheme
   124  	Recorder record.EventRecorder
   125  }
   126  
   127  // The following markers are used to generate the rules permissions (RBAC) on config/rbac using controller-gen
   128  // when the command <make manifests> is executed. 
   129  // To know more about markers see: https://book.kubebuilder.io/reference/markers.html
   130  
   131  //+kubebuilder:rbac:groups={{ .Resource.QualifiedGroup }},resources={{ .Resource.Plural }},verbs=get;list;watch;create;update;patch;delete
   132  //+kubebuilder:rbac:groups={{ .Resource.QualifiedGroup }},resources={{ .Resource.Plural }}/status,verbs=get;update;patch
   133  //+kubebuilder:rbac:groups={{ .Resource.QualifiedGroup }},resources={{ .Resource.Plural }}/finalizers,verbs=update
   134  //+kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
   135  //+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
   136  //+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
   137  
   138  // Reconcile is part of the main kubernetes reconciliation loop which aims to
   139  // move the current state of the cluster closer to the desired state.
   140  // It is essential for the controller's reconciliation loop to be idempotent. By following the Operator 
   141  // pattern you will create Controllers which provide a reconcile function
   142  // responsible for synchronizing resources until the desired state is reached on the cluster. 
   143  // Breaking this recommendation goes against the design principles of controller-runtime. 
   144  // and may lead to unforeseen consequences such as resources becoming stuck and requiring manual intervention.
   145  // For further info:
   146  // - About Operator Pattern: https://kubernetes.io/docs/concepts/extend-kubernetes/operator/
   147  // - About Controllers: https://kubernetes.io/docs/concepts/architecture/controller/
   148  // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@{{ .ControllerRuntimeVersion }}/pkg/reconcile
   149  func (r *{{ .Resource.Kind }}Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
   150  	log := log.FromContext(ctx)
   151  
   152  	// Fetch the {{ .Resource.Kind }} instance
   153  	// The purpose is check if the Custom Resource for the Kind {{ .Resource.Kind }}
   154  	// is applied on the cluster if not we return nil to stop the reconciliation
   155  	{{ lower .Resource.Kind }} := &{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}{}
   156  	err := r.Get(ctx, req.NamespacedName, {{ lower .Resource.Kind }})
   157  	if err != nil {
   158  		if apierrors.IsNotFound(err) {
   159  			// If the custom resource is not found then, it usually means that it was deleted or not created
   160  			// In this way, we will stop the reconciliation 
   161  			log.Info("{{ lower .Resource.Kind }} resource not found. Ignoring since object must be deleted")
   162  			return ctrl.Result{}, nil
   163  		}
   164  		// Error reading the object - requeue the request.
   165  		log.Error(err, "Failed to get {{ lower .Resource.Kind }}")
   166  		return ctrl.Result{}, err
   167  	}
   168  
   169  	// Let's just set the status as Unknown when no status are available
   170  	if {{ lower .Resource.Kind }}.Status.Conditions == nil || len({{ lower .Resource.Kind }}.Status.Conditions) == 0 {
   171  		meta.SetStatusCondition(&{{ lower .Resource.Kind }}.Status.Conditions, metav1.Condition{Type: typeAvailable{{ .Resource.Kind }}, Status: metav1.ConditionUnknown, Reason: "Reconciling", Message: "Starting reconciliation"})
   172  		if err = r.Status().Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   173  			log.Error(err, "Failed to update {{ .Resource.Kind }} status")
   174  			return ctrl.Result{}, err
   175  		}
   176  
   177  		// Let's re-fetch the {{ lower .Resource.Kind }} Custom Resource after update the status 
   178  		// so that we have the latest state of the resource on the cluster and we will avoid
   179  		// raise the issue "the object has been modified, please apply
   180  		// your changes to the latest version and try again" which would re-trigger the reconciliation
   181  		// if we try to update it again in the following operations
   182  		if err := r.Get(ctx, req.NamespacedName, {{ lower .Resource.Kind }}); err != nil {
   183  			log.Error(err, "Failed to re-fetch {{ lower .Resource.Kind }}")
   184  			return ctrl.Result{}, err
   185  		}
   186  	}
   187  
   188  	// Let's add a finalizer. Then, we can define some operations which should
   189  	// occurs before the custom resource to be deleted.
   190  	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/finalizers
   191  	if !controllerutil.ContainsFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer) {
   192  		log.Info("Adding Finalizer for {{ .Resource.Kind }}")
   193  		if ok := controllerutil.AddFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer); !ok {
   194  			log.Error(err, "Failed to add finalizer into the custom resource")
   195  			return ctrl.Result{Requeue: true}, nil
   196  		}
   197  		
   198  		if err = r.Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   199  			log.Error(err, "Failed to update custom resource to add finalizer")
   200  			return ctrl.Result{}, err
   201  		}
   202  	}
   203  
   204  	// Check if the {{ .Resource.Kind }} instance is marked to be deleted, which is
   205  	// indicated by the deletion timestamp being set.
   206  	is{{ .Resource.Kind }}MarkedToBeDeleted := {{ lower .Resource.Kind }}.GetDeletionTimestamp() != nil
   207  	if is{{ .Resource.Kind }}MarkedToBeDeleted {
   208  		if controllerutil.ContainsFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer) {
   209  			log.Info("Performing Finalizer Operations for {{ .Resource.Kind }} before delete CR")
   210  
   211  			// Let's add here an status "Downgrade" to define that this resource begin its process to be terminated.
   212  			meta.SetStatusCondition(&{{ lower .Resource.Kind }}.Status.Conditions, metav1.Condition{Type: typeDegraded{{ .Resource.Kind }},
   213  				Status: metav1.ConditionUnknown, Reason: "Finalizing",
   214  				Message: fmt.Sprintf("Performing finalizer operations for the custom resource: %s ", {{ lower .Resource.Kind }}.Name)})
   215  
   216  			if err := r.Status().Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   217  				log.Error(err, "Failed to update {{ .Resource.Kind }} status")
   218  				return ctrl.Result{}, err
   219  			}
   220  
   221  			// Perform all operations required before remove the finalizer and allow
   222  			// the Kubernetes API to remove the custom resource.
   223  			r.doFinalizerOperationsFor{{ .Resource.Kind }}({{ lower .Resource.Kind }})
   224  
   225  			// TODO(user): If you add operations to the doFinalizerOperationsFor{{ .Resource.Kind }} method 
   226  			// then you need to ensure that all worked fine before deleting and updating the Downgrade status
   227  			// otherwise, you should requeue here.
   228  
   229  			// Re-fetch the {{ lower .Resource.Kind }} Custom Resource before update the status 
   230  			// so that we have the latest state of the resource on the cluster and we will avoid
   231  			// raise the issue "the object has been modified, please apply
   232  			// your changes to the latest version and try again" which would re-trigger the reconciliation
   233  			if err := r.Get(ctx, req.NamespacedName, {{ lower .Resource.Kind }}); err != nil {
   234  				log.Error(err, "Failed to re-fetch {{ lower .Resource.Kind }}")
   235  				return ctrl.Result{}, err
   236  			}
   237  
   238  			meta.SetStatusCondition(&{{ lower .Resource.Kind }}.Status.Conditions, metav1.Condition{Type: typeDegraded{{ .Resource.Kind }},
   239  				Status: metav1.ConditionTrue, Reason: "Finalizing",
   240  				Message: fmt.Sprintf("Finalizer operations for custom resource %s name were successfully accomplished", {{ lower .Resource.Kind }}.Name)})
   241  			
   242  			if err := r.Status().Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   243  				log.Error(err, "Failed to update {{ .Resource.Kind }} status")
   244  				return ctrl.Result{}, err
   245  			}
   246  
   247  			log.Info("Removing Finalizer for {{ .Resource.Kind }} after successfully perform the operations")
   248  			if ok:= controllerutil.RemoveFinalizer({{ lower .Resource.Kind }}, {{ lower .Resource.Kind }}Finalizer); !ok{
   249  				log.Error(err, "Failed to remove finalizer for {{ .Resource.Kind }}")
   250  				return ctrl.Result{Requeue: true}, nil
   251  			}
   252  
   253  			if err := r.Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   254  				log.Error(err, "Failed to remove finalizer for {{ .Resource.Kind }}")
   255  				return ctrl.Result{}, err
   256  			}
   257  		}
   258  		return ctrl.Result{}, nil
   259  	}
   260  
   261  	// Check if the deployment already exists, if not create a new one
   262  	found := &appsv1.Deployment{}
   263  	err = r.Get(ctx, types.NamespacedName{Name: {{ lower .Resource.Kind }}.Name, Namespace: {{ lower .Resource.Kind }}.Namespace}, found)
   264  	if err != nil && apierrors.IsNotFound(err) {
   265  		// Define a new deployment
   266  		dep, err := r.deploymentFor{{ .Resource.Kind }}({{ lower .Resource.Kind }})
   267  		if err != nil {
   268  			log.Error(err, "Failed to define new Deployment resource for {{ .Resource.Kind }}")
   269  
   270  			// The following implementation will update the status
   271  			meta.SetStatusCondition(&{{ lower .Resource.Kind }}.Status.Conditions, metav1.Condition{Type: typeAvailable{{ .Resource.Kind }},
   272  				Status: metav1.ConditionFalse, Reason: "Reconciling",
   273  				Message: fmt.Sprintf("Failed to create Deployment for the custom resource (%s): (%s)", {{ lower .Resource.Kind }}.Name, err)})
   274  
   275  			if err := r.Status().Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   276  				log.Error(err, "Failed to update {{ .Resource.Kind }} status")
   277  				return ctrl.Result{}, err
   278  			}
   279  
   280  			return ctrl.Result{}, err
   281  		}
   282  
   283  		log.Info("Creating a new Deployment", 
   284  			"Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
   285  		if err = r.Create(ctx, dep); err != nil {
   286  			log.Error(err, "Failed to create new Deployment",
   287  				"Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name)
   288  			return ctrl.Result{}, err
   289  		}
   290  
   291  		// Deployment created successfully 
   292  		// We will requeue the reconciliation so that we can ensure the state
   293  		// and move forward for the next operations
   294  		return ctrl.Result{RequeueAfter: time.Minute}, nil
   295  	} else if err != nil {
   296  		log.Error(err, "Failed to get Deployment")
   297  		// Let's return the error for the reconciliation be re-trigged again 
   298  		return ctrl.Result{}, err
   299  	}
   300  
   301  	// The CRD API is defining that the {{ .Resource.Kind }} type, have a {{ .Resource.Kind }}Spec.Size field 
   302  	// to set the quantity of Deployment instances is the desired state on the cluster. 
   303  	// Therefore, the following code will ensure the Deployment size is the same as defined 
   304  	// via the Size spec of the Custom Resource which we are reconciling.
   305  	size := {{ lower .Resource.Kind }}.Spec.Size
   306  	if *found.Spec.Replicas != size {
   307  		found.Spec.Replicas = &size
   308  		if err = r.Update(ctx, found); err != nil {
   309  			log.Error(err, "Failed to update Deployment", 
   310  				"Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name)
   311  
   312  			// Re-fetch the {{ lower .Resource.Kind }} Custom Resource before update the status 
   313  			// so that we have the latest state of the resource on the cluster and we will avoid
   314  			// raise the issue "the object has been modified, please apply
   315  			// your changes to the latest version and try again" which would re-trigger the reconciliation
   316  			if err := r.Get(ctx, req.NamespacedName, {{ lower .Resource.Kind }}); err != nil {
   317  				log.Error(err, "Failed to re-fetch {{ lower .Resource.Kind }}")
   318  				return ctrl.Result{}, err
   319  			}
   320  
   321  			// The following implementation will update the status
   322  			meta.SetStatusCondition(&{{ lower .Resource.Kind }}.Status.Conditions, metav1.Condition{Type: typeAvailable{{ .Resource.Kind }},
   323  				Status: metav1.ConditionFalse, Reason: "Resizing",
   324  				Message: fmt.Sprintf("Failed to update the size for the custom resource (%s): (%s)", {{ lower .Resource.Kind }}.Name, err)})
   325  
   326  			if err := r.Status().Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   327  				log.Error(err, "Failed to update {{ .Resource.Kind }} status")
   328  				return ctrl.Result{}, err
   329  			}
   330  
   331  			return ctrl.Result{}, err
   332  		}
   333  
   334  		// Now, that we update the size we want to requeue the reconciliation
   335  		// so that we can ensure that we have the latest state of the resource before
   336  		// update. Also, it will help ensure the desired state on the cluster
   337  		return ctrl.Result{Requeue: true}, nil
   338  	}
   339  
   340  	// The following implementation will update the status
   341  	meta.SetStatusCondition(&{{ lower .Resource.Kind }}.Status.Conditions, metav1.Condition{Type: typeAvailable{{ .Resource.Kind }},
   342  		Status: metav1.ConditionTrue, Reason: "Reconciling",
   343  		Message: fmt.Sprintf("Deployment for custom resource (%s) with %d replicas created successfully", {{ lower .Resource.Kind }}.Name, size)})
   344  
   345  	if err := r.Status().Update(ctx, {{ lower .Resource.Kind }}); err != nil {
   346  		log.Error(err, "Failed to update {{ .Resource.Kind }} status")
   347  		return ctrl.Result{}, err
   348  	}
   349  
   350  	return ctrl.Result{}, nil
   351  }
   352  
   353  // finalize{{ .Resource.Kind }} will perform the required operations before delete the CR.
   354  func (r *{{ .Resource.Kind }}Reconciler) doFinalizerOperationsFor{{ .Resource.Kind }}(cr *{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}) {
   355  	// TODO(user): Add the cleanup steps that the operator
   356  	// needs to do before the CR can be deleted. Examples
   357  	// of finalizers include performing backups and deleting
   358  	// resources that are not owned by this CR, like a PVC.
   359  
   360  	// Note: It is not recommended to use finalizers with the purpose of delete resources which are
   361  	// created and managed in the reconciliation. These ones, such as the Deployment created on this reconcile, 
   362  	// are defined as depended of the custom resource. See that we use the method ctrl.SetControllerReference.
   363  	// to set the ownerRef which means that the Deployment will be deleted by the Kubernetes API.
   364  	// More info: https://kubernetes.io/docs/tasks/administer-cluster/use-cascading-deletion/
   365  
   366  	// The following implementation will raise an event
   367  	r.Recorder.Event(cr, "Warning", "Deleting",
   368  		fmt.Sprintf("Custom Resource %s is being deleted from the namespace %s",
   369  		cr.Name,
   370  		cr.Namespace),)
   371  }
   372  
   373  // deploymentFor{{ .Resource.Kind }} returns a {{ .Resource.Kind }} Deployment object
   374  func (r *{{ .Resource.Kind }}Reconciler) deploymentFor{{ .Resource.Kind }}(
   375  	{{ lower .Resource.Kind }} *{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}) (*appsv1.Deployment, error) {
   376  	ls := labelsFor{{ .Resource.Kind }}({{ lower .Resource.Kind }}.Name)
   377  	replicas := {{ lower .Resource.Kind }}.Spec.Size
   378  	
   379  	// Get the Operand image
   380  	image, err := imageFor{{ .Resource.Kind }}()
   381  	if err != nil {
   382      	return nil, err
   383  	}
   384  
   385  	dep := &appsv1.Deployment{
   386  		ObjectMeta: metav1.ObjectMeta{
   387  			Name:      {{ lower .Resource.Kind }}.Name,
   388  			Namespace: {{ lower .Resource.Kind }}.Namespace,
   389  		},
   390  		Spec: appsv1.DeploymentSpec{
   391  			Replicas: &replicas,
   392  			Selector: &metav1.LabelSelector{
   393  				MatchLabels: ls,
   394  			},
   395  			Template: corev1.PodTemplateSpec{
   396  				ObjectMeta: metav1.ObjectMeta{
   397  					Labels: ls,
   398  				},
   399  				Spec: corev1.PodSpec{
   400  					// TODO(user): Uncomment the following code to configure the nodeAffinity expression
   401  					// according to the platforms which are supported by your solution. It is considered
   402  					// best practice to support multiple architectures. build your manager image using the
   403  					// makefile target docker-buildx. Also, you can use docker manifest inspect <image>
   404  					// to check what are the platforms supported.
   405  					// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
   406  					//Affinity: &corev1.Affinity{
   407  					//	NodeAffinity: &corev1.NodeAffinity{
   408  					//		RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
   409  					//			NodeSelectorTerms: []corev1.NodeSelectorTerm{
   410  					//				{
   411  					//					MatchExpressions: []corev1.NodeSelectorRequirement{
   412  					//						{
   413  					//							Key:      "kubernetes.io/arch",
   414  					//							Operator: "In",
   415  					//							Values:   []string{"amd64", "arm64", "ppc64le", "s390x"},
   416  					//						},
   417  					//						{
   418  					//							Key:      "kubernetes.io/os",
   419  					//							Operator: "In",
   420  					//							Values:   []string{"linux"},
   421  					//						},
   422  					//					},
   423  					//				},
   424  					//			},
   425  					//		},
   426  					//	},
   427  					//},
   428  					SecurityContext: &corev1.PodSecurityContext{
   429  						RunAsNonRoot: &[]bool{true}[0],
   430  						// IMPORTANT: seccomProfile was introduced with Kubernetes 1.19
   431  						// If you are looking for to produce solutions to be supported
   432  						// on lower versions you must remove this option.
   433  						SeccompProfile: &corev1.SeccompProfile{
   434  							Type: corev1.SeccompProfileTypeRuntimeDefault,
   435  						},
   436  					},
   437  					//TODO: scaffold container,
   438  				},
   439  			},
   440  		},
   441  	}
   442  	
   443  	// Set the ownerRef for the Deployment
   444  	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/owners-dependents/
   445  	if err := ctrl.SetControllerReference({{ lower .Resource.Kind }}, dep, r.Scheme); err != nil {
   446  		return nil, err
   447  	}
   448  	return dep, nil
   449  }
   450  
   451  // labelsFor{{ .Resource.Kind }} returns the labels for selecting the resources
   452  // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/
   453  func labelsFor{{ .Resource.Kind }}(name string) map[string]string {
   454  	var imageTag string
   455  	image, err := imageFor{{ .Resource.Kind }}()
   456  	if err == nil {
   457  		imageTag = strings.Split(image, ":")[1]
   458  	}
   459  	return map[string]string{"app.kubernetes.io/name": "{{ .Resource.Kind }}",
   460  		"app.kubernetes.io/instance": name,
   461  		"app.kubernetes.io/version": imageTag,
   462  		"app.kubernetes.io/part-of": "{{ .ProjectName }}",
   463  		"app.kubernetes.io/created-by": "controller-manager",
   464  	}
   465  }
   466  
   467  // imageFor{{ .Resource.Kind }} gets the Operand image which is managed by this controller
   468  // from the {{ upper .Resource.Kind }}_IMAGE environment variable defined in the config/manager/manager.yaml
   469  func imageFor{{ .Resource.Kind }}() (string, error) {
   470  	var imageEnvVar = "{{ upper .Resource.Kind }}_IMAGE"
   471      image, found := os.LookupEnv(imageEnvVar)
   472      if !found {
   473          return "", fmt.Errorf("Unable to find %s environment variable with the image", imageEnvVar)
   474      }
   475      return image, nil
   476  }
   477  
   478  // SetupWithManager sets up the controller with the Manager.
   479  // Note that the Deployment will be also watched in order to ensure its 
   480  // desirable state on the cluster
   481  func (r *{{ .Resource.Kind }}Reconciler) SetupWithManager(mgr ctrl.Manager) error {
   482  	return ctrl.NewControllerManagedBy(mgr).
   483  		{{ if not (isEmptyStr .Resource.Path) -}}
   484  		For(&{{ .Resource.ImportAlias }}.{{ .Resource.Kind }}{}).
   485  		{{- else -}}
   486  		// Uncomment the following line adding a pointer to an instance of the controlled resource as an argument
   487  		// For().
   488  		{{- end }}
   489  		Owns(&appsv1.Deployment{}).
   490  		Complete(r)
   491  }
   492  `