github.com/argoproj/argo-cd@v1.8.7/controller/appcontroller.go

     1  package controller
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"math"
     8  	"net/http"
     9  	"reflect"
    10  	"runtime/debug"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"sync"
    15  	"time"
    16  
    17  	"github.com/argoproj/gitops-engine/pkg/diff"
    18  	"github.com/argoproj/gitops-engine/pkg/health"
    19  	synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
    20  	"github.com/argoproj/gitops-engine/pkg/utils/kube"
    21  	jsonpatch "github.com/evanphx/json-patch"
    22  	log "github.com/sirupsen/logrus"
    23  	"golang.org/x/sync/semaphore"
    24  	v1 "k8s.io/api/core/v1"
    25  	apierr "k8s.io/apimachinery/pkg/api/errors"
    26  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    27  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    28  	"k8s.io/apimachinery/pkg/labels"
    29  	apiruntime "k8s.io/apimachinery/pkg/runtime"
    30  	"k8s.io/apimachinery/pkg/types"
    31  	"k8s.io/apimachinery/pkg/util/runtime"
    32  	"k8s.io/apimachinery/pkg/util/wait"
    33  	"k8s.io/apimachinery/pkg/watch"
    34  	"k8s.io/client-go/kubernetes"
    35  	"k8s.io/client-go/tools/cache"
    36  	"k8s.io/client-go/util/workqueue"
    37  
    38  	// make sure to register workqueue prometheus metrics
    39  	_ "k8s.io/component-base/metrics/prometheus/workqueue"
    40  
    41  	"github.com/argoproj/argo-cd/common"
    42  	statecache "github.com/argoproj/argo-cd/controller/cache"
    43  	"github.com/argoproj/argo-cd/controller/metrics"
    44  	"github.com/argoproj/argo-cd/pkg/apis/application"
    45  	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
    46  	appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
    47  	"github.com/argoproj/argo-cd/pkg/client/informers/externalversions/application/v1alpha1"
    48  	applisters "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
    49  	"github.com/argoproj/argo-cd/reposerver/apiclient"
    50  	"github.com/argoproj/argo-cd/util/argo"
    51  	appstatecache "github.com/argoproj/argo-cd/util/cache/appstate"
    52  	"github.com/argoproj/argo-cd/util/db"
    53  	"github.com/argoproj/argo-cd/util/errors"
    54  	"github.com/argoproj/argo-cd/util/glob"
    55  	logutils "github.com/argoproj/argo-cd/util/log"
    56  	settings_util "github.com/argoproj/argo-cd/util/settings"
    57  )
    58  
    59  const (
    60  	updateOperationStateTimeout = 1 * time.Second
     61  	// orphanedIndex contains applications which monitor orphaned resources by namespace
    62  	orphanedIndex = "orphaned"
    63  )
    64  
    65  type CompareWith int
    66  
    67  const (
    68  	// Compare live application state against state defined in latest git revision.
    69  	CompareWithLatest CompareWith = 2
    70  	// Compare live application state against state defined using revision of most recent comparison.
    71  	CompareWithRecent CompareWith = 1
    72  	// Skip comparison and only refresh application resources tree
    73  	ComparisonWithNothing CompareWith = 0
    74  )
    75  
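         // Max returns the higher (i.e. more thorough) of the two comparison levels.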
    76  func (a CompareWith) Max(b CompareWith) CompareWith {
    77  	return CompareWith(math.Max(float64(a), float64(b)))
    78  }
    79  
    80  func (a CompareWith) Pointer() *CompareWith {
    81  	return &a
    82  }
    83  
    84  // ApplicationController is the controller for application resources.
    85  type ApplicationController struct {
    86  	cache                *appstatecache.Cache
    87  	namespace            string
    88  	kubeClientset        kubernetes.Interface
    89  	kubectl              kube.Kubectl
    90  	applicationClientset appclientset.Interface
    91  	auditLogger          *argo.AuditLogger
    92  	// queue contains app namespace/name
    93  	appRefreshQueue workqueue.RateLimitingInterface
     94  	// queue contains app namespace/name/comparisonType and is used to request app refresh with the predefined comparison type
    95  	appComparisonTypeRefreshQueue workqueue.RateLimitingInterface
    96  	appOperationQueue             workqueue.RateLimitingInterface
    97  	projectRefreshQueue           workqueue.RateLimitingInterface
    98  	appInformer                   cache.SharedIndexInformer
    99  	appLister                     applisters.ApplicationLister
   100  	projInformer                  cache.SharedIndexInformer
   101  	appStateManager               AppStateManager
   102  	stateCache                    statecache.LiveStateCache
   103  	statusRefreshTimeout          time.Duration
   104  	selfHealTimeout               time.Duration
   105  	repoClientset                 apiclient.Clientset
   106  	db                            db.ArgoDB
   107  	settingsMgr                   *settings_util.SettingsManager
   108  	refreshRequestedApps          map[string]CompareWith
   109  	refreshRequestedAppsMutex     *sync.Mutex
   110  	metricsServer                 *metrics.MetricsServer
   111  	kubectlSemaphore              *semaphore.Weighted
   112  	clusterFilter                 func(cluster *appv1.Cluster) bool
   113  }
   114  
    115  // NewApplicationController creates a new instance of ApplicationController.
   116  func NewApplicationController(
   117  	namespace string,
   118  	settingsMgr *settings_util.SettingsManager,
   119  	kubeClientset kubernetes.Interface,
   120  	applicationClientset appclientset.Interface,
   121  	repoClientset apiclient.Clientset,
   122  	argoCache *appstatecache.Cache,
   123  	kubectl kube.Kubectl,
   124  	appResyncPeriod time.Duration,
   125  	selfHealTimeout time.Duration,
   126  	metricsPort int,
   127  	kubectlParallelismLimit int64,
   128  	clusterFilter func(cluster *appv1.Cluster) bool,
   129  ) (*ApplicationController, error) {
   130  	log.Infof("appResyncPeriod=%v", appResyncPeriod)
   131  	db := db.NewDB(namespace, settingsMgr, kubeClientset)
   132  	ctrl := ApplicationController{
   133  		cache:                         argoCache,
   134  		namespace:                     namespace,
   135  		kubeClientset:                 kubeClientset,
   136  		kubectl:                       kubectl,
   137  		applicationClientset:          applicationClientset,
   138  		repoClientset:                 repoClientset,
   139  		appRefreshQueue:               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_reconciliation_queue"),
   140  		appOperationQueue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "app_operation_processing_queue"),
   141  		projectRefreshQueue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "project_reconciliation_queue"),
   142  		appComparisonTypeRefreshQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
   143  		db:                            db,
   144  		statusRefreshTimeout:          appResyncPeriod,
   145  		refreshRequestedApps:          make(map[string]CompareWith),
   146  		refreshRequestedAppsMutex:     &sync.Mutex{},
   147  		auditLogger:                   argo.NewAuditLogger(namespace, kubeClientset, "argocd-application-controller"),
   148  		settingsMgr:                   settingsMgr,
   149  		selfHealTimeout:               selfHealTimeout,
   150  		clusterFilter:                 clusterFilter,
   151  	}
   152  	if kubectlParallelismLimit > 0 {
   153  		ctrl.kubectlSemaphore = semaphore.NewWeighted(kubectlParallelismLimit)
   154  	}
   155  	kubectl.SetOnKubectlRun(ctrl.onKubectlRun)
   156  	appInformer, appLister := ctrl.newApplicationInformerAndLister()
   157  	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
   158  	projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, indexers)
   159  	projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
   160  		AddFunc: func(obj interface{}) {
   161  			if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
   162  				ctrl.projectRefreshQueue.Add(key)
   163  			}
   164  		},
   165  		UpdateFunc: func(old, new interface{}) {
   166  			if key, err := cache.MetaNamespaceKeyFunc(new); err == nil {
   167  				ctrl.projectRefreshQueue.Add(key)
   168  			}
   169  		},
   170  		DeleteFunc: func(obj interface{}) {
   171  			if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
   172  				ctrl.projectRefreshQueue.Add(key)
   173  			}
   174  		},
   175  	})
   176  	metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
   177  	var err error
   178  	ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, func(r *http.Request) error {
   179  		return nil
   180  	})
   181  	if err != nil {
   182  		return nil, err
   183  	}
   184  	stateCache := statecache.NewLiveStateCache(db, appInformer, ctrl.settingsMgr, kubectl, ctrl.metricsServer, ctrl.handleObjectUpdated, clusterFilter)
   185  	appStateManager := NewAppStateManager(db, applicationClientset, repoClientset, namespace, kubectl, ctrl.settingsMgr, stateCache, projInformer, ctrl.metricsServer)
   186  	ctrl.appInformer = appInformer
   187  	ctrl.appLister = appLister
   188  	ctrl.projInformer = projInformer
   189  	ctrl.appStateManager = appStateManager
   190  	ctrl.stateCache = stateCache
   191  
   192  	return &ctrl, nil
   193  }
   194  
   195  func (ctrl *ApplicationController) GetMetricsServer() *metrics.MetricsServer {
   196  	return ctrl.metricsServer
   197  }
   198  
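         // onKubectlRun is invoked before each kubectl command: it records the exec metric, acquires the
         // kubectl parallelism semaphore (when a limit is configured) and returns a cleanup function that releases it.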
   199  func (ctrl *ApplicationController) onKubectlRun(command string) (kube.CleanupFunc, error) {
   200  	ctrl.metricsServer.IncKubectlExec(command)
   201  	if ctrl.kubectlSemaphore != nil {
   202  		if err := ctrl.kubectlSemaphore.Acquire(context.Background(), 1); err != nil {
   203  			return nil, err
   204  		}
   205  		ctrl.metricsServer.IncKubectlExecPending(command)
   206  	}
   207  	return func() {
   208  		if ctrl.kubectlSemaphore != nil {
   209  			ctrl.kubectlSemaphore.Release(1)
   210  			ctrl.metricsServer.DecKubectlExecPending(command)
   211  		}
   212  	}, nil
   213  }
   214  
   215  func isSelfReferencedApp(app *appv1.Application, ref v1.ObjectReference) bool {
   216  	gvk := ref.GroupVersionKind()
   217  	return ref.UID == app.UID &&
   218  		ref.Name == app.Name &&
   219  		ref.Namespace == app.Namespace &&
   220  		gvk.Group == application.Group &&
   221  		gvk.Kind == application.ApplicationKind
   222  }
   223  
   224  func (ctrl *ApplicationController) getAppProj(app *appv1.Application) (*appv1.AppProject, error) {
   225  	return argo.GetAppProject(&app.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace, ctrl.settingsMgr)
   226  }
   227  
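         // handleObjectUpdated is called by the live state cache whenever a watched cluster resource changes.
         // It requests a refresh for every application that manages the resource; for unmanaged namespaced
         // resources it also refreshes applications that monitor orphaned resources in that namespace.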
   228  func (ctrl *ApplicationController) handleObjectUpdated(managedByApp map[string]bool, ref v1.ObjectReference) {
    229  	// if a namespaced resource is not managed by any app, it might be an orphaned resource of some other app
   230  	if len(managedByApp) == 0 && ref.Namespace != "" {
    231  		// retrieve applications which monitor orphaned resources in the same namespace and refresh them unless the resource is denied in the app project
   232  		if objs, err := ctrl.appInformer.GetIndexer().ByIndex(orphanedIndex, ref.Namespace); err == nil {
   233  			for i := range objs {
   234  				app, ok := objs[i].(*appv1.Application)
   235  				if !ok {
   236  					continue
   237  				}
    238  				// exclude the resource unless it is permitted in the app project. If the resource is not permitted, it is not controlled by the user and there is no point in showing the warning.
   239  				if proj, err := ctrl.getAppProj(app); err == nil && proj.IsGroupKindPermitted(ref.GroupVersionKind().GroupKind(), true) &&
   240  					!isKnownOrphanedResourceExclusion(kube.NewResourceKey(ref.GroupVersionKind().Group, ref.GroupVersionKind().Kind, ref.Namespace, ref.Name), proj) {
   241  
   242  					managedByApp[app.Name] = false
   243  				}
   244  			}
   245  		}
   246  	}
   247  	for appName, isManagedResource := range managedByApp {
   248  		obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName)
   249  		if app, ok := obj.(*appv1.Application); exists && err == nil && ok && isSelfReferencedApp(app, ref) {
    250  			// Don't force refresh the app if the related resource is the application itself. This prevents an infinite reconciliation loop.
   251  			continue
   252  		}
   253  
   254  		if !ctrl.canProcessApp(obj) {
   255  			// Don't force refresh app if app belongs to a different controller shard
   256  			continue
   257  		}
   258  
   259  		level := ComparisonWithNothing
   260  		if isManagedResource {
   261  			level = CompareWithRecent
   262  		}
   263  		ctrl.requestAppRefresh(appName, &level, nil)
   264  	}
   265  }
   266  
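         // setAppManagedResources builds the application's resource tree from the comparison result, caches the
         // tree and the managed resources, and returns the tree.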
   267  func (ctrl *ApplicationController) setAppManagedResources(a *appv1.Application, comparisonResult *comparisonResult) (*appv1.ApplicationTree, error) {
   268  	managedResources, err := ctrl.managedResources(comparisonResult)
   269  	if err != nil {
   270  		return nil, err
   271  	}
   272  	tree, err := ctrl.getResourceTree(a, managedResources)
   273  	if err != nil {
   274  		return nil, err
   275  	}
   276  	err = ctrl.cache.SetAppResourcesTree(a.Name, tree)
   277  	if err != nil {
   278  		return nil, err
   279  	}
   280  	return tree, ctrl.cache.SetAppManagedResources(a.Name, managedResources)
   281  }
   282  
    283  // returns true if the given resource exists in the namespace by default and is not managed by the user
   284  func isKnownOrphanedResourceExclusion(key kube.ResourceKey, proj *appv1.AppProject) bool {
   285  	if key.Namespace == "default" && key.Group == "" && key.Kind == kube.ServiceKind && key.Name == "kubernetes" {
   286  		return true
   287  	}
   288  	if key.Group == "" && key.Kind == kube.ServiceAccountKind && key.Name == "default" {
   289  		return true
   290  	}
   291  	list := proj.Spec.OrphanedResources.Ignore
   292  	for _, item := range list {
   293  		if item.Kind == "" || glob.Match(item.Kind, key.Kind) {
   294  			if glob.Match(item.Group, key.Group) {
   295  				if item.Name == "" || glob.Match(item.Name, key.Name) {
   296  					return true
   297  				}
   298  			}
   299  		}
   300  	}
   301  	return false
   302  }
   303  
   304  func (ctrl *ApplicationController) getResourceTree(a *appv1.Application, managedResources []*appv1.ResourceDiff) (*appv1.ApplicationTree, error) {
   305  	nodes := make([]appv1.ResourceNode, 0)
   306  
   307  	proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(ctrl.projInformer.GetIndexer()), ctrl.namespace, ctrl.settingsMgr)
   308  	if err != nil {
   309  		return nil, err
   310  	}
   311  	orphanedNodesMap := make(map[kube.ResourceKey]appv1.ResourceNode)
   312  	warnOrphaned := true
   313  	if proj.Spec.OrphanedResources != nil {
   314  		orphanedNodesMap, err = ctrl.stateCache.GetNamespaceTopLevelResources(a.Spec.Destination.Server, a.Spec.Destination.Namespace)
   315  		if err != nil {
   316  			return nil, err
   317  		}
   318  		warnOrphaned = proj.Spec.OrphanedResources.IsWarn()
   319  	}
   320  
   321  	for i := range managedResources {
   322  		managedResource := managedResources[i]
   323  		delete(orphanedNodesMap, kube.NewResourceKey(managedResource.Group, managedResource.Kind, managedResource.Namespace, managedResource.Name))
   324  		var live = &unstructured.Unstructured{}
   325  		err := json.Unmarshal([]byte(managedResource.LiveState), &live)
   326  		if err != nil {
   327  			return nil, err
   328  		}
   329  		var target = &unstructured.Unstructured{}
   330  		err = json.Unmarshal([]byte(managedResource.TargetState), &target)
   331  		if err != nil {
   332  			return nil, err
   333  		}
   334  
   335  		if live == nil {
   336  			nodes = append(nodes, appv1.ResourceNode{
   337  				ResourceRef: appv1.ResourceRef{
   338  					Version:   target.GroupVersionKind().Version,
   339  					Name:      managedResource.Name,
   340  					Kind:      managedResource.Kind,
   341  					Group:     managedResource.Group,
   342  					Namespace: managedResource.Namespace,
   343  				},
   344  			})
   345  		} else {
   346  			err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, kube.GetResourceKey(live), func(child appv1.ResourceNode, appName string) {
   347  				nodes = append(nodes, child)
   348  			})
   349  			if err != nil {
   350  				return nil, err
   351  			}
   352  		}
   353  	}
   354  	orphanedNodes := make([]appv1.ResourceNode, 0)
   355  	for k := range orphanedNodesMap {
   356  		if k.Namespace != "" && proj.IsGroupKindPermitted(k.GroupKind(), true) && !isKnownOrphanedResourceExclusion(k, proj) {
   357  			err := ctrl.stateCache.IterateHierarchy(a.Spec.Destination.Server, k, func(child appv1.ResourceNode, appName string) {
   358  				belongToAnotherApp := false
   359  				if appName != "" {
   360  					if _, exists, err := ctrl.appInformer.GetIndexer().GetByKey(ctrl.namespace + "/" + appName); exists && err == nil {
   361  						belongToAnotherApp = true
   362  					}
   363  				}
   364  				if !belongToAnotherApp {
   365  					orphanedNodes = append(orphanedNodes, child)
   366  				}
   367  			})
   368  			if err != nil {
   369  				return nil, err
   370  			}
   371  		}
   372  	}
   373  	var conditions []appv1.ApplicationCondition
   374  	if len(orphanedNodes) > 0 && warnOrphaned {
   375  		conditions = []appv1.ApplicationCondition{{
   376  			Type:    appv1.ApplicationConditionOrphanedResourceWarning,
   377  			Message: fmt.Sprintf("Application has %d orphaned resources", len(orphanedNodes)),
   378  		}}
   379  	}
   380  	a.Status.SetConditions(conditions, map[appv1.ApplicationConditionType]bool{appv1.ApplicationConditionOrphanedResourceWarning: true})
   381  	sort.Slice(orphanedNodes, func(i, j int) bool {
   382  		return orphanedNodes[i].ResourceRef.String() < orphanedNodes[j].ResourceRef.String()
   383  	})
   384  	return &appv1.ApplicationTree{Nodes: nodes, OrphanedNodes: orphanedNodes}, nil
   385  }
   386  
   387  func (ctrl *ApplicationController) managedResources(comparisonResult *comparisonResult) ([]*appv1.ResourceDiff, error) {
   388  	items := make([]*appv1.ResourceDiff, len(comparisonResult.managedResources))
   389  	for i := range comparisonResult.managedResources {
   390  		res := comparisonResult.managedResources[i]
   391  		item := appv1.ResourceDiff{
   392  			Namespace: res.Namespace,
   393  			Name:      res.Name,
   394  			Group:     res.Group,
   395  			Kind:      res.Kind,
   396  			Hook:      res.Hook,
   397  		}
   398  
   399  		target := res.Target
   400  		live := res.Live
   401  		resDiff := res.Diff
   402  		if res.Kind == kube.SecretKind && res.Group == "" {
   403  			var err error
   404  			target, live, err = diff.HideSecretData(res.Target, res.Live)
   405  			if err != nil {
   406  				return nil, err
   407  			}
   408  			compareOptions, err := ctrl.settingsMgr.GetResourceCompareOptions()
   409  			if err != nil {
   410  				return nil, err
   411  			}
   412  			resDiffPtr, err := diff.Diff(target, live,
   413  				diff.WithNormalizer(comparisonResult.diffNormalizer),
   414  				diff.WithLogr(logutils.NewLogrusLogger(log.New())),
   415  				diff.IgnoreAggregatedRoles(compareOptions.IgnoreAggregatedRoles))
   416  			if err != nil {
   417  				return nil, err
   418  			}
   419  			resDiff = *resDiffPtr
   420  		}
   421  
   422  		if live != nil {
   423  			data, err := json.Marshal(live)
   424  			if err != nil {
   425  				return nil, err
   426  			}
   427  			item.LiveState = string(data)
   428  		} else {
   429  			item.LiveState = "null"
   430  		}
   431  
   432  		if target != nil {
   433  			data, err := json.Marshal(target)
   434  			if err != nil {
   435  				return nil, err
   436  			}
   437  			item.TargetState = string(data)
   438  		} else {
   439  			item.TargetState = "null"
   440  		}
   441  		item.PredictedLiveState = string(resDiff.PredictedLive)
   442  		item.NormalizedLiveState = string(resDiff.NormalizedLive)
   443  
   444  		items[i] = &item
   445  	}
   446  	return items, nil
   447  }
   448  
   449  // Run starts the Application CRD controller.
   450  func (ctrl *ApplicationController) Run(ctx context.Context, statusProcessors int, operationProcessors int) {
   451  	defer runtime.HandleCrash()
   452  	defer ctrl.appRefreshQueue.ShutDown()
   453  	defer ctrl.appComparisonTypeRefreshQueue.ShutDown()
   454  	defer ctrl.appOperationQueue.ShutDown()
   455  	defer ctrl.projectRefreshQueue.ShutDown()
   456  
   457  	ctrl.metricsServer.RegisterClustersInfoSource(ctx, ctrl.stateCache)
   458  	ctrl.RegisterClusterSecretUpdater(ctx)
   459  
   460  	go ctrl.appInformer.Run(ctx.Done())
   461  	go ctrl.projInformer.Run(ctx.Done())
   462  
   463  	errors.CheckError(ctrl.stateCache.Init())
   464  
   465  	if !cache.WaitForCacheSync(ctx.Done(), ctrl.appInformer.HasSynced, ctrl.projInformer.HasSynced) {
   466  		log.Error("Timed out waiting for caches to sync")
   467  		return
   468  	}
   469  
   470  	go func() { errors.CheckError(ctrl.stateCache.Run(ctx)) }()
   471  	go func() { errors.CheckError(ctrl.metricsServer.ListenAndServe()) }()
   472  
   473  	for i := 0; i < statusProcessors; i++ {
   474  		go wait.Until(func() {
   475  			for ctrl.processAppRefreshQueueItem() {
   476  			}
   477  		}, time.Second, ctx.Done())
   478  	}
   479  
   480  	for i := 0; i < operationProcessors; i++ {
   481  		go wait.Until(func() {
   482  			for ctrl.processAppOperationQueueItem() {
   483  			}
   484  		}, time.Second, ctx.Done())
   485  	}
   486  
   487  	go wait.Until(func() {
   488  		for ctrl.processAppComparisonTypeQueueItem() {
   489  		}
   490  	}, time.Second, ctx.Done())
   491  
   492  	go wait.Until(func() {
   493  		for ctrl.processProjectQueueItem() {
   494  		}
   495  	}, time.Second, ctx.Done())
   496  	<-ctx.Done()
   497  }
   498  
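         // requestAppRefresh schedules a refresh of the given application. compareWith optionally overrides the
         // comparison level and after optionally delays the refresh; when both are provided the request is routed
         // through the comparison-type queue (e.g. a key like "argocd/my-app/2" requests a CompareWithLatest refresh).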
   499  func (ctrl *ApplicationController) requestAppRefresh(appName string, compareWith *CompareWith, after *time.Duration) {
   500  	key := fmt.Sprintf("%s/%s", ctrl.namespace, appName)
   501  
   502  	if compareWith != nil && after != nil {
    503  		ctrl.appComparisonTypeRefreshQueue.AddAfter(fmt.Sprintf("%s/%d", key, *compareWith), *after)
   504  	} else {
   505  		if compareWith != nil {
   506  			ctrl.refreshRequestedAppsMutex.Lock()
   507  			ctrl.refreshRequestedApps[appName] = compareWith.Max(ctrl.refreshRequestedApps[appName])
   508  			ctrl.refreshRequestedAppsMutex.Unlock()
   509  		}
   510  		if after != nil {
   511  			ctrl.appRefreshQueue.AddAfter(key, *after)
   512  			ctrl.appOperationQueue.AddAfter(key, *after)
   513  		} else {
   514  			ctrl.appRefreshQueue.Add(key)
   515  			ctrl.appOperationQueue.Add(key)
   516  		}
   517  	}
   518  }
   519  
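         // isRefreshRequested reports whether a controller-initiated refresh is pending for the application and,
         // if so, returns the requested comparison level, clearing the pending entry.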
   520  func (ctrl *ApplicationController) isRefreshRequested(appName string) (bool, CompareWith) {
   521  	ctrl.refreshRequestedAppsMutex.Lock()
   522  	defer ctrl.refreshRequestedAppsMutex.Unlock()
   523  	level, ok := ctrl.refreshRequestedApps[appName]
   524  	if ok {
   525  		delete(ctrl.refreshRequestedApps, appName)
   526  	}
   527  	return ok, level
   528  }
   529  
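         // processAppOperationQueueItem pops one application off the operation queue and either runs its requested
         // operation or, for applications undergoing cascaded deletion, finalizes resource deletion.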
   530  func (ctrl *ApplicationController) processAppOperationQueueItem() (processNext bool) {
   531  	appKey, shutdown := ctrl.appOperationQueue.Get()
   532  	if shutdown {
   533  		processNext = false
   534  		return
   535  	}
   536  	processNext = true
   537  	defer func() {
   538  		if r := recover(); r != nil {
   539  			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
   540  		}
   541  		ctrl.appOperationQueue.Done(appKey)
   542  	}()
   543  
   544  	obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
   545  	if err != nil {
   546  		log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
   547  		return
   548  	}
   549  	if !exists {
   550  		// This happens after app was deleted, but the work queue still had an entry for it.
   551  		return
   552  	}
   553  	origApp, ok := obj.(*appv1.Application)
   554  	if !ok {
   555  		log.Warnf("Key '%s' in index is not an application", appKey)
   556  		return
   557  	}
   558  	app := origApp.DeepCopy()
   559  
   560  	if app.Operation != nil {
   561  		ctrl.processRequestedAppOperation(app)
   562  	} else if app.DeletionTimestamp != nil && app.CascadedDeletion() {
   563  		_, err = ctrl.finalizeApplicationDeletion(app)
   564  		if err != nil {
   565  			ctrl.setAppCondition(app, appv1.ApplicationCondition{
   566  				Type:    appv1.ApplicationConditionDeletionError,
   567  				Message: err.Error(),
   568  			})
   569  			message := fmt.Sprintf("Unable to delete application resources: %v", err.Error())
   570  			ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonStatusRefreshed, Type: v1.EventTypeWarning}, message)
   571  		}
   572  	}
   573  	return
   574  }
   575  
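         // processAppComparisonTypeQueueItem pops one namespace/name/comparisonType key off the comparison-type
         // queue and converts it into a regular refresh request at the encoded comparison level.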
   576  func (ctrl *ApplicationController) processAppComparisonTypeQueueItem() (processNext bool) {
   577  	key, shutdown := ctrl.appComparisonTypeRefreshQueue.Get()
   578  	processNext = true
   579  
   580  	defer func() {
   581  		if r := recover(); r != nil {
   582  			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
   583  		}
   584  		ctrl.appComparisonTypeRefreshQueue.Done(key)
   585  	}()
   586  	if shutdown {
   587  		processNext = false
   588  		return
   589  	}
   590  
   591  	if parts := strings.Split(key.(string), "/"); len(parts) != 3 {
    592  		log.Warnf("Unexpected key format in appComparisonTypeRefreshQueue. Key should consist of namespace/name/comparisonType but got: %s", key.(string))
   593  	} else {
   594  		if compareWith, err := strconv.Atoi(parts[2]); err != nil {
   595  			log.Warnf("Unable to parse comparison type: %v", err)
   596  			return
   597  		} else {
   598  			ctrl.requestAppRefresh(parts[1], CompareWith(compareWith).Pointer(), nil)
   599  		}
   600  	}
   601  	return
   602  }
   603  
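         // processProjectQueueItem pops one project off the project queue and finalizes its deletion if the
         // project is being deleted and still carries the finalizer.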
   604  func (ctrl *ApplicationController) processProjectQueueItem() (processNext bool) {
   605  	key, shutdown := ctrl.projectRefreshQueue.Get()
   606  	processNext = true
   607  
   608  	defer func() {
   609  		if r := recover(); r != nil {
   610  			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
   611  		}
   612  		ctrl.projectRefreshQueue.Done(key)
   613  	}()
   614  	if shutdown {
   615  		processNext = false
   616  		return
   617  	}
   618  	obj, exists, err := ctrl.projInformer.GetIndexer().GetByKey(key.(string))
   619  	if err != nil {
   620  		log.Errorf("Failed to get project '%s' from informer index: %+v", key, err)
   621  		return
   622  	}
   623  	if !exists {
   624  		// This happens after appproj was deleted, but the work queue still had an entry for it.
   625  		return
   626  	}
   627  	origProj, ok := obj.(*appv1.AppProject)
   628  	if !ok {
   629  		log.Warnf("Key '%s' in index is not an appproject", key)
   630  		return
   631  	}
   632  
   633  	if origProj.DeletionTimestamp != nil && origProj.HasFinalizer() {
   634  		if err := ctrl.finalizeProjectDeletion(origProj.DeepCopy()); err != nil {
   635  			log.Warnf("Failed to finalize project deletion: %v", err)
   636  		}
   637  	}
   638  	return
   639  }
   640  
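         // finalizeProjectDeletion removes the project finalizer once no applications reference the project.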
   641  func (ctrl *ApplicationController) finalizeProjectDeletion(proj *appv1.AppProject) error {
   642  	apps, err := ctrl.appLister.Applications(ctrl.namespace).List(labels.Everything())
   643  	if err != nil {
   644  		return err
   645  	}
   646  	appsCount := 0
   647  	for i := range apps {
   648  		if apps[i].Spec.GetProject() == proj.Name {
   649  			appsCount++
   650  			break
   651  		}
   652  	}
   653  	if appsCount == 0 {
   654  		return ctrl.removeProjectFinalizer(proj)
   655  	} else {
    656  		log.Infof("Cannot remove project '%s' finalizer as it is referenced by %d applications", proj.Name, appsCount)
   657  	}
   658  	return nil
   659  }
   660  
   661  func (ctrl *ApplicationController) removeProjectFinalizer(proj *appv1.AppProject) error {
   662  	proj.RemoveFinalizer()
   663  	var patch []byte
   664  	patch, _ = json.Marshal(map[string]interface{}{
   665  		"metadata": map[string]interface{}{
   666  			"finalizers": proj.Finalizers,
   667  		},
   668  	})
   669  	_, err := ctrl.applicationClientset.ArgoprojV1alpha1().AppProjects(ctrl.namespace).Patch(context.Background(), proj.Name, types.MergePatchType, patch, metav1.PatchOptions{})
   670  	return err
   671  }
   672  
   673  // shouldBeDeleted returns whether a given resource obj should be deleted on cascade delete of application app
   674  func (ctrl *ApplicationController) shouldBeDeleted(app *appv1.Application, obj *unstructured.Unstructured) bool {
   675  	return !kube.IsCRD(obj) && !isSelfReferencedApp(app, kube.GetObjectRef(obj))
   676  }
   677  
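         // getPermittedAppLiveObjects returns the application's managed live objects, excluding resources that are
         // not permitted by the app project.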
   678  func (ctrl *ApplicationController) getPermittedAppLiveObjects(app *appv1.Application, proj *appv1.AppProject) (map[kube.ResourceKey]*unstructured.Unstructured, error) {
   679  	objsMap, err := ctrl.stateCache.GetManagedLiveObjs(app, []*unstructured.Unstructured{})
   680  	if err != nil {
   681  		return nil, err
   682  	}
   683  	// Don't delete live resources which are not permitted in the app project
   684  	for k, v := range objsMap {
   685  		if !proj.IsLiveResourcePermitted(v, app.Spec.Destination.Server) {
   686  			delete(objsMap, k)
   687  		}
   688  	}
   689  	return objsMap, nil
   690  }
   691  
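         // finalizeApplicationDeletion deletes the application's managed resources as part of a cascaded deletion
         // and, once none remain, clears the cached state and removes the deletion finalizer from the application.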
   692  func (ctrl *ApplicationController) finalizeApplicationDeletion(app *appv1.Application) ([]*unstructured.Unstructured, error) {
   693  	logCtx := log.WithField("application", app.Name)
   694  	logCtx.Infof("Deleting resources")
   695  	// Get refreshed application info, since informer app copy might be stale
   696  	app, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(context.Background(), app.Name, metav1.GetOptions{})
   697  	if err != nil {
   698  		if !apierr.IsNotFound(err) {
   699  			logCtx.Errorf("Unable to get refreshed application info prior deleting resources: %v", err)
   700  		}
   701  		return nil, nil
   702  	}
   703  	proj, err := ctrl.getAppProj(app)
   704  	if err != nil {
   705  		return nil, err
   706  	}
   707  
   708  	err = argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db)
   709  	if err != nil {
   710  		return nil, err
   711  	}
   712  
   713  	objsMap, err := ctrl.getPermittedAppLiveObjects(app, proj)
   714  	if err != nil {
   715  		return nil, err
   716  	}
   717  
   718  	objs := make([]*unstructured.Unstructured, 0)
   719  	for k := range objsMap {
   720  		// Wait for objects pending deletion to complete before proceeding with next sync wave
   721  		if objsMap[k].GetDeletionTimestamp() != nil {
   722  			logCtx.Infof("%d objects remaining for deletion", len(objsMap))
   723  			return objs, nil
   724  		}
   725  		if ctrl.shouldBeDeleted(app, objsMap[k]) {
   726  			objs = append(objs, objsMap[k])
   727  		}
   728  	}
   729  
   730  	cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
   731  	if err != nil {
   732  		return nil, err
   733  	}
   734  	config := metrics.AddMetricsTransportWrapper(ctrl.metricsServer, app, cluster.RESTConfig())
   735  
   736  	filteredObjs := FilterObjectsForDeletion(objs)
   737  	err = kube.RunAllAsync(len(filteredObjs), func(i int) error {
   738  		obj := filteredObjs[i]
   739  		return ctrl.kubectl.DeleteResource(context.Background(), config, obj.GroupVersionKind(), obj.GetName(), obj.GetNamespace(), false)
   740  	})
   741  	if err != nil {
   742  		return objs, err
   743  	}
   744  
   745  	objsMap, err = ctrl.getPermittedAppLiveObjects(app, proj)
   746  	if err != nil {
   747  		return nil, err
   748  	}
   749  
   750  	for k, obj := range objsMap {
   751  		if !ctrl.shouldBeDeleted(app, obj) {
   752  			delete(objsMap, k)
   753  		}
   754  	}
   755  	if len(objsMap) > 0 {
   756  		logCtx.Infof("%d objects remaining for deletion", len(objsMap))
   757  		return objs, nil
   758  	}
   759  	err = ctrl.cache.SetAppManagedResources(app.Name, nil)
   760  	if err != nil {
   761  		return objs, err
   762  	}
   763  	err = ctrl.cache.SetAppResourcesTree(app.Name, nil)
   764  	if err != nil {
   765  		return objs, err
   766  	}
   767  	app.SetCascadedDeletion(false)
   768  	var patch []byte
   769  	patch, _ = json.Marshal(map[string]interface{}{
   770  		"metadata": map[string]interface{}{
   771  			"finalizers": app.Finalizers,
   772  		},
   773  	})
   774  	_, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
   775  	if err != nil {
   776  		return objs, err
   777  	}
   778  
   779  	logCtx.Infof("Successfully deleted %d resources", len(objs))
   780  	ctrl.projectRefreshQueue.Add(fmt.Sprintf("%s/%s", app.Namespace, app.Spec.GetProject()))
   781  	return objs, nil
   782  }
   783  
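         // setAppCondition records the given condition on the application status and patches it, skipping the
         // update if an identical condition is already present.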
   784  func (ctrl *ApplicationController) setAppCondition(app *appv1.Application, condition appv1.ApplicationCondition) {
   785  	// do nothing if app already has same condition
   786  	for _, c := range app.Status.Conditions {
   787  		if c.Message == condition.Message && c.Type == condition.Type {
   788  			return
   789  		}
   790  	}
   791  
   792  	app.Status.SetConditions([]appv1.ApplicationCondition{condition}, map[appv1.ApplicationConditionType]bool{condition.Type: true})
   793  
   794  	var patch []byte
   795  	patch, err := json.Marshal(map[string]interface{}{
   796  		"status": map[string]interface{}{
   797  			"conditions": app.Status.Conditions,
   798  		},
   799  	})
   800  	if err == nil {
   801  		_, err = ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
   802  	}
   803  	if err != nil {
   804  		log.Errorf("Unable to set application condition: %v", err)
   805  	}
   806  }
   807  
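         // processRequestedAppOperation runs or resumes the operation recorded on the application, handling stale
         // informer copies, retry scheduling and termination, and persists the resulting operation state.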
   808  func (ctrl *ApplicationController) processRequestedAppOperation(app *appv1.Application) {
   809  	logCtx := log.WithField("application", app.Name)
   810  	var state *appv1.OperationState
   811  	// Recover from any unexpected panics and automatically set the status to be failed
   812  	defer func() {
   813  		if r := recover(); r != nil {
   814  			logCtx.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
   815  			state.Phase = synccommon.OperationError
   816  			if rerr, ok := r.(error); ok {
   817  				state.Message = rerr.Error()
   818  			} else {
   819  				state.Message = fmt.Sprintf("%v", r)
   820  			}
   821  			ctrl.setOperationState(app, state)
   822  		}
   823  	}()
   824  	terminating := false
   825  	if isOperationInProgress(app) {
    826  		// If we get here, we are about to process an operation, but we notice it is already in progress.
   827  		// We need to detect if the app object we pulled off the informer is stale and doesn't
   828  		// reflect the fact that the operation is completed. We don't want to perform the operation
   829  		// again. To detect this, always retrieve the latest version to ensure it is not stale.
   830  		freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(context.Background(), app.ObjectMeta.Name, metav1.GetOptions{})
   831  		if err != nil {
   832  			logCtx.Errorf("Failed to retrieve latest application state: %v", err)
   833  			return
   834  		}
   835  		if !isOperationInProgress(freshApp) {
   836  			logCtx.Infof("Skipping operation on stale application state")
   837  			return
   838  		}
   839  		app = freshApp
   840  		state = app.Status.OperationState.DeepCopy()
   841  		terminating = state.Phase == synccommon.OperationTerminating
    842  		// A failed operation with a retry strategy might still be in progress yet already have a completion time set
   843  		if state.FinishedAt != nil && !terminating {
   844  			retryAt, err := app.Status.OperationState.Operation.Retry.NextRetryAt(state.FinishedAt.Time, state.RetryCount)
   845  			if err != nil {
   846  				state.Phase = synccommon.OperationFailed
   847  				state.Message = err.Error()
   848  				ctrl.setOperationState(app, state)
   849  				return
   850  			}
   851  			retryAfter := time.Until(retryAt)
   852  			if retryAfter > 0 {
   853  				logCtx.Infof("Skipping retrying in-progress operation. Attempting again at: %s", retryAt.Format(time.RFC3339))
   854  				ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), &retryAfter)
   855  				return
   856  			} else {
    857  				// Retrying the operation: remove the previous failure time from the app since it is used as a trigger
    858  				// indicating that the previous attempt failed and the operation should be retried
   859  				state.FinishedAt = nil
   860  				ctrl.setOperationState(app, state)
   861  				// Get rid of sync results and null out previous operation completion time
   862  				state.SyncResult = nil
   863  			}
   864  		} else {
   865  			logCtx.Infof("Resuming in-progress operation. phase: %s, message: %s", state.Phase, state.Message)
   866  		}
   867  	} else {
   868  		state = &appv1.OperationState{Phase: synccommon.OperationRunning, Operation: *app.Operation, StartedAt: metav1.Now()}
   869  		ctrl.setOperationState(app, state)
   870  		logCtx.Infof("Initialized new operation: %v", *app.Operation)
   871  	}
   872  
   873  	if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
   874  		state.Phase = synccommon.OperationFailed
   875  		state.Message = err.Error()
   876  	} else {
   877  		ctrl.appStateManager.SyncAppState(app, state)
   878  	}
   879  
   880  	if state.Phase == synccommon.OperationRunning {
   881  		// It's possible for an app to be terminated while we were operating on it. We do not want
   882  		// to clobber the Terminated state with Running. Get the latest app state to check for this.
   883  		freshApp, err := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Get(context.Background(), app.ObjectMeta.Name, metav1.GetOptions{})
   884  		if err == nil {
   885  			if freshApp.Status.OperationState != nil && freshApp.Status.OperationState.Phase == synccommon.OperationTerminating {
   886  				state.Phase = synccommon.OperationTerminating
   887  				state.Message = "operation is terminating"
   888  				// after this, we will get requeued to the workqueue, but next time the
   889  				// SyncAppState will operate in a Terminating phase, allowing the worker to perform
   890  				// cleanup (e.g. delete jobs, workflows, etc...)
   891  			}
   892  		}
   893  	} else if state.Phase == synccommon.OperationFailed || state.Phase == synccommon.OperationError {
   894  		if !terminating && (state.RetryCount < state.Operation.Retry.Limit || state.Operation.Retry.Limit < 0) {
   895  			now := metav1.Now()
   896  			state.FinishedAt = &now
   897  			if retryAt, err := state.Operation.Retry.NextRetryAt(now.Time, state.RetryCount); err != nil {
   898  				state.Phase = synccommon.OperationFailed
   899  				state.Message = fmt.Sprintf("%s (failed to retry: %v)", state.Message, err)
   900  			} else {
   901  				state.Phase = synccommon.OperationRunning
   902  				state.RetryCount++
   903  				state.Message = fmt.Sprintf("%s. Retrying attempt #%d at %s.", state.Message, state.RetryCount, retryAt.Format(time.Kitchen))
   904  			}
   905  		} else if state.RetryCount > 0 {
   906  			state.Message = fmt.Sprintf("%s (retried %d times).", state.Message, state.RetryCount)
   907  		}
   908  
   909  	}
   910  
   911  	ctrl.setOperationState(app, state)
   912  	if state.Phase.Completed() && !app.Operation.Sync.DryRun {
   913  		// if we just completed an operation, force a refresh so that UI will report up-to-date
   914  		// sync/health information
   915  		if _, err := cache.MetaNamespaceKeyFunc(app); err == nil {
    916  			// force app refresh using the CompareWithLatest comparison type and trigger the app reconciliation loop
   917  			ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), nil)
   918  		} else {
    919  			logCtx.Warnf("Failed to requeue application: %v", err)
   920  		}
   921  	}
   922  }
   923  
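         // setOperationState patches the application's status.operationState, retrying until the update succeeds.
         // When the operation completes it also clears the operation field, emits an audit event and records sync metrics.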
   924  func (ctrl *ApplicationController) setOperationState(app *appv1.Application, state *appv1.OperationState) {
   925  	kube.RetryUntilSucceed(context.Background(), updateOperationStateTimeout, "Update application operation state", logutils.NewLogrusLogger(log.New()), func() error {
   926  		if state.Phase == "" {
   927  			// expose any bugs where we neglect to set phase
   928  			panic("no phase was set")
   929  		}
   930  		if state.Phase.Completed() {
   931  			now := metav1.Now()
   932  			state.FinishedAt = &now
   933  		}
   934  		patch := map[string]interface{}{
   935  			"status": map[string]interface{}{
   936  				"operationState": state,
   937  			},
   938  		}
   939  		if state.Phase.Completed() {
   940  			// If operation is completed, clear the operation field to indicate no operation is
   941  			// in progress.
   942  			patch["operation"] = nil
   943  		}
   944  		if reflect.DeepEqual(app.Status.OperationState, state) {
   945  			log.Infof("No operation updates necessary to '%s'. Skipping patch", app.Name)
   946  			return nil
   947  		}
   948  		patchJSON, err := json.Marshal(patch)
   949  		if err != nil {
   950  			return err
   951  		}
   952  		if app.Status.OperationState != nil && app.Status.OperationState.FinishedAt != nil && state.FinishedAt == nil {
   953  			patchJSON, err = jsonpatch.MergeMergePatches(patchJSON, []byte(`{"status": {"operationState": {"finishedAt": null}}}`))
   954  			if err != nil {
   955  				return err
   956  			}
   957  		}
   958  
   959  		appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace)
   960  		_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patchJSON, metav1.PatchOptions{})
   961  		if err != nil {
    962  			// Stop retrying to update a deleted application
   963  			if apierr.IsNotFound(err) {
   964  				return nil
   965  			}
   966  			return err
   967  		}
   968  		log.Infof("updated '%s' operation (phase: %s)", app.Name, state.Phase)
   969  		if state.Phase.Completed() {
   970  			eventInfo := argo.EventInfo{Reason: argo.EventReasonOperationCompleted}
   971  			var messages []string
   972  			if state.Operation.Sync != nil && len(state.Operation.Sync.Resources) > 0 {
   973  				messages = []string{"Partial sync operation"}
   974  			} else {
   975  				messages = []string{"Sync operation"}
   976  			}
   977  			if state.SyncResult != nil {
   978  				messages = append(messages, "to", state.SyncResult.Revision)
   979  			}
   980  			if state.Phase.Successful() {
   981  				eventInfo.Type = v1.EventTypeNormal
   982  				messages = append(messages, "succeeded")
   983  			} else {
   984  				eventInfo.Type = v1.EventTypeWarning
   985  				messages = append(messages, "failed:", state.Message)
   986  			}
   987  			ctrl.auditLogger.LogAppEvent(app, eventInfo, strings.Join(messages, " "))
   988  			ctrl.metricsServer.IncSync(app, state)
   989  		}
   990  		return nil
   991  	})
   992  }
   993  
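         // processAppRefreshQueueItem pops one application off the refresh queue, refreshes its status at the
         // requested comparison level (possibly only rebuilding the resource tree), runs auto-sync when applicable,
         // and persists the result.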
   994  func (ctrl *ApplicationController) processAppRefreshQueueItem() (processNext bool) {
   995  	appKey, shutdown := ctrl.appRefreshQueue.Get()
   996  	if shutdown {
   997  		processNext = false
   998  		return
   999  	}
  1000  	processNext = true
  1001  	defer func() {
  1002  		if r := recover(); r != nil {
  1003  			log.Errorf("Recovered from panic: %+v\n%s", r, debug.Stack())
  1004  		}
  1005  		ctrl.appRefreshQueue.Done(appKey)
  1006  	}()
  1007  
  1008  	obj, exists, err := ctrl.appInformer.GetIndexer().GetByKey(appKey.(string))
  1009  	if err != nil {
  1010  		log.Errorf("Failed to get application '%s' from informer index: %+v", appKey, err)
  1011  		return
  1012  	}
  1013  	if !exists {
  1014  		// This happens after app was deleted, but the work queue still had an entry for it.
  1015  		return
  1016  	}
  1017  	origApp, ok := obj.(*appv1.Application)
  1018  	if !ok {
  1019  		log.Warnf("Key '%s' in index is not an application", appKey)
  1020  		return
  1021  	}
  1022  	origApp = origApp.DeepCopy()
  1023  	needRefresh, refreshType, comparisonLevel := ctrl.needRefreshAppStatus(origApp, ctrl.statusRefreshTimeout)
  1024  
  1025  	if !needRefresh {
  1026  		return
  1027  	}
  1028  
  1029  	app := origApp.DeepCopy()
  1030  	logCtx := log.WithFields(log.Fields{"application": app.Name})
  1031  	startTime := time.Now()
  1032  	defer func() {
  1033  		reconcileDuration := time.Since(startTime)
  1034  		ctrl.metricsServer.IncReconcile(origApp, reconcileDuration)
  1035  		logCtx.WithFields(log.Fields{
  1036  			"time_ms":        reconcileDuration.Milliseconds(),
  1037  			"level":          comparisonLevel,
  1038  			"dest-server":    origApp.Spec.Destination.Server,
  1039  			"dest-name":      origApp.Spec.Destination.Name,
  1040  			"dest-namespace": origApp.Spec.Destination.Namespace,
  1041  		}).Info("Reconciliation completed")
  1042  	}()
  1043  
  1044  	if comparisonLevel == ComparisonWithNothing {
  1045  		managedResources := make([]*appv1.ResourceDiff, 0)
  1046  		if err := ctrl.cache.GetAppManagedResources(app.Name, &managedResources); err != nil {
   1047  			logCtx.Warnf("Failed to get cached managed resources for tree reconciliation, falling back to full reconciliation")
  1048  		} else {
  1049  			var tree *appv1.ApplicationTree
  1050  			if tree, err = ctrl.getResourceTree(app, managedResources); err == nil {
  1051  				app.Status.Summary = tree.GetSummary()
  1052  				if err := ctrl.cache.SetAppResourcesTree(app.Name, tree); err != nil {
  1053  					logCtx.Errorf("Failed to cache resources tree: %v", err)
  1054  					return
  1055  				}
  1056  			}
  1057  
  1058  			ctrl.persistAppStatus(origApp, &app.Status)
  1059  			return
  1060  		}
  1061  	}
  1062  
  1063  	project, hasErrors := ctrl.refreshAppConditions(app)
  1064  	if hasErrors {
  1065  		app.Status.Sync.Status = appv1.SyncStatusCodeUnknown
  1066  		app.Status.Health.Status = health.HealthStatusUnknown
  1067  		ctrl.persistAppStatus(origApp, &app.Status)
  1068  		return
  1069  	}
  1070  
  1071  	var localManifests []string
  1072  	if opState := app.Status.OperationState; opState != nil && opState.Operation.Sync != nil {
  1073  		localManifests = opState.Operation.Sync.Manifests
  1074  	}
  1075  
  1076  	revision := app.Spec.Source.TargetRevision
  1077  	if comparisonLevel == CompareWithRecent {
  1078  		revision = app.Status.Sync.Revision
  1079  	}
  1080  
  1081  	now := metav1.Now()
  1082  	compareResult := ctrl.appStateManager.CompareAppState(app, project, revision, app.Spec.Source, refreshType == appv1.RefreshTypeHard, localManifests)
  1083  	for k, v := range compareResult.timings {
  1084  		logCtx = logCtx.WithField(k, v.Milliseconds())
  1085  	}
  1086  
  1087  	ctrl.normalizeApplication(origApp, app)
  1088  
  1089  	tree, err := ctrl.setAppManagedResources(app, compareResult)
  1090  	if err != nil {
  1091  		logCtx.Errorf("Failed to cache app resources: %v", err)
  1092  	} else {
  1093  		app.Status.Summary = tree.GetSummary()
  1094  	}
  1095  
  1096  	if project.Spec.SyncWindows.Matches(app).CanSync(false) {
  1097  		syncErrCond := ctrl.autoSync(app, compareResult.syncStatus, compareResult.resources)
  1098  		if syncErrCond != nil {
  1099  			app.Status.SetConditions(
  1100  				[]appv1.ApplicationCondition{*syncErrCond},
  1101  				map[appv1.ApplicationConditionType]bool{appv1.ApplicationConditionSyncError: true},
  1102  			)
  1103  		} else {
  1104  			app.Status.SetConditions(
  1105  				[]appv1.ApplicationCondition{},
  1106  				map[appv1.ApplicationConditionType]bool{appv1.ApplicationConditionSyncError: true},
  1107  			)
  1108  		}
  1109  	} else {
  1110  		logCtx.Info("Sync prevented by sync window")
  1111  	}
  1112  
  1113  	if app.Status.ReconciledAt == nil || comparisonLevel == CompareWithLatest {
  1114  		app.Status.ReconciledAt = &now
  1115  	}
  1116  	app.Status.Sync = *compareResult.syncStatus
  1117  	app.Status.Health = *compareResult.healthStatus
  1118  	app.Status.Resources = compareResult.resources
  1119  	sort.Slice(app.Status.Resources, func(i, j int) bool {
  1120  		return resourceStatusKey(app.Status.Resources[i]) < resourceStatusKey(app.Status.Resources[j])
  1121  	})
  1122  	app.Status.SourceType = compareResult.appSourceType
  1123  	ctrl.persistAppStatus(origApp, &app.Status)
  1124  	return
  1125  }
  1126  
  1127  func resourceStatusKey(res appv1.ResourceStatus) string {
  1128  	return strings.Join([]string{res.Group, res.Kind, res.Namespace, res.Name}, "/")
  1129  }
  1130  
   1131  // needRefreshAppStatus reports whether the application status needs to be refreshed.
   1132  // Returns true if the application has never been compared, has changed, or the comparison result has expired.
   1133  // Additionally returns whether a full refresh was requested or not.
   1134  // If a full refresh is requested then target and live state should be reconciled, else only the live state tree should be updated.
  1135  func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application, statusRefreshTimeout time.Duration) (bool, appv1.RefreshType, CompareWith) {
  1136  	logCtx := log.WithFields(log.Fields{"application": app.Name})
  1137  	var reason string
  1138  	compareWith := CompareWithLatest
  1139  	refreshType := appv1.RefreshTypeNormal
  1140  	expired := app.Status.ReconciledAt == nil || app.Status.ReconciledAt.Add(statusRefreshTimeout).Before(time.Now().UTC())
  1141  
  1142  	if requestedType, ok := app.IsRefreshRequested(); ok {
  1143  		// user requested app refresh.
  1144  		refreshType = requestedType
  1145  		reason = fmt.Sprintf("%s refresh requested", refreshType)
  1146  	} else if expired {
  1147  		// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
  1148  		// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
  1149  		//TODO: find existing Golang bug or create a new one
  1150  		reconciledAtStr := "never"
  1151  		if app.Status.ReconciledAt != nil {
  1152  			reconciledAtStr = app.Status.ReconciledAt.String()
  1153  		}
  1154  		reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", reconciledAtStr, statusRefreshTimeout)
  1155  	} else if !app.Spec.Source.Equals(app.Status.Sync.ComparedTo.Source) {
  1156  		reason = "spec.source differs"
  1157  	} else if !app.Spec.Destination.Equals(app.Status.Sync.ComparedTo.Destination) {
  1158  		reason = "spec.destination differs"
  1159  	} else if requested, level := ctrl.isRefreshRequested(app.Name); requested {
  1160  		compareWith = level
  1161  		reason = "controller refresh requested"
  1162  	}
  1163  
  1164  	if reason != "" {
  1165  		logCtx.Infof("Refreshing app status (%s), level (%d)", reason, compareWith)
  1166  		return true, refreshType, compareWith
  1167  	}
  1168  	return false, refreshType, compareWith
  1169  }
  1170  
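         // refreshAppConditions validates the application spec against its project and records any resulting error
         // conditions. It returns the project and whether validation errors were found.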
  1171  func (ctrl *ApplicationController) refreshAppConditions(app *appv1.Application) (*appv1.AppProject, bool) {
  1172  	errorConditions := make([]appv1.ApplicationCondition, 0)
  1173  	proj, err := ctrl.getAppProj(app)
  1174  	if err != nil {
  1175  		if apierr.IsNotFound(err) {
  1176  			errorConditions = append(errorConditions, appv1.ApplicationCondition{
  1177  				Type:    appv1.ApplicationConditionInvalidSpecError,
  1178  				Message: fmt.Sprintf("Application referencing project %s which does not exist", app.Spec.Project),
  1179  			})
  1180  		} else {
  1181  			errorConditions = append(errorConditions, appv1.ApplicationCondition{
  1182  				Type:    appv1.ApplicationConditionUnknownError,
  1183  				Message: err.Error(),
  1184  			})
  1185  		}
  1186  	} else {
  1187  		specConditions, err := argo.ValidatePermissions(context.Background(), &app.Spec, proj, ctrl.db)
  1188  		if err != nil {
  1189  			errorConditions = append(errorConditions, appv1.ApplicationCondition{
  1190  				Type:    appv1.ApplicationConditionUnknownError,
  1191  				Message: err.Error(),
  1192  			})
  1193  		} else {
  1194  			errorConditions = append(errorConditions, specConditions...)
  1195  		}
  1196  	}
  1197  	app.Status.SetConditions(errorConditions, map[appv1.ApplicationConditionType]bool{
  1198  		appv1.ApplicationConditionInvalidSpecError: true,
  1199  		appv1.ApplicationConditionUnknownError:     true,
  1200  	})
  1201  	return proj, len(errorConditions) > 0
  1202  }
  1203  
   1204  // normalizeApplication normalizes an application's spec and additionally persists the update if it changed
  1205  func (ctrl *ApplicationController) normalizeApplication(orig, app *appv1.Application) {
  1206  	logCtx := log.WithFields(log.Fields{"application": app.Name})
  1207  	app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
  1208  	patch, modified, err := diff.CreateTwoWayMergePatch(orig, app, appv1.Application{})
  1209  	if err != nil {
  1210  		logCtx.Errorf("error constructing app spec patch: %v", err)
  1211  	} else if modified {
  1212  		appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
  1213  		_, err = appClient.Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
  1214  		if err != nil {
  1215  			logCtx.Errorf("Error persisting normalized application spec: %v", err)
  1216  		} else {
  1217  			logCtx.Infof("Normalized app spec: %s", string(patch))
  1218  		}
  1219  	}
  1220  }
  1221  
  1222  // persistAppStatus persists updates to application status. If no changes were made, it is a no-op
  1223  func (ctrl *ApplicationController) persistAppStatus(orig *appv1.Application, newStatus *appv1.ApplicationStatus) {
  1224  	logCtx := log.WithFields(log.Fields{"application": orig.Name})
  1225  	if orig.Status.Sync.Status != newStatus.Sync.Status {
  1226  		message := fmt.Sprintf("Updated sync status: %s -> %s", orig.Status.Sync.Status, newStatus.Sync.Status)
  1227  		ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
  1228  	}
  1229  	if orig.Status.Health.Status != newStatus.Health.Status {
  1230  		message := fmt.Sprintf("Updated health status: %s -> %s", orig.Status.Health.Status, newStatus.Health.Status)
  1231  		ctrl.auditLogger.LogAppEvent(orig, argo.EventInfo{Reason: argo.EventReasonResourceUpdated, Type: v1.EventTypeNormal}, message)
  1232  	}
  1233  	var newAnnotations map[string]string
  1234  	if orig.GetAnnotations() != nil {
  1235  		newAnnotations = make(map[string]string)
  1236  		for k, v := range orig.GetAnnotations() {
  1237  			newAnnotations[k] = v
  1238  		}
  1239  		delete(newAnnotations, common.AnnotationKeyRefresh)
  1240  	}
  1241  	patch, modified, err := diff.CreateTwoWayMergePatch(
  1242  		&appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: orig.GetAnnotations()}, Status: orig.Status},
  1243  		&appv1.Application{ObjectMeta: metav1.ObjectMeta{Annotations: newAnnotations}, Status: *newStatus}, appv1.Application{})
  1244  	if err != nil {
  1245  		logCtx.Errorf("Error constructing app status patch: %v", err)
  1246  		return
  1247  	}
  1248  	if !modified {
  1249  		logCtx.Infof("No status changes. Skipping patch")
  1250  		return
  1251  	}
  1252  	logCtx.Debugf("patch: %s", string(patch))
  1253  	appClient := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(orig.Namespace)
  1254  	_, err = appClient.Patch(context.Background(), orig.Name, types.MergePatchType, patch, metav1.PatchOptions{})
  1255  	if err != nil {
  1256  		logCtx.Warnf("Error updating application: %v", err)
  1257  	} else {
  1258  		logCtx.Infof("Update successful")
  1259  	}
  1260  }
  1261  
  1262  // autoSync will initiate a sync operation for an application configured with automated sync
  1263  func (ctrl *ApplicationController) autoSync(app *appv1.Application, syncStatus *appv1.SyncStatus, resources []appv1.ResourceStatus) *appv1.ApplicationCondition {
  1264  	if app.Spec.SyncPolicy == nil || app.Spec.SyncPolicy.Automated == nil {
  1265  		return nil
  1266  	}
  1267  	logCtx := log.WithFields(log.Fields{"application": app.Name})
  1268  	if app.Operation != nil {
  1269  		logCtx.Infof("Skipping auto-sync: another operation is in progress")
  1270  		return nil
  1271  	}
  1272  	if app.DeletionTimestamp != nil && !app.DeletionTimestamp.IsZero() {
  1273  		logCtx.Infof("Skipping auto-sync: deletion in progress")
  1274  		return nil
  1275  	}
  1276  
  1277  	// Only perform auto-sync if we detect OutOfSync status. This is to prevent us from attempting
  1278  	// a sync when application is already in a Synced or Unknown state
  1279  	if syncStatus.Status != appv1.SyncStatusCodeOutOfSync {
  1280  		logCtx.Infof("Skipping auto-sync: application status is %s", syncStatus.Status)
  1281  		return nil
  1282  	}
  1283  
  1284  	if !app.Spec.SyncPolicy.Automated.Prune {
  1285  		requirePruneOnly := true
  1286  		for _, r := range resources {
  1287  			if r.Status != appv1.SyncStatusCodeSynced && !r.RequiresPruning {
  1288  				requirePruneOnly = false
  1289  				break
  1290  			}
  1291  		}
  1292  		if requirePruneOnly {
  1293  			logCtx.Infof("Skipping auto-sync: need to prune extra resources only but automated prune is disabled")
  1294  			return nil
  1295  		}
  1296  	}
  1297  
  1298  	desiredCommitSHA := syncStatus.Revision
  1299  	alreadyAttempted, attemptPhase := alreadyAttemptedSync(app, desiredCommitSHA)
  1300  	selfHeal := app.Spec.SyncPolicy.Automated.SelfHeal
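        	// Build the automated sync operation. The default retry limit of 5 is
        	// overridden below if the app's sync policy defines its own retry strategy.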
  1301  	op := appv1.Operation{
  1302  		Sync: &appv1.SyncOperation{
  1303  			Revision:    desiredCommitSHA,
  1304  			Prune:       app.Spec.SyncPolicy.Automated.Prune,
  1305  			SyncOptions: app.Spec.SyncPolicy.SyncOptions,
  1306  		},
  1307  		InitiatedBy: appv1.OperationInitiator{Automated: true},
  1308  		Retry:       appv1.RetryStrategy{Limit: 5},
  1309  	}
  1310  	if app.Spec.SyncPolicy.Retry != nil {
  1311  		op.Retry = *app.Spec.SyncPolicy.Retry
  1312  	}
  1313  	// It is possible for manifests to remain OutOfSync even after a sync/kubectl apply (e.g.
  1314  	// auto-sync with pruning disabled). We need to ensure that we do not keep Syncing an
  1315  	// application in an infinite loop. To detect this, we only attempt the Sync if the revision
  1316  	// and parameter overrides are different from our most recent sync operation.
  1317  	if alreadyAttempted && (!selfHeal || !attemptPhase.Successful()) {
  1318  		if !attemptPhase.Successful() {
  1319  			logCtx.Warnf("Skipping auto-sync: failed previous sync attempt to %s", desiredCommitSHA)
  1320  			message := fmt.Sprintf("Failed sync attempt to %s: %s", desiredCommitSHA, app.Status.OperationState.Message)
  1321  			return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
  1322  		}
  1323  		logCtx.Infof("Skipping auto-sync: most recent sync already to %s", desiredCommitSHA)
  1324  		return nil
  1325  	} else if alreadyAttempted && selfHeal {
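        		// Self-heal: once the self-heal timeout has elapsed since the last operation
        		// finished, re-sync only the resources that are still OutOfSync; otherwise
        		// schedule another refresh after the remaining backoff.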
  1326  		if shouldSelfHeal, retryAfter := ctrl.shouldSelfHeal(app); shouldSelfHeal {
  1327  			for _, resource := range resources {
  1328  				if resource.Status != appv1.SyncStatusCodeSynced {
  1329  					op.Sync.Resources = append(op.Sync.Resources, appv1.SyncOperationResource{
  1330  						Kind:  resource.Kind,
  1331  						Group: resource.Group,
  1332  						Name:  resource.Name,
  1333  					})
  1334  				}
  1335  			}
  1336  		} else {
  1337  			logCtx.Infof("Skipping auto-sync: already attempted sync to %s with timeout %v (retrying in %v)", desiredCommitSHA, ctrl.selfHealTimeout, retryAfter)
  1338  			ctrl.requestAppRefresh(app.Name, CompareWithLatest.Pointer(), &retryAfter)
  1339  			return nil
  1340  		}
  1341  
  1342  	}
  1343  
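        	// Guard against auto-sync deleting everything: if pruning is enabled and every
        	// resource requires pruning, refuse to sync unless AllowEmpty is explicitly set.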
  1344  	if app.Spec.SyncPolicy.Automated.Prune && !app.Spec.SyncPolicy.Automated.AllowEmpty {
  1345  		allNeedPrune := true
  1346  		for _, r := range resources {
  1347  			if !r.RequiresPruning {
  1348  				allNeedPrune = false
  1349  			}
  1350  		}
  1351  		if allNeedPrune {
  1352  			message := fmt.Sprintf("Skipping sync attempt to %s: auto-sync will wipe out all resources", desiredCommitSHA)
  1353  			logCtx.Warn(message)
  1354  			return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: message}
  1355  		}
  1356  	}
  1357  
  1358  	appIf := ctrl.applicationClientset.ArgoprojV1alpha1().Applications(app.Namespace)
  1359  	_, err := argo.SetAppOperation(appIf, app.Name, &op)
  1360  	if err != nil {
  1361  		logCtx.Errorf("Failed to initiate auto-sync to %s: %v", desiredCommitSHA, err)
  1362  		return &appv1.ApplicationCondition{Type: appv1.ApplicationConditionSyncError, Message: err.Error()}
  1363  	}
  1364  	message := fmt.Sprintf("Initiated automated sync to '%s'", desiredCommitSHA)
  1365  	ctrl.auditLogger.LogAppEvent(app, argo.EventInfo{Reason: argo.EventReasonOperationStarted, Type: v1.EventTypeNormal}, message)
  1366  	logCtx.Info(message)
  1367  	return nil
  1368  }
  1369  
  1370  // alreadyAttemptedSync returns whether the most recent sync was performed against the given
  1371  // commitSHA with the same app source config currently set on the app, along with that sync's phase
  1372  func alreadyAttemptedSync(app *appv1.Application, commitSHA string) (bool, synccommon.OperationPhase) {
  1373  	if app.Status.OperationState == nil || app.Status.OperationState.Operation.Sync == nil || app.Status.OperationState.SyncResult == nil {
  1374  		return false, ""
  1375  	}
  1376  	if app.Status.OperationState.SyncResult.Revision != commitSHA {
  1377  		return false, ""
  1378  	}
  1379  	// Ignore differences in target revision, since we just verified the commit SHAs are equal,
  1380  	// and we do not want to trigger auto-sync due to things like HEAD != master
  1381  	specSource := app.Spec.Source.DeepCopy()
  1382  	specSource.TargetRevision = ""
  1383  	syncResSource := app.Status.OperationState.SyncResult.Source.DeepCopy()
  1384  	syncResSource.TargetRevision = ""
  1385  	return reflect.DeepEqual(*specSource, *syncResSource), app.Status.OperationState.Phase
  1386  }
  1387  
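        // shouldSelfHeal reports whether the self-heal timeout has elapsed since the last
        // operation finished and, if it has not, how long to wait before retrying.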
  1388  func (ctrl *ApplicationController) shouldSelfHeal(app *appv1.Application) (bool, time.Duration) {
  1389  	if app.Status.OperationState == nil {
  1390  		return true, time.Duration(0)
  1391  	}
  1392  
  1393  	var retryAfter time.Duration
  1394  	if app.Status.OperationState.FinishedAt == nil {
  1395  		retryAfter = ctrl.selfHealTimeout
  1396  	} else {
  1397  		retryAfter = ctrl.selfHealTimeout - time.Since(app.Status.OperationState.FinishedAt.Time)
  1398  	}
  1399  	return retryAfter <= 0, retryAfter
  1400  }
  1401  
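        // canProcessApp reports whether this controller instance should process the given
        // application. When a cluster filter (sharding) is configured, the decision is
        // delegated to the filter based on the app's destination cluster.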
  1402  func (ctrl *ApplicationController) canProcessApp(obj interface{}) bool {
  1403  	app, ok := obj.(*appv1.Application)
  1404  	if !ok {
  1405  		return false
  1406  	}
  1407  	if ctrl.clusterFilter != nil {
  1408  		cluster, err := ctrl.db.GetCluster(context.Background(), app.Spec.Destination.Server)
  1409  		if err != nil {
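        			// Cluster lookup failed: pass nil so the filter decides how to treat
        			// apps whose destination cluster cannot be resolved.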
  1410  			return ctrl.clusterFilter(nil)
  1411  		}
  1412  		return ctrl.clusterFilter(cluster)
  1413  	}
  1414  
  1415  	return true
  1416  }
  1417  
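        // newApplicationInformerAndLister constructs the shared informer and lister used to watch
        // Application resources in the controller's namespace. Applications are indexed by namespace
        // and, when their project tracks orphaned resources, by destination namespace.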
  1418  func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.SharedIndexInformer, applisters.ApplicationLister) {
  1419  	informer := cache.NewSharedIndexInformer(
  1420  		&cache.ListWatch{
  1421  			ListFunc: func(options metav1.ListOptions) (apiruntime.Object, error) {
  1422  				return ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).List(context.TODO(), options)
  1423  			},
  1424  			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
  1425  				return ctrl.applicationClientset.ArgoprojV1alpha1().Applications(ctrl.namespace).Watch(context.TODO(), options)
  1426  			},
  1427  		},
  1428  		&appv1.Application{},
  1429  		ctrl.statusRefreshTimeout,
  1430  		cache.Indexers{
  1431  			cache.NamespaceIndex: func(obj interface{}) ([]string, error) {
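        				// As a side effect of indexing, validate the app's destination and surface
        				// any error as an InvalidSpecError condition on the application.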
  1432  				app, ok := obj.(*appv1.Application)
  1433  				if ok {
  1434  					if err := argo.ValidateDestination(context.Background(), &app.Spec.Destination, ctrl.db); err != nil {
  1435  						ctrl.setAppCondition(app, appv1.ApplicationCondition{Type: appv1.ApplicationConditionInvalidSpecError, Message: err.Error()})
  1436  					}
  1437  				}
  1438  
  1439  				return cache.MetaNamespaceIndexFunc(obj)
  1440  			},
  1441  			orphanedIndex: func(obj interface{}) ([]string, error) {
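        				// Index applications whose project tracks orphaned resources by their
        				// destination namespace so orphaned-resource events can be matched to them.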
  1442  				app, ok := obj.(*appv1.Application)
  1443  				if !ok {
  1444  					return nil, nil
  1445  				}
  1446  
  1447  				proj, err := ctrl.getAppProj(app)
  1448  				if err != nil {
  1449  					return nil, nil
  1450  				}
  1451  				if proj.Spec.OrphanedResources != nil {
  1452  					return []string{app.Spec.Destination.Namespace}, nil
  1453  				}
  1454  				return nil, nil
  1455  			},
  1456  		},
  1457  	)
  1458  	lister := applisters.NewApplicationLister(informer.GetIndexer())
  1459  	informer.AddEventHandler(
  1460  		cache.ResourceEventHandlerFuncs{
  1461  			AddFunc: func(obj interface{}) {
  1462  				if !ctrl.canProcessApp(obj) {
  1463  					return
  1464  				}
  1465  				key, err := cache.MetaNamespaceKeyFunc(obj)
  1466  				if err == nil {
  1467  					ctrl.appRefreshQueue.Add(key)
  1468  					ctrl.appOperationQueue.Add(key)
  1469  				}
  1470  			},
  1471  			UpdateFunc: func(old, new interface{}) {
  1472  				if !ctrl.canProcessApp(new) {
  1473  					return
  1474  				}
  1475  
  1476  				key, err := cache.MetaNamespaceKeyFunc(new)
  1477  				if err != nil {
  1478  					return
  1479  				}
  1480  				var compareWith *CompareWith
  1481  				oldApp, oldOK := old.(*appv1.Application)
  1482  				newApp, newOK := new.(*appv1.Application)
  1483  				if oldOK && newOK && automatedSyncEnabled(oldApp, newApp) {
  1484  					log.WithField("application", newApp.Name).Info("Enabled automated sync")
  1485  					compareWith = CompareWithLatest.Pointer()
  1486  				}
  1487  				ctrl.requestAppRefresh(newApp.Name, compareWith, nil)
  1488  				ctrl.appOperationQueue.Add(key)
  1489  			},
  1490  			DeleteFunc: func(obj interface{}) {
  1491  				if !ctrl.canProcessApp(obj) {
  1492  					return
  1493  				}
  1494  				// IndexerInformer uses a delta queue, therefore for deletes we have to use this
  1495  				// key function.
  1496  				key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
  1497  				if err == nil {
  1498  					ctrl.appRefreshQueue.Add(key)
  1499  				}
  1500  			},
  1501  		},
  1502  	)
  1503  	return informer, lister
  1504  }
  1505  
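        // RegisterClusterSecretUpdater starts a background updater that keeps the cached cluster
        // info for this controller's clusters up to date until the context is cancelled.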
  1506  func (ctrl *ApplicationController) RegisterClusterSecretUpdater(ctx context.Context) {
  1507  	updater := NewClusterInfoUpdater(ctrl.stateCache, ctrl.db, ctrl.appLister.Applications(ctrl.namespace), ctrl.cache, ctrl.clusterFilter)
  1508  	go updater.Run(ctx)
  1509  }
  1510  
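        // isOperationInProgress reports whether the application has an operation that has started
        // but not yet completed.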
  1511  func isOperationInProgress(app *appv1.Application) bool {
  1512  	return app.Status.OperationState != nil && !app.Status.OperationState.Phase.Completed()
  1513  }
  1514  
  1515  // automatedSyncEnabled reports whether automated sync or self-heal went from disabled to enabled
  1516  // between the old and new app specs; if so, the informer's update handler forces a refresh
  1517  func automatedSyncEnabled(oldApp *appv1.Application, newApp *appv1.Application) bool {
  1518  	oldEnabled := false
  1519  	oldSelfHealEnabled := false
  1520  	if oldApp.Spec.SyncPolicy != nil && oldApp.Spec.SyncPolicy.Automated != nil {
  1521  		oldEnabled = true
  1522  		oldSelfHealEnabled = oldApp.Spec.SyncPolicy.Automated.SelfHeal
  1523  	}
  1524  
  1525  	newEnabled := false
  1526  	newSelfHealEnabled := false
  1527  	if newApp.Spec.SyncPolicy != nil && newApp.Spec.SyncPolicy.Automated != nil {
  1528  		newEnabled = true
  1529  		newSelfHealEnabled = newApp.Spec.SyncPolicy.Automated.SelfHeal
  1530  	}
  1531  	if !oldEnabled && newEnabled {
  1532  		return true
  1533  	}
  1534  	if !oldSelfHealEnabled && newSelfHealEnabled {
  1535  		return true
  1536  	}
  1537  	// nothing changed
  1538  	return false
  1539  }