github.com/argoproj/argo-cd/v3@v3.2.1/controller/state.go (about)

     1  package controller
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"errors"
     7  	"fmt"
     8  	"reflect"
     9  	"slices"
    10  	"strings"
    11  	goSync "sync"
    12  	"time"
    13  
    14  	synccommon "github.com/argoproj/gitops-engine/pkg/sync/common"
    15  	corev1 "k8s.io/api/core/v1"
    16  
    17  	"github.com/argoproj/gitops-engine/pkg/diff"
    18  	"github.com/argoproj/gitops-engine/pkg/health"
    19  	"github.com/argoproj/gitops-engine/pkg/sync"
    20  	hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook"
    21  	"github.com/argoproj/gitops-engine/pkg/sync/ignore"
    22  	resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
    23  	"github.com/argoproj/gitops-engine/pkg/sync/syncwaves"
    24  	kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube"
    25  	log "github.com/sirupsen/logrus"
    26  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    27  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    28  	"k8s.io/apimachinery/pkg/runtime/schema"
    29  	"k8s.io/apimachinery/pkg/types"
    30  
    31  	"github.com/argoproj/argo-cd/v3/common"
    32  	statecache "github.com/argoproj/argo-cd/v3/controller/cache"
    33  	"github.com/argoproj/argo-cd/v3/controller/metrics"
    34  	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    35  	appclientset "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned"
    36  	"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
    37  	applog "github.com/argoproj/argo-cd/v3/util/app/log"
    38  	"github.com/argoproj/argo-cd/v3/util/app/path"
    39  	"github.com/argoproj/argo-cd/v3/util/argo"
    40  	argodiff "github.com/argoproj/argo-cd/v3/util/argo/diff"
    41  	"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
    42  	appstatecache "github.com/argoproj/argo-cd/v3/util/cache/appstate"
    43  	"github.com/argoproj/argo-cd/v3/util/db"
    44  	"github.com/argoproj/argo-cd/v3/util/gpg"
    45  	utilio "github.com/argoproj/argo-cd/v3/util/io"
    46  	"github.com/argoproj/argo-cd/v3/util/settings"
    47  	"github.com/argoproj/argo-cd/v3/util/stats"
    48  )
    49  
// ErrCompareStateRepo is a sentinel error returned by CompareAppState when the
// target state could not be fetched from the repo server and the failure is
// being short-circuited (e.g. within the repo error grace period). Callers
// match it with errors.Is.
var ErrCompareStateRepo = errors.New("failed to get repo objects")
    51  
// resourceInfoProviderStub is a no-op fallback used when the live cluster
// cache is unavailable; it reports every GroupKind as cluster-scoped.
type resourceInfoProviderStub struct{}

// IsNamespaced always reports false (treat as cluster-scoped) and never errors.
func (r *resourceInfoProviderStub) IsNamespaced(_ schema.GroupKind) (bool, error) {
	return false, nil
}
    57  
// managedResource pairs a resource's desired (target) manifest with its live
// cluster state and the computed diff between the two.
type managedResource struct {
	// Target is the desired state rendered from the app's sources; nil when the
	// resource exists only in the cluster.
	Target *unstructured.Unstructured
	// Live is the current cluster state; nil when the resource is missing.
	Live *unstructured.Unstructured
	// Diff is the computed difference between Target and Live.
	Diff diff.DiffResult
	// Group/Version/Kind/Namespace/Name identify the resource.
	Group           string
	Version         string
	Kind            string
	Namespace       string
	Name            string
	// Hook indicates the resource is a sync hook rather than a managed resource.
	Hook            bool
	ResourceVersion string
}
    70  
// AppStateManager defines methods which allow to compare application spec and actual application state.
type AppStateManager interface {
	// CompareAppState compares the application's desired state (from the given
	// revisions/sources, or local objects) against the live cluster state and
	// returns the reconciliation result.
	CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localObjects []string, hasMultipleSources bool) (*comparisonResult, error)
	// SyncAppState executes the sync operation, recording progress in state.
	SyncAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, state *v1alpha1.OperationState)
	// GetRepoObjs renders the application's manifests via the repo-server and
	// returns the objects, the raw manifest responses, and whether the
	// revisions may contain changes.
	GetRepoObjs(ctx context.Context, app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error)
}
    77  
// comparisonResult holds the state of an application after the reconciliation
type comparisonResult struct {
	// syncStatus is the computed sync status (Synced/OutOfSync/Unknown).
	syncStatus   *v1alpha1.SyncStatus
	// healthStatus is the aggregated application health.
	healthStatus health.HealthStatusCode
	// resources is the per-resource status list persisted on the app.
	resources        []v1alpha1.ResourceStatus
	// managedResources pairs each target manifest with its live state and diff.
	managedResources []managedResource
	reconciliationResult sync.ReconciliationResult
	// diffConfig captures the normalization/diff settings used for comparison.
	diffConfig           argodiff.DiffConfig
	appSourceType        v1alpha1.ApplicationSourceType
	// appSourceTypes stores the SourceType for each application source under sources field
	appSourceTypes []v1alpha1.ApplicationSourceType
	// timings maps phases of comparison to the duration it took to complete (for statistical purposes)
	timings            map[string]time.Duration
	diffResultList     *diff.DiffResultList
	hasPostDeleteHooks bool
	// revisionsMayHaveChanges indicates if there are any possibilities that the revisions contain changes
	revisionsMayHaveChanges bool
}
    96  
// GetSyncStatus returns the sync status computed during comparison.
func (res *comparisonResult) GetSyncStatus() *v1alpha1.SyncStatus {
	return res.syncStatus
}
   100  
// GetHealthStatus returns the aggregated health status computed during comparison.
func (res *comparisonResult) GetHealthStatus() health.HealthStatusCode {
	return res.healthStatus
}
   104  
// appStateManager allows to compare applications to git
type appStateManager struct {
	metricsServer *metrics.MetricsServer
	db            db.ArgoDB
	settingsMgr   *settings.SettingsManager
	appclientset  appclientset.Interface
	kubectl       kubeutil.Kubectl
	onKubectlRun  kubeutil.OnKubectlRunFunc
	// repoClientset creates connections to the repo-server for manifest generation.
	repoClientset apiclient.Clientset
	// liveStateCache caches live cluster state and API resource info.
	liveStateCache statecache.LiveStateCache
	cache          *appstatecache.Cache
	// namespace is the controller's own namespace (used for app instance names).
	namespace            string
	statusRefreshTimeout time.Duration
	resourceTracking     argo.ResourceTracking
	persistResourceHealth bool
	// repoErrorCache tracks the first time a repo error was seen per app, so
	// repeated failures can be short-circuited within repoErrorGracePeriod.
	repoErrorCache       goSync.Map
	repoErrorGracePeriod time.Duration
	serverSideDiff       bool
	ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts
}
   125  
// GetRepoObjs will generate the manifests for the given application delegating the
// task to the repo-server. It returns the list of generated manifests as unstructured
// objects. It also returns the full response from all calls to the repo server as the
// second argument. The third return value reports whether the revisions may contain
// changes (it is false only when every source was short-circuited via the
// manifest-generate-paths annotation with no detected path changes).
//
// NOTE(review): the loop below writes revisions[i] whenever len(revisions) <
// len(sources); if a caller ever passes a strictly shorter revisions slice this
// would panic — presumably all callers normalize the lengths first (CompareAppState
// does) — confirm before relying on this from new call sites.
func (m *appStateManager) GetRepoObjs(ctx context.Context, app *v1alpha1.Application, sources []v1alpha1.ApplicationSource, appLabelKey string, revisions []string, noCache, noRevisionCache, verifySignature bool, proj *v1alpha1.AppProject, sendRuntimeState bool) ([]*unstructured.Unstructured, []*apiclient.ManifestResponse, bool, error) {
	ts := stats.NewTimingStats()
	// Load Helm/OCI repositories and narrow them to the ones the project permits.
	helmRepos, err := m.db.ListHelmRepositories(ctx)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to list Helm repositories: %w", err)
	}
	permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get permitted Helm repositories for project %q: %w", proj.Name, err)
	}

	ociRepos, err := m.db.ListOCIRepositories(ctx)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to list OCI repositories: %w", err)
	}
	permittedOCIRepos, err := argo.GetPermittedRepos(proj, ociRepos)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get permitted OCI repositories for project %q: %w", proj.Name, err)
	}

	ts.AddCheckpoint("repo_ms")
	// Repository credential templates, likewise filtered by project permissions.
	helmRepositoryCredentials, err := m.db.GetAllHelmRepositoryCredentials(ctx)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get Helm credentials: %w", err)
	}
	permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get permitted Helm credentials for project %q: %w", proj.Name, err)
	}

	ociRepositoryCredentials, err := m.db.GetAllOCIRepositoryCredentials(ctx)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get OCI credentials: %w", err)
	}
	permittedOCICredentials, err := argo.GetPermittedReposCredentials(proj, ociRepositoryCredentials)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get permitted OCI credentials for project %q: %w", proj.Name, err)
	}

	enabledSourceTypes, err := m.settingsMgr.GetEnabledSourceTypes()
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get enabled source types: %w", err)
	}
	ts.AddCheckpoint("plugins_ms")

	// System-level settings needed to build the manifest generation request.
	kustomizeSettings, err := m.settingsMgr.GetKustomizeSettings()
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get Kustomize settings: %w", err)
	}

	helmOptions, err := m.settingsMgr.GetHelmSettings()
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get Helm settings: %w", err)
	}

	trackingMethod, err := m.settingsMgr.GetTrackingMethod()
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get trackingMethod: %w", err)
	}

	installationID, err := m.settingsMgr.GetInstallationID()
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get installation ID: %w", err)
	}

	destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, m.db)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get destination cluster: %w", err)
	}

	ts.AddCheckpoint("build_options_ms")
	// Cluster runtime info (K8s version, API resources) is only sent when the
	// caller wants manifests rendered against the destination cluster's state.
	var serverVersion string
	var apiResources []kubeutil.APIResourceInfo
	if sendRuntimeState {
		serverVersion, apiResources, err = m.liveStateCache.GetVersionsInfo(destCluster)
		if err != nil {
			return nil, nil, false, fmt.Errorf("failed to get cluster version for cluster %q: %w", destCluster.Server, err)
		}
	}
	conn, repoClient, err := m.repoClientset.NewRepoServerClient()
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to connect to repo server: %w", err)
	}
	defer utilio.Close(conn)

	manifestInfos := make([]*apiclient.ManifestResponse, 0)
	targetObjs := make([]*unstructured.Unstructured, 0)

	// Store the map of all sources having ref field into a map for applications with sources field
	// If it's for a rollback process, the refSources[*].targetRevision fields are the desired
	// revisions for the rollback
	refSources, err := argo.GetRefSources(ctx, sources, app.Spec.Project, m.db.GetRepository, revisions)
	if err != nil {
		return nil, nil, false, fmt.Errorf("failed to get ref sources: %w", err)
	}

	revisionsMayHaveChanges := false

	keyManifestGenerateAnnotationVal, keyManifestGenerateAnnotationExists := app.Annotations[v1alpha1.AnnotationKeyManifestGeneratePaths]

	for i, source := range sources {
		// Fall back to the source's targetRevision when no explicit revision was given.
		if len(revisions) < len(sources) || revisions[i] == "" {
			revisions[i] = source.TargetRevision
		}
		repo, err := m.db.GetRepository(ctx, source.RepoURL, proj.Name)
		if err != nil {
			return nil, nil, false, fmt.Errorf("failed to get repo %q: %w", source.RepoURL, err)
		}

		// Determine the last-synced revision for this source, used below to
		// decide whether manifest generation can be skipped for unchanged paths.
		syncedRevision := app.Status.Sync.Revision
		if app.Spec.HasMultipleSources() {
			if i < len(app.Status.Sync.Revisions) {
				syncedRevision = app.Status.Sync.Revisions[i]
			} else {
				syncedRevision = ""
			}
		}

		revision := revisions[i]

		appNamespace := app.Spec.Destination.Namespace
		apiVersions := argo.APIResourcesToStrings(apiResources, true)
		if !sendRuntimeState {
			appNamespace = ""
		}

		if !source.IsHelm() && !source.IsOCI() && syncedRevision != "" && keyManifestGenerateAnnotationExists && keyManifestGenerateAnnotationVal != "" {
			// Validate the manifest-generate-path annotation to avoid generating manifests if it has not changed.
			updateRevisionResult, err := repoClient.UpdateRevisionForPaths(ctx, &apiclient.UpdateRevisionForPathsRequest{
				Repo:               repo,
				Revision:           revision,
				SyncedRevision:     syncedRevision,
				NoRevisionCache:    noRevisionCache,
				Paths:              path.GetAppRefreshPaths(app),
				AppLabelKey:        appLabelKey,
				AppName:            app.InstanceName(m.namespace),
				Namespace:          appNamespace,
				ApplicationSource:  &source,
				KubeVersion:        serverVersion,
				ApiVersions:        apiVersions,
				TrackingMethod:     trackingMethod,
				RefSources:         refSources,
				HasMultipleSources: app.Spec.HasMultipleSources(),
				InstallationID:     installationID,
			})
			if err != nil {
				return nil, nil, false, fmt.Errorf("failed to compare revisions for source %d of %d: %w", i+1, len(sources), err)
			}
			if updateRevisionResult.Changes {
				revisionsMayHaveChanges = true
			}

			// Generate manifests should use same revision as updateRevisionForPaths, because HEAD revision may be different between these two calls
			if updateRevisionResult.Revision != "" {
				revision = updateRevisionResult.Revision
			}
		} else {
			// revisionsMayHaveChanges is set to true if at least one revision is not possible to be updated
			revisionsMayHaveChanges = true
		}

		repos := permittedHelmRepos
		helmRepoCreds := permittedHelmCredentials
		// If the source is OCI, there is a potential for an OCI image to be a Helm chart and that said chart in
		// turn would have OCI dependencies. To ensure that those dependencies can be resolved, add them to the repos
		// list.
		if source.IsOCI() {
			// Clone before appending so the shared permitted slices are not mutated.
			repos = slices.Clone(permittedHelmRepos)
			helmRepoCreds = slices.Clone(permittedHelmCredentials)
			repos = append(repos, permittedOCIRepos...)
			helmRepoCreds = append(helmRepoCreds, permittedOCICredentials...)
		}

		log.Debugf("Generating Manifest for source %s revision %s", source, revision)
		manifestInfo, err := repoClient.GenerateManifest(ctx, &apiclient.ManifestRequest{
			Repo:                            repo,
			Repos:                           repos,
			Revision:                        revision,
			NoCache:                         noCache,
			NoRevisionCache:                 noRevisionCache,
			AppLabelKey:                     appLabelKey,
			AppName:                         app.InstanceName(m.namespace),
			Namespace:                       appNamespace,
			ApplicationSource:               &source,
			KustomizeOptions:                kustomizeSettings,
			KubeVersion:                     serverVersion,
			ApiVersions:                     apiVersions,
			VerifySignature:                 verifySignature,
			HelmRepoCreds:                   helmRepoCreds,
			TrackingMethod:                  trackingMethod,
			EnabledSourceTypes:              enabledSourceTypes,
			HelmOptions:                     helmOptions,
			HasMultipleSources:              app.Spec.HasMultipleSources(),
			RefSources:                      refSources,
			ProjectName:                     proj.Name,
			ProjectSourceRepos:              proj.Spec.SourceRepos,
			AnnotationManifestGeneratePaths: app.GetAnnotation(v1alpha1.AnnotationKeyManifestGeneratePaths),
			InstallationID:                  installationID,
		})
		if err != nil {
			return nil, nil, false, fmt.Errorf("failed to generate manifest for source %d of %d: %w", i+1, len(sources), err)
		}

		targetObj, err := unmarshalManifests(manifestInfo.Manifests)
		if err != nil {
			return nil, nil, false, fmt.Errorf("failed to unmarshal manifests for source %d of %d: %w", i+1, len(sources), err)
		}
		targetObjs = append(targetObjs, targetObj...)
		manifestInfos = append(manifestInfos, manifestInfo)
	}

	ts.AddCheckpoint("manifests_ms")
	// Emit per-phase timing stats for observability.
	logCtx := log.WithFields(applog.GetAppLogFields(app))
	for k, v := range ts.Timings() {
		logCtx = logCtx.WithField(k, v.Milliseconds())
	}
	logCtx = logCtx.WithField("time_ms", time.Since(ts.StartTime).Milliseconds())
	logCtx.Info("GetRepoObjs stats")

	return targetObjs, manifestInfos, revisionsMayHaveChanges, nil
}
   351  
   352  // ResolveGitRevision will resolve the given revision to a full commit SHA. Only works for git.
   353  func (m *appStateManager) ResolveGitRevision(repoURL string, revision string) (string, error) {
   354  	conn, repoClient, err := m.repoClientset.NewRepoServerClient()
   355  	if err != nil {
   356  		return "", fmt.Errorf("failed to connect to repo server: %w", err)
   357  	}
   358  	defer utilio.Close(conn)
   359  
   360  	repo, err := m.db.GetRepository(context.Background(), repoURL, "")
   361  	if err != nil {
   362  		return "", fmt.Errorf("failed to get repo %q: %w", repoURL, err)
   363  	}
   364  
   365  	// Mock the app. The repo-server only needs to know whether the "chart" field is populated.
   366  	app := &v1alpha1.Application{
   367  		Spec: v1alpha1.ApplicationSpec{
   368  			Source: &v1alpha1.ApplicationSource{
   369  				RepoURL:        repoURL,
   370  				TargetRevision: revision,
   371  			},
   372  		},
   373  	}
   374  	resp, err := repoClient.ResolveRevision(context.Background(), &apiclient.ResolveRevisionRequest{
   375  		Repo:              repo,
   376  		App:               app,
   377  		AmbiguousRevision: revision,
   378  	})
   379  	if err != nil {
   380  		return "", fmt.Errorf("failed to determine whether the dry source has changed: %w", err)
   381  	}
   382  	return resp.Revision, nil
   383  }
   384  
   385  func unmarshalManifests(manifests []string) ([]*unstructured.Unstructured, error) {
   386  	targetObjs := make([]*unstructured.Unstructured, 0)
   387  	for _, manifest := range manifests {
   388  		obj, err := v1alpha1.UnmarshalToUnstructured(manifest)
   389  		if err != nil {
   390  			return nil, err
   391  		}
   392  		targetObjs = append(targetObjs, obj)
   393  	}
   394  	return targetObjs, nil
   395  }
   396  
   397  func DeduplicateTargetObjects(
   398  	namespace string,
   399  	objs []*unstructured.Unstructured,
   400  	infoProvider kubeutil.ResourceInfoProvider,
   401  ) ([]*unstructured.Unstructured, []v1alpha1.ApplicationCondition, error) {
   402  	targetByKey := make(map[kubeutil.ResourceKey][]*unstructured.Unstructured)
   403  	for i := range objs {
   404  		obj := objs[i]
   405  		if obj == nil {
   406  			continue
   407  		}
   408  		isNamespaced := kubeutil.IsNamespacedOrUnknown(infoProvider, obj.GroupVersionKind().GroupKind())
   409  		if !isNamespaced {
   410  			obj.SetNamespace("")
   411  		} else if obj.GetNamespace() == "" {
   412  			obj.SetNamespace(namespace)
   413  		}
   414  		key := kubeutil.GetResourceKey(obj)
   415  		if key.Name == "" && obj.GetGenerateName() != "" {
   416  			key.Name = fmt.Sprintf("%s%d", obj.GetGenerateName(), i)
   417  		}
   418  		targetByKey[key] = append(targetByKey[key], obj)
   419  	}
   420  	conditions := make([]v1alpha1.ApplicationCondition, 0)
   421  	result := make([]*unstructured.Unstructured, 0)
   422  	for key, targets := range targetByKey {
   423  		if len(targets) > 1 {
   424  			now := metav1.Now()
   425  			conditions = append(conditions, v1alpha1.ApplicationCondition{
   426  				Type:               v1alpha1.ApplicationConditionRepeatedResourceWarning,
   427  				Message:            fmt.Sprintf("Resource %s appeared %d times among application resources.", key.String(), len(targets)),
   428  				LastTransitionTime: &now,
   429  			})
   430  		}
   431  		result = append(result, targets[len(targets)-1])
   432  	}
   433  
   434  	return result, conditions, nil
   435  }
   436  
   437  // normalizeClusterScopeTracking will set the app instance tracking metadata on malformed cluster-scoped resources where
   438  // metadata.namespace is not empty. The repo-server doesn't know which resources are cluster-scoped, so it may apply
   439  // an incorrect tracking annotation using the metadata.namespace. This function will correct that.
   440  func normalizeClusterScopeTracking(targetObjs []*unstructured.Unstructured, infoProvider kubeutil.ResourceInfoProvider, setAppInstance func(*unstructured.Unstructured) error) error {
   441  	for i := len(targetObjs) - 1; i >= 0; i-- {
   442  		targetObj := targetObjs[i]
   443  		if targetObj == nil {
   444  			continue
   445  		}
   446  		gvk := targetObj.GroupVersionKind()
   447  		if !kubeutil.IsNamespacedOrUnknown(infoProvider, gvk.GroupKind()) {
   448  			if targetObj.GetNamespace() != "" {
   449  				targetObj.SetNamespace("")
   450  				err := setAppInstance(targetObj)
   451  				if err != nil {
   452  					return fmt.Errorf("failed to set app instance label on cluster-scoped resource %s/%s: %w", gvk.String(), targetObj.GetName(), err)
   453  				}
   454  			}
   455  		}
   456  	}
   457  	return nil
   458  }
   459  
   460  // getComparisonSettings will return the system level settings related to the
   461  // diff/normalization process.
   462  func (m *appStateManager) getComparisonSettings() (string, map[string]v1alpha1.ResourceOverride, *settings.ResourcesFilter, string, string, error) {
   463  	resourceOverrides, err := m.settingsMgr.GetResourceOverrides()
   464  	if err != nil {
   465  		return "", nil, nil, "", "", err
   466  	}
   467  	appLabelKey, err := m.settingsMgr.GetAppInstanceLabelKey()
   468  	if err != nil {
   469  		return "", nil, nil, "", "", err
   470  	}
   471  	resFilter, err := m.settingsMgr.GetResourcesFilter()
   472  	if err != nil {
   473  		return "", nil, nil, "", "", err
   474  	}
   475  	installationID, err := m.settingsMgr.GetInstallationID()
   476  	if err != nil {
   477  		return "", nil, nil, "", "", err
   478  	}
   479  	trackingMethod, err := m.settingsMgr.GetTrackingMethod()
   480  	if err != nil {
   481  		return "", nil, nil, "", "", err
   482  	}
   483  	return appLabelKey, resourceOverrides, resFilter, installationID, trackingMethod, nil
   484  }
   485  
   486  // verifyGnuPGSignature verifies the result of a GnuPG operation for a given git
   487  // revision.
   488  func verifyGnuPGSignature(revision string, project *v1alpha1.AppProject, manifestInfo *apiclient.ManifestResponse) []v1alpha1.ApplicationCondition {
   489  	now := metav1.Now()
   490  	conditions := make([]v1alpha1.ApplicationCondition, 0)
   491  	// We need to have some data in the verification result to parse, otherwise there was no signature
   492  	if manifestInfo.VerifyResult != "" {
   493  		verifyResult := gpg.ParseGitCommitVerification(manifestInfo.VerifyResult)
   494  		switch verifyResult.Result {
   495  		case gpg.VerifyResultGood:
   496  			// This is the only case we allow to sync to, but we need to make sure signing key is allowed
   497  			validKey := false
   498  			for _, k := range project.Spec.SignatureKeys {
   499  				if gpg.KeyID(k.KeyID) == gpg.KeyID(verifyResult.KeyID) && gpg.KeyID(k.KeyID) != "" {
   500  					validKey = true
   501  					break
   502  				}
   503  			}
   504  			if !validKey {
   505  				msg := fmt.Sprintf("Found good signature made with %s key %s, but this key is not allowed in AppProject",
   506  					verifyResult.Cipher, verifyResult.KeyID)
   507  				conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   508  			}
   509  		case gpg.VerifyResultInvalid:
   510  			msg := fmt.Sprintf("Found signature made with %s key %s, but verification result was invalid: '%s'",
   511  				verifyResult.Cipher, verifyResult.KeyID, verifyResult.Message)
   512  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   513  		default:
   514  			msg := fmt.Sprintf("Could not verify commit signature on revision '%s', check logs for more information.", revision)
   515  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   516  		}
   517  	} else {
   518  		msg := fmt.Sprintf("Target revision %s in Git is not signed, but a signature is required", revision)
   519  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   520  	}
   521  
   522  	return conditions
   523  }
   524  
   525  func isManagedNamespace(ns *unstructured.Unstructured, app *v1alpha1.Application) bool {
   526  	return ns != nil && ns.GetKind() == kubeutil.NamespaceKind && ns.GetName() == app.Spec.Destination.Namespace && app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.ManagedNamespaceMetadata != nil
   527  }
   528  
   529  // CompareAppState compares application git state to the live app state, using the specified
   530  // revision and supplied source. If revision or overrides are empty, then compares against
   531  // revision and overrides in the app spec.
   532  func (m *appStateManager) CompareAppState(app *v1alpha1.Application, project *v1alpha1.AppProject, revisions []string, sources []v1alpha1.ApplicationSource, noCache bool, noRevisionCache bool, localManifests []string, hasMultipleSources bool) (*comparisonResult, error) {
   533  	ts := stats.NewTimingStats()
   534  	logCtx := log.WithFields(applog.GetAppLogFields(app))
   535  
   536  	// Build initial sync status
   537  	syncStatus := &v1alpha1.SyncStatus{
   538  		ComparedTo: v1alpha1.ComparedTo{
   539  			Destination:       app.Spec.Destination,
   540  			IgnoreDifferences: app.Spec.IgnoreDifferences,
   541  		},
   542  		Status: v1alpha1.SyncStatusCodeUnknown,
   543  	}
   544  	if hasMultipleSources {
   545  		syncStatus.ComparedTo.Sources = sources
   546  		syncStatus.Revisions = revisions
   547  	} else {
   548  		if len(sources) > 0 {
   549  			syncStatus.ComparedTo.Source = sources[0]
   550  		} else {
   551  			logCtx.Warn("CompareAppState: sources should not be empty")
   552  		}
   553  		if len(revisions) > 0 {
   554  			syncStatus.Revision = revisions[0]
   555  		}
   556  	}
   557  
   558  	appLabelKey, resourceOverrides, resFilter, installationID, trackingMethod, err := m.getComparisonSettings()
   559  	ts.AddCheckpoint("settings_ms")
   560  	if err != nil {
   561  		log.Infof("Basic comparison settings cannot be loaded, using unknown comparison: %s", err.Error())
   562  		return &comparisonResult{syncStatus: syncStatus, healthStatus: health.HealthStatusUnknown}, nil
   563  	}
   564  
   565  	// When signature keys are defined in the project spec, we need to verify the signature on the Git revision
   566  	verifySignature := len(project.Spec.SignatureKeys) > 0 && gpg.IsGPGEnabled()
   567  
   568  	// do best effort loading live and target state to present as much information about app state as possible
   569  	failedToLoadObjs := false
   570  	conditions := make([]v1alpha1.ApplicationCondition, 0)
   571  
   572  	destCluster, err := argo.GetDestinationCluster(context.Background(), app.Spec.Destination, m.db)
   573  	if err != nil {
   574  		return nil, err
   575  	}
   576  
   577  	logCtx.Infof("Comparing app state (cluster: %s, namespace: %s)", app.Spec.Destination.Server, app.Spec.Destination.Namespace)
   578  
   579  	var targetObjs []*unstructured.Unstructured
   580  	now := metav1.Now()
   581  
   582  	var manifestInfos []*apiclient.ManifestResponse
   583  	targetNsExists := false
   584  
   585  	var revisionsMayHaveChanges bool
   586  
   587  	if len(localManifests) == 0 {
   588  		// If the length of revisions is not same as the length of sources,
   589  		// we take the revisions from the sources directly for all the sources.
   590  		if len(revisions) != len(sources) {
   591  			revisions = make([]string, 0)
   592  			for _, source := range sources {
   593  				revisions = append(revisions, source.TargetRevision)
   594  			}
   595  		}
   596  
   597  		targetObjs, manifestInfos, revisionsMayHaveChanges, err = m.GetRepoObjs(context.Background(), app, sources, appLabelKey, revisions, noCache, noRevisionCache, verifySignature, project, true)
   598  		if err != nil {
   599  			targetObjs = make([]*unstructured.Unstructured, 0)
   600  			msg := "Failed to load target state: " + err.Error()
   601  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   602  			if firstSeen, ok := m.repoErrorCache.Load(app.Name); ok {
   603  				if time.Since(firstSeen.(time.Time)) <= m.repoErrorGracePeriod && !noRevisionCache {
   604  					// if first seen is less than grace period and it's not a Level 3 comparison,
   605  					// ignore error and short circuit
   606  					logCtx.Debugf("Ignoring repo error %v, already encountered error in grace period", err.Error())
   607  					return nil, ErrCompareStateRepo
   608  				}
   609  			} else if !noRevisionCache {
   610  				logCtx.Debugf("Ignoring repo error %v, new occurrence", err.Error())
   611  				m.repoErrorCache.Store(app.Name, time.Now())
   612  				return nil, ErrCompareStateRepo
   613  			}
   614  			failedToLoadObjs = true
   615  		} else {
   616  			m.repoErrorCache.Delete(app.Name)
   617  		}
   618  	} else {
   619  		// Prevent applying local manifests for now when signature verification is enabled
   620  		// This is also enforced on API level, but as a last resort, we also enforce it here
   621  		if gpg.IsGPGEnabled() && verifySignature {
   622  			msg := "Cannot use local manifests when signature verification is required"
   623  			targetObjs = make([]*unstructured.Unstructured, 0)
   624  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   625  			failedToLoadObjs = true
   626  		} else {
   627  			targetObjs, err = unmarshalManifests(localManifests)
   628  			if err != nil {
   629  				targetObjs = make([]*unstructured.Unstructured, 0)
   630  				msg := "Failed to load local manifests: " + err.Error()
   631  				conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   632  				failedToLoadObjs = true
   633  			}
   634  		}
   635  		// empty out manifestInfoMap
   636  		manifestInfos = make([]*apiclient.ManifestResponse, 0)
   637  	}
   638  	ts.AddCheckpoint("git_ms")
   639  
   640  	var infoProvider kubeutil.ResourceInfoProvider
   641  	infoProvider, err = m.liveStateCache.GetClusterCache(destCluster)
   642  	if err != nil {
   643  		infoProvider = &resourceInfoProviderStub{}
   644  	}
   645  
   646  	err = normalizeClusterScopeTracking(targetObjs, infoProvider, func(u *unstructured.Unstructured) error {
   647  		return m.resourceTracking.SetAppInstance(u, appLabelKey, app.InstanceName(m.namespace), app.Spec.Destination.Namespace, v1alpha1.TrackingMethod(trackingMethod), installationID)
   648  	})
   649  	if err != nil {
   650  		msg := "Failed to normalize cluster-scoped resource tracking: " + err.Error()
   651  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   652  	}
   653  
   654  	targetObjs, dedupConditions, err := DeduplicateTargetObjects(app.Spec.Destination.Namespace, targetObjs, infoProvider)
   655  	if err != nil {
   656  		msg := "Failed to deduplicate target state: " + err.Error()
   657  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   658  	}
   659  	conditions = append(conditions, dedupConditions...)
   660  
   661  	for i := len(targetObjs) - 1; i >= 0; i-- {
   662  		targetObj := targetObjs[i]
   663  		gvk := targetObj.GroupVersionKind()
   664  		if resFilter.IsExcludedResource(gvk.Group, gvk.Kind, destCluster.Server) {
   665  			targetObjs = append(targetObjs[:i], targetObjs[i+1:]...)
   666  			conditions = append(conditions, v1alpha1.ApplicationCondition{
   667  				Type:               v1alpha1.ApplicationConditionExcludedResourceWarning,
   668  				Message:            fmt.Sprintf("Resource %s/%s %s is excluded in the settings", gvk.Group, gvk.Kind, targetObj.GetName()),
   669  				LastTransitionTime: &now,
   670  			})
   671  		}
   672  
   673  		// If we reach this path, this means that a namespace has been both defined in Git, as well in the
   674  		// application's managedNamespaceMetadata. We want to ensure that this manifest is the one being used instead
   675  		// of what is present in managedNamespaceMetadata.
   676  		if isManagedNamespace(targetObj, app) {
   677  			targetNsExists = true
   678  		}
   679  	}
   680  	ts.AddCheckpoint("dedup_ms")
   681  
   682  	liveObjByKey, err := m.liveStateCache.GetManagedLiveObjs(destCluster, app, targetObjs)
   683  	if err != nil {
   684  		liveObjByKey = make(map[kubeutil.ResourceKey]*unstructured.Unstructured)
   685  		msg := "Failed to load live state: " + err.Error()
   686  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   687  		failedToLoadObjs = true
   688  	}
   689  
   690  	logCtx.Debugf("Retrieved live manifests")
   691  	// filter out all resources which are not permitted in the application project
   692  	for k, v := range liveObjByKey {
   693  		permitted, err := project.IsLiveResourcePermitted(v, destCluster, func(project string) ([]*v1alpha1.Cluster, error) {
   694  			clusters, err := m.db.GetProjectClusters(context.TODO(), project)
   695  			if err != nil {
   696  				return nil, fmt.Errorf("failed to get clusters for project %q: %w", project, err)
   697  			}
   698  			return clusters, nil
   699  		})
   700  		if err != nil {
   701  			msg := fmt.Sprintf("Failed to check if live resource %q is permitted in project %q: %s", k.String(), app.Spec.Project, err.Error())
   702  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   703  			failedToLoadObjs = true
   704  			continue
   705  		}
   706  
   707  		if !permitted {
   708  			delete(liveObjByKey, k)
   709  		}
   710  	}
   711  
   712  	for _, liveObj := range liveObjByKey {
   713  		if liveObj != nil {
   714  			appInstanceName := m.resourceTracking.GetAppName(liveObj, appLabelKey, v1alpha1.TrackingMethod(trackingMethod), installationID)
   715  			if appInstanceName != "" && appInstanceName != app.InstanceName(m.namespace) {
   716  				fqInstanceName := strings.ReplaceAll(appInstanceName, "_", "/")
   717  				conditions = append(conditions, v1alpha1.ApplicationCondition{
   718  					Type:               v1alpha1.ApplicationConditionSharedResourceWarning,
   719  					Message:            fmt.Sprintf("%s/%s is part of applications %s and %s", liveObj.GetKind(), liveObj.GetName(), app.QualifiedName(), fqInstanceName),
   720  					LastTransitionTime: &now,
   721  				})
   722  			}
   723  
   724  			// For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking
   725  			// enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some
   726  			// bookkeeping in order to prevent the managed namespace from being pruned.
   727  			//
   728  			// Live namespaces which are managed namespaces (i.e. application namespaces which are managed with
   729  			// CreateNamespace=true and has non-nil managedNamespaceMetadata) will (usually) not have a corresponding
   730  			// entry in source control. In order for the namespace not to risk being pruned, we'll need to generate a
   731  			// namespace which we can compare the live namespace with. For that, we'll do the same as is done in
   732  			// gitops-engine, the difference here being that we create a managed namespace which is only used for comparison.
   733  			//
   734  			// targetNsExists == true implies that it already exists as a target, so no need to add the namespace to the
   735  			// targetObjs array.
   736  			if isManagedNamespace(liveObj, app) && !targetNsExists {
   737  				nsSpec := &corev1.Namespace{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: kubeutil.NamespaceKind}, ObjectMeta: metav1.ObjectMeta{Name: liveObj.GetName()}}
   738  				managedNs, err := kubeutil.ToUnstructured(nsSpec)
   739  				if err != nil {
   740  					conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
   741  					failedToLoadObjs = true
   742  					continue
   743  				}
   744  
   745  				// No need to care about the return value here, we just want the modified managedNs
   746  				_, err = syncNamespace(app.Spec.SyncPolicy)(managedNs, liveObj)
   747  				if err != nil {
   748  					conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: err.Error(), LastTransitionTime: &now})
   749  					failedToLoadObjs = true
   750  				} else {
   751  					targetObjs = append(targetObjs, managedNs)
   752  				}
   753  			}
   754  		}
   755  	}
   756  	hasPostDeleteHooks := false
   757  	for _, obj := range targetObjs {
   758  		if isPostDeleteHook(obj) {
   759  			hasPostDeleteHooks = true
   760  		}
   761  	}
   762  
   763  	reconciliation := sync.Reconcile(targetObjs, liveObjByKey, app.Spec.Destination.Namespace, infoProvider)
   764  	ts.AddCheckpoint("live_ms")
   765  
   766  	compareOptions, err := m.settingsMgr.GetResourceCompareOptions()
   767  	if err != nil {
   768  		log.Warnf("Could not get compare options from ConfigMap (assuming defaults): %v", err)
   769  		compareOptions = settings.GetDefaultDiffOptions()
   770  	}
   771  	manifestRevisions := make([]string, 0)
   772  
   773  	for _, manifestInfo := range manifestInfos {
   774  		manifestRevisions = append(manifestRevisions, manifestInfo.Revision)
   775  	}
   776  
   777  	serverSideDiff := m.serverSideDiff ||
   778  		resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=true")
   779  
   780  	// This allows turning SSD off for a given app if it is enabled at the
   781  	// controller level
   782  	if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "ServerSideDiff=false") {
   783  		serverSideDiff = false
   784  	}
   785  
   786  	useDiffCache := useDiffCache(noCache, manifestInfos, sources, app, manifestRevisions, m.statusRefreshTimeout, serverSideDiff, logCtx)
   787  
   788  	diffConfigBuilder := argodiff.NewDiffConfigBuilder().
   789  		WithDiffSettings(app.Spec.IgnoreDifferences, resourceOverrides, compareOptions.IgnoreAggregatedRoles, m.ignoreNormalizerOpts).
   790  		WithTracking(appLabelKey, string(trackingMethod))
   791  
   792  	if useDiffCache {
   793  		diffConfigBuilder.WithCache(m.cache, app.InstanceName(m.namespace))
   794  	} else {
   795  		diffConfigBuilder.WithNoCache()
   796  	}
   797  
   798  	if resourceutil.HasAnnotationOption(app, common.AnnotationCompareOptions, "IncludeMutationWebhook=true") {
   799  		diffConfigBuilder.WithIgnoreMutationWebhook(false)
   800  	}
   801  
   802  	gvkParser, err := m.getGVKParser(destCluster)
   803  	if err != nil {
   804  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
   805  	}
   806  	diffConfigBuilder.WithGVKParser(gvkParser)
   807  	diffConfigBuilder.WithManager(common.ArgoCDSSAManager)
   808  
   809  	diffConfigBuilder.WithServerSideDiff(serverSideDiff)
   810  
   811  	if serverSideDiff {
   812  		applier, cleanup, err := m.getServerSideDiffDryRunApplier(destCluster)
   813  		if err != nil {
   814  			log.Errorf("CompareAppState error getting server side diff dry run applier: %s", err)
   815  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionUnknownError, Message: err.Error(), LastTransitionTime: &now})
   816  		}
   817  		defer cleanup()
   818  		diffConfigBuilder.WithServerSideDryRunner(diff.NewK8sServerSideDryRunner(applier))
   819  	}
   820  
   821  	// enable structured merge diff if application syncs with server-side apply
   822  	if app.Spec.SyncPolicy != nil && app.Spec.SyncPolicy.SyncOptions.HasOption("ServerSideApply=true") {
   823  		diffConfigBuilder.WithStructuredMergeDiff(true)
   824  	}
   825  
   826  	// it is necessary to ignore the error at this point to avoid creating duplicated
   827  	// application conditions as argo.StateDiffs will validate this diffConfig again.
   828  	diffConfig, _ := diffConfigBuilder.Build()
   829  
   830  	diffResults, err := argodiff.StateDiffs(reconciliation.Live, reconciliation.Target, diffConfig)
   831  	if err != nil {
   832  		diffResults = &diff.DiffResultList{}
   833  		failedToLoadObjs = true
   834  		msg := "Failed to compare desired state to live state: " + err.Error()
   835  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: msg, LastTransitionTime: &now})
   836  	}
   837  	ts.AddCheckpoint("diff_ms")
   838  
   839  	syncCode := v1alpha1.SyncStatusCodeSynced
   840  	managedResources := make([]managedResource, len(reconciliation.Target))
   841  	resourceSummaries := make([]v1alpha1.ResourceStatus, len(reconciliation.Target))
   842  	for i, targetObj := range reconciliation.Target {
   843  		liveObj := reconciliation.Live[i]
   844  		obj := liveObj
   845  		if obj == nil {
   846  			obj = targetObj
   847  		}
   848  		if obj == nil {
   849  			continue
   850  		}
   851  		gvk := obj.GroupVersionKind()
   852  
   853  		isSelfReferencedObj := m.isSelfReferencedObj(liveObj, targetObj, app.GetName(), v1alpha1.TrackingMethod(trackingMethod), installationID)
   854  
   855  		resState := v1alpha1.ResourceStatus{
   856  			Namespace:       obj.GetNamespace(),
   857  			Name:            obj.GetName(),
   858  			Kind:            gvk.Kind,
   859  			Version:         gvk.Version,
   860  			Group:           gvk.Group,
   861  			Hook:            isHook(obj),
   862  			RequiresPruning: targetObj == nil && liveObj != nil && isSelfReferencedObj,
   863  			RequiresDeletionConfirmation: targetObj != nil && resourceutil.HasAnnotationOption(targetObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm) ||
   864  				liveObj != nil && resourceutil.HasAnnotationOption(liveObj, synccommon.AnnotationSyncOptions, synccommon.SyncOptionDeleteRequireConfirm),
   865  		}
   866  		if targetObj != nil {
   867  			resState.SyncWave = int64(syncwaves.Wave(targetObj))
   868  		}
   869  
   870  		var diffResult diff.DiffResult
   871  		if i < len(diffResults.Diffs) {
   872  			diffResult = diffResults.Diffs[i]
   873  		} else {
   874  			diffResult = diff.DiffResult{Modified: false, NormalizedLive: []byte("{}"), PredictedLive: []byte("{}")}
   875  		}
   876  
   877  		// For the case when a namespace is managed with `managedNamespaceMetadata` AND it has resource tracking
   878  		// enabled (e.g. someone manually adds resource tracking labels or annotations), we need to do some
   879  		// bookkeeping in order to ensure that it's not considered `OutOfSync` (since it does not exist in source
   880  		// control).
   881  		//
   882  		// This is in addition to the bookkeeping we do (see `isManagedNamespace` and its references) to prevent said
   883  		// namespace from being pruned.
   884  		isManagedNs := isManagedNamespace(targetObj, app) && liveObj == nil
   885  
   886  		switch {
   887  		case resState.Hook || ignore.Ignore(obj) || (targetObj != nil && hookutil.Skip(targetObj)) || !isSelfReferencedObj:
   888  			// For resource hooks, skipped resources or objects that may have
   889  			// been created by another controller with annotations copied from
   890  			// the source object, don't store sync status, and do not affect
   891  			// overall sync status
   892  		case !isManagedNs && (diffResult.Modified || targetObj == nil || liveObj == nil):
   893  			// Set resource state to OutOfSync since one of the following is true:
   894  			// * target and live resource are different
   895  			// * target resource not defined and live resource is extra
   896  			// * target resource present but live resource is missing
   897  			resState.Status = v1alpha1.SyncStatusCodeOutOfSync
   898  			// we ignore the status if the obj needs pruning AND we have the annotation
   899  			needsPruning := targetObj == nil && liveObj != nil
   900  			if !needsPruning || !resourceutil.HasAnnotationOption(obj, common.AnnotationCompareOptions, "IgnoreExtraneous") {
   901  				syncCode = v1alpha1.SyncStatusCodeOutOfSync
   902  			}
   903  		default:
   904  			resState.Status = v1alpha1.SyncStatusCodeSynced
   905  		}
   906  		// set unknown status to all resource that are not permitted in the app project
   907  		isNamespaced, err := m.liveStateCache.IsNamespaced(destCluster, gvk.GroupKind())
   908  		if !project.IsGroupKindPermitted(gvk.GroupKind(), isNamespaced && err == nil) {
   909  			resState.Status = v1alpha1.SyncStatusCodeUnknown
   910  		}
   911  
   912  		if isNamespaced && obj.GetNamespace() == "" {
   913  			conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionInvalidSpecError, Message: fmt.Sprintf("Namespace for %s %s is missing.", obj.GetName(), gvk.String()), LastTransitionTime: &now})
   914  		}
   915  
   916  		// we can't say anything about the status if we were unable to get the target objects
   917  		if failedToLoadObjs {
   918  			resState.Status = v1alpha1.SyncStatusCodeUnknown
   919  		}
   920  
   921  		resourceVersion := ""
   922  		if liveObj != nil {
   923  			resourceVersion = liveObj.GetResourceVersion()
   924  		}
   925  		managedResources[i] = managedResource{
   926  			Name:            resState.Name,
   927  			Namespace:       resState.Namespace,
   928  			Group:           resState.Group,
   929  			Kind:            resState.Kind,
   930  			Version:         resState.Version,
   931  			Live:            liveObj,
   932  			Target:          targetObj,
   933  			Diff:            diffResult,
   934  			Hook:            resState.Hook,
   935  			ResourceVersion: resourceVersion,
   936  		}
   937  		resourceSummaries[i] = resState
   938  	}
   939  
   940  	if failedToLoadObjs {
   941  		syncCode = v1alpha1.SyncStatusCodeUnknown
   942  	} else if app.HasChangedManagedNamespaceMetadata() {
   943  		syncCode = v1alpha1.SyncStatusCodeOutOfSync
   944  	}
   945  
   946  	syncStatus.Status = syncCode
   947  
   948  	// Update the initial revision to the resolved manifest SHA
   949  	if hasMultipleSources {
   950  		syncStatus.Revisions = manifestRevisions
   951  	} else if len(manifestRevisions) > 0 {
   952  		syncStatus.Revision = manifestRevisions[0]
   953  	}
   954  
   955  	ts.AddCheckpoint("sync_ms")
   956  
   957  	healthStatus, err := setApplicationHealth(managedResources, resourceSummaries, resourceOverrides, app, m.persistResourceHealth)
   958  	if err != nil {
   959  		conditions = append(conditions, v1alpha1.ApplicationCondition{Type: v1alpha1.ApplicationConditionComparisonError, Message: "error setting app health: " + err.Error(), LastTransitionTime: &now})
   960  	}
   961  
   962  	// Git has already performed the signature verification via its GPG interface, and the result is available
   963  	// in the manifest info received from the repository server. We now need to form our opinion about the result
   964  	// and stop processing if we do not agree about the outcome.
   965  	for _, manifestInfo := range manifestInfos {
   966  		if gpg.IsGPGEnabled() && verifySignature && manifestInfo != nil {
   967  			conditions = append(conditions, verifyGnuPGSignature(manifestInfo.Revision, project, manifestInfo)...)
   968  		}
   969  	}
   970  
   971  	compRes := comparisonResult{
   972  		syncStatus:              syncStatus,
   973  		healthStatus:            healthStatus,
   974  		resources:               resourceSummaries,
   975  		managedResources:        managedResources,
   976  		reconciliationResult:    reconciliation,
   977  		diffConfig:              diffConfig,
   978  		diffResultList:          diffResults,
   979  		hasPostDeleteHooks:      hasPostDeleteHooks,
   980  		revisionsMayHaveChanges: revisionsMayHaveChanges,
   981  	}
   982  
   983  	if hasMultipleSources {
   984  		for _, manifestInfo := range manifestInfos {
   985  			compRes.appSourceTypes = append(compRes.appSourceTypes, v1alpha1.ApplicationSourceType(manifestInfo.SourceType))
   986  		}
   987  	} else {
   988  		for _, manifestInfo := range manifestInfos {
   989  			compRes.appSourceType = v1alpha1.ApplicationSourceType(manifestInfo.SourceType)
   990  			break
   991  		}
   992  	}
   993  
   994  	app.Status.SetConditions(conditions, map[v1alpha1.ApplicationConditionType]bool{
   995  		v1alpha1.ApplicationConditionComparisonError:         true,
   996  		v1alpha1.ApplicationConditionSharedResourceWarning:   true,
   997  		v1alpha1.ApplicationConditionRepeatedResourceWarning: true,
   998  		v1alpha1.ApplicationConditionExcludedResourceWarning: true,
   999  	})
  1000  	ts.AddCheckpoint("health_ms")
  1001  	compRes.timings = ts.Timings()
  1002  	return &compRes, nil
  1003  }
  1004  
  1005  // useDiffCache will determine if the diff should be calculated based
  1006  // on the existing live state cache or not.
  1007  func useDiffCache(noCache bool, manifestInfos []*apiclient.ManifestResponse, sources []v1alpha1.ApplicationSource, app *v1alpha1.Application, manifestRevisions []string, statusRefreshTimeout time.Duration, serverSideDiff bool, log *log.Entry) bool {
  1008  	if noCache {
  1009  		log.WithField("useDiffCache", "false").Debug("noCache is true")
  1010  		return false
  1011  	}
  1012  	refreshType, refreshRequested := app.IsRefreshRequested()
  1013  	if refreshRequested {
  1014  		log.WithField("useDiffCache", "false").Debugf("refresh type %s requested", string(refreshType))
  1015  		return false
  1016  	}
  1017  	// serverSideDiff should still use cache even if status is expired.
  1018  	// This is an attempt to avoid hitting k8s API server too frequently during
  1019  	// app refresh with serverSideDiff is enabled. If there are negative side
  1020  	// effects identified with this approach, the serverSideDiff should be removed
  1021  	// from this condition.
  1022  	if app.Status.Expired(statusRefreshTimeout) && !serverSideDiff {
  1023  		log.WithField("useDiffCache", "false").Debug("app.status.expired")
  1024  		return false
  1025  	}
  1026  
  1027  	if len(manifestInfos) != len(sources) {
  1028  		log.WithField("useDiffCache", "false").Debug("manifestInfos len != sources len")
  1029  		return false
  1030  	}
  1031  
  1032  	revisionChanged := !reflect.DeepEqual(app.Status.GetRevisions(), manifestRevisions)
  1033  	if revisionChanged {
  1034  		log.WithField("useDiffCache", "false").Debug("revisionChanged")
  1035  		return false
  1036  	}
  1037  
  1038  	if !specEqualsCompareTo(app.Spec, sources, app.Status.Sync.ComparedTo) {
  1039  		log.WithField("useDiffCache", "false").Debug("specChanged")
  1040  		return false
  1041  	}
  1042  
  1043  	log.WithField("useDiffCache", "true").Debug("using diff cache")
  1044  	return true
  1045  }
  1046  
  1047  // specEqualsCompareTo compares the application spec to the comparedTo status. It normalizes the destination to match
  1048  // the comparedTo destination before comparing. It does not mutate the original spec or comparedTo.
  1049  func specEqualsCompareTo(spec v1alpha1.ApplicationSpec, sources []v1alpha1.ApplicationSource, comparedTo v1alpha1.ComparedTo) bool {
  1050  	// Make a copy to be sure we don't mutate the original.
  1051  	specCopy := spec.DeepCopy()
  1052  	compareToSpec := specCopy.BuildComparedToStatus(sources)
  1053  	return reflect.DeepEqual(comparedTo, compareToSpec)
  1054  }
  1055  
  1056  func (m *appStateManager) persistRevisionHistory(
  1057  	app *v1alpha1.Application,
  1058  	revision string,
  1059  	source v1alpha1.ApplicationSource,
  1060  	revisions []string,
  1061  	sources []v1alpha1.ApplicationSource,
  1062  	hasMultipleSources bool,
  1063  	startedAt metav1.Time,
  1064  	initiatedBy v1alpha1.OperationInitiator,
  1065  ) error {
  1066  	var nextID int64
  1067  	if len(app.Status.History) > 0 {
  1068  		nextID = app.Status.History.LastRevisionHistory().ID + 1
  1069  	}
  1070  
  1071  	if hasMultipleSources {
  1072  		app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
  1073  			DeployedAt:      metav1.NewTime(time.Now().UTC()),
  1074  			DeployStartedAt: &startedAt,
  1075  			ID:              nextID,
  1076  			Sources:         sources,
  1077  			Revisions:       revisions,
  1078  			InitiatedBy:     initiatedBy,
  1079  		})
  1080  	} else {
  1081  		app.Status.History = append(app.Status.History, v1alpha1.RevisionHistory{
  1082  			Revision:        revision,
  1083  			DeployedAt:      metav1.NewTime(time.Now().UTC()),
  1084  			DeployStartedAt: &startedAt,
  1085  			ID:              nextID,
  1086  			Source:          source,
  1087  			InitiatedBy:     initiatedBy,
  1088  		})
  1089  	}
  1090  
  1091  	app.Status.History = app.Status.History.Trunc(app.Spec.GetRevisionHistoryLimit())
  1092  
  1093  	patch, err := json.Marshal(map[string]map[string][]v1alpha1.RevisionHistory{
  1094  		"status": {
  1095  			"history": app.Status.History,
  1096  		},
  1097  	})
  1098  	if err != nil {
  1099  		return fmt.Errorf("error marshaling revision history patch: %w", err)
  1100  	}
  1101  	_, err = m.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Patch(context.Background(), app.Name, types.MergePatchType, patch, metav1.PatchOptions{})
  1102  	return err
  1103  }
  1104  
  1105  // NewAppStateManager creates new instance of AppStateManager
  1106  func NewAppStateManager(
  1107  	db db.ArgoDB,
  1108  	appclientset appclientset.Interface,
  1109  	repoClientset apiclient.Clientset,
  1110  	namespace string,
  1111  	kubectl kubeutil.Kubectl,
  1112  	onKubectlRun kubeutil.OnKubectlRunFunc,
  1113  	settingsMgr *settings.SettingsManager,
  1114  	liveStateCache statecache.LiveStateCache,
  1115  	metricsServer *metrics.MetricsServer,
  1116  	cache *appstatecache.Cache,
  1117  	statusRefreshTimeout time.Duration,
  1118  	resourceTracking argo.ResourceTracking,
  1119  	persistResourceHealth bool,
  1120  	repoErrorGracePeriod time.Duration,
  1121  	serverSideDiff bool,
  1122  	ignoreNormalizerOpts normalizers.IgnoreNormalizerOpts,
  1123  ) AppStateManager {
  1124  	return &appStateManager{
  1125  		liveStateCache:        liveStateCache,
  1126  		cache:                 cache,
  1127  		db:                    db,
  1128  		appclientset:          appclientset,
  1129  		kubectl:               kubectl,
  1130  		onKubectlRun:          onKubectlRun,
  1131  		repoClientset:         repoClientset,
  1132  		namespace:             namespace,
  1133  		settingsMgr:           settingsMgr,
  1134  		metricsServer:         metricsServer,
  1135  		statusRefreshTimeout:  statusRefreshTimeout,
  1136  		resourceTracking:      resourceTracking,
  1137  		persistResourceHealth: persistResourceHealth,
  1138  		repoErrorGracePeriod:  repoErrorGracePeriod,
  1139  		serverSideDiff:        serverSideDiff,
  1140  		ignoreNormalizerOpts:  ignoreNormalizerOpts,
  1141  	}
  1142  }
  1143  
  1144  // isSelfReferencedObj returns whether the given obj is managed by the application
  1145  // according to the values of the tracking id (aka app instance value) annotation.
  1146  // It returns true when all of the properties of the tracking id (app name, namespace,
  1147  // group and kind) match the properties of the live object, or if the tracking method
  1148  // used does not provide the required properties for matching.
  1149  // Reference: https://github.com/argoproj/argo-cd/issues/8683
  1150  func (m *appStateManager) isSelfReferencedObj(live, config *unstructured.Unstructured, appName string, trackingMethod v1alpha1.TrackingMethod, installationID string) bool {
  1151  	if live == nil {
  1152  		return true
  1153  	}
  1154  
  1155  	// If tracking method doesn't contain required metadata for this check,
  1156  	// we are not able to determine and just assume the object to be managed.
  1157  	if trackingMethod == v1alpha1.TrackingMethodLabel {
  1158  		return true
  1159  	}
  1160  
  1161  	// config != nil is the best-case scenario for constructing an accurate
  1162  	// Tracking ID. `config` is the "desired state" (from git/helm/etc.).
  1163  	// Using the desired state is important when there is an ApiGroup upgrade.
  1164  	// When upgrading, the comparison must be made with the new tracking ID.
  1165  	// Example:
  1166  	//     live resource annotation will be:
  1167  	//        ingress-app:extensions/Ingress:default/some-ingress
  1168  	//     when it should be:
  1169  	//        ingress-app:networking.k8s.io/Ingress:default/some-ingress
  1170  	// More details in: https://github.com/argoproj/argo-cd/pull/11012
  1171  	var aiv argo.AppInstanceValue
  1172  	if config != nil {
  1173  		aiv = argo.UnstructuredToAppInstanceValue(config, appName, "")
  1174  		return isSelfReferencedObj(live, aiv)
  1175  	}
  1176  
  1177  	// If config is nil then compare the live resource with the value
  1178  	// of the annotation. In this case, in order to validate if obj is
  1179  	// managed by this application, the values from the annotation have
  1180  	// to match the properties from the live object. Cluster scoped objects
  1181  	// carry the app's destination namespace in the tracking annotation,
  1182  	// but are unique in GVK + name combination.
  1183  	appInstance := m.resourceTracking.GetAppInstance(live, trackingMethod, installationID)
  1184  	if appInstance != nil {
  1185  		return isSelfReferencedObj(live, *appInstance)
  1186  	}
  1187  	return true
  1188  }
  1189  
  1190  // isSelfReferencedObj returns true if the given Tracking ID (`aiv`) matches
  1191  // the given object. It returns false when the ID doesn't match. This sometimes
  1192  // happens when a tracking label or annotation gets accidentally copied to a
  1193  // different resource.
  1194  func isSelfReferencedObj(obj *unstructured.Unstructured, aiv argo.AppInstanceValue) bool {
  1195  	return (obj.GetNamespace() == aiv.Namespace || obj.GetNamespace() == "") &&
  1196  		obj.GetName() == aiv.Name &&
  1197  		obj.GetObjectKind().GroupVersionKind().Group == aiv.Group &&
  1198  		obj.GetObjectKind().GroupVersionKind().Kind == aiv.Kind
  1199  }