github.com/argoproj/argo-cd/v3@v3.2.1/server/application/application.go (about)

     1  package application
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"errors"
     7  	"fmt"
     8  	"math"
     9  	"reflect"
    10  	"slices"
    11  	"sort"
    12  	"strconv"
    13  	"strings"
    14  	"time"
    15  
    16  	cacheutil "github.com/argoproj/argo-cd/v3/util/cache"
    17  
    18  	kubecache "github.com/argoproj/gitops-engine/pkg/cache"
    19  	"github.com/argoproj/gitops-engine/pkg/diff"
    20  	"github.com/argoproj/gitops-engine/pkg/health"
    21  	"github.com/argoproj/gitops-engine/pkg/sync/common"
    22  	"github.com/argoproj/gitops-engine/pkg/utils/kube"
    23  	"github.com/argoproj/gitops-engine/pkg/utils/text"
    24  	"github.com/argoproj/pkg/v2/sync"
    25  	jsonpatch "github.com/evanphx/json-patch"
    26  	log "github.com/sirupsen/logrus"
    27  	"google.golang.org/grpc/codes"
    28  	"google.golang.org/grpc/status"
    29  	corev1 "k8s.io/api/core/v1"
    30  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    33  	"k8s.io/apimachinery/pkg/fields"
    34  	"k8s.io/apimachinery/pkg/labels"
    35  	"k8s.io/apimachinery/pkg/runtime/schema"
    36  	"k8s.io/apimachinery/pkg/types"
    37  	"k8s.io/apimachinery/pkg/watch"
    38  	"k8s.io/client-go/kubernetes"
    39  	"k8s.io/client-go/rest"
    40  	"k8s.io/client-go/tools/cache"
    41  	"k8s.io/utils/ptr"
    42  
    43  	argocommon "github.com/argoproj/argo-cd/v3/common"
    44  	"github.com/argoproj/argo-cd/v3/pkg/apiclient/application"
    45  	"github.com/argoproj/argo-cd/v3/pkg/apis/application/v1alpha1"
    46  
    47  	appclientset "github.com/argoproj/argo-cd/v3/pkg/client/clientset/versioned"
    48  	applisters "github.com/argoproj/argo-cd/v3/pkg/client/listers/application/v1alpha1"
    49  	"github.com/argoproj/argo-cd/v3/reposerver/apiclient"
    50  	servercache "github.com/argoproj/argo-cd/v3/server/cache"
    51  	"github.com/argoproj/argo-cd/v3/server/deeplinks"
    52  	applog "github.com/argoproj/argo-cd/v3/util/app/log"
    53  	"github.com/argoproj/argo-cd/v3/util/argo"
    54  	"github.com/argoproj/argo-cd/v3/util/collections"
    55  	"github.com/argoproj/argo-cd/v3/util/db"
    56  	"github.com/argoproj/argo-cd/v3/util/env"
    57  	"github.com/argoproj/argo-cd/v3/util/git"
    58  	utilio "github.com/argoproj/argo-cd/v3/util/io"
    59  	"github.com/argoproj/argo-cd/v3/util/lua"
    60  	"github.com/argoproj/argo-cd/v3/util/manifeststream"
    61  	"github.com/argoproj/argo-cd/v3/util/rbac"
    62  	"github.com/argoproj/argo-cd/v3/util/security"
    63  	"github.com/argoproj/argo-cd/v3/util/session"
    64  	"github.com/argoproj/argo-cd/v3/util/settings"
    65  
    66  	resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource"
    67  
    68  	applicationType "github.com/argoproj/argo-cd/v3/pkg/apis/application"
    69  	argodiff "github.com/argoproj/argo-cd/v3/util/argo/diff"
    70  	"github.com/argoproj/argo-cd/v3/util/argo/normalizers"
    71  	kubeutil "github.com/argoproj/argo-cd/v3/util/kube"
    72  )
    73  
// AppResourceTreeFn returns the resource tree for the given application.
type AppResourceTreeFn func(ctx context.Context, app *v1alpha1.Application) (*v1alpha1.ApplicationTree, error)
    75  
const (
	// backgroundPropagationPolicy and foregroundPropagationPolicy are the
	// Kubernetes cascading-deletion propagation policy values.
	backgroundPropagationPolicy string = "background"
	foregroundPropagationPolicy string = "foreground"
)
    80  
var (
	// ErrCacheMiss re-exports the util/cache sentinel so callers of this
	// package can detect cache misses without importing util/cache directly.
	ErrCacheMiss = cacheutil.ErrCacheMiss
	// watchAPIBufferSize is read from the env var named by
	// argocommon.EnvWatchAPIBufferSize (default 1000, clamped to [0, MaxInt32]).
	watchAPIBufferSize = env.ParseNumFromEnv(argocommon.EnvWatchAPIBufferSize, 1000, 0, math.MaxInt32)
)
    85  
// Server provides an Application service
type Server struct {
	// ns is the namespace the API server (control plane) runs in; it is also
	// the default namespace for applications that do not specify one.
	ns                     string
	kubeclientset          kubernetes.Interface
	appclientset           appclientset.Interface
	appLister              applisters.ApplicationLister
	appInformer            cache.SharedIndexInformer
	appBroadcaster         Broadcaster
	repoClientset          apiclient.Clientset
	kubectl                kube.Kubectl
	db                     db.ArgoDB
	enf                    *rbac.Enforcer
	// projectLock serializes project-scoped operations (e.g. app creation
	// takes a read lock keyed by project name).
	projectLock            sync.KeyLock
	auditLogger            *argo.AuditLogger
	settingsMgr            *settings.SettingsManager
	cache                  *servercache.Cache
	projInformer           cache.SharedIndexInformer
	// enabledNamespaces lists the namespaces (besides ns) from which
	// applications may be served; see isNamespaceEnabled.
	enabledNamespaces      []string
	// syncWithReplaceAllowed presumably gates whether syncs may use
	// replace semantics — NOTE(review): confirm against the sync handler.
	syncWithReplaceAllowed bool
}
   106  
   107  // NewServer returns a new instance of the Application service
   108  func NewServer(
   109  	namespace string,
   110  	kubeclientset kubernetes.Interface,
   111  	appclientset appclientset.Interface,
   112  	appLister applisters.ApplicationLister,
   113  	appInformer cache.SharedIndexInformer,
   114  	appBroadcaster Broadcaster,
   115  	repoClientset apiclient.Clientset,
   116  	cache *servercache.Cache,
   117  	kubectl kube.Kubectl,
   118  	db db.ArgoDB,
   119  	enf *rbac.Enforcer,
   120  	projectLock sync.KeyLock,
   121  	settingsMgr *settings.SettingsManager,
   122  	projInformer cache.SharedIndexInformer,
   123  	enabledNamespaces []string,
   124  	enableK8sEvent []string,
   125  	syncWithReplaceAllowed bool,
   126  ) (application.ApplicationServiceServer, AppResourceTreeFn) {
   127  	if appBroadcaster == nil {
   128  		appBroadcaster = &broadcasterHandler{}
   129  	}
   130  	_, err := appInformer.AddEventHandler(appBroadcaster)
   131  	if err != nil {
   132  		log.Error(err)
   133  	}
   134  	s := &Server{
   135  		ns:                     namespace,
   136  		appclientset:           &deepCopyAppClientset{appclientset},
   137  		appLister:              &deepCopyApplicationLister{appLister},
   138  		appInformer:            appInformer,
   139  		appBroadcaster:         appBroadcaster,
   140  		kubeclientset:          kubeclientset,
   141  		cache:                  cache,
   142  		db:                     db,
   143  		repoClientset:          repoClientset,
   144  		kubectl:                kubectl,
   145  		enf:                    enf,
   146  		projectLock:            projectLock,
   147  		auditLogger:            argo.NewAuditLogger(kubeclientset, "argocd-server", enableK8sEvent),
   148  		settingsMgr:            settingsMgr,
   149  		projInformer:           projInformer,
   150  		enabledNamespaces:      enabledNamespaces,
   151  		syncWithReplaceAllowed: syncWithReplaceAllowed,
   152  	}
   153  	return s, s.getAppResources
   154  }
   155  
// getAppEnforceRBAC gets the Application with the given name in the given namespace. If no namespace is
// specified, the Application is fetched from the default namespace (the one in which the API server is running).
//
// If the user does not provide a "project," then we have to be very careful how we respond. If an app with the given
// name exists, and the user has access to that app in the app's project, we return the app. If the app exists but the
// user does not have access, we return "permission denied." If the app does not exist, we return "permission denied" -
// if we responded with a 404, then the user could infer that the app exists when they get "permission denied."
//
// If the user does provide a "project," we can respond more specifically. If the user does not have access to the given
// app name in the given project, we return "permission denied." If the app exists, but the project is different from
// the one the user specified, we return a 404 — from the user's point of view the app does not exist in that project,
// and this avoids leaking its existence elsewhere. If the app exists in the given project and the user has access,
// we return the app along with its resolved AppProject.
func (s *Server) getAppEnforceRBAC(ctx context.Context, action, project, namespace, name string, getApp func() (*v1alpha1.Application, error)) (*v1alpha1.Application, *v1alpha1.AppProject, error) {
	user := session.Username(ctx)
	if user == "" {
		user = "Unknown user"
	}
	logCtx := log.WithFields(map[string]any{
		"user":        user,
		"application": name,
		"namespace":   namespace,
	})
	if project != "" {
		// The user has provided everything we need to perform an initial RBAC check.
		givenRBACName := security.RBACName(s.ns, project, namespace, name)
		if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, action, givenRBACName); err != nil {
			logCtx.WithFields(map[string]any{
				"project":                project,
				argocommon.SecurityField: argocommon.SecurityMedium,
			}).Warnf("user tried to %s application which they do not have access to: %s", action, err)
			// Do a GET on the app. This ensures that the timing of a "no access" response is the same as a "yes access,
			// but the app is in a different project" response. We don't want the user inferring the existence of the
			// app from response time. The result is deliberately discarded.
			_, _ = getApp()
			return nil, nil, argocommon.PermissionDeniedAPIError
		}
	}
	a, err := getApp()
	if err != nil {
		if apierrors.IsNotFound(err) {
			if project != "" {
				// We know that the user was allowed to get the Application, but the Application does not exist. Return 404.
				return nil, nil, status.Error(codes.NotFound, apierrors.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "applications"}, name).Error())
			}
			// We don't know if the user was allowed to get the Application, and we don't want to leak information about
			// the Application's existence. Return 403.
			logCtx.Warn("application does not exist")
			return nil, nil, argocommon.PermissionDeniedAPIError
		}
		// Any other lookup failure is also masked as permission denied to avoid
		// leaking information; the real error is only logged server-side.
		logCtx.Errorf("failed to get application: %s", err)
		return nil, nil, argocommon.PermissionDeniedAPIError
	}
	// Even if we performed an initial RBAC check (because the request was fully parameterized), we still need to
	// perform a second RBAC check to ensure that the user has access to the actual Application's project (not just the
	// project they specified in the request).
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, action, a.RBACName(s.ns)); err != nil {
		logCtx.WithFields(map[string]any{
			"project":                a.Spec.Project,
			argocommon.SecurityField: argocommon.SecurityMedium,
		}).Warnf("user tried to %s application which they do not have access to: %s", action, err)
		if project != "" {
			// The user specified a project. We would have returned a 404 if the user had access to the app, but the app
			// did not exist. So we have to return a 404 when the app does exist, but the user does not have access.
			// Otherwise, they could infer that the app exists based on the error code.
			return nil, nil, status.Error(codes.NotFound, apierrors.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "applications"}, name).Error())
		}
		// The user didn't specify a project. We always return permission denied for both lack of access and lack of
		// existence.
		return nil, nil, argocommon.PermissionDeniedAPIError
	}
	// An empty spec.project means the app belongs to the "default" project.
	effectiveProject := "default"
	if a.Spec.Project != "" {
		effectiveProject = a.Spec.Project
	}
	if project != "" && effectiveProject != project {
		logCtx.WithFields(map[string]any{
			"project":                a.Spec.Project,
			argocommon.SecurityField: argocommon.SecurityMedium,
		}).Warnf("user tried to %s application in project %s, but the application is in project %s", action, project, effectiveProject)
		// The user has access to the app, but the app is in a different project. Return 404, meaning "app doesn't
		// exist in that project".
		return nil, nil, status.Error(codes.NotFound, apierrors.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "applications"}, name).Error())
	}
	// Get the app's associated project, and make sure all project restrictions are enforced.
	proj, err := s.getAppProject(ctx, a, logCtx)
	if err != nil {
		return a, nil, err
	}
	return a, proj, nil
}
   244  
   245  // getApplicationEnforceRBACInformer uses an informer to get an Application. If the app does not exist, permission is
   246  // denied, or any other error occurs when getting the app, we return a permission denied error to obscure any sensitive
   247  // information.
   248  func (s *Server) getApplicationEnforceRBACInformer(ctx context.Context, action, project, namespace, name string) (*v1alpha1.Application, *v1alpha1.AppProject, error) {
   249  	namespaceOrDefault := s.appNamespaceOrDefault(namespace)
   250  	return s.getAppEnforceRBAC(ctx, action, project, namespaceOrDefault, name, func() (*v1alpha1.Application, error) {
   251  		if !s.isNamespaceEnabled(namespaceOrDefault) {
   252  			return nil, security.NamespaceNotPermittedError(namespaceOrDefault)
   253  		}
   254  		return s.appLister.Applications(namespaceOrDefault).Get(name)
   255  	})
   256  }
   257  
   258  // getApplicationEnforceRBACClient uses a client to get an Application. If the app does not exist, permission is denied,
   259  // or any other error occurs when getting the app, we return a permission denied error to obscure any sensitive
   260  // information.
   261  func (s *Server) getApplicationEnforceRBACClient(ctx context.Context, action, project, namespace, name, resourceVersion string) (*v1alpha1.Application, *v1alpha1.AppProject, error) {
   262  	namespaceOrDefault := s.appNamespaceOrDefault(namespace)
   263  	return s.getAppEnforceRBAC(ctx, action, project, namespaceOrDefault, name, func() (*v1alpha1.Application, error) {
   264  		if !s.isNamespaceEnabled(namespaceOrDefault) {
   265  			return nil, security.NamespaceNotPermittedError(namespaceOrDefault)
   266  		}
   267  		app, err := s.appclientset.ArgoprojV1alpha1().Applications(namespaceOrDefault).Get(ctx, name, metav1.GetOptions{
   268  			ResourceVersion: resourceVersion,
   269  		})
   270  		if err != nil {
   271  			return nil, err
   272  		}
   273  		return app, nil
   274  	})
   275  }
   276  
   277  // List returns list of applications
   278  func (s *Server) List(ctx context.Context, q *application.ApplicationQuery) (*v1alpha1.ApplicationList, error) {
   279  	selector, err := labels.Parse(q.GetSelector())
   280  	if err != nil {
   281  		return nil, fmt.Errorf("error parsing the selector: %w", err)
   282  	}
   283  	var apps []*v1alpha1.Application
   284  	if q.GetAppNamespace() == "" {
   285  		apps, err = s.appLister.List(selector)
   286  	} else {
   287  		apps, err = s.appLister.Applications(q.GetAppNamespace()).List(selector)
   288  	}
   289  	if err != nil {
   290  		return nil, fmt.Errorf("error listing apps with selectors: %w", err)
   291  	}
   292  
   293  	filteredApps := apps
   294  	// Filter applications by name
   295  	if q.Name != nil {
   296  		filteredApps = argo.FilterByNameP(filteredApps, *q.Name)
   297  	}
   298  
   299  	// Filter applications by projects
   300  	filteredApps = argo.FilterByProjectsP(filteredApps, getProjectsFromApplicationQuery(*q))
   301  
   302  	// Filter applications by source repo URL
   303  	filteredApps = argo.FilterByRepoP(filteredApps, q.GetRepo())
   304  
   305  	newItems := make([]v1alpha1.Application, 0)
   306  	for _, a := range filteredApps {
   307  		// Skip any application that is neither in the control plane's namespace
   308  		// nor in the list of enabled namespaces.
   309  		if !s.isNamespaceEnabled(a.Namespace) {
   310  			continue
   311  		}
   312  		if s.enf.Enforce(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionGet, a.RBACName(s.ns)) {
   313  			newItems = append(newItems, *a)
   314  		}
   315  	}
   316  
   317  	// Sort found applications by name
   318  	sort.Slice(newItems, func(i, j int) bool {
   319  		return newItems[i].Name < newItems[j].Name
   320  	})
   321  
   322  	appList := v1alpha1.ApplicationList{
   323  		ListMeta: metav1.ListMeta{
   324  			ResourceVersion: s.appInformer.LastSyncResourceVersion(),
   325  		},
   326  		Items: newItems,
   327  	}
   328  	return &appList, nil
   329  }
   330  
// Create creates an application. The call is effectively idempotent: if an
// identical application already exists it is returned as-is; an existing but
// different application is only overwritten when the Upsert flag is set,
// which additionally requires update RBAC permission.
func (s *Server) Create(ctx context.Context, q *application.ApplicationCreateRequest) (*v1alpha1.Application, error) {
	if q.GetApplication() == nil {
		return nil, errors.New("error creating application: application is nil in request")
	}
	a := q.GetApplication()

	if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionCreate, a.RBACName(s.ns)); err != nil {
		return nil, err
	}

	// Hold a read lock keyed by the app's project for the duration of the
	// request — presumably to prevent concurrent project mutation/deletion
	// while creating; confirm against project write paths.
	s.projectLock.RLock(a.Spec.GetProject())
	defer s.projectLock.RUnlock(a.Spec.GetProject())

	// Validation is on by default and only skipped when explicitly disabled.
	validate := true
	if q.Validate != nil {
		validate = *q.Validate
	}

	proj, err := s.getAppProject(ctx, a, log.WithFields(applog.GetAppLogFields(a)))
	if err != nil {
		return nil, err
	}

	err = s.validateAndNormalizeApp(ctx, a, proj, validate)
	if err != nil {
		return nil, fmt.Errorf("error while validating and normalizing app: %w", err)
	}

	appNs := s.appNamespaceOrDefault(a.Namespace)

	if !s.isNamespaceEnabled(appNs) {
		return nil, security.NamespaceNotPermittedError(appNs)
	}

	// Don't let the app creator set the operation explicitly. Those requests should always go through the Sync API.
	if a.Operation != nil {
		log.WithFields(applog.GetAppLogFields(a)).
			WithFields(log.Fields{
				argocommon.SecurityField: argocommon.SecurityLow,
			}).Warn("User attempted to set operation on application creation. This could have allowed them to bypass branch protection rules by setting manifests directly. Ignoring the set operation.")
		a.Operation = nil
	}

	created, err := s.appclientset.ArgoprojV1alpha1().Applications(appNs).Create(ctx, a, metav1.CreateOptions{})
	if err == nil {
		// Success: emit an audit event and wait for the informer to observe
		// the new app before returning.
		s.logAppEvent(ctx, created, argo.EventReasonResourceCreated, "created application")
		s.waitSync(created)
		return created, nil
	}
	if !apierrors.IsAlreadyExists(err) {
		return nil, fmt.Errorf("error creating application: %w", err)
	}

	// act idempotent if existing spec matches new spec
	existing, err := s.appLister.Applications(appNs).Get(a.Name)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "unable to check existing application details (%s): %v", appNs, err)
	}

	// Compare spec, labels, annotations, and finalizers to decide whether the
	// request matches the existing app.
	equalSpecs := reflect.DeepEqual(existing.Spec.Destination, a.Spec.Destination) &&
		reflect.DeepEqual(existing.Spec, a.Spec) &&
		reflect.DeepEqual(existing.Labels, a.Labels) &&
		reflect.DeepEqual(existing.Annotations, a.Annotations) &&
		reflect.DeepEqual(existing.Finalizers, a.Finalizers)

	if equalSpecs {
		// Identical request: act idempotently and return the existing app.
		return existing, nil
	}
	if q.Upsert == nil || !*q.Upsert {
		return nil, status.Errorf(codes.InvalidArgument, "existing application spec is different, use upsert flag to force update")
	}
	// Upsert overwrites an existing app, so it also requires update permission.
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionUpdate, a.RBACName(s.ns)); err != nil {
		return nil, err
	}
	updated, err := s.updateApp(ctx, existing, a, true)
	if err != nil {
		return nil, fmt.Errorf("error updating application: %w", err)
	}
	return updated, nil
}
   412  
   413  func (s *Server) queryRepoServer(ctx context.Context, proj *v1alpha1.AppProject, action func(
   414  	client apiclient.RepoServerServiceClient,
   415  	helmRepos []*v1alpha1.Repository,
   416  	helmCreds []*v1alpha1.RepoCreds,
   417  	ociRepos []*v1alpha1.Repository,
   418  	ociCreds []*v1alpha1.RepoCreds,
   419  	helmOptions *v1alpha1.HelmOptions,
   420  	enabledSourceTypes map[string]bool,
   421  ) error,
   422  ) error {
   423  	closer, client, err := s.repoClientset.NewRepoServerClient()
   424  	if err != nil {
   425  		return fmt.Errorf("error creating repo server client: %w", err)
   426  	}
   427  	defer utilio.Close(closer)
   428  
   429  	helmRepos, err := s.db.ListHelmRepositories(ctx)
   430  	if err != nil {
   431  		return fmt.Errorf("error listing helm repositories: %w", err)
   432  	}
   433  
   434  	permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)
   435  	if err != nil {
   436  		return fmt.Errorf("error retrieving permitted repos: %w", err)
   437  	}
   438  	helmRepositoryCredentials, err := s.db.GetAllHelmRepositoryCredentials(ctx)
   439  	if err != nil {
   440  		return fmt.Errorf("error getting helm repository credentials: %w", err)
   441  	}
   442  	helmOptions, err := s.settingsMgr.GetHelmSettings()
   443  	if err != nil {
   444  		return fmt.Errorf("error getting helm settings: %w", err)
   445  	}
   446  	permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)
   447  	if err != nil {
   448  		return fmt.Errorf("error getting permitted repos credentials: %w", err)
   449  	}
   450  	enabledSourceTypes, err := s.settingsMgr.GetEnabledSourceTypes()
   451  	if err != nil {
   452  		return fmt.Errorf("error getting settings enabled source types: %w", err)
   453  	}
   454  	ociRepos, err := s.db.ListOCIRepositories(context.Background())
   455  	if err != nil {
   456  		return fmt.Errorf("failed to list OCI repositories: %w", err)
   457  	}
   458  	permittedOCIRepos, err := argo.GetPermittedRepos(proj, ociRepos)
   459  	if err != nil {
   460  		return fmt.Errorf("failed to get permitted OCI repositories for project %q: %w", proj.Name, err)
   461  	}
   462  	ociRepositoryCredentials, err := s.db.GetAllOCIRepositoryCredentials(context.Background())
   463  	if err != nil {
   464  		return fmt.Errorf("failed to get OCI credentials: %w", err)
   465  	}
   466  	permittedOCICredentials, err := argo.GetPermittedReposCredentials(proj, ociRepositoryCredentials)
   467  	if err != nil {
   468  		return fmt.Errorf("failed to get permitted OCI credentials for project %q: %w", proj.Name, err)
   469  	}
   470  
   471  	return action(client, permittedHelmRepos, permittedHelmCredentials, permittedOCIRepos, permittedOCICredentials, helmOptions, enabledSourceTypes)
   472  }
   473  
   474  // GetManifests returns application manifests
   475  func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationManifestQuery) (*apiclient.ManifestResponse, error) {
   476  	if q.Name == nil || *q.Name == "" {
   477  		return nil, errors.New("invalid request: application name is missing")
   478  	}
   479  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
   480  	if err != nil {
   481  		return nil, err
   482  	}
   483  
   484  	if !s.isNamespaceEnabled(a.Namespace) {
   485  		return nil, security.NamespaceNotPermittedError(a.Namespace)
   486  	}
   487  
   488  	manifestInfos := make([]*apiclient.ManifestResponse, 0)
   489  	err = s.queryRepoServer(ctx, proj, func(
   490  		client apiclient.RepoServerServiceClient, helmRepos []*v1alpha1.Repository, helmCreds []*v1alpha1.RepoCreds, ociRepos []*v1alpha1.Repository, ociCreds []*v1alpha1.RepoCreds, helmOptions *v1alpha1.HelmOptions, enableGenerateManifests map[string]bool,
   491  	) error {
   492  		appInstanceLabelKey, err := s.settingsMgr.GetAppInstanceLabelKey()
   493  		if err != nil {
   494  			return fmt.Errorf("error getting app instance label key from settings: %w", err)
   495  		}
   496  
   497  		config, err := s.getApplicationClusterConfig(ctx, a)
   498  		if err != nil {
   499  			return fmt.Errorf("error getting application cluster config: %w", err)
   500  		}
   501  
   502  		serverVersion, err := s.kubectl.GetServerVersion(config)
   503  		if err != nil {
   504  			return fmt.Errorf("error getting server version: %w", err)
   505  		}
   506  
   507  		apiResources, err := s.kubectl.GetAPIResources(config, false, kubecache.NewNoopSettings())
   508  		if err != nil {
   509  			return fmt.Errorf("error getting API resources: %w", err)
   510  		}
   511  
   512  		sources := make([]v1alpha1.ApplicationSource, 0)
   513  		appSpec := a.Spec
   514  		if a.Spec.HasMultipleSources() {
   515  			numOfSources := int64(len(a.Spec.GetSources()))
   516  			for i, pos := range q.SourcePositions {
   517  				if pos <= 0 || pos > numOfSources {
   518  					return errors.New("source position is out of range")
   519  				}
   520  				appSpec.Sources[pos-1].TargetRevision = q.Revisions[i]
   521  			}
   522  			sources = appSpec.GetSources()
   523  		} else {
   524  			source := a.Spec.GetSource()
   525  			if q.GetRevision() != "" {
   526  				source.TargetRevision = q.GetRevision()
   527  			}
   528  			sources = append(sources, source)
   529  		}
   530  
   531  		// Store the map of all sources having ref field into a map for applications with sources field
   532  		refSources, err := argo.GetRefSources(context.Background(), sources, appSpec.Project, s.db.GetRepository, []string{})
   533  		if err != nil {
   534  			return fmt.Errorf("failed to get ref sources: %w", err)
   535  		}
   536  
   537  		for _, source := range sources {
   538  			repo, err := s.db.GetRepository(ctx, source.RepoURL, proj.Name)
   539  			if err != nil {
   540  				return fmt.Errorf("error getting repository: %w", err)
   541  			}
   542  
   543  			kustomizeSettings, err := s.settingsMgr.GetKustomizeSettings()
   544  			if err != nil {
   545  				return fmt.Errorf("error getting kustomize settings: %w", err)
   546  			}
   547  
   548  			installationID, err := s.settingsMgr.GetInstallationID()
   549  			if err != nil {
   550  				return fmt.Errorf("error getting installation ID: %w", err)
   551  			}
   552  			trackingMethod, err := s.settingsMgr.GetTrackingMethod()
   553  			if err != nil {
   554  				return fmt.Errorf("error getting trackingMethod from settings: %w", err)
   555  			}
   556  
   557  			repos := helmRepos
   558  			helmRepoCreds := helmCreds
   559  			// If the source is OCI, there is a potential for an OCI image to be a Helm chart and that said chart in
   560  			// turn would have OCI dependencies. To ensure that those dependencies can be resolved, add them to the repos
   561  			// list.
   562  			if source.IsOCI() {
   563  				repos = slices.Clone(helmRepos)
   564  				helmRepoCreds = slices.Clone(helmCreds)
   565  				repos = append(repos, ociRepos...)
   566  				helmRepoCreds = append(helmRepoCreds, ociCreds...)
   567  			}
   568  
   569  			manifestInfo, err := client.GenerateManifest(ctx, &apiclient.ManifestRequest{
   570  				Repo:                            repo,
   571  				Revision:                        source.TargetRevision,
   572  				AppLabelKey:                     appInstanceLabelKey,
   573  				AppName:                         a.InstanceName(s.ns),
   574  				Namespace:                       a.Spec.Destination.Namespace,
   575  				ApplicationSource:               &source,
   576  				Repos:                           repos,
   577  				KustomizeOptions:                kustomizeSettings,
   578  				KubeVersion:                     serverVersion,
   579  				ApiVersions:                     argo.APIResourcesToStrings(apiResources, true),
   580  				HelmRepoCreds:                   helmRepoCreds,
   581  				HelmOptions:                     helmOptions,
   582  				TrackingMethod:                  trackingMethod,
   583  				EnabledSourceTypes:              enableGenerateManifests,
   584  				ProjectName:                     proj.Name,
   585  				ProjectSourceRepos:              proj.Spec.SourceRepos,
   586  				HasMultipleSources:              a.Spec.HasMultipleSources(),
   587  				RefSources:                      refSources,
   588  				AnnotationManifestGeneratePaths: a.GetAnnotation(v1alpha1.AnnotationKeyManifestGeneratePaths),
   589  				InstallationID:                  installationID,
   590  				NoCache:                         q.NoCache != nil && *q.NoCache,
   591  			})
   592  			if err != nil {
   593  				return fmt.Errorf("error generating manifests: %w", err)
   594  			}
   595  			manifestInfos = append(manifestInfos, manifestInfo)
   596  		}
   597  		return nil
   598  	})
   599  	if err != nil {
   600  		return nil, err
   601  	}
   602  
   603  	manifests := &apiclient.ManifestResponse{}
   604  	for _, manifestInfo := range manifestInfos {
   605  		for i, manifest := range manifestInfo.Manifests {
   606  			obj := &unstructured.Unstructured{}
   607  			err = json.Unmarshal([]byte(manifest), obj)
   608  			if err != nil {
   609  				return nil, fmt.Errorf("error unmarshaling manifest into unstructured: %w", err)
   610  			}
   611  			if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
   612  				obj, _, err = diff.HideSecretData(obj, nil, s.settingsMgr.GetSensitiveAnnotations())
   613  				if err != nil {
   614  					return nil, fmt.Errorf("error hiding secret data: %w", err)
   615  				}
   616  				data, err := json.Marshal(obj)
   617  				if err != nil {
   618  					return nil, fmt.Errorf("error marshaling manifest: %w", err)
   619  				}
   620  				manifestInfo.Manifests[i] = string(data)
   621  			}
   622  		}
   623  		manifests.Manifests = append(manifests.Manifests, manifestInfo.Manifests...)
   624  	}
   625  
   626  	return manifests, nil
   627  }
   628  
   629  func (s *Server) GetManifestsWithFiles(stream application.ApplicationService_GetManifestsWithFilesServer) error {
   630  	ctx := stream.Context()
   631  	query, err := manifeststream.ReceiveApplicationManifestQueryWithFiles(stream)
   632  	if err != nil {
   633  		return fmt.Errorf("error getting query: %w", err)
   634  	}
   635  
   636  	if query.Name == nil || *query.Name == "" {
   637  		return errors.New("invalid request: application name is missing")
   638  	}
   639  
   640  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, query.GetProject(), query.GetAppNamespace(), query.GetName())
   641  	if err != nil {
   642  		return err
   643  	}
   644  
   645  	var manifestInfo *apiclient.ManifestResponse
   646  	err = s.queryRepoServer(ctx, proj, func(
   647  		client apiclient.RepoServerServiceClient, helmRepos []*v1alpha1.Repository, helmCreds []*v1alpha1.RepoCreds, _ []*v1alpha1.Repository, _ []*v1alpha1.RepoCreds, helmOptions *v1alpha1.HelmOptions, enableGenerateManifests map[string]bool,
   648  	) error {
   649  		appInstanceLabelKey, err := s.settingsMgr.GetAppInstanceLabelKey()
   650  		if err != nil {
   651  			return fmt.Errorf("error getting app instance label key from settings: %w", err)
   652  		}
   653  
   654  		trackingMethod, err := s.settingsMgr.GetTrackingMethod()
   655  		if err != nil {
   656  			return fmt.Errorf("error getting trackingMethod from settings: %w", err)
   657  		}
   658  
   659  		config, err := s.getApplicationClusterConfig(ctx, a)
   660  		if err != nil {
   661  			return fmt.Errorf("error getting application cluster config: %w", err)
   662  		}
   663  
   664  		serverVersion, err := s.kubectl.GetServerVersion(config)
   665  		if err != nil {
   666  			return fmt.Errorf("error getting server version: %w", err)
   667  		}
   668  
   669  		apiResources, err := s.kubectl.GetAPIResources(config, false, kubecache.NewNoopSettings())
   670  		if err != nil {
   671  			return fmt.Errorf("error getting API resources: %w", err)
   672  		}
   673  
   674  		source := a.Spec.GetSource()
   675  
   676  		proj, err := argo.GetAppProject(ctx, a, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), s.ns, s.settingsMgr, s.db)
   677  		if err != nil {
   678  			return fmt.Errorf("error getting app project: %w", err)
   679  		}
   680  
   681  		repo, err := s.db.GetRepository(ctx, a.Spec.GetSource().RepoURL, proj.Name)
   682  		if err != nil {
   683  			return fmt.Errorf("error getting repository: %w", err)
   684  		}
   685  
   686  		kustomizeSettings, err := s.settingsMgr.GetKustomizeSettings()
   687  		if err != nil {
   688  			return fmt.Errorf("error getting kustomize settings: %w", err)
   689  		}
   690  
   691  		req := &apiclient.ManifestRequest{
   692  			Repo:                            repo,
   693  			Revision:                        source.TargetRevision,
   694  			AppLabelKey:                     appInstanceLabelKey,
   695  			AppName:                         a.Name,
   696  			Namespace:                       a.Spec.Destination.Namespace,
   697  			ApplicationSource:               &source,
   698  			Repos:                           helmRepos,
   699  			KustomizeOptions:                kustomizeSettings,
   700  			KubeVersion:                     serverVersion,
   701  			ApiVersions:                     argo.APIResourcesToStrings(apiResources, true),
   702  			HelmRepoCreds:                   helmCreds,
   703  			HelmOptions:                     helmOptions,
   704  			TrackingMethod:                  trackingMethod,
   705  			EnabledSourceTypes:              enableGenerateManifests,
   706  			ProjectName:                     proj.Name,
   707  			ProjectSourceRepos:              proj.Spec.SourceRepos,
   708  			AnnotationManifestGeneratePaths: a.GetAnnotation(v1alpha1.AnnotationKeyManifestGeneratePaths),
   709  		}
   710  
   711  		repoStreamClient, err := client.GenerateManifestWithFiles(stream.Context())
   712  		if err != nil {
   713  			return fmt.Errorf("error opening stream: %w", err)
   714  		}
   715  
   716  		err = manifeststream.SendRepoStream(repoStreamClient, stream, req, *query.Checksum)
   717  		if err != nil {
   718  			return fmt.Errorf("error sending repo stream: %w", err)
   719  		}
   720  
   721  		resp, err := repoStreamClient.CloseAndRecv()
   722  		if err != nil {
   723  			return fmt.Errorf("error generating manifests: %w", err)
   724  		}
   725  
   726  		manifestInfo = resp
   727  		return nil
   728  	})
   729  	if err != nil {
   730  		return err
   731  	}
   732  
   733  	for i, manifest := range manifestInfo.Manifests {
   734  		obj := &unstructured.Unstructured{}
   735  		err = json.Unmarshal([]byte(manifest), obj)
   736  		if err != nil {
   737  			return fmt.Errorf("error unmarshaling manifest into unstructured: %w", err)
   738  		}
   739  		if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
   740  			obj, _, err = diff.HideSecretData(obj, nil, s.settingsMgr.GetSensitiveAnnotations())
   741  			if err != nil {
   742  				return fmt.Errorf("error hiding secret data: %w", err)
   743  			}
   744  			data, err := json.Marshal(obj)
   745  			if err != nil {
   746  				return fmt.Errorf("error marshaling manifest: %w", err)
   747  			}
   748  			manifestInfo.Manifests[i] = string(data)
   749  		}
   750  	}
   751  
   752  	stream.SendAndClose(manifestInfo)
   753  	return nil
   754  }
   755  
// Get returns an application by name.
//
// If q.Refresh is set, the app is annotated for refresh and this call blocks
// (until ctx is done) waiting for the application controller to process the
// refresh, then returns the refreshed application.
func (s *Server) Get(ctx context.Context, q *application.ApplicationQuery) (*v1alpha1.Application, error) {
	appName := q.GetName()
	appNs := s.appNamespaceOrDefault(q.GetAppNamespace())

	// The get endpoint accepts at most one project filter.
	project := ""
	projects := getProjectsFromApplicationQuery(*q)
	if len(projects) == 1 {
		project = projects[0]
	} else if len(projects) > 1 {
		return nil, status.Errorf(codes.InvalidArgument, "multiple projects specified - the get endpoint accepts either zero or one project")
	}

	// We must use a client Get instead of an informer Get, because it's common to call Get immediately
	// following a Watch (which is not yet powered by an informer), and the Get must reflect what was
	// previously seen by the client.
	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionGet, project, appNs, appName, q.GetResourceVersion())
	if err != nil {
		return nil, err
	}

	// No refresh requested: return the current state immediately.
	if q.Refresh == nil {
		s.inferResourcesStatusHealth(a)
		return a, nil
	}

	// Any value other than "hard" is treated as a normal refresh.
	refreshType := v1alpha1.RefreshTypeNormal
	if *q.Refresh == string(v1alpha1.RefreshTypeHard) {
		refreshType = v1alpha1.RefreshTypeHard
	}
	appIf := s.appclientset.ArgoprojV1alpha1().Applications(appNs)

	// subscribe early with buffered channel to ensure we don't miss events
	events := make(chan *v1alpha1.ApplicationWatchEvent, watchAPIBufferSize)
	unsubscribe := s.appBroadcaster.Subscribe(events, func(event *v1alpha1.ApplicationWatchEvent) bool {
		return event.Application.Name == appName && event.Application.Namespace == appNs
	})
	defer unsubscribe()

	// Annotate the app so the controller performs a refresh.
	app, err := argo.RefreshApp(appIf, appName, refreshType, true)
	if err != nil {
		return nil, fmt.Errorf("error refreshing the app: %w", err)
	}

	if refreshType == v1alpha1.RefreshTypeHard {
		// force refresh cached application details
		if err := s.queryRepoServer(ctx, proj, func(
			client apiclient.RepoServerServiceClient,
			helmRepos []*v1alpha1.Repository,
			_ []*v1alpha1.RepoCreds,
			_ []*v1alpha1.Repository,
			_ []*v1alpha1.RepoCreds,
			helmOptions *v1alpha1.HelmOptions,
			enabledSourceTypes map[string]bool,
		) error {
			source := app.Spec.GetSource()
			repo, err := s.db.GetRepository(ctx, a.Spec.GetSource().RepoURL, proj.Name)
			if err != nil {
				return fmt.Errorf("error getting repository: %w", err)
			}
			kustomizeSettings, err := s.settingsMgr.GetKustomizeSettings()
			if err != nil {
				return fmt.Errorf("error getting kustomize settings: %w", err)
			}
			trackingMethod, err := s.settingsMgr.GetTrackingMethod()
			if err != nil {
				return fmt.Errorf("error getting trackingMethod from settings: %w", err)
			}
			// NoCache: true forces the repo-server to regenerate cached details.
			_, err = client.GetAppDetails(ctx, &apiclient.RepoServerAppDetailsQuery{
				Repo:               repo,
				Source:             &source,
				AppName:            appName,
				KustomizeOptions:   kustomizeSettings,
				Repos:              helmRepos,
				NoCache:            true,
				TrackingMethod:     trackingMethod,
				EnabledSourceTypes: enabledSourceTypes,
				HelmOptions:        helmOptions,
			})
			return err
		}); err != nil {
			// Best effort: a failed detail refresh should not fail the Get.
			log.Warnf("Failed to force refresh application details: %v", err)
		}
	}

	// Only consider events newer than the version we just wrote the refresh
	// annotation at. Unparseable versions fall back to 0 (accept everything).
	minVersion := 0
	if minVersion, err = strconv.Atoi(app.ResourceVersion); err != nil {
		minVersion = 0
	}

	// Wait for an event whose app has a newer resource version AND no longer
	// carries the refresh annotation — i.e. the controller has presumably
	// finished processing the refresh we requested above (annotation set by
	// RefreshApp; removed by the controller — TODO confirm).
	for {
		select {
		case <-ctx.Done():
			return nil, errors.New("application refresh deadline exceeded")
		case event := <-events:
			if appVersion, err := strconv.Atoi(event.Application.ResourceVersion); err == nil && appVersion > minVersion {
				annotations := event.Application.GetAnnotations()
				if annotations == nil {
					annotations = make(map[string]string)
				}
				if _, ok := annotations[v1alpha1.AnnotationKeyRefresh]; !ok {
					refreshedApp := event.Application.DeepCopy()
					s.inferResourcesStatusHealth(refreshedApp)
					return refreshedApp, nil
				}
			}
		}
	}
}
   865  
   866  // ListResourceEvents returns a list of event resources
   867  func (s *Server) ListResourceEvents(ctx context.Context, q *application.ApplicationResourceEventsQuery) (*corev1.EventList, error) {
   868  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
   869  	if err != nil {
   870  		return nil, err
   871  	}
   872  
   873  	var (
   874  		kubeClientset kubernetes.Interface
   875  		fieldSelector string
   876  		namespace     string
   877  	)
   878  	// There are two places where we get events. If we are getting application events, we query
   879  	// our own cluster. If it is events on a resource on an external cluster, then we query the
   880  	// external cluster using its rest.Config
   881  	if q.GetResourceName() == "" && q.GetResourceUID() == "" {
   882  		kubeClientset = s.kubeclientset
   883  		namespace = a.Namespace
   884  		fieldSelector = fields.SelectorFromSet(map[string]string{
   885  			"involvedObject.name":      a.Name,
   886  			"involvedObject.uid":       string(a.UID),
   887  			"involvedObject.namespace": a.Namespace,
   888  		}).String()
   889  	} else {
   890  		tree, err := s.getAppResources(ctx, a)
   891  		if err != nil {
   892  			return nil, fmt.Errorf("error getting app resources: %w", err)
   893  		}
   894  		found := false
   895  		for _, n := range append(tree.Nodes, tree.OrphanedNodes...) {
   896  			if n.UID == q.GetResourceUID() && n.Name == q.GetResourceName() && n.Namespace == q.GetResourceNamespace() {
   897  				found = true
   898  				break
   899  			}
   900  		}
   901  		if !found {
   902  			return nil, status.Errorf(codes.InvalidArgument, "%s not found as part of application %s", q.GetResourceName(), q.GetName())
   903  		}
   904  
   905  		namespace = q.GetResourceNamespace()
   906  		var config *rest.Config
   907  		config, err = s.getApplicationClusterConfig(ctx, a)
   908  		if err != nil {
   909  			return nil, fmt.Errorf("error getting application cluster config: %w", err)
   910  		}
   911  		kubeClientset, err = kubernetes.NewForConfig(config)
   912  		if err != nil {
   913  			return nil, fmt.Errorf("error creating kube client: %w", err)
   914  		}
   915  		fieldSelector = fields.SelectorFromSet(map[string]string{
   916  			"involvedObject.name":      q.GetResourceName(),
   917  			"involvedObject.uid":       q.GetResourceUID(),
   918  			"involvedObject.namespace": namespace,
   919  		}).String()
   920  	}
   921  	log.Infof("Querying for resource events with field selector: %s", fieldSelector)
   922  	opts := metav1.ListOptions{FieldSelector: fieldSelector}
   923  	list, err := kubeClientset.CoreV1().Events(namespace).List(ctx, opts)
   924  	if err != nil {
   925  		return nil, fmt.Errorf("error listing resource events: %w", err)
   926  	}
   927  	return list.DeepCopy(), nil
   928  }
   929  
   930  // validateAndUpdateApp validates and updates the application. currentProject is the name of the project the app
   931  // currently is under. If not specified, we assume that the app is under the project specified in the app spec.
   932  func (s *Server) validateAndUpdateApp(ctx context.Context, newApp *v1alpha1.Application, merge bool, validate bool, action string, currentProject string) (*v1alpha1.Application, error) {
   933  	s.projectLock.RLock(newApp.Spec.GetProject())
   934  	defer s.projectLock.RUnlock(newApp.Spec.GetProject())
   935  
   936  	app, proj, err := s.getApplicationEnforceRBACClient(ctx, action, currentProject, newApp.Namespace, newApp.Name, "")
   937  	if err != nil {
   938  		return nil, err
   939  	}
   940  
   941  	err = s.validateAndNormalizeApp(ctx, newApp, proj, validate)
   942  	if err != nil {
   943  		return nil, fmt.Errorf("error validating and normalizing app: %w", err)
   944  	}
   945  
   946  	a, err := s.updateApp(ctx, app, newApp, merge)
   947  	if err != nil {
   948  		return nil, fmt.Errorf("error updating application: %w", err)
   949  	}
   950  	return a, nil
   951  }
   952  
// informerSyncTimeout bounds how long waitSync polls the informer cache for a
// freshly written application before giving up. Declared as a var (not const)
// so tests can shorten it.
var informerSyncTimeout = 2 * time.Second
   954  
   955  // waitSync is a helper to wait until the application informer cache is synced after create/update.
   956  // It waits until the app in the informer, has a resource version greater than the version in the
   957  // supplied app, or after 2 seconds, whichever comes first. Returns true if synced.
   958  // We use an informer cache for read operations (Get, List). Since the cache is only
   959  // eventually consistent, it is possible that it doesn't reflect an application change immediately
   960  // after a mutating API call (create/update). This function should be called after a creates &
   961  // update to give a probable (but not guaranteed) chance of being up-to-date after the create/update.
   962  func (s *Server) waitSync(app *v1alpha1.Application) {
   963  	logCtx := log.WithFields(applog.GetAppLogFields(app))
   964  	deadline := time.Now().Add(informerSyncTimeout)
   965  	minVersion, err := strconv.Atoi(app.ResourceVersion)
   966  	if err != nil {
   967  		logCtx.Warnf("waitSync failed: could not parse resource version %s", app.ResourceVersion)
   968  		time.Sleep(50 * time.Millisecond) // sleep anyway
   969  		return
   970  	}
   971  	for {
   972  		if currApp, err := s.appLister.Applications(app.Namespace).Get(app.Name); err == nil {
   973  			currVersion, err := strconv.Atoi(currApp.ResourceVersion)
   974  			if err == nil && currVersion >= minVersion {
   975  				return
   976  			}
   977  		}
   978  		if time.Now().After(deadline) {
   979  			break
   980  		}
   981  		time.Sleep(20 * time.Millisecond)
   982  	}
   983  	logCtx.Warnf("waitSync failed: timed out")
   984  }
   985  
   986  func (s *Server) updateApp(ctx context.Context, app *v1alpha1.Application, newApp *v1alpha1.Application, merge bool) (*v1alpha1.Application, error) {
   987  	for i := 0; i < 10; i++ {
   988  		app.Spec = newApp.Spec
   989  		if merge {
   990  			app.Labels = collections.Merge(app.Labels, newApp.Labels)
   991  			app.Annotations = collections.Merge(app.Annotations, newApp.Annotations)
   992  		} else {
   993  			app.Labels = newApp.Labels
   994  			app.Annotations = newApp.Annotations
   995  		}
   996  
   997  		app.Finalizers = newApp.Finalizers
   998  
   999  		res, err := s.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Update(ctx, app, metav1.UpdateOptions{})
  1000  		if err == nil {
  1001  			s.logAppEvent(ctx, app, argo.EventReasonResourceUpdated, "updated application spec")
  1002  			s.waitSync(res)
  1003  			return res, nil
  1004  		}
  1005  		if !apierrors.IsConflict(err) {
  1006  			return nil, err
  1007  		}
  1008  
  1009  		app, err = s.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(ctx, newApp.Name, metav1.GetOptions{})
  1010  		if err != nil {
  1011  			return nil, fmt.Errorf("error getting application: %w", err)
  1012  		}
  1013  		s.inferResourcesStatusHealth(app)
  1014  	}
  1015  	return nil, status.Errorf(codes.Internal, "Failed to update application. Too many conflicts")
  1016  }
  1017  
  1018  // Update updates an application
  1019  func (s *Server) Update(ctx context.Context, q *application.ApplicationUpdateRequest) (*v1alpha1.Application, error) {
  1020  	if q.GetApplication() == nil {
  1021  		return nil, errors.New("error updating application: application is nil in request")
  1022  	}
  1023  	a := q.GetApplication()
  1024  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionUpdate, a.RBACName(s.ns)); err != nil {
  1025  		return nil, err
  1026  	}
  1027  
  1028  	validate := true
  1029  	if q.Validate != nil {
  1030  		validate = *q.Validate
  1031  	}
  1032  	return s.validateAndUpdateApp(ctx, q.Application, false, validate, rbac.ActionUpdate, q.GetProject())
  1033  }
  1034  
  1035  // UpdateSpec updates an application spec and filters out any invalid parameter overrides
  1036  func (s *Server) UpdateSpec(ctx context.Context, q *application.ApplicationUpdateSpecRequest) (*v1alpha1.ApplicationSpec, error) {
  1037  	if q.GetSpec() == nil {
  1038  		return nil, errors.New("error updating application spec: spec is nil in request")
  1039  	}
  1040  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionUpdate, q.GetProject(), q.GetAppNamespace(), q.GetName(), "")
  1041  	if err != nil {
  1042  		return nil, err
  1043  	}
  1044  
  1045  	a.Spec = *q.GetSpec()
  1046  	validate := true
  1047  	if q.Validate != nil {
  1048  		validate = *q.Validate
  1049  	}
  1050  	a, err = s.validateAndUpdateApp(ctx, a, false, validate, rbac.ActionUpdate, q.GetProject())
  1051  	if err != nil {
  1052  		return nil, fmt.Errorf("error validating and updating app: %w", err)
  1053  	}
  1054  	return &a.Spec, nil
  1055  }
  1056  
  1057  // Patch patches an application
  1058  func (s *Server) Patch(ctx context.Context, q *application.ApplicationPatchRequest) (*v1alpha1.Application, error) {
  1059  	app, _, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName(), "")
  1060  	if err != nil {
  1061  		return nil, err
  1062  	}
  1063  
  1064  	err = s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionUpdate, app.RBACName(s.ns))
  1065  	if err != nil {
  1066  		return nil, err
  1067  	}
  1068  
  1069  	jsonApp, err := json.Marshal(app)
  1070  	if err != nil {
  1071  		return nil, fmt.Errorf("error marshaling application: %w", err)
  1072  	}
  1073  
  1074  	var patchApp []byte
  1075  
  1076  	switch q.GetPatchType() {
  1077  	case "json", "":
  1078  		patch, err := jsonpatch.DecodePatch([]byte(q.GetPatch()))
  1079  		if err != nil {
  1080  			return nil, fmt.Errorf("error decoding json patch: %w", err)
  1081  		}
  1082  		patchApp, err = patch.Apply(jsonApp)
  1083  		if err != nil {
  1084  			return nil, fmt.Errorf("error applying patch: %w", err)
  1085  		}
  1086  	case "merge":
  1087  		patchApp, err = jsonpatch.MergePatch(jsonApp, []byte(q.GetPatch()))
  1088  		if err != nil {
  1089  			return nil, fmt.Errorf("error calculating merge patch: %w", err)
  1090  		}
  1091  	default:
  1092  		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Patch type '%s' is not supported", q.GetPatchType()))
  1093  	}
  1094  
  1095  	newApp := &v1alpha1.Application{}
  1096  	err = json.Unmarshal(patchApp, newApp)
  1097  	if err != nil {
  1098  		return nil, fmt.Errorf("error unmarshaling patched app: %w", err)
  1099  	}
  1100  	return s.validateAndUpdateApp(ctx, newApp, false, true, rbac.ActionUpdate, q.GetProject())
  1101  }
  1102  
  1103  func (s *Server) getAppProject(ctx context.Context, a *v1alpha1.Application, logCtx *log.Entry) (*v1alpha1.AppProject, error) {
  1104  	proj, err := argo.GetAppProject(ctx, a, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), s.ns, s.settingsMgr, s.db)
  1105  	if err == nil {
  1106  		return proj, nil
  1107  	}
  1108  
  1109  	// If there's a permission issue or the app doesn't exist, return a vague error to avoid letting the user enumerate project names.
  1110  	vagueError := status.Errorf(codes.InvalidArgument, "app is not allowed in project %q, or the project does not exist", a.Spec.Project)
  1111  
  1112  	if apierrors.IsNotFound(err) {
  1113  		return nil, vagueError
  1114  	}
  1115  
  1116  	var applicationNotAllowedToUseProjectErr *argo.ErrApplicationNotAllowedToUseProject
  1117  	if errors.As(err, &applicationNotAllowedToUseProjectErr) {
  1118  		return nil, vagueError
  1119  	}
  1120  
  1121  	// Unknown error, log it but return the vague error to the user
  1122  	logCtx.WithFields(map[string]any{
  1123  		"project":                a.Spec.Project,
  1124  		argocommon.SecurityField: argocommon.SecurityMedium,
  1125  	}).Warnf("error getting app project: %s", err)
  1126  	return nil, vagueError
  1127  }
  1128  
  1129  // Delete removes an application and all associated resources
  1130  func (s *Server) Delete(ctx context.Context, q *application.ApplicationDeleteRequest) (*application.ApplicationResponse, error) {
  1131  	appName := q.GetName()
  1132  	appNs := s.appNamespaceOrDefault(q.GetAppNamespace())
  1133  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionGet, q.GetProject(), appNs, appName, "")
  1134  	if err != nil {
  1135  		return nil, err
  1136  	}
  1137  
  1138  	s.projectLock.RLock(a.Spec.Project)
  1139  	defer s.projectLock.RUnlock(a.Spec.Project)
  1140  
  1141  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionDelete, a.RBACName(s.ns)); err != nil {
  1142  		return nil, err
  1143  	}
  1144  
  1145  	if q.Cascade != nil && !*q.Cascade && q.GetPropagationPolicy() != "" {
  1146  		return nil, status.Error(codes.InvalidArgument, "cannot set propagation policy when cascading is disabled")
  1147  	}
  1148  
  1149  	patchFinalizer := false
  1150  	if q.Cascade == nil || *q.Cascade {
  1151  		// validate the propgation policy
  1152  		policyFinalizer := getPropagationPolicyFinalizer(q.GetPropagationPolicy())
  1153  		if policyFinalizer == "" {
  1154  			return nil, status.Errorf(codes.InvalidArgument, "invalid propagation policy: %s", *q.PropagationPolicy)
  1155  		}
  1156  		if !a.IsFinalizerPresent(policyFinalizer) {
  1157  			a.SetCascadedDeletion(policyFinalizer)
  1158  			patchFinalizer = true
  1159  		}
  1160  	} else if a.CascadedDeletion() {
  1161  		a.UnSetCascadedDeletion()
  1162  		patchFinalizer = true
  1163  	}
  1164  
  1165  	if patchFinalizer {
  1166  		// Although the cascaded deletion/propagation policy finalizer is not set when apps are created via
  1167  		// API, they will often be set by the user as part of declarative config. As part of a delete
  1168  		// request, we always calculate the patch to see if we need to set/unset the finalizer.
  1169  		patch, err := json.Marshal(map[string]any{
  1170  			"metadata": map[string]any{
  1171  				"finalizers": a.Finalizers,
  1172  			},
  1173  		})
  1174  		if err != nil {
  1175  			return nil, fmt.Errorf("error marshaling finalizers: %w", err)
  1176  		}
  1177  		_, err = s.appclientset.ArgoprojV1alpha1().Applications(a.Namespace).Patch(ctx, a.Name, types.MergePatchType, patch, metav1.PatchOptions{})
  1178  		if err != nil {
  1179  			return nil, fmt.Errorf("error patching application with finalizers: %w", err)
  1180  		}
  1181  	}
  1182  
  1183  	err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Delete(ctx, appName, metav1.DeleteOptions{})
  1184  	if err != nil {
  1185  		return nil, fmt.Errorf("error deleting application: %w", err)
  1186  	}
  1187  	s.logAppEvent(ctx, a, argo.EventReasonResourceDeleted, "deleted application")
  1188  	return &application.ApplicationResponse{}, nil
  1189  }
  1190  
  1191  func (s *Server) isApplicationPermitted(selector labels.Selector, minVersion int, claims any, appName, appNs string, projects map[string]bool, a v1alpha1.Application) bool {
  1192  	if len(projects) > 0 && !projects[a.Spec.GetProject()] {
  1193  		return false
  1194  	}
  1195  
  1196  	if appVersion, err := strconv.Atoi(a.ResourceVersion); err == nil && appVersion < minVersion {
  1197  		return false
  1198  	}
  1199  	matchedEvent := (appName == "" || (a.Name == appName && a.Namespace == appNs)) && selector.Matches(labels.Set(a.Labels))
  1200  	if !matchedEvent {
  1201  		return false
  1202  	}
  1203  
  1204  	if !s.isNamespaceEnabled(a.Namespace) {
  1205  		return false
  1206  	}
  1207  
  1208  	if !s.enf.Enforce(claims, rbac.ResourceApplications, rbac.ActionGet, a.RBACName(s.ns)) {
  1209  		// do not emit apps user does not have accessing
  1210  		return false
  1211  	}
  1212  
  1213  	return true
  1214  }
  1215  
// Watch streams application change events to the client, filtered by name,
// namespace, projects, label selector, and minimum resource version, and
// restricted to apps the caller may read per RBAC. Blocks until the client
// context is done.
func (s *Server) Watch(q *application.ApplicationQuery, ws application.ApplicationService_WatchServer) error {
	appName := q.GetName()
	appNs := s.appNamespaceOrDefault(q.GetAppNamespace())
	logCtx := log.NewEntry(log.New())
	if q.Name != nil {
		logCtx = logCtx.WithField("application", *q.Name)
	}
	// Build a set of allowed project names (empty set = no project filter).
	projects := map[string]bool{}
	for _, project := range getProjectsFromApplicationQuery(*q) {
		projects[project] = true
	}
	claims := ws.Context().Value("claims")
	selector, err := labels.Parse(q.GetSelector())
	if err != nil {
		return fmt.Errorf("error parsing labels with selectors: %w", err)
	}
	// Events older than the requested resource version are skipped; an
	// unparseable version falls back to 0 (send everything).
	minVersion := 0
	if q.GetResourceVersion() != "" {
		if minVersion, err = strconv.Atoi(q.GetResourceVersion()); err != nil {
			minVersion = 0
		}
	}

	// sendIfPermitted is a helper to send the application to the client's streaming channel if the
	// caller has RBAC privileges permissions to view it
	sendIfPermitted := func(a v1alpha1.Application, eventType watch.EventType) {
		permitted := s.isApplicationPermitted(selector, minVersion, claims, appName, appNs, projects, a)
		if !permitted {
			return
		}
		s.inferResourcesStatusHealth(&a)
		err := ws.Send(&v1alpha1.ApplicationWatchEvent{
			Type:        eventType,
			Application: a,
		})
		if err != nil {
			logCtx.Warnf("Unable to send stream message: %v", err)
			return
		}
	}

	events := make(chan *v1alpha1.ApplicationWatchEvent, watchAPIBufferSize)
	// Mimic watch API behavior: send ADDED events if no resource version provided
	// If watch API is executed for one application when emit event even if resource version is provided
	// This is required since single app watch API is used for during operations like app syncing and it is
	// critical to never miss events.
	if q.GetResourceVersion() == "" || q.GetName() != "" {
		apps, err := s.appLister.List(selector)
		if err != nil {
			return fmt.Errorf("error listing apps with selector: %w", err)
		}
		// Deterministic ordering of the initial ADDED burst.
		sort.Slice(apps, func(i, j int) bool {
			return apps[i].QualifiedName() < apps[j].QualifiedName()
		})
		for i := range apps {
			sendIfPermitted(*apps[i], watch.Added)
		}
	}
	// Subscribe after the initial list; the buffered channel absorbs events
	// while each one is being sent to the client.
	unsubscribe := s.appBroadcaster.Subscribe(events)
	defer unsubscribe()
	for {
		select {
		case event := <-events:
			sendIfPermitted(event.Application, event.Type)
		case <-ws.Context().Done():
			return nil
		}
	}
}
  1285  
  1286  func (s *Server) validateAndNormalizeApp(ctx context.Context, app *v1alpha1.Application, proj *v1alpha1.AppProject, validate bool) error {
  1287  	if app.GetName() == "" {
  1288  		return errors.New("resource name may not be empty")
  1289  	}
  1290  
  1291  	// ensure sources names are unique
  1292  	if app.Spec.HasMultipleSources() {
  1293  		sourceNames := make(map[string]bool)
  1294  		for _, source := range app.Spec.Sources {
  1295  			if source.Name != "" && sourceNames[source.Name] {
  1296  				return fmt.Errorf("application %s has duplicate source name: %s", app.Name, source.Name)
  1297  			}
  1298  			sourceNames[source.Name] = true
  1299  		}
  1300  	}
  1301  
  1302  	appNs := s.appNamespaceOrDefault(app.Namespace)
  1303  	currApp, err := s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, app.Name, metav1.GetOptions{})
  1304  	if err != nil {
  1305  		if !apierrors.IsNotFound(err) {
  1306  			return fmt.Errorf("error getting application by name: %w", err)
  1307  		}
  1308  		// Kubernetes go-client will return a pointer to a zero-value app instead of nil, even
  1309  		// though the API response was NotFound. This behavior was confirmed via logs.
  1310  		currApp = nil
  1311  	}
  1312  	if currApp != nil && currApp.Spec.GetProject() != app.Spec.GetProject() {
  1313  		// When changing projects, caller must have application create & update privileges in new project
  1314  		// NOTE: the update check was already verified in the caller to this function
  1315  		if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionCreate, app.RBACName(s.ns)); err != nil {
  1316  			return err
  1317  		}
  1318  		// They also need 'update' privileges in the old project
  1319  		if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionUpdate, currApp.RBACName(s.ns)); err != nil {
  1320  			return err
  1321  		}
  1322  		// Validate that the new project exists and the application is allowed to use it
  1323  		newProj, err := s.getAppProject(ctx, app, log.WithFields(applog.GetAppLogFields(app)))
  1324  		if err != nil {
  1325  			return err
  1326  		}
  1327  		proj = newProj
  1328  	}
  1329  
  1330  	if _, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, s.db); err != nil {
  1331  		return status.Errorf(codes.InvalidArgument, "application destination spec for %s is invalid: %s", app.Name, err.Error())
  1332  	}
  1333  
  1334  	var conditions []v1alpha1.ApplicationCondition
  1335  
  1336  	if validate {
  1337  		conditions := make([]v1alpha1.ApplicationCondition, 0)
  1338  		condition, err := argo.ValidateRepo(ctx, app, s.repoClientset, s.db, s.kubectl, proj, s.settingsMgr)
  1339  		if err != nil {
  1340  			return fmt.Errorf("error validating the repo: %w", err)
  1341  		}
  1342  		conditions = append(conditions, condition...)
  1343  		if len(conditions) > 0 {
  1344  			return status.Errorf(codes.InvalidArgument, "application spec for %s is invalid: %s", app.Name, argo.FormatAppConditions(conditions))
  1345  		}
  1346  	}
  1347  
  1348  	conditions, err = argo.ValidatePermissions(ctx, &app.Spec, proj, s.db)
  1349  	if err != nil {
  1350  		return fmt.Errorf("error validating project permissions: %w", err)
  1351  	}
  1352  	if len(conditions) > 0 {
  1353  		return status.Errorf(codes.InvalidArgument, "application spec for %s is invalid: %s", app.Name, argo.FormatAppConditions(conditions))
  1354  	}
  1355  
  1356  	app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
  1357  	return nil
  1358  }
  1359  
  1360  func (s *Server) getApplicationClusterConfig(ctx context.Context, a *v1alpha1.Application) (*rest.Config, error) {
  1361  	cluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, s.db)
  1362  	if err != nil {
  1363  		return nil, fmt.Errorf("error validating destination: %w", err)
  1364  	}
  1365  	config, err := cluster.RESTConfig()
  1366  	if err != nil {
  1367  		return nil, fmt.Errorf("error getting cluster REST config: %w", err)
  1368  	}
  1369  
  1370  	return config, err
  1371  }
  1372  
  1373  // getCachedAppState loads the cached state and trigger app refresh if cache is missing
  1374  func (s *Server) getCachedAppState(ctx context.Context, a *v1alpha1.Application, getFromCache func() error) error {
  1375  	err := getFromCache()
  1376  	if err != nil && errors.Is(err, servercache.ErrCacheMiss) {
  1377  		conditions := a.Status.GetConditions(map[v1alpha1.ApplicationConditionType]bool{
  1378  			v1alpha1.ApplicationConditionComparisonError:  true,
  1379  			v1alpha1.ApplicationConditionInvalidSpecError: true,
  1380  		})
  1381  		if len(conditions) > 0 {
  1382  			return errors.New(argo.FormatAppConditions(conditions))
  1383  		}
  1384  		_, err = s.Get(ctx, &application.ApplicationQuery{
  1385  			Name:         ptr.To(a.GetName()),
  1386  			AppNamespace: ptr.To(a.GetNamespace()),
  1387  			Refresh:      ptr.To(string(v1alpha1.RefreshTypeNormal)),
  1388  		})
  1389  		if err != nil {
  1390  			return fmt.Errorf("error getting application by query: %w", err)
  1391  		}
  1392  		return getFromCache()
  1393  	}
  1394  	return err
  1395  }
  1396  
  1397  func (s *Server) getAppResources(ctx context.Context, a *v1alpha1.Application) (*v1alpha1.ApplicationTree, error) {
  1398  	var tree v1alpha1.ApplicationTree
  1399  	err := s.getCachedAppState(ctx, a, func() error {
  1400  		return s.cache.GetAppResourcesTree(a.InstanceName(s.ns), &tree)
  1401  	})
  1402  	if err != nil {
  1403  		if errors.Is(err, ErrCacheMiss) {
  1404  			fmt.Println("Cache Key is missing.\nEnsure that the Redis compression setting on the Application controller and CLI is same. See --redis-compress.")
  1405  		}
  1406  		return &tree, fmt.Errorf("error getting cached app resource tree: %w", err)
  1407  	}
  1408  	return &tree, nil
  1409  }
  1410  
// getAppLiveResource resolves a single live resource node belonging to an
// application, enforcing RBAC for the given action. It returns the matching
// resource tree node, a REST config for the app's destination cluster, and
// the application itself.
func (s *Server) getAppLiveResource(ctx context.Context, action string, q *application.ApplicationResourceRequest) (*v1alpha1.ResourceNode, *rest.Config, *v1alpha1.Application, error) {
	fineGrainedInheritanceDisabled, err := s.settingsMgr.ApplicationFineGrainedRBACInheritanceDisabled()
	if err != nil {
		return nil, nil, nil, err
	}

	// With fine-grained inheritance disabled, delete/update permissions are
	// checked against the resource-specific RBAC path (action/group/kind/ns/name)
	// up front.
	if fineGrainedInheritanceDisabled && (action == rbac.ActionDelete || action == rbac.ActionUpdate) {
		action = fmt.Sprintf("%s/%s/%s/%s/%s", action, q.GetGroup(), q.GetKind(), q.GetNamespace(), q.GetResourceName())
	}
	a, _, err := s.getApplicationEnforceRBACInformer(ctx, action, q.GetProject(), q.GetAppNamespace(), q.GetName())
	// With inheritance enabled, an app-level permission denial for delete/update
	// is retried once against the more specific resource-level RBAC path.
	if !fineGrainedInheritanceDisabled && err != nil && errors.Is(err, argocommon.PermissionDeniedAPIError) && (action == rbac.ActionDelete || action == rbac.ActionUpdate) {
		action = fmt.Sprintf("%s/%s/%s/%s/%s", action, q.GetGroup(), q.GetKind(), q.GetNamespace(), q.GetResourceName())
		a, _, err = s.getApplicationEnforceRBACInformer(ctx, action, q.GetProject(), q.GetAppNamespace(), q.GetName())
	}
	if err != nil {
		return nil, nil, nil, err
	}

	tree, err := s.getAppResources(ctx, a)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("error getting app resources: %w", err)
	}

	// The resource must appear in the app's resource tree; an empty UID means
	// the node has no live object backing it, which is treated as not found.
	found := tree.FindNode(q.GetGroup(), q.GetKind(), q.GetNamespace(), q.GetResourceName())
	if found == nil || found.UID == "" {
		return nil, nil, nil, status.Errorf(codes.InvalidArgument, "%s %s %s not found as part of application %s", q.GetKind(), q.GetGroup(), q.GetResourceName(), q.GetName())
	}
	config, err := s.getApplicationClusterConfig(ctx, a)
	if err != nil {
		return nil, nil, nil, fmt.Errorf("error getting application cluster config: %w", err)
	}
	return found, config, a, nil
}
  1444  
  1445  func (s *Server) GetResource(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ApplicationResourceResponse, error) {
  1446  	res, config, _, err := s.getAppLiveResource(ctx, rbac.ActionGet, q)
  1447  	if err != nil {
  1448  		return nil, err
  1449  	}
  1450  
  1451  	// make sure to use specified resource version if provided
  1452  	if q.GetVersion() != "" {
  1453  		res.Version = q.GetVersion()
  1454  	}
  1455  	obj, err := s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
  1456  	if err != nil {
  1457  		return nil, fmt.Errorf("error getting resource: %w", err)
  1458  	}
  1459  	obj, err = s.replaceSecretValues(obj)
  1460  	if err != nil {
  1461  		return nil, fmt.Errorf("error replacing secret values: %w", err)
  1462  	}
  1463  	data, err := json.Marshal(obj.Object)
  1464  	if err != nil {
  1465  		return nil, fmt.Errorf("error marshaling object: %w", err)
  1466  	}
  1467  	manifest := string(data)
  1468  	return &application.ApplicationResourceResponse{Manifest: &manifest}, nil
  1469  }
  1470  
  1471  func (s *Server) replaceSecretValues(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
  1472  	if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
  1473  		_, obj, err := diff.HideSecretData(nil, obj, s.settingsMgr.GetSensitiveAnnotations())
  1474  		if err != nil {
  1475  			return nil, err
  1476  		}
  1477  		return obj, err
  1478  	}
  1479  	return obj, nil
  1480  }
  1481  
  1482  // PatchResource patches a resource
  1483  func (s *Server) PatchResource(ctx context.Context, q *application.ApplicationResourcePatchRequest) (*application.ApplicationResourceResponse, error) {
  1484  	resourceRequest := &application.ApplicationResourceRequest{
  1485  		Name:         q.Name,
  1486  		AppNamespace: q.AppNamespace,
  1487  		Namespace:    q.Namespace,
  1488  		ResourceName: q.ResourceName,
  1489  		Kind:         q.Kind,
  1490  		Version:      q.Version,
  1491  		Group:        q.Group,
  1492  		Project:      q.Project,
  1493  	}
  1494  	res, config, a, err := s.getAppLiveResource(ctx, rbac.ActionUpdate, resourceRequest)
  1495  	if err != nil {
  1496  		return nil, err
  1497  	}
  1498  
  1499  	manifest, err := s.kubectl.PatchResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, types.PatchType(q.GetPatchType()), []byte(q.GetPatch()))
  1500  	if err != nil {
  1501  		// don't expose real error for secrets since it might contain secret data
  1502  		if res.Kind == kube.SecretKind && res.Group == "" {
  1503  			return nil, fmt.Errorf("failed to patch Secret %s/%s", res.Namespace, res.Name)
  1504  		}
  1505  		return nil, fmt.Errorf("error patching resource: %w", err)
  1506  	}
  1507  	if manifest == nil {
  1508  		return nil, errors.New("failed to patch resource: manifest was nil")
  1509  	}
  1510  	manifest, err = s.replaceSecretValues(manifest)
  1511  	if err != nil {
  1512  		return nil, fmt.Errorf("error replacing secret values: %w", err)
  1513  	}
  1514  	data, err := json.Marshal(manifest.Object)
  1515  	if err != nil {
  1516  		return nil, fmt.Errorf("erro marshaling manifest object: %w", err)
  1517  	}
  1518  	s.logAppEvent(ctx, a, argo.EventReasonResourceUpdated, fmt.Sprintf("patched resource %s/%s '%s'", q.GetGroup(), q.GetKind(), q.GetResourceName()))
  1519  	m := string(data)
  1520  	return &application.ApplicationResourceResponse{
  1521  		Manifest: &m,
  1522  	}, nil
  1523  }
  1524  
  1525  // DeleteResource deletes a specified resource
  1526  func (s *Server) DeleteResource(ctx context.Context, q *application.ApplicationResourceDeleteRequest) (*application.ApplicationResponse, error) {
  1527  	resourceRequest := &application.ApplicationResourceRequest{
  1528  		Name:         q.Name,
  1529  		AppNamespace: q.AppNamespace,
  1530  		Namespace:    q.Namespace,
  1531  		ResourceName: q.ResourceName,
  1532  		Kind:         q.Kind,
  1533  		Version:      q.Version,
  1534  		Group:        q.Group,
  1535  		Project:      q.Project,
  1536  	}
  1537  	res, config, a, err := s.getAppLiveResource(ctx, rbac.ActionDelete, resourceRequest)
  1538  	if err != nil {
  1539  		return nil, err
  1540  	}
  1541  	var deleteOption metav1.DeleteOptions
  1542  	switch {
  1543  	case q.GetOrphan():
  1544  		propagationPolicy := metav1.DeletePropagationOrphan
  1545  		deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}
  1546  	case q.GetForce():
  1547  		propagationPolicy := metav1.DeletePropagationBackground
  1548  		zeroGracePeriod := int64(0)
  1549  		deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy, GracePeriodSeconds: &zeroGracePeriod}
  1550  	default:
  1551  		propagationPolicy := metav1.DeletePropagationForeground
  1552  		deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}
  1553  	}
  1554  	err = s.kubectl.DeleteResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, deleteOption)
  1555  	if err != nil {
  1556  		return nil, fmt.Errorf("error deleting resource: %w", err)
  1557  	}
  1558  	s.logAppEvent(ctx, a, argo.EventReasonResourceDeleted, fmt.Sprintf("deleted resource %s/%s '%s'", q.GetGroup(), q.GetKind(), q.GetResourceName()))
  1559  	return &application.ApplicationResponse{}, nil
  1560  }
  1561  
  1562  func (s *Server) ResourceTree(ctx context.Context, q *application.ResourcesQuery) (*v1alpha1.ApplicationTree, error) {
  1563  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetApplicationName())
  1564  	if err != nil {
  1565  		return nil, err
  1566  	}
  1567  
  1568  	return s.getAppResources(ctx, a)
  1569  }
  1570  
  1571  func (s *Server) WatchResourceTree(q *application.ResourcesQuery, ws application.ApplicationService_WatchResourceTreeServer) error {
  1572  	_, _, err := s.getApplicationEnforceRBACInformer(ws.Context(), rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetApplicationName())
  1573  	if err != nil {
  1574  		return err
  1575  	}
  1576  
  1577  	cacheKey := argo.AppInstanceName(q.GetApplicationName(), q.GetAppNamespace(), s.ns)
  1578  	return s.cache.OnAppResourcesTreeChanged(ws.Context(), cacheKey, func() error {
  1579  		var tree v1alpha1.ApplicationTree
  1580  		err := s.cache.GetAppResourcesTree(cacheKey, &tree)
  1581  		if err != nil {
  1582  			return fmt.Errorf("error getting app resource tree: %w", err)
  1583  		}
  1584  		return ws.Send(&tree)
  1585  	})
  1586  }
  1587  
  1588  func (s *Server) RevisionMetadata(ctx context.Context, q *application.RevisionMetadataQuery) (*v1alpha1.RevisionMetadata, error) {
  1589  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1590  	if err != nil {
  1591  		return nil, err
  1592  	}
  1593  
  1594  	source, err := getAppSourceBySourceIndexAndVersionId(a, q.SourceIndex, q.VersionId)
  1595  	if err != nil {
  1596  		return nil, fmt.Errorf("error getting app source by source index and version ID: %w", err)
  1597  	}
  1598  
  1599  	repo, err := s.db.GetRepository(ctx, source.RepoURL, proj.Name)
  1600  	if err != nil {
  1601  		return nil, fmt.Errorf("error getting repository by URL: %w", err)
  1602  	}
  1603  	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
  1604  	if err != nil {
  1605  		return nil, fmt.Errorf("error creating repo server client: %w", err)
  1606  	}
  1607  	defer utilio.Close(conn)
  1608  	return repoClient.GetRevisionMetadata(ctx, &apiclient.RepoServerRevisionMetadataRequest{
  1609  		Repo:           repo,
  1610  		Revision:       q.GetRevision(),
  1611  		CheckSignature: len(proj.Spec.SignatureKeys) > 0,
  1612  	})
  1613  }
  1614  
  1615  // RevisionChartDetails returns the helm chart metadata, as fetched from the reposerver
  1616  func (s *Server) RevisionChartDetails(ctx context.Context, q *application.RevisionMetadataQuery) (*v1alpha1.ChartDetails, error) {
  1617  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1618  	if err != nil {
  1619  		return nil, err
  1620  	}
  1621  
  1622  	source, err := getAppSourceBySourceIndexAndVersionId(a, q.SourceIndex, q.VersionId)
  1623  	if err != nil {
  1624  		return nil, fmt.Errorf("error getting app source by source index and version ID: %w", err)
  1625  	}
  1626  
  1627  	if source.Chart == "" {
  1628  		return nil, fmt.Errorf("no chart found for application: %v", q.GetName())
  1629  	}
  1630  	repo, err := s.db.GetRepository(ctx, source.RepoURL, a.Spec.Project)
  1631  	if err != nil {
  1632  		return nil, fmt.Errorf("error getting repository by URL: %w", err)
  1633  	}
  1634  	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
  1635  	if err != nil {
  1636  		return nil, fmt.Errorf("error creating repo server client: %w", err)
  1637  	}
  1638  	defer utilio.Close(conn)
  1639  	return repoClient.GetRevisionChartDetails(ctx, &apiclient.RepoServerRevisionChartDetailsRequest{
  1640  		Repo:     repo,
  1641  		Name:     source.Chart,
  1642  		Revision: q.GetRevision(),
  1643  	})
  1644  }
  1645  
  1646  func (s *Server) GetOCIMetadata(ctx context.Context, q *application.RevisionMetadataQuery) (*v1alpha1.OCIMetadata, error) {
  1647  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1648  	if err != nil {
  1649  		return nil, err
  1650  	}
  1651  
  1652  	source, err := getAppSourceBySourceIndexAndVersionId(a, q.SourceIndex, q.VersionId)
  1653  	if err != nil {
  1654  		return nil, fmt.Errorf("error getting app source by source index and version ID: %w", err)
  1655  	}
  1656  
  1657  	repo, err := s.db.GetRepository(ctx, source.RepoURL, proj.Name)
  1658  	if err != nil {
  1659  		return nil, fmt.Errorf("error getting repository by URL: %w", err)
  1660  	}
  1661  	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
  1662  	if err != nil {
  1663  		return nil, fmt.Errorf("error creating repo server client: %w", err)
  1664  	}
  1665  	defer utilio.Close(conn)
  1666  
  1667  	return repoClient.GetOCIMetadata(ctx, &apiclient.RepoServerRevisionChartDetailsRequest{
  1668  		Repo:     repo,
  1669  		Name:     source.Chart,
  1670  		Revision: q.GetRevision(),
  1671  	})
  1672  }
  1673  
  1674  // getAppSourceBySourceIndexAndVersionId returns the source for a specific source index and version ID. Source index and
  1675  // version ID are optional. If the source index is not specified, it defaults to 0. If the version ID is not specified,
  1676  // we use the source(s) currently configured for the app. If the version ID is specified, we find the source for that
  1677  // version ID. If the version ID is not found, we return an error. If the source index is out of bounds for whichever
  1678  // source we choose (configured sources or sources for a specific version), we return an error.
  1679  func getAppSourceBySourceIndexAndVersionId(a *v1alpha1.Application, sourceIndexMaybe *int32, versionIdMaybe *int32) (v1alpha1.ApplicationSource, error) {
  1680  	// Start with all the app's configured sources.
  1681  	sources := a.Spec.GetSources()
  1682  
  1683  	// If the user specified a version, get the sources for that version. If the version is not found, return an error.
  1684  	if versionIdMaybe != nil {
  1685  		versionId := int64(*versionIdMaybe)
  1686  		var err error
  1687  		sources, err = getSourcesByVersionId(a, versionId)
  1688  		if err != nil {
  1689  			return v1alpha1.ApplicationSource{}, fmt.Errorf("error getting source by version ID: %w", err)
  1690  		}
  1691  	}
  1692  
  1693  	// Start by assuming we want the first source.
  1694  	sourceIndex := 0
  1695  
  1696  	// If the user specified a source index, use that instead.
  1697  	if sourceIndexMaybe != nil {
  1698  		sourceIndex = int(*sourceIndexMaybe)
  1699  		if sourceIndex >= len(sources) {
  1700  			if len(sources) == 1 {
  1701  				return v1alpha1.ApplicationSource{}, fmt.Errorf("source index %d not found because there is only 1 source", sourceIndex)
  1702  			}
  1703  			return v1alpha1.ApplicationSource{}, fmt.Errorf("source index %d not found because there are only %d sources", sourceIndex, len(sources))
  1704  		}
  1705  	}
  1706  
  1707  	source := sources[sourceIndex]
  1708  
  1709  	return source, nil
  1710  }
  1711  
  1712  // getRevisionHistoryByVersionId returns the revision history for a specific version ID.
  1713  // If the version ID is not found, it returns an empty revision history and false.
  1714  func getRevisionHistoryByVersionId(histories v1alpha1.RevisionHistories, versionId int64) (v1alpha1.RevisionHistory, bool) {
  1715  	for _, h := range histories {
  1716  		if h.ID == versionId {
  1717  			return h, true
  1718  		}
  1719  	}
  1720  	return v1alpha1.RevisionHistory{}, false
  1721  }
  1722  
  1723  // getSourcesByVersionId returns the sources for a specific version ID. If there is no history, it returns an error.
  1724  // If the version ID is not found, it returns an error. If the version ID is found, and there are multiple sources,
  1725  // it returns the sources for that version ID. If the version ID is found, and there is only one source, it returns
  1726  // a slice with just the single source.
  1727  func getSourcesByVersionId(a *v1alpha1.Application, versionId int64) ([]v1alpha1.ApplicationSource, error) {
  1728  	if len(a.Status.History) == 0 {
  1729  		return nil, fmt.Errorf("version ID %d not found because the app has no history", versionId)
  1730  	}
  1731  
  1732  	h, ok := getRevisionHistoryByVersionId(a.Status.History, versionId)
  1733  	if !ok {
  1734  		return nil, fmt.Errorf("revision history not found for version ID %d", versionId)
  1735  	}
  1736  
  1737  	if len(h.Sources) > 0 {
  1738  		return h.Sources, nil
  1739  	}
  1740  
  1741  	return []v1alpha1.ApplicationSource{h.Source}, nil
  1742  }
  1743  
  1744  func isMatchingResource(q *application.ResourcesQuery, key kube.ResourceKey) bool {
  1745  	return (q.GetName() == "" || q.GetName() == key.Name) &&
  1746  		(q.GetNamespace() == "" || q.GetNamespace() == key.Namespace) &&
  1747  		(q.GetGroup() == "" || q.GetGroup() == key.Group) &&
  1748  		(q.GetKind() == "" || q.GetKind() == key.Kind)
  1749  }
  1750  
  1751  func (s *Server) ManagedResources(ctx context.Context, q *application.ResourcesQuery) (*application.ManagedResourcesResponse, error) {
  1752  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetApplicationName())
  1753  	if err != nil {
  1754  		return nil, err
  1755  	}
  1756  
  1757  	items := make([]*v1alpha1.ResourceDiff, 0)
  1758  	err = s.getCachedAppState(ctx, a, func() error {
  1759  		return s.cache.GetAppManagedResources(a.InstanceName(s.ns), &items)
  1760  	})
  1761  	if err != nil {
  1762  		return nil, fmt.Errorf("error getting cached app managed resources: %w", err)
  1763  	}
  1764  	res := &application.ManagedResourcesResponse{}
  1765  	for i := range items {
  1766  		item := items[i]
  1767  		if !item.Hook && isMatchingResource(q, kube.ResourceKey{Name: item.Name, Namespace: item.Namespace, Kind: item.Kind, Group: item.Group}) {
  1768  			res.Items = append(res.Items, item)
  1769  		}
  1770  	}
  1771  
  1772  	return res, nil
  1773  }
  1774  
// PodLogs streams logs from the pods of an application that match the query's
// kind/group/name/namespace filters (or their owner resources), merging the
// per-pod streams into a single ordered stream sent over the gRPC server
// stream. Filtering, time bounds, tail limits and case sensitivity are all
// applied server-side.
func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.ApplicationService_PodLogsServer) error {
	// A pod name in the query is shorthand for Kind=Pod, ResourceName=<pod>.
	if q.PodName != nil {
		podKind := "Pod"
		q.Kind = &podKind
		q.ResourceName = q.PodName
	}

	// Only pass sinceSeconds/tailLines to the API when they are positive;
	// a nil pointer means "no limit" to the Kubernetes log options.
	var sinceSeconds, tailLines *int64
	if q.GetSinceSeconds() > 0 {
		sinceSeconds = ptr.To(q.GetSinceSeconds())
	}
	if q.GetTailLines() > 0 {
		tailLines = ptr.To(q.GetTailLines())
	}
	var untilTime *metav1.Time
	if q.GetUntilTime() != "" {
		val, err := time.Parse(time.RFC3339Nano, q.GetUntilTime())
		if err != nil {
			return fmt.Errorf("invalid untilTime parameter value: %w", err)
		}
		untilTimeVal := metav1.NewTime(val)
		untilTime = &untilTimeVal
	}

	// A leading '!' in the filter inverts the match (exclude matching lines).
	literal := ""
	inverse := false
	if q.GetFilter() != "" {
		literal = *q.Filter
		if literal[0] == '!' {
			literal = literal[1:]
			inverse = true
		}
	}

	a, _, err := s.getApplicationEnforceRBACInformer(ws.Context(), rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
	if err != nil {
		return err
	}

	// Log access is guarded by a dedicated "logs" RBAC resource in addition to
	// application read access.
	if err := s.enf.EnforceErr(ws.Context().Value("claims"), rbac.ResourceLogs, rbac.ActionGet, a.RBACName(s.ns)); err != nil {
		return err
	}

	tree, err := s.getAppResources(ws.Context(), a)
	if err != nil {
		return fmt.Errorf("error getting app resource tree: %w", err)
	}

	config, err := s.getApplicationClusterConfig(ws.Context(), a)
	if err != nil {
		return fmt.Errorf("error getting application cluster config: %w", err)
	}

	kubeClientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return fmt.Errorf("error creating kube client: %w", err)
	}

	// from the tree find pods which match query of kind, group, and resource name
	pods := getSelectedPods(tree.Nodes, q)
	if len(pods) == 0 {
		return nil
	}

	maxPodLogsToRender, err := s.settingsMgr.GetMaxPodLogsToRender()
	if err != nil {
		return fmt.Errorf("error getting MaxPodLogsToRender config: %w", err)
	}

	if int64(len(pods)) > maxPodLogsToRender {
		return status.Error(codes.InvalidArgument, "max pods to view logs are reached. Please provide more granular query")
	}

	var streams []chan logEntry

	// Open one log stream per pod; each stream is parsed in its own goroutine
	// and fed into a per-pod channel.
	// NOTE(review): the deferred Close calls inside this loop only run when
	// PodLogs returns, not per iteration — acceptable here since the streams
	// must stay open for the lifetime of the RPC, but worth confirming.
	for _, pod := range pods {
		stream, err := kubeClientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{
			Container:    q.GetContainer(),
			Follow:       q.GetFollow(),
			Timestamps:   true,
			SinceSeconds: sinceSeconds,
			SinceTime:    q.GetSinceTime(),
			TailLines:    tailLines,
			Previous:     q.GetPrevious(),
		}).Stream(ws.Context())
		podName := pod.Name
		logStream := make(chan logEntry)
		if err == nil {
			defer utilio.Close(stream)
		}

		streams = append(streams, logStream)
		go func() {
			// if k8s failed to start steaming logs (typically because Pod is not ready yet)
			// then the error should be shown in the UI so that user know the reason
			if err != nil {
				logStream <- logEntry{line: err.Error()}
			} else {
				parseLogsStream(podName, stream, logStream)
			}
			close(logStream)
		}()
	}

	// Merge the per-pod channels into a single, roughly time-ordered stream and
	// forward entries to the client, applying the text filter and untilTime cut.
	logStream := mergeLogStreams(streams, time.Millisecond*100)
	sentCount := int64(0)
	done := make(chan error)
	go func() {
		for entry := range logStream {
			if entry.err != nil {
				done <- entry.err
				return
			}
			if q.Filter != nil {
				var lineContainsFilter bool
				if q.GetMatchCase() {
					lineContainsFilter = strings.Contains(entry.line, literal)
				} else {
					lineContainsFilter = strings.Contains(strings.ToLower(entry.line), strings.ToLower(literal))
				}

				// Skip lines that don't satisfy the (possibly inverted) filter.
				if (inverse && lineContainsFilter) || (!inverse && !lineContainsFilter) {
					continue
				}
			}
			ts := metav1.NewTime(entry.timeStamp)
			// Past the untilTime bound: send a final entry marked Last and stop.
			if untilTime != nil && entry.timeStamp.After(untilTime.Time) {
				done <- ws.Send(&application.LogEntry{
					Last:         ptr.To(true),
					PodName:      &entry.podName,
					Content:      &entry.line,
					TimeStampStr: ptr.To(entry.timeStamp.Format(time.RFC3339Nano)),
					TimeStamp:    &ts,
				})
				return
			}
			sentCount++
			if err := ws.Send(&application.LogEntry{
				PodName:      &entry.podName,
				Content:      &entry.line,
				TimeStampStr: ptr.To(entry.timeStamp.Format(time.RFC3339Nano)),
				TimeStamp:    &ts,
				Last:         ptr.To(false),
			}); err != nil {
				done <- err
				break
			}
		}
		// All streams drained: send a synthetic terminal entry so the client
		// knows the log stream is complete.
		now := time.Now()
		nowTS := metav1.NewTime(now)
		done <- ws.Send(&application.LogEntry{
			Last:         ptr.To(true),
			PodName:      ptr.To(""),
			Content:      ptr.To(""),
			TimeStampStr: ptr.To(now.Format(time.RFC3339Nano)),
			TimeStamp:    &nowTS,
		})
	}()

	select {
	case err := <-done:
		return err
	case <-ws.Context().Done():
		log.WithField("application", q.Name).Debug("k8s pod logs reader completed due to closed grpc context")
		return nil
	}
}
  1942  
  1943  // from all of the treeNodes, get the pod who meets the criteria or whose parents meets the criteria
  1944  func getSelectedPods(treeNodes []v1alpha1.ResourceNode, q *application.ApplicationPodLogsQuery) []v1alpha1.ResourceNode {
  1945  	var pods []v1alpha1.ResourceNode
  1946  	isTheOneMap := make(map[string]bool)
  1947  	for _, treeNode := range treeNodes {
  1948  		if treeNode.Kind == kube.PodKind && treeNode.Group == "" && treeNode.UID != "" {
  1949  			if isTheSelectedOne(&treeNode, q, treeNodes, isTheOneMap) {
  1950  				pods = append(pods, treeNode)
  1951  			}
  1952  		}
  1953  	}
  1954  	return pods
  1955  }
  1956  
  1957  // check is currentNode is matching with group, kind, and name, or if any of its parents matches
  1958  func isTheSelectedOne(currentNode *v1alpha1.ResourceNode, q *application.ApplicationPodLogsQuery, resourceNodes []v1alpha1.ResourceNode, isTheOneMap map[string]bool) bool {
  1959  	exist, value := isTheOneMap[currentNode.UID]
  1960  	if exist {
  1961  		return value
  1962  	}
  1963  
  1964  	if (q.GetResourceName() == "" || currentNode.Name == q.GetResourceName()) &&
  1965  		(q.GetKind() == "" || currentNode.Kind == q.GetKind()) &&
  1966  		(q.GetGroup() == "" || currentNode.Group == q.GetGroup()) &&
  1967  		(q.GetNamespace() == "" || currentNode.Namespace == q.GetNamespace()) {
  1968  		isTheOneMap[currentNode.UID] = true
  1969  		return true
  1970  	}
  1971  
  1972  	if len(currentNode.ParentRefs) == 0 {
  1973  		isTheOneMap[currentNode.UID] = false
  1974  		return false
  1975  	}
  1976  
  1977  	for _, parentResource := range currentNode.ParentRefs {
  1978  		// look up parentResource from resourceNodes
  1979  		// then check if the parent isTheSelectedOne
  1980  		for _, resourceNode := range resourceNodes {
  1981  			if resourceNode.Namespace == parentResource.Namespace &&
  1982  				resourceNode.Name == parentResource.Name &&
  1983  				resourceNode.Group == parentResource.Group &&
  1984  				resourceNode.Kind == parentResource.Kind {
  1985  				if isTheSelectedOne(&resourceNode, q, resourceNodes, isTheOneMap) {
  1986  					isTheOneMap[currentNode.UID] = true
  1987  					return true
  1988  				}
  1989  			}
  1990  		}
  1991  	}
  1992  
  1993  	isTheOneMap[currentNode.UID] = false
  1994  	return false
  1995  }
  1996  
// Sync syncs an application to its target state. It enforces sync windows,
// sync/override RBAC, local-manifest restrictions, resolves the target
// revision(s), and then records a sync Operation on the application for the
// controller to execute.
func (s *Server) Sync(ctx context.Context, syncReq *application.ApplicationSyncRequest) (*v1alpha1.Application, error) {
	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionGet, syncReq.GetProject(), syncReq.GetAppNamespace(), syncReq.GetName(), "")
	if err != nil {
		return nil, err
	}

	s.inferResourcesStatusHealth(a)

	// Manual syncs must be allowed by the project's currently active sync windows.
	canSync, err := proj.Spec.SyncWindows.Matches(a).CanSync(true)
	if err != nil {
		return a, status.Errorf(codes.PermissionDenied, "cannot sync: invalid sync window: %v", err)
	}
	if !canSync {
		return a, status.Errorf(codes.PermissionDenied, "cannot sync: blocked by sync window")
	}

	if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionSync, a.RBACName(s.ns)); err != nil {
		return nil, err
	}

	// Syncing user-supplied (local) manifests additionally requires the
	// "override" permission, and is disallowed for auto-synced apps except dry runs.
	if syncReq.Manifests != nil {
		if err := s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbac.ActionOverride, a.RBACName(s.ns)); err != nil {
			return nil, err
		}
		if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.IsAutomatedSyncEnabled() && !syncReq.GetDryRun() {
			return nil, status.Error(codes.FailedPrecondition, "cannot use local sync when Automatic Sync Policy is enabled unless for dry run")
		}
	}
	if a.DeletionTimestamp != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "application is deleting")
	}

	revision, displayRevision, sourceRevisions, displayRevisions, err := s.resolveSourceRevisions(ctx, a, syncReq)
	if err != nil {
		return nil, err
	}

	// Sync options and retry strategy default to the app's sync policy but can
	// be overridden by the request.
	var retry *v1alpha1.RetryStrategy
	var syncOptions v1alpha1.SyncOptions
	if a.Spec.SyncPolicy != nil {
		syncOptions = a.Spec.SyncPolicy.SyncOptions
		retry = a.Spec.SyncPolicy.Retry
	}
	if syncReq.RetryStrategy != nil {
		retry = syncReq.RetryStrategy
	}
	if syncReq.SyncOptions != nil {
		syncOptions = syncReq.SyncOptions.Items
	}

	if syncOptions.HasOption(common.SyncOptionReplace) && !s.syncWithReplaceAllowed {
		return nil, status.Error(codes.FailedPrecondition, "sync with replace was disabled on the API Server level via the server configuration")
	}

	// We cannot use local manifests if we're only allowed to sync to signed commits
	if syncReq.Manifests != nil && len(proj.Spec.SignatureKeys) > 0 {
		return nil, status.Errorf(codes.FailedPrecondition, "Cannot use local sync when signature keys are required.")
	}

	// Copy the non-nil requested resources for a partial sync.
	resources := []v1alpha1.SyncOperationResource{}
	if syncReq.GetResources() != nil {
		for _, r := range syncReq.GetResources() {
			if r != nil {
				resources = append(resources, *r)
			}
		}
	}

	// Single-source apps record the source on the operation; multi-source apps
	// use the Sources/Revisions fields instead.
	var source *v1alpha1.ApplicationSource
	if !a.Spec.HasMultipleSources() {
		source = ptr.To(a.Spec.GetSource())
	}

	op := v1alpha1.Operation{
		Sync: &v1alpha1.SyncOperation{
			Source:       source,
			Revision:     revision,
			Prune:        syncReq.GetPrune(),
			DryRun:       syncReq.GetDryRun(),
			SyncOptions:  syncOptions,
			SyncStrategy: syncReq.Strategy,
			Resources:    resources,
			Manifests:    syncReq.Manifests,
			Sources:      a.Spec.Sources,
			Revisions:    sourceRevisions,
		},
		InitiatedBy: v1alpha1.OperationInitiator{Username: session.Username(ctx)},
		Info:        syncReq.Infos,
	}
	if retry != nil {
		op.Retry = *retry
	}

	// Persist the operation on the app; the application controller picks it up.
	appName := syncReq.GetName()
	appNs := s.appNamespaceOrDefault(syncReq.GetAppNamespace())
	appIf := s.appclientset.ArgoprojV1alpha1().Applications(appNs)
	a, err = argo.SetAppOperation(appIf, appName, &op)
	if err != nil {
		return nil, fmt.Errorf("error setting app operation: %w", err)
	}
	partial := ""
	if len(syncReq.Resources) > 0 {
		partial = "partial "
	}
	var reason string
	if a.Spec.HasMultipleSources() {
		reason = fmt.Sprintf("initiated %ssync to %s", partial, strings.Join(displayRevisions, ","))
	} else {
		reason = fmt.Sprintf("initiated %ssync to %s", partial, displayRevision)
	}
	if syncReq.Manifests != nil {
		reason = fmt.Sprintf("initiated %ssync locally", partial)
	}
	s.logAppEvent(ctx, a, argo.EventReasonOperationStarted, reason)
	return a, nil
}
  2114  
// resolveSourceRevisions resolves the revision(s) a sync operation should use.
//
// Multi-source apps: returns ("", "", sourceRevisions, displayRevisions, nil) —
// one resolved and one display revision per source, after applying any
// per-source revision overrides from the request (SourcePositions are 1-based
// positions paired element-wise with Revisions).
// Single-source apps: returns (revision, displayRevision, nil, nil, nil).
//
// When auto-sync is enabled and this is not a dry run, requesting a revision
// different from the configured target revision is rejected with
// FailedPrecondition.
func (s *Server) resolveSourceRevisions(ctx context.Context, a *v1alpha1.Application, syncReq *application.ApplicationSyncRequest) (string, string, []string, []string, error) {
	if a.Spec.HasMultipleSources() {
		numOfSources := int64(len(a.Spec.GetSources()))
		sourceRevisions := make([]string, numOfSources)
		displayRevisions := make([]string, numOfSources)

		sources := a.Spec.GetSources()
		for i, pos := range syncReq.SourcePositions {
			if pos <= 0 || pos > numOfSources {
				return "", "", nil, nil, errors.New("source position is out of range")
			}
			// NOTE(review): sources shares its backing array with a.Spec.Sources
			// for multi-source apps, so this override is also visible through
			// a.Spec in the comparisons below — confirm this aliasing is intended.
			sources[pos-1].TargetRevision = syncReq.Revisions[i]
		}
		for index, source := range sources {
			if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.IsAutomatedSyncEnabled() && !syncReq.GetDryRun() {
				// Reject any per-source revision that diverges from the spec's
				// target revision while auto-sync is on ("HEAD" is the default).
				if text.FirstNonEmpty(a.Spec.GetSources()[index].TargetRevision, "HEAD") != text.FirstNonEmpty(source.TargetRevision, "HEAD") {
					return "", "", nil, nil, status.Errorf(codes.FailedPrecondition, "Cannot sync source %s to %s: auto-sync currently set to %s", source.RepoURL, source.TargetRevision, a.Spec.Sources[index].TargetRevision)
				}
			}
			revision, displayRevision, err := s.resolveRevision(ctx, a, syncReq, index)
			if err != nil {
				return "", "", nil, nil, status.Error(codes.FailedPrecondition, err.Error())
			}
			sourceRevisions[index] = revision
			displayRevisions[index] = displayRevision
		}
		return "", "", sourceRevisions, displayRevisions, nil
	}
	source := a.Spec.GetSource()
	if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.IsAutomatedSyncEnabled() && !syncReq.GetDryRun() {
		if syncReq.GetRevision() != "" && syncReq.GetRevision() != text.FirstNonEmpty(source.TargetRevision, "HEAD") {
			return "", "", nil, nil, status.Errorf(codes.FailedPrecondition, "Cannot sync to %s: auto-sync currently set to %s", syncReq.GetRevision(), source.TargetRevision)
		}
	}
	// -1 selects the app's single source in resolveRevision.
	revision, displayRevision, err := s.resolveRevision(ctx, a, syncReq, -1)
	if err != nil {
		return "", "", nil, nil, status.Error(codes.FailedPrecondition, err.Error())
	}
	return revision, displayRevision, nil, nil, nil
}
  2155  
  2156  func (s *Server) Rollback(ctx context.Context, rollbackReq *application.ApplicationRollbackRequest) (*v1alpha1.Application, error) {
  2157  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionSync, rollbackReq.GetProject(), rollbackReq.GetAppNamespace(), rollbackReq.GetName(), "")
  2158  	if err != nil {
  2159  		return nil, err
  2160  	}
  2161  
  2162  	s.inferResourcesStatusHealth(a)
  2163  
  2164  	if a.DeletionTimestamp != nil {
  2165  		return nil, status.Errorf(codes.FailedPrecondition, "application is deleting")
  2166  	}
  2167  	if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.IsAutomatedSyncEnabled() {
  2168  		return nil, status.Errorf(codes.FailedPrecondition, "rollback cannot be initiated when auto-sync is enabled")
  2169  	}
  2170  
  2171  	var deploymentInfo *v1alpha1.RevisionHistory
  2172  	for _, info := range a.Status.History {
  2173  		if info.ID == rollbackReq.GetId() {
  2174  			deploymentInfo = &info
  2175  			break
  2176  		}
  2177  	}
  2178  	if deploymentInfo == nil {
  2179  		return nil, status.Errorf(codes.InvalidArgument, "application %s does not have deployment with id %v", a.QualifiedName(), rollbackReq.GetId())
  2180  	}
  2181  	if deploymentInfo.Source.IsZero() && deploymentInfo.Sources.IsZero() {
  2182  		// Since source type was introduced to history starting with v0.12, and is now required for
  2183  		// rollback, we cannot support rollback to revisions deployed using Argo CD v0.11 or below
  2184  		// As multi source doesn't use app.Source, we need to check to the Sources length
  2185  		return nil, status.Errorf(codes.FailedPrecondition, "cannot rollback to revision deployed with Argo CD v0.11 or lower. sync to revision instead.")
  2186  	}
  2187  
  2188  	var syncOptions v1alpha1.SyncOptions
  2189  	if a.Spec.SyncPolicy != nil {
  2190  		syncOptions = a.Spec.SyncPolicy.SyncOptions
  2191  	}
  2192  
  2193  	// Rollback is just a convenience around Sync
  2194  	op := v1alpha1.Operation{
  2195  		Sync: &v1alpha1.SyncOperation{
  2196  			Revision:     deploymentInfo.Revision,
  2197  			Revisions:    deploymentInfo.Revisions,
  2198  			DryRun:       rollbackReq.GetDryRun(),
  2199  			Prune:        rollbackReq.GetPrune(),
  2200  			SyncOptions:  syncOptions,
  2201  			SyncStrategy: &v1alpha1.SyncStrategy{Apply: &v1alpha1.SyncStrategyApply{}},
  2202  			Source:       &deploymentInfo.Source,
  2203  			Sources:      deploymentInfo.Sources,
  2204  		},
  2205  		InitiatedBy: v1alpha1.OperationInitiator{Username: session.Username(ctx)},
  2206  	}
  2207  	appName := rollbackReq.GetName()
  2208  	appNs := s.appNamespaceOrDefault(rollbackReq.GetAppNamespace())
  2209  	appIf := s.appclientset.ArgoprojV1alpha1().Applications(appNs)
  2210  	a, err = argo.SetAppOperation(appIf, appName, &op)
  2211  	if err != nil {
  2212  		return nil, fmt.Errorf("error setting app operation: %w", err)
  2213  	}
  2214  	s.logAppEvent(ctx, a, argo.EventReasonOperationStarted, fmt.Sprintf("initiated rollback to %d", rollbackReq.GetId()))
  2215  	return a, nil
  2216  }
  2217  
  2218  func (s *Server) ListLinks(ctx context.Context, req *application.ListAppLinksRequest) (*application.LinksResponse, error) {
  2219  	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionGet, req.GetProject(), req.GetNamespace(), req.GetName(), "")
  2220  	if err != nil {
  2221  		return nil, err
  2222  	}
  2223  
  2224  	obj, err := kube.ToUnstructured(a)
  2225  	if err != nil {
  2226  		return nil, fmt.Errorf("error getting application: %w", err)
  2227  	}
  2228  
  2229  	deepLinks, err := s.settingsMgr.GetDeepLinks(settings.ApplicationDeepLinks)
  2230  	if err != nil {
  2231  		return nil, fmt.Errorf("failed to read application deep links from configmap: %w", err)
  2232  	}
  2233  
  2234  	clstObj, _, err := s.getObjectsForDeepLinks(ctx, a, proj)
  2235  	if err != nil {
  2236  		return nil, err
  2237  	}
  2238  
  2239  	deepLinksObject := deeplinks.CreateDeepLinksObject(nil, obj, clstObj, nil)
  2240  
  2241  	finalList, errorList := deeplinks.EvaluateDeepLinksResponse(deepLinksObject, obj.GetName(), deepLinks)
  2242  	if len(errorList) > 0 {
  2243  		log.Errorf("errorList while evaluating application deep links, %v", strings.Join(errorList, ", "))
  2244  	}
  2245  
  2246  	return finalList, nil
  2247  }
  2248  
// getObjectsForDeepLinks returns unstructured representations of the app's
// destination cluster and project, for use as deep-link template context.
// Both are sanitized before being returned: the project's status (which
// carries JWT tokens) is cleared, and the cluster is stripped of credentials
// via deeplinks.SanitizeCluster. Note: this mutates proj (clears its status).
// If the destination cluster cannot be resolved, the failure is logged and
// (nil, nil, nil) is returned so link evaluation can continue without cluster
// context.
func (s *Server) getObjectsForDeepLinks(ctx context.Context, app *v1alpha1.Application, proj *v1alpha1.AppProject) (cluster *unstructured.Unstructured, project *unstructured.Unstructured, err error) {
	// sanitize project jwt tokens
	proj.Status = v1alpha1.AppProjectStatus{}

	project, err = kube.ToUnstructured(proj)
	if err != nil {
		return nil, nil, err
	}

	getProjectClusters := func(project string) ([]*v1alpha1.Cluster, error) {
		return s.db.GetProjectClusters(ctx, project)
	}

	destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, s.db)
	if err != nil {
		// Deliberately swallow the error: an unresolvable destination is not
		// fatal for deep links — log a warning and return nil objects instead.
		log.WithFields(applog.GetAppLogFields(app)).
			WithFields(map[string]any{
				"destination": app.Spec.Destination,
			}).Warnf("cannot validate cluster, error=%v", err.Error())
		return nil, nil, nil
	}

	permitted, err := proj.IsDestinationPermitted(destCluster, app.Spec.Destination.Namespace, getProjectClusters)
	if err != nil {
		return nil, nil, err
	}
	if !permitted {
		return nil, nil, errors.New("error getting destination cluster")
	}
	// sanitize cluster, remove cluster config creds and other unwanted fields
	cluster, err = deeplinks.SanitizeCluster(destCluster)
	return cluster, project, err
}
  2282  
  2283  func (s *Server) ListResourceLinks(ctx context.Context, req *application.ApplicationResourceRequest) (*application.LinksResponse, error) {
  2284  	obj, _, app, _, err := s.getUnstructuredLiveResourceOrApp(ctx, rbac.ActionGet, req)
  2285  	if err != nil {
  2286  		return nil, err
  2287  	}
  2288  	deepLinks, err := s.settingsMgr.GetDeepLinks(settings.ResourceDeepLinks)
  2289  	if err != nil {
  2290  		return nil, fmt.Errorf("failed to read application deep links from configmap: %w", err)
  2291  	}
  2292  
  2293  	obj, err = s.replaceSecretValues(obj)
  2294  	if err != nil {
  2295  		return nil, fmt.Errorf("error replacing secret values: %w", err)
  2296  	}
  2297  
  2298  	appObj, err := kube.ToUnstructured(app)
  2299  	if err != nil {
  2300  		return nil, err
  2301  	}
  2302  
  2303  	proj, err := s.getAppProject(ctx, app, log.WithFields(applog.GetAppLogFields(app)))
  2304  	if err != nil {
  2305  		return nil, err
  2306  	}
  2307  
  2308  	clstObj, projObj, err := s.getObjectsForDeepLinks(ctx, app, proj)
  2309  	if err != nil {
  2310  		return nil, err
  2311  	}
  2312  
  2313  	deepLinksObject := deeplinks.CreateDeepLinksObject(obj, appObj, clstObj, projObj)
  2314  	finalList, errorList := deeplinks.EvaluateDeepLinksResponse(deepLinksObject, obj.GetName(), deepLinks)
  2315  	if len(errorList) > 0 {
  2316  		log.Errorf("errors while evaluating resource deep links, %v", strings.Join(errorList, ", "))
  2317  	}
  2318  
  2319  	return finalList, nil
  2320  }
  2321  
  2322  func getAmbiguousRevision(app *v1alpha1.Application, syncReq *application.ApplicationSyncRequest, sourceIndex int) string {
  2323  	ambiguousRevision := ""
  2324  	if app.Spec.HasMultipleSources() {
  2325  		for i, pos := range syncReq.SourcePositions {
  2326  			if pos == int64(sourceIndex+1) {
  2327  				ambiguousRevision = syncReq.Revisions[i]
  2328  			}
  2329  		}
  2330  		if ambiguousRevision == "" {
  2331  			ambiguousRevision = app.Spec.Sources[sourceIndex].TargetRevision
  2332  		}
  2333  	} else {
  2334  		ambiguousRevision = syncReq.GetRevision()
  2335  		if ambiguousRevision == "" {
  2336  			ambiguousRevision = app.Spec.GetSource().TargetRevision
  2337  		}
  2338  	}
  2339  	return ambiguousRevision
  2340  }
  2341  
// resolveRevision resolves the revision specified either in the sync request, or the
// application source, into a concrete revision that will be used for a sync operation.
// It returns (resolved revision, display revision, error). For syncs from
// locally-supplied manifests both strings are empty. sourceIndex is the
// 0-based index into the app's sources for multi-source apps, or -1 for
// single-source apps.
func (s *Server) resolveRevision(ctx context.Context, app *v1alpha1.Application, syncReq *application.ApplicationSyncRequest, sourceIndex int) (string, string, error) {
	if syncReq.Manifests != nil {
		// Local manifests: there is no repository revision to resolve.
		return "", "", nil
	}

	ambiguousRevision := getAmbiguousRevision(app, syncReq, sourceIndex)

	repoURL := app.Spec.GetSource().RepoURL
	if app.Spec.HasMultipleSources() {
		repoURL = app.Spec.Sources[sourceIndex].RepoURL
	}

	repo, err := s.db.GetRepository(ctx, repoURL, app.Spec.Project)
	if err != nil {
		return "", "", fmt.Errorf("error getting repository by URL: %w", err)
	}
	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
	if err != nil {
		return "", "", fmt.Errorf("error getting repo server client: %w", err)
	}
	defer utilio.Close(conn)

	source := app.Spec.GetSourcePtrByIndex(sourceIndex)
	if !source.IsHelm() {
		if git.IsCommitSHA(ambiguousRevision) {
			// If it's already a commit SHA, then no need to look it up
			return ambiguousRevision, ambiguousRevision, nil
		}
	}

	// Ask the repo server to resolve the symbolic revision (branch, tag, Helm
	// version constraint, ...) into a concrete one.
	resolveRevisionResponse, err := repoClient.ResolveRevision(ctx, &apiclient.ResolveRevisionRequest{
		Repo:              repo,
		App:               app,
		AmbiguousRevision: ambiguousRevision,
		SourceIndex:       int64(sourceIndex),
	})
	if err != nil {
		return "", "", fmt.Errorf("error resolving repo revision: %w", err)
	}
	return resolveRevisionResponse.Revision, resolveRevisionResponse.AmbiguousRevision, nil
}
  2385  
  2386  func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.OperationTerminateRequest) (*application.OperationTerminateResponse, error) {
  2387  	appName := termOpReq.GetName()
  2388  	appNs := s.appNamespaceOrDefault(termOpReq.GetAppNamespace())
  2389  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionSync, termOpReq.GetProject(), appNs, appName, "")
  2390  	if err != nil {
  2391  		return nil, err
  2392  	}
  2393  
  2394  	for i := 0; i < 10; i++ {
  2395  		if a.Operation == nil || a.Status.OperationState == nil {
  2396  			return nil, status.Errorf(codes.InvalidArgument, "Unable to terminate operation. No operation is in progress")
  2397  		}
  2398  		a.Status.OperationState.Phase = common.OperationTerminating
  2399  		updated, err := s.appclientset.ArgoprojV1alpha1().Applications(appNs).Update(ctx, a, metav1.UpdateOptions{})
  2400  		if err == nil {
  2401  			s.waitSync(updated)
  2402  			s.logAppEvent(ctx, a, argo.EventReasonResourceUpdated, "terminated running operation")
  2403  			return &application.OperationTerminateResponse{}, nil
  2404  		}
  2405  		if !apierrors.IsConflict(err) {
  2406  			return nil, fmt.Errorf("error updating application: %w", err)
  2407  		}
  2408  		log.Warnf("failed to set operation for app %q due to update conflict. retrying again...", *termOpReq.Name)
  2409  		time.Sleep(100 * time.Millisecond)
  2410  		_, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
  2411  		if err != nil {
  2412  			return nil, fmt.Errorf("error getting application by name: %w", err)
  2413  		}
  2414  	}
  2415  	return nil, status.Errorf(codes.Internal, "Failed to terminate app. Too many conflicts")
  2416  }
  2417  
  2418  func (s *Server) logAppEvent(ctx context.Context, a *v1alpha1.Application, reason string, action string) {
  2419  	eventInfo := argo.EventInfo{Type: corev1.EventTypeNormal, Reason: reason}
  2420  	user := session.Username(ctx)
  2421  	if user == "" {
  2422  		user = "Unknown user"
  2423  	}
  2424  	message := fmt.Sprintf("%s %s", user, action)
  2425  	eventLabels := argo.GetAppEventLabels(ctx, a, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), s.ns, s.settingsMgr, s.db)
  2426  	s.auditLogger.LogAppEvent(a, eventInfo, message, user, eventLabels)
  2427  }
  2428  
  2429  func (s *Server) logResourceEvent(ctx context.Context, res *v1alpha1.ResourceNode, reason string, action string) {
  2430  	eventInfo := argo.EventInfo{Type: corev1.EventTypeNormal, Reason: reason}
  2431  	user := session.Username(ctx)
  2432  	if user == "" {
  2433  		user = "Unknown user"
  2434  	}
  2435  	message := fmt.Sprintf("%s %s", user, action)
  2436  	s.auditLogger.LogResourceEvent(res, eventInfo, message, user)
  2437  }
  2438  
  2439  func (s *Server) ListResourceActions(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ResourceActionsListResponse, error) {
  2440  	obj, _, _, _, err := s.getUnstructuredLiveResourceOrApp(ctx, rbac.ActionGet, q)
  2441  	if err != nil {
  2442  		return nil, err
  2443  	}
  2444  	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
  2445  	if err != nil {
  2446  		return nil, fmt.Errorf("error getting resource overrides: %w", err)
  2447  	}
  2448  
  2449  	availableActions, err := s.getAvailableActions(resourceOverrides, obj)
  2450  	if err != nil {
  2451  		return nil, fmt.Errorf("error getting available actions: %w", err)
  2452  	}
  2453  	actionsPtr := []*v1alpha1.ResourceAction{}
  2454  	for i := range availableActions {
  2455  		actionsPtr = append(actionsPtr, &availableActions[i])
  2456  	}
  2457  
  2458  	return &application.ResourceActionsListResponse{Actions: actionsPtr}, nil
  2459  }
  2460  
// getUnstructuredLiveResourceOrApp fetches the live object a resource request
// refers to, along with the owning application and its cluster REST config.
//
// Two paths:
//   - If the request addresses the Application resource itself (kind/group
//     match and the resource name equals the app name), the app is loaded via
//     the informer, rbacRequest is enforced against it, and the app itself is
//     converted to unstructured. res stays nil on this path.
//   - Otherwise the referenced child resource is located via the app's live
//     resource tree and fetched from the destination cluster.
//
// Uses named results with a bare return; each branch leaves obj/config/app
// (and res on the resource path) populated before the shared error check.
func (s *Server) getUnstructuredLiveResourceOrApp(ctx context.Context, rbacRequest string, q *application.ApplicationResourceRequest) (obj *unstructured.Unstructured, res *v1alpha1.ResourceNode, app *v1alpha1.Application, config *rest.Config, err error) {
	if q.GetKind() == applicationType.ApplicationKind && q.GetGroup() == applicationType.Group && q.GetName() == q.GetResourceName() {
		app, _, err = s.getApplicationEnforceRBACInformer(ctx, rbacRequest, q.GetProject(), q.GetAppNamespace(), q.GetName())
		if err != nil {
			return nil, nil, nil, nil, err
		}
		// NOTE(review): RBAC is enforced again explicitly here on top of the
		// informer helper above — presumably to apply the full rbacRequest
		// action string to the app itself; confirm both checks are required.
		err = s.enf.EnforceErr(ctx.Value("claims"), rbac.ResourceApplications, rbacRequest, app.RBACName(s.ns))
		if err != nil {
			return nil, nil, nil, nil, err
		}
		config, err = s.getApplicationClusterConfig(ctx, app)
		if err != nil {
			return nil, nil, nil, nil, fmt.Errorf("error getting application cluster config: %w", err)
		}
		obj, err = kube.ToUnstructured(app)
	} else {
		res, config, app, err = s.getAppLiveResource(ctx, rbacRequest, q)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		obj, err = s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
	}
	// Shared error check for the conversion/fetch performed by either branch.
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("error getting resource: %w", err)
	}
	return
}
  2488  
  2489  func (s *Server) getAvailableActions(resourceOverrides map[string]v1alpha1.ResourceOverride, obj *unstructured.Unstructured) ([]v1alpha1.ResourceAction, error) {
  2490  	luaVM := lua.VM{
  2491  		ResourceOverrides: resourceOverrides,
  2492  	}
  2493  
  2494  	discoveryScripts, err := luaVM.GetResourceActionDiscovery(obj)
  2495  	if err != nil {
  2496  		return nil, fmt.Errorf("error getting Lua discovery script: %w", err)
  2497  	}
  2498  	if len(discoveryScripts) == 0 {
  2499  		return []v1alpha1.ResourceAction{}, nil
  2500  	}
  2501  	availableActions, err := luaVM.ExecuteResourceActionDiscovery(obj, discoveryScripts)
  2502  	if err != nil {
  2503  		return nil, fmt.Errorf("error executing Lua discovery script: %w", err)
  2504  	}
  2505  	return availableActions, nil
  2506  }
  2507  
  2508  // RunResourceAction runs a resource action on a live resource
  2509  //
  2510  // Deprecated: use RunResourceActionV2 instead. This version does not support resource action parameters but is
  2511  // maintained for backward compatibility. It will be removed in a future release.
  2512  func (s *Server) RunResourceAction(ctx context.Context, q *application.ResourceActionRunRequest) (*application.ApplicationResponse, error) {
  2513  	log.WithFields(log.Fields{
  2514  		"action":        q.Action,
  2515  		"application":   q.Name,
  2516  		"app-namespace": q.AppNamespace,
  2517  		"project":       q.Project,
  2518  		"user":          session.Username(ctx),
  2519  	}).Warn("RunResourceAction was called. RunResourceAction is deprecated and will be removed in a future release. Use RunResourceActionV2 instead.")
  2520  	qV2 := &application.ResourceActionRunRequestV2{
  2521  		Name:         q.Name,
  2522  		AppNamespace: q.AppNamespace,
  2523  		Namespace:    q.Namespace,
  2524  		ResourceName: q.ResourceName,
  2525  		Kind:         q.Kind,
  2526  		Version:      q.Version,
  2527  		Group:        q.Group,
  2528  		Action:       q.Action,
  2529  		Project:      q.Project,
  2530  	}
  2531  	return s.RunResourceActionV2(ctx, qV2)
  2532  }
  2533  
  2534  func (s *Server) RunResourceActionV2(ctx context.Context, q *application.ResourceActionRunRequestV2) (*application.ApplicationResponse, error) {
  2535  	resourceRequest := &application.ApplicationResourceRequest{
  2536  		Name:         q.Name,
  2537  		AppNamespace: q.AppNamespace,
  2538  		Namespace:    q.Namespace,
  2539  		ResourceName: q.ResourceName,
  2540  		Kind:         q.Kind,
  2541  		Version:      q.Version,
  2542  		Group:        q.Group,
  2543  		Project:      q.Project,
  2544  	}
  2545  	actionRequest := fmt.Sprintf("%s/%s/%s/%s", rbac.ActionAction, q.GetGroup(), q.GetKind(), q.GetAction())
  2546  	liveObj, res, a, config, err := s.getUnstructuredLiveResourceOrApp(ctx, actionRequest, resourceRequest)
  2547  	if err != nil {
  2548  		return nil, err
  2549  	}
  2550  
  2551  	liveObjBytes, err := json.Marshal(liveObj)
  2552  	if err != nil {
  2553  		return nil, fmt.Errorf("error marshaling live object: %w", err)
  2554  	}
  2555  
  2556  	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
  2557  	if err != nil {
  2558  		return nil, fmt.Errorf("error getting resource overrides: %w", err)
  2559  	}
  2560  
  2561  	luaVM := lua.VM{
  2562  		ResourceOverrides: resourceOverrides,
  2563  	}
  2564  	action, err := luaVM.GetResourceAction(liveObj, q.GetAction())
  2565  	if err != nil {
  2566  		return nil, fmt.Errorf("error getting Lua resource action: %w", err)
  2567  	}
  2568  
  2569  	newObjects, err := luaVM.ExecuteResourceAction(liveObj, action.ActionLua, q.GetResourceActionParameters())
  2570  	if err != nil {
  2571  		return nil, fmt.Errorf("error executing Lua resource action: %w", err)
  2572  	}
  2573  
  2574  	var app *v1alpha1.Application
  2575  	// Only bother getting the app if we know we're going to need it for a resource permission check.
  2576  	if len(newObjects) > 0 {
  2577  		// No need for an RBAC check, we checked above that the user is allowed to run this action.
  2578  		app, err = s.appLister.Applications(s.appNamespaceOrDefault(q.GetAppNamespace())).Get(q.GetName())
  2579  		if err != nil {
  2580  			return nil, err
  2581  		}
  2582  	}
  2583  
  2584  	proj, err := s.getAppProject(ctx, a, log.WithFields(applog.GetAppLogFields(a)))
  2585  	if err != nil {
  2586  		return nil, err
  2587  	}
  2588  
  2589  	destCluster, err := argo.GetDestinationCluster(ctx, app.Spec.Destination, s.db)
  2590  	if err != nil {
  2591  		return nil, err
  2592  	}
  2593  
  2594  	// First, make sure all the returned resources are permitted, for each operation.
  2595  	// Also perform create with dry-runs for all create-operation resources.
  2596  	// This is performed separately to reduce the risk of only some of the resources being successfully created later.
  2597  	// TODO: when apply/delete operations would be supported for custom actions,
  2598  	// the dry-run for relevant apply/delete operation would have to be invoked as well.
  2599  	for _, impactedResource := range newObjects {
  2600  		newObj := impactedResource.UnstructuredObj
  2601  		err := s.verifyResourcePermitted(destCluster, proj, newObj)
  2602  		if err != nil {
  2603  			return nil, err
  2604  		}
  2605  		if impactedResource.K8SOperation == lua.CreateOperation {
  2606  			createOptions := metav1.CreateOptions{DryRun: []string{"All"}}
  2607  			_, err := s.kubectl.CreateResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), newObj, createOptions)
  2608  			if err != nil {
  2609  				return nil, err
  2610  			}
  2611  		}
  2612  	}
  2613  
  2614  	// Now, perform the actual operations.
  2615  	// The creation itself is not transactional.
  2616  	// TODO: maybe create a k8s list representation of the resources,
  2617  	// and invoke create on this list resource to make it semi-transactional (there is still patch operation that is separate,
  2618  	// thus can fail separately from create).
  2619  	for _, impactedResource := range newObjects {
  2620  		newObj := impactedResource.UnstructuredObj
  2621  		newObjBytes, err := json.Marshal(newObj)
  2622  		if err != nil {
  2623  			return nil, fmt.Errorf("error marshaling new object: %w", err)
  2624  		}
  2625  
  2626  		switch impactedResource.K8SOperation {
  2627  		// No default case since a not supported operation would have failed upon unmarshaling earlier
  2628  		case lua.PatchOperation:
  2629  			_, err := s.patchResource(ctx, config, liveObjBytes, newObjBytes, newObj)
  2630  			if err != nil {
  2631  				return nil, err
  2632  			}
  2633  		case lua.CreateOperation:
  2634  			_, err := s.createResource(ctx, config, newObj)
  2635  			if err != nil {
  2636  				return nil, err
  2637  			}
  2638  		}
  2639  	}
  2640  
  2641  	if res == nil {
  2642  		s.logAppEvent(ctx, a, argo.EventReasonResourceActionRan, "ran action "+q.GetAction())
  2643  	} else {
  2644  		s.logAppEvent(ctx, a, argo.EventReasonResourceActionRan, fmt.Sprintf("ran action %s on resource %s/%s/%s", q.GetAction(), res.Group, res.Kind, res.Name))
  2645  		s.logResourceEvent(ctx, res, argo.EventReasonResourceActionRan, "ran action "+q.GetAction())
  2646  	}
  2647  	return &application.ApplicationResponse{}, nil
  2648  }
  2649  
// patchResource applies a JSON merge patch that transforms the live object
// (liveObjBytes) into the desired state (newObjBytes). An empty diff ("{}")
// is a no-op and returns immediately. See the inline comments for the
// status-subresource split handling.
func (s *Server) patchResource(ctx context.Context, config *rest.Config, liveObjBytes, newObjBytes []byte, newObj *unstructured.Unstructured) (*application.ApplicationResponse, error) {
	diffBytes, err := jsonpatch.CreateMergePatch(liveObjBytes, newObjBytes)
	if err != nil {
		return nil, fmt.Errorf("error calculating merge patch: %w", err)
	}
	if string(diffBytes) == "{}" {
		return &application.ApplicationResponse{}, nil
	}

	// The following logic detects if the resource action makes a modification to status and/or spec.
	// If status was modified, we attempt to patch the status using status subresource, in case the
	// CRD is configured using the status subresource feature. See:
	// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource
	// If status subresource is in use, the patch has to be split into two:
	// * one to update spec (and other non-status fields)
	// * the other to update only status.
	nonStatusPatch, statusPatch, err := splitStatusPatch(diffBytes)
	if err != nil {
		return nil, fmt.Errorf("error splitting status patch: %w", err)
	}
	if statusPatch != nil {
		// NOTE(review): the full diffBytes (not the split-out statusPatch) is
		// sent to the "status" subresource here — presumably relying on the API
		// server ignoring non-status fields on that endpoint; confirm intended.
		_, err = s.kubectl.PatchResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), types.MergePatchType, diffBytes, "status")
		if err != nil {
			if !apierrors.IsNotFound(err) {
				return nil, fmt.Errorf("error patching resource: %w", err)
			}
			// K8s API server returns 404 NotFound when the CRD does not support the status subresource
			// if we get here, the CRD does not use the status subresource. We will fall back to a normal patch
		} else {
			// If we get here, the CRD does use the status subresource, so we must patch status and
			// spec separately. update the diffBytes to the spec-only patch and fall through.
			diffBytes = nonStatusPatch
		}
	}
	// diffBytes is nil here only when the patch was status-only and the status
	// subresource patch above already succeeded.
	if diffBytes != nil {
		_, err = s.kubectl.PatchResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), types.MergePatchType, diffBytes)
		if err != nil {
			return nil, fmt.Errorf("error patching resource: %w", err)
		}
	}
	return &application.ApplicationResponse{}, nil
}
  2692  
  2693  func (s *Server) verifyResourcePermitted(destCluster *v1alpha1.Cluster, proj *v1alpha1.AppProject, obj *unstructured.Unstructured) error {
  2694  	permitted, err := proj.IsResourcePermitted(schema.GroupKind{Group: obj.GroupVersionKind().Group, Kind: obj.GroupVersionKind().Kind}, obj.GetNamespace(), destCluster, func(project string) ([]*v1alpha1.Cluster, error) {
  2695  		clusters, err := s.db.GetProjectClusters(context.TODO(), project)
  2696  		if err != nil {
  2697  			return nil, fmt.Errorf("failed to get project clusters: %w", err)
  2698  		}
  2699  		return clusters, nil
  2700  	})
  2701  	if err != nil {
  2702  		return fmt.Errorf("error checking resource permissions: %w", err)
  2703  	}
  2704  	if !permitted {
  2705  		return fmt.Errorf("application is not permitted to manage %s/%s/%s in %s", obj.GroupVersionKind().Group, obj.GroupVersionKind().Kind, obj.GetName(), obj.GetNamespace())
  2706  	}
  2707  
  2708  	return nil
  2709  }
  2710  
  2711  func (s *Server) createResource(ctx context.Context, config *rest.Config, newObj *unstructured.Unstructured) (*application.ApplicationResponse, error) {
  2712  	_, err := s.kubectl.CreateResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), newObj, metav1.CreateOptions{})
  2713  	if err != nil {
  2714  		return nil, fmt.Errorf("error creating resource: %w", err)
  2715  	}
  2716  	return &application.ApplicationResponse{}, nil
  2717  }
  2718  
  2719  // splitStatusPatch splits a patch into two: one for a non-status patch, and the status-only patch.
  2720  // Returns nil for either if the patch doesn't have modifications to non-status, or status, respectively.
  2721  func splitStatusPatch(patch []byte) ([]byte, []byte, error) {
  2722  	var obj map[string]any
  2723  	err := json.Unmarshal(patch, &obj)
  2724  	if err != nil {
  2725  		return nil, nil, err
  2726  	}
  2727  	var nonStatusPatch, statusPatch []byte
  2728  	if statusVal, ok := obj["status"]; ok {
  2729  		// calculate the status-only patch
  2730  		statusObj := map[string]any{
  2731  			"status": statusVal,
  2732  		}
  2733  		statusPatch, err = json.Marshal(statusObj)
  2734  		if err != nil {
  2735  			return nil, nil, err
  2736  		}
  2737  		// remove status, and calculate the non-status patch
  2738  		delete(obj, "status")
  2739  		if len(obj) > 0 {
  2740  			nonStatusPatch, err = json.Marshal(obj)
  2741  			if err != nil {
  2742  				return nil, nil, err
  2743  			}
  2744  		}
  2745  	} else {
  2746  		// status was not modified in patch
  2747  		nonStatusPatch = patch
  2748  	}
  2749  	return nonStatusPatch, statusPatch, nil
  2750  }
  2751  
  2752  func (s *Server) GetApplicationSyncWindows(ctx context.Context, q *application.ApplicationSyncWindowsQuery) (*application.ApplicationSyncWindowsResponse, error) {
  2753  	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName(), "")
  2754  	if err != nil {
  2755  		return nil, err
  2756  	}
  2757  
  2758  	windows := proj.Spec.SyncWindows.Matches(a)
  2759  	sync, err := windows.CanSync(true)
  2760  	if err != nil {
  2761  		return nil, fmt.Errorf("invalid sync windows: %w", err)
  2762  	}
  2763  
  2764  	activeWindows, err := windows.Active()
  2765  	if err != nil {
  2766  		return nil, fmt.Errorf("invalid sync windows: %w", err)
  2767  	}
  2768  	res := &application.ApplicationSyncWindowsResponse{
  2769  		ActiveWindows:   convertSyncWindows(activeWindows),
  2770  		AssignedWindows: convertSyncWindows(windows),
  2771  		CanSync:         &sync,
  2772  	}
  2773  
  2774  	return res, nil
  2775  }
  2776  
  2777  func (s *Server) inferResourcesStatusHealth(app *v1alpha1.Application) {
  2778  	if app.Status.ResourceHealthSource == v1alpha1.ResourceHealthLocationAppTree {
  2779  		tree := &v1alpha1.ApplicationTree{}
  2780  		if err := s.cache.GetAppResourcesTree(app.InstanceName(s.ns), tree); err == nil {
  2781  			healthByKey := map[kube.ResourceKey]*v1alpha1.HealthStatus{}
  2782  			for _, node := range tree.Nodes {
  2783  				if node.Health != nil {
  2784  					healthByKey[kube.NewResourceKey(node.Group, node.Kind, node.Namespace, node.Name)] = node.Health
  2785  				} else if node.ResourceVersion == "" && node.UID == "" && node.CreatedAt == nil {
  2786  					healthByKey[kube.NewResourceKey(node.Group, node.Kind, node.Namespace, node.Name)] = &v1alpha1.HealthStatus{
  2787  						Status:  health.HealthStatusMissing,
  2788  						Message: "Resource has not been created",
  2789  					}
  2790  				}
  2791  			}
  2792  			for i, res := range app.Status.Resources {
  2793  				res.Health = healthByKey[kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)]
  2794  				app.Status.Resources[i] = res
  2795  			}
  2796  		}
  2797  	}
  2798  }
  2799  
  2800  func convertSyncWindows(w *v1alpha1.SyncWindows) []*application.ApplicationSyncWindow {
  2801  	if w != nil {
  2802  		var windows []*application.ApplicationSyncWindow
  2803  		for _, w := range *w {
  2804  			nw := &application.ApplicationSyncWindow{
  2805  				Kind:       &w.Kind,
  2806  				Schedule:   &w.Schedule,
  2807  				Duration:   &w.Duration,
  2808  				ManualSync: &w.ManualSync,
  2809  			}
  2810  			windows = append(windows, nw)
  2811  		}
  2812  		if len(windows) > 0 {
  2813  			return windows
  2814  		}
  2815  	}
  2816  	return nil
  2817  }
  2818  
  2819  func getPropagationPolicyFinalizer(policy string) string {
  2820  	switch strings.ToLower(policy) {
  2821  	case backgroundPropagationPolicy:
  2822  		return v1alpha1.BackgroundPropagationPolicyFinalizer
  2823  	case foregroundPropagationPolicy:
  2824  		return v1alpha1.ForegroundPropagationPolicyFinalizer
  2825  	case "":
  2826  		return v1alpha1.ResourcesFinalizerName
  2827  	default:
  2828  		return ""
  2829  	}
  2830  }
  2831  
  2832  func (s *Server) appNamespaceOrDefault(appNs string) string {
  2833  	if appNs == "" {
  2834  		return s.ns
  2835  	}
  2836  	return appNs
  2837  }
  2838  
// isNamespaceEnabled reports whether applications in the given namespace may
// be managed by this server: the control-plane namespace (s.ns) itself, or a
// namespace matching the configured list of enabled namespaces.
func (s *Server) isNamespaceEnabled(namespace string) bool {
	return security.IsNamespaceEnabled(namespace, s.ns, s.enabledNamespaces)
}
  2842  
  2843  // getProjectsFromApplicationQuery gets the project names from a query. If the legacy "project" field was specified, use
  2844  // that. Otherwise, use the newer "projects" field.
  2845  func getProjectsFromApplicationQuery(q application.ApplicationQuery) []string {
  2846  	if q.Project != nil {
  2847  		return q.Project
  2848  	}
  2849  	return q.Projects
  2850  }
  2851  
// ServerSideDiff gets the destination cluster and creates a server-side dry run applier and performs the diff
// It returns the diff result in the form of a list of ResourceDiffs.
//
// The CLI is expected to send q.LiveResources and q.TargetManifests as
// index-aligned arrays; metadata for each diff result is taken from whichever
// side exists at that index (see the switch below).
func (s *Server) ServerSideDiff(ctx context.Context, q *application.ApplicationServerSideDiffQuery) (*application.ApplicationServerSideDiffResponse, error) {
	// RBAC: caller must be able to "get" the application before any diff data
	// is computed or revealed.
	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbac.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetAppName())
	if err != nil {
		return nil, fmt.Errorf("error getting application: %w", err)
	}

	argoSettings, err := s.settingsMgr.GetSettings()
	if err != nil {
		return nil, fmt.Errorf("error getting ArgoCD settings: %w", err)
	}

	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
	if err != nil {
		return nil, fmt.Errorf("error getting resource overrides: %w", err)
	}

	// Convert to map format expected by DiffConfigBuilder
	overrides := make(map[string]v1alpha1.ResourceOverride)
	for k, v := range resourceOverrides {
		overrides[k] = v
	}

	// Get cluster connection for server-side dry run
	cluster, err := argo.GetDestinationCluster(ctx, a.Spec.Destination, s.db)
	if err != nil {
		return nil, fmt.Errorf("error getting destination cluster: %w", err)
	}

	clusterConfig, err := cluster.RawRestConfig()
	if err != nil {
		return nil, fmt.Errorf("error getting cluster raw REST config: %w", err)
	}

	// Create server-side diff dry run applier
	openAPISchema, gvkParser, err := s.kubectl.LoadOpenAPISchema(clusterConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to get OpenAPI schema: %w", err)
	}

	// The no-op cleanup callback means no extra per-cluster teardown is
	// registered beyond what ManageServerSideDiffDryRuns itself returns.
	applier, cleanup, err := kubeutil.ManageServerSideDiffDryRuns(clusterConfig, openAPISchema, func(_ string) (kube.CleanupFunc, error) {
		return func() {}, nil
	})
	if err != nil {
		return nil, fmt.Errorf("error creating server-side dry run applier: %w", err)
	}
	defer cleanup()

	dryRunner := diff.NewK8sServerSideDryRunner(applier)

	appLabelKey, err := s.settingsMgr.GetAppInstanceLabelKey()
	if err != nil {
		return nil, fmt.Errorf("error getting app instance label key: %w", err)
	}

	// Build diff config like the CLI does, but with server-side diff enabled
	ignoreAggregatedRoles := false
	diffConfig, err := argodiff.NewDiffConfigBuilder().
		WithDiffSettings(a.Spec.IgnoreDifferences, overrides, ignoreAggregatedRoles, normalizers.IgnoreNormalizerOpts{}).
		WithTracking(appLabelKey, argoSettings.TrackingMethod).
		WithNoCache().
		WithManager(argocommon.ArgoCDSSAManager).
		WithServerSideDiff(true).
		WithServerSideDryRunner(dryRunner).
		WithGVKParser(gvkParser).
		WithIgnoreMutationWebhook(!resourceutil.HasAnnotationOption(a, argocommon.AnnotationCompareOptions, "IncludeMutationWebhook=true")).
		Build()
	if err != nil {
		return nil, fmt.Errorf("error building diff config: %w", err)
	}

	// Convert live resources to unstructured objects. A nil entry is appended
	// for empty/"null" live states to preserve index alignment with the
	// target manifests.
	liveObjs := make([]*unstructured.Unstructured, 0, len(q.GetLiveResources()))
	for _, liveResource := range q.GetLiveResources() {
		if liveResource.LiveState != "" && liveResource.LiveState != "null" {
			liveObj := &unstructured.Unstructured{}
			err := json.Unmarshal([]byte(liveResource.LiveState), liveObj)
			if err != nil {
				return nil, fmt.Errorf("error unmarshaling live state for %s/%s: %w", liveResource.Kind, liveResource.Name, err)
			}
			liveObjs = append(liveObjs, liveObj)
		} else {
			liveObjs = append(liveObjs, nil)
		}
	}

	// Convert target manifests to unstructured objects
	targetObjs := make([]*unstructured.Unstructured, 0, len(q.GetTargetManifests()))
	for i, manifestStr := range q.GetTargetManifests() {
		obj, err := v1alpha1.UnmarshalToUnstructured(manifestStr)
		if err != nil {
			return nil, fmt.Errorf("error unmarshaling target manifest %d: %w", i, err)
		}
		targetObjs = append(targetObjs, obj)
	}

	diffResults, err := argodiff.StateDiffs(liveObjs, targetObjs, diffConfig)
	if err != nil {
		return nil, fmt.Errorf("error performing state diffs: %w", err)
	}

	// Convert StateDiffs results to ResourceDiff format for API response
	responseDiffs := make([]*v1alpha1.ResourceDiff, 0, len(diffResults.Diffs))
	// modified becomes true if any single resource differs.
	modified := false

	for i, diffRes := range diffResults.Diffs {
		if diffRes.Modified {
			modified = true
		}

		// Extract resource metadata for the diff result. Resources should be pre-aligned by the CLI.
		var group, kind, namespace, name string
		var hook bool
		var resourceVersion string

		// Extract resource metadata for the ResourceDiff response. The CLI sends aligned arrays
		// of live resources and target manifests, but individual resources may only exist in one
		// array depending on the operation
		switch {
		case i < len(q.GetLiveResources()):
			// A live resource exists at this index
			lr := q.GetLiveResources()[i]
			group = lr.Group
			kind = lr.Kind
			namespace = lr.Namespace
			name = lr.Name
			hook = lr.Hook
			resourceVersion = lr.ResourceVersion
		case i < len(targetObjs) && targetObjs[i] != nil:
			// A target resource exists at this index, but no live resource exists at this index
			obj := targetObjs[i]
			group = obj.GroupVersionKind().Group
			kind = obj.GroupVersionKind().Kind
			namespace = obj.GetNamespace()
			name = obj.GetName()
			hook = false
			resourceVersion = ""
		default:
			// Neither side has a resource at this index: the request arrays
			// were not aligned as expected, so fail loudly.
			return nil, fmt.Errorf("diff result index %d out of bounds: live resources (%d), target objects (%d)",
				i, len(q.GetLiveResources()), len(targetObjs))
		}

		// Create ResourceDiff with StateDiffs results
		// TargetState = PredictedLive (what the target should be after applying)
		// LiveState = NormalizedLive (current normalized live state)
		responseDiffs = append(responseDiffs, &v1alpha1.ResourceDiff{
			Group:           group,
			Kind:            kind,
			Namespace:       namespace,
			Name:            name,
			TargetState:     string(diffRes.PredictedLive),
			LiveState:       string(diffRes.NormalizedLive),
			Diff:            "", // Diff string is generated client-side
			Hook:            hook,
			Modified:        diffRes.Modified,
			ResourceVersion: resourceVersion,
		})
	}

	log.Infof("ServerSideDiff completed with %d results, overall modified: %t", len(responseDiffs), modified)

	return &application.ApplicationServerSideDiffResponse{
		Items:    responseDiffs,
		Modified: &modified,
	}, nil
}