github.com/argoproj/argo-cd/v2@v2.10.9/server/application/application.go

     1  package application
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"errors"
     7  	"fmt"
     8  	"math"
     9  	"reflect"
    10  	"sort"
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  
    15  	kubecache "github.com/argoproj/gitops-engine/pkg/cache"
    16  	"github.com/argoproj/gitops-engine/pkg/diff"
    17  	"github.com/argoproj/gitops-engine/pkg/sync/common"
    18  	"github.com/argoproj/gitops-engine/pkg/utils/kube"
    19  	"github.com/argoproj/gitops-engine/pkg/utils/text"
    20  	"github.com/argoproj/pkg/sync"
    21  	jsonpatch "github.com/evanphx/json-patch"
    22  	log "github.com/sirupsen/logrus"
    23  	"google.golang.org/grpc/codes"
    24  	"google.golang.org/grpc/status"
    25  	v1 "k8s.io/api/core/v1"
    26  	apierr "k8s.io/apimachinery/pkg/api/errors"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    29  	"k8s.io/apimachinery/pkg/fields"
    30  	"k8s.io/apimachinery/pkg/labels"
    31  	"k8s.io/apimachinery/pkg/runtime/schema"
    32  	"k8s.io/apimachinery/pkg/types"
    33  	"k8s.io/apimachinery/pkg/watch"
    34  	"k8s.io/client-go/kubernetes"
    35  	"k8s.io/client-go/rest"
    36  	"k8s.io/client-go/tools/cache"
    37  	"k8s.io/utils/pointer"
    38  
    39  	argocommon "github.com/argoproj/argo-cd/v2/common"
    40  	"github.com/argoproj/argo-cd/v2/pkg/apiclient/application"
    41  	appv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
    42  	appclientset "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned"
    43  	applisters "github.com/argoproj/argo-cd/v2/pkg/client/listers/application/v1alpha1"
    44  	"github.com/argoproj/argo-cd/v2/reposerver/apiclient"
    45  	servercache "github.com/argoproj/argo-cd/v2/server/cache"
    46  	"github.com/argoproj/argo-cd/v2/server/deeplinks"
    47  	"github.com/argoproj/argo-cd/v2/server/rbacpolicy"
    48  	"github.com/argoproj/argo-cd/v2/util/argo"
    49  	argoutil "github.com/argoproj/argo-cd/v2/util/argo"
    50  	"github.com/argoproj/argo-cd/v2/util/collections"
    51  	"github.com/argoproj/argo-cd/v2/util/db"
    52  	"github.com/argoproj/argo-cd/v2/util/env"
    53  	"github.com/argoproj/argo-cd/v2/util/git"
    54  	ioutil "github.com/argoproj/argo-cd/v2/util/io"
    55  	"github.com/argoproj/argo-cd/v2/util/lua"
    56  	"github.com/argoproj/argo-cd/v2/util/manifeststream"
    57  	"github.com/argoproj/argo-cd/v2/util/rbac"
    58  	"github.com/argoproj/argo-cd/v2/util/security"
    59  	"github.com/argoproj/argo-cd/v2/util/session"
    60  	"github.com/argoproj/argo-cd/v2/util/settings"
    61  
    62  	applicationType "github.com/argoproj/argo-cd/v2/pkg/apis/application"
    63  )
    64  
    65  type AppResourceTreeFn func(ctx context.Context, app *appv1.Application) (*appv1.ApplicationTree, error)
    66  
    67  const (
    68  	maxPodLogsToRender                 = 10
    69  	backgroundPropagationPolicy string = "background"
    70  	foregroundPropagationPolicy string = "foreground"
    71  )
    72  
    73  var (
    74  	watchAPIBufferSize  = env.ParseNumFromEnv(argocommon.EnvWatchAPIBufferSize, 1000, 0, math.MaxInt32)
    75  	permissionDeniedErr = status.Error(codes.PermissionDenied, "permission denied")
    76  )
    77  
    78  // Server provides an Application service
    79  type Server struct {
    80  	ns                string
    81  	kubeclientset     kubernetes.Interface
    82  	appclientset      appclientset.Interface
    83  	appLister         applisters.ApplicationLister
    84  	appInformer       cache.SharedIndexInformer
    85  	appBroadcaster    Broadcaster
    86  	repoClientset     apiclient.Clientset
    87  	kubectl           kube.Kubectl
    88  	db                db.ArgoDB
    89  	enf               *rbac.Enforcer
    90  	projectLock       sync.KeyLock
    91  	auditLogger       *argo.AuditLogger
    92  	settingsMgr       *settings.SettingsManager
    93  	cache             *servercache.Cache
    94  	projInformer      cache.SharedIndexInformer
    95  	enabledNamespaces []string
    96  }
    97  
    98  // NewServer returns a new instance of the Application service
    99  func NewServer(
   100  	namespace string,
   101  	kubeclientset kubernetes.Interface,
   102  	appclientset appclientset.Interface,
   103  	appLister applisters.ApplicationLister,
   104  	appInformer cache.SharedIndexInformer,
   105  	appBroadcaster Broadcaster,
   106  	repoClientset apiclient.Clientset,
   107  	cache *servercache.Cache,
   108  	kubectl kube.Kubectl,
   109  	db db.ArgoDB,
   110  	enf *rbac.Enforcer,
   111  	projectLock sync.KeyLock,
   112  	settingsMgr *settings.SettingsManager,
   113  	projInformer cache.SharedIndexInformer,
   114  	enabledNamespaces []string,
   115  ) (application.ApplicationServiceServer, AppResourceTreeFn) {
   116  	if appBroadcaster == nil {
   117  		appBroadcaster = &broadcasterHandler{}
   118  	}
   119  	_, err := appInformer.AddEventHandler(appBroadcaster)
   120  	if err != nil {
   121  		log.Error(err)
   122  	}
   123  	s := &Server{
   124  		ns:                namespace,
   125  		appclientset:      appclientset,
   126  		appLister:         appLister,
   127  		appInformer:       appInformer,
   128  		appBroadcaster:    appBroadcaster,
   129  		kubeclientset:     kubeclientset,
   130  		cache:             cache,
   131  		db:                db,
   132  		repoClientset:     repoClientset,
   133  		kubectl:           kubectl,
   134  		enf:               enf,
   135  		projectLock:       projectLock,
   136  		auditLogger:       argo.NewAuditLogger(namespace, kubeclientset, "argocd-server"),
   137  		settingsMgr:       settingsMgr,
   138  		projInformer:      projInformer,
   139  		enabledNamespaces: enabledNamespaces,
   140  	}
   141  	return s, s.getAppResources
   142  }
   143  
   144  // getAppEnforceRBAC gets the Application with the given name in the given namespace. If no namespace is
   145  // specified, the Application is fetched from the default namespace (the one in which the API server is running).
   146  //
   147  // If the user does not provide a "project," then we have to be very careful how we respond. If an app with the given
   148  // name exists, and the user has access to that app in the app's project, we return the app. If the app exists but the
   149  // user does not have access, we return "permission denied." If the app does not exist, we return "permission denied" -
   150  // if we responded with a 404, then the user could infer that the app exists when they get "permission denied."
   151  //
   152  // If the user does provide a "project," we can respond more specifically. If the user does not have access to the given
    153  // app name in the given project, we return "permission denied." If the app exists, but the project is different from
         // the one specified in the request, we return "not found" so that the user cannot infer that the app exists in another project.
   154  func (s *Server) getAppEnforceRBAC(ctx context.Context, action, project, namespace, name string, getApp func() (*appv1.Application, error)) (*appv1.Application, *appv1.AppProject, error) {
   155  	user := session.Username(ctx)
   156  	if user == "" {
   157  		user = "Unknown user"
   158  	}
   159  	logCtx := log.WithFields(map[string]interface{}{
   160  		"user":        user,
   161  		"application": name,
   162  		"namespace":   namespace,
   163  	})
   164  	if project != "" {
   165  		// The user has provided everything we need to perform an initial RBAC check.
   166  		givenRBACName := security.RBACName(s.ns, project, namespace, name)
   167  		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, action, givenRBACName); err != nil {
   168  			logCtx.WithFields(map[string]interface{}{
   169  				"project":                project,
   170  				argocommon.SecurityField: argocommon.SecurityMedium,
   171  			}).Warnf("user tried to %s application which they do not have access to: %s", action, err)
   172  			// Do a GET on the app. This ensures that the timing of a "no access" response is the same as a "yes access,
   173  			// but the app is in a different project" response. We don't want the user inferring the existence of the
   174  			// app from response time.
   175  			_, _ = getApp()
   176  			return nil, nil, permissionDeniedErr
   177  		}
   178  	}
   179  	a, err := getApp()
   180  	if err != nil {
   181  		if apierr.IsNotFound(err) {
   182  			if project != "" {
   183  				// We know that the user was allowed to get the Application, but the Application does not exist. Return 404.
   184  				return nil, nil, status.Errorf(codes.NotFound, apierr.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "applications"}, name).Error())
   185  			}
   186  			// We don't know if the user was allowed to get the Application, and we don't want to leak information about
   187  			// the Application's existence. Return 403.
   188  			logCtx.Warn("application does not exist")
   189  			return nil, nil, permissionDeniedErr
   190  		}
   191  		logCtx.Errorf("failed to get application: %s", err)
   192  		return nil, nil, permissionDeniedErr
   193  	}
   194  	// Even if we performed an initial RBAC check (because the request was fully parameterized), we still need to
   195  	// perform a second RBAC check to ensure that the user has access to the actual Application's project (not just the
   196  	// project they specified in the request).
   197  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, action, a.RBACName(s.ns)); err != nil {
   198  		logCtx.WithFields(map[string]interface{}{
   199  			"project":                a.Spec.Project,
   200  			argocommon.SecurityField: argocommon.SecurityMedium,
   201  		}).Warnf("user tried to %s application which they do not have access to: %s", action, err)
   202  		if project != "" {
   203  			// The user specified a project. We would have returned a 404 if the user had access to the app, but the app
   204  			// did not exist. So we have to return a 404 when the app does exist, but the user does not have access.
   205  			// Otherwise, they could infer that the app exists based on the error code.
   206  			return nil, nil, status.Errorf(codes.NotFound, apierr.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "applications"}, name).Error())
   207  		}
   208  		// The user didn't specify a project. We always return permission denied for both lack of access and lack of
   209  		// existence.
   210  		return nil, nil, permissionDeniedErr
   211  	}
   212  	effectiveProject := "default"
   213  	if a.Spec.Project != "" {
   214  		effectiveProject = a.Spec.Project
   215  	}
   216  	if project != "" && effectiveProject != project {
   217  		logCtx.WithFields(map[string]interface{}{
   218  			"project":                a.Spec.Project,
   219  			argocommon.SecurityField: argocommon.SecurityMedium,
   220  		}).Warnf("user tried to %s application in project %s, but the application is in project %s", action, project, effectiveProject)
   221  		// The user has access to the app, but the app is in a different project. Return 404, meaning "app doesn't
   222  		// exist in that project".
   223  		return nil, nil, status.Errorf(codes.NotFound, apierr.NewNotFound(schema.GroupResource{Group: "argoproj.io", Resource: "applications"}, name).Error())
   224  	}
   225  	// Get the app's associated project, and make sure all project restrictions are enforced.
   226  	proj, err := s.getAppProject(ctx, a, logCtx)
   227  	if err != nil {
   228  		return a, nil, err
   229  	}
   230  	return a, proj, nil
   231  }
   232  
   233  // getApplicationEnforceRBACInformer uses an informer to get an Application. If the app does not exist, permission is
   234  // denied, or any other error occurs when getting the app, we return a permission denied error to obscure any sensitive
   235  // information.
   236  func (s *Server) getApplicationEnforceRBACInformer(ctx context.Context, action, project, namespace, name string) (*appv1.Application, *appv1.AppProject, error) {
   237  	namespaceOrDefault := s.appNamespaceOrDefault(namespace)
   238  	return s.getAppEnforceRBAC(ctx, action, project, namespaceOrDefault, name, func() (*appv1.Application, error) {
   239  		return s.appLister.Applications(namespaceOrDefault).Get(name)
   240  	})
   241  }
   242  
   243  // getApplicationEnforceRBACClient uses a client to get an Application. If the app does not exist, permission is denied,
   244  // or any other error occurs when getting the app, we return a permission denied error to obscure any sensitive
   245  // information.
   246  func (s *Server) getApplicationEnforceRBACClient(ctx context.Context, action, project, namespace, name, resourceVersion string) (*appv1.Application, *appv1.AppProject, error) {
   247  	namespaceOrDefault := s.appNamespaceOrDefault(namespace)
   248  	return s.getAppEnforceRBAC(ctx, action, project, namespaceOrDefault, name, func() (*appv1.Application, error) {
   249  		if !s.isNamespaceEnabled(namespaceOrDefault) {
   250  			return nil, security.NamespaceNotPermittedError(namespaceOrDefault)
   251  		}
   252  		return s.appclientset.ArgoprojV1alpha1().Applications(namespaceOrDefault).Get(ctx, name, metav1.GetOptions{
   253  			ResourceVersion: resourceVersion,
   254  		})
   255  	})
   256  }
   257  
   258  // List returns list of applications
   259  func (s *Server) List(ctx context.Context, q *application.ApplicationQuery) (*appv1.ApplicationList, error) {
   260  	selector, err := labels.Parse(q.GetSelector())
   261  	if err != nil {
   262  		return nil, fmt.Errorf("error parsing the selector: %w", err)
   263  	}
   264  	var apps []*appv1.Application
   265  	if q.GetAppNamespace() == "" {
   266  		apps, err = s.appLister.List(selector)
   267  	} else {
   268  		apps, err = s.appLister.Applications(q.GetAppNamespace()).List(selector)
   269  	}
   270  	if err != nil {
   271  		return nil, fmt.Errorf("error listing apps with selectors: %w", err)
   272  	}
   273  
   274  	filteredApps := apps
   275  	// Filter applications by name
   276  	if q.Name != nil {
   277  		filteredApps = argoutil.FilterByNameP(filteredApps, *q.Name)
   278  	}
   279  
   280  	// Filter applications by projects
   281  	filteredApps = argoutil.FilterByProjectsP(filteredApps, getProjectsFromApplicationQuery(*q))
   282  
   283  	// Filter applications by source repo URL
   284  	filteredApps = argoutil.FilterByRepoP(filteredApps, q.GetRepo())
   285  
   286  	newItems := make([]appv1.Application, 0)
   287  	for _, a := range filteredApps {
   288  		// Skip any application that is neither in the control plane's namespace
   289  		// nor in the list of enabled namespaces.
   290  		if !s.isNamespaceEnabled(a.Namespace) {
   291  			continue
   292  		}
   293  		if s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, a.RBACName(s.ns)) {
   294  			newItems = append(newItems, *a)
   295  		}
   296  	}
   297  
   298  	// Sort found applications by name
   299  	sort.Slice(newItems, func(i, j int) bool {
   300  		return newItems[i].Name < newItems[j].Name
   301  	})
   302  
   303  	appList := appv1.ApplicationList{
   304  		ListMeta: metav1.ListMeta{
   305  			ResourceVersion: s.appInformer.LastSyncResourceVersion(),
   306  		},
   307  		Items: newItems,
   308  	}
   309  	return &appList, nil
   310  }
   311  
   312  // Create creates an application
   313  func (s *Server) Create(ctx context.Context, q *application.ApplicationCreateRequest) (*appv1.Application, error) {
   314  	if q.GetApplication() == nil {
   315  		return nil, fmt.Errorf("error creating application: application is nil in request")
   316  	}
   317  	a := q.GetApplication()
   318  
   319  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionCreate, a.RBACName(s.ns)); err != nil {
   320  		return nil, err
   321  	}
   322  
   323  	s.projectLock.RLock(a.Spec.GetProject())
   324  	defer s.projectLock.RUnlock(a.Spec.GetProject())
   325  
   326  	validate := true
   327  	if q.Validate != nil {
   328  		validate = *q.Validate
   329  	}
   330  
   331  	proj, err := s.getAppProject(ctx, a, log.WithField("application", a.Name))
   332  	if err != nil {
   333  		return nil, err
   334  	}
   335  
   336  	err = s.validateAndNormalizeApp(ctx, a, proj, validate)
   337  	if err != nil {
   338  		return nil, fmt.Errorf("error while validating and normalizing app: %w", err)
   339  	}
   340  
   341  	appNs := s.appNamespaceOrDefault(a.Namespace)
   342  
   343  	if !s.isNamespaceEnabled(appNs) {
   344  		return nil, security.NamespaceNotPermittedError(appNs)
   345  	}
   346  
   347  	// Don't let the app creator set the operation explicitly. Those requests should always go through the Sync API.
   348  	if a.Operation != nil {
   349  		log.WithFields(log.Fields{
   350  			"application":            a.Name,
   351  			argocommon.SecurityField: argocommon.SecurityLow,
   352  		}).Warn("User attempted to set operation on application creation. This could have allowed them to bypass branch protection rules by setting manifests directly. Ignoring the set operation.")
   353  		a.Operation = nil
   354  	}
   355  
   356  	created, err := s.appclientset.ArgoprojV1alpha1().Applications(appNs).Create(ctx, a, metav1.CreateOptions{})
   357  	if err == nil {
   358  		s.logAppEvent(created, ctx, argo.EventReasonResourceCreated, "created application")
   359  		s.waitSync(created)
   360  		return created, nil
   361  	}
   362  	if !apierr.IsAlreadyExists(err) {
   363  		return nil, fmt.Errorf("error creating application: %w", err)
   364  	}
   365  
    366  	// act idempotently if the existing spec matches the new spec
   367  	existing, err := s.appLister.Applications(appNs).Get(a.Name)
   368  	if err != nil {
   369  		return nil, status.Errorf(codes.Internal, "unable to check existing application details (%s): %v", appNs, err)
   370  	}
   371  	equalSpecs := reflect.DeepEqual(existing.Spec, a.Spec) &&
   372  		reflect.DeepEqual(existing.Labels, a.Labels) &&
   373  		reflect.DeepEqual(existing.Annotations, a.Annotations) &&
   374  		reflect.DeepEqual(existing.Finalizers, a.Finalizers)
   375  
   376  	if equalSpecs {
   377  		return existing, nil
   378  	}
   379  	if q.Upsert == nil || !*q.Upsert {
   380  		return nil, status.Errorf(codes.InvalidArgument, "existing application spec is different, use upsert flag to force update")
   381  	}
   382  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, a.RBACName(s.ns)); err != nil {
   383  		return nil, err
   384  	}
   385  	updated, err := s.updateApp(existing, a, ctx, true)
   386  	if err != nil {
   387  		return nil, fmt.Errorf("error updating application: %w", err)
   388  	}
   389  	return updated, nil
   390  }
   391  
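         // queryRepoServer opens a repo-server client and gathers the application's repository, the
         // project-permitted Helm repositories and credentials, Helm and Kustomize options, and the enabled
         // source types, then invokes the supplied action with them.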
   392  func (s *Server) queryRepoServer(ctx context.Context, a *appv1.Application, proj *appv1.AppProject, action func(
   393  	client apiclient.RepoServerServiceClient,
   394  	repo *appv1.Repository,
   395  	helmRepos []*appv1.Repository,
   396  	helmCreds []*appv1.RepoCreds,
   397  	helmOptions *appv1.HelmOptions,
   398  	kustomizeOptions *appv1.KustomizeOptions,
   399  	enabledSourceTypes map[string]bool,
   400  ) error) error {
   401  
   402  	closer, client, err := s.repoClientset.NewRepoServerClient()
   403  	if err != nil {
   404  		return fmt.Errorf("error creating repo server client: %w", err)
   405  	}
   406  	defer ioutil.Close(closer)
   407  	repo, err := s.db.GetRepository(ctx, a.Spec.GetSource().RepoURL)
   408  	if err != nil {
   409  		return fmt.Errorf("error getting repository: %w", err)
   410  	}
   411  	kustomizeSettings, err := s.settingsMgr.GetKustomizeSettings()
   412  	if err != nil {
   413  		return fmt.Errorf("error getting kustomize settings: %w", err)
   414  	}
   415  	kustomizeOptions, err := kustomizeSettings.GetOptions(a.Spec.GetSource())
   416  	if err != nil {
   417  		return fmt.Errorf("error getting kustomize settings options: %w", err)
   418  	}
   419  
   420  	helmRepos, err := s.db.ListHelmRepositories(ctx)
   421  	if err != nil {
   422  		return fmt.Errorf("error listing helm repositories: %w", err)
   423  	}
   424  
   425  	permittedHelmRepos, err := argo.GetPermittedRepos(proj, helmRepos)
   426  	if err != nil {
   427  		return fmt.Errorf("error retrieving permitted repos: %w", err)
   428  	}
   429  	helmRepositoryCredentials, err := s.db.GetAllHelmRepositoryCredentials(ctx)
   430  	if err != nil {
   431  		return fmt.Errorf("error getting helm repository credentials: %w", err)
   432  	}
   433  	helmOptions, err := s.settingsMgr.GetHelmSettings()
   434  	if err != nil {
   435  		return fmt.Errorf("error getting helm settings: %w", err)
   436  	}
   437  	permittedHelmCredentials, err := argo.GetPermittedReposCredentials(proj, helmRepositoryCredentials)
   438  	if err != nil {
   439  		return fmt.Errorf("error getting permitted repos credentials: %w", err)
   440  	}
   441  	enabledSourceTypes, err := s.settingsMgr.GetEnabledSourceTypes()
   442  	if err != nil {
   443  		return fmt.Errorf("error getting settings enabled source types: %w", err)
   444  	}
   445  	return action(client, repo, permittedHelmRepos, permittedHelmCredentials, helmOptions, kustomizeOptions, enabledSourceTypes)
   446  }
   447  
   448  // GetManifests returns application manifests
   449  func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationManifestQuery) (*apiclient.ManifestResponse, error) {
   450  	if q.Name == nil || *q.Name == "" {
   451  		return nil, fmt.Errorf("invalid request: application name is missing")
   452  	}
   453  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
   454  	if err != nil {
   455  		return nil, err
   456  	}
   457  
   458  	source := a.Spec.GetSource()
   459  
   460  	if !s.isNamespaceEnabled(a.Namespace) {
   461  		return nil, security.NamespaceNotPermittedError(a.Namespace)
   462  	}
   463  
   464  	var manifestInfo *apiclient.ManifestResponse
   465  	err = s.queryRepoServer(ctx, a, proj, func(
   466  		client apiclient.RepoServerServiceClient, repo *appv1.Repository, helmRepos []*appv1.Repository, helmCreds []*appv1.RepoCreds, helmOptions *appv1.HelmOptions, kustomizeOptions *appv1.KustomizeOptions, enableGenerateManifests map[string]bool) error {
   467  		revision := source.TargetRevision
   468  		if q.GetRevision() != "" {
   469  			revision = q.GetRevision()
   470  		}
   471  		appInstanceLabelKey, err := s.settingsMgr.GetAppInstanceLabelKey()
   472  		if err != nil {
   473  			return fmt.Errorf("error getting app instance label key from settings: %w", err)
   474  		}
   475  
   476  		config, err := s.getApplicationClusterConfig(ctx, a)
   477  		if err != nil {
   478  			return fmt.Errorf("error getting application cluster config: %w", err)
   479  		}
   480  
   481  		serverVersion, err := s.kubectl.GetServerVersion(config)
   482  		if err != nil {
   483  			return fmt.Errorf("error getting server version: %w", err)
   484  		}
   485  
   486  		apiResources, err := s.kubectl.GetAPIResources(config, false, kubecache.NewNoopSettings())
   487  		if err != nil {
   488  			return fmt.Errorf("error getting API resources: %w", err)
   489  		}
   490  
   491  		manifestInfo, err = client.GenerateManifest(ctx, &apiclient.ManifestRequest{
   492  			Repo:               repo,
   493  			Revision:           revision,
   494  			AppLabelKey:        appInstanceLabelKey,
   495  			AppName:            a.InstanceName(s.ns),
   496  			Namespace:          a.Spec.Destination.Namespace,
   497  			ApplicationSource:  &source,
   498  			Repos:              helmRepos,
   499  			KustomizeOptions:   kustomizeOptions,
   500  			KubeVersion:        serverVersion,
   501  			ApiVersions:        argo.APIResourcesToStrings(apiResources, true),
   502  			HelmRepoCreds:      helmCreds,
   503  			HelmOptions:        helmOptions,
   504  			TrackingMethod:     string(argoutil.GetTrackingMethod(s.settingsMgr)),
   505  			EnabledSourceTypes: enableGenerateManifests,
   506  			ProjectName:        proj.Name,
   507  			ProjectSourceRepos: proj.Spec.SourceRepos,
   508  		})
   509  		if err != nil {
   510  			return fmt.Errorf("error generating manifests: %w", err)
   511  		}
   512  		return nil
   513  	})
   514  
   515  	if err != nil {
   516  		return nil, err
   517  	}
   518  
   519  	for i, manifest := range manifestInfo.Manifests {
   520  		obj := &unstructured.Unstructured{}
   521  		err = json.Unmarshal([]byte(manifest), obj)
   522  		if err != nil {
   523  			return nil, fmt.Errorf("error unmarshaling manifest into unstructured: %w", err)
   524  		}
   525  		if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
   526  			obj, _, err = diff.HideSecretData(obj, nil)
   527  			if err != nil {
   528  				return nil, fmt.Errorf("error hiding secret data: %w", err)
   529  			}
   530  			data, err := json.Marshal(obj)
   531  			if err != nil {
   532  				return nil, fmt.Errorf("error marshaling manifest: %w", err)
   533  			}
   534  			manifestInfo.Manifests[i] = string(data)
   535  		}
   536  	}
   537  
   538  	return manifestInfo, nil
   539  }
   540  
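         // GetManifestsWithFiles returns application manifests, generating them from files streamed by the
         // client to the repo server. Secret data is redacted from the manifests before the response is sent.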
   541  func (s *Server) GetManifestsWithFiles(stream application.ApplicationService_GetManifestsWithFilesServer) error {
   542  	ctx := stream.Context()
   543  	query, err := manifeststream.ReceiveApplicationManifestQueryWithFiles(stream)
   544  
   545  	if err != nil {
   546  		return fmt.Errorf("error getting query: %w", err)
   547  	}
   548  
   549  	if query.Name == nil || *query.Name == "" {
   550  		return fmt.Errorf("invalid request: application name is missing")
   551  	}
   552  
   553  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, query.GetProject(), query.GetAppNamespace(), query.GetName())
   554  	if err != nil {
   555  		return err
   556  	}
   557  
   558  	var manifestInfo *apiclient.ManifestResponse
   559  	err = s.queryRepoServer(ctx, a, proj, func(
   560  		client apiclient.RepoServerServiceClient, repo *appv1.Repository, helmRepos []*appv1.Repository, helmCreds []*appv1.RepoCreds, helmOptions *appv1.HelmOptions, kustomizeOptions *appv1.KustomizeOptions, enableGenerateManifests map[string]bool) error {
   561  
   562  		appInstanceLabelKey, err := s.settingsMgr.GetAppInstanceLabelKey()
   563  		if err != nil {
   564  			return fmt.Errorf("error getting app instance label key from settings: %w", err)
   565  		}
   566  
   567  		config, err := s.getApplicationClusterConfig(ctx, a)
   568  		if err != nil {
   569  			return fmt.Errorf("error getting application cluster config: %w", err)
   570  		}
   571  
   572  		serverVersion, err := s.kubectl.GetServerVersion(config)
   573  		if err != nil {
   574  			return fmt.Errorf("error getting server version: %w", err)
   575  		}
   576  
   577  		apiResources, err := s.kubectl.GetAPIResources(config, false, kubecache.NewNoopSettings())
   578  		if err != nil {
   579  			return fmt.Errorf("error getting API resources: %w", err)
   580  		}
   581  
   582  		source := a.Spec.GetSource()
   583  
   584  		proj, err := argo.GetAppProject(a, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), s.ns, s.settingsMgr, s.db, ctx)
   585  		if err != nil {
   586  			return fmt.Errorf("error getting app project: %w", err)
   587  		}
   588  
   589  		req := &apiclient.ManifestRequest{
   590  			Repo:               repo,
   591  			Revision:           source.TargetRevision,
   592  			AppLabelKey:        appInstanceLabelKey,
   593  			AppName:            a.Name,
   594  			Namespace:          a.Spec.Destination.Namespace,
   595  			ApplicationSource:  &source,
   596  			Repos:              helmRepos,
   597  			KustomizeOptions:   kustomizeOptions,
   598  			KubeVersion:        serverVersion,
   599  			ApiVersions:        argo.APIResourcesToStrings(apiResources, true),
   600  			HelmRepoCreds:      helmCreds,
   601  			HelmOptions:        helmOptions,
   602  			TrackingMethod:     string(argoutil.GetTrackingMethod(s.settingsMgr)),
   603  			EnabledSourceTypes: enableGenerateManifests,
   604  			ProjectName:        proj.Name,
   605  			ProjectSourceRepos: proj.Spec.SourceRepos,
   606  		}
   607  
   608  		repoStreamClient, err := client.GenerateManifestWithFiles(stream.Context())
   609  		if err != nil {
   610  			return fmt.Errorf("error opening stream: %w", err)
   611  		}
   612  
   613  		err = manifeststream.SendRepoStream(repoStreamClient, stream, req, *query.Checksum)
   614  		if err != nil {
   615  			return fmt.Errorf("error sending repo stream: %w", err)
   616  		}
   617  
   618  		resp, err := repoStreamClient.CloseAndRecv()
   619  		if err != nil {
   620  			return fmt.Errorf("error generating manifests: %w", err)
   621  		}
   622  
   623  		manifestInfo = resp
   624  		return nil
   625  	})
   626  
   627  	if err != nil {
   628  		return err
   629  	}
   630  
   631  	for i, manifest := range manifestInfo.Manifests {
   632  		obj := &unstructured.Unstructured{}
   633  		err = json.Unmarshal([]byte(manifest), obj)
   634  		if err != nil {
   635  			return fmt.Errorf("error unmarshaling manifest into unstructured: %w", err)
   636  		}
   637  		if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
   638  			obj, _, err = diff.HideSecretData(obj, nil)
   639  			if err != nil {
   640  				return fmt.Errorf("error hiding secret data: %w", err)
   641  			}
   642  			data, err := json.Marshal(obj)
   643  			if err != nil {
   644  				return fmt.Errorf("error marshaling manifest: %w", err)
   645  			}
   646  			manifestInfo.Manifests[i] = string(data)
   647  		}
   648  	}
   649  
    650  	if err := stream.SendAndClose(manifestInfo); err != nil {
         		return fmt.Errorf("error sending manifest response: %w", err)
         	}
   651  	return nil
   652  }
   653  
   654  // Get returns an application by name
   655  func (s *Server) Get(ctx context.Context, q *application.ApplicationQuery) (*appv1.Application, error) {
   656  	appName := q.GetName()
   657  	appNs := s.appNamespaceOrDefault(q.GetAppNamespace())
   658  
   659  	project := ""
   660  	projects := getProjectsFromApplicationQuery(*q)
   661  	if len(projects) == 1 {
   662  		project = projects[0]
   663  	} else if len(projects) > 1 {
   664  		return nil, status.Errorf(codes.InvalidArgument, "multiple projects specified - the get endpoint accepts either zero or one project")
   665  	}
   666  
   667  	// We must use a client Get instead of an informer Get, because it's common to call Get immediately
   668  	// following a Watch (which is not yet powered by an informer), and the Get must reflect what was
   669  	// previously seen by the client.
   670  	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionGet, project, appNs, appName, q.GetResourceVersion())
   671  	if err != nil {
   672  		return nil, err
   673  	}
   674  
   675  	s.inferResourcesStatusHealth(a)
   676  
   677  	if q.Refresh == nil {
   678  		return a, nil
   679  	}
   680  
   681  	refreshType := appv1.RefreshTypeNormal
   682  	if *q.Refresh == string(appv1.RefreshTypeHard) {
   683  		refreshType = appv1.RefreshTypeHard
   684  	}
   685  	appIf := s.appclientset.ArgoprojV1alpha1().Applications(appNs)
   686  
   687  	// subscribe early with buffered channel to ensure we don't miss events
   688  	events := make(chan *appv1.ApplicationWatchEvent, watchAPIBufferSize)
   689  	unsubscribe := s.appBroadcaster.Subscribe(events, func(event *appv1.ApplicationWatchEvent) bool {
   690  		return event.Application.Name == appName && event.Application.Namespace == appNs
   691  	})
   692  	defer unsubscribe()
   693  
   694  	app, err := argoutil.RefreshApp(appIf, appName, refreshType)
   695  	if err != nil {
   696  		return nil, fmt.Errorf("error refreshing the app: %w", err)
   697  	}
   698  
   699  	if refreshType == appv1.RefreshTypeHard {
   700  		// force refresh cached application details
   701  		if err := s.queryRepoServer(ctx, a, proj, func(
   702  			client apiclient.RepoServerServiceClient,
   703  			repo *appv1.Repository,
   704  			helmRepos []*appv1.Repository,
   705  			_ []*appv1.RepoCreds,
   706  			helmOptions *appv1.HelmOptions,
   707  			kustomizeOptions *appv1.KustomizeOptions,
   708  			enabledSourceTypes map[string]bool,
   709  		) error {
   710  			source := app.Spec.GetSource()
   711  			_, err := client.GetAppDetails(ctx, &apiclient.RepoServerAppDetailsQuery{
   712  				Repo:               repo,
   713  				Source:             &source,
   714  				AppName:            appName,
   715  				KustomizeOptions:   kustomizeOptions,
   716  				Repos:              helmRepos,
   717  				NoCache:            true,
   718  				TrackingMethod:     string(argoutil.GetTrackingMethod(s.settingsMgr)),
   719  				EnabledSourceTypes: enabledSourceTypes,
   720  				HelmOptions:        helmOptions,
   721  			})
   722  			return err
   723  		}); err != nil {
   724  			log.Warnf("Failed to force refresh application details: %v", err)
   725  		}
   726  	}
   727  
   728  	minVersion := 0
   729  	if minVersion, err = strconv.Atoi(app.ResourceVersion); err != nil {
   730  		minVersion = 0
   731  	}
   732  
   733  	for {
   734  		select {
   735  		case <-ctx.Done():
   736  			return nil, fmt.Errorf("application refresh deadline exceeded")
   737  		case event := <-events:
   738  			if appVersion, err := strconv.Atoi(event.Application.ResourceVersion); err == nil && appVersion > minVersion {
   739  				annotations := event.Application.GetAnnotations()
   740  				if annotations == nil {
   741  					annotations = make(map[string]string)
   742  				}
   743  				if _, ok := annotations[appv1.AnnotationKeyRefresh]; !ok {
   744  					return &event.Application, nil
   745  				}
   746  			}
   747  		}
   748  	}
   749  }
   750  
   751  // ListResourceEvents returns a list of event resources
   752  func (s *Server) ListResourceEvents(ctx context.Context, q *application.ApplicationResourceEventsQuery) (*v1.EventList, error) {
   753  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
   754  	if err != nil {
   755  		return nil, err
   756  	}
   757  
   758  	var (
   759  		kubeClientset kubernetes.Interface
   760  		fieldSelector string
   761  		namespace     string
   762  	)
   763  	// There are two places where we get events. If we are getting application events, we query
    764  	// our own cluster. If we need events for a resource on an external cluster, then we query the
   765  	// external cluster using its rest.Config
   766  	if q.GetResourceName() == "" && q.GetResourceUID() == "" {
   767  		kubeClientset = s.kubeclientset
   768  		namespace = a.Namespace
   769  		fieldSelector = fields.SelectorFromSet(map[string]string{
   770  			"involvedObject.name":      a.Name,
   771  			"involvedObject.uid":       string(a.UID),
   772  			"involvedObject.namespace": a.Namespace,
   773  		}).String()
   774  	} else {
   775  		tree, err := s.getAppResources(ctx, a)
   776  		if err != nil {
   777  			return nil, fmt.Errorf("error getting app resources: %w", err)
   778  		}
   779  		found := false
   780  		for _, n := range append(tree.Nodes, tree.OrphanedNodes...) {
   781  			if n.ResourceRef.UID == q.GetResourceUID() && n.ResourceRef.Name == q.GetResourceName() && n.ResourceRef.Namespace == q.GetResourceNamespace() {
   782  				found = true
   783  				break
   784  			}
   785  		}
   786  		if !found {
   787  			return nil, status.Errorf(codes.InvalidArgument, "%s not found as part of application %s", q.GetResourceName(), q.GetName())
   788  		}
   789  
   790  		namespace = q.GetResourceNamespace()
   791  		var config *rest.Config
   792  		config, err = s.getApplicationClusterConfig(ctx, a)
   793  		if err != nil {
   794  			return nil, fmt.Errorf("error getting application cluster config: %w", err)
   795  		}
   796  		kubeClientset, err = kubernetes.NewForConfig(config)
   797  		if err != nil {
   798  			return nil, fmt.Errorf("error creating kube client: %w", err)
   799  		}
   800  		fieldSelector = fields.SelectorFromSet(map[string]string{
   801  			"involvedObject.name":      q.GetResourceName(),
   802  			"involvedObject.uid":       q.GetResourceUID(),
   803  			"involvedObject.namespace": namespace,
   804  		}).String()
   805  	}
   806  	log.Infof("Querying for resource events with field selector: %s", fieldSelector)
   807  	opts := metav1.ListOptions{FieldSelector: fieldSelector}
   808  	list, err := kubeClientset.CoreV1().Events(namespace).List(ctx, opts)
   809  	if err != nil {
   810  		return nil, fmt.Errorf("error listing resource events: %w", err)
   811  	}
   812  	return list, nil
   813  }
   814  
   815  // validateAndUpdateApp validates and updates the application. currentProject is the name of the project the app
   816  // currently is under. If not specified, we assume that the app is under the project specified in the app spec.
   817  func (s *Server) validateAndUpdateApp(ctx context.Context, newApp *appv1.Application, merge bool, validate bool, action string, currentProject string) (*appv1.Application, error) {
   818  	s.projectLock.RLock(newApp.Spec.GetProject())
   819  	defer s.projectLock.RUnlock(newApp.Spec.GetProject())
   820  
   821  	app, proj, err := s.getApplicationEnforceRBACClient(ctx, action, currentProject, newApp.Namespace, newApp.Name, "")
   822  	if err != nil {
   823  		return nil, err
   824  	}
   825  
   826  	err = s.validateAndNormalizeApp(ctx, newApp, proj, validate)
   827  	if err != nil {
   828  		return nil, fmt.Errorf("error validating and normalizing app: %w", err)
   829  	}
   830  
   831  	a, err := s.updateApp(app, newApp, ctx, merge)
   832  	if err != nil {
   833  		return nil, fmt.Errorf("error updating application: %w", err)
   834  	}
   835  	return a, nil
   836  }
   837  
   838  var informerSyncTimeout = 2 * time.Second
   839  
   840  // waitSync is a helper to wait until the application informer cache is synced after create/update.
    841  // It waits until the app in the informer has a resource version at least as new as the version in the
    842  // supplied app, or until the informer sync timeout (2 seconds) elapses, whichever comes first.
   843  // We use an informer cache for read operations (Get, List). Since the cache is only
   844  // eventually consistent, it is possible that it doesn't reflect an application change immediately
    845  // after a mutating API call (create/update). This function should be called after a create or
    846  // update to give a probable (but not guaranteed) chance of the cache being up-to-date after the change.
   847  func (s *Server) waitSync(app *appv1.Application) {
   848  	logCtx := log.WithField("application", app.Name)
   849  	deadline := time.Now().Add(informerSyncTimeout)
   850  	minVersion, err := strconv.Atoi(app.ResourceVersion)
   851  	if err != nil {
   852  		logCtx.Warnf("waitSync failed: could not parse resource version %s", app.ResourceVersion)
   853  		time.Sleep(50 * time.Millisecond) // sleep anyway
   854  		return
   855  	}
   856  	for {
   857  		if currApp, err := s.appLister.Applications(app.Namespace).Get(app.Name); err == nil {
   858  			currVersion, err := strconv.Atoi(currApp.ResourceVersion)
   859  			if err == nil && currVersion >= minVersion {
   860  				return
   861  			}
   862  		}
   863  		if time.Now().After(deadline) {
   864  			break
   865  		}
   866  		time.Sleep(20 * time.Millisecond)
   867  	}
   868  	logCtx.Warnf("waitSync failed: timed out")
   869  }
   870  
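         // updateApp copies the new spec, labels, annotations and finalizers onto the existing application and
         // persists it, retrying up to 10 times on update conflicts by re-fetching the latest version. When
         // merge is true, labels and annotations are merged with the existing ones instead of replaced.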
   871  func (s *Server) updateApp(app *appv1.Application, newApp *appv1.Application, ctx context.Context, merge bool) (*appv1.Application, error) {
   872  	for i := 0; i < 10; i++ {
   873  		app.Spec = newApp.Spec
   874  		if merge {
   875  			app.Labels = collections.MergeStringMaps(app.Labels, newApp.Labels)
   876  			app.Annotations = collections.MergeStringMaps(app.Annotations, newApp.Annotations)
   877  		} else {
   878  			app.Labels = newApp.Labels
   879  			app.Annotations = newApp.Annotations
   880  		}
   881  
   882  		app.Finalizers = newApp.Finalizers
   883  
   884  		res, err := s.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Update(ctx, app, metav1.UpdateOptions{})
   885  		if err == nil {
   886  			s.logAppEvent(app, ctx, argo.EventReasonResourceUpdated, "updated application spec")
   887  			s.waitSync(res)
   888  			return res, nil
   889  		}
   890  		if !apierr.IsConflict(err) {
   891  			return nil, err
   892  		}
   893  
   894  		app, err = s.appclientset.ArgoprojV1alpha1().Applications(app.Namespace).Get(ctx, newApp.Name, metav1.GetOptions{})
   895  		if err != nil {
   896  			return nil, fmt.Errorf("error getting application: %w", err)
   897  		}
   898  		s.inferResourcesStatusHealth(app)
   899  	}
   900  	return nil, status.Errorf(codes.Internal, "Failed to update application. Too many conflicts")
   901  }
   902  
   903  // Update updates an application
   904  func (s *Server) Update(ctx context.Context, q *application.ApplicationUpdateRequest) (*appv1.Application, error) {
   905  	if q.GetApplication() == nil {
   906  		return nil, fmt.Errorf("error updating application: application is nil in request")
   907  	}
   908  	a := q.GetApplication()
   909  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, a.RBACName(s.ns)); err != nil {
   910  		return nil, err
   911  	}
   912  
   913  	validate := true
   914  	if q.Validate != nil {
   915  		validate = *q.Validate
   916  	}
   917  	return s.validateAndUpdateApp(ctx, q.Application, false, validate, rbacpolicy.ActionUpdate, q.GetProject())
   918  }
   919  
   920  // UpdateSpec updates an application spec and filters out any invalid parameter overrides
   921  func (s *Server) UpdateSpec(ctx context.Context, q *application.ApplicationUpdateSpecRequest) (*appv1.ApplicationSpec, error) {
   922  	if q.GetSpec() == nil {
   923  		return nil, fmt.Errorf("error updating application spec: spec is nil in request")
   924  	}
   925  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionUpdate, q.GetProject(), q.GetAppNamespace(), q.GetName(), "")
   926  	if err != nil {
   927  		return nil, err
   928  	}
   929  
   930  	a.Spec = *q.GetSpec()
   931  	validate := true
   932  	if q.Validate != nil {
   933  		validate = *q.Validate
   934  	}
   935  	a, err = s.validateAndUpdateApp(ctx, a, false, validate, rbacpolicy.ActionUpdate, q.GetProject())
   936  	if err != nil {
   937  		return nil, fmt.Errorf("error validating and updating app: %w", err)
   938  	}
   939  	return &a.Spec, nil
   940  }
   941  
   942  // Patch patches an application
   943  func (s *Server) Patch(ctx context.Context, q *application.ApplicationPatchRequest) (*appv1.Application, error) {
   944  	app, _, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName(), "")
   945  	if err != nil {
   946  		return nil, err
   947  	}
   948  
   949  	if err = s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, app.RBACName(s.ns)); err != nil {
   950  		return nil, err
   951  	}
   952  
   953  	jsonApp, err := json.Marshal(app)
   954  	if err != nil {
   955  		return nil, fmt.Errorf("error marshaling application: %w", err)
   956  	}
   957  
   958  	var patchApp []byte
   959  
   960  	switch q.GetPatchType() {
   961  	case "json", "":
   962  		patch, err := jsonpatch.DecodePatch([]byte(q.GetPatch()))
   963  		if err != nil {
   964  			return nil, fmt.Errorf("error decoding json patch: %w", err)
   965  		}
   966  		patchApp, err = patch.Apply(jsonApp)
   967  		if err != nil {
   968  			return nil, fmt.Errorf("error applying patch: %w", err)
   969  		}
   970  	case "merge":
   971  		patchApp, err = jsonpatch.MergePatch(jsonApp, []byte(q.GetPatch()))
   972  		if err != nil {
   973  			return nil, fmt.Errorf("error calculating merge patch: %w", err)
   974  		}
   975  	default:
   976  		return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Patch type '%s' is not supported", q.GetPatchType()))
   977  	}
   978  
   979  	newApp := &appv1.Application{}
   980  	err = json.Unmarshal(patchApp, newApp)
   981  	if err != nil {
   982  		return nil, fmt.Errorf("error unmarshaling patched app: %w", err)
   983  	}
   984  	return s.validateAndUpdateApp(ctx, newApp, false, true, rbacpolicy.ActionUpdate, q.GetProject())
   985  }
   986  
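         // getAppProject returns the AppProject the application belongs to. Failures are collapsed into a
         // deliberately vague error so that callers cannot use the response to enumerate project names.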
   987  func (s *Server) getAppProject(ctx context.Context, a *appv1.Application, logCtx *log.Entry) (*appv1.AppProject, error) {
   988  	proj, err := argo.GetAppProject(a, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), s.ns, s.settingsMgr, s.db, ctx)
   989  	if err == nil {
   990  		return proj, nil
   991  	}
   992  
   993  	// If there's a permission issue or the app doesn't exist, return a vague error to avoid letting the user enumerate project names.
   994  	vagueError := status.Errorf(codes.InvalidArgument, "app is not allowed in project %q, or the project does not exist", a.Spec.Project)
   995  
   996  	if apierr.IsNotFound(err) {
   997  		return nil, vagueError
   998  	}
   999  
  1000  	if _, ok := err.(*appv1.ErrApplicationNotAllowedToUseProject); ok {
  1001  		logCtx.WithFields(map[string]interface{}{
  1002  			"project":                a.Spec.Project,
  1003  			argocommon.SecurityField: argocommon.SecurityMedium,
  1004  		}).Warnf("error getting app project: %s", err)
  1005  		return nil, vagueError
  1006  	}
  1007  
  1008  	return nil, vagueError
  1009  }
  1010  
  1011  // Delete removes an application and all associated resources
  1012  func (s *Server) Delete(ctx context.Context, q *application.ApplicationDeleteRequest) (*application.ApplicationResponse, error) {
  1013  	appName := q.GetName()
  1014  	appNs := s.appNamespaceOrDefault(q.GetAppNamespace())
  1015  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionGet, q.GetProject(), appNs, appName, "")
  1016  	if err != nil {
  1017  		return nil, err
  1018  	}
  1019  
  1020  	s.projectLock.RLock(a.Spec.Project)
  1021  	defer s.projectLock.RUnlock(a.Spec.Project)
  1022  
  1023  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionDelete, a.RBACName(s.ns)); err != nil {
  1024  		return nil, err
  1025  	}
  1026  
  1027  	if q.Cascade != nil && !*q.Cascade && q.GetPropagationPolicy() != "" {
  1028  		return nil, status.Error(codes.InvalidArgument, "cannot set propagation policy when cascading is disabled")
  1029  	}
  1030  
  1031  	patchFinalizer := false
  1032  	if q.Cascade == nil || *q.Cascade {
   1033  		// validate the propagation policy
  1034  		policyFinalizer := getPropagationPolicyFinalizer(q.GetPropagationPolicy())
  1035  		if policyFinalizer == "" {
  1036  			return nil, status.Errorf(codes.InvalidArgument, "invalid propagation policy: %s", *q.PropagationPolicy)
  1037  		}
  1038  		if !a.IsFinalizerPresent(policyFinalizer) {
  1039  			a.SetCascadedDeletion(policyFinalizer)
  1040  			patchFinalizer = true
  1041  		}
  1042  	} else {
  1043  		if a.CascadedDeletion() {
  1044  			a.UnSetCascadedDeletion()
  1045  			patchFinalizer = true
  1046  		}
  1047  	}
  1048  
  1049  	if patchFinalizer {
  1050  		// Although the cascaded deletion/propagation policy finalizer is not set when apps are created via
  1051  		// API, they will often be set by the user as part of declarative config. As part of a delete
  1052  		// request, we always calculate the patch to see if we need to set/unset the finalizer.
  1053  		patch, err := json.Marshal(map[string]interface{}{
  1054  			"metadata": map[string]interface{}{
  1055  				"finalizers": a.Finalizers,
  1056  			},
  1057  		})
  1058  		if err != nil {
  1059  			return nil, fmt.Errorf("error marshaling finalizers: %w", err)
  1060  		}
  1061  		_, err = s.appclientset.ArgoprojV1alpha1().Applications(a.Namespace).Patch(ctx, a.Name, types.MergePatchType, patch, metav1.PatchOptions{})
  1062  		if err != nil {
  1063  			return nil, fmt.Errorf("error patching application with finalizers: %w", err)
  1064  		}
  1065  	}
  1066  
  1067  	err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Delete(ctx, appName, metav1.DeleteOptions{})
  1068  	if err != nil {
  1069  		return nil, fmt.Errorf("error deleting application: %w", err)
  1070  	}
  1071  	s.logAppEvent(a, ctx, argo.EventReasonResourceDeleted, "deleted application")
  1072  	return &application.ApplicationResponse{}, nil
  1073  }
  1074  
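         // isApplicationPermitted reports whether an application event may be emitted to the caller, filtering by
         // project, minimum resource version, application name/namespace, label selector, enabled namespaces,
         // and RBAC access.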
  1075  func (s *Server) isApplicationPermitted(selector labels.Selector, minVersion int, claims any, appName, appNs string, projects map[string]bool, a appv1.Application) bool {
  1076  	if len(projects) > 0 && !projects[a.Spec.GetProject()] {
  1077  		return false
  1078  	}
  1079  
  1080  	if appVersion, err := strconv.Atoi(a.ResourceVersion); err == nil && appVersion < minVersion {
  1081  		return false
  1082  	}
  1083  	matchedEvent := (appName == "" || (a.Name == appName && a.Namespace == appNs)) && selector.Matches(labels.Set(a.Labels))
  1084  	if !matchedEvent {
  1085  		return false
  1086  	}
  1087  
  1088  	if !s.isNamespaceEnabled(a.Namespace) {
  1089  		return false
  1090  	}
  1091  
  1092  	if !s.enf.Enforce(claims, rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, a.RBACName(s.ns)) {
   1093  			// do not emit apps the user does not have access to
  1094  		return false
  1095  	}
  1096  
  1097  	return true
  1098  }
  1099  
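         // Watch streams application change events to the client, emitting only events for applications the
         // caller is permitted to see.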
  1100  func (s *Server) Watch(q *application.ApplicationQuery, ws application.ApplicationService_WatchServer) error {
  1101  	appName := q.GetName()
  1102  	appNs := s.appNamespaceOrDefault(q.GetAppNamespace())
  1103  	logCtx := log.NewEntry(log.New())
  1104  	if q.Name != nil {
  1105  		logCtx = logCtx.WithField("application", *q.Name)
  1106  	}
  1107  	projects := map[string]bool{}
  1108  	for _, project := range getProjectsFromApplicationQuery(*q) {
  1109  		projects[project] = true
  1110  	}
  1111  	claims := ws.Context().Value("claims")
  1112  	selector, err := labels.Parse(q.GetSelector())
  1113  	if err != nil {
  1114  		return fmt.Errorf("error parsing labels with selectors: %w", err)
  1115  	}
  1116  	minVersion := 0
  1117  	if q.GetResourceVersion() != "" {
  1118  		if minVersion, err = strconv.Atoi(q.GetResourceVersion()); err != nil {
  1119  			minVersion = 0
  1120  		}
  1121  	}
  1122  
  1123  	// sendIfPermitted is a helper to send the application to the client's streaming channel if the
   1124  	// caller has RBAC permission to view it
  1125  	sendIfPermitted := func(a appv1.Application, eventType watch.EventType) {
  1126  		permitted := s.isApplicationPermitted(selector, minVersion, claims, appName, appNs, projects, a)
  1127  		if !permitted {
  1128  			return
  1129  		}
  1130  		s.inferResourcesStatusHealth(&a)
  1131  		err := ws.Send(&appv1.ApplicationWatchEvent{
  1132  			Type:        eventType,
  1133  			Application: a,
  1134  		})
  1135  		if err != nil {
  1136  			logCtx.Warnf("Unable to send stream message: %v", err)
  1137  			return
  1138  		}
  1139  	}
  1140  
  1141  	events := make(chan *appv1.ApplicationWatchEvent, watchAPIBufferSize)
  1142  	// Mimic watch API behavior: send ADDED events if no resource version provided
   1143  	// If the watch API is executed for a single application, emit events even if a resource version is provided.
   1144  	// This is required since the single-app watch API is used during operations like app syncing, and it is
  1145  	// critical to never miss events.
  1146  	if q.GetResourceVersion() == "" || q.GetName() != "" {
  1147  		apps, err := s.appLister.List(selector)
  1148  		if err != nil {
  1149  			return fmt.Errorf("error listing apps with selector: %w", err)
  1150  		}
  1151  		sort.Slice(apps, func(i, j int) bool {
  1152  			return apps[i].QualifiedName() < apps[j].QualifiedName()
  1153  		})
  1154  		for i := range apps {
  1155  			sendIfPermitted(*apps[i], watch.Added)
  1156  		}
  1157  	}
  1158  	unsubscribe := s.appBroadcaster.Subscribe(events)
  1159  	defer unsubscribe()
  1160  	for {
  1161  		select {
  1162  		case event := <-events:
  1163  			sendIfPermitted(event.Application, event.Type)
  1164  		case <-ws.Context().Done():
  1165  			return nil
  1166  		}
  1167  	}
  1168  }
  1169  
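         // validateAndNormalizeApp validates the application's name, destination, repository and project
         // permissions (enforcing additional RBAC checks when the app moves between projects) and normalizes
         // the spec in place.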
  1170  func (s *Server) validateAndNormalizeApp(ctx context.Context, app *appv1.Application, proj *appv1.AppProject, validate bool) error {
  1171  	if app.GetName() == "" {
  1172  		return fmt.Errorf("resource name may not be empty")
  1173  	}
  1174  	appNs := s.appNamespaceOrDefault(app.Namespace)
  1175  	currApp, err := s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, app.Name, metav1.GetOptions{})
  1176  	if err != nil {
  1177  		if !apierr.IsNotFound(err) {
  1178  			return fmt.Errorf("error getting application by name: %w", err)
  1179  		}
  1180  		// Kubernetes go-client will return a pointer to a zero-value app instead of nil, even
  1181  		// though the API response was NotFound. This behavior was confirmed via logs.
  1182  		currApp = nil
  1183  	}
  1184  	if currApp != nil && currApp.Spec.GetProject() != app.Spec.GetProject() {
  1185  		// When changing projects, caller must have application create & update privileges in new project
  1186  		// NOTE: the update check was already verified in the caller to this function
  1187  		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionCreate, app.RBACName(s.ns)); err != nil {
  1188  			return err
  1189  		}
  1190  		// They also need 'update' privileges in the old project
  1191  		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, currApp.RBACName(s.ns)); err != nil {
  1192  			return err
  1193  		}
  1194  	}
  1195  
  1196  	if err := argo.ValidateDestination(ctx, &app.Spec.Destination, s.db); err != nil {
  1197  		return status.Errorf(codes.InvalidArgument, "application destination spec for %s is invalid: %s", app.Name, err.Error())
  1198  	}
  1199  
  1200  	var conditions []appv1.ApplicationCondition
  1201  
  1202  	if validate {
  1203  		conditions := make([]appv1.ApplicationCondition, 0)
  1204  		condition, err := argo.ValidateRepo(ctx, app, s.repoClientset, s.db, s.kubectl, proj, s.settingsMgr)
  1205  		if err != nil {
  1206  			return fmt.Errorf("error validating the repo: %w", err)
  1207  		}
  1208  		conditions = append(conditions, condition...)
  1209  		if len(conditions) > 0 {
  1210  			return status.Errorf(codes.InvalidArgument, "application spec for %s is invalid: %s", app.Name, argo.FormatAppConditions(conditions))
  1211  		}
  1212  	}
  1213  
  1214  	conditions, err = argo.ValidatePermissions(ctx, &app.Spec, proj, s.db)
  1215  	if err != nil {
  1216  		return fmt.Errorf("error validating project permissions: %w", err)
  1217  	}
  1218  	if len(conditions) > 0 {
  1219  		return status.Errorf(codes.InvalidArgument, "application spec for %s is invalid: %s", app.Name, argo.FormatAppConditions(conditions))
  1220  	}
  1221  
  1222  	app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
  1223  	return nil
  1224  }
  1225  
  1226  func (s *Server) getApplicationClusterConfig(ctx context.Context, a *appv1.Application) (*rest.Config, error) {
  1227  	if err := argo.ValidateDestination(ctx, &a.Spec.Destination, s.db); err != nil {
  1228  		return nil, fmt.Errorf("error validating destination: %w", err)
  1229  	}
  1230  	clst, err := s.db.GetCluster(ctx, a.Spec.Destination.Server)
  1231  	if err != nil {
  1232  		return nil, fmt.Errorf("error getting cluster: %w", err)
  1233  	}
  1234  	config := clst.RESTConfig()
  1235  	return config, err
  1236  }
  1237  
   1238  // getCachedAppState loads the cached state and triggers an app refresh if the cache entry is missing
  1239  func (s *Server) getCachedAppState(ctx context.Context, a *appv1.Application, getFromCache func() error) error {
  1240  	err := getFromCache()
  1241  	if err != nil && err == servercache.ErrCacheMiss {
  1242  		conditions := a.Status.GetConditions(map[appv1.ApplicationConditionType]bool{
  1243  			appv1.ApplicationConditionComparisonError:  true,
  1244  			appv1.ApplicationConditionInvalidSpecError: true,
  1245  		})
  1246  		if len(conditions) > 0 {
  1247  			return errors.New(argoutil.FormatAppConditions(conditions))
  1248  		}
  1249  		_, err = s.Get(ctx, &application.ApplicationQuery{
  1250  			Name:         pointer.String(a.GetName()),
  1251  			AppNamespace: pointer.String(a.GetNamespace()),
  1252  			Refresh:      pointer.String(string(appv1.RefreshTypeNormal)),
  1253  		})
  1254  		if err != nil {
  1255  			return fmt.Errorf("error getting application by query: %w", err)
  1256  		}
  1257  		return getFromCache()
  1258  	}
  1259  	return err
  1260  }
  1261  
  1262  func (s *Server) getAppResources(ctx context.Context, a *appv1.Application) (*appv1.ApplicationTree, error) {
  1263  	var tree appv1.ApplicationTree
  1264  	err := s.getCachedAppState(ctx, a, func() error {
  1265  		return s.cache.GetAppResourcesTree(a.InstanceName(s.ns), &tree)
  1266  	})
  1267  	if err != nil {
  1268  		return &tree, fmt.Errorf("error getting cached app resource tree: %w", err)
  1269  	}
  1270  	return &tree, nil
  1271  }
  1272  
  1273  func (s *Server) getAppLiveResource(ctx context.Context, action string, q *application.ApplicationResourceRequest) (*appv1.ResourceNode, *rest.Config, *appv1.Application, error) {
  1274  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, action, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1275  	if err != nil {
  1276  		return nil, nil, nil, err
  1277  	}
  1278  	tree, err := s.getAppResources(ctx, a)
  1279  	if err != nil {
  1280  		return nil, nil, nil, fmt.Errorf("error getting app resources: %w", err)
  1281  	}
  1282  
  1283  	found := tree.FindNode(q.GetGroup(), q.GetKind(), q.GetNamespace(), q.GetResourceName())
  1284  	if found == nil || found.ResourceRef.UID == "" {
  1285  		return nil, nil, nil, status.Errorf(codes.InvalidArgument, "%s %s %s not found as part of application %s", q.GetKind(), q.GetGroup(), q.GetResourceName(), q.GetName())
  1286  	}
  1287  	config, err := s.getApplicationClusterConfig(ctx, a)
  1288  	if err != nil {
  1289  		return nil, nil, nil, fmt.Errorf("error getting application cluster config: %w", err)
  1290  	}
  1291  	return found, config, a, nil
  1292  }
  1293  
  1294  func (s *Server) GetResource(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ApplicationResourceResponse, error) {
  1295  	res, config, _, err := s.getAppLiveResource(ctx, rbacpolicy.ActionGet, q)
  1296  	if err != nil {
  1297  		return nil, err
  1298  	}
  1299  
  1300  	// make sure to use specified resource version if provided
  1301  	if q.GetVersion() != "" {
  1302  		res.Version = q.GetVersion()
  1303  	}
  1304  	obj, err := s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
  1305  	if err != nil {
  1306  		return nil, fmt.Errorf("error getting resource: %w", err)
  1307  	}
  1308  	obj, err = replaceSecretValues(obj)
  1309  	if err != nil {
  1310  		return nil, fmt.Errorf("error replacing secret values: %w", err)
  1311  	}
  1312  	data, err := json.Marshal(obj.Object)
  1313  	if err != nil {
  1314  		return nil, fmt.Errorf("error marshaling object: %w", err)
  1315  	}
  1316  	manifest := string(data)
  1317  	return &application.ApplicationResourceResponse{Manifest: &manifest}, nil
  1318  }
  1319  
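        // replaceSecretValues masks the data of Kubernetes Secrets before a manifest is
        // returned to the caller: for core/v1 Secrets the values are replaced with
        // placeholder strings via diff.HideSecretData, so the manifest shape is preserved
        // without exposing the actual contents.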
  1320  func replaceSecretValues(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
  1321  	if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
  1322  		_, obj, err := diff.HideSecretData(nil, obj)
  1323  		if err != nil {
  1324  			return nil, err
  1325  		}
  1326  		return obj, err
  1327  	}
  1328  	return obj, nil
  1329  }
  1330  
  1331  // PatchResource patches a resource
  1332  func (s *Server) PatchResource(ctx context.Context, q *application.ApplicationResourcePatchRequest) (*application.ApplicationResourceResponse, error) {
  1333  	resourceRequest := &application.ApplicationResourceRequest{
  1334  		Name:         q.Name,
  1335  		AppNamespace: q.AppNamespace,
  1336  		Namespace:    q.Namespace,
  1337  		ResourceName: q.ResourceName,
  1338  		Kind:         q.Kind,
  1339  		Version:      q.Version,
  1340  		Group:        q.Group,
  1341  		Project:      q.Project,
  1342  	}
  1343  	res, config, a, err := s.getAppLiveResource(ctx, rbacpolicy.ActionUpdate, resourceRequest)
  1344  	if err != nil {
  1345  		return nil, err
  1346  	}
  1347  
  1348  	manifest, err := s.kubectl.PatchResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, types.PatchType(q.GetPatchType()), []byte(q.GetPatch()))
  1349  	if err != nil {
  1350  		// don't expose real error for secrets since it might contain secret data
  1351  		if res.Kind == kube.SecretKind && res.Group == "" {
  1352  			return nil, fmt.Errorf("failed to patch Secret %s/%s", res.Namespace, res.Name)
  1353  		}
  1354  		return nil, fmt.Errorf("error patching resource: %w", err)
  1355  	}
  1356  	if manifest == nil {
  1357  		return nil, fmt.Errorf("failed to patch resource: manifest was nil")
  1358  	}
  1359  	manifest, err = replaceSecretValues(manifest)
  1360  	if err != nil {
  1361  		return nil, fmt.Errorf("error replacing secret values: %w", err)
  1362  	}
  1363  	data, err := json.Marshal(manifest.Object)
  1364  	if err != nil {
  1365  		return nil, fmt.Errorf("error marshaling manifest object: %w", err)
  1366  	}
  1367  	s.logAppEvent(a, ctx, argo.EventReasonResourceUpdated, fmt.Sprintf("patched resource %s/%s '%s'", q.GetGroup(), q.GetKind(), q.GetResourceName()))
  1368  	m := string(data)
  1369  	return &application.ApplicationResourceResponse{
  1370  		Manifest: &m,
  1371  	}, nil
  1372  }
  1373  
  1374  // DeleteResource deletes a specified resource
  1375  func (s *Server) DeleteResource(ctx context.Context, q *application.ApplicationResourceDeleteRequest) (*application.ApplicationResponse, error) {
  1376  	resourceRequest := &application.ApplicationResourceRequest{
  1377  		Name:         q.Name,
  1378  		AppNamespace: q.AppNamespace,
  1379  		Namespace:    q.Namespace,
  1380  		ResourceName: q.ResourceName,
  1381  		Kind:         q.Kind,
  1382  		Version:      q.Version,
  1383  		Group:        q.Group,
  1384  		Project:      q.Project,
  1385  	}
  1386  	res, config, a, err := s.getAppLiveResource(ctx, rbacpolicy.ActionDelete, resourceRequest)
  1387  	if err != nil {
  1388  		return nil, err
  1389  	}
  1390  	var deleteOption metav1.DeleteOptions
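        	// Map the request flags onto a Kubernetes deletion propagation policy:
        	// orphan leaves dependent objects in place, force deletes them in the background
        	// with a zero grace period, and the default deletes in the foreground.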
  1391  	if q.GetOrphan() {
  1392  		propagationPolicy := metav1.DeletePropagationOrphan
  1393  		deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}
  1394  	} else if q.GetForce() {
  1395  		propagationPolicy := metav1.DeletePropagationBackground
  1396  		zeroGracePeriod := int64(0)
  1397  		deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy, GracePeriodSeconds: &zeroGracePeriod}
  1398  	} else {
  1399  		propagationPolicy := metav1.DeletePropagationForeground
  1400  		deleteOption = metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}
  1401  	}
  1402  	err = s.kubectl.DeleteResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, deleteOption)
  1403  	if err != nil {
  1404  		return nil, fmt.Errorf("error deleting resource: %w", err)
  1405  	}
  1406  	s.logAppEvent(a, ctx, argo.EventReasonResourceDeleted, fmt.Sprintf("deleted resource %s/%s '%s'", q.GetGroup(), q.GetKind(), q.GetResourceName()))
  1407  	return &application.ApplicationResponse{}, nil
  1408  }
  1409  
  1410  func (s *Server) ResourceTree(ctx context.Context, q *application.ResourcesQuery) (*appv1.ApplicationTree, error) {
  1411  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetApplicationName())
  1412  	if err != nil {
  1413  		return nil, err
  1414  	}
  1415  
  1416  	return s.getAppResources(ctx, a)
  1417  }
  1418  
  1419  func (s *Server) WatchResourceTree(q *application.ResourcesQuery, ws application.ApplicationService_WatchResourceTreeServer) error {
  1420  	_, _, err := s.getApplicationEnforceRBACInformer(ws.Context(), rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetApplicationName())
  1421  	if err != nil {
  1422  		return err
  1423  	}
  1424  
  1425  	cacheKey := argo.AppInstanceName(q.GetApplicationName(), q.GetAppNamespace(), s.ns)
  1426  	return s.cache.OnAppResourcesTreeChanged(ws.Context(), cacheKey, func() error {
  1427  		var tree appv1.ApplicationTree
  1428  		err := s.cache.GetAppResourcesTree(cacheKey, &tree)
  1429  		if err != nil {
  1430  			return fmt.Errorf("error getting app resource tree: %w", err)
  1431  		}
  1432  		return ws.Send(&tree)
  1433  	})
  1434  }
  1435  
  1436  func (s *Server) RevisionMetadata(ctx context.Context, q *application.RevisionMetadataQuery) (*appv1.RevisionMetadata, error) {
  1437  	a, proj, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1438  	if err != nil {
  1439  		return nil, err
  1440  	}
  1441  
  1442  	source := a.Spec.GetSource()
  1443  	repo, err := s.db.GetRepository(ctx, source.RepoURL)
  1444  	if err != nil {
  1445  		return nil, fmt.Errorf("error getting repository by URL: %w", err)
  1446  	}
  1447  	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
  1448  	if err != nil {
  1449  		return nil, fmt.Errorf("error creating repo server client: %w", err)
  1450  	}
  1451  	defer ioutil.Close(conn)
  1452  	return repoClient.GetRevisionMetadata(ctx, &apiclient.RepoServerRevisionMetadataRequest{
  1453  		Repo:           repo,
  1454  		Revision:       q.GetRevision(),
  1455  		CheckSignature: len(proj.Spec.SignatureKeys) > 0,
  1456  	})
  1457  }
  1458  
  1459  // RevisionChartDetails returns the helm chart metadata, as fetched from the reposerver
  1460  func (s *Server) RevisionChartDetails(ctx context.Context, q *application.RevisionMetadataQuery) (*appv1.ChartDetails, error) {
  1461  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1462  	if err != nil {
  1463  		return nil, err
  1464  	}
  1465  	if a.Spec.Source.Chart == "" {
  1466  		return nil, fmt.Errorf("no chart found for application: %v", a.QualifiedName())
  1467  	}
  1468  	repo, err := s.db.GetRepository(ctx, a.Spec.Source.RepoURL)
  1469  	if err != nil {
  1470  		return nil, fmt.Errorf("error getting repository by URL: %w", err)
  1471  	}
  1472  	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
  1473  	if err != nil {
  1474  		return nil, fmt.Errorf("error creating repo server client: %w", err)
  1475  	}
  1476  	defer ioutil.Close(conn)
  1477  	return repoClient.GetRevisionChartDetails(ctx, &apiclient.RepoServerRevisionChartDetailsRequest{
  1478  		Repo:     repo,
  1479  		Name:     a.Spec.Source.Chart,
  1480  		Revision: q.GetRevision(),
  1481  	})
  1482  }
  1483  
  1484  func isMatchingResource(q *application.ResourcesQuery, key kube.ResourceKey) bool {
  1485  	return (q.GetName() == "" || q.GetName() == key.Name) &&
  1486  		(q.GetNamespace() == "" || q.GetNamespace() == key.Namespace) &&
  1487  		(q.GetGroup() == "" || q.GetGroup() == key.Group) &&
  1488  		(q.GetKind() == "" || q.GetKind() == key.Kind)
  1489  }
  1490  
  1491  func (s *Server) ManagedResources(ctx context.Context, q *application.ResourcesQuery) (*application.ManagedResourcesResponse, error) {
  1492  	a, _, err := s.getApplicationEnforceRBACInformer(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetApplicationName())
  1493  	if err != nil {
  1494  		return nil, err
  1495  	}
  1496  
  1497  	items := make([]*appv1.ResourceDiff, 0)
  1498  	err = s.getCachedAppState(ctx, a, func() error {
  1499  		return s.cache.GetAppManagedResources(a.InstanceName(s.ns), &items)
  1500  	})
  1501  	if err != nil {
  1502  		return nil, fmt.Errorf("error getting cached app managed resources: %w", err)
  1503  	}
  1504  	res := &application.ManagedResourcesResponse{}
  1505  	for i := range items {
  1506  		item := items[i]
  1507  		if !item.Hook && isMatchingResource(q, kube.ResourceKey{Name: item.Name, Namespace: item.Namespace, Kind: item.Kind, Group: item.Group}) {
  1508  			res.Items = append(res.Items, item)
  1509  		}
  1510  	}
  1511  
  1512  	return res, nil
  1513  }
  1514  
  1515  func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.ApplicationService_PodLogsServer) error {
  1516  	if q.PodName != nil {
  1517  		podKind := "Pod"
  1518  		q.Kind = &podKind
  1519  		q.ResourceName = q.PodName
  1520  	}
  1521  
  1522  	var sinceSeconds, tailLines *int64
  1523  	if q.GetSinceSeconds() > 0 {
  1524  		sinceSeconds = pointer.Int64(q.GetSinceSeconds())
  1525  	}
  1526  	if q.GetTailLines() > 0 {
  1527  		tailLines = pointer.Int64(q.GetTailLines())
  1528  	}
  1529  	var untilTime *metav1.Time
  1530  	if q.GetUntilTime() != "" {
  1531  		if val, err := time.Parse(time.RFC3339Nano, q.GetUntilTime()); err != nil {
  1532  			return fmt.Errorf("invalid untilTime parameter value: %v", err)
  1533  		} else {
  1534  			untilTimeVal := metav1.NewTime(val)
  1535  			untilTime = &untilTimeVal
  1536  		}
  1537  	}
  1538  
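        	// An optional filter restricts the returned log lines; a leading '!' inverts the
        	// match, e.g. "error" keeps only lines containing "error" while "!error" drops them.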
  1539  	literal := ""
  1540  	inverse := false
  1541  	if q.GetFilter() != "" {
  1542  		literal = *q.Filter
  1543  		if literal[0] == '!' {
  1544  			literal = literal[1:]
  1545  			inverse = true
  1546  		}
  1547  	}
  1548  
  1549  	a, _, err := s.getApplicationEnforceRBACInformer(ws.Context(), rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName())
  1550  	if err != nil {
  1551  		return err
  1552  	}
  1553  
  1554  	// Logs RBAC is enforced only if serverRBACLogEnforceEnable (backed by the
  1555  	// server.rbac.log.enforce.enable setting) is defined and set to "true".
  1556  	// Otherwise, no RBAC enforcement for logs takes place: PodLogs returns the logs
  1557  	// even if there is no explicit RBAC allow, or if there is an explicit RBAC deny.
  1558  	serverRBACLogEnforceEnable, err := s.settingsMgr.GetServerRBACLogEnforceEnable()
  1559  	if err != nil {
  1560  		return fmt.Errorf("error getting RBAC log enforce enable: %w", err)
  1561  	}
  1562  
  1563  	if serverRBACLogEnforceEnable {
  1564  		if err := s.enf.EnforceErr(ws.Context().Value("claims"), rbacpolicy.ResourceLogs, rbacpolicy.ActionGet, a.RBACName(s.ns)); err != nil {
  1565  			return err
  1566  		}
  1567  	}
  1568  
  1569  	tree, err := s.getAppResources(ws.Context(), a)
  1570  	if err != nil {
  1571  		return fmt.Errorf("error getting app resource tree: %w", err)
  1572  	}
  1573  
  1574  	config, err := s.getApplicationClusterConfig(ws.Context(), a)
  1575  	if err != nil {
  1576  		return fmt.Errorf("error getting application cluster config: %w", err)
  1577  	}
  1578  
  1579  	kubeClientset, err := kubernetes.NewForConfig(config)
  1580  	if err != nil {
  1581  		return fmt.Errorf("error creating kube client: %w", err)
  1582  	}
  1583  
  1584  	// from the tree, find the pods which match the query's kind, group, and resource name
  1585  	pods := getSelectedPods(tree.Nodes, q)
  1586  	if len(pods) == 0 {
  1587  		return nil
  1588  	}
  1589  
  1590  	if len(pods) > maxPodLogsToRender {
  1591  		return errors.New("max number of pods to view logs reached, please provide a more granular query")
  1592  	}
  1593  
  1594  	var streams []chan logEntry
  1595  
  1596  	for _, pod := range pods {
  1597  		stream, err := kubeClientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{
  1598  			Container:    q.GetContainer(),
  1599  			Follow:       q.GetFollow(),
  1600  			Timestamps:   true,
  1601  			SinceSeconds: sinceSeconds,
  1602  			SinceTime:    q.GetSinceTime(),
  1603  			TailLines:    tailLines,
  1604  			Previous:     q.GetPrevious(),
  1605  		}).Stream(ws.Context())
  1606  		podName := pod.Name
  1607  		logStream := make(chan logEntry)
  1608  		if err == nil {
  1609  			defer ioutil.Close(stream)
  1610  		}
  1611  
  1612  		streams = append(streams, logStream)
  1613  		go func() {
  1614  			// if k8s failed to start streaming logs (typically because the Pod is not ready yet)
  1615  			// then the error should be shown in the UI so that the user knows the reason
  1616  			if err != nil {
  1617  				logStream <- logEntry{line: err.Error()}
  1618  			} else {
  1619  				parseLogsStream(podName, stream, logStream)
  1620  			}
  1621  			close(logStream)
  1622  		}()
  1623  	}
  1624  
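        	// Interleave the per-pod channels into a single stream; mergeLogStreams orders
        	// entries by timestamp across pods, buffering for the given duration (100ms here)
        	// to keep the ordering stable.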
  1625  	logStream := mergeLogStreams(streams, time.Millisecond*100)
  1626  	sentCount := int64(0)
  1627  	done := make(chan error)
  1628  	go func() {
  1629  		for entry := range logStream {
  1630  			if entry.err != nil {
  1631  				done <- entry.err
  1632  				return
  1633  			} else {
  1634  				if q.Filter != nil {
  1635  					lineContainsFilter := strings.Contains(entry.line, literal)
  1636  					if (inverse && lineContainsFilter) || (!inverse && !lineContainsFilter) {
  1637  						continue
  1638  					}
  1639  				}
  1640  				ts := metav1.NewTime(entry.timeStamp)
  1641  				if untilTime != nil && entry.timeStamp.After(untilTime.Time) {
  1642  					done <- ws.Send(&application.LogEntry{
  1643  						Last:         pointer.Bool(true),
  1644  						PodName:      &entry.podName,
  1645  						Content:      &entry.line,
  1646  						TimeStampStr: pointer.String(entry.timeStamp.Format(time.RFC3339Nano)),
  1647  						TimeStamp:    &ts,
  1648  					})
  1649  					return
  1650  				} else {
  1651  					sentCount++
  1652  					if err := ws.Send(&application.LogEntry{
  1653  						PodName:      &entry.podName,
  1654  						Content:      &entry.line,
  1655  						TimeStampStr: pointer.String(entry.timeStamp.Format(time.RFC3339Nano)),
  1656  						TimeStamp:    &ts,
  1657  						Last:         pointer.Bool(false),
  1658  					}); err != nil {
  1659  						done <- err
  1660  						break
  1661  					}
  1662  				}
  1663  			}
  1664  		}
  1665  		now := time.Now()
  1666  		nowTS := metav1.NewTime(now)
  1667  		done <- ws.Send(&application.LogEntry{
  1668  			Last:         pointer.Bool(true),
  1669  			PodName:      pointer.String(""),
  1670  			Content:      pointer.String(""),
  1671  			TimeStampStr: pointer.String(now.Format(time.RFC3339Nano)),
  1672  			TimeStamp:    &nowTS,
  1673  		})
  1674  	}()
  1675  
  1676  	select {
  1677  	case err := <-done:
  1678  		return err
  1679  	case <-ws.Context().Done():
  1680  		log.WithField("application", q.Name).Debug("k8s pod logs reader completed due to closed grpc context")
  1681  		return nil
  1682  	}
  1683  }
  1684  
  1685  // from all of the treeNodes, get the pods that meet the criteria or whose parents meet the criteria
  1686  func getSelectedPods(treeNodes []appv1.ResourceNode, q *application.ApplicationPodLogsQuery) []appv1.ResourceNode {
  1687  	var pods []appv1.ResourceNode
  1688  	isTheOneMap := make(map[string]bool)
  1689  	for _, treeNode := range treeNodes {
  1690  		if treeNode.Kind == kube.PodKind && treeNode.Group == "" && treeNode.UID != "" {
  1691  			if isTheSelectedOne(&treeNode, q, treeNodes, isTheOneMap) {
  1692  				pods = append(pods, treeNode)
  1693  			}
  1694  		}
  1695  	}
  1696  	return pods
  1697  }
  1698  
  1699  // check if currentNode matches the query's group, kind, name, and namespace, or if any of its parents matches
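        // Results are memoized in isTheOneMap, keyed by node UID, so shared ancestors are
        // only evaluated once while walking the tree.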
  1700  func isTheSelectedOne(currentNode *appv1.ResourceNode, q *application.ApplicationPodLogsQuery, resourceNodes []appv1.ResourceNode, isTheOneMap map[string]bool) bool {
  1701  	exist, value := isTheOneMap[currentNode.UID]
  1702  	if exist {
  1703  		return value
  1704  	}
  1705  
  1706  	if (q.GetResourceName() == "" || currentNode.Name == q.GetResourceName()) &&
  1707  		(q.GetKind() == "" || currentNode.Kind == q.GetKind()) &&
  1708  		(q.GetGroup() == "" || currentNode.Group == q.GetGroup()) &&
  1709  		(q.GetNamespace() == "" || currentNode.Namespace == q.GetNamespace()) {
  1710  		isTheOneMap[currentNode.UID] = true
  1711  		return true
  1712  	}
  1713  
  1714  	if len(currentNode.ParentRefs) == 0 {
  1715  		isTheOneMap[currentNode.UID] = false
  1716  		return false
  1717  	}
  1718  
  1719  	for _, parentResource := range currentNode.ParentRefs {
  1720  		// look up parentResource from resourceNodes
  1721  		// then check if the parent isTheSelectedOne
  1722  		for _, resourceNode := range resourceNodes {
  1723  			if resourceNode.Namespace == parentResource.Namespace &&
  1724  				resourceNode.Name == parentResource.Name &&
  1725  				resourceNode.Group == parentResource.Group &&
  1726  				resourceNode.Kind == parentResource.Kind {
  1727  				if isTheSelectedOne(&resourceNode, q, resourceNodes, isTheOneMap) {
  1728  					isTheOneMap[currentNode.UID] = true
  1729  					return true
  1730  				}
  1731  			}
  1732  		}
  1733  	}
  1734  
  1735  	isTheOneMap[currentNode.UID] = false
  1736  	return false
  1737  }
  1738  
  1739  // Sync syncs an application to its target state
  1740  func (s *Server) Sync(ctx context.Context, syncReq *application.ApplicationSyncRequest) (*appv1.Application, error) {
  1741  	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionGet, syncReq.GetProject(), syncReq.GetAppNamespace(), syncReq.GetName(), "")
  1742  	if err != nil {
  1743  		return nil, err
  1744  	}
  1745  
  1746  	s.inferResourcesStatusHealth(a)
  1747  
  1748  	if !proj.Spec.SyncWindows.Matches(a).CanSync(true) {
  1749  		return a, status.Errorf(codes.PermissionDenied, "cannot sync: blocked by sync window")
  1750  	}
  1751  
  1752  	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionSync, a.RBACName(s.ns)); err != nil {
  1753  		return nil, err
  1754  	}
  1755  
  1756  	source := a.Spec.GetSource()
  1757  
  1758  	if syncReq.Manifests != nil {
  1759  		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionOverride, a.RBACName(s.ns)); err != nil {
  1760  			return nil, err
  1761  		}
  1762  		if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil && !syncReq.GetDryRun() {
  1763  			return nil, status.Error(codes.FailedPrecondition, "cannot use local sync when an automated sync policy is enabled, except for a dry run")
  1764  		}
  1765  	}
  1766  	if a.DeletionTimestamp != nil {
  1767  		return nil, status.Errorf(codes.FailedPrecondition, "application is deleting")
  1768  	}
  1769  	if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil && !syncReq.GetDryRun() {
  1770  		if syncReq.GetRevision() != "" && syncReq.GetRevision() != text.FirstNonEmpty(source.TargetRevision, "HEAD") {
  1771  			return nil, status.Errorf(codes.FailedPrecondition, "Cannot sync to %s: auto-sync currently set to %s", syncReq.GetRevision(), source.TargetRevision)
  1772  		}
  1773  	}
  1774  	revision, displayRevision, err := s.resolveRevision(ctx, a, syncReq)
  1775  	if err != nil {
  1776  		return nil, status.Errorf(codes.FailedPrecondition, err.Error())
  1777  	}
  1778  
  1779  	var retry *appv1.RetryStrategy
  1780  	var syncOptions appv1.SyncOptions
  1781  	if a.Spec.SyncPolicy != nil {
  1782  		syncOptions = a.Spec.SyncPolicy.SyncOptions
  1783  		retry = a.Spec.SyncPolicy.Retry
  1784  	}
  1785  	if syncReq.RetryStrategy != nil {
  1786  		retry = syncReq.RetryStrategy
  1787  	}
  1788  	if syncReq.SyncOptions != nil {
  1789  		syncOptions = syncReq.SyncOptions.Items
  1790  	}
  1791  
  1792  	// We cannot use local manifests if we're only allowed to sync to signed commits
  1793  	if syncReq.Manifests != nil && len(proj.Spec.SignatureKeys) > 0 {
  1794  		return nil, status.Errorf(codes.FailedPrecondition, "Cannot use local sync when signature keys are required.")
  1795  	}
  1796  
  1797  	resources := []appv1.SyncOperationResource{}
  1798  	if syncReq.GetResources() != nil {
  1799  		for _, r := range syncReq.GetResources() {
  1800  			if r != nil {
  1801  				resources = append(resources, *r)
  1802  			}
  1803  		}
  1804  	}
  1805  	op := appv1.Operation{
  1806  		Sync: &appv1.SyncOperation{
  1807  			Revision:     revision,
  1808  			Prune:        syncReq.GetPrune(),
  1809  			DryRun:       syncReq.GetDryRun(),
  1810  			SyncOptions:  syncOptions,
  1811  			SyncStrategy: syncReq.Strategy,
  1812  			Resources:    resources,
  1813  			Manifests:    syncReq.Manifests,
  1814  		},
  1815  		InitiatedBy: appv1.OperationInitiator{Username: session.Username(ctx)},
  1816  		Info:        syncReq.Infos,
  1817  	}
  1818  	if retry != nil {
  1819  		op.Retry = *retry
  1820  	}
  1821  
  1822  	appName := syncReq.GetName()
  1823  	appNs := s.appNamespaceOrDefault(syncReq.GetAppNamespace())
  1824  	appIf := s.appclientset.ArgoprojV1alpha1().Applications(appNs)
  1825  	a, err = argo.SetAppOperation(appIf, appName, &op)
  1826  	if err != nil {
  1827  		return nil, fmt.Errorf("error setting app operation: %w", err)
  1828  	}
  1829  	partial := ""
  1830  	if len(syncReq.Resources) > 0 {
  1831  		partial = "partial "
  1832  	}
  1833  	reason := fmt.Sprintf("initiated %ssync to %s", partial, displayRevision)
  1834  	if syncReq.Manifests != nil {
  1835  		reason = fmt.Sprintf("initiated %ssync locally", partial)
  1836  	}
  1837  	s.logAppEvent(a, ctx, argo.EventReasonOperationStarted, reason)
  1838  	return a, nil
  1839  }
  1840  
  1841  func (s *Server) Rollback(ctx context.Context, rollbackReq *application.ApplicationRollbackRequest) (*appv1.Application, error) {
  1842  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionSync, rollbackReq.GetProject(), rollbackReq.GetAppNamespace(), rollbackReq.GetName(), "")
  1843  	if err != nil {
  1844  		return nil, err
  1845  	}
  1846  
  1847  	s.inferResourcesStatusHealth(a)
  1848  
  1849  	if a.DeletionTimestamp != nil {
  1850  		return nil, status.Errorf(codes.FailedPrecondition, "application is deleting")
  1851  	}
  1852  	if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil {
  1853  		return nil, status.Errorf(codes.FailedPrecondition, "rollback cannot be initiated when auto-sync is enabled")
  1854  	}
  1855  
  1856  	var deploymentInfo *appv1.RevisionHistory
  1857  	for _, info := range a.Status.History {
  1858  		if info.ID == rollbackReq.GetId() {
  1859  			deploymentInfo = &info
  1860  			break
  1861  		}
  1862  	}
  1863  	if deploymentInfo == nil {
  1864  		return nil, status.Errorf(codes.InvalidArgument, "application %s does not have deployment with id %v", a.QualifiedName(), rollbackReq.GetId())
  1865  	}
  1866  	if deploymentInfo.Source.IsZero() {
  1867  		// Since source type was introduced to history starting with v0.12, and is now required for
  1868  		// rollback, we cannot support rollback to revisions deployed using Argo CD v0.11 or below
  1869  		return nil, status.Errorf(codes.FailedPrecondition, "cannot rollback to revision deployed with Argo CD v0.11 or lower. sync to revision instead.")
  1870  	}
  1871  
  1872  	var syncOptions appv1.SyncOptions
  1873  	if a.Spec.SyncPolicy != nil {
  1874  		syncOptions = a.Spec.SyncPolicy.SyncOptions
  1875  	}
  1876  
  1877  	// Rollback is just a convenience around Sync
  1878  	op := appv1.Operation{
  1879  		Sync: &appv1.SyncOperation{
  1880  			Revision:     deploymentInfo.Revision,
  1881  			DryRun:       rollbackReq.GetDryRun(),
  1882  			Prune:        rollbackReq.GetPrune(),
  1883  			SyncOptions:  syncOptions,
  1884  			SyncStrategy: &appv1.SyncStrategy{Apply: &appv1.SyncStrategyApply{}},
  1885  			Source:       &deploymentInfo.Source,
  1886  		},
  1887  		InitiatedBy: appv1.OperationInitiator{Username: session.Username(ctx)},
  1888  	}
  1889  	appName := rollbackReq.GetName()
  1890  	appNs := s.appNamespaceOrDefault(rollbackReq.GetAppNamespace())
  1891  	appIf := s.appclientset.ArgoprojV1alpha1().Applications(appNs)
  1892  	a, err = argo.SetAppOperation(appIf, appName, &op)
  1893  	if err != nil {
  1894  		return nil, fmt.Errorf("error setting app operation: %w", err)
  1895  	}
  1896  	s.logAppEvent(a, ctx, argo.EventReasonOperationStarted, fmt.Sprintf("initiated rollback to %d", rollbackReq.GetId()))
  1897  	return a, nil
  1898  }
  1899  
  1900  func (s *Server) ListLinks(ctx context.Context, req *application.ListAppLinksRequest) (*application.LinksResponse, error) {
  1901  	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionGet, req.GetProject(), req.GetNamespace(), req.GetName(), "")
  1902  	if err != nil {
  1903  		return nil, err
  1904  	}
  1905  
  1906  	obj, err := kube.ToUnstructured(a)
  1907  	if err != nil {
  1908  		return nil, fmt.Errorf("error getting application: %w", err)
  1909  	}
  1910  
  1911  	deepLinks, err := s.settingsMgr.GetDeepLinks(settings.ApplicationDeepLinks)
  1912  	if err != nil {
  1913  		return nil, fmt.Errorf("failed to read application deep links from configmap: %w", err)
  1914  	}
  1915  
  1916  	clstObj, _, err := s.getObjectsForDeepLinks(ctx, a, proj)
  1917  	if err != nil {
  1918  		return nil, err
  1919  	}
  1920  
  1921  	deepLinksObject := deeplinks.CreateDeepLinksObject(nil, obj, clstObj, nil)
  1922  
  1923  	finalList, errorList := deeplinks.EvaluateDeepLinksResponse(deepLinksObject, obj.GetName(), deepLinks)
  1924  	if len(errorList) > 0 {
  1925  		log.Errorf("errorList while evaluating application deep links, %v", strings.Join(errorList, ", "))
  1926  	}
  1927  
  1928  	return finalList, nil
  1929  }
  1930  
  1931  func (s *Server) getObjectsForDeepLinks(ctx context.Context, app *appv1.Application, proj *appv1.AppProject) (cluster *unstructured.Unstructured, project *unstructured.Unstructured, err error) {
  1932  	// sanitize project jwt tokens
  1933  	proj.Status = appv1.AppProjectStatus{}
  1934  
  1935  	project, err = kube.ToUnstructured(proj)
  1936  	if err != nil {
  1937  		return nil, nil, err
  1938  	}
  1939  
  1940  	getProjectClusters := func(project string) ([]*appv1.Cluster, error) {
  1941  		return s.db.GetProjectClusters(ctx, project)
  1942  	}
  1943  
  1944  	if err := argo.ValidateDestination(ctx, &app.Spec.Destination, s.db); err != nil {
  1945  		log.WithFields(map[string]interface{}{
  1946  			"application": app.GetName(),
  1947  			"ns":          app.GetNamespace(),
  1948  			"destination": app.Spec.Destination,
  1949  		}).Warnf("cannot validate cluster, error=%v", err.Error())
  1950  		return nil, nil, nil
  1951  	}
  1952  
  1953  	permitted, err := proj.IsDestinationPermitted(app.Spec.Destination, getProjectClusters)
  1954  	if err != nil {
  1955  		return nil, nil, err
  1956  	}
  1957  	if !permitted {
  1958  		return nil, nil, fmt.Errorf("application destination cluster is not permitted in this project")
  1959  	}
  1960  	clst, err := s.db.GetCluster(ctx, app.Spec.Destination.Server)
  1961  	if err != nil {
  1962  		log.WithFields(map[string]interface{}{
  1963  			"application": app.GetName(),
  1964  			"ns":          app.GetNamespace(),
  1965  			"destination": app.Spec.Destination,
  1966  		}).Warnf("cannot get cluster from db, error=%v", err.Error())
  1967  		return nil, nil, nil
  1968  	}
  1969  	// sanitize cluster, remove cluster config creds and other unwanted fields
  1970  	cluster, err = deeplinks.SanitizeCluster(clst)
  1971  	return cluster, project, err
  1972  }
  1973  
  1974  func (s *Server) ListResourceLinks(ctx context.Context, req *application.ApplicationResourceRequest) (*application.LinksResponse, error) {
  1975  	obj, _, app, _, err := s.getUnstructuredLiveResourceOrApp(ctx, rbacpolicy.ActionGet, req)
  1976  	if err != nil {
  1977  		return nil, err
  1978  	}
  1979  	deepLinks, err := s.settingsMgr.GetDeepLinks(settings.ResourceDeepLinks)
  1980  	if err != nil {
  1981  		return nil, fmt.Errorf("failed to read application deep links from configmap: %w", err)
  1982  	}
  1983  
  1984  	obj, err = replaceSecretValues(obj)
  1985  	if err != nil {
  1986  		return nil, fmt.Errorf("error replacing secret values: %w", err)
  1987  	}
  1988  
  1989  	appObj, err := kube.ToUnstructured(app)
  1990  	if err != nil {
  1991  		return nil, err
  1992  	}
  1993  
  1994  	proj, err := s.getAppProject(ctx, app, log.WithField("application", app.GetName()))
  1995  	if err != nil {
  1996  		return nil, err
  1997  	}
  1998  
  1999  	clstObj, projObj, err := s.getObjectsForDeepLinks(ctx, app, proj)
  2000  	if err != nil {
  2001  		return nil, err
  2002  	}
  2003  
  2004  	deepLinksObject := deeplinks.CreateDeepLinksObject(obj, appObj, clstObj, projObj)
  2005  	finalList, errorList := deeplinks.EvaluateDeepLinksResponse(deepLinksObject, obj.GetName(), deepLinks)
  2006  	if len(errorList) > 0 {
  2007  		log.Errorf("errors while evaluating resource deep links, %v", strings.Join(errorList, ", "))
  2008  	}
  2009  
  2010  	return finalList, nil
  2011  }
  2012  
  2013  // resolveRevision resolves the revision specified either in the sync request, or the
  2014  // application source, into a concrete revision that will be used for a sync operation.
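        // For Git sources, a symbolic revision such as a branch or tag name is resolved to a
        // commit SHA by the repo server, while an input that is already a commit SHA is
        // returned as-is; for Helm sources the target revision (possibly a version constraint)
        // is resolved to a concrete chart version.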
  2015  func (s *Server) resolveRevision(ctx context.Context, app *appv1.Application, syncReq *application.ApplicationSyncRequest) (string, string, error) {
  2016  	if syncReq.Manifests != nil {
  2017  		return "", "", nil
  2018  	}
  2019  	ambiguousRevision := syncReq.GetRevision()
  2020  	if ambiguousRevision == "" {
  2021  		ambiguousRevision = app.Spec.GetSource().TargetRevision
  2022  	}
  2023  	repo, err := s.db.GetRepository(ctx, app.Spec.GetSource().RepoURL)
  2024  	if err != nil {
  2025  		return "", "", fmt.Errorf("error getting repository by URL: %w", err)
  2026  	}
  2027  	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
  2028  	if err != nil {
  2029  		return "", "", fmt.Errorf("error getting repo server client: %w", err)
  2030  	}
  2031  	defer ioutil.Close(conn)
  2032  
  2033  	source := app.Spec.GetSource()
  2034  	if !source.IsHelm() {
  2035  		if git.IsCommitSHA(ambiguousRevision) {
  2036  			// If it's already a commit SHA, then no need to look it up
  2037  			return ambiguousRevision, ambiguousRevision, nil
  2038  		}
  2039  	}
  2040  
  2041  	resolveRevisionResponse, err := repoClient.ResolveRevision(ctx, &apiclient.ResolveRevisionRequest{
  2042  		Repo:              repo,
  2043  		App:               app,
  2044  		AmbiguousRevision: ambiguousRevision,
  2045  	})
  2046  	if err != nil {
  2047  		return "", "", fmt.Errorf("error resolving repo revision: %w", err)
  2048  	}
  2049  	return resolveRevisionResponse.Revision, resolveRevisionResponse.AmbiguousRevision, nil
  2050  }
  2051  
  2052  func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.OperationTerminateRequest) (*application.OperationTerminateResponse, error) {
  2053  	appName := termOpReq.GetName()
  2054  	appNs := s.appNamespaceOrDefault(termOpReq.GetAppNamespace())
  2055  	a, _, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionSync, termOpReq.GetProject(), appNs, appName, "")
  2056  	if err != nil {
  2057  		return nil, err
  2058  	}
  2059  
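        	// Set the operation phase to Terminating with optimistic-concurrency retries:
        	// on an update conflict the application is re-fetched and the update is retried,
        	// up to 10 attempts.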
  2060  	for i := 0; i < 10; i++ {
  2061  		if a.Operation == nil || a.Status.OperationState == nil {
  2062  			return nil, status.Errorf(codes.InvalidArgument, "Unable to terminate operation. No operation is in progress")
  2063  		}
  2064  		a.Status.OperationState.Phase = common.OperationTerminating
  2065  		updated, err := s.appclientset.ArgoprojV1alpha1().Applications(appNs).Update(ctx, a, metav1.UpdateOptions{})
  2066  		if err == nil {
  2067  			s.waitSync(updated)
  2068  			s.logAppEvent(a, ctx, argo.EventReasonResourceUpdated, "terminated running operation")
  2069  			return &application.OperationTerminateResponse{}, nil
  2070  		}
  2071  		if !apierr.IsConflict(err) {
  2072  			return nil, fmt.Errorf("error updating application: %w", err)
  2073  		}
  2074  		log.Warnf("failed to terminate operation for app %q due to an update conflict, retrying...", *termOpReq.Name)
  2075  		time.Sleep(100 * time.Millisecond)
  2076  		a, err = s.appclientset.ArgoprojV1alpha1().Applications(appNs).Get(ctx, appName, metav1.GetOptions{})
  2077  		if err != nil {
  2078  			return nil, fmt.Errorf("error getting application by name: %w", err)
  2079  		}
  2080  	}
  2081  	return nil, status.Errorf(codes.Internal, "Failed to terminate app. Too many conflicts")
  2082  }
  2083  
  2084  func (s *Server) logAppEvent(a *appv1.Application, ctx context.Context, reason string, action string) {
  2085  	eventInfo := argo.EventInfo{Type: v1.EventTypeNormal, Reason: reason}
  2086  	user := session.Username(ctx)
  2087  	if user == "" {
  2088  		user = "Unknown user"
  2089  	}
  2090  	message := fmt.Sprintf("%s %s", user, action)
  2091  	s.auditLogger.LogAppEvent(a, eventInfo, message, user)
  2092  }
  2093  
  2094  func (s *Server) logResourceEvent(res *appv1.ResourceNode, ctx context.Context, reason string, action string) {
  2095  	eventInfo := argo.EventInfo{Type: v1.EventTypeNormal, Reason: reason}
  2096  	user := session.Username(ctx)
  2097  	if user == "" {
  2098  		user = "Unknown user"
  2099  	}
  2100  	message := fmt.Sprintf("%s %s", user, action)
  2101  	s.auditLogger.LogResourceEvent(res, eventInfo, message, user)
  2102  }
  2103  
  2104  func (s *Server) ListResourceActions(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ResourceActionsListResponse, error) {
  2105  	obj, _, _, _, err := s.getUnstructuredLiveResourceOrApp(ctx, rbacpolicy.ActionGet, q)
  2106  	if err != nil {
  2107  		return nil, err
  2108  	}
  2109  	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
  2110  	if err != nil {
  2111  		return nil, fmt.Errorf("error getting resource overrides: %w", err)
  2112  	}
  2113  
  2114  	availableActions, err := s.getAvailableActions(resourceOverrides, obj)
  2115  	if err != nil {
  2116  		return nil, fmt.Errorf("error getting available actions: %w", err)
  2117  	}
  2118  	actionsPtr := []*appv1.ResourceAction{}
  2119  	for i := range availableActions {
  2120  		actionsPtr = append(actionsPtr, &availableActions[i])
  2121  	}
  2122  
  2123  	return &application.ResourceActionsListResponse{Actions: actionsPtr}, nil
  2124  }
  2125  
  2126  func (s *Server) getUnstructuredLiveResourceOrApp(ctx context.Context, rbacRequest string, q *application.ApplicationResourceRequest) (obj *unstructured.Unstructured, res *appv1.ResourceNode, app *appv1.Application, config *rest.Config, err error) {
  2127  	if q.GetKind() == applicationType.ApplicationKind && q.GetGroup() == applicationType.Group && q.GetName() == q.GetResourceName() {
  2128  		app, _, err = s.getApplicationEnforceRBACInformer(ctx, rbacRequest, q.GetProject(), q.GetAppNamespace(), q.GetName())
  2129  		if err != nil {
  2130  			return nil, nil, nil, nil, err
  2131  		}
  2132  		if err = s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacRequest, app.RBACName(s.ns)); err != nil {
  2133  			return nil, nil, nil, nil, err
  2134  		}
  2135  		config, err = s.getApplicationClusterConfig(ctx, app)
  2136  		if err != nil {
  2137  			return nil, nil, nil, nil, fmt.Errorf("error getting application cluster config: %w", err)
  2138  		}
  2139  		obj, err = kube.ToUnstructured(app)
  2140  	} else {
  2141  		res, config, app, err = s.getAppLiveResource(ctx, rbacRequest, q)
  2142  		if err != nil {
  2143  			return nil, nil, nil, nil, err
  2144  		}
  2145  		obj, err = s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
  2146  
  2147  	}
  2148  	if err != nil {
  2149  		return nil, nil, nil, nil, fmt.Errorf("error getting resource: %w", err)
  2150  	}
  2151  	return
  2152  }
  2153  
  2154  func (s *Server) getAvailableActions(resourceOverrides map[string]appv1.ResourceOverride, obj *unstructured.Unstructured) ([]appv1.ResourceAction, error) {
  2155  	luaVM := lua.VM{
  2156  		ResourceOverrides: resourceOverrides,
  2157  	}
  2158  
  2159  	discoveryScript, err := luaVM.GetResourceActionDiscovery(obj)
  2160  	if err != nil {
  2161  		return nil, fmt.Errorf("error getting Lua discovery script: %w", err)
  2162  	}
  2163  	if discoveryScript == "" {
  2164  		return []appv1.ResourceAction{}, nil
  2165  	}
  2166  	availableActions, err := luaVM.ExecuteResourceActionDiscovery(obj, discoveryScript)
  2167  	if err != nil {
  2168  		return nil, fmt.Errorf("error executing Lua discovery script: %w", err)
  2169  	}
  2170  	return availableActions, nil
  2171  
  2172  }
  2173  
  2174  func (s *Server) RunResourceAction(ctx context.Context, q *application.ResourceActionRunRequest) (*application.ApplicationResponse, error) {
  2175  	resourceRequest := &application.ApplicationResourceRequest{
  2176  		Name:         q.Name,
  2177  		AppNamespace: q.AppNamespace,
  2178  		Namespace:    q.Namespace,
  2179  		ResourceName: q.ResourceName,
  2180  		Kind:         q.Kind,
  2181  		Version:      q.Version,
  2182  		Group:        q.Group,
  2183  		Project:      q.Project,
  2184  	}
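        	// Resource actions are authorized with an RBAC action of the form
        	// "action/<group>/<kind>/<action name>", e.g. "action/apps/Deployment/restart".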
  2185  	actionRequest := fmt.Sprintf("%s/%s/%s/%s", rbacpolicy.ActionAction, q.GetGroup(), q.GetKind(), q.GetAction())
  2186  	liveObj, res, a, config, err := s.getUnstructuredLiveResourceOrApp(ctx, actionRequest, resourceRequest)
  2187  	if err != nil {
  2188  		return nil, err
  2189  	}
  2190  
  2191  	liveObjBytes, err := json.Marshal(liveObj)
  2192  	if err != nil {
  2193  		return nil, fmt.Errorf("error marshaling live object: %w", err)
  2194  	}
  2195  
  2196  	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
  2197  	if err != nil {
  2198  		return nil, fmt.Errorf("error getting resource overrides: %w", err)
  2199  	}
  2200  
  2201  	luaVM := lua.VM{
  2202  		ResourceOverrides: resourceOverrides,
  2203  	}
  2204  	action, err := luaVM.GetResourceAction(liveObj, q.GetAction())
  2205  	if err != nil {
  2206  		return nil, fmt.Errorf("error getting Lua resource action: %w", err)
  2207  	}
  2208  
  2209  	newObjects, err := luaVM.ExecuteResourceAction(liveObj, action.ActionLua)
  2210  	if err != nil {
  2211  		return nil, fmt.Errorf("error executing Lua resource action: %w", err)
  2212  	}
  2213  
  2214  	var app *appv1.Application
  2215  	// Only bother getting the app if we know we're going to need it for a resource permission check.
  2216  	if len(newObjects) > 0 {
  2217  		// No need for an RBAC check, we checked above that the user is allowed to run this action.
  2218  		app, err = s.appLister.Applications(s.appNamespaceOrDefault(q.GetAppNamespace())).Get(q.GetName())
  2219  		if err != nil {
  2220  			return nil, err
  2221  		}
  2222  	}
  2223  
  2224  	proj, err := s.getAppProject(ctx, a, log.WithField("application", a.Name))
  2225  	if err != nil {
  2226  		return nil, err
  2227  	}
  2228  
  2229  	// First, make sure all the returned resources are permitted, for each operation.
  2230  	// Also perform creates with dry-run for all create-operation resources.
  2231  	// This is done separately to reduce the risk of only some of the resources being successfully created later.
  2232  	// TODO: when apply/delete operations are supported for custom actions,
  2233  	// the dry-run for the relevant apply/delete operation will have to be invoked as well.
  2234  	for _, impactedResource := range newObjects {
  2235  		newObj := impactedResource.UnstructuredObj
  2236  		err := s.verifyResourcePermitted(ctx, app, proj, newObj)
  2237  		if err != nil {
  2238  			return nil, err
  2239  		}
  2240  		switch impactedResource.K8SOperation {
  2241  		case lua.CreateOperation:
  2242  			createOptions := metav1.CreateOptions{DryRun: []string{"All"}}
  2243  			_, err := s.kubectl.CreateResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), newObj, createOptions)
  2244  			if err != nil {
  2245  				return nil, err
  2246  			}
  2247  		}
  2248  	}
  2249  
  2250  	// Now, perform the actual operations.
  2251  	// The creation itself is not transactional.
  2252  	// TODO: maybe create a k8s list representation of the resources,
  2253  	// and invoke create on this list resource to make it semi-transactional (the patch operation is still separate
  2254  	// and can thus fail independently of create).
  2255  	for _, impactedResource := range newObjects {
  2256  		newObj := impactedResource.UnstructuredObj
  2257  		newObjBytes, err := json.Marshal(newObj)
  2258  
  2259  		if err != nil {
  2260  			return nil, fmt.Errorf("error marshaling new object: %w", err)
  2261  		}
  2262  
  2263  		switch impactedResource.K8SOperation {
  2264  		// No default case since an unsupported operation would have failed upon unmarshaling earlier
  2265  		case lua.PatchOperation:
  2266  			_, err := s.patchResource(ctx, config, liveObjBytes, newObjBytes, newObj)
  2267  			if err != nil {
  2268  				return nil, err
  2269  			}
  2270  		case lua.CreateOperation:
  2271  			_, err := s.createResource(ctx, config, newObj)
  2272  			if err != nil {
  2273  				return nil, err
  2274  			}
  2275  		}
  2276  	}
  2277  
  2278  	if res == nil {
  2279  		s.logAppEvent(a, ctx, argo.EventReasonResourceActionRan, fmt.Sprintf("ran action %s", q.GetAction()))
  2280  	} else {
  2281  		s.logAppEvent(a, ctx, argo.EventReasonResourceActionRan, fmt.Sprintf("ran action %s on resource %s/%s/%s", q.GetAction(), res.Group, res.Kind, res.Name))
  2282  		s.logResourceEvent(res, ctx, argo.EventReasonResourceActionRan, fmt.Sprintf("ran action %s", q.GetAction()))
  2283  	}
  2284  	return &application.ApplicationResponse{}, nil
  2285  }
  2286  
  2287  func (s *Server) patchResource(ctx context.Context, config *rest.Config, liveObjBytes, newObjBytes []byte, newObj *unstructured.Unstructured) (*application.ApplicationResponse, error) {
  2288  	diffBytes, err := jsonpatch.CreateMergePatch(liveObjBytes, newObjBytes)
  2289  	if err != nil {
  2290  		return nil, fmt.Errorf("error calculating merge patch: %w", err)
  2291  	}
  2292  	if string(diffBytes) == "{}" {
  2293  		return &application.ApplicationResponse{}, nil
  2294  	}
  2295  
  2296  	// The following logic detects if the resource action makes a modification to status and/or spec.
  2297  	// If status was modified, we attempt to patch the status using status subresource, in case the
  2298  	// CRD is configured using the status subresource feature. See:
  2299  	// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource
  2300  	// If status subresource is in use, the patch has to be split into two:
  2301  	// * one to update spec (and other non-status fields)
  2302  	// * the other to update only status.
  2303  	nonStatusPatch, statusPatch, err := splitStatusPatch(diffBytes)
  2304  	if err != nil {
  2305  		return nil, fmt.Errorf("error splitting status patch: %w", err)
  2306  	}
  2307  	if statusPatch != nil {
  2308  		_, err = s.kubectl.PatchResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), types.MergePatchType, diffBytes, "status")
  2309  		if err != nil {
  2310  			if !apierr.IsNotFound(err) {
  2311  				return nil, fmt.Errorf("error patching resource: %w", err)
  2312  			}
  2313  			// K8s API server returns 404 NotFound when the CRD does not support the status subresource
  2314  			// if we get here, the CRD does not use the status subresource. We will fall back to a normal patch
  2315  		} else {
  2316  			// If we get here, the CRD does use the status subresource, so we must patch status and
  2317  			// spec separately. update the diffBytes to the spec-only patch and fall through.
  2318  			diffBytes = nonStatusPatch
  2319  		}
  2320  	}
  2321  	if diffBytes != nil {
  2322  		_, err = s.kubectl.PatchResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), types.MergePatchType, diffBytes)
  2323  		if err != nil {
  2324  			return nil, fmt.Errorf("error patching resource: %w", err)
  2325  		}
  2326  	}
  2327  	return &application.ApplicationResponse{}, nil
  2328  }
  2329  
  2330  func (s *Server) verifyResourcePermitted(ctx context.Context, app *appv1.Application, proj *appv1.AppProject, obj *unstructured.Unstructured) error {
  2331  	permitted, err := proj.IsResourcePermitted(schema.GroupKind{Group: obj.GroupVersionKind().Group, Kind: obj.GroupVersionKind().Kind}, obj.GetNamespace(), app.Spec.Destination, func(project string) ([]*appv1.Cluster, error) {
  2332  		clusters, err := s.db.GetProjectClusters(context.TODO(), project)
  2333  		if err != nil {
  2334  			return nil, fmt.Errorf("failed to get project clusters: %w", err)
  2335  		}
  2336  		return clusters, nil
  2337  	})
  2338  	if err != nil {
  2339  		return fmt.Errorf("error checking resource permissions: %w", err)
  2340  	}
  2341  	if !permitted {
  2342  		return fmt.Errorf("application %s is not permitted to manage %s/%s/%s in %s", app.RBACName(s.ns), obj.GroupVersionKind().Group, obj.GroupVersionKind().Kind, obj.GetName(), obj.GetNamespace())
  2343  	}
  2344  
  2345  	return nil
  2346  }
  2347  
  2348  func (s *Server) createResource(ctx context.Context, config *rest.Config, newObj *unstructured.Unstructured) (*application.ApplicationResponse, error) {
  2349  	_, err := s.kubectl.CreateResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), newObj, metav1.CreateOptions{})
  2350  	if err != nil {
  2351  		return nil, fmt.Errorf("error creating resource: %w", err)
  2352  	}
  2353  	return &application.ApplicationResponse{}, nil
  2354  }
  2355  
  2356  // splitStatusPatch splits a merge patch into two: a non-status patch and a status-only patch.
  2357  // Returns nil for either part if the patch contains no non-status or no status modifications, respectively.
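        // For example, the patch {"spec":{"replicas":3},"status":{"phase":"Synced"}} is split
        // into the non-status patch {"spec":{"replicas":3}} and the status patch
        // {"status":{"phase":"Synced"}}.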
  2358  func splitStatusPatch(patch []byte) ([]byte, []byte, error) {
  2359  	var obj map[string]interface{}
  2360  	err := json.Unmarshal(patch, &obj)
  2361  	if err != nil {
  2362  		return nil, nil, err
  2363  	}
  2364  	var nonStatusPatch, statusPatch []byte
  2365  	if statusVal, ok := obj["status"]; ok {
  2366  		// calculate the status-only patch
  2367  		statusObj := map[string]interface{}{
  2368  			"status": statusVal,
  2369  		}
  2370  		statusPatch, err = json.Marshal(statusObj)
  2371  		if err != nil {
  2372  			return nil, nil, err
  2373  		}
  2374  		// remove status, and calculate the non-status patch
  2375  		delete(obj, "status")
  2376  		if len(obj) > 0 {
  2377  			nonStatusPatch, err = json.Marshal(obj)
  2378  			if err != nil {
  2379  				return nil, nil, err
  2380  			}
  2381  		}
  2382  	} else {
  2383  		// status was not modified in patch
  2384  		nonStatusPatch = patch
  2385  	}
  2386  	return nonStatusPatch, statusPatch, nil
  2387  }
  2388  
  2389  func (s *Server) GetApplicationSyncWindows(ctx context.Context, q *application.ApplicationSyncWindowsQuery) (*application.ApplicationSyncWindowsResponse, error) {
  2390  	a, proj, err := s.getApplicationEnforceRBACClient(ctx, rbacpolicy.ActionGet, q.GetProject(), q.GetAppNamespace(), q.GetName(), "")
  2391  	if err != nil {
  2392  		return nil, err
  2393  	}
  2394  
  2395  	windows := proj.Spec.SyncWindows.Matches(a)
  2396  	sync := windows.CanSync(true)
  2397  
  2398  	res := &application.ApplicationSyncWindowsResponse{
  2399  		ActiveWindows:   convertSyncWindows(windows.Active()),
  2400  		AssignedWindows: convertSyncWindows(windows),
  2401  		CanSync:         &sync,
  2402  	}
  2403  
  2404  	return res, nil
  2405  }
  2406  
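        // inferResourcesStatusHealth fills in per-resource health on app.Status.Resources from
        // the cached application resource tree when the app tracks resource health in the tree
        // (ResourceHealthLocationAppTree) rather than persisting it on the application status.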
  2407  func (s *Server) inferResourcesStatusHealth(app *appv1.Application) {
  2408  	if app.Status.ResourceHealthSource == appv1.ResourceHealthLocationAppTree {
  2409  		tree := &appv1.ApplicationTree{}
  2410  		if err := s.cache.GetAppResourcesTree(app.Name, tree); err == nil {
  2411  			healthByKey := map[kube.ResourceKey]*appv1.HealthStatus{}
  2412  			for _, node := range tree.Nodes {
  2413  				healthByKey[kube.NewResourceKey(node.Group, node.Kind, node.Namespace, node.Name)] = node.Health
  2414  			}
  2415  			for i, res := range app.Status.Resources {
  2416  				res.Health = healthByKey[kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name)]
  2417  				app.Status.Resources[i] = res
  2418  			}
  2419  		}
  2420  	}
  2421  }
  2422  
  2423  func convertSyncWindows(w *appv1.SyncWindows) []*application.ApplicationSyncWindow {
  2424  	if w != nil {
  2425  		var windows []*application.ApplicationSyncWindow
  2426  		for _, w := range *w {
  2427  			nw := &application.ApplicationSyncWindow{
  2428  				Kind:       &w.Kind,
  2429  				Schedule:   &w.Schedule,
  2430  				Duration:   &w.Duration,
  2431  				ManualSync: &w.ManualSync,
  2432  			}
  2433  			windows = append(windows, nw)
  2434  		}
  2435  		if len(windows) > 0 {
  2436  			return windows
  2437  		}
  2438  	}
  2439  	return nil
  2440  }
  2441  
  2442  func getPropagationPolicyFinalizer(policy string) string {
  2443  	switch strings.ToLower(policy) {
  2444  	case backgroundPropagationPolicy:
  2445  		return appv1.BackgroundPropagationPolicyFinalizer
  2446  	case foregroundPropagationPolicy:
  2447  		return appv1.ForegroundPropagationPolicyFinalizer
  2448  	case "":
  2449  		return appv1.ResourcesFinalizerName
  2450  	default:
  2451  		return ""
  2452  	}
  2453  }
  2454  
  2455  func (s *Server) appNamespaceOrDefault(appNs string) string {
  2456  	if appNs == "" {
  2457  		return s.ns
  2458  	} else {
  2459  		return appNs
  2460  	}
  2461  }
  2462  
  2463  func (s *Server) isNamespaceEnabled(namespace string) bool {
  2464  	return security.IsNamespaceEnabled(namespace, s.ns, s.enabledNamespaces)
  2465  }
  2466  
  2467  // getProjectsFromApplicationQuery gets the project names from a query. If the legacy "project" field was specified, use
  2468  // that. Otherwise, use the newer "projects" field.
  2469  func getProjectsFromApplicationQuery(q application.ApplicationQuery) []string {
  2470  	if q.Project != nil {
  2471  		return q.Project
  2472  	}
  2473  	return q.Projects
  2474  }