github.com/argoproj/argo-cd@v1.8.7/server/application/application.go

package application

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	goio "io"
	"math"
	"reflect"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/Masterminds/semver"
	"github.com/argoproj/gitops-engine/pkg/diff"
	"github.com/argoproj/gitops-engine/pkg/sync/common"
	"github.com/argoproj/gitops-engine/pkg/utils/kube"
	"github.com/argoproj/gitops-engine/pkg/utils/text"
	"github.com/argoproj/pkg/sync"
	jsonpatch "github.com/evanphx/json-patch"
	log "github.com/sirupsen/logrus"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	v1 "k8s.io/api/core/v1"
	apierr "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/utils/pointer"

	argocommon "github.com/argoproj/argo-cd/common"
	"github.com/argoproj/argo-cd/pkg/apiclient/application"
	"github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
	appclientset "github.com/argoproj/argo-cd/pkg/client/clientset/versioned"
	applisters "github.com/argoproj/argo-cd/pkg/client/listers/application/v1alpha1"
	"github.com/argoproj/argo-cd/reposerver/apiclient"
	servercache "github.com/argoproj/argo-cd/server/cache"
	"github.com/argoproj/argo-cd/server/rbacpolicy"
	"github.com/argoproj/argo-cd/util/argo"
	argoutil "github.com/argoproj/argo-cd/util/argo"
	"github.com/argoproj/argo-cd/util/db"
	"github.com/argoproj/argo-cd/util/env"
	"github.com/argoproj/argo-cd/util/git"
	"github.com/argoproj/argo-cd/util/helm"
	"github.com/argoproj/argo-cd/util/io"
	"github.com/argoproj/argo-cd/util/lua"
	"github.com/argoproj/argo-cd/util/rbac"
	"github.com/argoproj/argo-cd/util/session"
	"github.com/argoproj/argo-cd/util/settings"
)

var (
	watchAPIBufferSize = env.ParseNumFromEnv(argocommon.EnvWatchAPIBufferSize, 1000, 0, math.MaxInt32)
)

// Server provides an Application service
type Server struct {
	ns             string
	kubeclientset  kubernetes.Interface
	appclientset   appclientset.Interface
	appLister      applisters.ApplicationNamespaceLister
	appInformer    cache.SharedIndexInformer
	appBroadcaster *broadcasterHandler
	repoClientset  apiclient.Clientset
	kubectl        kube.Kubectl
	db             db.ArgoDB
	enf            *rbac.Enforcer
	projectLock    sync.KeyLock
	auditLogger    *argo.AuditLogger
	settingsMgr    *settings.SettingsManager
	cache          *servercache.Cache
	projInformer   cache.SharedIndexInformer
}

// NewServer returns a new instance of the Application service
func NewServer(
	namespace string,
	kubeclientset kubernetes.Interface,
	appclientset appclientset.Interface,
	appLister applisters.ApplicationNamespaceLister,
	appInformer cache.SharedIndexInformer,
	repoClientset apiclient.Clientset,
	cache *servercache.Cache,
	kubectl kube.Kubectl,
	db db.ArgoDB,
	enf *rbac.Enforcer,
	projectLock sync.KeyLock,
	settingsMgr *settings.SettingsManager,
	projInformer cache.SharedIndexInformer,
) application.ApplicationServiceServer {
	appBroadcaster := &broadcasterHandler{}
	appInformer.AddEventHandler(appBroadcaster)
	return &Server{
		ns:             namespace,
		appclientset:   appclientset,
		appLister:      appLister,
		appInformer:    appInformer,
		appBroadcaster: appBroadcaster,
		kubeclientset:  kubeclientset,
		cache:          cache,
		db:             db,
		repoClientset:  repoClientset,
		kubectl:        kubectl,
		enf:            enf,
		projectLock:    projectLock,
		auditLogger:    argo.NewAuditLogger(namespace, kubeclientset, "argocd-server"),
		settingsMgr:    settingsMgr,
		projInformer:   projInformer,
	}
}

// appRBACName formats fully qualified application name for RBAC check
func appRBACName(app appv1.Application) string {
	return fmt.Sprintf("%s/%s", app.Spec.GetProject(), app.Name)
}

// List returns list of applications
func (s *Server) List(ctx context.Context, q *application.ApplicationQuery) (*appv1.ApplicationList, error) {
	labelsMap, err := labels.ConvertSelectorToLabelsMap(q.Selector)
	if err != nil {
		return nil, err
	}
	apps, err := s.appLister.List(labelsMap.AsSelector())
	if err != nil {
		return nil, err
	}
	newItems := make([]appv1.Application, 0)
	for _, a := range apps {
		if s.enf.Enforce(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)) {
			newItems = append(newItems, *a)
		}
	}
	newItems = argoutil.FilterByProjects(newItems, q.Projects)
	sort.Slice(newItems, func(i, j int) bool {
		return newItems[i].Name < newItems[j].Name
	})
	appList := appv1.ApplicationList{
		ListMeta: metav1.ListMeta{
			ResourceVersion: s.appInformer.LastSyncResourceVersion(),
		},
		Items: newItems,
	}
	return &appList, nil
}

// Create creates an application
func (s *Server) Create(ctx context.Context, q *application.ApplicationCreateRequest) (*appv1.Application, error) {
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionCreate, appRBACName(q.Application)); err != nil {
		return nil, err
	}

	s.projectLock.Lock(q.Application.Spec.Project)
	defer s.projectLock.Unlock(q.Application.Spec.Project)

	a := q.Application
	validate := true
	if q.Validate != nil {
		validate = *q.Validate
	}
	err := s.validateAndNormalizeApp(ctx, &a, validate)
	if err != nil {
		return nil, err
	}
	created, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Create(ctx, &a, metav1.CreateOptions{})
	if err == nil {
		s.logAppEvent(created, ctx, argo.EventReasonResourceCreated, "created application")
		s.waitSync(created)
		return created, nil
	}
	if !apierr.IsAlreadyExists(err) {
		return nil, err
	}
	// act idempotent if existing spec matches new spec
	existing, err := s.appLister.Get(a.Name)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "unable to check existing application details: %v", err)
	}
	equalSpecs := reflect.DeepEqual(existing.Spec, a.Spec) &&
		reflect.DeepEqual(existing.Labels, a.Labels) &&
		reflect.DeepEqual(existing.Annotations, a.Annotations) &&
		reflect.DeepEqual(existing.Finalizers, a.Finalizers)

	if equalSpecs {
		return existing, nil
	}
	if q.Upsert == nil || !*q.Upsert {
		return nil, status.Errorf(codes.InvalidArgument, "existing application spec is different, use upsert flag to force update")
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(a)); err != nil {
		return nil, err
	}
	updated, err := s.updateApp(existing, &a, ctx, true)
	if err != nil {
		return nil, err
	}
	return updated, nil
}

// GetManifests returns application manifests
func (s *Server) GetManifests(ctx context.Context, q *application.ApplicationManifestQuery) (*apiclient.ManifestResponse, error) {
	a, err := s.appLister.Get(*q.Name)
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	repo, err := s.db.GetRepository(ctx, a.Spec.Source.RepoURL)
	if err != nil {
		return nil, err
	}
	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
	if err != nil {
		return nil, err
	}
	defer io.Close(conn)
	revision := a.Spec.Source.TargetRevision
	if q.Revision != "" {
		revision = q.Revision
	}
	appInstanceLabelKey, err := s.settingsMgr.GetAppInstanceLabelKey()
	if err != nil {
		return nil, err
	}
	helmRepos, err := s.db.ListHelmRepositories(ctx)
	if err != nil {
		return nil, err
	}

	plugins, err := s.plugins()
	if err != nil {
		return nil, err
	}
	// If source is Kustomize add build options
	kustomizeSettings, err := s.settingsMgr.GetKustomizeSettings()
	if err != nil {
		return nil, err
	}
	kustomizeOptions, err := kustomizeSettings.GetOptions(a.Spec.Source)
	if err != nil {
		return nil, err
	}
	config, err := s.getApplicationClusterConfig(ctx, a)
	if err != nil {
		return nil, err
	}

	serverVersion, err := s.kubectl.GetServerVersion(config)
	if err != nil {
		return nil, err
	}

	apiGroups, err := s.kubectl.GetAPIGroups(config)
	if err != nil {
		return nil, err
	}
	manifestInfo, err := repoClient.GenerateManifest(ctx, &apiclient.ManifestRequest{
		Repo:              repo,
		Revision:          revision,
		AppLabelKey:       appInstanceLabelKey,
		AppLabelValue:     a.Name,
		Namespace:         a.Spec.Destination.Namespace,
		ApplicationSource: &a.Spec.Source,
		Repos:             helmRepos,
		Plugins:           plugins,
		KustomizeOptions:  kustomizeOptions,
		KubeVersion:       serverVersion,
		ApiVersions:       argo.APIGroupsToVersions(apiGroups),
	})
	if err != nil {
		return nil, err
	}
	for i, manifest := range manifestInfo.Manifests {
		obj := &unstructured.Unstructured{}
		err = json.Unmarshal([]byte(manifest), obj)
		if err != nil {
			return nil, err
		}
		if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
			obj, _, err = diff.HideSecretData(obj, nil)
			if err != nil {
				return nil, err
			}
			data, err := json.Marshal(obj)
			if err != nil {
				return nil, err
			}
			manifestInfo.Manifests[i] = string(data)
		}
	}

	return manifestInfo, nil
}

// Get returns an application by name
func (s *Server) Get(ctx context.Context, q *application.ApplicationQuery) (*appv1.Application, error) {
	// We must use a client Get instead of an informer Get, because it's common to call Get immediately
	// following a Watch (which is not yet powered by an informer), and the Get must reflect what was
	// previously seen by the client.
	a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, q.GetName(), metav1.GetOptions{
		ResourceVersion: q.ResourceVersion,
	})

	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	if q.Refresh == nil {
		return a, nil
	}

	refreshType := appv1.RefreshTypeNormal
	if *q.Refresh == string(appv1.RefreshTypeHard) {
		refreshType = appv1.RefreshTypeHard
	}
	appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)

	// subscribe early with buffered channel to ensure we don't miss events
	events := make(chan *appv1.ApplicationWatchEvent, watchAPIBufferSize)
	unsubscribe := s.appBroadcaster.Subscribe(events, func(event *appv1.ApplicationWatchEvent) bool {
		return event.Application.Name == q.GetName()
	})
	defer unsubscribe()

	app, err := argoutil.RefreshApp(appIf, *q.Name, refreshType)
	if err != nil {
		return nil, err
	}

	minVersion := 0
	if minVersion, err = strconv.Atoi(app.ResourceVersion); err != nil {
		minVersion = 0
	}

	for {
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("application refresh deadline exceeded")
		case event := <-events:
			if appVersion, err := strconv.Atoi(event.Application.ResourceVersion); err == nil && appVersion > minVersion {
				annotations := event.Application.GetAnnotations()
				if annotations == nil {
					annotations = make(map[string]string)
				}
				if _, ok := annotations[argocommon.AnnotationKeyRefresh]; !ok {
					return &event.Application, nil
				}
			}
		}
	}
}

// ListResourceEvents returns a list of event resources
func (s *Server) ListResourceEvents(ctx context.Context, q *application.ApplicationResourceEventsQuery) (*v1.EventList, error) {
	a, err := s.appLister.Get(*q.Name)
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	var (
		kubeClientset kubernetes.Interface
		fieldSelector string
		namespace     string
	)
	// There are two places where we get events. If we are getting application events, we query
	// our own cluster. If it is events on a resource on an external cluster, then we query the
	// external cluster using its rest.Config
	if q.ResourceName == "" && q.ResourceUID == "" {
		kubeClientset = s.kubeclientset
		namespace = a.Namespace
		fieldSelector = fields.SelectorFromSet(map[string]string{
			"involvedObject.name":      a.Name,
			"involvedObject.uid":       string(a.UID),
			"involvedObject.namespace": a.Namespace,
		}).String()
	} else {
		namespace = q.ResourceNamespace
		var config *rest.Config
		config, err = s.getApplicationClusterConfig(ctx, a)
		if err != nil {
			return nil, err
		}
		kubeClientset, err = kubernetes.NewForConfig(config)
		if err != nil {
			return nil, err
		}
		fieldSelector = fields.SelectorFromSet(map[string]string{
			"involvedObject.name":      q.ResourceName,
			"involvedObject.uid":       q.ResourceUID,
			"involvedObject.namespace": namespace,
		}).String()
	}

	log.Infof("Querying for resource events with field selector: %s", fieldSelector)
	opts := metav1.ListOptions{FieldSelector: fieldSelector}
	return kubeClientset.CoreV1().Events(namespace).List(ctx, opts)
}

func (s *Server) validateAndUpdateApp(ctx context.Context, newApp *appv1.Application, merge bool, validate bool) (*appv1.Application, error) {
	s.projectLock.Lock(newApp.Spec.GetProject())
	defer s.projectLock.Unlock(newApp.Spec.GetProject())

	app, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, newApp.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	err = s.validateAndNormalizeApp(ctx, newApp, validate)
	if err != nil {
		return nil, err
	}

	return s.updateApp(app, newApp, ctx, merge)
}

func mergeStringMaps(items ...map[string]string) map[string]string {
	res := make(map[string]string)
	for _, m := range items {
		if m == nil {
			continue
		}
		for k, v := range m {
			res[k] = v
		}
	}
	return res
}

var informerSyncTimeout = 2 * time.Second

// waitSync is a helper to wait until the application informer cache is synced after create/update.
// It waits until the app in the informer has a resource version greater than or equal to the version
// in the supplied app, or until 2 seconds have elapsed, whichever comes first.
// We use an informer cache for read operations (Get, List). Since the cache is only
// eventually consistent, it is possible that it doesn't reflect an application change immediately
// after a mutating API call (create/update). This function should be called after a create or
// update to give a probable (but not guaranteed) chance of being up-to-date after the create/update.
func (s *Server) waitSync(app *appv1.Application) {
	logCtx := log.WithField("application", app.Name)
	deadline := time.Now().Add(informerSyncTimeout)
	minVersion, err := strconv.Atoi(app.ResourceVersion)
	if err != nil {
		logCtx.Warnf("waitSync failed: could not parse resource version %s", app.ResourceVersion)
		time.Sleep(50 * time.Millisecond) // sleep anyways
		return
	}
	for {
		if currApp, err := s.appLister.Get(app.Name); err == nil {
			currVersion, err := strconv.Atoi(currApp.ResourceVersion)
			if err == nil && currVersion >= minVersion {
				return
			}
		}
		if time.Now().After(deadline) {
			break
		}
		time.Sleep(20 * time.Millisecond)
	}
	logCtx.Warnf("waitSync failed: timed out")
}

func (s *Server) updateApp(app *appv1.Application, newApp *appv1.Application, ctx context.Context, merge bool) (*appv1.Application, error) {
	for i := 0; i < 10; i++ {
		app.Spec = newApp.Spec
		if merge {
			app.Labels = mergeStringMaps(app.Labels, newApp.Labels)
			app.Annotations = mergeStringMaps(app.Annotations, newApp.Annotations)
		} else {
			app.Labels = newApp.Labels
			app.Annotations = newApp.Annotations
		}

		app.Finalizers = newApp.Finalizers

		res, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(ctx, app, metav1.UpdateOptions{})
		if err == nil {
			s.logAppEvent(app, ctx, argo.EventReasonResourceUpdated, "updated application spec")
			s.waitSync(res)
			return res, nil
		}
		if !apierr.IsConflict(err) {
			return nil, err
		}

		app, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, newApp.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
	}
	return nil, status.Errorf(codes.Internal, "Failed to update application. Too many conflicts")
Too many conflicts") 505 } 506 507 // Update updates an application 508 func (s *Server) Update(ctx context.Context, q *application.ApplicationUpdateRequest) (*appv1.Application, error) { 509 if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(*q.Application)); err != nil { 510 return nil, err 511 } 512 513 validate := true 514 if q.Validate != nil { 515 validate = *q.Validate 516 } 517 return s.validateAndUpdateApp(ctx, q.Application, false, validate) 518 } 519 520 // UpdateSpec updates an application spec and filters out any invalid parameter overrides 521 func (s *Server) UpdateSpec(ctx context.Context, q *application.ApplicationUpdateSpecRequest) (*appv1.ApplicationSpec, error) { 522 a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, *q.Name, metav1.GetOptions{}) 523 if err != nil { 524 return nil, err 525 } 526 if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(*a)); err != nil { 527 return nil, err 528 } 529 a.Spec = q.Spec 530 validate := true 531 if q.Validate != nil { 532 validate = *q.Validate 533 } 534 a, err = s.validateAndUpdateApp(ctx, a, false, validate) 535 if err != nil { 536 return nil, err 537 } 538 return &a.Spec, nil 539 } 540 541 // Patch patches an application 542 func (s *Server) Patch(ctx context.Context, q *application.ApplicationPatchRequest) (*appv1.Application, error) { 543 544 app, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, *q.Name, metav1.GetOptions{}) 545 if err != nil { 546 return nil, err 547 } 548 549 if err = s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(*app)); err != nil { 550 return nil, err 551 } 552 553 jsonApp, err := json.Marshal(app) 554 if err != nil { 555 return nil, err 556 } 557 558 var patchApp []byte 559 560 switch q.PatchType { 561 case "json", "": 562 patch, err := jsonpatch.DecodePatch([]byte(q.Patch)) 563 if err != nil { 564 return nil, err 565 } 566 patchApp, err = patch.Apply(jsonApp) 567 if err != nil { 568 return nil, err 569 } 570 case "merge": 571 patchApp, err = jsonpatch.MergePatch(jsonApp, []byte(q.Patch)) 572 if err != nil { 573 return nil, err 574 } 575 default: 576 return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Patch type '%s' is not supported", q.PatchType)) 577 } 578 579 newApp := &v1alpha1.Application{} 580 err = json.Unmarshal(patchApp, newApp) 581 if err != nil { 582 return nil, err 583 } 584 return s.validateAndUpdateApp(ctx, newApp, false, true) 585 } 586 587 // Delete removes an application and all associated resources 588 func (s *Server) Delete(ctx context.Context, q *application.ApplicationDeleteRequest) (*application.ApplicationResponse, error) { 589 a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, *q.Name, metav1.GetOptions{}) 590 if err != nil { 591 return nil, err 592 } 593 594 s.projectLock.Lock(a.Spec.Project) 595 defer s.projectLock.Unlock(a.Spec.Project) 596 597 if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionDelete, appRBACName(*a)); err != nil { 598 return nil, err 599 } 600 601 patchFinalizer := false 602 if q.Cascade == nil || *q.Cascade { 603 if !a.CascadedDeletion() { 604 a.SetCascadedDeletion(true) 605 patchFinalizer = true 606 } 607 } else { 608 if a.CascadedDeletion() { 609 a.SetCascadedDeletion(false) 610 patchFinalizer = true 611 } 612 } 613 614 if patchFinalizer { 615 // 
		// Although the cascaded deletion finalizer is not set when apps are created via API,
		// they will often be set by the user as part of declarative config. As part of a delete
		// request, we always calculate the patch to see if we need to set/unset the finalizer.
		patch, err := json.Marshal(map[string]interface{}{
			"metadata": map[string]interface{}{
				"finalizers": a.Finalizers,
			},
		})
		if err != nil {
			return nil, err
		}
		_, err = s.appclientset.ArgoprojV1alpha1().Applications(a.Namespace).Patch(ctx, a.Name, types.MergePatchType, patch, metav1.PatchOptions{})
		if err != nil {
			return nil, err
		}
	}

	err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Delete(ctx, *q.Name, metav1.DeleteOptions{})
	if err != nil {
		return nil, err
	}
	s.logAppEvent(a, ctx, argo.EventReasonResourceDeleted, "deleted application")
	return &application.ApplicationResponse{}, nil
}

func (s *Server) Watch(q *application.ApplicationQuery, ws application.ApplicationService_WatchServer) error {
	logCtx := log.NewEntry(log.New())
	if q.Name != nil {
		logCtx = logCtx.WithField("application", *q.Name)
	}
	claims := ws.Context().Value("claims")
	selector, err := labels.Parse(q.Selector)
	if err != nil {
		return err
	}
	minVersion := 0
	if q.ResourceVersion != "" {
		if minVersion, err = strconv.Atoi(q.ResourceVersion); err != nil {
			minVersion = 0
		}
	}

	// sendIfPermitted is a helper to send the application to the client's streaming channel if the
	// caller has RBAC permission to view it
	sendIfPermitted := func(a appv1.Application, eventType watch.EventType) {
		if appVersion, err := strconv.Atoi(a.ResourceVersion); err == nil && appVersion < minVersion {
			return
		}
		matchedEvent := q.GetName() == "" || a.Name == q.GetName() && selector.Matches(labels.Set(a.Labels))
		if !matchedEvent {
			return
		}

		if !s.enf.Enforce(claims, rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(a)) {
			// do not emit apps the user does not have access to
			return
		}
		err := ws.Send(&appv1.ApplicationWatchEvent{
			Type:        eventType,
			Application: a,
		})
		if err != nil {
			logCtx.Warnf("Unable to send stream message: %v", err)
			return
		}
	}

	events := make(chan *appv1.ApplicationWatchEvent, watchAPIBufferSize)
	// Mimic watch API behavior: send ADDED events if no resource version is provided.
	// If the watch API is executed for a single application, emit events even if a resource version is provided.
	// This is required since the single-app watch API is used during operations like app syncing, and it is
	// critical to never miss events.
	if q.ResourceVersion == "" || q.GetName() != "" {
		apps, err := s.appLister.List(selector)
		if err != nil {
			return err
		}
		for i := range apps {
			sendIfPermitted(*apps[i], watch.Added)
		}
	}
	unsubscribe := s.appBroadcaster.Subscribe(events)
	defer unsubscribe()
	for {
		select {
		case event := <-events:
			sendIfPermitted(event.Application, event.Type)
		case <-ws.Context().Done():
			return nil
		}
	}
}

func (s *Server) validateAndNormalizeApp(ctx context.Context, app *appv1.Application, validate bool) error {
	proj, err := s.appclientset.ArgoprojV1alpha1().AppProjects(s.ns).Get(ctx, app.Spec.GetProject(), metav1.GetOptions{})
	if err != nil {
		if apierr.IsNotFound(err) {
			return status.Errorf(codes.InvalidArgument, "application references project %s which does not exist", app.Spec.Project)
		}
		return err
	}
	currApp, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, app.Name, metav1.GetOptions{})
	if err != nil {
		if !apierr.IsNotFound(err) {
			return err
		}
		// Kubernetes go-client will return a pointer to a zero-value app instead of nil, even
		// though the API response was NotFound. This behavior was confirmed via logs.
		currApp = nil
	}
	if currApp != nil && currApp.Spec.GetProject() != app.Spec.GetProject() {
		// When changing projects, caller must have application create & update privileges in new project
		// NOTE: the update check was already verified in the caller to this function
		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionCreate, appRBACName(*app)); err != nil {
			return err
		}
		// They also need 'update' privileges in the old project
		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(*currApp)); err != nil {
			return err
		}
	}

	// If source is Kustomize add build options
	kustomizeSettings, err := s.settingsMgr.GetKustomizeSettings()
	if err != nil {
		return err
	}
	kustomizeOptions, err := kustomizeSettings.GetOptions(app.Spec.Source)
	if err != nil {
		return err
	}
	plugins, err := s.plugins()
	if err != nil {
		return err
	}

	if err := argo.ValidateDestination(ctx, &app.Spec.Destination, s.db); err != nil {
		return status.Errorf(codes.InvalidArgument, "application destination spec is invalid: %s", err.Error())
	}

	var conditions []appv1.ApplicationCondition
	if validate {
		conditions, err = argo.ValidateRepo(ctx, app, s.repoClientset, s.db, kustomizeOptions, plugins, s.kubectl)
		if err != nil {
			return err
		}
		if len(conditions) > 0 {
			return status.Errorf(codes.InvalidArgument, "application spec is invalid: %s", argo.FormatAppConditions(conditions))
		}
	}

	conditions, err = argo.ValidatePermissions(ctx, &app.Spec, proj, s.db)
	if err != nil {
		return err
	}
	if len(conditions) > 0 {
		return status.Errorf(codes.InvalidArgument, "application spec is invalid: %s", argo.FormatAppConditions(conditions))
	}

	app.Spec = *argo.NormalizeApplicationSpec(&app.Spec)
	return nil
}

func (s *Server) getApplicationClusterConfig(ctx context.Context, a *appv1.Application) (*rest.Config, error) {
	if err := argo.ValidateDestination(ctx, &a.Spec.Destination, s.db); err != nil {
		return nil, err
	}
	clst, err := s.db.GetCluster(ctx, a.Spec.Destination.Server)
	if err != nil {
		return nil, err
	}
	config := clst.RESTConfig()
	return config, err
}

// getCachedAppState loads the cached state and triggers an app refresh if the cache is missing
func (s *Server) getCachedAppState(ctx context.Context, a *appv1.Application, getFromCache func() error) error {
	err := getFromCache()
	if err != nil && err == servercache.ErrCacheMiss {
		conditions := a.Status.GetConditions(map[appv1.ApplicationConditionType]bool{
			appv1.ApplicationConditionComparisonError:  true,
			appv1.ApplicationConditionInvalidSpecError: true,
		})
		if len(conditions) > 0 {
			return errors.New(argoutil.FormatAppConditions(conditions))
		}
		_, err = s.Get(ctx, &application.ApplicationQuery{
			Name:    pointer.StringPtr(a.Name),
			Refresh: pointer.StringPtr(string(appv1.RefreshTypeNormal)),
		})
		if err != nil {
			return err
		}
		return getFromCache()
	}
	return err
}

func (s *Server) getAppResources(ctx context.Context, a *appv1.Application) (*appv1.ApplicationTree, error) {
	var tree appv1.ApplicationTree
	err := s.getCachedAppState(ctx, a, func() error {
		return s.cache.GetAppResourcesTree(a.Name, &tree)
	})
	return &tree, err
}

func (s *Server) getAppResource(ctx context.Context, action string, q *application.ApplicationResourceRequest) (*appv1.ResourceNode, *rest.Config, *appv1.Application, error) {
	a, err := s.appLister.Get(*q.Name)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, action, appRBACName(*a)); err != nil {
		return nil, nil, nil, err
	}

	tree, err := s.getAppResources(ctx, a)
	if err != nil {
		return nil, nil, nil, err
	}

	found := tree.FindNode(q.Group, q.Kind, q.Namespace, q.ResourceName)
	if found == nil {
		return nil, nil, nil, status.Errorf(codes.InvalidArgument, "%s %s %s not found as part of application %s", q.Kind, q.Group, q.ResourceName, *q.Name)
	}
	config, err := s.getApplicationClusterConfig(ctx, a)
	if err != nil {
		return nil, nil, nil, err
	}
	return found, config, a, nil
}

func (s *Server) GetResource(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ApplicationResourceResponse, error) {
	res, config, _, err := s.getAppResource(ctx, rbacpolicy.ActionGet, q)
	if err != nil {
		return nil, err
	}

	// make sure to use specified resource version if provided
	if q.Version != "" {
		res.Version = q.Version
	}
	obj, err := s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
	if err != nil {
		return nil, err
	}
	obj, err = replaceSecretValues(obj)
	if err != nil {
		return nil, err
	}
	data, err := json.Marshal(obj.Object)
	if err != nil {
		return nil, err
	}
	return &application.ApplicationResourceResponse{Manifest: string(data)}, nil
}

func replaceSecretValues(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	if obj.GetKind() == kube.SecretKind && obj.GroupVersionKind().Group == "" {
		_, obj, err := diff.HideSecretData(nil, obj)
		if err != nil {
			return nil, err
		}
		return obj, err
	}
	return obj, nil
}

// PatchResource patches a resource
func (s *Server) PatchResource(ctx context.Context, q *application.ApplicationResourcePatchRequest) (*application.ApplicationResourceResponse, error) {
	resourceRequest := &application.ApplicationResourceRequest{
		Name:         q.Name,
		Namespace:    q.Namespace,
		ResourceName: q.ResourceName,
		Kind:         q.Kind,
		Version:      q.Version,
		Group:        q.Group,
	}
	res, config, a, err := s.getAppResource(ctx, rbacpolicy.ActionUpdate, resourceRequest)
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionUpdate, appRBACName(*a)); err != nil {
		return nil, err
	}

	manifest, err := s.kubectl.PatchResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, types.PatchType(q.PatchType), []byte(q.Patch))
	if err != nil {
		// don't expose real error for secrets since it might contain secret data
		if res.Kind == kube.SecretKind && res.Group == "" {
			return nil, fmt.Errorf("failed to patch Secret %s/%s", res.Namespace, res.Name)
		}
		return nil, err
	}
	manifest, err = replaceSecretValues(manifest)
	if err != nil {
		return nil, err
	}
	data, err := json.Marshal(manifest.Object)
	if err != nil {
		return nil, err
	}
	s.logAppEvent(a, ctx, argo.EventReasonResourceUpdated, fmt.Sprintf("patched resource %s/%s '%s'", q.Group, q.Kind, q.ResourceName))
	return &application.ApplicationResourceResponse{
		Manifest: string(data),
	}, nil
}

// DeleteResource deletes a specified resource
func (s *Server) DeleteResource(ctx context.Context, q *application.ApplicationResourceDeleteRequest) (*application.ApplicationResponse, error) {
	resourceRequest := &application.ApplicationResourceRequest{
		Name:         q.Name,
		Namespace:    q.Namespace,
		ResourceName: q.ResourceName,
		Kind:         q.Kind,
		Version:      q.Version,
		Group:        q.Group,
	}
	res, config, a, err := s.getAppResource(ctx, rbacpolicy.ActionDelete, resourceRequest)
	if err != nil {
		return nil, err
	}

	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionDelete, appRBACName(*a)); err != nil {
		return nil, err
	}
	var force bool
	if q.Force != nil {
		force = *q.Force
	}
	err = s.kubectl.DeleteResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace, force)
	if err != nil {
		return nil, err
	}
	s.logAppEvent(a, ctx, argo.EventReasonResourceDeleted, fmt.Sprintf("deleted resource %s/%s '%s'", q.Group, q.Kind, q.ResourceName))
	return &application.ApplicationResponse{}, nil
}

func (s *Server) ResourceTree(ctx context.Context, q *application.ResourcesQuery) (*appv1.ApplicationTree, error) {
	a, err := s.appLister.Get(q.GetApplicationName())
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	return s.getAppResources(ctx, a)
}

func (s *Server) WatchResourceTree(q *application.ResourcesQuery, ws application.ApplicationService_WatchResourceTreeServer) error {
	a, err := s.appLister.Get(q.GetApplicationName())
	if err != nil {
		return err
	}

	if err := s.enf.EnforceErr(ws.Context().Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return err
	}

	return s.cache.OnAppResourcesTreeChanged(ws.Context(), q.GetApplicationName(), func() error {
		var tree appv1.ApplicationTree
		err := s.cache.GetAppResourcesTree(q.GetApplicationName(), &tree)
		if err != nil {
			return err
		}
		return ws.Send(&tree)
	})
}

func (s *Server) RevisionMetadata(ctx context.Context, q *application.RevisionMetadataQuery) (*v1alpha1.RevisionMetadata, error) {
	a, err := s.appLister.Get(q.GetName())
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	repo, err := s.db.GetRepository(ctx, a.Spec.Source.RepoURL)
	if err != nil {
		return nil, err
	}
	// We need some information from the project associated with the app,
	// so we'll know whether GPG signatures are enforced.
	proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), a.Namespace, s.settingsMgr)
	if err != nil {
		return nil, err
	}
	conn, repoClient, err := s.repoClientset.NewRepoServerClient()
	if err != nil {
		return nil, err
	}
	defer io.Close(conn)
	return repoClient.GetRevisionMetadata(ctx, &apiclient.RepoServerRevisionMetadataRequest{
		Repo:           repo,
		Revision:       q.GetRevision(),
		CheckSignature: len(proj.Spec.SignatureKeys) > 0,
	})
}

func isMatchingResource(q *application.ResourcesQuery, key kube.ResourceKey) bool {
	return (q.Name == "" || q.Name == key.Name) &&
		(q.Namespace == "" || q.Namespace == key.Namespace) &&
		(q.Group == "" || q.Group == key.Group) &&
		(q.Kind == "" || q.Kind == key.Kind)
}

func (s *Server) ManagedResources(ctx context.Context, q *application.ResourcesQuery) (*application.ManagedResourcesResponse, error) {
	a, err := s.appLister.Get(*q.ApplicationName)
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}
	items := make([]*appv1.ResourceDiff, 0)
	err = s.getCachedAppState(ctx, a, func() error {
		return s.cache.GetAppManagedResources(a.Name, &items)
	})
	if err != nil {
		return nil, err
	}
	res := &application.ManagedResourcesResponse{}
	for i := range items {
		item := items[i]
		if isMatchingResource(q, kube.ResourceKey{Name: item.Name, Namespace: item.Namespace, Kind: item.Kind, Group: item.Group}) {
			res.Items = append(res.Items, item)
		}
	}

	return res, nil
}

func (s *Server) PodLogs(q *application.ApplicationPodLogsQuery, ws application.ApplicationService_PodLogsServer) error {
	pod, config, _, err := s.getAppResource(ws.Context(), rbacpolicy.ActionGet, &application.ApplicationResourceRequest{
		Name:         q.Name,
		Namespace:    q.Namespace,
		Kind:         kube.PodKind,
		Group:        "",
		Version:      "v1",
		ResourceName: *q.PodName,
	})

	if err != nil {
		return err
	}

	kubeClientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}

	var sinceSeconds, tailLines *int64
	if q.SinceSeconds > 0 {
		sinceSeconds = &q.SinceSeconds
	}
	if q.TailLines > 0 {
		tailLines = &q.TailLines
	}
	stream, err := kubeClientset.CoreV1().Pods(pod.Namespace).GetLogs(*q.PodName, &v1.PodLogOptions{
		Container:    q.Container,
		Follow:       q.Follow,
		Timestamps:   true,
		SinceSeconds: sinceSeconds,
		SinceTime:    q.SinceTime,
		TailLines:    tailLines,
	}).Stream(ws.Context())
	if err != nil {
		return err
	}
	logCtx := log.WithField("application", q.Name)
	defer io.Close(stream)
	done := make(chan bool)
	reachedEOF := false
	gracefulExit := false
	go func() {
		bufReader := bufio.NewReader(stream)

		for {
			line, err := bufReader.ReadString('\n')
			if err != nil {
				// Error or io.EOF
				break
			}
			line = strings.TrimSpace(line) // Remove trailing line ending
			parts := strings.Split(line, " ")
			logTime, err := time.Parse(time.RFC3339, parts[0])
			metaLogTime := metav1.NewTime(logTime)
			if err == nil {
				lines := strings.Join(parts[1:], " ")
				for _, line := range strings.Split(lines, "\r") {
					if line != "" {
						err = ws.Send(&application.LogEntry{
							Content:   line,
							TimeStamp: metaLogTime,
						})
						if err != nil {
							logCtx.Warnf("Unable to send stream message: %v", err)
						}
					}
				}
			}
		}
		if gracefulExit {
			logCtx.Info("k8s pod logs reader completed due to closed grpc context")
		} else if err != nil && err != goio.EOF {
			logCtx.Warnf("k8s pod logs reader failed with error: %v", err)
		} else {
			logCtx.Info("k8s pod logs reader completed with EOF")
			reachedEOF = true
		}
		close(done)
	}()

	select {
	case <-ws.Context().Done():
		logCtx.Info("client pod logs grpc context closed")
		gracefulExit = true
	case <-done:
	}

	if reachedEOF || gracefulExit {
		if err := ws.Send(&application.LogEntry{Last: true}); err != nil {
			logCtx.Warnf("Unable to send stream message notifying about last log message: %v", err)
		}
	}
	return nil
}

// Sync syncs an application to its target state
func (s *Server) Sync(ctx context.Context, syncReq *application.ApplicationSyncRequest) (*appv1.Application, error) {
	appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)
	a, err := appIf.Get(ctx, *syncReq.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), a.Namespace, s.settingsMgr)
	if err != nil {
		if apierr.IsNotFound(err) {
			return a, status.Errorf(codes.InvalidArgument, "application references project %s which does not exist", a.Spec.Project)
		}
		return a, err
	}

	if !proj.Spec.SyncWindows.Matches(a).CanSync(true) {
		return a, status.Errorf(codes.PermissionDenied, "Cannot sync: Blocked by sync window")
	}

	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionSync, appRBACName(*a)); err != nil {
		return nil, err
	}
	if syncReq.Manifests != nil {
		if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionOverride, appRBACName(*a)); err != nil {
			return nil, err
		}
		if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil && !syncReq.DryRun {
			return nil, status.Error(codes.FailedPrecondition, "Cannot use local sync when Automatic Sync Policy is enabled unless for dry run")
		}
	}
	if a.DeletionTimestamp != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "application is deleting")
	}
	if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil {
		if syncReq.Revision != "" && syncReq.Revision != text.FirstNonEmpty(a.Spec.Source.TargetRevision, "HEAD") {
			return nil, status.Errorf(codes.FailedPrecondition, "Cannot sync to %s: auto-sync currently set to %s", syncReq.Revision, a.Spec.Source.TargetRevision)
		}
	}
	revision, displayRevision, err := s.resolveRevision(ctx, a, syncReq)
	if err != nil {
		return nil, status.Errorf(codes.FailedPrecondition, err.Error())
	}

	var retry *appv1.RetryStrategy
	var syncOptions appv1.SyncOptions
	if a.Spec.SyncPolicy != nil {
		syncOptions = a.Spec.SyncPolicy.SyncOptions
		retry = a.Spec.SyncPolicy.Retry
	}
	if syncReq.RetryStrategy != nil {
		retry = syncReq.RetryStrategy
	}

	// We cannot use local manifests if we're only allowed to sync to signed commits
	if syncReq.Manifests != nil && len(proj.Spec.SignatureKeys) > 0 {
		return nil, status.Errorf(codes.FailedPrecondition, "Cannot use local sync when signature keys are required.")
	}

	op := appv1.Operation{
		Sync: &appv1.SyncOperation{
			Revision:     revision,
			Prune:        syncReq.Prune,
			DryRun:       syncReq.DryRun,
			SyncOptions:  syncOptions,
			SyncStrategy: syncReq.Strategy,
			Resources:    syncReq.Resources,
			Manifests:    syncReq.Manifests,
		},
		InitiatedBy: appv1.OperationInitiator{Username: session.Username(ctx)},
		Info:        syncReq.Infos,
	}
	if retry != nil {
		op.Retry = *retry
	}

	a, err = argo.SetAppOperation(appIf, *syncReq.Name, &op)
	if err == nil {
		partial := ""
		if len(syncReq.Resources) > 0 {
			partial = "partial "
		}
		s.logAppEvent(a, ctx, argo.EventReasonOperationStarted, fmt.Sprintf("initiated %ssync to %s", partial, displayRevision))
	}
	return a, err
}

func (s *Server) Rollback(ctx context.Context, rollbackReq *application.ApplicationRollbackRequest) (*appv1.Application, error) {
	appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)
	a, err := appIf.Get(ctx, *rollbackReq.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionSync, appRBACName(*a)); err != nil {
		return nil, err
	}
	if a.DeletionTimestamp != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "application is deleting")
	}
	if a.Spec.SyncPolicy != nil && a.Spec.SyncPolicy.Automated != nil {
		return nil, status.Errorf(codes.FailedPrecondition, "rollback cannot be initiated when auto-sync is enabled")
	}

	var deploymentInfo *appv1.RevisionHistory
	for _, info := range a.Status.History {
		if info.ID == rollbackReq.ID {
			deploymentInfo = &info
			break
		}
	}
	if deploymentInfo == nil {
		return nil, status.Errorf(codes.InvalidArgument, "application %s does not have deployment with id %v", a.Name, rollbackReq.ID)
	}
	if deploymentInfo.Source.IsZero() {
		// Since source type was introduced to history starting with v0.12, and is now required for
		// rollback, we cannot support rollback to revisions deployed using Argo CD v0.11 or below
		return nil, status.Errorf(codes.FailedPrecondition, "cannot rollback to revision deployed with Argo CD v0.11 or lower. sync to revision instead.")
	}

	var syncOptions appv1.SyncOptions
	if a.Spec.SyncPolicy != nil {
		syncOptions = a.Spec.SyncPolicy.SyncOptions
	}

	// Rollback is just a convenience around Sync
	op := appv1.Operation{
		Sync: &appv1.SyncOperation{
			Revision:     deploymentInfo.Revision,
			DryRun:       rollbackReq.DryRun,
			Prune:        rollbackReq.Prune,
			SyncOptions:  syncOptions,
			SyncStrategy: &appv1.SyncStrategy{Apply: &appv1.SyncStrategyApply{}},
			Source:       &deploymentInfo.Source,
		},
	}
	a, err = argo.SetAppOperation(appIf, *rollbackReq.Name, &op)
	if err == nil {
		s.logAppEvent(a, ctx, argo.EventReasonOperationStarted, fmt.Sprintf("initiated rollback to %d", rollbackReq.ID))
	}
	return a, err
}

// resolveRevision resolves the revision specified either in the sync request, or the
// application source, into a concrete revision that will be used for a sync operation.
func (s *Server) resolveRevision(ctx context.Context, app *appv1.Application, syncReq *application.ApplicationSyncRequest) (string, string, error) {
	ambiguousRevision := syncReq.Revision
	if ambiguousRevision == "" {
		ambiguousRevision = app.Spec.Source.TargetRevision
	}
	var revision string
	if app.Spec.Source.IsHelm() {
		repo, err := s.db.GetRepository(ctx, app.Spec.Source.RepoURL)
		if err != nil {
			return "", "", err
		}
		if helm.IsVersion(ambiguousRevision) {
			return ambiguousRevision, ambiguousRevision, nil
		}
		client := helm.NewClient(repo.Repo, repo.GetHelmCreds(), repo.EnableOCI || app.Spec.Source.IsHelmOci())
		index, err := client.GetIndex()
		if err != nil {
			return "", "", err
		}
		entries, err := index.GetEntries(app.Spec.Source.Chart)
		if err != nil {
			return "", "", err
		}
		constraints, err := semver.NewConstraint(ambiguousRevision)
		if err != nil {
			return "", "", err
		}
		version, err := entries.MaxVersion(constraints)
		if err != nil {
			return "", "", err
		}
		return version.String(), fmt.Sprintf("%v (%v)", ambiguousRevision, version.String()), nil
	} else {
		if git.IsCommitSHA(ambiguousRevision) {
			// If it's already a commit SHA, then no need to look it up
			return ambiguousRevision, ambiguousRevision, nil
		}
		repo, err := s.db.GetRepository(ctx, app.Spec.Source.RepoURL)
		if err != nil {
			return "", "", err
		}
		gitClient, err := git.NewClient(repo.Repo, repo.GetGitCreds(), repo.IsInsecure(), repo.IsLFSEnabled())
		if err != nil {
			return "", "", err
		}
		revision, err = gitClient.LsRemote(ambiguousRevision)
		if err != nil {
			return "", "", err
		}
		return revision, fmt.Sprintf("%s (%s)", ambiguousRevision, revision), nil
	}
}

func (s *Server) TerminateOperation(ctx context.Context, termOpReq *application.OperationTerminateRequest) (*application.OperationTerminateResponse, error) {
	a, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, *termOpReq.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionSync, appRBACName(*a)); err != nil {
		return nil, err
	}

	for i := 0; i < 10; i++ {
		if a.Operation == nil || a.Status.OperationState == nil {
			return nil, status.Errorf(codes.InvalidArgument, "Unable to terminate operation. No operation is in progress")
		}
		a.Status.OperationState.Phase = common.OperationTerminating
		updated, err := s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Update(ctx, a, metav1.UpdateOptions{})
		if err == nil {
			s.waitSync(updated)
			s.logAppEvent(a, ctx, argo.EventReasonResourceUpdated, "terminated running operation")
			return &application.OperationTerminateResponse{}, nil
		}
		if !apierr.IsConflict(err) {
			return nil, err
		}
		log.Warnf("Failed to set operation for app '%s' due to update conflict. Retrying again...", *termOpReq.Name)
		time.Sleep(100 * time.Millisecond)
		a, err = s.appclientset.ArgoprojV1alpha1().Applications(s.ns).Get(ctx, *termOpReq.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
	}
	return nil, status.Errorf(codes.Internal, "Failed to terminate app. Too many conflicts")
}

func (s *Server) logAppEvent(a *appv1.Application, ctx context.Context, reason string, action string) {
	eventInfo := argo.EventInfo{Type: v1.EventTypeNormal, Reason: reason}
	user := session.Username(ctx)
	if user == "" {
		user = "Unknown user"
	}
	message := fmt.Sprintf("%s %s", user, action)
	s.auditLogger.LogAppEvent(a, eventInfo, message)
}

func (s *Server) logResourceEvent(res *appv1.ResourceNode, ctx context.Context, reason string, action string) {
	eventInfo := argo.EventInfo{Type: v1.EventTypeNormal, Reason: reason}
	user := session.Username(ctx)
	if user == "" {
		user = "Unknown user"
	}
	message := fmt.Sprintf("%s %s", user, action)
	s.auditLogger.LogResourceEvent(res, eventInfo, message)
}

func (s *Server) ListResourceActions(ctx context.Context, q *application.ApplicationResourceRequest) (*application.ResourceActionsListResponse, error) {
	res, config, _, err := s.getAppResource(ctx, rbacpolicy.ActionGet, q)
	if err != nil {
		return nil, err
	}
	obj, err := s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
	if err != nil {
		return nil, err
	}
	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
	if err != nil {
		return nil, err
	}

	availableActions, err := s.getAvailableActions(resourceOverrides, obj)
	if err != nil {
		return nil, err
	}

	return &application.ResourceActionsListResponse{Actions: availableActions}, nil
}

func (s *Server) getAvailableActions(resourceOverrides map[string]appv1.ResourceOverride, obj *unstructured.Unstructured) ([]appv1.ResourceAction, error) {
	luaVM := lua.VM{
		ResourceOverrides: resourceOverrides,
	}

	discoveryScript, err := luaVM.GetResourceActionDiscovery(obj)
	if err != nil {
		return nil, err
	}
	if discoveryScript == "" {
		return []appv1.ResourceAction{}, nil
	}
	availableActions, err := luaVM.ExecuteResourceActionDiscovery(obj, discoveryScript)
	if err != nil {
		return nil, err
	}
	return availableActions, nil

}

func (s *Server) RunResourceAction(ctx context.Context, q *application.ResourceActionRunRequest) (*application.ApplicationResponse, error) {
	resourceRequest := &application.ApplicationResourceRequest{
		Name:         q.Name,
		Namespace:    q.Namespace,
		ResourceName: q.ResourceName,
		Kind:         q.Kind,
		Version:      q.Version,
		Group:        q.Group,
	}
	actionRequest := fmt.Sprintf("%s/%s/%s/%s", rbacpolicy.ActionAction, q.Group, q.Kind, q.Action)
	res, config, a, err := s.getAppResource(ctx, actionRequest, resourceRequest)
	if err != nil {
		return nil, err
	}
	liveObj, err := s.kubectl.GetResource(ctx, config, res.GroupKindVersion(), res.Name, res.Namespace)
	if err != nil {
		return nil, err
	}

	resourceOverrides, err := s.settingsMgr.GetResourceOverrides()
	if err != nil {
		return nil, err
	}

	luaVM := lua.VM{
		ResourceOverrides: resourceOverrides,
	}
	action, err := luaVM.GetResourceAction(liveObj, q.Action)
	if err != nil {
		return nil, err
	}

	newObj, err := luaVM.ExecuteResourceAction(liveObj, action.ActionLua)
	if err != nil {
		return nil, err
	}

	newObjBytes, err := json.Marshal(newObj)
	if err != nil {
		return nil, err
	}

	liveObjBytes, err := json.Marshal(liveObj)
	if err != nil {
		return nil, err
	}

	diffBytes, err := jsonpatch.CreateMergePatch(liveObjBytes, newObjBytes)
	if err != nil {
		return nil, err
	}
	if string(diffBytes) == "{}" {
		return &application.ApplicationResponse{}, nil
	}

	// The following logic detects if the resource action makes a modification to status and/or spec.
	// If status was modified, we attempt to patch the status using status subresource, in case the
	// CRD is configured using the status subresource feature. See:
	// https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource
	// If status subresource is in use, the patch has to be split into two:
	// * one to update spec (and other non-status fields)
	// * the other to update only status.
	nonStatusPatch, statusPatch, err := splitStatusPatch(diffBytes)
	if err != nil {
		return nil, err
	}
	if statusPatch != nil {
		_, err = s.kubectl.PatchResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), types.MergePatchType, diffBytes, "status")
		if err != nil {
			if !apierr.IsNotFound(err) {
				return nil, err
			}
			// K8s API server returns 404 NotFound when the CRD does not support the status subresource
			// if we get here, the CRD does not use the status subresource. We will fall back to a normal patch
		} else {
			// If we get here, the CRD does use the status subresource, so we must patch status and
			// spec separately. update the diffBytes to the spec-only patch and fall through.
			diffBytes = nonStatusPatch
		}
	}
	if diffBytes != nil {
		_, err = s.kubectl.PatchResource(ctx, config, newObj.GroupVersionKind(), newObj.GetName(), newObj.GetNamespace(), types.MergePatchType, diffBytes)
		if err != nil {
			return nil, err
		}
	}

	s.logAppEvent(a, ctx, argo.EventReasonResourceActionRan, fmt.Sprintf("ran action %s on resource %s/%s/%s", q.Action, res.Group, res.Kind, res.Name))
	s.logResourceEvent(res, ctx, argo.EventReasonResourceActionRan, fmt.Sprintf("ran action %s", q.Action))
	return &application.ApplicationResponse{}, nil
}

// splitStatusPatch splits a patch into two: one for a non-status patch, and the status-only patch.
// Returns nil for either if the patch doesn't have modifications to non-status, or status, respectively.
func splitStatusPatch(patch []byte) ([]byte, []byte, error) {
	var obj map[string]interface{}
	err := json.Unmarshal(patch, &obj)
	if err != nil {
		return nil, nil, err
	}
	var nonStatusPatch, statusPatch []byte
	if statusVal, ok := obj["status"]; ok {
		// calculate the status-only patch
		statusObj := map[string]interface{}{
			"status": statusVal,
		}
		statusPatch, err = json.Marshal(statusObj)
		if err != nil {
			return nil, nil, err
		}
		// remove status, and calculate the non-status patch
		delete(obj, "status")
		if len(obj) > 0 {
			nonStatusPatch, err = json.Marshal(obj)
			if err != nil {
				return nil, nil, err
			}
		}
	} else {
		// status was not modified in patch
		nonStatusPatch = patch
	}
	return nonStatusPatch, statusPatch, nil
}

func (s *Server) plugins() ([]*v1alpha1.ConfigManagementPlugin, error) {
	plugins, err := s.settingsMgr.GetConfigManagementPlugins()
	if err != nil {
		return nil, err
	}
	tools := make([]*v1alpha1.ConfigManagementPlugin, len(plugins))
	for i, p := range plugins {
		p := p
		tools[i] = &p
	}
	return tools, nil
}

func (s *Server) GetApplicationSyncWindows(ctx context.Context, q *application.ApplicationSyncWindowsQuery) (*application.ApplicationSyncWindowsResponse, error) {
	appIf := s.appclientset.ArgoprojV1alpha1().Applications(s.ns)
	a, err := appIf.Get(ctx, *q.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	if err := s.enf.EnforceErr(ctx.Value("claims"), rbacpolicy.ResourceApplications, rbacpolicy.ActionGet, appRBACName(*a)); err != nil {
		return nil, err
	}

	proj, err := argo.GetAppProject(&a.Spec, applisters.NewAppProjectLister(s.projInformer.GetIndexer()), a.Namespace, s.settingsMgr)
	if err != nil {
		return nil, err
	}

	windows := proj.Spec.SyncWindows.Matches(a)
	sync := windows.CanSync(true)

	res := &application.ApplicationSyncWindowsResponse{
		ActiveWindows:   convertSyncWindows(windows.Active()),
		AssignedWindows: convertSyncWindows(windows),
		CanSync:         &sync,
	}

	return res, nil
}

func convertSyncWindows(w *v1alpha1.SyncWindows) []*application.ApplicationSyncWindow {
	if w != nil {
		var windows []*application.ApplicationSyncWindow
		for _, w := range *w {
			nw := &application.ApplicationSyncWindow{
				Kind:       &w.Kind,
				Schedule:   &w.Schedule,
				Duration:   &w.Duration,
				ManualSync: &w.ManualSync,
			}
			windows = append(windows, nw)
		}
		if len(windows) > 0 {
			return windows
		}
	}
	return nil
}