github.com/oam-dev/kubevela@v1.9.11/pkg/resourcekeeper/gc.go

/*
Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcekeeper

import (
	"context"
	"encoding/json"
	"math/rand"
	"strings"
	"time"

	"github.com/crossplane/crossplane-runtime/pkg/meta"
	"github.com/hashicorp/go-version"
	"github.com/kubevela/pkg/util/slices"
	"github.com/pkg/errors"
	appsv1 "k8s.io/api/apps/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/pkg/auth"
	"github.com/oam-dev/kubevela/pkg/features"
	"github.com/oam-dev/kubevela/pkg/monitor/metrics"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/oam/util"
	"github.com/oam-dev/kubevela/pkg/policy"
	"github.com/oam-dev/kubevela/pkg/resourcetracker"
	"github.com/oam-dev/kubevela/pkg/utils"
	"github.com/oam-dev/kubevela/pkg/utils/apply"
	version2 "github.com/oam-dev/kubevela/version"
)

var (
	// MarkWithProbability optimizes ResourceTracker gc for legacy resources by
	// reducing the frequency of outdated rt checks
	MarkWithProbability = 0.1
)

// GCOption is an option for configuring garbage collection
type GCOption interface {
	ApplyToGCConfig(*gcConfig)
}

type gcConfig struct {
	passive bool

	disableMark                  bool
	disableSweep                 bool
	disableFinalize              bool
	disableComponentRevisionGC   bool
	disableLegacyGC              bool
	disableApplicationRevisionGC bool

	order v1alpha1.GarbageCollectOrder

	appRevisionLimit int
}

func newGCConfig(options ...GCOption) *gcConfig {
	cfg := &gcConfig{}
	for _, option := range options {
		option.ApplyToGCConfig(cfg)
	}
	return cfg
}
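
// A GCOption is typically an empty struct whose ApplyToGCConfig flips one of
// the gcConfig fields above. A minimal sketch (PassiveGCOption is defined
// elsewhere in this package; the body shown here is illustrative, not a copy
// of that definition):
//
//	type PassiveGCOption struct{}
//
//	func (opt PassiveGCOption) ApplyToGCConfig(cfg *gcConfig) {
//		cfg.passive = true
//	}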

// GarbageCollect recycles resources and handles finalizers for resourcetrackers.
// Application resource garbage collection follows three stages.
//
// 1. Mark Stage
// The controller finds all resourcetrackers for the target application and decides which resourcetrackers should be
// deleted. The decision rules are:
//
//	a. rootRT and currentRT are marked as deleted only when the application is marked as deleted (its
//	   DeletionTimestamp is not nil).
//	b. historyRTs are marked as deleted if at least one of the conditions below is met
//	   i.  GarbageCollectionMode is not set to `passive`
//	   ii. All managed resources are RECYCLED. (RECYCLED means the resource no longer exists or is managed by the
//	       latest resourcetrackers)
//
// NOTE: the Mark Stage runs on every application reconcile, no matter whether the workflow has ended
//
// 2. Sweep Stage
// The controller checks all resourcetrackers marked to be deleted. Once all managed resources of such a
// resourcetracker are recycled, its finalizer is removed.
//
// 3. Finalize Stage
// The controller finalizes all resourcetrackers marked to be deleted by recycling all of their managed resources.
//
// NOTE: the Mark Stage only runs when the workflow succeeds. The Sweep and Finalize stages always run.
//
//	For one single application, the deletion will follow Mark -> Finalize -> Sweep
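//
// A rough sketch of how a caller drives this (the constructor follows this
// package's NewResourceKeeper; error handling and arguments are elided):
//
//	keeper, _ := NewResourceKeeper(ctx, cli, app)
//	finished, waiting, err := keeper.GarbageCollect(ctx)
//	if err == nil && !finished {
//		// resources in `waiting` are still being recycled; requeue
//	}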
func (h *resourceKeeper) GarbageCollect(ctx context.Context, options ...GCOption) (finished bool, waiting []v1beta1.ManagedResource, err error) {
	return h.garbageCollect(ctx, h.buildGCConfig(ctx, options...))
}

func (h *resourceKeeper) buildGCConfig(ctx context.Context, options ...GCOption) *gcConfig {
	if h.garbageCollectPolicy != nil {
		if h.garbageCollectPolicy.KeepLegacyResource {
			options = append(options, PassiveGCOption{})
		}
		switch h.garbageCollectPolicy.Order {
		case v1alpha1.OrderDependency:
			options = append(options, DependencyGCOption{})
		default:
		}
		if h.garbageCollectPolicy.ContinueOnFailure && PhaseFrom(ctx) == common.ApplicationWorkflowFailed {
			options = slices.Filter(options, func(opt GCOption) bool {
				_, ok := opt.(DisableMarkStageGCOption)
				return !ok
			})
		}
	}
	return newGCConfig(options...)
}
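
// For reference, the application's garbage-collect policy is what feeds the
// options above; a minimal sketch of the corresponding policy spec (field
// names from v1alpha1.GarbageCollectPolicySpec, values illustrative):
//
//	spec := v1alpha1.GarbageCollectPolicySpec{
//		KeepLegacyResource: true,                     // -> PassiveGCOption
//		Order:              v1alpha1.OrderDependency, // -> DependencyGCOption
//	}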

func (h *resourceKeeper) garbageCollect(ctx context.Context, cfg *gcConfig) (finished bool, waiting []v1beta1.ManagedResource, err error) {
	gc := gcHandler{
		resourceKeeper: h,
		cfg:            cfg,
	}
	gc.Init()
	// Mark Stage
	if !cfg.disableMark {
		if err = gc.Mark(ctx); err != nil {
			return false, waiting, errors.Wrapf(err, "failed to mark inactive resourcetrackers")
		}
	}
	// Sweep Stage
	if !cfg.disableSweep {
		if finished, waiting, err = gc.Sweep(ctx); err != nil {
			return false, waiting, errors.Wrapf(err, "failed to sweep resourcetrackers to be deleted")
		}
	}
	// Finalize Stage
	if !cfg.disableFinalize && !finished {
		if err = gc.Finalize(ctx); err != nil {
			return false, waiting, errors.Wrapf(err, "failed to finalize resourcetrackers to be deleted")
		}
	}
	// Garbage Collect Component Revision in unused components
	if !cfg.disableComponentRevisionGC {
		if err = gc.GarbageCollectComponentRevisionResourceTracker(ctx); err != nil {
			return false, waiting, errors.Wrapf(err, "failed to garbage collect component revisions in unused components")
		}
	}
	// Garbage Collect Legacy ResourceTrackers
	if !cfg.disableLegacyGC {
		if err = gc.GarbageCollectLegacyResourceTrackers(ctx); err != nil {
			return false, waiting, errors.Wrapf(err, "failed to garbage collect legacy resource trackers")
		}
	}

	if !cfg.disableApplicationRevisionGC {
		if err = gc.GarbageCollectApplicationRevision(ctx); err != nil {
			return false, waiting, errors.Wrapf(err, "failed to garbage collect application revision")
		}
	}

	return finished, waiting, nil
}

// gcHandler implements the details of each garbage collection stage
type gcHandler struct {
	*resourceKeeper
	cfg *gcConfig
}

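// monitor returns a callback that, when invoked (typically via defer), records
// the duration of the given gc stage in the application reconcile metrics.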
func (h *gcHandler) monitor(stage string) func() {
	begin := time.Now()
	return func() {
		v := time.Since(begin).Seconds()
		metrics.AppReconcileStageDurationHistogram.WithLabelValues("gc-rt." + stage).Observe(v)
	}
}

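// regularizeResourceTracker clears the namespace field of cluster-scoped
// managed resources, so that the records kept in the resourcetracker spec stay
// canonical regardless of how the resources were originally rendered.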
func (h *gcHandler) regularizeResourceTracker(rts ...*v1beta1.ResourceTracker) {
	for _, rt := range rts {
		if rt == nil {
			continue
		}
		for i, mr := range rt.Spec.ManagedResources {
			if ok, err := utils.IsClusterScope(mr.GroupVersionKind(), h.Client.RESTMapper()); err == nil && ok {
				rt.Spec.ManagedResources[i].Namespace = ""
			}
		}
	}
}

func (h *gcHandler) Init() {
	cb := h.monitor("init")
	defer cb()
	rts := append(h._historyRTs, h._currentRT, h._rootRT) // nolint
	h.regularizeResourceTracker(rts...)
	h.cache.registerResourceTrackers(rts...)
}

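// scan returns the resourcetrackers that are no longer active. When the
// application is being deleted, every resourcetracker is inactive. In passive
// mode, history resourcetrackers are only probed with probability
// MarkWithProbability (to bound the cost of repeated checks), and a
// resourcetracker is reported inactive only if all of its managed resources
// have been recycled.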
func (h *gcHandler) scan(ctx context.Context) (inactiveRTs []*v1beta1.ResourceTracker) {
	if h.app.GetDeletionTimestamp() != nil {
		inactiveRTs = append(inactiveRTs, h._historyRTs...)
		inactiveRTs = append(inactiveRTs, h._currentRT, h._rootRT, h._crRT)
	} else {
		if h.cfg.passive {
			inactiveRTs = []*v1beta1.ResourceTracker{}
			if rand.Float64() > MarkWithProbability { //nolint
				return inactiveRTs
			}
			for _, rt := range h._historyRTs {
				if rt != nil {
					inactive := true
					for _, mr := range rt.Spec.ManagedResources {
						entry := h.cache.get(auth.ContextWithUserInfo(ctx, h.app), mr)
						if entry.err == nil && (entry.gcExecutorRT != rt || !entry.exists) {
							continue
						}
						inactive = false
					}
					if inactive {
						inactiveRTs = append(inactiveRTs, rt)
					}
				}
			}
		} else {
			inactiveRTs = h._historyRTs
		}
	}
	return inactiveRTs
}

func (h *gcHandler) Mark(ctx context.Context) error {
	cb := h.monitor("mark")
	defer cb()
	inactiveRTs := h.scan(ctx)
	for _, rt := range inactiveRTs {
		if rt != nil && rt.GetDeletionTimestamp() == nil {
			if err := h.Client.Delete(ctx, rt); err != nil && !kerrors.IsNotFound(err) {
				return err
			}
			_rt := &v1beta1.ResourceTracker{}
			if err := h.Client.Get(ctx, client.ObjectKeyFromObject(rt), _rt); err != nil {
				if !kerrors.IsNotFound(err) {
					return err
				}
			} else {
				_rt.DeepCopyInto(rt)
			}
		}
	}
	return nil
}

// checkAndRemoveResourceTrackerFinalizer returns whether all resources are recycled, the first resource still being
// waited for (if any), and an error
func (h *gcHandler) checkAndRemoveResourceTrackerFinalizer(ctx context.Context, rt *v1beta1.ResourceTracker) (bool, v1beta1.ManagedResource, error) {
	for _, mr := range rt.Spec.ManagedResources {
		entry := h.cache.get(auth.ContextWithUserInfo(ctx, h.app), mr)
		if entry.err != nil {
			return false, entry.mr, entry.err
		}
		if entry.exists && entry.gcExecutorRT == rt {
			return false, entry.mr, nil
		}
	}
	meta.RemoveFinalizer(rt, resourcetracker.Finalizer)
	return true, v1beta1.ManagedResource{}, h.Client.Update(ctx, rt)
}

func (h *gcHandler) Sweep(ctx context.Context) (finished bool, waiting []v1beta1.ManagedResource, err error) {
	cb := h.monitor("sweep")
	defer cb()
	finished = true
	for _, rt := range append(h._historyRTs, h._currentRT, h._rootRT) {
		if rt != nil && rt.GetDeletionTimestamp() != nil {
			_finished, mr, err := h.checkAndRemoveResourceTrackerFinalizer(ctx, rt)
			if err != nil {
				return false, waiting, err
			}
			if !_finished {
				finished = false
				waiting = append(waiting, mr)
			}
		}
	}
	return finished, waiting, nil
}

func (h *gcHandler) recycleResourceTracker(ctx context.Context, rt *v1beta1.ResourceTracker) error {
	ctx = auth.ContextWithUserInfo(ctx, h.app)
	switch h.cfg.order {
	case v1alpha1.OrderDependency:
		for _, mr := range rt.Spec.ManagedResources {
			if err := h.deleteIndependentComponent(ctx, mr, rt); err != nil {
				return err
			}
		}
		return nil
	default:
	}
	for _, mr := range rt.Spec.ManagedResources {
		if err := h.deleteManagedResource(ctx, mr, rt); err != nil {
			return err
		}
	}
	return nil
}

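// deleteIndependentComponent deletes mr only once nothing depends on its
// component anymore: if components that depend on it still have live resources
// managed by this resourcetracker, deletion is skipped and retried on a later
// pass, which yields a reverse-dependency deletion order overall.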
func (h *gcHandler) deleteIndependentComponent(ctx context.Context, mr v1beta1.ManagedResource, rt *v1beta1.ResourceTracker) error {
	dependent := h.checkDependentComponent(mr)
	if len(dependent) == 0 {
		if err := h.deleteManagedResource(ctx, mr, rt); err != nil {
			return err
		}
	} else {
		dependentClear := true
		for _, mr := range rt.Spec.ManagedResources {
			if slices.Contains(dependent, mr.Component) {
				entry := h.cache.get(ctx, mr)
				if entry.gcExecutorRT != rt {
					continue
				}
				if entry.err != nil {
					continue
				}
				if entry.exists {
					dependentClear = false
					break
				}
			}
		}
		if dependentClear {
			if err := h.deleteManagedResource(ctx, mr, rt); err != nil {
				return err
			}
		}
	}
	return nil
}

// UpdateSharedManagedResourceOwner updates the owner labels & shared-by annotation for a managed resource, handing
// ownership over to the first remaining sharer.
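// The sharer list is expected to hold entries of the form "namespace/appName"
// (or a bare "appName", which defaults to the "default" namespace); e.g. if
// the first remaining sharer is "vela-system/app2", ownership moves to
// application "app2" in namespace "vela-system".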
func UpdateSharedManagedResourceOwner(ctx context.Context, cli client.Client, manifest *unstructured.Unstructured, sharedBy string) error {
	parts := strings.Split(apply.FirstSharer(sharedBy), "/")
	appName, appNs := "", metav1.NamespaceDefault
	if len(parts) == 1 {
		appName = parts[0]
	} else if len(parts) == 2 {
		appName, appNs = parts[1], parts[0]
	}
	util.AddAnnotations(manifest, map[string]string{oam.AnnotationAppSharedBy: sharedBy})
	util.AddLabels(manifest, map[string]string{
		oam.LabelAppName:      appName,
		oam.LabelAppNamespace: appNs,
	})
	return cli.Update(ctx, manifest)
}

func (h *gcHandler) deleteManagedResource(ctx context.Context, mr v1beta1.ManagedResource, rt *v1beta1.ResourceTracker) error {
	entry := h.cache.get(ctx, mr)
	if entry.gcExecutorRT != rt {
		return nil
	}
	if entry.err != nil {
		return entry.err
	}
	if entry.exists {
		return DeleteManagedResourceInApplication(ctx, h.Client, mr, entry.obj, h.app)
	}
	return nil
}

// DeleteManagedResourceInApplication deletes a managed resource on behalf of the given application. Shared resources
// are handed over to the next sharer instead of being deleted; resources skipping gc (or owned by an application with
// the orphan finalizer) only have their owner labels removed.
func DeleteManagedResourceInApplication(ctx context.Context, cli client.Client, mr v1beta1.ManagedResource, obj *unstructured.Unstructured, app *v1beta1.Application) error {
	_ctx := multicluster.ContextWithClusterName(ctx, mr.Cluster)
	if annotations := obj.GetAnnotations(); annotations != nil && annotations[oam.AnnotationAppSharedBy] != "" {
		sharedBy := apply.RemoveSharer(annotations[oam.AnnotationAppSharedBy], app)
		if sharedBy != "" {
			if err := UpdateSharedManagedResourceOwner(_ctx, cli, obj, sharedBy); err != nil {
				return errors.Wrapf(err, "failed to remove sharer from resource %s", mr.ResourceKey())
			}
			return nil
		}
		util.RemoveAnnotations(obj, []string{oam.AnnotationAppSharedBy})
	}
	if mr.SkipGC || hasOrphanFinalizer(app) {
		if labels := obj.GetLabels(); labels != nil {
			delete(labels, oam.LabelAppName)
			delete(labels, oam.LabelAppNamespace)
			obj.SetLabels(labels)
		}
		return errors.Wrapf(cli.Update(_ctx, obj), "failed to remove owner labels for resource while skipping gc")
	}
	var opts []client.DeleteOption
	if garbageCollectPolicy, _ := policy.ParsePolicy[v1alpha1.GarbageCollectPolicySpec](app); garbageCollectPolicy != nil {
		opts = garbageCollectPolicy.FindDeleteOption(obj)
	}
	if err := cli.Delete(_ctx, obj, opts...); err != nil && !kerrors.IsNotFound(err) {
		return errors.Wrapf(err, "failed to delete resource %s", mr.ResourceKey())
	}
	return nil
}

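// checkDependentComponent returns the names of the components that depend on
// the component owning mr, either explicitly via dependsOn or implicitly by
// consuming one of its outputs as an input.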
func (h *gcHandler) checkDependentComponent(mr v1beta1.ManagedResource) []string {
	dependent := make([]string, 0)
	outputs := make([]string, 0)
	for _, comp := range h.app.Spec.Components {
		if comp.Name == mr.Component {
			for _, output := range comp.Outputs {
				outputs = append(outputs, output.Name)
			}
		} else {
			for _, dependsOn := range comp.DependsOn {
				if dependsOn == mr.Component {
					dependent = append(dependent, comp.Name)
					break
				}
			}
		}
	}
	for _, comp := range h.app.Spec.Components {
		for _, input := range comp.Inputs {
			if slices.Contains(outputs, input.From) {
				dependent = append(dependent, comp.Name)
			}
		}
	}
	return dependent
}

func (h *gcHandler) Finalize(ctx context.Context) error {
	cb := h.monitor("finalize")
	defer cb()
	for _, rt := range append(h._historyRTs, h._currentRT, h._rootRT) {
		if rt != nil && rt.GetDeletionTimestamp() != nil && meta.FinalizerExists(rt, resourcetracker.Finalizer) {
			if err := h.recycleResourceTracker(ctx, rt); err != nil {
				return err
			}
		}
	}
	return nil
}

func (h *gcHandler) GarbageCollectComponentRevisionResourceTracker(ctx context.Context) error {
	cb := h.monitor("comp-rev")
	defer cb()
	if h._crRT == nil {
		return nil
	}
	inUseComponents := map[string]bool{}
	for _, entry := range h.cache.m.Data() {
		for _, rt := range entry.usedBy {
			if rt.GetDeletionTimestamp() == nil || len(rt.GetFinalizers()) != 0 {
				inUseComponents[entry.mr.ComponentKey()] = true
			}
		}
	}
	var managedResources []v1beta1.ManagedResource
	for _, cr := range h._crRT.Spec.ManagedResources { // legacy code for rollout-plan
		_ctx := multicluster.ContextWithClusterName(ctx, cr.Cluster)
		_ctx = auth.ContextWithUserInfo(_ctx, h.app)
		if _, exists := inUseComponents[cr.ComponentKey()]; !exists {
			_cr := &appsv1.ControllerRevision{}
			err := h.Client.Get(_ctx, cr.NamespacedName(), _cr)
			if err != nil && !multicluster.IsNotFoundOrClusterNotExists(err) {
				return errors.Wrapf(err, "failed to get component revision %s", cr.ResourceKey())
			}
			if err == nil {
				if err = h.Client.Delete(_ctx, _cr); err != nil && !kerrors.IsNotFound(err) {
					return errors.Wrapf(err, "failed to delete component revision %s", cr.ResourceKey())
				}
			}
		} else {
			managedResources = append(managedResources, cr)
		}
	}
	h._crRT.Spec.ManagedResources = managedResources
	if len(managedResources) == 0 && h._crRT.GetDeletionTimestamp() != nil {
		meta.RemoveFinalizer(h._crRT, resourcetracker.Finalizer)
	}
	if err := h.Client.Update(ctx, h._crRT); err != nil {
		return errors.Wrapf(err, "failed to update controllerrevision RT %s", h._crRT.Name)
	}
	return nil
}

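// velaVersionNumberToUpgradeResourceTracker is the first KubeVela version that
// manages applications with the new (typed) ResourceTrackers; applications
// annotated with an older version may still own legacy ResourceTrackers that
// need to be cleaned up.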
const velaVersionNumberToUpgradeResourceTracker = "v1.2.0"

func (h *gcHandler) GarbageCollectLegacyResourceTrackers(ctx context.Context) error {
	// skip legacy gc if the controller does not enable this feature
	if !utilfeature.DefaultMutableFeatureGate.Enabled(features.LegacyResourceTrackerGC) {
		return nil
	}
	// skip legacy gc if the application is not yet handled by a new-version resourcetracker
	if h.app.GetDeletionTimestamp() == nil && h.resourceKeeper._currentRT == nil {
		return nil
	}
	// check the app version; nothing to do if it is already at or above the upgrade version
	velaVersionToUpgradeResourceTracker, _ := version.NewVersion(velaVersionNumberToUpgradeResourceTracker)
	var currentVersionNumber string
	if annotations := h.app.GetAnnotations(); annotations != nil && annotations[oam.AnnotationKubeVelaVersion] != "" {
		currentVersionNumber = annotations[oam.AnnotationKubeVelaVersion]
	}
	currentVersion, err := version.NewVersion(currentVersionNumber)
	if err == nil && velaVersionToUpgradeResourceTracker.LessThanOrEqual(currentVersion) {
		return nil
	}
	// remove legacy ResourceTrackers from every cluster the app has touched
	clusters := map[string]bool{multicluster.ClusterLocalName: true}
	for _, rsc := range h.app.Status.AppliedResources {
		if rsc.Cluster != "" {
			clusters[rsc.Cluster] = true
		}
	}
	for _, policy := range h.app.Spec.Policies {
		if policy.Type == v1alpha1.EnvBindingPolicyType && policy.Properties != nil {
			spec := &v1alpha1.EnvBindingSpec{}
			if err = json.Unmarshal(policy.Properties.Raw, &spec); err == nil {
				for _, env := range spec.Envs {
					if env.Placement.ClusterSelector != nil && env.Placement.ClusterSelector.Name != "" {
						clusters[env.Placement.ClusterSelector.Name] = true
					}
				}
			}
		}
	}
	for cluster := range clusters {
		_ctx := multicluster.ContextWithClusterName(ctx, cluster)
		rts := &unstructured.UnstructuredList{}
		rts.SetGroupVersionKind(v1beta1.SchemeGroupVersion.WithKind("ResourceTrackerList"))
		if err = h.Client.List(_ctx, rts, client.MatchingLabels(map[string]string{
			oam.LabelAppName:      h.app.Name,
			oam.LabelAppNamespace: h.app.Namespace,
		})); err != nil {
			if strings.Contains(err.Error(), "could not find the requested resource") {
				continue
			}
			return errors.Wrapf(err, "failed to list resource trackers for app %s/%s in cluster %s", h.app.Namespace, h.app.Name, cluster)
		}
		for _, rt := range rts.Items {
			// legacy resourcetrackers carry no spec.type; typed ones are kept
			if s, exists, _ := unstructured.NestedString(rt.Object, "spec", "type"); !exists || s == "" {
				if err = h.Client.Delete(_ctx, rt.DeepCopy()); err != nil {
					return errors.Wrapf(err, "failed to delete legacy resource tracker %s for app %s/%s in cluster %s", rt.GetName(), h.app.Namespace, h.app.Name, cluster)
				}
			}
		}
	}
	// upgrade the recorded app version so legacy gc can be skipped next time
	app := &v1beta1.Application{}
	if err = h.Client.Get(ctx, client.ObjectKeyFromObject(h.app), app); err != nil {
		return errors.Wrapf(err, "failed to get app %s/%s for upgrade version", h.app.Namespace, h.app.Name)
	}
	if _, err = version.NewVersion(version2.VelaVersion); err != nil {
		metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, velaVersionNumberToUpgradeResourceTracker)
	} else {
		metav1.SetMetaDataAnnotation(&app.ObjectMeta, oam.AnnotationKubeVelaVersion, version2.VelaVersion)
	}
	if err = h.Client.Update(ctx, app); err != nil {
		return errors.Wrapf(err, "failed to upgrade app %s/%s", h.app.Namespace, h.app.Name)
	}
	h.app.ObjectMeta = app.ObjectMeta
	return nil
}