github.com/oam-dev/kubevela@v1.9.11/pkg/controller/core.oam.dev/v1beta1/application/apply.go

/*
Copyright 2021 The KubeVela Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package application

import (
	"context"
	"sync"

	"github.com/kubevela/workflow/pkg/cue/packages"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"

	monitorContext "github.com/kubevela/pkg/monitor/context"
	pkgmulticluster "github.com/kubevela/pkg/multicluster"
	terraformtypes "github.com/oam-dev/terraform-controller/api/types"
	terraforv1beta1 "github.com/oam-dev/terraform-controller/api/v1beta1"
	terraforv1beta2 "github.com/oam-dev/terraform-controller/api/v1beta2"

	"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
	"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
	"github.com/oam-dev/kubevela/apis/types"
	"github.com/oam-dev/kubevela/pkg/appfile"
	"github.com/oam-dev/kubevela/pkg/monitor/metrics"
	"github.com/oam-dev/kubevela/pkg/multicluster"
	"github.com/oam-dev/kubevela/pkg/oam"
	"github.com/oam-dev/kubevela/pkg/oam/util"
	"github.com/oam-dev/kubevela/pkg/resourcekeeper"
)

// AppHandler handles application reconciliation
type AppHandler struct {
	client.Client
	pd *packages.PackageDiscover

	app            *v1beta1.Application
	currentAppRev  *v1beta1.ApplicationRevision
	latestAppRev   *v1beta1.ApplicationRevision
	resourceKeeper resourcekeeper.ResourceKeeper

	isNewRevision  bool
	currentRevHash string

	services         []common.ApplicationComponentStatus
	appliedResources []common.ClusterObjectReference
	deletedResources []common.ClusterObjectReference

	mu sync.Mutex
}

// NewAppHandler creates a new application handler
func NewAppHandler(ctx context.Context, r *Reconciler, app *v1beta1.Application) (*AppHandler, error) {
	if ctx, ok := ctx.(monitorContext.Context); ok {
		subCtx := ctx.Fork("create-app-handler", monitorContext.DurationMetric(func(v float64) {
			metrics.AppReconcileStageDurationHistogram.WithLabelValues("create-app-handler").Observe(v)
		}))
		defer subCtx.Commit("finish create appHandler")
	}
	resourceHandler, err := resourcekeeper.NewResourceKeeper(ctx, r.Client, app)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create resourceKeeper")
	}
	return &AppHandler{
		Client:         r.Client,
		pd:             r.pd,
		app:            app,
		resourceKeeper: resourceHandler,
	}, nil
}

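// A minimal usage sketch (not part of the upstream file): the application
// reconciler constructs one handler per reconcile pass. "r" and "app" are
// assumed to be the Reconciler and the Application being reconciled.
//
//	handler, err := NewAppHandler(ctx, r, app)
//	if err != nil {
//		return reconcile.Result{}, err
//	}
//	// handler now carries the client, package discover, and resource keeper
//	// needed to dispatch manifests and record status.
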
// Dispatch applies the given manifests to Kubernetes and records each of
// them as an applied resource.
func (h *AppHandler) Dispatch(ctx context.Context, cluster string, owner string, manifests ...*unstructured.Unstructured) error {
	manifests = multicluster.ResourcesWithClusterName(cluster, manifests...)
	if err := h.resourceKeeper.Dispatch(ctx, manifests, nil); err != nil {
		return err
	}
	for _, mf := range manifests {
		if mf == nil {
			continue
		}
		// Prefer the cluster recorded on the manifest itself, if any.
		if c := oam.GetCluster(mf); c != "" {
			cluster = c
		}
		ref := common.ClusterObjectReference{
			Cluster: cluster,
			Creator: owner,
			ObjectReference: corev1.ObjectReference{
				Name:       mf.GetName(),
				Namespace:  mf.GetNamespace(),
				Kind:       mf.GetKind(),
				APIVersion: mf.GetAPIVersion(),
			},
		}
		h.addAppliedResource(false, ref)
	}
	return nil
}

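// A hedged example of dispatching a rendered manifest ("deploy" is a
// hypothetical *unstructured.Unstructured; an empty cluster name stands for
// the local cluster unless the manifest carries its own cluster label).
// common.WorkflowResourceCreator is assumed to be the workflow-side creator
// constant, mirroring common.PolicyResourceCreator used further below.
// Delete, next, is the symmetric operation for a single manifest.
//
//	deploy := &unstructured.Unstructured{}
//	deploy.SetAPIVersion("apps/v1")
//	deploy.SetKind("Deployment")
//	deploy.SetName("demo")
//	deploy.SetNamespace("default")
//	if err := h.Dispatch(ctx, "", common.WorkflowResourceCreator, deploy); err != nil {
//		// handle the dispatch error
//	}
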
// Delete removes the given manifest from Kubernetes and records the deletion.
func (h *AppHandler) Delete(ctx context.Context, cluster string, owner string, manifest *unstructured.Unstructured) error {
	manifests := multicluster.ResourcesWithClusterName(cluster, manifest)
	if err := h.resourceKeeper.Delete(ctx, manifests); err != nil {
		return err
	}
	ref := common.ClusterObjectReference{
		Cluster: cluster,
		Creator: owner,
		ObjectReference: corev1.ObjectReference{
			Name:       manifest.GetName(),
			Namespace:  manifest.GetNamespace(),
			Kind:       manifest.GetKind(),
			APIVersion: manifest.GetAPIVersion(),
		},
	}
	h.deleteAppliedResource(ref)
	return nil
}

// addAppliedResource records applied resources; updates to the shared slices
// are guarded by the handler's mutex. If previous is true and a reference was
// earlier recorded as deleted, the deletion record is dropped instead.
func (h *AppHandler) addAppliedResource(previous bool, refs ...common.ClusterObjectReference) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, ref := range refs {
		if previous {
			for i, deleted := range h.deletedResources {
				if deleted.Equal(ref) {
					h.deletedResources = removeResources(h.deletedResources, i)
					return
				}
			}
		}

		found := false
		for _, current := range h.appliedResources {
			if current.Equal(ref) {
				found = true
				break
			}
		}
		if !found {
			h.appliedResources = append(h.appliedResources, ref)
		}
	}
}

// deleteAppliedResource removes ref from the applied resources; if the
// resource was never recorded as applied, it is tracked as deleted instead.
func (h *AppHandler) deleteAppliedResource(ref common.ClusterObjectReference) {
	delIndex := -1
	for i, current := range h.appliedResources {
		if current.Equal(ref) {
			delIndex = i
		}
	}
	if delIndex < 0 {
		isDeleted := false
		for _, deleted := range h.deletedResources {
			if deleted.Equal(ref) {
				isDeleted = true
				break
			}
		}
		if !isDeleted {
			h.deletedResources = append(h.deletedResources, ref)
		}
	} else {
		h.appliedResources = removeResources(h.appliedResources, delIndex)
	}
}

// removeResources deletes the element at index by swapping in the last
// element and truncating the slice; it does not preserve order.
func removeResources(elements []common.ClusterObjectReference, index int) []common.ClusterObjectReference {
	elements[index] = elements[len(elements)-1]
	return elements[:len(elements)-1]
}

// getServiceStatus returns the recorded status of the specified component;
// if none has been recorded yet, the given status is returned unchanged.
func (h *AppHandler) getServiceStatus(svc common.ApplicationComponentStatus) common.ApplicationComponentStatus {
	for i := range h.services {
		current := h.services[i]
		if current.Equal(svc) {
			return current
		}
	}
	return svc
}

// addServiceStatus records the status of a whole component; updates to the
// shared services slice are guarded by the handler's mutex. If cover is true,
// an existing record for the same component is overwritten.
func (h *AppHandler) addServiceStatus(cover bool, svcs ...common.ApplicationComponentStatus) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, svc := range svcs {
		found := false
		for i := range h.services {
			current := h.services[i]
			if current.Equal(svc) {
				if cover {
					h.services[i] = svc
				}
				found = true
				break
			}
		}
		if !found {
			h.services = append(h.services, svc)
		}
	}
}

// collectTraitHealthStatus collects the health status of a trait
func (h *AppHandler) collectTraitHealthStatus(comp *appfile.Component, tr *appfile.Trait, appRev *v1beta1.ApplicationRevision, overrideNamespace string) (common.ApplicationTraitStatus, []*unstructured.Unstructured, error) {
	// Restore the original cluster in the component context when done, since
	// ControlPlaneOnly traits temporarily switch it to the local cluster.
	defer func(clusterName string) {
		comp.Ctx.SetCtx(pkgmulticluster.WithCluster(comp.Ctx.GetCtx(), clusterName))
	}(multicluster.ClusterNameInContext(comp.Ctx.GetCtx()))

	var (
		pCtx        = comp.Ctx
		appName     = appRev.Spec.Application.Name
		traitStatus = common.ApplicationTraitStatus{
			Type:    tr.Name,
			Healthy: true,
		}
		traitOverrideNamespace = overrideNamespace
		err                    error
	)
	if tr.FullTemplate.TraitDefinition.Spec.ControlPlaneOnly {
		traitOverrideNamespace = appRev.GetNamespace()
		pCtx.SetCtx(pkgmulticluster.WithCluster(pCtx.GetCtx(), pkgmulticluster.Local))
	}
	_accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, traitOverrideNamespace)
	templateContext, err := tr.GetTemplateContext(pCtx, h.Client, _accessor)
	if err != nil {
		return common.ApplicationTraitStatus{}, nil, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, get template context error", appName, comp.Name, tr.Name)
	}
	if ok, err := tr.EvalHealth(templateContext); !ok || err != nil {
		traitStatus.Healthy = false
	}
	traitStatus.Message, err = tr.EvalStatus(templateContext)
	if err != nil {
		return common.ApplicationTraitStatus{}, nil, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appName, comp.Name, tr.Name)
	}
	return traitStatus, extractOutputs(templateContext), nil
}

// collectWorkloadHealthStatus collects the health status of a workload
func (h *AppHandler) collectWorkloadHealthStatus(ctx context.Context, comp *appfile.Component, appRev *v1beta1.ApplicationRevision, status *common.ApplicationComponentStatus, accessor util.NamespaceAccessor) (bool, *unstructured.Unstructured, []*unstructured.Unstructured, error) {
	var output *unstructured.Unstructured
	var outputs []*unstructured.Unstructured
	var (
		appName  = appRev.Spec.Application.Name
		isHealth = true
	)
	if comp.CapabilityCategory == types.TerraformCategory {
		var configuration terraforv1beta2.Configuration
		if err := h.Client.Get(ctx, client.ObjectKey{Name: comp.Name, Namespace: accessor.Namespace()}, &configuration); err != nil {
			if kerrors.IsNotFound(err) {
				// Fall back to the legacy v1beta1 Configuration when no
				// v1beta2 object exists.
				var legacyConfiguration terraforv1beta1.Configuration
				if err := h.Client.Get(ctx, client.ObjectKey{Name: comp.Name, Namespace: accessor.Namespace()}, &legacyConfiguration); err != nil {
					return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, comp.Name)
				}
				isHealth = setStatus(status, legacyConfiguration.Status.ObservedGeneration, legacyConfiguration.Generation,
					legacyConfiguration.GetLabels(), appRev.Name, legacyConfiguration.Status.Apply.State, legacyConfiguration.Status.Apply.Message)
			} else {
				return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, comp.Name)
			}
		} else {
			isHealth = setStatus(status, configuration.Status.ObservedGeneration, configuration.Generation, configuration.GetLabels(),
				appRev.Name, configuration.Status.Apply.State, configuration.Status.Apply.Message)
		}
	} else {
		templateContext, err := comp.GetTemplateContext(comp.Ctx, h.Client, accessor)
		if err != nil {
			return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, get template context error", appName, comp.Name)
		}
		if ok, err := comp.EvalHealth(templateContext); !ok || err != nil {
			isHealth = false
		}
		status.Healthy = isHealth
		status.Message, err = comp.EvalStatus(templateContext)
		if err != nil {
			return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, comp.Name)
		}
		output, outputs = extractOutputAndOutputs(templateContext)
	}
	return isHealth, output, outputs, nil
}

// nolint
// collectHealthStatus collects the health status of a component, including
// the workload itself and all of its traits.
func (h *AppHandler) collectHealthStatus(ctx context.Context, comp *appfile.Component, appRev *v1beta1.ApplicationRevision, overrideNamespace string, skipWorkload bool, traitFilters ...TraitFilter) (*common.ApplicationComponentStatus, *unstructured.Unstructured, []*unstructured.Unstructured, bool, error) {
	output := new(unstructured.Unstructured)
	outputs := make([]*unstructured.Unstructured, 0)
	accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, overrideNamespace)
	var (
		status = common.ApplicationComponentStatus{
			Name:               comp.Name,
			WorkloadDefinition: comp.FullTemplate.Reference.Definition,
			Healthy:            true,
			Namespace:          accessor.Namespace(),
			Cluster:            multicluster.ClusterNameInContext(ctx),
		}
		isHealth = true
		err      error
	)

	status = h.getServiceStatus(status)
	if !skipWorkload {
		isHealth, output, outputs, err = h.collectWorkloadHealthStatus(ctx, comp, appRev, &status, accessor)
		if err != nil {
			return nil, nil, nil, false, err
		}
	}

	var traitStatusList []common.ApplicationTraitStatus
collectNext:
	for _, tr := range comp.Traits {
		for _, filter := range traitFilters {
			// Skip the trait if any filter rejects it.
			if filter(*tr) {
				continue collectNext
			}
		}

		traitStatus, _outputs, err := h.collectTraitHealthStatus(comp, tr, appRev, overrideNamespace)
		if err != nil {
			return nil, nil, nil, false, err
		}
		outputs = append(outputs, _outputs...)

		isHealth = isHealth && traitStatus.Healthy
		if status.Message == "" && traitStatus.Message != "" {
			status.Message = traitStatus.Message
		}
		traitStatusList = append(traitStatusList, traitStatus)

		// Drop any stale record for this trait type; the freshly collected
		// statuses are appended below.
		var oldStatus []common.ApplicationTraitStatus
		for _, _trait := range status.Traits {
			if _trait.Type != tr.Name {
				oldStatus = append(oldStatus, _trait)
			}
		}
		status.Traits = oldStatus
	}
	status.Traits = append(status.Traits, traitStatusList...)
	h.addServiceStatus(true, status)
	return &status, output, outputs, isHealth, nil
}

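// Hedged sketch of a typical call (the real call sites live elsewhere in the
// controller; names are illustrative). A nil error with healthy == false
// means collection succeeded but the component is not yet ready:
//
//	status, output, outputs, healthy, err := h.collectHealthStatus(ctx, comp, appRev, "", false)
//	if err != nil {
//		// propagate: template context or status evaluation failed
//	}
//	_, _, _, _ = status, output, outputs, healthy
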
// setStatus updates the component status from a Terraform Configuration's
// state and reports whether the component is healthy.
func setStatus(status *common.ApplicationComponentStatus, observedGeneration, generation int64, labels map[string]string,
	appRevName string, state terraformtypes.ConfigurationState, message string) bool {
	isLatest := func() bool {
		if observedGeneration != 0 && observedGeneration != generation {
			return false
		}
		// Use AppRevision to avoid getting the configuration before the patch.
		if v, ok := labels[oam.LabelAppRevision]; ok {
			if v != appRevName {
				return false
			}
		}
		return true
	}
	status.Message = message
	if !isLatest() || state != terraformtypes.Available {
		status.Healthy = false
		return false
	}
	status.Healthy = true
	return true
}

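// A hedged illustration of the decision above (all values made up): a
// Configuration counts as healthy only when its observed generation is
// current, its app-revision label matches, and its apply state is Available.
//
//	var status common.ApplicationComponentStatus
//	healthy := setStatus(&status, 3, 3,
//		map[string]string{oam.LabelAppRevision: "demo-v1"},
//		"demo-v1", terraformtypes.Available, "resources are ready")
//	// healthy == true and status.Healthy == true; a stale generation, a
//	// mismatched revision label, or a non-Available state flips both to false.
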
// ApplyPolicies renders the policies in the appfile into manifests and
// dispatches them. Note that built-in policies such as apply-once and
// shared-resource are not handled here.
func (h *AppHandler) ApplyPolicies(ctx context.Context, af *appfile.Appfile) error {
	if ctx, ok := ctx.(monitorContext.Context); ok {
		subCtx := ctx.Fork("apply-policies", monitorContext.DurationMetric(func(v float64) {
			metrics.AppReconcileStageDurationHistogram.WithLabelValues("apply-policies").Observe(v)
		}))
		defer subCtx.Commit("finish apply policies")
	}
	policyManifests, err := af.GeneratePolicyManifests(ctx)
	if err != nil {
		return errors.Wrapf(err, "failed to render policy manifests")
	}
	if len(policyManifests) > 0 {
		for _, policyManifest := range policyManifests {
			util.AddLabels(policyManifest, map[string]string{
				oam.LabelAppName:      h.app.GetName(),
				oam.LabelAppNamespace: h.app.GetNamespace(),
			})
		}
		if err = h.Dispatch(ctx, "", common.PolicyResourceCreator, policyManifests...); err != nil {
			return errors.Wrapf(err, "failed to dispatch policy manifests")
		}
	}
	return nil
}

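// Hedged sketch of the expected call from the reconcile flow ("handler" and
// "af" are assumed to be the AppHandler and the parsed Appfile in scope):
//
//	if err := handler.ApplyPolicies(ctx, af); err != nil {
//		return reconcile.Result{}, err
//	}
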
// extractOutputAndOutputs returns the main workload under the template
// context's "output" key together with the auxiliary resources.
func extractOutputAndOutputs(templateContext map[string]interface{}) (*unstructured.Unstructured, []*unstructured.Unstructured) {
	output := new(unstructured.Unstructured)
	if templateContext["output"] != nil {
		output = &unstructured.Unstructured{Object: templateContext["output"].(map[string]interface{})}
	}
	outputs := extractOutputs(templateContext)
	return output, outputs
}

// extractOutputs returns the auxiliary resources stored under the template
// context's "outputs" key.
func extractOutputs(templateContext map[string]interface{}) []*unstructured.Unstructured {
	outputs := make([]*unstructured.Unstructured, 0)
	if templateContext["outputs"] != nil {
		for _, v := range templateContext["outputs"].(map[string]interface{}) {
			outputs = append(outputs, &unstructured.Unstructured{Object: v.(map[string]interface{})})
		}
	}
	return outputs
}
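
// The extractors above assume the evaluated template context stores the main
// workload under "output" and each named auxiliary resource under "outputs",
// all as map[string]interface{}. A minimal sketch of that shape (illustrative
// values only):
//
//	templateContext := map[string]interface{}{
//		"output": map[string]interface{}{
//			"apiVersion": "apps/v1",
//			"kind":       "Deployment",
//		},
//		"outputs": map[string]interface{}{
//			"service": map[string]interface{}{
//				"apiVersion": "v1",
//				"kind":       "Service",
//			},
//		},
//	}
//	main, aux := extractOutputAndOutputs(templateContext)
//	_, _ = main, aux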