github.com/latiif/helm@v2.15.0+incompatible/pkg/kube/client.go

/*
Copyright The Helm Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kube // import "k8s.io/helm/pkg/kube"

import (
	"bytes"
	"context"
	"encoding/json"
	goerrors "errors"
	"fmt"
	"io"
	"log"
	"sort"
	"strings"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/api/meta"

	jsonpatch "github.com/evanphx/json-patch"
	appsv1 "k8s.io/api/apps/v1"
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	batch "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	extv1beta1 "k8s.io/api/extensions/v1beta1"
	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/cli-runtime/pkg/genericclioptions"
	"k8s.io/cli-runtime/pkg/resource"
	"k8s.io/client-go/kubernetes/scheme"
	cachetools "k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/kubectl/cmd/get"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/kubectl/validation"
)

// MissingGetHeader is added to Get's output when a resource is not found.
const MissingGetHeader = "==> MISSING\nKIND\t\tNAME\n"

// ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found.
var ErrNoObjectsVisited = goerrors.New("no objects visited")

var metadataAccessor = meta.NewAccessor()

// Client represents a client capable of communicating with the Kubernetes API.
type Client struct {
	cmdutil.Factory
	Log func(string, ...interface{})
}

// New creates a new Client.
func New(getter genericclioptions.RESTClientGetter) *Client {
	if getter == nil {
		getter = genericclioptions.NewConfigFlags(true)
	}

	err := apiextv1beta1.AddToScheme(scheme.Scheme)
	if err != nil {
		panic(err)
	}

	return &Client{
		Factory: cmdutil.NewFactory(getter),
		Log:     nopLogger,
	}
}
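
// A minimal caller-side sketch (not part of the original file): construct a
// Client with the default kubeconfig resolution and attach a logger. Passing
// a nil getter falls back to genericclioptions.NewConfigFlags(true).
//
//	c := kube.New(nil)
//	c.Log = func(format string, args ...interface{}) {
//		log.Printf(format, args...)
//	}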

var nopLogger = func(_ string, _ ...interface{}) {}

// ResourceActorFunc performs an action on a single resource.
type ResourceActorFunc func(*resource.Info) error

// Create creates Kubernetes resources from an io.Reader.
//
// Namespace will set the namespace.
func (c *Client) Create(namespace string, reader io.Reader, timeout int64, shouldWait bool) error {
	client, err := c.KubernetesClientSet()
	if err != nil {
		return err
	}
	if err := ensureNamespace(client, namespace); err != nil {
		return err
	}
	c.Log("building resources from manifest")
	infos, buildErr := c.BuildUnstructured(namespace, reader)
	if buildErr != nil {
		return buildErr
	}
	c.Log("creating %d resource(s)", len(infos))
	if err := perform(infos, createResource); err != nil {
		return err
	}
	if shouldWait {
		return c.waitForResources(time.Duration(timeout)*time.Second, infos)
	}
	return nil
}
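
// A usage sketch (the manifest contents are assumed, not from the original
// file): create everything in a multi-document YAML manifest in the "default"
// namespace and wait up to 300 seconds for the resources to become ready.
//
//	var manifest string // multi-document YAML, e.g. rendered chart templates
//	if err := c.Create("default", strings.NewReader(manifest), 300, true); err != nil {
//		// handle create/wait failure
//	}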

func (c *Client) newBuilder(namespace string, reader io.Reader) *resource.Result {
	return c.NewBuilder().
		ContinueOnError().
		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
		Schema(c.validator()).
		NamespaceParam(namespace).
		DefaultNamespace().
		Stream(reader, "").
		Flatten().
		Do()
}

func (c *Client) validator() validation.Schema {
	schema, err := c.Validator(true)
	if err != nil {
		c.Log("warning: failed to load schema: %s", err)
	}
	return schema
}

// BuildUnstructured reads Kubernetes objects and returns unstructured infos.
func (c *Client) BuildUnstructured(namespace string, reader io.Reader) (Result, error) {
	var result Result

	result, err := c.NewBuilder().
		Unstructured().
		ContinueOnError().
		NamespaceParam(namespace).
		DefaultNamespace().
		Stream(reader, "").
		Flatten().
		Do().Infos()
	return result, scrubValidationError(err)
}

// Validate reads Kubernetes manifests and validates the content.
//
// This function does not actually do schema validation of manifests. Adding
// validation now breaks existing clients of helm: https://github.com/helm/helm/issues/5750
func (c *Client) Validate(namespace string, reader io.Reader) error {
	_, err := c.NewBuilder().
		Unstructured().
		ContinueOnError().
		NamespaceParam(namespace).
		DefaultNamespace().
		// Schema(c.validator()). // No schema validation
		Stream(reader, "").
		Flatten().
		Do().Infos()
	return scrubValidationError(err)
}

// Build validates Kubernetes objects and returns resource Infos from an io.Reader.
func (c *Client) Build(namespace string, reader io.Reader) (Result, error) {
	var result Result
	result, err := c.newBuilder(namespace, reader).Infos()
	return result, scrubValidationError(err)
}

// resourceInfoToObject returns the resource info converted to an internal
// object, falling back to the original object when conversion fails.
func resourceInfoToObject(info *resource.Info, c *Client) runtime.Object {
	internalObj, err := asInternal(info)
	if err != nil {
		// If the problem is just that the resource is not registered, don't print any
		// error. This is normal for custom resources.
		if !runtime.IsNotRegisteredError(err) {
			c.Log("Warning: conversion to internal type failed: %v", err)
		}
		// Add the unstructured object in this situation. It will still get listed, just
		// with less information.
		return info.Object
	}

	return internalObj
}

func sortByKey(objs map[string](map[string]runtime.Object)) []string {
	var keys []string
	// Create a simple slice, so we can sort it
	for key := range objs {
		keys = append(keys, key)
	}
	// Sort alphabetically by version/kind keys
	sort.Strings(keys)
	return keys
}

// Get gets Kubernetes resources as a pretty-printed string.
//
// Namespace will set the namespace.
func (c *Client) Get(namespace string, reader io.Reader) (string, error) {
	// Since we don't know what order the objects come in, let's group them by the types and then sort them, so
	// that when we print them, they come out looking good (headers apply to subgroups, etc.).
	objs := make(map[string](map[string]runtime.Object))
	mux := &sync.Mutex{}

	infos, err := c.BuildUnstructured(namespace, reader)
	if err != nil {
		return "", err
	}

	var objPods = make(map[string][]v1.Pod)

	missing := []string{}
	err = perform(infos, func(info *resource.Info) error {
		mux.Lock()
		defer mux.Unlock()
		c.Log("Doing get for %s: %q", info.Mapping.GroupVersionKind.Kind, info.Name)
		if err := info.Get(); err != nil {
			c.Log("WARNING: Failed Get for resource %q: %s", info.Name, err)
			missing = append(missing, fmt.Sprintf("%v\t\t%s", info.Mapping.Resource, info.Name))
			return nil
		}

		// Use APIVersion/Kind as grouping mechanism. I'm not sure if you can have multiple
		// versions per cluster, but this certainly won't hurt anything, so let's be safe.
		gvk := info.ResourceMapping().GroupVersionKind
		vk := gvk.Version + "/" + gvk.Kind

		// Initialize map. The main map groups resources based on version/kind.
		// The second level is a simple 'Name' to 'Object' map, which will help sort
		// the individual resources later.
		if objs[vk] == nil {
			objs[vk] = make(map[string]runtime.Object)
		}
		// Map the resource name to the underlying info object
		objs[vk][info.Name] = resourceInfoToObject(info, c)

		// Get the related pods
		objPods, err = c.getSelectRelationPod(info, objPods)
		if err != nil {
			c.Log("Warning: failed to get the related pods: %s", err.Error())
		}

		return nil
	})
	if err != nil {
		return "", err
	}

	// Add the related pods (objPods) to the objs map
	for key, podItems := range objPods {
		for i := range podItems {
			pod := &core.Pod{}

			legacyscheme.Scheme.Convert(&podItems[i], pod, nil)
			if objs[key+"(related)"] == nil {
				objs[key+"(related)"] = make(map[string]runtime.Object)
			}
			objs[key+"(related)"][pod.ObjectMeta.Name] = runtime.Object(pod)
		}
	}

	// Ok, now we have all the objects grouped by types (say, by v1/Pod, v1/Service, etc.), so
	// spin through them and print them. Printer is cool since it prints the header only when
	// an object type changes, so we can just rely on that. Problem is it doesn't seem to keep
	// track of tab widths.
	buf := new(bytes.Buffer)
	printFlags := get.NewHumanPrintFlags()

	// Sort alphabetically by version/kind keys
	vkKeys := sortByKey(objs)
	// Iterate on sorted version/kind types
	for _, t := range vkKeys {
		if _, err = fmt.Fprintf(buf, "==> %s\n", t); err != nil {
			return "", err
		}
		typePrinter, _ := printFlags.ToPrinter("")

		var sortedResources []string
		for resource := range objs[t] {
			sortedResources = append(sortedResources, resource)
		}
		sort.Strings(sortedResources)

		// Now that each individual resource within the specific version/kind
		// is sorted, we print each resource using the k8s printer
		vk := objs[t]
		for _, resourceName := range sortedResources {
			if err := typePrinter.PrintObj(vk[resourceName], buf); err != nil {
				c.Log("failed to print object type %s, object: %q :\n %v", t, resourceName, err)
				return "", err
			}
		}
		if _, err := buf.WriteString("\n"); err != nil {
			return "", err
		}
	}
	if len(missing) > 0 {
		buf.WriteString(MissingGetHeader)
		for _, s := range missing {
			fmt.Fprintln(buf, s)
		}
	}
	return buf.String(), nil
}
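
// Output shape (illustrative, with assumed resource names): resources are
// grouped under "==> version/Kind" headers, related pods appear under a
// "(related)" suffix, and unresolvable resources are collected under the
// MissingGetHeader block, e.g.:
//
//	==> v1/Pod(related)
//	NAME        READY  STATUS   RESTARTS  AGE
//	mypod-1234  1/1    Running  0         1m
//
//	==> v1/Service
//	NAME   TYPE       CLUSTER-IP  EXTERNAL-IP  PORT(S)  AGE
//	mysvc  ClusterIP  10.0.0.1    <none>       80/TCP   1m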

// Update reads the current configuration and a target configuration from io.Reader
// and creates resources that don't already exist, updates resources that have been modified
// in the target configuration, and deletes resources from the current configuration that are
// not present in the target configuration.
//
// Namespace will set the namespaces.
//
// Deprecated: use UpdateWithOptions instead.
func (c *Client) Update(namespace string, originalReader, targetReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error {
	return c.UpdateWithOptions(namespace, originalReader, targetReader, UpdateOptions{
		Force:      force,
		Recreate:   recreate,
		Timeout:    timeout,
		ShouldWait: shouldWait,
	})
}

// UpdateOptions provides options to control update behavior
type UpdateOptions struct {
	Force      bool
	Recreate   bool
	Timeout    int64
	ShouldWait bool
	// CleanupOnFail allows deletion of new resources created during this update when the update fails
	CleanupOnFail bool
}
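
// A caller-side sketch (the readers are assumed to hold the previous and new
// rendered manifests): upgrade a release's resources, waiting up to 300
// seconds and deleting newly created resources if the update fails.
//
//	err := c.UpdateWithOptions("default", strings.NewReader(oldManifest), strings.NewReader(newManifest), kube.UpdateOptions{
//		Timeout:       300,
//		ShouldWait:    true,
//		CleanupOnFail: true,
//	})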

// UpdateWithOptions reads the current configuration and a target configuration from io.Reader
// and creates resources that don't already exist, updates resources that have been modified
// in the target configuration, and deletes resources from the current configuration that are
// not present in the target configuration.
//
// Namespace will set the namespaces. UpdateOptions provides additional parameters to control
// update behavior.
func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReader io.Reader, opts UpdateOptions) error {
	original, err := c.BuildUnstructured(namespace, originalReader)
	if err != nil {
		return fmt.Errorf("failed decoding reader into objects: %s", err)
	}

	c.Log("building resources from updated manifest")
	target, err := c.BuildUnstructured(namespace, targetReader)
	if err != nil {
		return fmt.Errorf("failed decoding reader into objects: %s", err)
	}

	newlyCreatedResources := []*resource.Info{}
	updateErrors := []string{}

	c.Log("checking %d resources for changes", len(target))
	err = target.Visit(func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}

		helper := resource.NewHelper(info.Client, info.Mapping)
		if _, err := helper.Get(info.Namespace, info.Name, info.Export); err != nil {
			if !errors.IsNotFound(err) {
				return fmt.Errorf("could not get information about the resource: %s", err)
			}

			// Since the resource does not exist, create it.
			if err := createResource(info); err != nil {
				return fmt.Errorf("failed to create resource: %s", err)
			}
			newlyCreatedResources = append(newlyCreatedResources, info)

			kind := info.Mapping.GroupVersionKind.Kind
			c.Log("Created a new %s called %q\n", kind, info.Name)
			return nil
		}

		originalInfo := original.Get(info)

		// The resource already exists in the cluster, but it wasn't defined in the previous release.
		// In this case, we consider it to be a resource that was previously un-managed by the release and error out,
		// asking for the user to intervene.
		//
		// See https://github.com/helm/helm/issues/1193 for more info.
		if originalInfo == nil {
			return fmt.Errorf(
				"kind %s with the name %q already exists in the cluster and wasn't defined in the previous release. Before upgrading, please either delete the resource from the cluster or remove it from the chart",
				info.Mapping.GroupVersionKind.Kind,
				info.Name,
			)
		}

		if err := updateResource(c, info, originalInfo.Object, opts.Force, opts.Recreate); err != nil {
			c.Log("error updating the resource %q:\n\t %v", info.Name, err)
			updateErrors = append(updateErrors, err.Error())
		}

		return nil
	})

	cleanupErrors := []string{}

	if opts.CleanupOnFail && (err != nil || len(updateErrors) != 0) {
		c.Log("Cleanup on fail enabled: cleaning up newly created resources due to manifest update failures")
		cleanupErrors = c.cleanup(newlyCreatedResources)
	}

	// goerrors.New is used here so the joined messages are never interpreted
	// as a format string.
	switch {
	case err != nil:
		return goerrors.New(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && "))
	case len(updateErrors) != 0:
		return goerrors.New(strings.Join(append(updateErrors, cleanupErrors...), " && "))
	}

	for _, info := range original.Difference(target) {
		c.Log("Deleting %q in %s...", info.Name, info.Namespace)

		if err := info.Get(); err != nil {
			c.Log("Unable to get obj %q, err: %s", info.Name, err)
		}
		annotations, err := metadataAccessor.Annotations(info.Object)
		if err != nil {
			c.Log("Unable to get annotations on %q, err: %s", info.Name, err)
		}
		if ResourcePolicyIsKeep(annotations) {
			policy := annotations[ResourcePolicyAnno]
			c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, policy)
			continue
		}

		if err := deleteResource(info); err != nil {
			c.Log("Failed to delete %q, err: %s", info.Name, err)
		}
	}
	if opts.ShouldWait {
		err := c.waitForResources(time.Duration(opts.Timeout)*time.Second, target)

		if opts.CleanupOnFail && err != nil {
			c.Log("Cleanup on fail enabled: cleaning up newly created resources due to wait failure during update")
			cleanupErrors = c.cleanup(newlyCreatedResources)
			return goerrors.New(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && "))
		}

		return err
	}
	return nil
}

func (c *Client) cleanup(newlyCreatedResources []*resource.Info) (cleanupErrors []string) {
	for _, info := range newlyCreatedResources {
		kind := info.Mapping.GroupVersionKind.Kind
		c.Log("Deleting newly created %s with the name %q in %s...", kind, info.Name, info.Namespace)
		if err := deleteResource(info); err != nil {
			c.Log("Error deleting newly created %s with the name %q in %s: %s", kind, info.Name, info.Namespace, err)
			cleanupErrors = append(cleanupErrors, err.Error())
		}
	}
	return
}

// Delete deletes Kubernetes resources from an io.Reader.
//
// Namespace will set the namespace.
func (c *Client) Delete(namespace string, reader io.Reader) error {
	return c.DeleteWithTimeout(namespace, reader, 0, false)
}

// DeleteWithTimeout deletes Kubernetes resources from an io.Reader. If shouldWait is true, the function
// will wait for all resources to be deleted from etcd before returning, or until the timeout
// has expired.
//
// Namespace will set the namespace.
func (c *Client) DeleteWithTimeout(namespace string, reader io.Reader, timeout int64, shouldWait bool) error {
	infos, err := c.BuildUnstructured(namespace, reader)
	if err != nil {
		return err
	}
	err = perform(infos, func(info *resource.Info) error {
		c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind)
		err := deleteResource(info)
		return c.skipIfNotFound(err)
	})
	if err != nil {
		return err
	}

	if shouldWait {
		c.Log("Waiting for %d seconds for delete to be completed", timeout)
		return waitUntilAllResourceDeleted(infos, time.Duration(timeout)*time.Second)
	}

	return nil
}
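
// A usage sketch (manifest contents assumed): delete the resources in a
// rendered manifest and block until they are gone or 120 seconds have passed.
//
//	err := c.DeleteWithTimeout("default", strings.NewReader(manifest), 120, true)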

func (c *Client) skipIfNotFound(err error) error {
	if errors.IsNotFound(err) {
		c.Log("%v", err)
		return nil
	}
	return err
}

func waitUntilAllResourceDeleted(infos Result, timeout time.Duration) error {
	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
		allDeleted := true
		err := perform(infos, func(info *resource.Info) error {
			innerErr := info.Get()
			if errors.IsNotFound(innerErr) {
				return nil
			}
			if innerErr != nil {
				return innerErr
			}
			allDeleted = false
			return nil
		})
		if err != nil {
			return false, err
		}
		return allDeleted, nil
	})
}

func (c *Client) watchTimeout(t time.Duration) ResourceActorFunc {
	return func(info *resource.Info) error {
		return c.watchUntilReady(t, info)
	}
}

// WatchUntilReady watches the resources given in the reader, and waits until they are ready.
//
// This function is mainly for hook implementations. It watches for a resource to
// hit a particular milestone. The milestone depends on the Kind.
//
// For most kinds, it checks to see if the resource is marked as Added or Modified
// by the Kubernetes event stream. For some kinds, it does more:
//
// - Jobs: A job is marked "Ready" when it has successfully completed. This is
//   ascertained by watching the Status fields in a job's output.
//
// Handling for other kinds will be added as necessary.
func (c *Client) WatchUntilReady(namespace string, reader io.Reader, timeout int64, shouldWait bool) error {
	infos, err := c.BuildUnstructured(namespace, reader)
	if err != nil {
		return err
	}
	// For jobs, there's also the option to poll c.Jobs(namespace).Get():
	// https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300
	return perform(infos, c.watchTimeout(time.Duration(timeout)*time.Second))
}

// WaitUntilCRDEstablished polls the given CRD until it reaches the established
// state. A CRD needs to reach the established state before CRs can be created.
//
// If a naming conflict condition is found, this function will return an error.
func (c *Client) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error {
	infos, err := c.BuildUnstructured(metav1.NamespaceAll, reader)
	if err != nil {
		return err
	}

	return perform(infos, c.pollCRDEstablished(timeout))
}

func (c *Client) pollCRDEstablished(t time.Duration) ResourceActorFunc {
	return func(info *resource.Info) error {
		return c.pollCRDUntilEstablished(t, info)
	}
}

func (c *Client) pollCRDUntilEstablished(timeout time.Duration, info *resource.Info) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		err := info.Get()
		if err != nil {
			return false, fmt.Errorf("unable to get CRD: %v", err)
		}

		crd := &apiextv1beta1.CustomResourceDefinition{}
		err = scheme.Scheme.Convert(info.Object, crd, nil)
		if err != nil {
			return false, fmt.Errorf("unable to convert to CRD type: %v", err)
		}

		for _, cond := range crd.Status.Conditions {
			switch cond.Type {
			case apiextv1beta1.Established:
				if cond.Status == apiextv1beta1.ConditionTrue {
					return true, nil
				}
			case apiextv1beta1.NamesAccepted:
				if cond.Status == apiextv1beta1.ConditionFalse {
					return false, fmt.Errorf("naming conflict detected for CRD %s", crd.GetName())
				}
			}
		}

		return false, nil
	})
}

func perform(infos Result, fn ResourceActorFunc) error {
	if len(infos) == 0 {
		return ErrNoObjectsVisited
	}

	errs := make(chan error)
	go batchPerform(infos, fn, errs)

	for range infos {
		err := <-errs
		if err != nil {
			return err
		}
	}
	return nil
}

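// batchPerform runs fn concurrently for consecutive resources of the same
// kind; when the kind changes, it waits for the in-flight batch to finish
// before starting the next one. Exactly one error (possibly nil) is sent on
// errs per resource, which is what perform relies on when draining the
// channel.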
func batchPerform(infos Result, fn ResourceActorFunc, errs chan<- error) {
	var kind string
	var wg sync.WaitGroup
	for _, info := range infos {
		currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind
		if kind != currentKind {
			wg.Wait()
			kind = currentKind
		}
		wg.Add(1)
		go func(i *resource.Info) {
			errs <- fn(i)
			wg.Done()
		}(info)
	}
}

func createResource(info *resource.Info) error {
	obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object, nil)
	if err != nil {
		return err
	}
	return info.Refresh(obj, true)
}

func deleteResource(info *resource.Info) error {
	policy := metav1.DeletePropagationBackground
	opts := &metav1.DeleteOptions{PropagationPolicy: &policy}
	_, err := resource.NewHelper(info.Client, info.Mapping).DeleteWithOptions(info.Namespace, info.Name, opts)
	return err
}

func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.PatchType, error) {
	oldData, err := json.Marshal(current)
	if err != nil {
		return nil, types.StrategicMergePatchType, fmt.Errorf("serializing current configuration: %s", err)
	}
	newData, err := json.Marshal(target.Object)
	if err != nil {
		return nil, types.StrategicMergePatchType, fmt.Errorf("serializing target configuration: %s", err)
	}

	// While different objects need different merge types, the parent function
	// that calls this does not try to create a patch when the data (first
	// returned object) is nil. We can skip calculating the merge type as
	// the returned merge type is ignored.
	if apiequality.Semantic.DeepEqual(oldData, newData) {
		return nil, types.StrategicMergePatchType, nil
	}

	// Get a versioned object
	versionedObject, err := asVersioned(target)

	// Unstructured objects, such as CRDs, may not return a "not registered"
	// error from ConvertToVersion. Anything that's unstructured should use
	// jsonpatch.CreateMergePatch, since Strategic Merge Patch is not supported
	// on objects like CRDs.
	_, isUnstructured := versionedObject.(runtime.Unstructured)

	// On newer K8s versions, CRDs aren't unstructured but have this dedicated type
	_, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition)

	switch {
	case runtime.IsNotRegisteredError(err), isUnstructured, isCRD:
		// fall back to generic JSON merge patch
		patch, err := jsonpatch.CreateMergePatch(oldData, newData)
		if err != nil {
			return nil, types.MergePatchType, fmt.Errorf("failed to create merge patch: %v", err)
		}
		return patch, types.MergePatchType, nil
	case err != nil:
		return nil, types.StrategicMergePatchType, fmt.Errorf("failed to get versionedObject: %s", err)
	default:
		patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, versionedObject)
		if err != nil {
			return nil, types.StrategicMergePatchType, fmt.Errorf("failed to create two-way merge patch: %v", err)
		}
		return patch, types.StrategicMergePatchType, nil
	}
}
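
// An illustrative sketch (the values are assumed, not from the original file)
// of the JSON merge patch fallback above: given old and new serialized
// objects, jsonpatch.CreateMergePatch emits only the changed fields.
//
//	oldData := []byte(`{"spec":{"paused":false,"replicas":1}}`)
//	newData := []byte(`{"spec":{"paused":false,"replicas":3}}`)
//	patch, _ := jsonpatch.CreateMergePatch(oldData, newData)
//	// patch is `{"spec":{"replicas":3}}`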

func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool, recreate bool) error {
	patch, patchType, err := createPatch(target, currentObj)
	if err != nil {
		return fmt.Errorf("failed to create patch: %s", err)
	}
	if patch == nil {
		c.Log("Looks like there are no changes for %s %q", target.Mapping.GroupVersionKind.Kind, target.Name)
		// This needs to happen to make sure that Tiller has the latest info from the API.
		// Otherwise there will be no labels, and other functions that use labels will panic.
		if err := target.Get(); err != nil {
			return fmt.Errorf("error trying to refresh resource information: %v", err)
		}
	} else {
		// send patch to server
		helper := resource.NewHelper(target.Client, target.Mapping)

		obj, err := helper.Patch(target.Namespace, target.Name, patchType, patch, nil)
		if err != nil {
			kind := target.Mapping.GroupVersionKind.Kind
			log.Printf("Cannot patch %s: %q (%v)", kind, target.Name, err)

			if force {
				// Attempt to delete...
				if err := deleteResource(target); err != nil {
					return err
				}
				log.Printf("Deleted %s: %q", kind, target.Name)

				// ... and recreate
				if err := createResource(target); err != nil {
					return fmt.Errorf("failed to recreate resource: %s", err)
				}
				log.Printf("Created a new %s called %q\n", kind, target.Name)

				// No need to refresh the target, as we recreated the resource based
				// on it. In addition, it might not exist yet and a call to `Refresh`
				// may fail.
			} else {
				log.Print("Use --force to force recreation of the resource")
				return err
			}
		} else {
			// When the patch succeeds without needing to recreate, refresh the target.
			target.Refresh(obj, true)
		}
	}

	if !recreate {
		return nil
	}

	versioned := asVersionedOrUnstructured(target)
	selector, ok := getSelectorFromObject(versioned)
	if !ok {
		return nil
	}

	client, err := c.KubernetesClientSet()
	if err != nil {
		return err
	}

	pods, err := client.CoreV1().Pods(target.Namespace).List(metav1.ListOptions{
		LabelSelector: labels.Set(selector).AsSelector().String(),
	})
	if err != nil {
		return err
	}

	// Restart pods
	for _, pod := range pods.Items {
		c.Log("Restarting pod: %v/%v", pod.Namespace, pod.Name)

		// Delete each pod so that it is recreated with the changed spec.
		if err := client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil {
			return err
		}
	}
	return nil
}

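// getSelectorFromObject returns the pod label selector for the workload types
// handled below, and false for any other type.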
func getSelectorFromObject(obj runtime.Object) (map[string]string, bool) {
	switch typed := obj.(type) {

	case *v1.ReplicationController:
		return typed.Spec.Selector, true

	case *extv1beta1.ReplicaSet:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1.ReplicaSet:
		return typed.Spec.Selector.MatchLabels, true

	case *extv1beta1.Deployment:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1beta1.Deployment:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1beta2.Deployment:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1.Deployment:
		return typed.Spec.Selector.MatchLabels, true

	case *extv1beta1.DaemonSet:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1beta2.DaemonSet:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1.DaemonSet:
		return typed.Spec.Selector.MatchLabels, true

	case *batch.Job:
		return typed.Spec.Selector.MatchLabels, true

	case *appsv1beta1.StatefulSet:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1beta2.StatefulSet:
		return typed.Spec.Selector.MatchLabels, true
	case *appsv1.StatefulSet:
		return typed.Spec.Selector.MatchLabels, true

	default:
		return nil, false
	}
}

func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error {
	lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.Everything())

	kind := info.Mapping.GroupVersionKind.Kind
	c.Log("Watching for changes to %s %s with timeout of %v", kind, info.Name, timeout)

	// What we watch for depends on the Kind.
	// - For a Job, we watch for completion.
	// - For all else, we watch until Ready.
	// In the future, we might want to add some special logic for types
	// like Ingress, Volume, etc.

	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
		switch e.Type {
		case watch.Added, watch.Modified:
			// For things like a secret or a config map, this is the best indicator
			// we get. We care mostly about jobs, where what we want to see is
			// the status go into a good state. For other types, like ReplicaSet
			// we don't really do anything to support these as hooks.
			c.Log("Add/Modify event for %s: %v", info.Name, e.Type)
			if kind == "Job" {
				return c.waitForJob(e, info.Name)
			}
			return true, nil
		case watch.Deleted:
			c.Log("Deleted event for %s", info.Name)
			return true, nil
		case watch.Error:
			// Handle error and return with an error.
			c.Log("Error event for %s", info.Name)
			return true, fmt.Errorf("failed to deploy %s", info.Name)
		default:
			return false, nil
		}
	})
	return err
}

// waitForJob is a helper that waits for a job to complete.
//
// This operates on an event returned from a watcher.
func (c *Client) waitForJob(e watch.Event, name string) (bool, error) {
	job := &batch.Job{}
	err := legacyscheme.Scheme.Convert(e.Object, job, nil)
	if err != nil {
		return true, err
	}

	for _, c := range job.Status.Conditions {
		if c.Type == batch.JobComplete && c.Status == v1.ConditionTrue {
			return true, nil
		} else if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
			return true, fmt.Errorf("job failed: %s", c.Reason)
		}
	}

	c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, job.Status.Active, job.Status.Failed, job.Status.Succeeded)
	return false, nil
}

// scrubValidationError removes kubectl info from the message.
func scrubValidationError(err error) error {
	if err == nil {
		return nil
	}
	const stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false"

	if strings.Contains(err.Error(), stopValidateMessage) {
		return goerrors.New(strings.Replace(err.Error(), "; "+stopValidateMessage, "", -1))
	}
	return err
}
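
// An illustrative before/after (the validation message text is assumed) of
// the scrubbing above, which drops the trailing "--validate=false" hint:
//
//	before: error validating data: unknown field "foo"; if you choose to ignore these errors, turn validation off with --validate=false
//	after:  error validating data: unknown field "foo"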

// WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase
// and returns said phase (PodSucceeded or PodFailed qualify).
func (c *Client) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error) {
	infos, err := c.Build(namespace, reader)
	if err != nil {
		return v1.PodUnknown, err
	}
	info := infos[0]

	kind := info.Mapping.GroupVersionKind.Kind
	if kind != "Pod" {
		return v1.PodUnknown, fmt.Errorf("%s is not a Pod", info.Name)
	}

	if err := c.watchPodUntilComplete(timeout, info); err != nil {
		return v1.PodUnknown, err
	}

	if err := info.Get(); err != nil {
		return v1.PodUnknown, err
	}
	status := info.Object.(*v1.Pod).Status.Phase

	return status, nil
}

func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Info) error {
	lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", info.Name)))

	c.Log("Watching pod %s for completion with timeout of %v", info.Name, timeout)
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	_, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) {
		return isPodComplete(e)
	})

	return err
}

// GetPodLogs takes a pod name and namespace and returns the current logs (streaming is NOT enabled).
func (c *Client) GetPodLogs(name, ns string) (io.ReadCloser, error) {
	client, err := c.KubernetesClientSet()
	if err != nil {
		return nil, err
	}
	req := client.CoreV1().Pods(ns).GetLogs(name, &v1.PodLogOptions{})
	logReader, err := req.Stream()
	if err != nil {
		return nil, fmt.Errorf("error in opening log stream, got: %s", err)
	}
	return logReader, nil
}
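
// A usage sketch (pod name and namespace are assumed): fetch a pod's current
// logs and copy them to stdout.
//
//	rc, err := c.GetPodLogs("mypod", "default")
//	if err != nil {
//		return err
//	}
//	defer rc.Close()
//	io.Copy(os.Stdout, rc)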

func isPodComplete(event watch.Event) (bool, error) {
	o, ok := event.Object.(*v1.Pod)
	if !ok {
		return true, fmt.Errorf("expected a *v1.Pod, got %T", event.Object)
	}
	if event.Type == watch.Deleted {
		return false, fmt.Errorf("pod not found")
	}
	switch o.Status.Phase {
	case v1.PodFailed, v1.PodSucceeded:
		return true, nil
	}
	return false, nil
}

// getSelectRelationPod returns the pods related to a Kubernetes resource.
// A resource's label selector is used to find its related pods.
func (c *Client) getSelectRelationPod(info *resource.Info, objPods map[string][]v1.Pod) (map[string][]v1.Pod, error) {
	if info == nil {
		return objPods, nil
	}

	c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name)

	versioned := asVersionedOrUnstructured(info)
	selector, ok := getSelectorFromObject(versioned)
	if !ok {
		return objPods, nil
	}

	client, _ := c.KubernetesClientSet()

	pods, err := client.CoreV1().Pods(info.Namespace).List(metav1.ListOptions{
		LabelSelector: labels.Set(selector).AsSelector().String(),
	})
	if err != nil {
		return objPods, err
	}

	for _, pod := range pods.Items {
		vk := "v1/Pod"
		if !isFoundPod(objPods[vk], pod) {
			objPods[vk] = append(objPods[vk], pod)
		}
	}
	return objPods, nil
}

func isFoundPod(podItem []v1.Pod, pod v1.Pod) bool {
	for _, value := range podItem {
		if (value.Namespace == pod.Namespace) && (value.Name == pod.Name) {
			return true
		}
	}
	return false
}

func asVersionedOrUnstructured(info *resource.Info) runtime.Object {
	obj, _ := asVersioned(info)
	return obj
}

func asVersioned(info *resource.Info) (runtime.Object, error) {
	converter := runtime.ObjectConvertor(scheme.Scheme)
	groupVersioner := runtime.GroupVersioner(schema.GroupVersions(scheme.Scheme.PrioritizedVersionsAllGroups()))
	if info.Mapping != nil {
		groupVersioner = info.Mapping.GroupVersionKind.GroupVersion()
	}

	obj, err := converter.ConvertToVersion(info.Object, groupVersioner)
	if err != nil {
		return info.Object, err
	}
	return obj, nil
}

func asInternal(info *resource.Info) (runtime.Object, error) {
	groupVersioner := info.Mapping.GroupVersionKind.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion()
	return legacyscheme.Scheme.ConvertToVersion(info.Object, groupVersioner)
}