github.com/amtisyAts/helm@v2.17.0+incompatible/pkg/kube/client.go (about) 1 /* 2 Copyright The Helm Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package kube // import "k8s.io/helm/pkg/kube" 18 19 import ( 20 "bytes" 21 "context" 22 "encoding/json" 23 goerrors "errors" 24 "fmt" 25 "io" 26 "io/ioutil" 27 "log" 28 "sort" 29 "strings" 30 "sync" 31 "time" 32 33 "k8s.io/apimachinery/pkg/api/meta" 34 35 jsonpatch "github.com/evanphx/json-patch" 36 appsv1 "k8s.io/api/apps/v1" 37 appsv1beta1 "k8s.io/api/apps/v1beta1" 38 appsv1beta2 "k8s.io/api/apps/v1beta2" 39 batch "k8s.io/api/batch/v1" 40 v1 "k8s.io/api/core/v1" 41 extv1beta1 "k8s.io/api/extensions/v1beta1" 42 apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" 43 apiequality "k8s.io/apimachinery/pkg/api/equality" 44 "k8s.io/apimachinery/pkg/api/errors" 45 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 46 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 47 metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" 48 "k8s.io/apimachinery/pkg/fields" 49 "k8s.io/apimachinery/pkg/labels" 50 "k8s.io/apimachinery/pkg/runtime" 51 "k8s.io/apimachinery/pkg/runtime/schema" 52 "k8s.io/apimachinery/pkg/types" 53 "k8s.io/apimachinery/pkg/util/strategicpatch" 54 "k8s.io/apimachinery/pkg/util/wait" 55 "k8s.io/apimachinery/pkg/watch" 56 "k8s.io/cli-runtime/pkg/genericclioptions" 57 "k8s.io/cli-runtime/pkg/printers" 58 "k8s.io/cli-runtime/pkg/resource" 59 "k8s.io/client-go/kubernetes/scheme" 60 "k8s.io/client-go/rest" 61 cachetools "k8s.io/client-go/tools/cache" 62 watchtools "k8s.io/client-go/tools/watch" 63 cmdutil "k8s.io/kubectl/pkg/cmd/util" 64 "k8s.io/kubectl/pkg/validation" 65 "k8s.io/kubernetes/pkg/kubectl/cmd/get" 66 ) 67 68 // MissingGetHeader is added to Get's output when a resource is not found. 69 const MissingGetHeader = "==> MISSING\nKIND\t\tNAME\n" 70 71 const KubsAPIErrorMsg = "unable to recognize \"\": no matches for kind" 72 73 // ErrNoObjectsVisited indicates that during a visit operation, no matching objects were found. 74 var ErrNoObjectsVisited = goerrors.New("no objects visited") 75 76 var metadataAccessor = meta.NewAccessor() 77 78 // Client represents a client capable of communicating with the Kubernetes API. 79 type Client struct { 80 cmdutil.Factory 81 Log func(string, ...interface{}) 82 } 83 84 // New creates a new Client. 85 func New(getter genericclioptions.RESTClientGetter) *Client { 86 if getter == nil { 87 getter = genericclioptions.NewConfigFlags(true) 88 } 89 90 err := apiextv1beta1.AddToScheme(scheme.Scheme) 91 if err != nil { 92 panic(err) 93 } 94 95 return &Client{ 96 Factory: cmdutil.NewFactory(getter), 97 Log: nopLogger, 98 } 99 } 100 101 var nopLogger = func(_ string, _ ...interface{}) {} 102 103 // ResourceActorFunc performs an action on a single resource. 104 type ResourceActorFunc func(*resource.Info) error 105 106 // Create creates Kubernetes resources from an io.reader. 107 // 108 // Namespace will set the namespace. 109 func (c *Client) Create(namespace string, reader io.Reader, timeout int64, shouldWait bool) error { 110 client, err := c.KubernetesClientSet() 111 if err != nil { 112 return err 113 } 114 if err := ensureNamespace(client, namespace); err != nil { 115 return err 116 } 117 c.Log("building resources from manifest") 118 infos, buildErr := c.BuildUnstructured(namespace, reader) 119 if buildErr != nil { 120 return buildErr 121 } 122 c.Log("creating %d resource(s)", len(infos)) 123 if err := perform(infos, createResource); err != nil { 124 return err 125 } 126 if shouldWait { 127 return c.waitForResources(time.Duration(timeout)*time.Second, infos) 128 } 129 return nil 130 } 131 132 func (c *Client) newBuilder(namespace string, reader io.Reader) *resource.Result { 133 return c.NewBuilder(). 134 ContinueOnError(). 135 WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). 136 Schema(c.validator()). 137 NamespaceParam(namespace). 138 DefaultNamespace(). 139 Stream(reader, ""). 140 Flatten(). 141 Do() 142 } 143 144 func (c *Client) validator() validation.Schema { 145 schema, err := c.Validator(true) 146 if err != nil { 147 c.Log("warning: failed to load schema: %s", err) 148 } 149 return schema 150 } 151 152 // BuildUnstructured reads Kubernetes objects and returns unstructured infos. 153 func (c *Client) BuildUnstructured(namespace string, reader io.Reader) (Result, error) { 154 var result Result 155 156 result, err := c.NewBuilder(). 157 Unstructured(). 158 ContinueOnError(). 159 NamespaceParam(namespace). 160 DefaultNamespace(). 161 Stream(reader, ""). 162 Flatten(). 163 Do().Infos() 164 return result, scrubValidationError(err) 165 } 166 167 // BuildUnstructuredTable reads Kubernetes objects and returns unstructured infos 168 // as a Table. This is meant for viewing resources and displaying them in a table. 169 // This is similar to BuildUnstructured but transforms the request for table 170 // display. 171 func (c *Client) BuildUnstructuredTable(namespace string, reader io.Reader) (Result, error) { 172 var result Result 173 174 result, err := c.NewBuilder(). 175 Unstructured(). 176 ContinueOnError(). 177 NamespaceParam(namespace). 178 DefaultNamespace(). 179 Stream(reader, ""). 180 Flatten(). 181 TransformRequests(transformRequests). 182 Do().Infos() 183 return result, scrubValidationError(err) 184 } 185 186 // This is used to retrieve a table view of the data. A table view is how kubectl 187 // retrieves the information Helm displays as resources in status. Note, table 188 // data is returned as a Table type that does not conform to the runtime.Object 189 // interface but is that type. So, you can't transform it into Go objects easily. 190 func transformRequests(req *rest.Request) { 191 192 // The request headers are for both the v1 and v1beta1 versions of the table 193 // as Kubernetes 1.14 and older used the beta version. 194 req.SetHeader("Accept", strings.Join([]string{ 195 fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName), 196 fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName), 197 "application/json", 198 }, ",")) 199 } 200 201 // Validate reads Kubernetes manifests and validates the content. 202 // 203 // This function does not actually do schema validation of manifests. Adding 204 // validation now breaks existing clients of helm: https://github.com/helm/helm/issues/5750 205 func (c *Client) Validate(namespace string, reader io.Reader) error { 206 _, err := c.NewBuilder(). 207 Unstructured(). 208 ContinueOnError(). 209 NamespaceParam(namespace). 210 DefaultNamespace(). 211 // Schema(c.validator()). // No schema validation 212 Stream(reader, ""). 213 Flatten(). 214 Do().Infos() 215 return scrubValidationError(err) 216 } 217 218 // Build validates for Kubernetes objects and returns resource Infos from a io.Reader. 219 func (c *Client) Build(namespace string, reader io.Reader) (Result, error) { 220 var result Result 221 result, err := c.newBuilder(namespace, reader).Infos() 222 return result, scrubValidationError(err) 223 } 224 225 // Return the resource info as internal 226 func resourceInfoToObject(info *resource.Info, c *Client) runtime.Object { 227 internalObj, err := asInternal(info) 228 if err != nil { 229 // If the problem is just that the resource is not registered, don't print any 230 // error. This is normal for custom resources. 231 if !runtime.IsNotRegisteredError(err) { 232 c.Log("Warning: conversion to internal type failed: %v", err) 233 } 234 // Add the unstructured object in this situation. It will still get listed, just 235 // with less information. 236 return info.Object 237 } 238 239 return internalObj 240 } 241 242 func sortByKey(objs map[string][]runtime.Object) []string { 243 var keys []string 244 // Create a simple slice, so we can sort it 245 for key := range objs { 246 keys = append(keys, key) 247 } 248 // Sort alphabetically by version/kind keys 249 sort.Strings(keys) 250 return keys 251 } 252 253 // We have slices of tables that need to be sorted by name. In this case the 254 // self link is used so the sorting will include namespace and name. 255 func sortTableSlice(objs []runtime.Object) []runtime.Object { 256 // If there are 0 or 1 objects to sort there is nothing to sort so 257 // the list can be returned 258 if len(objs) < 2 { 259 return objs 260 } 261 262 ntbl := &metav1.Table{} 263 unstr, ok := objs[0].(*unstructured.Unstructured) 264 if !ok { 265 return objs 266 } 267 if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, ntbl); err != nil { 268 return objs 269 } 270 271 // Sort the list of objects 272 var newObjs []runtime.Object 273 namesCache := make(map[string]runtime.Object, len(objs)) 274 var names []string 275 var key string 276 for _, obj := range objs { 277 unstr, ok := obj.(*unstructured.Unstructured) 278 if !ok { 279 return objs 280 } 281 if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, ntbl); err != nil { 282 return objs 283 } 284 285 // At this point we have a table. Each table has just one row. We are 286 // sorting the tables by the first cell (name) in the first and only 287 // row. If the first cell of the first row cannot be gotten as a string 288 // we return the original unsorted list. 289 if len(ntbl.Rows) == 0 { // Make sure there are rows to read from 290 return objs 291 } 292 if len(ntbl.Rows[0].Cells) == 0 { // Make sure there are cells to read 293 return objs 294 } 295 key, ok = ntbl.Rows[0].Cells[0].(string) 296 if !ok { 297 return objs 298 } 299 namesCache[key] = obj 300 names = append(names, key) 301 } 302 303 sort.Strings(names) 304 305 for _, name := range names { 306 newObjs = append(newObjs, namesCache[name]) 307 } 308 309 return newObjs 310 } 311 312 // Get gets Kubernetes resources as pretty-printed string. 313 // 314 // Namespace will set the namespace. 315 func (c *Client) Get(namespace string, reader io.Reader) (string, error) { 316 // Since we don't know what order the objects come in, let's group them by the types and then sort them, so 317 // that when we print them, they come out looking good (headers apply to subgroups, etc.). 318 objs := make(map[string][]runtime.Object) 319 gk := make(map[string]schema.GroupKind) 320 mux := &sync.Mutex{} 321 322 // The contents of the reader are used two times. The bytes are coppied out 323 // for use in future readers. 324 b, err := ioutil.ReadAll(reader) 325 if err != nil { 326 return "", err 327 } 328 329 // Get the table display for the objects associated with the release. This 330 // is done in table format so that it can be displayed in the status in 331 // the same way kubectl displays the resource information. 332 // Note, the response returns unstructured data instead of typed objects. 333 // These cannot be easily (i.e., via the go packages) transformed into 334 // Go types. 335 tinfos, err := c.BuildUnstructuredTable(namespace, bytes.NewBuffer(b)) 336 if err != nil { 337 return "", err 338 } 339 340 missing := []string{} 341 err = perform(tinfos, func(info *resource.Info) error { 342 mux.Lock() 343 defer mux.Unlock() 344 c.Log("Doing get for %s: %q in %q", info.Mapping.GroupVersionKind.Kind, info.Name, info.Namespace) 345 if err := info.Get(); err != nil { 346 c.Log("WARNING: Failed Get for resource %q: %s", info.Name, err) 347 missing = append(missing, fmt.Sprintf("%v\t\t%s", info.Mapping.Resource, info.Name)) 348 return nil 349 } 350 351 // Use APIVersion/Kind as grouping mechanism. I'm not sure if you can have multiple 352 // versions per cluster, but this certainly won't hurt anything, so let's be safe. 353 gvk := info.ResourceMapping().GroupVersionKind 354 vk := gvk.Version + "/" + gvk.Kind 355 gk[vk] = gvk.GroupKind() 356 357 // Initialize map. The main map groups resources based on version/kind 358 // The second level is a simple 'Name' to 'Object', that will help sort 359 // the individual resource later 360 if objs[vk] == nil { 361 objs[vk] = []runtime.Object{} 362 } 363 // Map between the resource name to the underlying info object 364 objs[vk] = append(objs[vk], resourceInfoToObject(info, c)) 365 366 return nil 367 }) 368 if err != nil { 369 return "", err 370 } 371 372 // This section finds related resources (e.g., pods). Before looking up pods 373 // the resources the pods are made from need to be looked up in a manner 374 // that can be turned into Go types and worked with. 375 infos, err := c.BuildUnstructured(namespace, bytes.NewBuffer(b)) 376 if err != nil { 377 return "", err 378 } 379 err = perform(infos, func(info *resource.Info) error { 380 mux.Lock() 381 defer mux.Unlock() 382 if err := info.Get(); err != nil { 383 c.Log("WARNING: Failed Get for resource %q: %s", info.Name, err) 384 missing = append(missing, fmt.Sprintf("%v\t\t%s", info.Mapping.Resource, info.Name)) 385 return nil 386 } 387 388 //Get the relation pods 389 objs, err = c.getSelectRelationPod(info, objs) 390 if err != nil { 391 c.Log("Warning: get the relation pod is failed, err:%s", err.Error()) 392 } 393 394 return nil 395 }) 396 if err != nil { 397 return "", err 398 } 399 400 // Ok, now we have all the objects grouped by types (say, by v1/Pod, v1/Service, etc.), so 401 // spin through them and print them. Printer is cool since it prints the header only when 402 // an object type changes, so we can just rely on that. Problem is it doesn't seem to keep 403 // track of tab widths. 404 buf := new(bytes.Buffer) 405 406 // Sort alphabetically by version/kind keys 407 vkKeys := sortByKey(objs) 408 // Iterate on sorted version/kind types 409 for _, t := range vkKeys { 410 if _, err = fmt.Fprintf(buf, "==> %s\n", t); err != nil { 411 return "", err 412 } 413 vk := objs[t] 414 415 // The request made for tables returns each Kubernetes object as its 416 // own table. The normal sorting provided by kubectl and cli-runtime 417 // does not handle this case. Here we sort within each of our own 418 // grouping. 419 vk = sortTableSlice(vk) 420 421 // The printer flag setup follows a simalar setup to kubectl 422 printFlags := get.NewHumanPrintFlags() 423 if lgk, ok := gk[t]; ok { 424 printFlags.SetKind(lgk) 425 } 426 printer, _ := printFlags.ToPrinter("") 427 printer, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(printer, nil) 428 if err != nil { 429 return "", err 430 } 431 printer = &get.TablePrinter{Delegate: printer} 432 433 for _, resource := range vk { 434 if err := printer.PrintObj(resource, buf); err != nil { 435 c.Log("failed to print object type %s: %v", t, err) 436 return "", err 437 } 438 } 439 if _, err := buf.WriteString("\n"); err != nil { 440 return "", err 441 } 442 } 443 if len(missing) > 0 { 444 buf.WriteString(MissingGetHeader) 445 for _, s := range missing { 446 fmt.Fprintln(buf, s) 447 } 448 } 449 return buf.String(), nil 450 } 451 452 // Update reads the current configuration and a target configuration from io.reader 453 // and creates resources that don't already exist, updates resources that have been modified 454 // in the target configuration and deletes resources from the current configuration that are 455 // not present in the target configuration. 456 // 457 // Namespace will set the namespaces. 458 // 459 // Deprecated: use UpdateWithOptions instead. 460 func (c *Client) Update(namespace string, originalReader, targetReader io.Reader, force bool, recreate bool, timeout int64, shouldWait bool) error { 461 return c.UpdateWithOptions(namespace, originalReader, targetReader, UpdateOptions{ 462 Force: force, 463 Recreate: recreate, 464 Timeout: timeout, 465 ShouldWait: shouldWait, 466 }) 467 } 468 469 // UpdateOptions provides options to control update behavior 470 type UpdateOptions struct { 471 Force bool 472 Recreate bool 473 Timeout int64 474 ShouldWait bool 475 // Allow deletion of new resources created in this update when update failed 476 CleanupOnFail bool 477 } 478 479 // UpdateWithOptions reads the current configuration and a target configuration from io.reader 480 // and creates resources that don't already exist, updates resources that have been modified 481 // in the target configuration and deletes resources from the current configuration that are 482 // not present in the target configuration. 483 // 484 // Namespace will set the namespaces. UpdateOptions provides additional parameters to control 485 // update behavior. 486 func (c *Client) UpdateWithOptions(namespace string, originalReader, targetReader io.Reader, opts UpdateOptions) error { 487 original, err := c.BuildUnstructured(namespace, originalReader) 488 if err != nil { 489 // Checking for removed Kubernetes API error so can provide a more informative error message to the user 490 // Ref: https://github.com/helm/helm/issues/7219 491 if strings.Contains(err.Error(), KubsAPIErrorMsg) { 492 return fmt.Errorf("current release manifest contains removed kubernetes api(s) for this "+ 493 "kubernetes version and it is therefore unable to build the kubernetes "+ 494 "objects for performing the diff. error from kubernetes: %s", err) 495 } else { 496 return fmt.Errorf("failed decoding reader into objects: %s", err) 497 } 498 } 499 500 c.Log("building resources from updated manifest") 501 target, err := c.BuildUnstructured(namespace, targetReader) 502 if err != nil { 503 // Checking for removed Kubernetes API error so can provide a more informative error message to the user 504 // Ref: https://github.com/helm/helm/issues/7219 505 if strings.Contains(err.Error(), KubsAPIErrorMsg) { 506 return fmt.Errorf("new release manifest contains removed kubernetes api(s) for this "+ 507 "kubernetes version and it is therefore unable to build the kubernetes "+ 508 "objects for deployment. error from kubernetes: %s", err) 509 510 } else { 511 return fmt.Errorf("failed decoding reader into objects: %s", err) 512 } 513 } 514 515 newlyCreatedResources := []*resource.Info{} 516 updateErrors := []string{} 517 518 c.Log("checking %d resources for changes", len(target)) 519 err = target.Visit(func(info *resource.Info, err error) error { 520 if err != nil { 521 return err 522 } 523 524 helper := resource.NewHelper(info.Client, info.Mapping) 525 if _, err := helper.Get(info.Namespace, info.Name, info.Export); err != nil { 526 if !errors.IsNotFound(err) { 527 return fmt.Errorf("Could not get information about the resource: %s", err) 528 } 529 530 // Since the resource does not exist, create it. 531 if err := createResource(info); err != nil { 532 return fmt.Errorf("failed to create resource: %s", err) 533 } 534 newlyCreatedResources = append(newlyCreatedResources, info) 535 536 kind := info.Mapping.GroupVersionKind.Kind 537 c.Log("Created a new %s called %q in %s\n", kind, info.Name, info.Namespace) 538 return nil 539 } 540 541 originalInfo := original.Get(info) 542 543 // The resource already exists in the cluster, but it wasn't defined in the previous release. 544 // In this case, we consider it to be a resource that was previously un-managed by the release and error out, 545 // asking for the user to intervene. 546 // 547 // See https://github.com/helm/helm/issues/1193 for more info. 548 if originalInfo == nil { 549 return fmt.Errorf( 550 "kind %s with the name %q in %q already exists in the cluster and wasn't defined in the previous release. Before upgrading, please either delete the resource from the cluster or remove it from the chart", 551 info.Mapping.GroupVersionKind.Kind, 552 info.Name, 553 info.Namespace, 554 ) 555 } 556 557 if err := updateResource(c, info, originalInfo.Object, opts.Force, opts.Recreate); err != nil { 558 c.Log("error updating the resource %q:\n\t %v", info.Name, err) 559 updateErrors = append(updateErrors, err.Error()) 560 } 561 562 return nil 563 }) 564 565 cleanupErrors := []string{} 566 567 if opts.CleanupOnFail && (err != nil || len(updateErrors) != 0) { 568 c.Log("Cleanup on fail enabled: cleaning up newly created resources due to update manifests failures") 569 cleanupErrors = c.cleanup(newlyCreatedResources) 570 } 571 572 switch { 573 case err != nil: 574 return fmt.Errorf(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && ")) 575 case len(updateErrors) != 0: 576 return fmt.Errorf(strings.Join(append(updateErrors, cleanupErrors...), " && ")) 577 } 578 579 for _, info := range original.Difference(target) { 580 c.Log("Deleting %q in %s...", info.Name, info.Namespace) 581 582 if err := info.Get(); err != nil { 583 c.Log("Unable to get obj %q, err: %s", info.Name, err) 584 } 585 annotations, err := metadataAccessor.Annotations(info.Object) 586 if err != nil { 587 c.Log("Unable to get annotations on %q, err: %s", info.Name, err) 588 } 589 if ResourcePolicyIsKeep(annotations) { 590 policy := annotations[ResourcePolicyAnno] 591 c.Log("Skipping delete of %q due to annotation [%s=%s]", info.Name, ResourcePolicyAnno, policy) 592 continue 593 } 594 595 if err := deleteResource(info); err != nil { 596 c.Log("Failed to delete %q, err: %s", info.Name, err) 597 } 598 } 599 if opts.ShouldWait { 600 err := c.waitForResources(time.Duration(opts.Timeout)*time.Second, target) 601 602 if opts.CleanupOnFail && err != nil { 603 c.Log("Cleanup on fail enabled: cleaning up newly created resources due to wait failure during update") 604 cleanupErrors = c.cleanup(newlyCreatedResources) 605 return fmt.Errorf(strings.Join(append([]string{err.Error()}, cleanupErrors...), " && ")) 606 } 607 608 return err 609 } 610 return nil 611 } 612 613 func (c *Client) cleanup(newlyCreatedResources []*resource.Info) (cleanupErrors []string) { 614 for _, info := range newlyCreatedResources { 615 kind := info.Mapping.GroupVersionKind.Kind 616 c.Log("Deleting newly created %s with the name %q in %s...", kind, info.Name, info.Namespace) 617 if err := deleteResource(info); err != nil { 618 c.Log("Error deleting newly created %s with the name %q in %s: %s", kind, info.Name, info.Namespace, err) 619 cleanupErrors = append(cleanupErrors, err.Error()) 620 } 621 } 622 return 623 } 624 625 // Delete deletes Kubernetes resources from an io.reader. 626 // 627 // Namespace will set the namespace. 628 func (c *Client) Delete(namespace string, reader io.Reader) error { 629 return c.DeleteWithTimeout(namespace, reader, 0, false) 630 } 631 632 // DeleteWithTimeout deletes Kubernetes resources from an io.reader. If shouldWait is true, the function 633 // will wait for all resources to be deleted from etcd before returning, or when the timeout 634 // has expired. 635 // 636 // Namespace will set the namespace. 637 func (c *Client) DeleteWithTimeout(namespace string, reader io.Reader, timeout int64, shouldWait bool) error { 638 infos, err := c.BuildUnstructured(namespace, reader) 639 if err != nil { 640 return err 641 } 642 err = perform(infos, func(info *resource.Info) error { 643 c.Log("Starting delete for %q %s", info.Name, info.Mapping.GroupVersionKind.Kind) 644 err := deleteResource(info) 645 return c.skipIfNotFound(err) 646 }) 647 if err != nil { 648 return err 649 } 650 651 if shouldWait { 652 c.Log("Waiting for %d seconds for delete to be completed", timeout) 653 return waitUntilAllResourceDeleted(infos, time.Duration(timeout)*time.Second) 654 } 655 656 return nil 657 } 658 659 func (c *Client) skipIfNotFound(err error) error { 660 if errors.IsNotFound(err) { 661 c.Log("%v", err) 662 return nil 663 } 664 return err 665 } 666 667 func waitUntilAllResourceDeleted(infos Result, timeout time.Duration) error { 668 return wait.Poll(2*time.Second, timeout, func() (bool, error) { 669 allDeleted := true 670 err := perform(infos, func(info *resource.Info) error { 671 innerErr := info.Get() 672 if errors.IsNotFound(innerErr) { 673 return nil 674 } 675 if innerErr != nil { 676 return innerErr 677 } 678 allDeleted = false 679 return nil 680 }) 681 if err != nil { 682 return false, err 683 } 684 return allDeleted, nil 685 }) 686 } 687 688 func (c *Client) watchTimeout(t time.Duration) ResourceActorFunc { 689 return func(info *resource.Info) error { 690 return c.watchUntilReady(t, info) 691 } 692 } 693 694 // WatchUntilReady watches the resource given in the reader, and waits until it is ready. 695 // 696 // This function is mainly for hook implementations. It watches for a resource to 697 // hit a particular milestone. The milestone depends on the Kind. 698 // 699 // For most kinds, it checks to see if the resource is marked as Added or Modified 700 // by the Kubernetes event stream. For some kinds, it does more: 701 // 702 // - Jobs: A job is marked "Ready" when it has successfully completed. This is 703 // ascertained by watching the Status fields in a job's output. 704 // 705 // Handling for other kinds will be added as necessary. 706 func (c *Client) WatchUntilReady(namespace string, reader io.Reader, timeout int64, shouldWait bool) error { 707 infos, err := c.BuildUnstructured(namespace, reader) 708 if err != nil { 709 return err 710 } 711 // For jobs, there's also the option to do poll c.Jobs(namespace).Get(): 712 // https://github.com/adamreese/kubernetes/blob/master/test/e2e/job.go#L291-L300 713 return perform(infos, c.watchTimeout(time.Duration(timeout)*time.Second)) 714 } 715 716 // WaitUntilCRDEstablished polls the given CRD until it reaches the established 717 // state. A CRD needs to reach the established state before CRs can be created. 718 // 719 // If a naming conflict condition is found, this function will return an error. 720 func (c *Client) WaitUntilCRDEstablished(reader io.Reader, timeout time.Duration) error { 721 infos, err := c.BuildUnstructured(metav1.NamespaceAll, reader) 722 if err != nil { 723 return err 724 } 725 726 return perform(infos, c.pollCRDEstablished(timeout)) 727 } 728 729 func (c *Client) pollCRDEstablished(t time.Duration) ResourceActorFunc { 730 return func(info *resource.Info) error { 731 return c.pollCRDUntilEstablished(t, info) 732 } 733 } 734 735 func (c *Client) pollCRDUntilEstablished(timeout time.Duration, info *resource.Info) error { 736 return wait.PollImmediate(time.Second, timeout, func() (bool, error) { 737 err := info.Get() 738 if err != nil { 739 return false, fmt.Errorf("unable to get CRD: %v", err) 740 } 741 742 crd := &apiextv1beta1.CustomResourceDefinition{} 743 err = scheme.Scheme.Convert(info.Object, crd, nil) 744 if err != nil { 745 return false, fmt.Errorf("unable to convert to CRD type: %v", err) 746 } 747 748 for _, cond := range crd.Status.Conditions { 749 switch cond.Type { 750 case apiextv1beta1.Established: 751 if cond.Status == apiextv1beta1.ConditionTrue { 752 return true, nil 753 } 754 case apiextv1beta1.NamesAccepted: 755 if cond.Status == apiextv1beta1.ConditionFalse { 756 return false, fmt.Errorf("naming conflict detected for CRD %s", crd.GetName()) 757 } 758 } 759 } 760 761 return false, nil 762 }) 763 } 764 765 func perform(infos Result, fn ResourceActorFunc) error { 766 if len(infos) == 0 { 767 return ErrNoObjectsVisited 768 } 769 770 errs := make(chan error) 771 go batchPerform(infos, fn, errs) 772 773 for range infos { 774 err := <-errs 775 if err != nil { 776 return err 777 } 778 } 779 return nil 780 } 781 782 func batchPerform(infos Result, fn ResourceActorFunc, errs chan<- error) { 783 var kind string 784 var wg sync.WaitGroup 785 for _, info := range infos { 786 currentKind := info.Object.GetObjectKind().GroupVersionKind().Kind 787 if kind != currentKind { 788 wg.Wait() 789 kind = currentKind 790 } 791 wg.Add(1) 792 go func(i *resource.Info) { 793 errs <- fn(i) 794 wg.Done() 795 }(info) 796 } 797 } 798 799 func createResource(info *resource.Info) error { 800 obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object, nil) 801 if err != nil { 802 return err 803 } 804 return info.Refresh(obj, true) 805 } 806 807 func deleteResource(info *resource.Info) error { 808 policy := metav1.DeletePropagationBackground 809 opts := &metav1.DeleteOptions{PropagationPolicy: &policy} 810 _, err := resource.NewHelper(info.Client, info.Mapping).DeleteWithOptions(info.Namespace, info.Name, opts) 811 return err 812 } 813 814 func createPatch(target *resource.Info, current runtime.Object) ([]byte, types.PatchType, error) { 815 oldData, err := json.Marshal(current) 816 if err != nil { 817 return nil, types.StrategicMergePatchType, fmt.Errorf("serializing current configuration: %s", err) 818 } 819 newData, err := json.Marshal(target.Object) 820 if err != nil { 821 return nil, types.StrategicMergePatchType, fmt.Errorf("serializing target configuration: %s", err) 822 } 823 824 // While different objects need different merge types, the parent function 825 // that calls this does not try to create a patch when the data (first 826 // returned object) is nil. We can skip calculating the merge type as 827 // the returned merge type is ignored. 828 if apiequality.Semantic.DeepEqual(oldData, newData) { 829 return nil, types.StrategicMergePatchType, nil 830 } 831 832 // Get a versioned object 833 versionedObject, err := asVersioned(target) 834 835 // Unstructured objects, such as CRDs, may not have a not registered error 836 // returned from ConvertToVersion. Anything that's unstructured should 837 // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported 838 // on objects like CRDs. 839 _, isUnstructured := versionedObject.(runtime.Unstructured) 840 841 // On newer K8s versions, CRDs aren't unstructured but has this dedicated type 842 _, isCRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) 843 844 switch { 845 case runtime.IsNotRegisteredError(err), isUnstructured, isCRD: 846 // fall back to generic JSON merge patch 847 patch, err := jsonpatch.CreateMergePatch(oldData, newData) 848 if err != nil { 849 return nil, types.MergePatchType, fmt.Errorf("failed to create merge patch: %v", err) 850 } 851 return patch, types.MergePatchType, nil 852 case err != nil: 853 return nil, types.StrategicMergePatchType, fmt.Errorf("failed to get versionedObject: %s", err) 854 default: 855 patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, versionedObject) 856 if err != nil { 857 return nil, types.StrategicMergePatchType, fmt.Errorf("failed to create two-way merge patch: %v", err) 858 } 859 return patch, types.StrategicMergePatchType, nil 860 } 861 } 862 863 func updateResource(c *Client, target *resource.Info, currentObj runtime.Object, force bool, recreate bool) error { 864 patch, patchType, err := createPatch(target, currentObj) 865 if err != nil { 866 return fmt.Errorf("failed to create patch: %s", err) 867 } 868 if patch == nil { 869 c.Log("Looks like there are no changes for %s %q in %q", target.Mapping.GroupVersionKind.Kind, target.Name, target.Namespace) 870 // This needs to happen to make sure that tiller has the latest info from the API 871 // Otherwise there will be no labels and other functions that use labels will panic 872 if err := target.Get(); err != nil { 873 return fmt.Errorf("error trying to refresh resource information: %v", err) 874 } 875 } else { 876 // send patch to server 877 helper := resource.NewHelper(target.Client, target.Mapping) 878 879 obj, err := helper.Patch(target.Namespace, target.Name, patchType, patch, nil) 880 if err != nil { 881 kind := target.Mapping.GroupVersionKind.Kind 882 log.Printf("Cannot patch %s: %q in %q (%v)", kind, target.Name, target.Namespace, err) 883 884 if force { 885 // Attempt to delete... 886 if err := deleteResource(target); err != nil { 887 return err 888 } 889 log.Printf("Deleted %s: %q in %q", kind, target.Name, target.Namespace) 890 891 // ... and recreate 892 if err := createResource(target); err != nil { 893 return fmt.Errorf("Failed to recreate resource: %s", err) 894 } 895 log.Printf("Created a new %s called %q in %s\n", kind, target.Name, target.Namespace) 896 897 // No need to refresh the target, as we recreated the resource based 898 // on it. In addition, it might not exist yet and a call to `Refresh` 899 // may fail. 900 } else { 901 log.Print("Use --force to force recreation of the resource") 902 return err 903 } 904 } else { 905 // When patch succeeds without needing to recreate, refresh target. 906 target.Refresh(obj, true) 907 } 908 } 909 910 if !recreate { 911 return nil 912 } 913 914 versioned := asVersionedOrUnstructured(target) 915 selector, ok := getSelectorFromObject(versioned) 916 if !ok { 917 return nil 918 } 919 920 client, err := c.KubernetesClientSet() 921 if err != nil { 922 return err 923 } 924 925 pods, err := client.CoreV1().Pods(target.Namespace).List(metav1.ListOptions{ 926 LabelSelector: labels.Set(selector).AsSelector().String(), 927 }) 928 if err != nil { 929 return err 930 } 931 932 // Restart pods 933 for _, pod := range pods.Items { 934 c.Log("Restarting pod: %v/%v", pod.Namespace, pod.Name) 935 936 // Delete each pod for get them restarted with changed spec. 937 if err := client.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewPreconditionDeleteOptions(string(pod.UID))); err != nil { 938 return err 939 } 940 } 941 return nil 942 } 943 944 func getSelectorFromObject(obj runtime.Object) (map[string]string, bool) { 945 switch typed := obj.(type) { 946 947 case *v1.ReplicationController: 948 return typed.Spec.Selector, true 949 950 case *extv1beta1.ReplicaSet: 951 return typed.Spec.Selector.MatchLabels, true 952 case *appsv1.ReplicaSet: 953 return typed.Spec.Selector.MatchLabels, true 954 955 case *extv1beta1.Deployment: 956 return typed.Spec.Selector.MatchLabels, true 957 case *appsv1beta1.Deployment: 958 return typed.Spec.Selector.MatchLabels, true 959 case *appsv1beta2.Deployment: 960 return typed.Spec.Selector.MatchLabels, true 961 case *appsv1.Deployment: 962 return typed.Spec.Selector.MatchLabels, true 963 964 case *extv1beta1.DaemonSet: 965 return typed.Spec.Selector.MatchLabels, true 966 case *appsv1beta2.DaemonSet: 967 return typed.Spec.Selector.MatchLabels, true 968 case *appsv1.DaemonSet: 969 return typed.Spec.Selector.MatchLabels, true 970 971 case *batch.Job: 972 return typed.Spec.Selector.MatchLabels, true 973 974 case *appsv1beta1.StatefulSet: 975 return typed.Spec.Selector.MatchLabels, true 976 case *appsv1beta2.StatefulSet: 977 return typed.Spec.Selector.MatchLabels, true 978 case *appsv1.StatefulSet: 979 return typed.Spec.Selector.MatchLabels, true 980 981 default: 982 return nil, false 983 } 984 } 985 986 func (c *Client) watchUntilReady(timeout time.Duration, info *resource.Info) error { 987 // Use a selector on the name of the resource. This should be unique for the 988 // given version and kind 989 selector, err := fields.ParseSelector(fmt.Sprintf("metadata.name=%s", info.Name)) 990 if err != nil { 991 return err 992 } 993 lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, selector) 994 995 kind := info.Mapping.GroupVersionKind.Kind 996 c.Log("Watching for changes to %s %s in %s with timeout of %v", kind, info.Name, info.Namespace, timeout) 997 998 // What we watch for depends on the Kind. 999 // - For a Job, we watch for completion. 1000 // - For all else, we watch until Ready. 1001 // In the future, we might want to add some special logic for types 1002 // like Ingress, Volume, etc. 1003 1004 ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) 1005 defer cancel() 1006 _, err = watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) { 1007 switch e.Type { 1008 case watch.Added, watch.Modified: 1009 // For things like a secret or a config map, this is the best indicator 1010 // we get. We care mostly about jobs, where what we want to see is 1011 // the status go into a good state. For other types, like ReplicaSet 1012 // we don't really do anything to support these as hooks. 1013 c.Log("Add/Modify event for %s: %v", info.Name, e.Type) 1014 if kind == "Job" { 1015 return c.waitForJob(e, info.Name) 1016 } 1017 return true, nil 1018 case watch.Deleted: 1019 c.Log("Deleted event for %s", info.Name) 1020 return true, nil 1021 case watch.Error: 1022 // Handle error and return with an error. 1023 c.Log("Error event for %s", info.Name) 1024 return true, fmt.Errorf("Failed to deploy %s", info.Name) 1025 default: 1026 return false, nil 1027 } 1028 }) 1029 return err 1030 } 1031 1032 // waitForJob is a helper that waits for a job to complete. 1033 // 1034 // This operates on an event returned from a watcher. 1035 func (c *Client) waitForJob(e watch.Event, name string) (bool, error) { 1036 job := &batch.Job{} 1037 err := scheme.Scheme.Convert(e.Object, job, nil) 1038 if err != nil { 1039 return true, err 1040 } 1041 1042 for _, c := range job.Status.Conditions { 1043 if c.Type == batch.JobComplete && c.Status == v1.ConditionTrue { 1044 return true, nil 1045 } else if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue { 1046 return true, fmt.Errorf("Job failed: %s", c.Reason) 1047 } 1048 } 1049 1050 c.Log("%s: Jobs active: %d, jobs failed: %d, jobs succeeded: %d", name, job.Status.Active, job.Status.Failed, job.Status.Succeeded) 1051 return false, nil 1052 } 1053 1054 // scrubValidationError removes kubectl info from the message. 1055 func scrubValidationError(err error) error { 1056 if err == nil { 1057 return nil 1058 } 1059 const stopValidateMessage = "if you choose to ignore these errors, turn validation off with --validate=false" 1060 1061 if strings.Contains(err.Error(), stopValidateMessage) { 1062 return goerrors.New(strings.Replace(err.Error(), "; "+stopValidateMessage, "", -1)) 1063 } 1064 return err 1065 } 1066 1067 // WaitAndGetCompletedPodPhase waits up to a timeout until a pod enters a completed phase 1068 // and returns said phase (PodSucceeded or PodFailed qualify). 1069 func (c *Client) WaitAndGetCompletedPodPhase(namespace string, reader io.Reader, timeout time.Duration) (v1.PodPhase, error) { 1070 infos, err := c.Build(namespace, reader) 1071 if err != nil { 1072 return v1.PodUnknown, err 1073 } 1074 info := infos[0] 1075 1076 kind := info.Mapping.GroupVersionKind.Kind 1077 if kind != "Pod" { 1078 return v1.PodUnknown, fmt.Errorf("%s is not a Pod", info.Name) 1079 } 1080 1081 if err := c.watchPodUntilComplete(timeout, info); err != nil { 1082 return v1.PodUnknown, err 1083 } 1084 1085 if err := info.Get(); err != nil { 1086 return v1.PodUnknown, err 1087 } 1088 status := info.Object.(*v1.Pod).Status.Phase 1089 1090 return status, nil 1091 } 1092 1093 func (c *Client) watchPodUntilComplete(timeout time.Duration, info *resource.Info) error { 1094 lw := cachetools.NewListWatchFromClient(info.Client, info.Mapping.Resource.Resource, info.Namespace, fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", info.Name))) 1095 1096 c.Log("Watching pod %s for completion with timeout of %v", info.Name, timeout) 1097 ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout) 1098 defer cancel() 1099 _, err := watchtools.ListWatchUntil(ctx, lw, func(e watch.Event) (bool, error) { 1100 return isPodComplete(e) 1101 }) 1102 1103 return err 1104 } 1105 1106 // GetPodLogs takes pod name and namespace and returns the current logs (streaming is NOT enabled). 1107 func (c *Client) GetPodLogs(name, ns string) (io.ReadCloser, error) { 1108 client, err := c.KubernetesClientSet() 1109 if err != nil { 1110 return nil, err 1111 } 1112 req := client.CoreV1().Pods(ns).GetLogs(name, &v1.PodLogOptions{}) 1113 logReader, err := req.Stream() 1114 if err != nil { 1115 return nil, fmt.Errorf("error in opening log stream, got: %s", err) 1116 } 1117 return logReader, nil 1118 } 1119 1120 func isPodComplete(event watch.Event) (bool, error) { 1121 o, ok := event.Object.(*v1.Pod) 1122 if !ok { 1123 return true, fmt.Errorf("expected a *v1.Pod, got %T", event.Object) 1124 } 1125 if event.Type == watch.Deleted { 1126 return false, fmt.Errorf("pod not found") 1127 } 1128 switch o.Status.Phase { 1129 case v1.PodFailed, v1.PodSucceeded: 1130 return true, nil 1131 } 1132 return false, nil 1133 } 1134 1135 // get a kubernetes resources' relation pods 1136 // kubernetes resource used select labels to relate pods 1137 func (c *Client) getSelectRelationPod(info *resource.Info, objs map[string][]runtime.Object) (map[string][]runtime.Object, error) { 1138 if info == nil { 1139 return objs, nil 1140 } 1141 1142 c.Log("get relation pod of object: %s/%s/%s", info.Namespace, info.Mapping.GroupVersionKind.Kind, info.Name) 1143 1144 versioned := asVersionedOrUnstructured(info) 1145 selector, ok := getSelectorFromObject(versioned) 1146 if !ok { 1147 return objs, nil 1148 } 1149 1150 // The related pods are looked up in Table format so that their display can 1151 // be printed in a manner similar to kubectl when it get pods. The response 1152 // can be used with a table printer. 1153 infos, err := c.NewBuilder(). 1154 Unstructured(). 1155 ContinueOnError(). 1156 NamespaceParam(info.Namespace). 1157 DefaultNamespace(). 1158 ResourceTypes("pods"). 1159 LabelSelector(labels.Set(selector).AsSelector().String()). 1160 TransformRequests(transformRequests). 1161 Do().Infos() 1162 if err != nil { 1163 return objs, err 1164 } 1165 1166 for _, info := range infos { 1167 vk := "v1/Pod(related)" 1168 objs[vk] = append(objs[vk], info.Object) 1169 } 1170 1171 return objs, nil 1172 } 1173 1174 func asVersionedOrUnstructured(info *resource.Info) runtime.Object { 1175 obj, _ := asVersioned(info) 1176 return obj 1177 } 1178 1179 func asVersioned(info *resource.Info) (runtime.Object, error) { 1180 converter := runtime.ObjectConvertor(scheme.Scheme) 1181 groupVersioner := runtime.GroupVersioner(schema.GroupVersions(scheme.Scheme.PrioritizedVersionsAllGroups())) 1182 if info.Mapping != nil { 1183 groupVersioner = info.Mapping.GroupVersionKind.GroupVersion() 1184 } 1185 1186 obj, err := converter.ConvertToVersion(info.Object, groupVersioner) 1187 if err != nil { 1188 return info.Object, err 1189 } 1190 return obj, nil 1191 } 1192 1193 func asInternal(info *resource.Info) (runtime.Object, error) { 1194 groupVersioner := info.Mapping.GroupVersionKind.GroupKind().WithVersion(runtime.APIVersionInternal).GroupVersion() 1195 return scheme.Scheme.ConvertToVersion(info.Object, groupVersioner) 1196 }