github.com/GoogleContainerTools/skaffold/v2@v2.13.2/pkg/diag/validator/validator_test.go

     1  /*
     2  Copyright 2019 The Skaffold Authors
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package validator
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"strings"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/google/go-cmp/cmp"
    27  	"google.golang.org/protobuf/testing/protocmp"
    28  	appsv1 "k8s.io/api/apps/v1"
    29  	v1 "k8s.io/api/core/v1"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"k8s.io/apimachinery/pkg/runtime"
    32  	"k8s.io/apimachinery/pkg/types"
    33  	"k8s.io/client-go/kubernetes"
    34  	fakekubeclientset "k8s.io/client-go/kubernetes/fake"
    35  	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
    36  
    37  	"github.com/GoogleContainerTools/skaffold/v2/pkg/diag/recommender"
    38  	"github.com/GoogleContainerTools/skaffold/v2/proto/v1"
    39  	"github.com/GoogleContainerTools/skaffold/v2/testutil"
    40  )
    41  
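        // TestRun exercises PodValidator.Validate against pods, container statuses, and
        // events served by a fake Kubernetes clientset, checking the diagnosed Resources.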
    42  func TestRun(t *testing.T) {
    43  	type mockLogOutput struct {
    44  		output []byte
    45  		err    error
    46  	}
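        	// before and after provide ordered timestamps for events, so cases that
        	// depend on the most recent warning or normal event are deterministic.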
    47  	before := time.Now()
    48  	after := before.Add(3 * time.Second)
    49  	tests := []struct {
    50  		description string
    51  		uid         string
    52  		pods        []*v1.Pod
    53  		logOutput   mockLogOutput
    54  		events      []v1.Event
    55  		expected    []Resource
    56  	}{
    57  		{
    58  			description: "pod doesn't exist in test namespace",
    59  			pods: []*v1.Pod{
    60  				{
    61  					ObjectMeta: metav1.ObjectMeta{
    62  						Name:      "foo",
    63  						Namespace: "foo-ns",
    64  					},
    65  					TypeMeta: metav1.TypeMeta{Kind: "Pod"},
    66  				},
    67  			},
    68  			expected: nil,
    69  		},
    70  		{
    71  			description: "pod is in Waiting condition with error",
    72  			pods: []*v1.Pod{{
    73  				ObjectMeta: metav1.ObjectMeta{
    74  					Name:      "foo",
    75  					Namespace: "test",
    76  				},
    77  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
    78  				Status: v1.PodStatus{
    79  					Phase:      v1.PodPending,
    80  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
    81  					ContainerStatuses: []v1.ContainerStatus{
    82  						{
    83  							Name:  "foo-container",
    84  							Image: "foo-image",
    85  							State: v1.ContainerState{
    86  								Waiting: &v1.ContainerStateWaiting{
    87  									Reason:  "ErrImagePull",
    88  									Message: "rpc error: code = Unknown desc = Error response from daemon: pull access denied for leeroy-web1, repository does not exist or may require 'docker login': denied: requested access to the resource is denied",
    89  								},
    90  							},
    91  						},
    92  					},
    93  				},
    94  			}},
    95  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
    96  				&proto.ActionableErr{
    97  					Message: "container foo-container is waiting to start: foo-image can't be pulled",
    98  					ErrCode: proto.StatusCode_STATUSCHECK_IMAGE_PULL_ERR,
    99  					Suggestions: []*proto.Suggestion{{
   100  						SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_IMAGE,
   101  						Action:         "Try checking container config `image`",
   102  					}},
   103  				}, nil)},
   104  		},
   105  		{
   106  			description: "pod is in Waiting condition due to ErrImagePullBackOff",
   107  			pods: []*v1.Pod{{
   108  				ObjectMeta: metav1.ObjectMeta{
   109  					Name:      "foo",
   110  					Namespace: "test",
   111  				},
   112  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   113  				Status: v1.PodStatus{
   114  					Phase:      v1.PodPending,
   115  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   116  					ContainerStatuses: []v1.ContainerStatus{
   117  						{
   118  							Name:  "foo-container",
   119  							Image: "foo-image",
   120  							State: v1.ContainerState{
   121  								Waiting: &v1.ContainerStateWaiting{
   122  									Reason:  "ErrImagePullBackOff",
   123  									Message: "rpc error: code = Unknown desc = Error response from daemon: pull access denied for leeroy-web1, repository does not exist or may require 'docker login': denied: requested access to the resource is denied",
   124  								},
   125  							},
   126  						},
   127  					},
   128  				},
   129  			}},
   130  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   131  				&proto.ActionableErr{
   132  					Message: "container foo-container is waiting to start: foo-image can't be pulled",
   133  					ErrCode: proto.StatusCode_STATUSCHECK_IMAGE_PULL_ERR,
   134  					Suggestions: []*proto.Suggestion{{
   135  						SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_IMAGE,
   136  						Action:         "Try checking container config `image`",
   137  					}},
   138  				}, nil)},
   139  		},
   140  		{
   141  			description: "pod is in Waiting condition due to an ImagePullBackOff error",
   142  			pods: []*v1.Pod{{
   143  				ObjectMeta: metav1.ObjectMeta{
   144  					Name:      "foo",
   145  					Namespace: "test",
   146  				},
   147  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   148  				Status: v1.PodStatus{
   149  					Phase:      v1.PodPending,
   150  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   151  					ContainerStatuses: []v1.ContainerStatus{
   152  						{
   153  							Name:  "foo-container",
   154  							Image: "foo-image",
   155  							State: v1.ContainerState{
   156  								Waiting: &v1.ContainerStateWaiting{
   157  									Reason:  "ImagePullBackOff",
   158  									Message: "rpc error: code = Unknown desc = Error response from daemon: pull access denied for leeroy-web1, repository does not exist or may require 'docker login': denied: requested access to the resource is denied",
   159  								},
   160  							},
   161  						},
   162  					},
   163  				},
   164  			}},
   165  			events: []v1.Event{
   166  				{
   167  					ObjectMeta: metav1.ObjectMeta{Namespace: "test"},
   168  					Reason:     "Failed", Type: "Warning", Message: "Failed to pull image foo-image: rpc error: code = Unknown desc = Error response from daemon: pull access denied for foo-image, repository does not exist or may require 'docker login'",
   169  				},
   170  			},
   171  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   172  				&proto.ActionableErr{
   173  					Message: "container foo-container is waiting to start: foo-image can't be pulled",
   174  					ErrCode: proto.StatusCode_STATUSCHECK_IMAGE_PULL_ERR,
   175  					Suggestions: []*proto.Suggestion{{
   176  						SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_IMAGE,
   177  						Action:         "Try checking container config `image`",
   178  					}},
   179  				}, nil)},
   180  		},
   181  		{
   182  			description: "pod is in Terminated State",
   183  			pods: []*v1.Pod{{
   184  				ObjectMeta: metav1.ObjectMeta{
   185  					Name:      "foo",
   186  					Namespace: "test",
   187  				},
   188  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   189  				Status: v1.PodStatus{
   190  					Phase:      v1.PodSucceeded,
   191  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   192  				},
   193  			}},
   194  			expected: []Resource{NewResource("test", "Pod", "foo", "Succeeded",
   195  				&proto.ActionableErr{
   196  					Message: "",
   197  					ErrCode: proto.StatusCode_STATUSCHECK_SUCCESS,
   198  				}, nil)},
   199  		},
   200  		{
   201  			description: "all pod containers are ready",
   202  			pods: []*v1.Pod{{
   203  				ObjectMeta: metav1.ObjectMeta{
   204  					Name:      "foo",
   205  					Namespace: "test",
   206  				},
   207  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   208  				Status: v1.PodStatus{
   209  					Phase:      v1.PodSucceeded,
   210  					Conditions: []v1.PodCondition{{Type: v1.ContainersReady, Status: v1.ConditionTrue}},
   211  				},
   212  			}},
   213  			expected: []Resource{NewResource("test", "Pod", "foo", "Succeeded",
   214  				&proto.ActionableErr{
   215  					Message: "",
   216  					ErrCode: proto.StatusCode_STATUSCHECK_SUCCESS,
   217  				}, nil)},
   218  		},
   219  		{
   220  			description: "One of the pod containers is in Terminated State",
   221  			pods: []*v1.Pod{{
   222  				ObjectMeta: metav1.ObjectMeta{
   223  					Name:      "foo",
   224  					Namespace: "test",
   225  				},
   226  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   227  				Status: v1.PodStatus{
   228  					Phase:      v1.PodRunning,
   229  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   230  					ContainerStatuses: []v1.ContainerStatus{
   231  						{
   232  							Name:  "foo-container",
   233  							Image: "foo-image",
   234  							State: v1.ContainerState{
   235  								Terminated: &v1.ContainerStateTerminated{ExitCode: 0},
   236  							},
   237  						},
   238  					},
   239  				},
   240  			}},
   241  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   242  				&proto.ActionableErr{
   243  					Message: "",
   244  					ErrCode: proto.StatusCode_STATUSCHECK_SUCCESS,
   245  				}, nil)},
   246  		},
   247  		{
   248  			description: "one of the pod containers is in Terminated state with a non-zero exit code and the pod Ready condition is false",
   249  			pods: []*v1.Pod{{
   250  				ObjectMeta: metav1.ObjectMeta{
   251  					Name:      "foo",
   252  					Namespace: "test",
   253  				},
   254  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   255  				Status: v1.PodStatus{
   256  					Phase:      v1.PodRunning,
   257  					Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}},
   258  					ContainerStatuses: []v1.ContainerStatus{
   259  						{
   260  							Name:  "foo-container",
   261  							Image: "foo-image",
   262  							State: v1.ContainerState{
   263  								Terminated: &v1.ContainerStateTerminated{ExitCode: 1, Message: "panic caused"},
   264  							},
   265  						},
   266  					},
   267  				},
   268  			}},
   269  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   270  				&proto.ActionableErr{
   271  					Message: "container foo-container terminated with exit code 1",
   272  					ErrCode: proto.StatusCode_STATUSCHECK_CONTAINER_TERMINATED,
   273  					Suggestions: []*proto.Suggestion{
   274  						{
   275  							SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_LOGS,
   276  							Action:         "Try checking container logs",
   277  						},
   278  					},
   279  				}, []string{})},
   280  		},
   281  		{
   282  			description: "one of the pod containers is in Terminated state with a non-zero exit code",
   283  			pods: []*v1.Pod{{
   284  				ObjectMeta: metav1.ObjectMeta{
   285  					Name:      "foo",
   286  					Namespace: "test",
   287  				},
   288  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   289  				Status: v1.PodStatus{
   290  					Phase:      v1.PodRunning,
   291  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   292  					ContainerStatuses: []v1.ContainerStatus{
   293  						{
   294  							Name:  "foo-container",
   295  							Image: "foo-image",
   296  							State: v1.ContainerState{
   297  								Terminated: &v1.ContainerStateTerminated{ExitCode: 1, Message: "panic caused"},
   298  							},
   299  						},
   300  					},
   301  				},
   302  			}},
   303  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   304  				&proto.ActionableErr{
   305  					Message: "container foo-container terminated with exit code 1",
   306  					ErrCode: proto.StatusCode_STATUSCHECK_CONTAINER_TERMINATED,
   307  					Suggestions: []*proto.Suggestion{
   308  						{
   309  							SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_LOGS,
   310  							Action:         "Try checking container logs",
   311  						},
   312  					},
   313  				}, []string{})},
   314  		},
   315  		{
   316  			description: "pod is in Stable State",
   317  			pods: []*v1.Pod{{
   318  				ObjectMeta: metav1.ObjectMeta{
   319  					Name:      "foo",
   320  					Namespace: "test",
   321  				},
   322  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   323  				Status: v1.PodStatus{
   324  					Phase:      v1.PodRunning,
   325  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   326  					ContainerStatuses: []v1.ContainerStatus{
   327  						{
   328  							Name:  "foo-container",
   329  							State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
   330  						},
   331  					},
   332  				},
   333  			}},
   334  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   335  				&proto.ActionableErr{
   336  					Message: "",
   337  					ErrCode: proto.StatusCode_STATUSCHECK_SUCCESS,
   338  				}, nil)},
   339  		},
   340  		{
   341  			description: "pod condition unknown",
   342  			pods: []*v1.Pod{{
   343  				ObjectMeta: metav1.ObjectMeta{
   344  					Name:      "foo",
   345  					Namespace: "test",
   346  				},
   347  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   348  				Status: v1.PodStatus{
   349  					Phase: v1.PodPending,
   350  					Conditions: []v1.PodCondition{{
   351  						Type:    v1.PodScheduled,
   352  						Status:  v1.ConditionUnknown,
   353  						Message: "could not determine",
   354  					}},
   355  				},
   356  			}},
   357  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   358  				&proto.ActionableErr{
   359  					Message: "could not determine",
   360  					ErrCode: proto.StatusCode_STATUSCHECK_UNKNOWN,
   361  				}, nil)},
   362  		},
   363  		{
   364  			description: "pod could not be scheduled",
   365  			pods: []*v1.Pod{{
   366  				ObjectMeta: metav1.ObjectMeta{
   367  					Name:      "foo",
   368  					Namespace: "test",
   369  				},
   370  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   371  				Status: v1.PodStatus{
   372  					Phase: v1.PodPending,
   373  					Conditions: []v1.PodCondition{{
   374  						Type:   v1.PodScheduled,
   375  						Status: v1.ConditionFalse,
   376  						Reason: v1.PodReasonUnschedulable,
   377  						Message: "0/7 nodes are available: " +
   378  							"1 node(s) had taint {node.kubernetes.io/memory-pressure: }, that the pod didn't tolerate, " +
   379  							"1 node(s) had taint {node.kubernetes.io/disk-pressure: }, that the pod didn't tolerate, " +
   380  							"1 node(s) had taint {node.kubernetes.io/pid-pressure: }, that the pod didn't tolerate, " +
   381  							"1 node(s) had taint {node.kubernetes.io/not-ready: }, that the pod didn't tolerate, " +
   382  							"1 node(s) had taint {node.kubernetes.io/unreachable: }, that the pod didn't tolerate, " +
   383  							"1 node(s) had taint {node.kubernetes.io/unschedulable: }, that the pod didn't tolerate, " +
   384  							"1 node(s) had taint {node.kubernetes.io/network-unavailable: }, that the pod didn't tolerate, ",
   385  					}},
   386  				},
   387  			}},
   388  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   389  				&proto.ActionableErr{
   390  					Message: "Unschedulable: 0/7 nodes available: 1 node has memory pressure, " +
   391  						"1 node has disk pressure, 1 node has PID pressure, 1 node is not ready, " +
   392  						"1 node is unreachable, 1 node is unschedulable, 1 node's network not available",
   393  					ErrCode: proto.StatusCode_STATUSCHECK_NODE_PID_PRESSURE,
   394  				}, nil)},
   395  		},
   396  		{
   397  			description: "pod is running but container terminated",
   398  			pods: []*v1.Pod{{
   399  				ObjectMeta: metav1.ObjectMeta{
   400  					Name:      "foo",
   401  					Namespace: "test",
   402  				},
   403  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   404  				Status: v1.PodStatus{
   405  					Phase:      v1.PodRunning,
   406  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   407  					ContainerStatuses: []v1.ContainerStatus{
   408  						{
   409  							Name:  "foo-container",
   410  							State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}},
   411  						},
   412  					},
   413  				},
   414  			}},
   415  			logOutput: mockLogOutput{
   416  				output: []byte("main.go:57 \ngo panic"),
   417  			},
   418  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   419  				&proto.ActionableErr{
   420  					Message: "container foo-container terminated with exit code 1",
   421  					ErrCode: proto.StatusCode_STATUSCHECK_CONTAINER_TERMINATED,
   422  					Suggestions: []*proto.Suggestion{{
   423  						SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_LOGS,
   424  						Action:         "Try checking container logs",
   425  					}},
   426  				}, []string{
   427  					"[foo foo-container] main.go:57 ",
   428  					"[foo foo-container] go panic",
   429  				},
   430  			)},
   431  		},
   432  		{
   433  			description: "pod is running but container terminated but could not retrieve logs",
   434  			pods: []*v1.Pod{{
   435  				ObjectMeta: metav1.ObjectMeta{
   436  					Name:      "foo",
   437  					Namespace: "test",
   438  				},
   439  				Status: v1.PodStatus{
   440  					Phase:      v1.PodRunning,
   441  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   442  					ContainerStatuses: []v1.ContainerStatus{
   443  						{
   444  							Name:  "foo-container",
   445  							State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}},
   446  						},
   447  					},
   448  				},
   449  			}},
   450  			logOutput: mockLogOutput{
   451  				err: fmt.Errorf("error retrieving"),
   452  			},
   453  			expected: []Resource{NewResource("test", "pod", "foo", "Running",
   454  				&proto.ActionableErr{
   455  					Message: "container foo-container terminated with exit code 1",
   456  					ErrCode: proto.StatusCode_STATUSCHECK_CONTAINER_TERMINATED,
   457  					Suggestions: []*proto.Suggestion{{
   458  						SuggestionCode: proto.SuggestionCode_CHECK_CONTAINER_LOGS,
   459  						Action:         "Try checking container logs",
   460  					}},
   461  				}, []string{
   462  					"Error retrieving logs for pod foo: error retrieving.\nTry `kubectl logs foo -n test -c foo-container`",
   463  				},
   464  			)},
   465  		},
   466  		// Event test cases
   467  		{
   468  			description: "pod condition with events",
   469  			pods: []*v1.Pod{{
   470  				ObjectMeta: metav1.ObjectMeta{
   471  					Name:      "foo",
   472  					Namespace: "test",
   473  				},
   474  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   475  				Status: v1.PodStatus{
   476  					Phase: v1.PodPending,
   477  					Conditions: []v1.PodCondition{{
   478  						Type:    v1.PodScheduled,
   479  						Status:  v1.ConditionUnknown,
   480  						Message: "could not determine",
   481  					}},
   482  				},
   483  			}},
   484  			events: []v1.Event{
   485  				{
   486  					ObjectMeta: metav1.ObjectMeta{Namespace: "test"},
   487  					Reason:     "eventCode", Type: "Warning", Message: "dummy event",
   488  				},
   489  			},
   490  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   491  				&proto.ActionableErr{
   492  					Message: "eventCode: dummy event",
   493  					ErrCode: proto.StatusCode_STATUSCHECK_UNKNOWN_EVENT,
   494  				}, nil)},
   495  		},
   496  		{
   497  			description: "pod condition with a warning event followed by a normal event",
   498  			pods: []*v1.Pod{{
   499  				ObjectMeta: metav1.ObjectMeta{
   500  					Name:      "foo",
   501  					Namespace: "test",
   502  				},
   503  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   504  				Status: v1.PodStatus{
   505  					Phase: v1.PodPending,
   506  					Conditions: []v1.PodCondition{{
   507  						Type:    v1.PodScheduled,
   508  						Status:  v1.ConditionUnknown,
   509  						Message: "could not determine",
   510  					}},
   511  				},
   512  			}},
   513  			events: []v1.Event{
   514  				{
   515  					ObjectMeta: metav1.ObjectMeta{Name: "one", Namespace: "test"},
   516  					Reason:     "eventCode", Type: "Warning", Message: "dummy event",
   517  					LastTimestamp: metav1.Time{Time: before},
   518  				},
   519  				{
   520  					ObjectMeta: metav1.ObjectMeta{Name: "two", Namespace: "test"},
   521  					Reason:     "Created", Type: "Normal", Message: "Container Created",
   522  					LastTimestamp: metav1.Time{Time: after},
   523  				},
   524  			},
   525  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   526  				&proto.ActionableErr{
   527  					Message: "could not determine",
   528  					ErrCode: proto.StatusCode_STATUSCHECK_UNKNOWN,
   529  				}, nil)},
   530  		},
   531  		{
   532  			description: "pod condition with a normal event followed by a warning event",
   533  			pods: []*v1.Pod{{
   534  				ObjectMeta: metav1.ObjectMeta{
   535  					Name:      "foo",
   536  					Namespace: "test",
   537  				},
   538  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   539  				Status: v1.PodStatus{
   540  					Phase: v1.PodPending,
   541  					Conditions: []v1.PodCondition{{
   542  						Type:    v1.PodScheduled,
   543  						Status:  v1.ConditionUnknown,
   544  						Message: "could not determine",
   545  					}},
   546  				},
   547  			}},
   548  			events: []v1.Event{
   549  				{
   550  					ObjectMeta: metav1.ObjectMeta{Name: "two", Namespace: "test"},
   551  					Reason:     "Created", Type: "Normal", Message: "Container Created",
   552  					LastTimestamp: metav1.Time{Time: before},
   553  				},
   554  				{
   555  					ObjectMeta: metav1.ObjectMeta{Name: "one", Namespace: "test"},
   556  					Reason:     "eventCode", Type: "Warning", Message: "dummy event",
   557  					LastTimestamp: metav1.Time{Time: after},
   558  				},
   559  			},
   560  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   561  				&proto.ActionableErr{
   562  					Message: "eventCode: dummy event",
   563  					ErrCode: proto.StatusCode_STATUSCHECK_UNKNOWN_EVENT,
   564  				}, nil)},
   565  		},
   566  		{
   567  			description: "pod condition with a warning event followed by another warning adds the last warning seen",
   568  			pods: []*v1.Pod{{
   569  				ObjectMeta: metav1.ObjectMeta{
   570  					Name:      "foo",
   571  					Namespace: "test",
   572  				},
   573  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   574  				Status: v1.PodStatus{
   575  					Phase: v1.PodPending,
   576  					Conditions: []v1.PodCondition{{
   577  						Type:    v1.PodScheduled,
   578  						Status:  v1.ConditionUnknown,
   579  						Message: "could not determine",
   580  					}},
   581  				},
   582  			}},
   583  			events: []v1.Event{
   584  				{
   585  					ObjectMeta: metav1.ObjectMeta{Name: "two", Namespace: "test"}, Reason: "FailedScheduling", Type: "Warning",
   586  					Message:       "0/1 nodes are available: 1 node(s) had taint {key: value}, that the pod didn't tolerate",
   587  					LastTimestamp: metav1.Time{Time: after},
   588  				},
   589  				{
   590  					ObjectMeta: metav1.ObjectMeta{Name: "one", Namespace: "test"},
   591  					Reason:     "eventCode", Type: "Warning", Message: "dummy event",
   592  					LastTimestamp: metav1.Time{Time: before},
   593  				},
   594  			},
   595  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   596  				&proto.ActionableErr{
   597  					Message: "0/1 nodes are available: 1 node(s) had taint {key: value}, that the pod didn't tolerate",
   598  					ErrCode: proto.StatusCode_STATUSCHECK_FAILED_SCHEDULING,
   599  				}, nil)},
   600  		},
   601  		{
   602  			description: "health check failed",
   603  			pods: []*v1.Pod{{
   604  				ObjectMeta: metav1.ObjectMeta{
   605  					Name:      "foo",
   606  					Namespace: "test",
   607  				},
   608  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   609  				Status: v1.PodStatus{
   610  					Phase: v1.PodRunning,
   611  					Conditions: []v1.PodCondition{{
   612  						Type:   v1.PodScheduled,
   613  						Status: v1.ConditionTrue,
   614  					}},
   615  				},
   616  			}},
   617  			events: []v1.Event{
   618  				{
   619  					ObjectMeta: metav1.ObjectMeta{Name: "two", Namespace: "test"}, Reason: "Unhealthy", Type: "Warning",
   620  					Message:   "Readiness probe failed: cat: /tmp/healthy: No such file or directory",
   621  					EventTime: metav1.MicroTime{Time: after},
   622  				},
   623  			},
   624  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   625  				&proto.ActionableErr{
   626  					Message: "Readiness probe failed: cat: /tmp/healthy: No such file or directory",
   627  					ErrCode: proto.StatusCode_STATUSCHECK_UNHEALTHY,
   628  					Suggestions: []*proto.Suggestion{
   629  						{
   630  							SuggestionCode: proto.SuggestionCode_CHECK_READINESS_PROBE,
   631  							Action:         "Try checking container config `readinessProbe`",
   632  						},
   633  					},
   634  				}, nil)},
   635  		},
   636  		{
   637  			description: "One of the pod containers is in Terminated state with a non-zero exit code, followed by a Waiting state",
   638  			pods: []*v1.Pod{{
   639  				ObjectMeta: metav1.ObjectMeta{
   640  					Name:      "foo",
   641  					Namespace: "test",
   642  				},
   643  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   644  				Status: v1.PodStatus{
   645  					Phase:      v1.PodRunning,
   646  					Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}},
   647  					ContainerStatuses: []v1.ContainerStatus{
   648  						{
   649  							Name:  "foo-success",
   650  							Image: "foo-image",
   651  							State: v1.ContainerState{
   652  								Terminated: &v1.ContainerStateTerminated{ExitCode: 0},
   653  							},
   654  						},
   655  						{
   656  							Name:  "foo-container",
   657  							Image: "foo-image",
   658  							State: v1.ContainerState{
   659  								Waiting: &v1.ContainerStateWaiting{
   660  									Reason:  "CrashLoopBackOff",
   661  									Message: "Back off restarting container",
   662  								},
   663  							},
   664  						},
   665  					},
   666  				},
   667  			}},
   668  			logOutput: mockLogOutput{
   669  				output: []byte("some panic"),
   670  			},
   671  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   672  				&proto.ActionableErr{
   673  					Message: "container foo-container is backing off waiting to restart",
   674  					ErrCode: proto.StatusCode_STATUSCHECK_CONTAINER_RESTARTING,
   675  				}, []string{"[foo foo-container] some panic"})},
   676  		},
   677  		{
   678  			description: "pod condition with events when pod is in Initializing phase",
   679  			pods: []*v1.Pod{{
   680  				ObjectMeta: metav1.ObjectMeta{
   681  					Name:      "foo",
   682  					Namespace: "test",
   683  				},
   684  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   685  				Status: v1.PodStatus{
   686  					Phase: v1.PodPending,
   687  					Conditions: []v1.PodCondition{{
   688  						Type:   v1.PodScheduled,
   689  						Status: v1.ConditionTrue,
   690  					}},
   691  					ContainerStatuses: []v1.ContainerStatus{
   692  						{
   693  							Name:  "foo-container",
   694  							Image: "foo-image",
   695  							State: v1.ContainerState{
   696  								Waiting: &v1.ContainerStateWaiting{
   697  									Reason:  "PodInitializing",
   698  									Message: "waiting to initialize",
   699  								},
   700  							},
   701  						},
   702  					},
   703  				},
   704  			}},
   705  			events: []v1.Event{
   706  				{
   707  					ObjectMeta: metav1.ObjectMeta{Namespace: "test"},
   708  					Reason:     "eventCode", Type: "Warning", Message: "dummy event",
   709  				},
   710  			},
   711  			expected: []Resource{NewResource("test", "Pod", "foo", "Pending",
   712  				&proto.ActionableErr{
   713  					Message: "eventCode: dummy event",
   714  					ErrCode: proto.StatusCode_STATUSCHECK_UNKNOWN_EVENT,
   715  				}, nil)},
   716  		},
   717  		{
   718  			description: "pod terminated with exec error",
   719  			pods: []*v1.Pod{{
   720  				ObjectMeta: metav1.ObjectMeta{
   721  					Name:      "foo",
   722  					Namespace: "test",
   723  				},
   724  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   725  				Status: v1.PodStatus{
   726  					Phase:      v1.PodRunning,
   727  					Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}},
   728  					ContainerStatuses: []v1.ContainerStatus{
   729  						{
   730  							Name:  "foo-container",
   731  							Image: "foo-image",
   732  							State: v1.ContainerState{
   733  								Terminated: &v1.ContainerStateTerminated{ExitCode: 1, Message: "panic caused"},
   734  							},
   735  						},
   736  					},
   737  				},
   738  			}},
   739  			logOutput: mockLogOutput{
   740  				output: []byte("standard_init_linux.go:219: exec user process caused: exec format error"),
   741  			},
   742  			expected: []Resource{NewResource("test", "Pod", "foo", "Running",
   743  				&proto.ActionableErr{
   744  					Message: "container foo-container terminated with exit code 1",
   745  					ErrCode: proto.StatusCode_STATUSCHECK_CONTAINER_EXEC_ERROR,
   746  				}, []string{"[foo foo-container] standard_init_linux.go:219: exec user process caused: exec format error"})},
   747  		},
   748  
   749  		// Cases that diagnose pods through their owner references
   750  		{
   751  			description: "pods owned by a UID",
   752  			uid:         "foo",
   753  			pods: []*v1.Pod{{
   754  				ObjectMeta: metav1.ObjectMeta{
   755  					Name:      "foo",
   756  					Namespace: "test",
   757  				},
   758  				TypeMeta: metav1.TypeMeta{Kind: "Pod"},
   759  				Status: v1.PodStatus{
   760  					Phase:      v1.PodRunning,
   761  					Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}},
   762  					ContainerStatuses: []v1.ContainerStatus{
   763  						{
   764  							Name:  "foo-container",
   765  							Image: "foo-image",
   766  							State: v1.ContainerState{
   767  								Terminated: &v1.ContainerStateTerminated{ExitCode: 1, Message: "panic caused"},
   768  							},
   769  						},
   770  					},
   771  				},
   772  			}},
   773  		},
   774  	}
   775  
   776  	for _, test := range tests {
   777  		testutil.Run(t, test.description, func(t *testutil.T) {
   778  			rs := make([]runtime.Object, len(test.pods))
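        			// mRun stubs the CLI invocation used to fetch container logs and asserts
        			// the exact kubectl command the validator builds.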
   779  			mRun := func(n string, args []string) ([]byte, error) {
   780  				actualCommand := strings.Join(append([]string{n}, args...), " ")
   781  				if expected := "kubectl logs foo -n test -c foo-container"; actualCommand != expected {
   782  					t.Errorf("got %s, expected %s", actualCommand, expected)
   783  				}
   784  				return test.logOutput.output, test.logOutput.err
   785  			}
   786  			t.Override(&runCli, mRun)
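        			// Stub ReplicaSet resolution so the Deployment pod selector sees a
        			// ReplicaSet whose UID comes from the test case.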
   787  			t.Override(&getReplicaSet, func(_ *appsv1.Deployment, _ appsclient.AppsV1Interface) ([]*appsv1.ReplicaSet, []*appsv1.ReplicaSet, *appsv1.ReplicaSet, error) {
   788  				return nil, nil, &appsv1.ReplicaSet{
   789  					ObjectMeta: metav1.ObjectMeta{
   790  						UID: types.UID(test.uid),
   791  					},
   792  				}, nil
   793  			})
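        			// Give every pod an empty controller owner UID; a pod is validated only
        			// when that UID matches the fake ReplicaSet's UID.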
   794  			for i, p := range test.pods {
   795  				p.OwnerReferences = []metav1.OwnerReference{{UID: "", Controller: truePtr()}}
   796  				rs[i] = p
   797  			}
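        			// Register the test case's events in the fake clientset alongside the pods.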
   798  			rs = append(rs, &v1.EventList{Items: test.events})
   799  			f := fakekubeclientset.NewSimpleClientset(rs...)
   800  
   801  			actual, err := testPodValidator(f).Validate(context.Background(), "test", metav1.ListOptions{})
   802  			t.CheckNoError(err)
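        			// Resources are compared structurally: errors by message, proto fields via protocmp.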
   803  			t.CheckDeepEqual(test.expected, actual, cmp.AllowUnexported(Resource{}), cmp.Comparer(func(x, y error) bool {
   804  				if x == nil && y == nil {
   805  					return true
   806  				} else if x != nil && y != nil {
   807  					return x.Error() == y.Error()
   808  				}
   809  				return false
   810  			}), protocmp.Transform())
   811  		})
   812  	}
   813  }
   814  
   815  // testPodValidator initializes a PodValidator like NewPodValidator, but without loading custom rules
   816  func testPodValidator(k kubernetes.Interface) *PodValidator {
   817  	rs := []Recommender{recommender.ContainerError{}}
   818  	return &PodValidator{k: k, recos: rs, podSelector: NewDeploymentPodsSelector(k, appsv1.Deployment{})}
   819  }
   820  
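        // TestPodConditionChecks verifies the isPodReady, isPodNotScheduled,
        // isPodScheduledButNotReady, and isPodStatusUnknown helpers.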
   821  func TestPodConditionChecks(t *testing.T) {
   822  	tests := []struct {
   823  		description string
   824  		conditions  []v1.PodCondition
   825  		expected    result
   826  	}{
   827  		{
   828  			description: "pod is ready",
   829  			conditions: []v1.PodCondition{
   830  				{Type: v1.PodReady, Status: v1.ConditionTrue},
   831  				{Type: v1.ContainersReady, Status: v1.ConditionTrue},
   832  			},
   833  			expected: result{isReady: true},
   834  		},
   835  		{
   836  			description: "pod scheduling failed",
   837  			conditions: []v1.PodCondition{
   838  				{Type: v1.PodScheduled, Status: v1.ConditionFalse},
   839  			},
   840  			expected: result{isNotScheduled: true},
   841  		},
   842  		{
   843  			description: "pod scheduled with no ready event",
   844  			conditions: []v1.PodCondition{
   845  				{Type: v1.PodScheduled, Status: v1.ConditionTrue},
   846  			},
   847  			expected: result{isScheduledNotReady: true},
   848  		},
   849  		{
   850  			description: "pod is scheduled, with failed containers ready event",
   851  			conditions: []v1.PodCondition{
   852  				{Type: v1.ContainersReady, Status: v1.ConditionFalse},
   853  			},
   854  			expected: result{isScheduledNotReady: true},
   855  		},
   856  		{
   857  			description: "pod is scheduled, with failed pod ready event",
   858  			conditions: []v1.PodCondition{
   859  				{Type: v1.PodReady, Status: v1.ConditionFalse},
   860  			},
   861  			expected: result{isScheduledNotReady: true},
   862  		},
   863  		{
   864  			description: "pod status is unknown",
   865  			conditions: []v1.PodCondition{
   866  				{Type: v1.PodScheduled, Status: v1.ConditionUnknown},
   867  			},
   868  			expected: result{isUnknown: true},
   869  		},
   870  	}
   871  	for _, test := range tests {
   872  		testutil.Run(t, test.description, func(t *testutil.T) {
   873  			pod := v1.Pod{Status: v1.PodStatus{Conditions: test.conditions}}
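        			// Run every helper against the same pod and collect the outcomes.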
   874  			r := result{}
   875  			r.isReady = isPodReady(&pod)
   876  			_, r.isNotScheduled = isPodNotScheduled(&pod)
   877  			r.isScheduledNotReady = isPodScheduledButNotReady(&pod)
   878  			_, r.isUnknown = isPodStatusUnknown(&pod)
   879  			t.CheckDeepEqual(test.expected, r, cmp.AllowUnexported(result{}))
   880  		})
   881  	}
   882  }
   883  
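        // result records which pod-condition helpers reported true for a test case.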
   884  type result struct {
   885  	isReady             bool
   886  	isNotScheduled      bool
   887  	isScheduledNotReady bool
   888  	isUnknown           bool
   889  }
   890  
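        // truePtr returns a pointer to true, as required by OwnerReference.Controller.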
   891  func truePtr() *bool {
   892  	t := true
   893  	return &t
   894  }