k8s.io/kubernetes@v1.29.3/pkg/kubelet/kubelet_pods_test.go

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package kubelet
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"net"
    24  	"os"
    25  	"path/filepath"
    26  	"reflect"
    27  	"sort"
    28  	"strings"
    29  	"testing"
    30  	"time"
    31  
    32  	"github.com/google/go-cmp/cmp"
    33  	"github.com/stretchr/testify/assert"
    34  	"github.com/stretchr/testify/require"
    35  	v1 "k8s.io/api/core/v1"
    36  	apiequality "k8s.io/apimachinery/pkg/api/equality"
    37  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    38  	"k8s.io/apimachinery/pkg/api/resource"
    39  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    40  	"k8s.io/apimachinery/pkg/labels"
    41  	"k8s.io/apimachinery/pkg/runtime"
    42  	"k8s.io/apimachinery/pkg/types"
    43  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    44  	core "k8s.io/client-go/testing"
    45  	"k8s.io/client-go/tools/record"
    46  	featuregatetesting "k8s.io/component-base/featuregate/testing"
    47  	"k8s.io/component-base/metrics/testutil"
    48  	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
    49  	"k8s.io/kubelet/pkg/cri/streaming/portforward"
    50  	"k8s.io/kubelet/pkg/cri/streaming/remotecommand"
    51  	_ "k8s.io/kubernetes/pkg/apis/core/install"
    52  	"k8s.io/kubernetes/pkg/features"
    53  	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    54  	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
    55  	"k8s.io/kubernetes/pkg/kubelet/metrics"
    56  	"k8s.io/kubernetes/pkg/kubelet/prober/results"
    57  	"k8s.io/kubernetes/pkg/kubelet/secret"
    58  	"k8s.io/kubernetes/pkg/kubelet/status"
    59  	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
    60  	netutils "k8s.io/utils/net"
    61  )
    62  
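         // containerRestartPolicyAlways is an addressable copy of
         // v1.ContainerRestartPolicyAlways, for tests that need a *v1.ContainerRestartPolicy.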
    63  var containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
    64  
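         // TestNodeHostsFileContent verifies the hosts file produced for host-network pods:
         // the node's own hosts file is reused, prefixed with the managed-file header, with
         // any HostAliases entries appended.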
    65  func TestNodeHostsFileContent(t *testing.T) {
    66  	testCases := []struct {
    67  		hostsFileName            string
    68  		hostAliases              []v1.HostAlias
    69  		rawHostsFileContent      string
    70  		expectedHostsFileContent string
    71  	}{
    72  		{
    73  			hostsFileName: "hosts_test_file1",
    74  			hostAliases:   []v1.HostAlias{},
    75  			rawHostsFileContent: `# hosts file for testing.
    76  127.0.0.1	localhost
    77  ::1	localhost ip6-localhost ip6-loopback
    78  fe00::0	ip6-localnet
    79  fe00::0	ip6-mcastprefix
    80  fe00::1	ip6-allnodes
    81  fe00::2	ip6-allrouters
    82  123.45.67.89	some.domain
    83  `,
    84  			expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
    85  # hosts file for testing.
    86  127.0.0.1	localhost
    87  ::1	localhost ip6-localhost ip6-loopback
    88  fe00::0	ip6-localnet
    89  fe00::0	ip6-mcastprefix
    90  fe00::1	ip6-allnodes
    91  fe00::2	ip6-allrouters
    92  123.45.67.89	some.domain
    93  `,
    94  		},
    95  		{
    96  			hostsFileName: "hosts_test_file2",
    97  			hostAliases:   []v1.HostAlias{},
    98  			rawHostsFileContent: `# another hosts file for testing.
    99  127.0.0.1	localhost
   100  ::1	localhost ip6-localhost ip6-loopback
   101  fe00::0	ip6-localnet
   102  fe00::0	ip6-mcastprefix
   103  fe00::1	ip6-allnodes
   104  fe00::2	ip6-allrouters
   105  12.34.56.78	another.domain
   106  `,
   107  			expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
   108  # another hosts file for testing.
   109  127.0.0.1	localhost
   110  ::1	localhost ip6-localhost ip6-loopback
   111  fe00::0	ip6-localnet
   112  fe00::0	ip6-mcastprefix
   113  fe00::1	ip6-allnodes
   114  fe00::2	ip6-allrouters
   115  12.34.56.78	another.domain
   116  `,
   117  		},
   118  		{
   119  			hostsFileName: "hosts_test_file1_with_host_aliases",
   120  			hostAliases: []v1.HostAlias{
   121  				{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
   122  			},
   123  			rawHostsFileContent: `# hosts file for testing.
   124  127.0.0.1	localhost
   125  ::1	localhost ip6-localhost ip6-loopback
   126  fe00::0	ip6-localnet
   127  fe00::0	ip6-mcastprefix
   128  fe00::1	ip6-allnodes
   129  fe00::2	ip6-allrouters
   130  123.45.67.89	some.domain
   131  `,
   132  			expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
   133  # hosts file for testing.
   134  127.0.0.1	localhost
   135  ::1	localhost ip6-localhost ip6-loopback
   136  fe00::0	ip6-localnet
   137  fe00::0	ip6-mcastprefix
   138  fe00::1	ip6-allnodes
   139  fe00::2	ip6-allrouters
   140  123.45.67.89	some.domain
   141  
   142  # Entries added by HostAliases.
   143  123.45.67.89	foo	bar	baz
   144  `,
   145  		},
   146  		{
   147  			hostsFileName: "hosts_test_file2_with_host_aliases",
   148  			hostAliases: []v1.HostAlias{
   149  				{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
   150  				{IP: "456.78.90.123", Hostnames: []string{"park", "doo", "boo"}},
   151  			},
   152  			rawHostsFileContent: `# another hosts file for testing.
   153  127.0.0.1	localhost
   154  ::1	localhost ip6-localhost ip6-loopback
   155  fe00::0	ip6-localnet
   156  fe00::0	ip6-mcastprefix
   157  fe00::1	ip6-allnodes
   158  fe00::2	ip6-allrouters
   159  12.34.56.78	another.domain
   160  `,
   161  			expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
   162  # another hosts file for testing.
   163  127.0.0.1	localhost
   164  ::1	localhost ip6-localhost ip6-loopback
   165  fe00::0	ip6-localnet
   166  fe00::0	ip6-mcastprefix
   167  fe00::1	ip6-allnodes
   168  fe00::2	ip6-allrouters
   169  12.34.56.78	another.domain
   170  
   171  # Entries added by HostAliases.
   172  123.45.67.89	foo	bar	baz
   173  456.78.90.123	park	doo	boo
   174  `,
   175  		},
   176  	}
   177  
   178  	for _, testCase := range testCases {
   179  		t.Run(testCase.hostsFileName, func(t *testing.T) {
   180  			tmpdir, err := writeHostsFile(testCase.hostsFileName, testCase.rawHostsFileContent)
   181  			require.NoError(t, err, "could not create a temp hosts file")
   182  			defer os.RemoveAll(tmpdir)
   183  
   184  			actualContent, fileReadErr := nodeHostsFileContent(filepath.Join(tmpdir, testCase.hostsFileName), testCase.hostAliases)
    185  			require.NoError(t, fileReadErr, "could not read hosts file")
    186  			assert.Equal(t, testCase.expectedHostsFileContent, string(actualContent), "unexpected hosts file content")
   187  		})
   188  	}
   189  }
   190  
    191  // writeHostsFile writes a hosts file into a temporary dir and returns that dir.
    192  // The caller is responsible for deleting the dir and its contents.
   193  func writeHostsFile(filename string, cfg string) (string, error) {
    194  	tmpdir, err := os.MkdirTemp("", "kubelet-kubelet_pods_test.go-")
   195  	if err != nil {
   196  		return "", err
   197  	}
   198  	return tmpdir, os.WriteFile(filepath.Join(tmpdir, filename), []byte(cfg), 0644)
   199  }
   200  
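         // TestManagedHostsFileContent verifies the hosts file the kubelet synthesizes for
         // pods with their own network namespace: the localhost boilerplate, the pod's
         // hostname (and FQDN) for each pod IP, and any HostAliases entries.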
   201  func TestManagedHostsFileContent(t *testing.T) {
   202  	testCases := []struct {
   203  		hostIPs         []string
   204  		hostName        string
   205  		hostDomainName  string
   206  		hostAliases     []v1.HostAlias
   207  		expectedContent string
   208  	}{
   209  		{
   210  			hostIPs:     []string{"123.45.67.89"},
   211  			hostName:    "podFoo",
   212  			hostAliases: []v1.HostAlias{},
   213  			expectedContent: `# Kubernetes-managed hosts file.
   214  127.0.0.1	localhost
   215  ::1	localhost ip6-localhost ip6-loopback
   216  fe00::0	ip6-localnet
   217  fe00::0	ip6-mcastprefix
   218  fe00::1	ip6-allnodes
   219  fe00::2	ip6-allrouters
   220  123.45.67.89	podFoo
   221  `,
   222  		},
   223  		{
   224  			hostIPs:        []string{"203.0.113.1"},
   225  			hostName:       "podFoo",
   226  			hostDomainName: "domainFoo",
   227  			hostAliases:    []v1.HostAlias{},
   228  			expectedContent: `# Kubernetes-managed hosts file.
   229  127.0.0.1	localhost
   230  ::1	localhost ip6-localhost ip6-loopback
   231  fe00::0	ip6-localnet
   232  fe00::0	ip6-mcastprefix
   233  fe00::1	ip6-allnodes
   234  fe00::2	ip6-allrouters
   235  203.0.113.1	podFoo.domainFoo	podFoo
   236  `,
   237  		},
   238  		{
   239  			hostIPs:        []string{"203.0.113.1"},
   240  			hostName:       "podFoo",
   241  			hostDomainName: "domainFoo",
   242  			hostAliases: []v1.HostAlias{
   243  				{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
   244  			},
   245  			expectedContent: `# Kubernetes-managed hosts file.
   246  127.0.0.1	localhost
   247  ::1	localhost ip6-localhost ip6-loopback
   248  fe00::0	ip6-localnet
   249  fe00::0	ip6-mcastprefix
   250  fe00::1	ip6-allnodes
   251  fe00::2	ip6-allrouters
   252  203.0.113.1	podFoo.domainFoo	podFoo
   253  
   254  # Entries added by HostAliases.
   255  123.45.67.89	foo	bar	baz
   256  `,
   257  		},
   258  		{
   259  			hostIPs:        []string{"203.0.113.1"},
   260  			hostName:       "podFoo",
   261  			hostDomainName: "domainFoo",
   262  			hostAliases: []v1.HostAlias{
   263  				{IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
   264  				{IP: "456.78.90.123", Hostnames: []string{"park", "doo", "boo"}},
   265  			},
   266  			expectedContent: `# Kubernetes-managed hosts file.
   267  127.0.0.1	localhost
   268  ::1	localhost ip6-localhost ip6-loopback
   269  fe00::0	ip6-localnet
   270  fe00::0	ip6-mcastprefix
   271  fe00::1	ip6-allnodes
   272  fe00::2	ip6-allrouters
   273  203.0.113.1	podFoo.domainFoo	podFoo
   274  
   275  # Entries added by HostAliases.
   276  123.45.67.89	foo	bar	baz
   277  456.78.90.123	park	doo	boo
   278  `,
   279  		},
   280  		{
   281  			hostIPs:        []string{"203.0.113.1", "fd00::6"},
   282  			hostName:       "podFoo",
   283  			hostDomainName: "domainFoo",
   284  			hostAliases:    []v1.HostAlias{},
   285  			expectedContent: `# Kubernetes-managed hosts file.
   286  127.0.0.1	localhost
   287  ::1	localhost ip6-localhost ip6-loopback
   288  fe00::0	ip6-localnet
   289  fe00::0	ip6-mcastprefix
   290  fe00::1	ip6-allnodes
   291  fe00::2	ip6-allrouters
   292  203.0.113.1	podFoo.domainFoo	podFoo
   293  fd00::6	podFoo.domainFoo	podFoo
   294  `,
   295  		},
   296  	}
   297  
   298  	for _, testCase := range testCases {
   299  		actualContent := managedHostsFileContent(testCase.hostIPs, testCase.hostName, testCase.hostDomainName, testCase.hostAliases)
    300  		assert.Equal(t, testCase.expectedContent, string(actualContent), "unexpected hosts file content")
   301  	}
   302  }
   303  
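         // TestRunInContainerNoSuchPod verifies that RunInContainer returns an error when
         // the target pod is not in the runtime's pod list.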
   304  func TestRunInContainerNoSuchPod(t *testing.T) {
   305  	ctx := context.Background()
   306  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   307  	defer testKubelet.Cleanup()
   308  	kubelet := testKubelet.kubelet
   309  	fakeRuntime := testKubelet.fakeRuntime
   310  	fakeRuntime.PodList = []*containertest.FakePod{}
   311  
   312  	podName := "podFoo"
   313  	podNamespace := "nsFoo"
   314  	containerName := "containerFoo"
   315  	output, err := kubelet.RunInContainer(
   316  		ctx,
   317  		kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
   318  		"",
   319  		containerName,
   320  		[]string{"ls"})
   321  	assert.Error(t, err)
   322  	assert.Nil(t, output, "output should be nil")
   323  }
   324  
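         // TestRunInContainer runs a command through a fake command runner, with and without
         // an injected error, and checks the container ID, command, output, and returned error.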
   325  func TestRunInContainer(t *testing.T) {
   326  	ctx := context.Background()
   327  	for _, testError := range []error{nil, errors.New("bar")} {
   328  		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   329  		defer testKubelet.Cleanup()
   330  		kubelet := testKubelet.kubelet
   331  		fakeRuntime := testKubelet.fakeRuntime
   332  		fakeCommandRunner := containertest.FakeContainerCommandRunner{
   333  			Err:    testError,
   334  			Stdout: "foo",
   335  		}
   336  		kubelet.runner = &fakeCommandRunner
   337  
   338  		containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
   339  		fakeRuntime.PodList = []*containertest.FakePod{
   340  			{Pod: &kubecontainer.Pod{
   341  				ID:        "12345678",
   342  				Name:      "podFoo",
   343  				Namespace: "nsFoo",
   344  				Containers: []*kubecontainer.Container{
   345  					{Name: "containerFoo",
   346  						ID: containerID,
   347  					},
   348  				},
   349  			}},
   350  		}
   351  		cmd := []string{"ls"}
   352  		actualOutput, err := kubelet.RunInContainer(ctx, "podFoo_nsFoo", "", "containerFoo", cmd)
   353  		assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
   354  		assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
    355  		// This isn't 100% foolproof: a bug in a real CommandRunner that fails to copy to stdout/stderr wouldn't be caught by this test.
   356  		assert.Equal(t, "foo", string(actualOutput), "(testError=%v) output", testError)
    357  		assert.Equal(t, testError, err, "(testError=%v) err", testError)
   358  	}
   359  }
   360  
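         // testServiceLister is a stub service lister that returns a fixed set of services.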
   361  type testServiceLister struct {
   362  	services []*v1.Service
   363  }
   364  
   365  func (ls testServiceLister) List(labels.Selector) ([]*v1.Service, error) {
   366  	return ls.services, nil
   367  }
   368  
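         // envs implements sort.Interface over kubecontainer.EnvVar slices, ordering by
         // variable name, so generated environments can be sorted before comparison.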
   369  type envs []kubecontainer.EnvVar
   370  
   371  func (e envs) Len() int {
   372  	return len(e)
   373  }
   374  
   375  func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
   376  
   377  func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
   378  
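         // buildService returns a minimal v1.Service with a single port, used to seed the
         // fake service lister.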
   379  func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Service {
   380  	return &v1.Service{
   381  		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
   382  		Spec: v1.ServiceSpec{
   383  			Ports: []v1.ServicePort{{
   384  				Protocol: v1.Protocol(protocol),
   385  				Port:     int32(port),
   386  			}},
   387  			ClusterIP: clusterIP,
   388  		},
   389  	}
   390  }
   391  
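         // TestMakeEnvironmentVariables covers environment variable generation: service
         // links, the downward API, $(VAR) expansion, and ConfigMap/Secret references,
         // including optional and missing sources.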
   392  func TestMakeEnvironmentVariables(t *testing.T) {
   393  	trueVal := true
   394  	services := []*v1.Service{
   395  		buildService("kubernetes", metav1.NamespaceDefault, "1.2.3.1", "TCP", 8081),
   396  		buildService("test", "test1", "1.2.3.3", "TCP", 8083),
   397  		buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
   398  		buildService("test", "test2", "1.2.3.5", "TCP", 8085),
   399  		buildService("test", "test2", "None", "TCP", 8085),
   400  		buildService("test", "test2", "", "TCP", 8085),
   401  		buildService("not-special", metav1.NamespaceDefault, "1.2.3.8", "TCP", 8088),
   402  		buildService("not-special", metav1.NamespaceDefault, "None", "TCP", 8088),
   403  		buildService("not-special", metav1.NamespaceDefault, "", "TCP", 8088),
   404  	}
   405  
   406  	trueValue := true
   407  	falseValue := false
   408  	testCases := []struct {
   409  		name               string                 // the name of the test case
   410  		ns                 string                 // the namespace to generate environment for
   411  		enablePodHostIPs   bool                   // enable PodHostIPs feature gate
   412  		enableServiceLinks *bool                  // enabling service links
   413  		container          *v1.Container          // the container to use
   414  		nilLister          bool                   // whether the lister should be nil
   415  		staticPod          bool                   // whether the pod should be a static pod (versus an API pod)
   416  		unsyncedServices   bool                   // whether the services should NOT be synced
   417  		configMap          *v1.ConfigMap          // an optional ConfigMap to pull from
   418  		secret             *v1.Secret             // an optional Secret to pull from
   419  		podIPs             []string               // the pod IPs
   420  		expectedEnvs       []kubecontainer.EnvVar // a set of expected environment vars
    421  		expectedError      bool                   // whether environment generation should fail
    422  		expectedEvent      string                 // the event expected to be emitted, if any
   423  	}{
   424  		{
   425  			name:               "if services aren't synced, non-static pods should fail",
   426  			ns:                 "test1",
   427  			enableServiceLinks: &falseValue,
   428  			container:          &v1.Container{Env: []v1.EnvVar{}},
   429  			nilLister:          false,
   430  			staticPod:          false,
   431  			unsyncedServices:   true,
   432  			expectedEnvs:       []kubecontainer.EnvVar{},
   433  			expectedError:      true,
   434  		},
   435  		{
   436  			name:               "if services aren't synced, static pods should succeed", // if there is no service
   437  			ns:                 "test1",
   438  			enableServiceLinks: &falseValue,
   439  			container:          &v1.Container{Env: []v1.EnvVar{}},
   440  			nilLister:          false,
   441  			staticPod:          true,
   442  			unsyncedServices:   true,
   443  		},
   444  		{
   445  			name:               "api server = Y, kubelet = Y",
   446  			ns:                 "test1",
   447  			enableServiceLinks: &falseValue,
   448  			container: &v1.Container{
   449  				Env: []v1.EnvVar{
   450  					{Name: "FOO", Value: "BAR"},
   451  					{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
   452  					{Name: "TEST_SERVICE_PORT", Value: "8083"},
   453  					{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
   454  					{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
   455  					{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
   456  					{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
   457  					{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
   458  				},
   459  			},
   460  			nilLister: false,
   461  			expectedEnvs: []kubecontainer.EnvVar{
   462  				{Name: "FOO", Value: "BAR"},
   463  				{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
   464  				{Name: "TEST_SERVICE_PORT", Value: "8083"},
   465  				{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
   466  				{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
   467  				{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
   468  				{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
   469  				{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
   470  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
   471  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
   472  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
   473  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
   474  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
   475  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
   476  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
   477  			},
   478  		},
   479  		{
   480  			name:               "api server = Y, kubelet = N",
   481  			ns:                 "test1",
   482  			enableServiceLinks: &falseValue,
   483  			container: &v1.Container{
   484  				Env: []v1.EnvVar{
   485  					{Name: "FOO", Value: "BAR"},
   486  					{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
   487  					{Name: "TEST_SERVICE_PORT", Value: "8083"},
   488  					{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
   489  					{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
   490  					{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
   491  					{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
   492  					{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
   493  				},
   494  			},
   495  			nilLister: true,
   496  			expectedEnvs: []kubecontainer.EnvVar{
   497  				{Name: "FOO", Value: "BAR"},
   498  				{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
   499  				{Name: "TEST_SERVICE_PORT", Value: "8083"},
   500  				{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
   501  				{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
   502  				{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
   503  				{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
   504  				{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
   505  			},
   506  		},
   507  		{
   508  			name:               "api server = N; kubelet = Y",
   509  			ns:                 "test1",
   510  			enableServiceLinks: &falseValue,
   511  			container: &v1.Container{
   512  				Env: []v1.EnvVar{
   513  					{Name: "FOO", Value: "BAZ"},
   514  				},
   515  			},
   516  			nilLister: false,
   517  			expectedEnvs: []kubecontainer.EnvVar{
   518  				{Name: "FOO", Value: "BAZ"},
   519  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
   520  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
   521  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
   522  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
   523  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
   524  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
   525  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
   526  			},
   527  		},
   528  		{
   529  			name:               "api server = N; kubelet = Y; service env vars",
   530  			ns:                 "test1",
   531  			enableServiceLinks: &trueValue,
   532  			container: &v1.Container{
   533  				Env: []v1.EnvVar{
   534  					{Name: "FOO", Value: "BAZ"},
   535  				},
   536  			},
   537  			nilLister: false,
   538  			expectedEnvs: []kubecontainer.EnvVar{
   539  				{Name: "FOO", Value: "BAZ"},
   540  				{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
   541  				{Name: "TEST_SERVICE_PORT", Value: "8083"},
   542  				{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
   543  				{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
   544  				{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
   545  				{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
   546  				{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
   547  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
   548  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
   549  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
   550  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
   551  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
   552  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
   553  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
   554  			},
   555  		},
   556  		{
   557  			name:               "master service in pod ns",
   558  			ns:                 "test2",
   559  			enableServiceLinks: &falseValue,
   560  			container: &v1.Container{
   561  				Env: []v1.EnvVar{
   562  					{Name: "FOO", Value: "ZAP"},
   563  				},
   564  			},
   565  			nilLister: false,
   566  			expectedEnvs: []kubecontainer.EnvVar{
   567  				{Name: "FOO", Value: "ZAP"},
   568  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
   569  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
   570  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
   571  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
   572  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
   573  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
   574  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
   575  			},
   576  		},
   577  		{
   578  			name:               "master service in pod ns, service env vars",
   579  			ns:                 "test2",
   580  			enableServiceLinks: &trueValue,
   581  			container: &v1.Container{
   582  				Env: []v1.EnvVar{
   583  					{Name: "FOO", Value: "ZAP"},
   584  				},
   585  			},
   586  			nilLister: false,
   587  			expectedEnvs: []kubecontainer.EnvVar{
   588  				{Name: "FOO", Value: "ZAP"},
   589  				{Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
   590  				{Name: "TEST_SERVICE_PORT", Value: "8085"},
   591  				{Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
   592  				{Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
   593  				{Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
   594  				{Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
   595  				{Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
   596  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
   597  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
   598  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
   599  				{Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
   600  				{Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
   601  				{Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
   602  				{Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
   603  			},
   604  		},
   605  		{
   606  			name:               "pod in master service ns",
   607  			ns:                 metav1.NamespaceDefault,
   608  			enableServiceLinks: &falseValue,
   609  			container:          &v1.Container{},
   610  			nilLister:          false,
   611  			expectedEnvs: []kubecontainer.EnvVar{
   612  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
   613  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
   614  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
   615  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
   616  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
   617  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
   618  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
   619  			},
   620  		},
   621  		{
   622  			name:               "pod in master service ns, service env vars",
   623  			ns:                 metav1.NamespaceDefault,
   624  			enableServiceLinks: &trueValue,
   625  			container:          &v1.Container{},
   626  			nilLister:          false,
   627  			expectedEnvs: []kubecontainer.EnvVar{
   628  				{Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
   629  				{Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
   630  				{Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
   631  				{Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
   632  				{Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
   633  				{Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
   634  				{Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
   635  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
   636  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
   637  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
   638  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
   639  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
   640  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
   641  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
   642  			},
   643  		},
   644  		{
   645  			name:               "downward api pod",
   646  			enablePodHostIPs:   true,
   647  			ns:                 "downward-api",
   648  			enableServiceLinks: &falseValue,
   649  			container: &v1.Container{
   650  				Env: []v1.EnvVar{
   651  					{
   652  						Name: "POD_NAME",
   653  						ValueFrom: &v1.EnvVarSource{
   654  							FieldRef: &v1.ObjectFieldSelector{
   655  								APIVersion: "v1",
   656  								FieldPath:  "metadata.name",
   657  							},
   658  						},
   659  					},
   660  					{
   661  						Name: "POD_NAMESPACE",
   662  						ValueFrom: &v1.EnvVarSource{
   663  							FieldRef: &v1.ObjectFieldSelector{
   664  								APIVersion: "v1",
   665  								FieldPath:  "metadata.namespace",
   666  							},
   667  						},
   668  					},
   669  					{
   670  						Name: "POD_NODE_NAME",
   671  						ValueFrom: &v1.EnvVarSource{
   672  							FieldRef: &v1.ObjectFieldSelector{
   673  								APIVersion: "v1",
   674  								FieldPath:  "spec.nodeName",
   675  							},
   676  						},
   677  					},
   678  					{
   679  						Name: "POD_SERVICE_ACCOUNT_NAME",
   680  						ValueFrom: &v1.EnvVarSource{
   681  							FieldRef: &v1.ObjectFieldSelector{
   682  								APIVersion: "v1",
   683  								FieldPath:  "spec.serviceAccountName",
   684  							},
   685  						},
   686  					},
   687  					{
   688  						Name: "POD_IP",
   689  						ValueFrom: &v1.EnvVarSource{
   690  							FieldRef: &v1.ObjectFieldSelector{
   691  								APIVersion: "v1",
   692  								FieldPath:  "status.podIP",
   693  							},
   694  						},
   695  					},
   696  					{
   697  						Name: "POD_IPS",
   698  						ValueFrom: &v1.EnvVarSource{
   699  							FieldRef: &v1.ObjectFieldSelector{
   700  								APIVersion: "v1",
   701  								FieldPath:  "status.podIPs",
   702  							},
   703  						},
   704  					},
   705  					{
   706  						Name: "HOST_IP",
   707  						ValueFrom: &v1.EnvVarSource{
   708  							FieldRef: &v1.ObjectFieldSelector{
   709  								APIVersion: "v1",
   710  								FieldPath:  "status.hostIP",
   711  							},
   712  						},
   713  					},
   714  					{
   715  						Name: "HOST_IPS",
   716  						ValueFrom: &v1.EnvVarSource{
   717  							FieldRef: &v1.ObjectFieldSelector{
   718  								APIVersion: "v1",
   719  								FieldPath:  "status.hostIPs",
   720  							},
   721  						},
   722  					},
   723  				},
   724  			},
   725  			podIPs:    []string{"1.2.3.4", "fd00::6"},
   726  			nilLister: true,
   727  			expectedEnvs: []kubecontainer.EnvVar{
   728  				{Name: "POD_NAME", Value: "dapi-test-pod-name"},
   729  				{Name: "POD_NAMESPACE", Value: "downward-api"},
   730  				{Name: "POD_NODE_NAME", Value: "node-name"},
   731  				{Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
   732  				{Name: "POD_IP", Value: "1.2.3.4"},
   733  				{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
   734  				{Name: "HOST_IP", Value: testKubeletHostIP},
   735  				{Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
   736  			},
   737  		},
   738  		{
   739  			name:               "downward api pod ips reverse order",
   740  			enablePodHostIPs:   true,
   741  			ns:                 "downward-api",
   742  			enableServiceLinks: &falseValue,
   743  			container: &v1.Container{
   744  				Env: []v1.EnvVar{
   745  					{
   746  						Name: "POD_IP",
   747  						ValueFrom: &v1.EnvVarSource{
   748  							FieldRef: &v1.ObjectFieldSelector{
   749  								APIVersion: "v1",
   750  								FieldPath:  "status.podIP",
   751  							},
   752  						},
   753  					},
   754  					{
   755  						Name: "POD_IPS",
   756  						ValueFrom: &v1.EnvVarSource{
   757  							FieldRef: &v1.ObjectFieldSelector{
   758  								APIVersion: "v1",
   759  								FieldPath:  "status.podIPs",
   760  							},
   761  						},
   762  					},
   763  					{
   764  						Name: "HOST_IP",
   765  						ValueFrom: &v1.EnvVarSource{
   766  							FieldRef: &v1.ObjectFieldSelector{
   767  								APIVersion: "v1",
   768  								FieldPath:  "status.hostIP",
   769  							},
   770  						},
   771  					},
   772  					{
   773  						Name: "HOST_IPS",
   774  						ValueFrom: &v1.EnvVarSource{
   775  							FieldRef: &v1.ObjectFieldSelector{
   776  								APIVersion: "v1",
   777  								FieldPath:  "status.hostIPs",
   778  							},
   779  						},
   780  					},
   781  				},
   782  			},
   783  			podIPs:    []string{"fd00::6", "1.2.3.4"},
   784  			nilLister: true,
   785  			expectedEnvs: []kubecontainer.EnvVar{
   786  				{Name: "POD_IP", Value: "1.2.3.4"},
   787  				{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
   788  				{Name: "HOST_IP", Value: testKubeletHostIP},
   789  				{Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
   790  			},
   791  		},
   792  		{
   793  			name:               "downward api pod ips multiple ips",
   794  			enablePodHostIPs:   true,
   795  			ns:                 "downward-api",
   796  			enableServiceLinks: &falseValue,
   797  			container: &v1.Container{
   798  				Env: []v1.EnvVar{
   799  					{
   800  						Name: "POD_IP",
   801  						ValueFrom: &v1.EnvVarSource{
   802  							FieldRef: &v1.ObjectFieldSelector{
   803  								APIVersion: "v1",
   804  								FieldPath:  "status.podIP",
   805  							},
   806  						},
   807  					},
   808  					{
   809  						Name: "POD_IPS",
   810  						ValueFrom: &v1.EnvVarSource{
   811  							FieldRef: &v1.ObjectFieldSelector{
   812  								APIVersion: "v1",
   813  								FieldPath:  "status.podIPs",
   814  							},
   815  						},
   816  					},
   817  					{
   818  						Name: "HOST_IP",
   819  						ValueFrom: &v1.EnvVarSource{
   820  							FieldRef: &v1.ObjectFieldSelector{
   821  								APIVersion: "v1",
   822  								FieldPath:  "status.hostIP",
   823  							},
   824  						},
   825  					},
   826  					{
   827  						Name: "HOST_IPS",
   828  						ValueFrom: &v1.EnvVarSource{
   829  							FieldRef: &v1.ObjectFieldSelector{
   830  								APIVersion: "v1",
   831  								FieldPath:  "status.hostIPs",
   832  							},
   833  						},
   834  					},
   835  				},
   836  			},
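         			// "192.168.1.1." is deliberately malformed; only the valid IPs should
         			// surface in POD_IP and POD_IPS below.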
   837  			podIPs:    []string{"1.2.3.4", "192.168.1.1.", "fd00::6"},
   838  			nilLister: true,
   839  			expectedEnvs: []kubecontainer.EnvVar{
   840  				{Name: "POD_IP", Value: "1.2.3.4"},
   841  				{Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
   842  				{Name: "HOST_IP", Value: testKubeletHostIP},
   843  				{Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
   844  			},
   845  		},
   846  		{
   847  			name:               "env expansion",
   848  			ns:                 "test1",
   849  			enableServiceLinks: &falseValue,
   850  			container: &v1.Container{
   851  				Env: []v1.EnvVar{
   852  					{
   853  						Name:  "TEST_LITERAL",
   854  						Value: "test-test-test",
   855  					},
   856  					{
   857  						Name: "POD_NAME",
   858  						ValueFrom: &v1.EnvVarSource{
   859  							FieldRef: &v1.ObjectFieldSelector{
    860  								APIVersion: "v1",
   861  								FieldPath:  "metadata.name",
   862  							},
   863  						},
   864  					},
   865  					{
   866  						Name:  "OUT_OF_ORDER_TEST",
   867  						Value: "$(OUT_OF_ORDER_TARGET)",
   868  					},
   869  					{
   870  						Name:  "OUT_OF_ORDER_TARGET",
   871  						Value: "FOO",
   872  					},
   873  					{
   874  						Name: "EMPTY_VAR",
   875  					},
   876  					{
   877  						Name:  "EMPTY_TEST",
   878  						Value: "foo-$(EMPTY_VAR)",
   879  					},
   880  					{
   881  						Name:  "POD_NAME_TEST2",
   882  						Value: "test2-$(POD_NAME)",
   883  					},
   884  					{
   885  						Name:  "POD_NAME_TEST3",
   886  						Value: "$(POD_NAME_TEST2)-3",
   887  					},
   888  					{
   889  						Name:  "LITERAL_TEST",
   890  						Value: "literal-$(TEST_LITERAL)",
   891  					},
   892  					{
   893  						Name:  "TEST_UNDEFINED",
   894  						Value: "$(UNDEFINED_VAR)",
   895  					},
   896  				},
   897  			},
   898  			nilLister: false,
   899  			expectedEnvs: []kubecontainer.EnvVar{
   900  				{
   901  					Name:  "TEST_LITERAL",
   902  					Value: "test-test-test",
   903  				},
   904  				{
   905  					Name:  "POD_NAME",
   906  					Value: "dapi-test-pod-name",
   907  				},
   908  				{
   909  					Name:  "POD_NAME_TEST2",
   910  					Value: "test2-dapi-test-pod-name",
   911  				},
   912  				{
   913  					Name:  "POD_NAME_TEST3",
   914  					Value: "test2-dapi-test-pod-name-3",
   915  				},
   916  				{
   917  					Name:  "LITERAL_TEST",
   918  					Value: "literal-test-test-test",
   919  				},
   920  				{
   921  					Name:  "OUT_OF_ORDER_TEST",
   922  					Value: "$(OUT_OF_ORDER_TARGET)",
   923  				},
   924  				{
   925  					Name:  "OUT_OF_ORDER_TARGET",
   926  					Value: "FOO",
   927  				},
   928  				{
   929  					Name:  "TEST_UNDEFINED",
   930  					Value: "$(UNDEFINED_VAR)",
   931  				},
   932  				{
   933  					Name: "EMPTY_VAR",
   934  				},
   935  				{
   936  					Name:  "EMPTY_TEST",
   937  					Value: "foo-",
   938  				},
   939  				{
   940  					Name:  "KUBERNETES_SERVICE_HOST",
   941  					Value: "1.2.3.1",
   942  				},
   943  				{
   944  					Name:  "KUBERNETES_SERVICE_PORT",
   945  					Value: "8081",
   946  				},
   947  				{
   948  					Name:  "KUBERNETES_PORT",
   949  					Value: "tcp://1.2.3.1:8081",
   950  				},
   951  				{
   952  					Name:  "KUBERNETES_PORT_8081_TCP",
   953  					Value: "tcp://1.2.3.1:8081",
   954  				},
   955  				{
   956  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
   957  					Value: "tcp",
   958  				},
   959  				{
   960  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
   961  					Value: "8081",
   962  				},
   963  				{
   964  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
   965  					Value: "1.2.3.1",
   966  				},
   967  			},
   968  		},
   969  		{
   970  			name:               "env expansion, service env vars",
   971  			ns:                 "test1",
   972  			enableServiceLinks: &trueValue,
   973  			container: &v1.Container{
   974  				Env: []v1.EnvVar{
   975  					{
   976  						Name:  "TEST_LITERAL",
   977  						Value: "test-test-test",
   978  					},
   979  					{
   980  						Name: "POD_NAME",
   981  						ValueFrom: &v1.EnvVarSource{
   982  							FieldRef: &v1.ObjectFieldSelector{
   983  								APIVersion: "v1",
   984  								FieldPath:  "metadata.name",
   985  							},
   986  						},
   987  					},
   988  					{
   989  						Name:  "OUT_OF_ORDER_TEST",
   990  						Value: "$(OUT_OF_ORDER_TARGET)",
   991  					},
   992  					{
   993  						Name:  "OUT_OF_ORDER_TARGET",
   994  						Value: "FOO",
   995  					},
   996  					{
   997  						Name: "EMPTY_VAR",
   998  					},
   999  					{
  1000  						Name:  "EMPTY_TEST",
  1001  						Value: "foo-$(EMPTY_VAR)",
  1002  					},
  1003  					{
  1004  						Name:  "POD_NAME_TEST2",
  1005  						Value: "test2-$(POD_NAME)",
  1006  					},
  1007  					{
  1008  						Name:  "POD_NAME_TEST3",
  1009  						Value: "$(POD_NAME_TEST2)-3",
  1010  					},
  1011  					{
  1012  						Name:  "LITERAL_TEST",
  1013  						Value: "literal-$(TEST_LITERAL)",
  1014  					},
  1015  					{
  1016  						Name:  "SERVICE_VAR_TEST",
  1017  						Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
  1018  					},
  1019  					{
  1020  						Name:  "TEST_UNDEFINED",
  1021  						Value: "$(UNDEFINED_VAR)",
  1022  					},
  1023  				},
  1024  			},
  1025  			nilLister: false,
  1026  			expectedEnvs: []kubecontainer.EnvVar{
  1027  				{
  1028  					Name:  "TEST_LITERAL",
  1029  					Value: "test-test-test",
  1030  				},
  1031  				{
  1032  					Name:  "POD_NAME",
  1033  					Value: "dapi-test-pod-name",
  1034  				},
  1035  				{
  1036  					Name:  "POD_NAME_TEST2",
  1037  					Value: "test2-dapi-test-pod-name",
  1038  				},
  1039  				{
  1040  					Name:  "POD_NAME_TEST3",
  1041  					Value: "test2-dapi-test-pod-name-3",
  1042  				},
  1043  				{
  1044  					Name:  "LITERAL_TEST",
  1045  					Value: "literal-test-test-test",
  1046  				},
  1047  				{
  1048  					Name:  "TEST_SERVICE_HOST",
  1049  					Value: "1.2.3.3",
  1050  				},
  1051  				{
  1052  					Name:  "TEST_SERVICE_PORT",
  1053  					Value: "8083",
  1054  				},
  1055  				{
  1056  					Name:  "TEST_PORT",
  1057  					Value: "tcp://1.2.3.3:8083",
  1058  				},
  1059  				{
  1060  					Name:  "TEST_PORT_8083_TCP",
  1061  					Value: "tcp://1.2.3.3:8083",
  1062  				},
  1063  				{
  1064  					Name:  "TEST_PORT_8083_TCP_PROTO",
  1065  					Value: "tcp",
  1066  				},
  1067  				{
  1068  					Name:  "TEST_PORT_8083_TCP_PORT",
  1069  					Value: "8083",
  1070  				},
  1071  				{
  1072  					Name:  "TEST_PORT_8083_TCP_ADDR",
  1073  					Value: "1.2.3.3",
  1074  				},
  1075  				{
  1076  					Name:  "SERVICE_VAR_TEST",
  1077  					Value: "1.2.3.3:8083",
  1078  				},
  1079  				{
  1080  					Name:  "OUT_OF_ORDER_TEST",
  1081  					Value: "$(OUT_OF_ORDER_TARGET)",
  1082  				},
  1083  				{
  1084  					Name:  "OUT_OF_ORDER_TARGET",
  1085  					Value: "FOO",
  1086  				},
  1087  				{
  1088  					Name:  "TEST_UNDEFINED",
  1089  					Value: "$(UNDEFINED_VAR)",
  1090  				},
  1091  				{
  1092  					Name: "EMPTY_VAR",
  1093  				},
  1094  				{
  1095  					Name:  "EMPTY_TEST",
  1096  					Value: "foo-",
  1097  				},
  1098  				{
  1099  					Name:  "KUBERNETES_SERVICE_HOST",
  1100  					Value: "1.2.3.1",
  1101  				},
  1102  				{
  1103  					Name:  "KUBERNETES_SERVICE_PORT",
  1104  					Value: "8081",
  1105  				},
  1106  				{
  1107  					Name:  "KUBERNETES_PORT",
  1108  					Value: "tcp://1.2.3.1:8081",
  1109  				},
  1110  				{
  1111  					Name:  "KUBERNETES_PORT_8081_TCP",
  1112  					Value: "tcp://1.2.3.1:8081",
  1113  				},
  1114  				{
  1115  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1116  					Value: "tcp",
  1117  				},
  1118  				{
  1119  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1120  					Value: "8081",
  1121  				},
  1122  				{
  1123  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1124  					Value: "1.2.3.1",
  1125  				},
  1126  			},
  1127  		},
  1128  		{
  1129  			name:               "configmapkeyref_missing_optional",
  1130  			ns:                 "test",
  1131  			enableServiceLinks: &falseValue,
  1132  			container: &v1.Container{
  1133  				Env: []v1.EnvVar{
  1134  					{
  1135  						Name: "POD_NAME",
  1136  						ValueFrom: &v1.EnvVarSource{
  1137  							ConfigMapKeyRef: &v1.ConfigMapKeySelector{
  1138  								LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"},
  1139  								Key:                  "key",
  1140  								Optional:             &trueVal,
  1141  							},
  1142  						},
  1143  					},
  1144  				},
  1145  			},
  1146  			expectedEnvs: []kubecontainer.EnvVar{
  1147  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
  1148  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
  1149  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
  1150  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
  1151  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
  1152  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
  1153  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
  1154  			},
  1155  		},
  1156  		{
  1157  			name:               "configmapkeyref_missing_key_optional",
  1158  			ns:                 "test",
  1159  			enableServiceLinks: &falseValue,
  1160  			container: &v1.Container{
  1161  				Env: []v1.EnvVar{
  1162  					{
  1163  						Name: "POD_NAME",
  1164  						ValueFrom: &v1.EnvVarSource{
  1165  							ConfigMapKeyRef: &v1.ConfigMapKeySelector{
  1166  								LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"},
  1167  								Key:                  "key",
  1168  								Optional:             &trueVal,
  1169  							},
  1170  						},
  1171  					},
  1172  				},
  1173  			},
  1174  			nilLister: true,
  1175  			configMap: &v1.ConfigMap{
  1176  				ObjectMeta: metav1.ObjectMeta{
  1177  					Namespace: "test1",
  1178  					Name:      "test-configmap",
  1179  				},
  1180  				Data: map[string]string{
  1181  					"a": "b",
  1182  				},
  1183  			},
  1184  			expectedEnvs: nil,
  1185  		},
  1186  		{
  1187  			name:               "secretkeyref_missing_optional",
  1188  			ns:                 "test",
  1189  			enableServiceLinks: &falseValue,
  1190  			container: &v1.Container{
  1191  				Env: []v1.EnvVar{
  1192  					{
  1193  						Name: "POD_NAME",
  1194  						ValueFrom: &v1.EnvVarSource{
  1195  							SecretKeyRef: &v1.SecretKeySelector{
  1196  								LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
  1197  								Key:                  "key",
  1198  								Optional:             &trueVal,
  1199  							},
  1200  						},
  1201  					},
  1202  				},
  1203  			},
  1204  			expectedEnvs: []kubecontainer.EnvVar{
  1205  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
  1206  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
  1207  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
  1208  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
  1209  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
  1210  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
  1211  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
  1212  			},
  1213  		},
  1214  		{
  1215  			name:               "secretkeyref_missing_key_optional",
  1216  			ns:                 "test",
  1217  			enableServiceLinks: &falseValue,
  1218  			container: &v1.Container{
  1219  				Env: []v1.EnvVar{
  1220  					{
  1221  						Name: "POD_NAME",
  1222  						ValueFrom: &v1.EnvVarSource{
  1223  							SecretKeyRef: &v1.SecretKeySelector{
  1224  								LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"},
  1225  								Key:                  "key",
  1226  								Optional:             &trueVal,
  1227  							},
  1228  						},
  1229  					},
  1230  				},
  1231  			},
  1232  			nilLister: true,
  1233  			secret: &v1.Secret{
  1234  				ObjectMeta: metav1.ObjectMeta{
  1235  					Namespace: "test1",
  1236  					Name:      "test-secret",
  1237  				},
  1238  				Data: map[string][]byte{
  1239  					"a": []byte("b"),
  1240  				},
  1241  			},
  1242  			expectedEnvs: nil,
  1243  		},
  1244  		{
  1245  			name:               "configmap",
  1246  			ns:                 "test1",
  1247  			enableServiceLinks: &falseValue,
  1248  			container: &v1.Container{
  1249  				EnvFrom: []v1.EnvFromSource{
  1250  					{
  1251  						ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
  1252  					},
  1253  					{
  1254  						Prefix:       "p_",
  1255  						ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
  1256  					},
  1257  				},
  1258  				Env: []v1.EnvVar{
  1259  					{
  1260  						Name:  "TEST_LITERAL",
  1261  						Value: "test-test-test",
  1262  					},
  1263  					{
  1264  						Name:  "EXPANSION_TEST",
  1265  						Value: "$(REPLACE_ME)",
  1266  					},
  1267  					{
  1268  						Name:  "DUPE_TEST",
  1269  						Value: "ENV_VAR",
  1270  					},
  1271  				},
  1272  			},
  1273  			nilLister: false,
  1274  			configMap: &v1.ConfigMap{
  1275  				ObjectMeta: metav1.ObjectMeta{
  1276  					Namespace: "test1",
  1277  					Name:      "test-configmap",
  1278  				},
  1279  				Data: map[string]string{
  1280  					"REPLACE_ME": "FROM_CONFIG_MAP",
  1281  					"DUPE_TEST":  "CONFIG_MAP",
  1282  				},
  1283  			},
  1284  			expectedEnvs: []kubecontainer.EnvVar{
  1285  				{
  1286  					Name:  "TEST_LITERAL",
  1287  					Value: "test-test-test",
  1288  				},
  1289  				{
  1290  					Name:  "REPLACE_ME",
  1291  					Value: "FROM_CONFIG_MAP",
  1292  				},
  1293  				{
  1294  					Name:  "EXPANSION_TEST",
  1295  					Value: "FROM_CONFIG_MAP",
  1296  				},
  1297  				{
  1298  					Name:  "DUPE_TEST",
  1299  					Value: "ENV_VAR",
  1300  				},
  1301  				{
  1302  					Name:  "p_REPLACE_ME",
  1303  					Value: "FROM_CONFIG_MAP",
  1304  				},
  1305  				{
  1306  					Name:  "p_DUPE_TEST",
  1307  					Value: "CONFIG_MAP",
  1308  				},
  1309  				{
  1310  					Name:  "KUBERNETES_SERVICE_HOST",
  1311  					Value: "1.2.3.1",
  1312  				},
  1313  				{
  1314  					Name:  "KUBERNETES_SERVICE_PORT",
  1315  					Value: "8081",
  1316  				},
  1317  				{
  1318  					Name:  "KUBERNETES_PORT",
  1319  					Value: "tcp://1.2.3.1:8081",
  1320  				},
  1321  				{
  1322  					Name:  "KUBERNETES_PORT_8081_TCP",
  1323  					Value: "tcp://1.2.3.1:8081",
  1324  				},
  1325  				{
  1326  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1327  					Value: "tcp",
  1328  				},
  1329  				{
  1330  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1331  					Value: "8081",
  1332  				},
  1333  				{
  1334  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1335  					Value: "1.2.3.1",
  1336  				},
  1337  			},
  1338  		},
  1339  		{
  1340  			name:               "configmap, service env vars",
  1341  			ns:                 "test1",
  1342  			enableServiceLinks: &trueValue,
  1343  			container: &v1.Container{
  1344  				EnvFrom: []v1.EnvFromSource{
  1345  					{
  1346  						ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
  1347  					},
  1348  					{
  1349  						Prefix:       "p_",
  1350  						ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
  1351  					},
  1352  				},
  1353  				Env: []v1.EnvVar{
  1354  					{
  1355  						Name:  "TEST_LITERAL",
  1356  						Value: "test-test-test",
  1357  					},
  1358  					{
  1359  						Name:  "EXPANSION_TEST",
  1360  						Value: "$(REPLACE_ME)",
  1361  					},
  1362  					{
  1363  						Name:  "DUPE_TEST",
  1364  						Value: "ENV_VAR",
  1365  					},
  1366  				},
  1367  			},
  1368  			nilLister: false,
  1369  			configMap: &v1.ConfigMap{
  1370  				ObjectMeta: metav1.ObjectMeta{
  1371  					Namespace: "test1",
  1372  					Name:      "test-configmap",
  1373  				},
  1374  				Data: map[string]string{
  1375  					"REPLACE_ME": "FROM_CONFIG_MAP",
  1376  					"DUPE_TEST":  "CONFIG_MAP",
  1377  				},
  1378  			},
  1379  			expectedEnvs: []kubecontainer.EnvVar{
  1380  				{
  1381  					Name:  "TEST_LITERAL",
  1382  					Value: "test-test-test",
  1383  				},
  1384  				{
  1385  					Name:  "TEST_SERVICE_HOST",
  1386  					Value: "1.2.3.3",
  1387  				},
  1388  				{
  1389  					Name:  "TEST_SERVICE_PORT",
  1390  					Value: "8083",
  1391  				},
  1392  				{
  1393  					Name:  "TEST_PORT",
  1394  					Value: "tcp://1.2.3.3:8083",
  1395  				},
  1396  				{
  1397  					Name:  "TEST_PORT_8083_TCP",
  1398  					Value: "tcp://1.2.3.3:8083",
  1399  				},
  1400  				{
  1401  					Name:  "TEST_PORT_8083_TCP_PROTO",
  1402  					Value: "tcp",
  1403  				},
  1404  				{
  1405  					Name:  "TEST_PORT_8083_TCP_PORT",
  1406  					Value: "8083",
  1407  				},
  1408  				{
  1409  					Name:  "TEST_PORT_8083_TCP_ADDR",
  1410  					Value: "1.2.3.3",
  1411  				},
  1412  				{
  1413  					Name:  "REPLACE_ME",
  1414  					Value: "FROM_CONFIG_MAP",
  1415  				},
  1416  				{
  1417  					Name:  "EXPANSION_TEST",
  1418  					Value: "FROM_CONFIG_MAP",
  1419  				},
  1420  				{
  1421  					Name:  "DUPE_TEST",
  1422  					Value: "ENV_VAR",
  1423  				},
  1424  				{
  1425  					Name:  "p_REPLACE_ME",
  1426  					Value: "FROM_CONFIG_MAP",
  1427  				},
  1428  				{
  1429  					Name:  "p_DUPE_TEST",
  1430  					Value: "CONFIG_MAP",
  1431  				},
  1432  				{
  1433  					Name:  "KUBERNETES_SERVICE_HOST",
  1434  					Value: "1.2.3.1",
  1435  				},
  1436  				{
  1437  					Name:  "KUBERNETES_SERVICE_PORT",
  1438  					Value: "8081",
  1439  				},
  1440  				{
  1441  					Name:  "KUBERNETES_PORT",
  1442  					Value: "tcp://1.2.3.1:8081",
  1443  				},
  1444  				{
  1445  					Name:  "KUBERNETES_PORT_8081_TCP",
  1446  					Value: "tcp://1.2.3.1:8081",
  1447  				},
  1448  				{
  1449  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1450  					Value: "tcp",
  1451  				},
  1452  				{
  1453  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1454  					Value: "8081",
  1455  				},
  1456  				{
  1457  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1458  					Value: "1.2.3.1",
  1459  				},
  1460  			},
  1461  		},
  1462  		{
  1463  			name:               "configmap_missing",
  1464  			ns:                 "test1",
  1465  			enableServiceLinks: &falseValue,
  1466  			container: &v1.Container{
  1467  				EnvFrom: []v1.EnvFromSource{
  1468  					{ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}},
  1469  				},
  1470  			},
  1471  			expectedError: true,
  1472  		},
  1473  		{
  1474  			name:               "configmap_missing_optional",
  1475  			ns:                 "test",
  1476  			enableServiceLinks: &falseValue,
  1477  			container: &v1.Container{
  1478  				EnvFrom: []v1.EnvFromSource{
  1479  					{ConfigMapRef: &v1.ConfigMapEnvSource{
  1480  						Optional:             &trueVal,
  1481  						LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"}}},
  1482  				},
  1483  			},
  1484  			expectedEnvs: []kubecontainer.EnvVar{
  1485  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
  1486  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
  1487  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
  1488  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
  1489  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
  1490  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
  1491  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
  1492  			},
  1493  		},
  1494  		{
  1495  			name:               "configmap_invalid_keys",
  1496  			ns:                 "test",
  1497  			enableServiceLinks: &falseValue,
  1498  			container: &v1.Container{
  1499  				EnvFrom: []v1.EnvFromSource{
  1500  					{ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}},
  1501  				},
  1502  			},
  1503  			configMap: &v1.ConfigMap{
  1504  				ObjectMeta: metav1.ObjectMeta{
  1505  					Namespace: "test1",
  1506  					Name:      "test-configmap",
  1507  				},
  1508  				Data: map[string]string{
  1509  					"1234": "abc",
  1510  					"1z":   "abc",
  1511  					"key":  "value",
  1512  				},
  1513  			},
  1514  			expectedEnvs: []kubecontainer.EnvVar{
  1515  				{
  1516  					Name:  "key",
  1517  					Value: "value",
  1518  				},
  1519  				{
  1520  					Name:  "KUBERNETES_SERVICE_HOST",
  1521  					Value: "1.2.3.1",
  1522  				},
  1523  				{
  1524  					Name:  "KUBERNETES_SERVICE_PORT",
  1525  					Value: "8081",
  1526  				},
  1527  				{
  1528  					Name:  "KUBERNETES_PORT",
  1529  					Value: "tcp://1.2.3.1:8081",
  1530  				},
  1531  				{
  1532  					Name:  "KUBERNETES_PORT_8081_TCP",
  1533  					Value: "tcp://1.2.3.1:8081",
  1534  				},
  1535  				{
  1536  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1537  					Value: "tcp",
  1538  				},
  1539  				{
  1540  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1541  					Value: "8081",
  1542  				},
  1543  				{
  1544  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1545  					Value: "1.2.3.1",
  1546  				},
  1547  			},
  1548  			expectedEvent: "Warning InvalidEnvironmentVariableNames Keys [1234, 1z] from the EnvFrom configMap test/test-config-map were skipped since they are considered invalid environment variable names.",
  1549  		},
  1550  		{
  1551  			name:               "configmap_invalid_keys_valid",
  1552  			ns:                 "test",
  1553  			enableServiceLinks: &falseValue,
  1554  			container: &v1.Container{
  1555  				EnvFrom: []v1.EnvFromSource{
  1556  					{
  1557  						Prefix:       "p_",
  1558  						ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
  1559  					},
  1560  				},
  1561  			},
  1562  			configMap: &v1.ConfigMap{
  1563  				ObjectMeta: metav1.ObjectMeta{
  1564  					Namespace: "test1",
  1565  					Name:      "test-configmap",
  1566  				},
  1567  				Data: map[string]string{
  1568  					"1234": "abc",
  1569  				},
  1570  			},
  1571  			expectedEnvs: []kubecontainer.EnvVar{
  1572  				{
  1573  					Name:  "p_1234",
  1574  					Value: "abc",
  1575  				},
  1576  				{
  1577  					Name:  "KUBERNETES_SERVICE_HOST",
  1578  					Value: "1.2.3.1",
  1579  				},
  1580  				{
  1581  					Name:  "KUBERNETES_SERVICE_PORT",
  1582  					Value: "8081",
  1583  				},
  1584  				{
  1585  					Name:  "KUBERNETES_PORT",
  1586  					Value: "tcp://1.2.3.1:8081",
  1587  				},
  1588  				{
  1589  					Name:  "KUBERNETES_PORT_8081_TCP",
  1590  					Value: "tcp://1.2.3.1:8081",
  1591  				},
  1592  				{
  1593  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1594  					Value: "tcp",
  1595  				},
  1596  				{
  1597  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1598  					Value: "8081",
  1599  				},
  1600  				{
  1601  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1602  					Value: "1.2.3.1",
  1603  				},
  1604  			},
  1605  		},
  1606  		{
  1607  			name:               "secret",
  1608  			ns:                 "test1",
  1609  			enableServiceLinks: &falseValue,
  1610  			container: &v1.Container{
  1611  				EnvFrom: []v1.EnvFromSource{
  1612  					{
  1613  						SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
  1614  					},
  1615  					{
  1616  						Prefix:    "p_",
  1617  						SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
  1618  					},
  1619  				},
  1620  				Env: []v1.EnvVar{
  1621  					{
  1622  						Name:  "TEST_LITERAL",
  1623  						Value: "test-test-test",
  1624  					},
  1625  					{
  1626  						Name:  "EXPANSION_TEST",
  1627  						Value: "$(REPLACE_ME)",
  1628  					},
  1629  					{
  1630  						Name:  "DUPE_TEST",
  1631  						Value: "ENV_VAR",
  1632  					},
  1633  				},
  1634  			},
  1635  			nilLister: false,
  1636  			secret: &v1.Secret{
  1637  				ObjectMeta: metav1.ObjectMeta{
  1638  					Namespace: "test1",
  1639  					Name:      "test-secret",
  1640  				},
  1641  				Data: map[string][]byte{
  1642  					"REPLACE_ME": []byte("FROM_SECRET"),
  1643  					"DUPE_TEST":  []byte("SECRET"),
  1644  				},
  1645  			},
  1646  			expectedEnvs: []kubecontainer.EnvVar{
  1647  				{
  1648  					Name:  "TEST_LITERAL",
  1649  					Value: "test-test-test",
  1650  				},
  1651  				{
  1652  					Name:  "REPLACE_ME",
  1653  					Value: "FROM_SECRET",
  1654  				},
  1655  				{
  1656  					Name:  "EXPANSION_TEST",
  1657  					Value: "FROM_SECRET",
  1658  				},
  1659  				{
  1660  					Name:  "DUPE_TEST",
  1661  					Value: "ENV_VAR",
  1662  				},
  1663  				{
  1664  					Name:  "p_REPLACE_ME",
  1665  					Value: "FROM_SECRET",
  1666  				},
  1667  				{
  1668  					Name:  "p_DUPE_TEST",
  1669  					Value: "SECRET",
  1670  				},
  1671  				{
  1672  					Name:  "KUBERNETES_SERVICE_HOST",
  1673  					Value: "1.2.3.1",
  1674  				},
  1675  				{
  1676  					Name:  "KUBERNETES_SERVICE_PORT",
  1677  					Value: "8081",
  1678  				},
  1679  				{
  1680  					Name:  "KUBERNETES_PORT",
  1681  					Value: "tcp://1.2.3.1:8081",
  1682  				},
  1683  				{
  1684  					Name:  "KUBERNETES_PORT_8081_TCP",
  1685  					Value: "tcp://1.2.3.1:8081",
  1686  				},
  1687  				{
  1688  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1689  					Value: "tcp",
  1690  				},
  1691  				{
  1692  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1693  					Value: "8081",
  1694  				},
  1695  				{
  1696  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1697  					Value: "1.2.3.1",
  1698  				},
  1699  			},
  1700  		},
  1701  		{
  1702  			name:               "secret, service env vars",
  1703  			ns:                 "test1",
  1704  			enableServiceLinks: &trueValue,
  1705  			container: &v1.Container{
  1706  				EnvFrom: []v1.EnvFromSource{
  1707  					{
  1708  						SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
  1709  					},
  1710  					{
  1711  						Prefix:    "p_",
  1712  						SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
  1713  					},
  1714  				},
  1715  				Env: []v1.EnvVar{
  1716  					{
  1717  						Name:  "TEST_LITERAL",
  1718  						Value: "test-test-test",
  1719  					},
  1720  					{
  1721  						Name:  "EXPANSION_TEST",
  1722  						Value: "$(REPLACE_ME)",
  1723  					},
  1724  					{
  1725  						Name:  "DUPE_TEST",
  1726  						Value: "ENV_VAR",
  1727  					},
  1728  				},
  1729  			},
  1730  			nilLister: false,
  1731  			secret: &v1.Secret{
  1732  				ObjectMeta: metav1.ObjectMeta{
  1733  					Namespace: "test1",
  1734  					Name:      "test-secret",
  1735  				},
  1736  				Data: map[string][]byte{
  1737  					"REPLACE_ME": []byte("FROM_SECRET"),
  1738  					"DUPE_TEST":  []byte("SECRET"),
  1739  				},
  1740  			},
  1741  			expectedEnvs: []kubecontainer.EnvVar{
  1742  				{
  1743  					Name:  "TEST_LITERAL",
  1744  					Value: "test-test-test",
  1745  				},
  1746  				{
  1747  					Name:  "TEST_SERVICE_HOST",
  1748  					Value: "1.2.3.3",
  1749  				},
  1750  				{
  1751  					Name:  "TEST_SERVICE_PORT",
  1752  					Value: "8083",
  1753  				},
  1754  				{
  1755  					Name:  "TEST_PORT",
  1756  					Value: "tcp://1.2.3.3:8083",
  1757  				},
  1758  				{
  1759  					Name:  "TEST_PORT_8083_TCP",
  1760  					Value: "tcp://1.2.3.3:8083",
  1761  				},
  1762  				{
  1763  					Name:  "TEST_PORT_8083_TCP_PROTO",
  1764  					Value: "tcp",
  1765  				},
  1766  				{
  1767  					Name:  "TEST_PORT_8083_TCP_PORT",
  1768  					Value: "8083",
  1769  				},
  1770  				{
  1771  					Name:  "TEST_PORT_8083_TCP_ADDR",
  1772  					Value: "1.2.3.3",
  1773  				},
  1774  				{
  1775  					Name:  "REPLACE_ME",
  1776  					Value: "FROM_SECRET",
  1777  				},
  1778  				{
  1779  					Name:  "EXPANSION_TEST",
  1780  					Value: "FROM_SECRET",
  1781  				},
  1782  				{
  1783  					Name:  "DUPE_TEST",
  1784  					Value: "ENV_VAR",
  1785  				},
  1786  				{
  1787  					Name:  "p_REPLACE_ME",
  1788  					Value: "FROM_SECRET",
  1789  				},
  1790  				{
  1791  					Name:  "p_DUPE_TEST",
  1792  					Value: "SECRET",
  1793  				},
  1794  				{
  1795  					Name:  "KUBERNETES_SERVICE_HOST",
  1796  					Value: "1.2.3.1",
  1797  				},
  1798  				{
  1799  					Name:  "KUBERNETES_SERVICE_PORT",
  1800  					Value: "8081",
  1801  				},
  1802  				{
  1803  					Name:  "KUBERNETES_PORT",
  1804  					Value: "tcp://1.2.3.1:8081",
  1805  				},
  1806  				{
  1807  					Name:  "KUBERNETES_PORT_8081_TCP",
  1808  					Value: "tcp://1.2.3.1:8081",
  1809  				},
  1810  				{
  1811  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1812  					Value: "tcp",
  1813  				},
  1814  				{
  1815  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1816  					Value: "8081",
  1817  				},
  1818  				{
  1819  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1820  					Value: "1.2.3.1",
  1821  				},
  1822  			},
  1823  		},
  1824  		{
  1825  			name:               "secret_missing",
  1826  			ns:                 "test1",
  1827  			enableServiceLinks: &falseValue,
  1828  			container: &v1.Container{
  1829  				EnvFrom: []v1.EnvFromSource{
  1830  					{SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}},
  1831  				},
  1832  			},
  1833  			expectedError: true,
  1834  		},
  1835  		{
  1836  			name:               "secret_missing_optional",
  1837  			ns:                 "test",
  1838  			enableServiceLinks: &falseValue,
  1839  			container: &v1.Container{
  1840  				EnvFrom: []v1.EnvFromSource{
  1841  					{SecretRef: &v1.SecretEnvSource{
  1842  						LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
  1843  						Optional:             &trueVal}},
  1844  				},
  1845  			},
  1846  			expectedEnvs: []kubecontainer.EnvVar{
  1847  				{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
  1848  				{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
  1849  				{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
  1850  				{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
  1851  				{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
  1852  				{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
  1853  				{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
  1854  			},
  1855  		},
  1856  		{
  1857  			name:               "secret_invalid_keys",
  1858  			ns:                 "test",
  1859  			enableServiceLinks: &falseValue,
  1860  			container: &v1.Container{
  1861  				EnvFrom: []v1.EnvFromSource{
  1862  					{SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}},
  1863  				},
  1864  			},
  1865  			secret: &v1.Secret{
  1866  				ObjectMeta: metav1.ObjectMeta{
  1867  					Namespace: "test1",
  1868  					Name:      "test-secret",
  1869  				},
  1870  				Data: map[string][]byte{
  1871  					"1234":  []byte("abc"),
  1872  					"1z":    []byte("abc"),
  1873  					"key.1": []byte("value"),
  1874  				},
  1875  			},
  1876  			expectedEnvs: []kubecontainer.EnvVar{
  1877  				{
  1878  					Name:  "key.1",
  1879  					Value: "value",
  1880  				},
  1881  				{
  1882  					Name:  "KUBERNETES_SERVICE_HOST",
  1883  					Value: "1.2.3.1",
  1884  				},
  1885  				{
  1886  					Name:  "KUBERNETES_SERVICE_PORT",
  1887  					Value: "8081",
  1888  				},
  1889  				{
  1890  					Name:  "KUBERNETES_PORT",
  1891  					Value: "tcp://1.2.3.1:8081",
  1892  				},
  1893  				{
  1894  					Name:  "KUBERNETES_PORT_8081_TCP",
  1895  					Value: "tcp://1.2.3.1:8081",
  1896  				},
  1897  				{
  1898  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1899  					Value: "tcp",
  1900  				},
  1901  				{
  1902  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1903  					Value: "8081",
  1904  				},
  1905  				{
  1906  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1907  					Value: "1.2.3.1",
  1908  				},
  1909  			},
  1910  			expectedEvent: "Warning InvalidEnvironmentVariableNames Keys [1234, 1z] from the EnvFrom secret test/test-secret were skipped since they are considered invalid environment variable names.",
  1911  		},
  1912  		{
  1913  			name:               "secret_invalid_keys_valid",
  1914  			ns:                 "test",
  1915  			enableServiceLinks: &falseValue,
  1916  			container: &v1.Container{
  1917  				EnvFrom: []v1.EnvFromSource{
  1918  					{
  1919  						Prefix:    "p_",
  1920  						SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
  1921  					},
  1922  				},
  1923  			},
  1924  			secret: &v1.Secret{
  1925  				ObjectMeta: metav1.ObjectMeta{
  1926  					Namespace: "test1",
  1927  					Name:      "test-secret",
  1928  				},
  1929  				Data: map[string][]byte{
  1930  					"1234.name": []byte("abc"),
  1931  				},
  1932  			},
  1933  			expectedEnvs: []kubecontainer.EnvVar{
  1934  				{
  1935  					Name:  "p_1234.name",
  1936  					Value: "abc",
  1937  				},
  1938  				{
  1939  					Name:  "KUBERNETES_SERVICE_HOST",
  1940  					Value: "1.2.3.1",
  1941  				},
  1942  				{
  1943  					Name:  "KUBERNETES_SERVICE_PORT",
  1944  					Value: "8081",
  1945  				},
  1946  				{
  1947  					Name:  "KUBERNETES_PORT",
  1948  					Value: "tcp://1.2.3.1:8081",
  1949  				},
  1950  				{
  1951  					Name:  "KUBERNETES_PORT_8081_TCP",
  1952  					Value: "tcp://1.2.3.1:8081",
  1953  				},
  1954  				{
  1955  					Name:  "KUBERNETES_PORT_8081_TCP_PROTO",
  1956  					Value: "tcp",
  1957  				},
  1958  				{
  1959  					Name:  "KUBERNETES_PORT_8081_TCP_PORT",
  1960  					Value: "8081",
  1961  				},
  1962  				{
  1963  					Name:  "KUBERNETES_PORT_8081_TCP_ADDR",
  1964  					Value: "1.2.3.1",
  1965  				},
  1966  			},
  1967  		},
  1968  		{
  1969  			name:               "nil_enableServiceLinks",
  1970  			ns:                 "test",
  1971  			enableServiceLinks: nil,
  1972  			container: &v1.Container{
  1973  				EnvFrom: []v1.EnvFromSource{
  1974  					{
  1975  						Prefix:    "p_",
  1976  						SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
  1977  					},
  1978  				},
  1979  			},
  1980  			secret: &v1.Secret{
  1981  				ObjectMeta: metav1.ObjectMeta{
  1982  					Namespace: "test1",
  1983  					Name:      "test-secret",
  1984  				},
  1985  				Data: map[string][]byte{
  1986  					"1234.name": []byte("abc"),
  1987  				},
  1988  			},
  1989  			expectedError: true,
  1990  		},
  1991  		{
  1992  			name:               "downward api pod without host ips",
  1993  			enablePodHostIPs:   false,
  1994  			ns:                 "downward-api",
  1995  			enableServiceLinks: &falseValue,
  1996  			container: &v1.Container{
  1997  				Env: []v1.EnvVar{
  1998  					{
  1999  						Name: "HOST_IPS",
  2000  						ValueFrom: &v1.EnvVarSource{
  2001  							FieldRef: &v1.ObjectFieldSelector{
  2002  								APIVersion: "v1",
  2003  								FieldPath:  "status.hostIPs",
  2004  							},
  2005  						},
  2006  					},
  2007  				},
  2008  			},
  2009  			podIPs:    []string{"1.2.3.4", "fd00::6"},
  2010  			nilLister: true,
  2011  			expectedEnvs: []kubecontainer.EnvVar{
  2012  				{Name: "HOST_IPS", Value: ""},
  2013  			},
  2014  		},
  2015  	}
  2016  
  2017  	for _, tc := range testCases {
  2018  		t.Run(tc.name, func(t *testing.T) {
  2019  			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodHostIPs, tc.enablePodHostIPs)()
  2020  
  2021  			fakeRecorder := record.NewFakeRecorder(1)
  2022  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2023  			testKubelet.kubelet.recorder = fakeRecorder
  2024  			defer testKubelet.Cleanup()
  2025  			kl := testKubelet.kubelet
  2026  			if tc.nilLister {
  2027  				kl.serviceLister = nil
  2028  			} else if tc.unsyncedServices {
  2029  				kl.serviceLister = testServiceLister{}
  2030  				kl.serviceHasSynced = func() bool { return false }
  2031  			} else {
  2032  				kl.serviceLister = testServiceLister{services}
  2033  				kl.serviceHasSynced = func() bool { return true }
  2034  			}
  2035  
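        			// Serve the test case's fixtures to client-side "get"s: the
        			// reactors return tc.configMap/tc.secret for any requested name,
        			// so a fixture's ObjectMeta need not match the reference in the
        			// container spec. When a fixture is absent, a NotFound error is
        			// returned so the Optional code paths are exercised.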
  2036  			testKubelet.fakeKubeClient.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
  2037  				var err error
  2038  				if tc.configMap == nil {
  2039  					err = apierrors.NewNotFound(action.GetResource().GroupResource(), "configmap-name")
  2040  				}
  2041  				return true, tc.configMap, err
  2042  			})
  2043  			testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
  2044  				var err error
  2045  				if tc.secret == nil {
  2046  					err = apierrors.NewNotFound(action.GetResource().GroupResource(), "secret-name")
  2047  				}
  2048  				return true, tc.secret, err
  2049  			})
  2050  
  2059  			testPod := &v1.Pod{
  2060  				ObjectMeta: metav1.ObjectMeta{
  2061  					Namespace:   tc.ns,
  2062  					Name:        "dapi-test-pod-name",
  2063  					Annotations: map[string]string{},
  2064  				},
  2065  				Spec: v1.PodSpec{
  2066  					ServiceAccountName: "special",
  2067  					NodeName:           "node-name",
  2068  					EnableServiceLinks: tc.enableServiceLinks,
  2069  				},
  2070  			}
  2071  			podIP := ""
  2072  			if len(tc.podIPs) > 0 {
  2073  				podIP = tc.podIPs[0]
  2074  			}
  2075  			if tc.staticPod {
  2076  				testPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
  2077  			}
  2078  
  2079  			result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, tc.podIPs)
  2080  			select {
  2081  			case e := <-fakeRecorder.Events:
  2082  				assert.Equal(t, tc.expectedEvent, e)
  2083  			default:
  2084  				assert.Equal(t, "", tc.expectedEvent)
  2085  			}
  2086  			if tc.expectedError {
  2087  				assert.Error(t, err, tc.name)
  2088  			} else {
  2089  				assert.NoError(t, err, "[%s]", tc.name)
  2090  
  2091  				sort.Sort(envs(result))
  2092  				sort.Sort(envs(tc.expectedEnvs))
  2093  				assert.Equal(t, tc.expectedEnvs, result, "[%s] env entries", tc.name)
  2094  			}
  2095  		})
  2096  
  2097  	}
  2098  }
  2099  
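        // The helpers below build v1.ContainerStatus fixtures for the pod-phase
        // tests; each returns a status whose State (and, where relevant,
        // LastTerminationState) matches the scenario its name describes.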
  2100  func waitingState(cName string) v1.ContainerStatus {
  2101  	return waitingStateWithReason(cName, "")
  2102  }
  2103  func waitingStateWithReason(cName, reason string) v1.ContainerStatus {
  2104  	return v1.ContainerStatus{
  2105  		Name: cName,
  2106  		State: v1.ContainerState{
  2107  			Waiting: &v1.ContainerStateWaiting{Reason: reason},
  2108  		},
  2109  	}
  2110  }
  2111  func waitingStateWithLastTermination(cName string) v1.ContainerStatus {
  2112  	return v1.ContainerStatus{
  2113  		Name: cName,
  2114  		State: v1.ContainerState{
  2115  			Waiting: &v1.ContainerStateWaiting{},
  2116  		},
  2117  		LastTerminationState: v1.ContainerState{
  2118  			Terminated: &v1.ContainerStateTerminated{
  2119  				ExitCode: 0,
  2120  			},
  2121  		},
  2122  	}
  2123  }
  2124  func waitingStateWithNonZeroTermination(cName string) v1.ContainerStatus {
  2125  	return v1.ContainerStatus{
  2126  		Name: cName,
  2127  		State: v1.ContainerState{
  2128  			Waiting: &v1.ContainerStateWaiting{},
  2129  		},
  2130  		LastTerminationState: v1.ContainerState{
  2131  			Terminated: &v1.ContainerStateTerminated{
  2132  				ExitCode: -1,
  2133  			},
  2134  		},
  2135  	}
  2136  }
  2137  func runningState(cName string) v1.ContainerStatus {
  2138  	return v1.ContainerStatus{
  2139  		Name: cName,
  2140  		State: v1.ContainerState{
  2141  			Running: &v1.ContainerStateRunning{},
  2142  		},
  2143  	}
  2144  }
  2145  func startedState(cName string) v1.ContainerStatus {
  2146  	started := true
  2147  	return v1.ContainerStatus{
  2148  		Name: cName,
  2149  		State: v1.ContainerState{
  2150  			Running: &v1.ContainerStateRunning{},
  2151  		},
  2152  		Started: &started,
  2153  	}
  2154  }
  2155  func runningStateWithStartedAt(cName string, startedAt time.Time) v1.ContainerStatus {
  2156  	return v1.ContainerStatus{
  2157  		Name: cName,
  2158  		State: v1.ContainerState{
  2159  			Running: &v1.ContainerStateRunning{StartedAt: metav1.Time{Time: startedAt}},
  2160  		},
  2161  	}
  2162  }
  2163  func stoppedState(cName string) v1.ContainerStatus {
  2164  	return v1.ContainerStatus{
  2165  		Name: cName,
  2166  		State: v1.ContainerState{
  2167  			Terminated: &v1.ContainerStateTerminated{},
  2168  		},
  2169  	}
  2170  }
  2171  func succeededState(cName string) v1.ContainerStatus {
  2172  	return v1.ContainerStatus{
  2173  		Name: cName,
  2174  		State: v1.ContainerState{
  2175  			Terminated: &v1.ContainerStateTerminated{
  2176  				ExitCode: 0,
  2177  			},
  2178  		},
  2179  	}
  2180  }
  2181  func failedState(cName string) v1.ContainerStatus {
  2182  	return v1.ContainerStatus{
  2183  		Name: cName,
  2184  		State: v1.ContainerState{
  2185  			Terminated: &v1.ContainerStateTerminated{
  2186  				ExitCode: -1,
  2187  			},
  2188  		},
  2189  	}
  2190  }
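        // waitingWithLastTerminationUnknown mirrors the status the kubelet reports
        // for a container it can no longer locate: waiting with reason
        // "ContainerCreating", a last termination of "ContainerStatusUnknown" with
        // exit code 137, and the given restart count.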
  2191  func waitingWithLastTerminationUnknown(cName string, restartCount int32) v1.ContainerStatus {
  2192  	return v1.ContainerStatus{
  2193  		Name: cName,
  2194  		State: v1.ContainerState{
  2195  			Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"},
  2196  		},
  2197  		LastTerminationState: v1.ContainerState{
  2198  			Terminated: &v1.ContainerStateTerminated{
  2199  				Reason:   "ContainerStatusUnknown",
  2200  				Message:  "The container could not be located when the pod was deleted.  The container used to be Running",
  2201  				ExitCode: 137,
  2202  			},
  2203  		},
  2204  		RestartCount: restartCount,
  2205  	}
  2206  }
  2207  func ready(status v1.ContainerStatus) v1.ContainerStatus {
  2208  	status.Ready = true
  2209  	return status
  2210  }
  2211  func withID(status v1.ContainerStatus, id string) v1.ContainerStatus {
  2212  	status.ContainerID = id
  2213  	return status
  2214  }
  2215  
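        // TestPodPhaseWithRestartAlways verifies the phase getPhase computes for a
        // pod with RestartPolicyAlways across waiting, running, stopped, and
        // crash-looping container states, including pods marked terminal.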
  2216  func TestPodPhaseWithRestartAlways(t *testing.T) {
  2217  	desiredState := v1.PodSpec{
  2218  		NodeName: "machine",
  2219  		Containers: []v1.Container{
  2220  			{Name: "containerA"},
  2221  			{Name: "containerB"},
  2222  		},
  2223  		RestartPolicy: v1.RestartPolicyAlways,
  2224  	}
  2225  
  2226  	tests := []struct {
  2227  		pod           *v1.Pod
  2228  		podIsTerminal bool
  2229  		status        v1.PodPhase
  2230  		test          string
  2231  	}{
  2232  		{
  2233  			&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}},
  2234  			false,
  2235  			v1.PodPending,
  2236  			"waiting",
  2237  		},
  2238  		{
  2239  			&v1.Pod{
  2240  				Spec: desiredState,
  2241  				Status: v1.PodStatus{
  2242  					ContainerStatuses: []v1.ContainerStatus{
  2243  						runningState("containerA"),
  2244  						runningState("containerB"),
  2245  					},
  2246  				},
  2247  			},
  2248  			false,
  2249  			v1.PodRunning,
  2250  			"all running",
  2251  		},
  2252  		{
  2253  			&v1.Pod{
  2254  				Spec: desiredState,
  2255  				Status: v1.PodStatus{
  2256  					ContainerStatuses: []v1.ContainerStatus{
  2257  						stoppedState("containerA"),
  2258  						stoppedState("containerB"),
  2259  					},
  2260  				},
  2261  			},
  2262  			false,
  2263  			v1.PodRunning,
  2264  			"all stopped with restart always",
  2265  		},
  2266  		{
  2267  			&v1.Pod{
  2268  				Spec: desiredState,
  2269  				Status: v1.PodStatus{
  2270  					ContainerStatuses: []v1.ContainerStatus{
  2271  						succeededState("containerA"),
  2272  						succeededState("containerB"),
  2273  					},
  2274  				},
  2275  			},
  2276  			true,
  2277  			v1.PodSucceeded,
  2278  			"all succeeded with restart always, but the pod is terminal",
  2279  		},
  2280  		{
  2281  			&v1.Pod{
  2282  				Spec: desiredState,
  2283  				Status: v1.PodStatus{
  2284  					ContainerStatuses: []v1.ContainerStatus{
  2285  						succeededState("containerA"),
  2286  						failedState("containerB"),
  2287  					},
  2288  				},
  2289  			},
  2290  			true,
  2291  			v1.PodFailed,
  2292  			"all stopped with restart always, one failed, and the pod is terminal",
  2293  		},
  2294  		{
  2295  			&v1.Pod{
  2296  				Spec: desiredState,
  2297  				Status: v1.PodStatus{
  2298  					ContainerStatuses: []v1.ContainerStatus{
  2299  						runningState("containerA"),
  2300  						stoppedState("containerB"),
  2301  					},
  2302  				},
  2303  			},
  2304  			false,
  2305  			v1.PodRunning,
  2306  			"mixed state #1 with restart always",
  2307  		},
  2308  		{
  2309  			&v1.Pod{
  2310  				Spec: desiredState,
  2311  				Status: v1.PodStatus{
  2312  					ContainerStatuses: []v1.ContainerStatus{
  2313  						runningState("containerA"),
  2314  					},
  2315  				},
  2316  			},
  2317  			false,
  2318  			v1.PodPending,
  2319  			"mixed state #2 with restart always",
  2320  		},
  2321  		{
  2322  			&v1.Pod{
  2323  				Spec: desiredState,
  2324  				Status: v1.PodStatus{
  2325  					ContainerStatuses: []v1.ContainerStatus{
  2326  						runningState("containerA"),
  2327  						waitingState("containerB"),
  2328  					},
  2329  				},
  2330  			},
  2331  			false,
  2332  			v1.PodPending,
  2333  			"mixed state #3 with restart always",
  2334  		},
  2335  		{
  2336  			&v1.Pod{
  2337  				Spec: desiredState,
  2338  				Status: v1.PodStatus{
  2339  					ContainerStatuses: []v1.ContainerStatus{
  2340  						runningState("containerA"),
  2341  						waitingStateWithLastTermination("containerB"),
  2342  					},
  2343  				},
  2344  			},
  2345  			false,
  2346  			v1.PodRunning,
  2347  			"backoff crashloop container with restart always",
  2348  		},
  2349  	}
  2350  	for _, test := range tests {
  2351  		status := getPhase(test.pod, test.pod.Status.ContainerStatuses, test.podIsTerminal)
  2352  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  2353  	}
  2354  }
  2355  
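        // TestPodPhaseWithRestartAlwaysInitContainers verifies that a pod with
        // RestartPolicyAlways stays Pending until its init container succeeds,
        // regardless of how the init container terminated or is waiting.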
  2356  func TestPodPhaseWithRestartAlwaysInitContainers(t *testing.T) {
  2357  	desiredState := v1.PodSpec{
  2358  		NodeName: "machine",
  2359  		InitContainers: []v1.Container{
  2360  			{Name: "containerX"},
  2361  		},
  2362  		Containers: []v1.Container{
  2363  			{Name: "containerA"},
  2364  			{Name: "containerB"},
  2365  		},
  2366  		RestartPolicy: v1.RestartPolicyAlways,
  2367  	}
  2368  
  2369  	tests := []struct {
  2370  		pod    *v1.Pod
  2371  		status v1.PodPhase
  2372  		test   string
  2373  	}{
  2374  		{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
  2375  		{
  2376  			&v1.Pod{
  2377  				Spec: desiredState,
  2378  				Status: v1.PodStatus{
  2379  					InitContainerStatuses: []v1.ContainerStatus{
  2380  						runningState("containerX"),
  2381  					},
  2382  				},
  2383  			},
  2384  			v1.PodPending,
  2385  			"init container running",
  2386  		},
  2387  		{
  2388  			&v1.Pod{
  2389  				Spec: desiredState,
  2390  				Status: v1.PodStatus{
  2391  					InitContainerStatuses: []v1.ContainerStatus{
  2392  						failedState("containerX"),
  2393  					},
  2394  				},
  2395  			},
  2396  			v1.PodPending,
  2397  			"init container terminated non-zero",
  2398  		},
  2399  		{
  2400  			&v1.Pod{
  2401  				Spec: desiredState,
  2402  				Status: v1.PodStatus{
  2403  					InitContainerStatuses: []v1.ContainerStatus{
  2404  						waitingStateWithLastTermination("containerX"),
  2405  					},
  2406  				},
  2407  			},
  2408  			v1.PodPending,
  2409  			"init container waiting, terminated zero",
  2410  		},
  2411  		{
  2412  			&v1.Pod{
  2413  				Spec: desiredState,
  2414  				Status: v1.PodStatus{
  2415  					InitContainerStatuses: []v1.ContainerStatus{
  2416  						waitingStateWithNonZeroTermination("containerX"),
  2417  					},
  2418  				},
  2419  			},
  2420  			v1.PodPending,
  2421  			"init container waiting, terminated non-zero",
  2422  		},
  2423  		{
  2424  			&v1.Pod{
  2425  				Spec: desiredState,
  2426  				Status: v1.PodStatus{
  2427  					InitContainerStatuses: []v1.ContainerStatus{
  2428  						waitingState("containerX"),
  2429  					},
  2430  				},
  2431  			},
  2432  			v1.PodPending,
  2433  			"init container waiting, not terminated",
  2434  		},
  2435  		{
  2436  			&v1.Pod{
  2437  				Spec: desiredState,
  2438  				Status: v1.PodStatus{
  2439  					InitContainerStatuses: []v1.ContainerStatus{
  2440  						succeededState("containerX"),
  2441  					},
  2442  					ContainerStatuses: []v1.ContainerStatus{
  2443  						runningState("containerA"),
  2444  						runningState("containerB"),
  2445  					},
  2446  				},
  2447  			},
  2448  			v1.PodRunning,
  2449  			"init container succeeded",
  2450  		},
  2451  	}
  2452  	for _, test := range tests {
  2453  		statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
  2454  		status := getPhase(test.pod, statusInfo, false)
  2455  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  2456  	}
  2457  }
  2458  
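        // TestPodPhaseWithRestartAlwaysRestartableInitContainers covers phase
        // computation when the init container is restartable (sidecar-style); the
        // SidecarContainers feature gate is enabled for the duration of the test.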
  2459  func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
  2460  	desiredState := v1.PodSpec{
  2461  		NodeName: "machine",
  2462  		InitContainers: []v1.Container{
  2463  			{Name: "containerX", RestartPolicy: &containerRestartPolicyAlways},
  2464  		},
  2465  		Containers: []v1.Container{
  2466  			{Name: "containerA"},
  2467  			{Name: "containerB"},
  2468  		},
  2469  		RestartPolicy: v1.RestartPolicyAlways,
  2470  	}
  2471  
  2472  	tests := []struct {
  2473  		pod           *v1.Pod
  2474  		podIsTerminal bool
  2475  		status        v1.PodPhase
  2476  		test          string
  2477  	}{
  2478  		{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, v1.PodPending, "empty, waiting"},
  2479  		{
  2480  			&v1.Pod{
  2481  				Spec: desiredState,
  2482  				Status: v1.PodStatus{
  2483  					InitContainerStatuses: []v1.ContainerStatus{
  2484  						runningState("containerX"),
  2485  					},
  2486  				},
  2487  			},
  2488  			false,
  2489  			v1.PodPending,
  2490  			"restartable init container running",
  2491  		},
  2492  		{
  2493  			&v1.Pod{
  2494  				Spec: desiredState,
  2495  				Status: v1.PodStatus{
  2496  					InitContainerStatuses: []v1.ContainerStatus{
  2497  						stoppedState("containerX"),
  2498  					},
  2499  				},
  2500  			},
  2501  			false,
  2502  			v1.PodPending,
  2503  			"restartable init container stopped",
  2504  		},
  2505  		{
  2506  			&v1.Pod{
  2507  				Spec: desiredState,
  2508  				Status: v1.PodStatus{
  2509  					InitContainerStatuses: []v1.ContainerStatus{
  2510  						waitingStateWithLastTermination("containerX"),
  2511  					},
  2512  				},
  2513  			},
  2514  			false,
  2515  			v1.PodPending,
  2516  			"restartable init container waiting, terminated zero",
  2517  		},
  2518  		{
  2519  			&v1.Pod{
  2520  				Spec: desiredState,
  2521  				Status: v1.PodStatus{
  2522  					InitContainerStatuses: []v1.ContainerStatus{
  2523  						waitingStateWithNonZeroTermination("containerX"),
  2524  					},
  2525  				},
  2526  			},
  2527  			false,
  2528  			v1.PodPending,
  2529  			"restartable init container waiting, terminated non-zero",
  2530  		},
  2531  		{
  2532  			&v1.Pod{
  2533  				Spec: desiredState,
  2534  				Status: v1.PodStatus{
  2535  					InitContainerStatuses: []v1.ContainerStatus{
  2536  						waitingState("containerX"),
  2537  					},
  2538  				},
  2539  			},
  2540  			false,
  2541  			v1.PodPending,
  2542  			"restartable init container waiting, not terminated",
  2543  		},
  2544  		{
  2545  			&v1.Pod{
  2546  				Spec: desiredState,
  2547  				Status: v1.PodStatus{
  2548  					InitContainerStatuses: []v1.ContainerStatus{
  2549  						startedState("containerX"),
  2550  					},
  2551  					ContainerStatuses: []v1.ContainerStatus{
  2552  						runningState("containerA"),
  2553  					},
  2554  				},
  2555  			},
  2556  			false,
  2557  			v1.PodPending,
  2558  			"restartable init container started, one of two regular containers running",
  2559  		},
  2560  		{
  2561  			&v1.Pod{
  2562  				Spec: desiredState,
  2563  				Status: v1.PodStatus{
  2564  					InitContainerStatuses: []v1.ContainerStatus{
  2565  						startedState("containerX"),
  2566  					},
  2567  					ContainerStatuses: []v1.ContainerStatus{
  2568  						runningState("containerA"),
  2569  						runningState("containerB"),
  2570  					},
  2571  				},
  2572  			},
  2573  			false,
  2574  			v1.PodRunning,
  2575  			"restartable init container started, all regular containers running",
  2576  		},
  2577  		{
  2578  			&v1.Pod{
  2579  				Spec: desiredState,
  2580  				Status: v1.PodStatus{
  2581  					InitContainerStatuses: []v1.ContainerStatus{
  2582  						runningState("containerX"),
  2583  					},
  2584  					ContainerStatuses: []v1.ContainerStatus{
  2585  						runningState("containerA"),
  2586  						runningState("containerB"),
  2587  					},
  2588  				},
  2589  			},
  2590  			false,
  2591  			v1.PodRunning,
  2592  			"restartable init container running, all regular containers running",
  2593  		},
  2594  		{
  2595  			&v1.Pod{
  2596  				Spec: desiredState,
  2597  				Status: v1.PodStatus{
  2598  					InitContainerStatuses: []v1.ContainerStatus{
  2599  						stoppedState("containerX"),
  2600  					},
  2601  					ContainerStatuses: []v1.ContainerStatus{
  2602  						runningState("containerA"),
  2603  						runningState("containerB"),
  2604  					},
  2605  				},
  2606  			},
  2607  			false,
  2608  			v1.PodRunning,
  2609  			"restartable init container stopped, all regular containers running",
  2610  		},
  2611  		{
  2612  			&v1.Pod{
  2613  				Spec: desiredState,
  2614  				Status: v1.PodStatus{
  2615  					InitContainerStatuses: []v1.ContainerStatus{
  2616  						waitingStateWithLastTermination("containerX"),
  2617  					},
  2618  					ContainerStatuses: []v1.ContainerStatus{
  2619  						runningState("containerA"),
  2620  						runningState("containerB"),
  2621  					},
  2622  				},
  2623  			},
  2624  			false,
  2625  			v1.PodRunning,
  2626  			"backoff crashloop restartable init container, all regular containers running",
  2627  		},
  2628  		{
  2629  			&v1.Pod{
  2630  				Spec: desiredState,
  2631  				Status: v1.PodStatus{
  2632  					InitContainerStatuses: []v1.ContainerStatus{
  2633  						failedState("containerX"),
  2634  					},
  2635  					ContainerStatuses: []v1.ContainerStatus{
  2636  						succeededState("containerA"),
  2637  						succeededState("containerB"),
  2638  					},
  2639  				},
  2640  			},
  2641  			true,
  2642  			v1.PodSucceeded,
  2643  			"all regular containers succeeded and restartable init container failed with restart always, but the pod is terminal",
  2644  		},
  2645  		{
  2646  			&v1.Pod{
  2647  				Spec: desiredState,
  2648  				Status: v1.PodStatus{
  2649  					InitContainerStatuses: []v1.ContainerStatus{
  2650  						succeededState("containerX"),
  2651  					},
  2652  					ContainerStatuses: []v1.ContainerStatus{
  2653  						succeededState("containerA"),
  2654  						succeededState("containerB"),
  2655  					},
  2656  				},
  2657  			},
  2658  			true,
  2659  			v1.PodSucceeded,
  2660  			"all regular containers succeeded and restartable init container succeeded with restart always, but the pod is terminal",
  2661  		},
  2662  	}
  2663  	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)()
  2664  	for _, test := range tests {
  2665  		statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
  2666  		status := getPhase(test.pod, statusInfo, test.podIsTerminal)
  2667  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  2668  	}
  2669  }
  2670  
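        // TestPodPhaseWithRestartNever verifies getPhase for RestartPolicyNever:
        // terminated containers are not restarted, so an all-succeeded pod is
        // Succeeded and an all-failed pod is Failed.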
  2671  func TestPodPhaseWithRestartNever(t *testing.T) {
  2672  	desiredState := v1.PodSpec{
  2673  		NodeName: "machine",
  2674  		Containers: []v1.Container{
  2675  			{Name: "containerA"},
  2676  			{Name: "containerB"},
  2677  		},
  2678  		RestartPolicy: v1.RestartPolicyNever,
  2679  	}
  2680  
  2681  	tests := []struct {
  2682  		pod    *v1.Pod
  2683  		status v1.PodPhase
  2684  		test   string
  2685  	}{
  2686  		{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"},
  2687  		{
  2688  			&v1.Pod{
  2689  				Spec: desiredState,
  2690  				Status: v1.PodStatus{
  2691  					ContainerStatuses: []v1.ContainerStatus{
  2692  						runningState("containerA"),
  2693  						runningState("containerB"),
  2694  					},
  2695  				},
  2696  			},
  2697  			v1.PodRunning,
  2698  			"all running with restart never",
  2699  		},
  2700  		{
  2701  			&v1.Pod{
  2702  				Spec: desiredState,
  2703  				Status: v1.PodStatus{
  2704  					ContainerStatuses: []v1.ContainerStatus{
  2705  						succeededState("containerA"),
  2706  						succeededState("containerB"),
  2707  					},
  2708  				},
  2709  			},
  2710  			v1.PodSucceeded,
  2711  			"all succeeded with restart never",
  2712  		},
  2713  		{
  2714  			&v1.Pod{
  2715  				Spec: desiredState,
  2716  				Status: v1.PodStatus{
  2717  					ContainerStatuses: []v1.ContainerStatus{
  2718  						failedState("containerA"),
  2719  						failedState("containerB"),
  2720  					},
  2721  				},
  2722  			},
  2723  			v1.PodFailed,
  2724  			"all failed with restart never",
  2725  		},
  2726  		{
  2727  			&v1.Pod{
  2728  				Spec: desiredState,
  2729  				Status: v1.PodStatus{
  2730  					ContainerStatuses: []v1.ContainerStatus{
  2731  						runningState("containerA"),
  2732  						succeededState("containerB"),
  2733  					},
  2734  				},
  2735  			},
  2736  			v1.PodRunning,
  2737  			"mixed state #1 with restart never",
  2738  		},
  2739  		{
  2740  			&v1.Pod{
  2741  				Spec: desiredState,
  2742  				Status: v1.PodStatus{
  2743  					ContainerStatuses: []v1.ContainerStatus{
  2744  						runningState("containerA"),
  2745  					},
  2746  				},
  2747  			},
  2748  			v1.PodPending,
  2749  			"mixed state #2 with restart never",
  2750  		},
  2751  		{
  2752  			&v1.Pod{
  2753  				Spec: desiredState,
  2754  				Status: v1.PodStatus{
  2755  					ContainerStatuses: []v1.ContainerStatus{
  2756  						runningState("containerA"),
  2757  						waitingState("containerB"),
  2758  					},
  2759  				},
  2760  			},
  2761  			v1.PodPending,
  2762  			"mixed state #3 with restart never",
  2763  		},
  2764  	}
  2765  	for _, test := range tests {
  2766  		status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false)
  2767  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  2768  	}
  2769  }
  2770  
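        // TestPodPhaseWithRestartNeverInitContainers verifies that with
        // RestartPolicyNever a failed init container makes the pod Failed, while a
        // succeeded one lets the phase follow the regular containers.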
  2771  func TestPodPhaseWithRestartNeverInitContainers(t *testing.T) {
  2772  	desiredState := v1.PodSpec{
  2773  		NodeName: "machine",
  2774  		InitContainers: []v1.Container{
  2775  			{Name: "containerX"},
  2776  		},
  2777  		Containers: []v1.Container{
  2778  			{Name: "containerA"},
  2779  			{Name: "containerB"},
  2780  		},
  2781  		RestartPolicy: v1.RestartPolicyNever,
  2782  	}
  2783  
  2784  	tests := []struct {
  2785  		pod    *v1.Pod
  2786  		status v1.PodPhase
  2787  		test   string
  2788  	}{
  2789  		{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
  2790  		{
  2791  			&v1.Pod{
  2792  				Spec: desiredState,
  2793  				Status: v1.PodStatus{
  2794  					InitContainerStatuses: []v1.ContainerStatus{
  2795  						runningState("containerX"),
  2796  					},
  2797  				},
  2798  			},
  2799  			v1.PodPending,
  2800  			"init container running",
  2801  		},
  2802  		{
  2803  			&v1.Pod{
  2804  				Spec: desiredState,
  2805  				Status: v1.PodStatus{
  2806  					InitContainerStatuses: []v1.ContainerStatus{
  2807  						failedState("containerX"),
  2808  					},
  2809  				},
  2810  			},
  2811  			v1.PodFailed,
  2812  			"init container terminated non-zero",
  2813  		},
  2814  		{
  2815  			&v1.Pod{
  2816  				Spec: desiredState,
  2817  				Status: v1.PodStatus{
  2818  					InitContainerStatuses: []v1.ContainerStatus{
  2819  						waitingStateWithLastTermination("containerX"),
  2820  					},
  2821  				},
  2822  			},
  2823  			v1.PodPending,
  2824  			"init container waiting, terminated zero",
  2825  		},
  2826  		{
  2827  			&v1.Pod{
  2828  				Spec: desiredState,
  2829  				Status: v1.PodStatus{
  2830  					InitContainerStatuses: []v1.ContainerStatus{
  2831  						waitingStateWithNonZeroTermination("containerX"),
  2832  					},
  2833  				},
  2834  			},
  2835  			v1.PodFailed,
  2836  			"init container waiting, terminated non-zero",
  2837  		},
  2838  		{
  2839  			&v1.Pod{
  2840  				Spec: desiredState,
  2841  				Status: v1.PodStatus{
  2842  					InitContainerStatuses: []v1.ContainerStatus{
  2843  						waitingState("containerX"),
  2844  					},
  2845  				},
  2846  			},
  2847  			v1.PodPending,
  2848  			"init container waiting, not terminated",
  2849  		},
  2850  		{
  2851  			&v1.Pod{
  2852  				Spec: desiredState,
  2853  				Status: v1.PodStatus{
  2854  					InitContainerStatuses: []v1.ContainerStatus{
  2855  						succeededState("containerX"),
  2856  					},
  2857  					ContainerStatuses: []v1.ContainerStatus{
  2858  						runningState("containerA"),
  2859  						runningState("containerB"),
  2860  					},
  2861  				},
  2862  			},
  2863  			v1.PodRunning,
  2864  			"init container succeeded",
  2865  		},
  2866  	}
  2867  	for _, test := range tests {
  2868  		statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
  2869  		status := getPhase(test.pod, statusInfo, false)
  2870  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  2871  	}
  2872  }
  2873  
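        // TestPodPhaseWithRestartNeverRestartableInitContainers verifies that a
        // restartable init container keeps the pod Running while the main
        // containers run to completion, and that its own failure does not fail an
        // otherwise-succeeded pod.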
  2874  func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
  2875  	desiredState := v1.PodSpec{
  2876  		NodeName: "machine",
  2877  		InitContainers: []v1.Container{
  2878  			{Name: "containerX", RestartPolicy: &containerRestartPolicyAlways},
  2879  		},
  2880  		Containers: []v1.Container{
  2881  			{Name: "containerA"},
  2882  			{Name: "containerB"},
  2883  		},
  2884  		RestartPolicy: v1.RestartPolicyNever,
  2885  	}
  2886  
  2887  	tests := []struct {
  2888  		pod    *v1.Pod
  2889  		status v1.PodPhase
  2890  		test   string
  2891  	}{
  2892  		{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
  2893  		{
  2894  			&v1.Pod{
  2895  				Spec: desiredState,
  2896  				Status: v1.PodStatus{
  2897  					InitContainerStatuses: []v1.ContainerStatus{
  2898  						runningState("containerX"),
  2899  					},
  2900  				},
  2901  			},
  2902  			v1.PodPending,
  2903  			"restartable init container running",
  2904  		},
  2905  		{
  2906  			&v1.Pod{
  2907  				Spec: desiredState,
  2908  				Status: v1.PodStatus{
  2909  					InitContainerStatuses: []v1.ContainerStatus{
  2910  						stoppedState("containerX"),
  2911  					},
  2912  				},
  2913  			},
  2914  			v1.PodPending,
  2915  			"restartable init container stopped",
  2916  		},
  2917  		{
  2918  			&v1.Pod{
  2919  				Spec: desiredState,
  2920  				Status: v1.PodStatus{
  2921  					InitContainerStatuses: []v1.ContainerStatus{
  2922  						waitingStateWithLastTermination("containerX"),
  2923  					},
  2924  				},
  2925  			},
  2926  			v1.PodPending,
  2927  			"restartable init container waiting, terminated zero",
  2928  		},
  2929  		{
  2930  			&v1.Pod{
  2931  				Spec: desiredState,
  2932  				Status: v1.PodStatus{
  2933  					InitContainerStatuses: []v1.ContainerStatus{
  2934  						waitingStateWithNonZeroTermination("containerX"),
  2935  					},
  2936  				},
  2937  			},
  2938  			v1.PodPending,
  2939  			"restartable init container waiting, terminated non-zero",
  2940  		},
  2941  		{
  2942  			&v1.Pod{
  2943  				Spec: desiredState,
  2944  				Status: v1.PodStatus{
  2945  					InitContainerStatuses: []v1.ContainerStatus{
  2946  						waitingState("containerX"),
  2947  					},
  2948  				},
  2949  			},
  2950  			v1.PodPending,
  2951  			"restartable init container waiting, not terminated",
  2952  		},
  2953  		{
  2954  			&v1.Pod{
  2955  				Spec: desiredState,
  2956  				Status: v1.PodStatus{
  2957  					InitContainerStatuses: []v1.ContainerStatus{
  2958  						startedState("containerX"),
  2959  					},
  2960  					ContainerStatuses: []v1.ContainerStatus{
  2961  						runningState("containerA"),
  2962  					},
  2963  				},
  2964  			},
  2965  			v1.PodPending,
  2966  			"restartable init container started, one main container running",
  2967  		},
  2968  		{
  2969  			&v1.Pod{
  2970  				Spec: desiredState,
  2971  				Status: v1.PodStatus{
  2972  					InitContainerStatuses: []v1.ContainerStatus{
  2973  						startedState("containerX"),
  2974  					},
  2975  					ContainerStatuses: []v1.ContainerStatus{
  2976  						succeededState("containerA"),
  2977  						succeededState("containerB"),
  2978  					},
  2979  				},
  2980  			},
  2981  			v1.PodRunning,
  2982  			"restartable init container started, main containers succeeded",
  2983  		},
  2984  		{
  2985  			&v1.Pod{
  2986  				Spec: desiredState,
  2987  				Status: v1.PodStatus{
  2988  					InitContainerStatuses: []v1.ContainerStatus{
  2989  						runningState("containerX"),
  2990  					},
  2991  					ContainerStatuses: []v1.ContainerStatus{
  2992  						succeededState("containerA"),
  2993  						succeededState("containerB"),
  2994  					},
  2995  				},
  2996  			},
  2997  			v1.PodRunning,
  2998  			"restartable init container running, main containers succeeded",
  2999  		},
  3000  		{
  3001  			&v1.Pod{
  3002  				Spec: desiredState,
  3003  				Status: v1.PodStatus{
  3004  					InitContainerStatuses: []v1.ContainerStatus{
  3005  						succeededState("containerX"),
  3006  					},
  3007  					ContainerStatuses: []v1.ContainerStatus{
  3008  						succeededState("containerA"),
  3009  						succeededState("containerB"),
  3010  					},
  3011  				},
  3012  			},
  3013  			v1.PodSucceeded,
  3014  			"all containers succeeded",
  3015  		},
  3016  		{
  3017  			&v1.Pod{
  3018  				Spec: desiredState,
  3019  				Status: v1.PodStatus{
  3020  					InitContainerStatuses: []v1.ContainerStatus{
  3021  						failedState("containerX"),
  3022  					},
  3023  					ContainerStatuses: []v1.ContainerStatus{
  3024  						succeededState("containerA"),
  3025  						succeededState("containerB"),
  3026  					},
  3027  				},
  3028  			},
  3029  			v1.PodSucceeded,
  3030  			"restartable init container terminated non-zero, main containers succeeded",
  3031  		},
  3032  		{
  3033  			&v1.Pod{
  3034  				Spec: desiredState,
  3035  				Status: v1.PodStatus{
  3036  					InitContainerStatuses: []v1.ContainerStatus{
  3037  						waitingStateWithLastTermination("containerX"),
  3038  					},
  3039  					ContainerStatuses: []v1.ContainerStatus{
  3040  						succeededState("containerA"),
  3041  						succeededState("containerB"),
  3042  					},
  3043  				},
  3044  			},
  3045  			v1.PodSucceeded,
  3046  			"backoff crashloop restartable init container, main containers succeeded",
  3047  		},
  3048  		{
  3049  			&v1.Pod{
  3050  				Spec: desiredState,
  3051  				Status: v1.PodStatus{
  3052  					InitContainerStatuses: []v1.ContainerStatus{
  3053  						waitingStateWithNonZeroTermination("containerX"),
  3054  					},
  3055  					ContainerStatuses: []v1.ContainerStatus{
  3056  						succeededState("containerA"),
  3057  						succeededState("containerB"),
  3058  					},
  3059  				},
  3060  			},
  3061  			v1.PodSucceeded,
  3062  			"backoff crashloop with non-zero restartable init container, main containers succeeded",
  3063  		},
  3064  	}
  3065  	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)()
  3066  	for _, test := range tests {
  3067  		statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
  3068  		status := getPhase(test.pod, statusInfo, false)
  3069  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  3070  	}
  3071  }
  3072  
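        // TestPodPhaseWithRestartOnFailure verifies getPhase for
        // RestartPolicyOnFailure: failed containers are restarted, so an all-failed
        // pod reports Running rather than Failed.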
  3073  func TestPodPhaseWithRestartOnFailure(t *testing.T) {
  3074  	desiredState := v1.PodSpec{
  3075  		NodeName: "machine",
  3076  		Containers: []v1.Container{
  3077  			{Name: "containerA"},
  3078  			{Name: "containerB"},
  3079  		},
  3080  		RestartPolicy: v1.RestartPolicyOnFailure,
  3081  	}
  3082  
  3083  	tests := []struct {
  3084  		pod    *v1.Pod
  3085  		status v1.PodPhase
  3086  		test   string
  3087  	}{
  3088  		{&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"},
  3089  		{
  3090  			&v1.Pod{
  3091  				Spec: desiredState,
  3092  				Status: v1.PodStatus{
  3093  					ContainerStatuses: []v1.ContainerStatus{
  3094  						runningState("containerA"),
  3095  						runningState("containerB"),
  3096  					},
  3097  				},
  3098  			},
  3099  			v1.PodRunning,
  3100  			"all running with restart onfailure",
  3101  		},
  3102  		{
  3103  			&v1.Pod{
  3104  				Spec: desiredState,
  3105  				Status: v1.PodStatus{
  3106  					ContainerStatuses: []v1.ContainerStatus{
  3107  						succeededState("containerA"),
  3108  						succeededState("containerB"),
  3109  					},
  3110  				},
  3111  			},
  3112  			v1.PodSucceeded,
  3113  			"all succeeded with restart onfailure",
  3114  		},
  3115  		{
  3116  			&v1.Pod{
  3117  				Spec: desiredState,
  3118  				Status: v1.PodStatus{
  3119  					ContainerStatuses: []v1.ContainerStatus{
  3120  						failedState("containerA"),
  3121  						failedState("containerB"),
  3122  					},
  3123  				},
  3124  			},
  3125  			v1.PodRunning,
  3126  			"all failed with restart onfailure",
  3127  		},
  3128  		{
  3129  			&v1.Pod{
  3130  				Spec: desiredState,
  3131  				Status: v1.PodStatus{
  3132  					ContainerStatuses: []v1.ContainerStatus{
  3133  						runningState("containerA"),
  3134  						succeededState("containerB"),
  3135  					},
  3136  				},
  3137  			},
  3138  			v1.PodRunning,
  3139  			"mixed state #1 with restart onfailure",
  3140  		},
  3141  		{
  3142  			&v1.Pod{
  3143  				Spec: desiredState,
  3144  				Status: v1.PodStatus{
  3145  					ContainerStatuses: []v1.ContainerStatus{
  3146  						runningState("containerA"),
  3147  					},
  3148  				},
  3149  			},
  3150  			v1.PodPending,
  3151  			"mixed state #2 with restart onfailure",
  3152  		},
  3153  		{
  3154  			&v1.Pod{
  3155  				Spec: desiredState,
  3156  				Status: v1.PodStatus{
  3157  					ContainerStatuses: []v1.ContainerStatus{
  3158  						runningState("containerA"),
  3159  						waitingState("containerB"),
  3160  					},
  3161  				},
  3162  			},
  3163  			v1.PodPending,
  3164  			"mixed state #3 with restart onfailure",
  3165  		},
  3166  		{
  3167  			&v1.Pod{
  3168  				Spec: desiredState,
  3169  				Status: v1.PodStatus{
  3170  					ContainerStatuses: []v1.ContainerStatus{
  3171  						runningState("containerA"),
  3172  						waitingStateWithLastTermination("containerB"),
  3173  					},
  3174  				},
  3175  			},
  3176  			v1.PodRunning,
  3177  			"backoff crashloop container with restart onfailure",
  3178  		},
  3179  	}
  3180  	for _, test := range tests {
  3181  		status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false)
  3182  		assert.Equal(t, test.status, status, "[test %s]", test.test)
  3183  	}
  3184  }
  3185  
  3186  // No special init-specific logic is needed for RestartOnFailure; see the RestartAlways case.
  3187  // func TestPodPhaseWithRestartOnFailureInitContainers(t *testing.T) {
  3188  // }
  3189  
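        // TestConvertToAPIContainerStatuses checks the statuses synthesized when
        // the runtime reports no current status: containers are marked waiting with
        // an unknown last termination, and the restart count is incremented only
        // when the pod is not being deleted.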
  3190  func TestConvertToAPIContainerStatuses(t *testing.T) {
  3191  	desiredState := v1.PodSpec{
  3192  		NodeName: "machine",
  3193  		Containers: []v1.Container{
  3194  			{Name: "containerA"},
  3195  			{Name: "containerB"},
  3196  		},
  3197  		RestartPolicy: v1.RestartPolicyAlways,
  3198  	}
  3199  	now := metav1.Now()
  3200  
  3201  	tests := []struct {
  3202  		name              string
  3203  		pod               *v1.Pod
  3204  		currentStatus     *kubecontainer.PodStatus
  3205  		previousStatus    []v1.ContainerStatus
  3206  		containers        []v1.Container
  3207  		hasInitContainers bool
  3208  		isInitContainer   bool
  3209  		expected          []v1.ContainerStatus
  3210  	}{
  3211  		{
  3212  			name: "no current status, with previous statuses and deletion",
  3213  			pod: &v1.Pod{
  3214  				Spec: desiredState,
  3215  				Status: v1.PodStatus{
  3216  					ContainerStatuses: []v1.ContainerStatus{
  3217  						runningState("containerA"),
  3218  						runningState("containerB"),
  3219  					},
  3220  				},
  3221  				ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
  3222  			},
  3223  			currentStatus: &kubecontainer.PodStatus{},
  3224  			previousStatus: []v1.ContainerStatus{
  3225  				runningState("containerA"),
  3226  				runningState("containerB"),
  3227  			},
  3228  			containers: desiredState.Containers,
  3229  			// no init containers
  3230  			// is not an init container
  3231  			expected: []v1.ContainerStatus{
  3232  				waitingWithLastTerminationUnknown("containerA", 0),
  3233  				waitingWithLastTerminationUnknown("containerB", 0),
  3234  			},
  3235  		},
  3236  		{
  3237  			name: "no current status, with previous statuses and no deletion",
  3238  			pod: &v1.Pod{
  3239  				Spec: desiredState,
  3240  				Status: v1.PodStatus{
  3241  					ContainerStatuses: []v1.ContainerStatus{
  3242  						runningState("containerA"),
  3243  						runningState("containerB"),
  3244  					},
  3245  				},
  3246  			},
  3247  			currentStatus: &kubecontainer.PodStatus{},
  3248  			previousStatus: []v1.ContainerStatus{
  3249  				runningState("containerA"),
  3250  				runningState("containerB"),
  3251  			},
  3252  			containers: desiredState.Containers,
  3253  			// no init containers
  3254  			// is not an init container
  3255  			expected: []v1.ContainerStatus{
  3256  				waitingWithLastTerminationUnknown("containerA", 1),
  3257  				waitingWithLastTerminationUnknown("containerB", 1),
  3258  			},
  3259  		},
  3260  	}
  3261  	for _, test := range tests {
  3262  		t.Run(test.name, func(t *testing.T) {
  3263  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  3264  			defer testKubelet.Cleanup()
  3265  			kl := testKubelet.kubelet
  3266  			containerStatuses := kl.convertToAPIContainerStatuses(
  3267  				test.pod,
  3268  				test.currentStatus,
  3269  				test.previousStatus,
  3270  				test.containers,
  3271  				test.hasInitContainers,
  3272  				test.isInitContainer,
  3273  			)
  3274  			for i, status := range containerStatuses {
  3275  				assert.Equal(t, test.expected[i], status, "[test %s]", test.name)
  3276  			}
  3277  		})
  3278  	}
  3279  }
  3280  
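        // Test_generateAPIPodStatus exercises full PodStatus generation: phase,
        // pod conditions, host IP(s), and copying of the DisruptionTarget condition
        // when PodDisruptionConditions is enabled.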
  3281  func Test_generateAPIPodStatus(t *testing.T) {
  3282  	desiredState := v1.PodSpec{
  3283  		NodeName: "machine",
  3284  		Containers: []v1.Container{
  3285  			{Name: "containerA"},
  3286  			{Name: "containerB"},
  3287  		},
  3288  		RestartPolicy: v1.RestartPolicyAlways,
  3289  	}
  3290  	sandboxReadyStatus := &kubecontainer.PodStatus{
  3291  		SandboxStatuses: []*runtimeapi.PodSandboxStatus{
  3292  			{
  3293  				Network: &runtimeapi.PodSandboxNetworkStatus{
  3294  					Ip: "10.0.0.10",
  3295  				},
  3296  				Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
  3297  				State:    runtimeapi.PodSandboxState_SANDBOX_READY,
  3298  			},
  3299  		},
  3300  	}
  3301  
  3302  	now := metav1.Now()
  3303  	normalized_now := now.Rfc3339Copy()
  3304  
  3305  	tests := []struct {
  3306  		name                                       string
  3307  		enablePodHostIPs                           bool // enable PodHostIPs feature gate
  3308  		pod                                        *v1.Pod
  3309  		currentStatus                              *kubecontainer.PodStatus
  3310  		unreadyContainer                           []string
  3311  		previousStatus                             v1.PodStatus
  3312  		isPodTerminal                              bool
  3313  		enablePodDisruptionConditions              bool
  3314  		expected                                   v1.PodStatus
  3315  		expectedPodDisruptionCondition             v1.PodCondition
  3316  		expectedPodReadyToStartContainersCondition v1.PodCondition
  3317  	}{
  3318  		{
  3319  			name: "pod disruption condition is copied over and the phase is set to failed when deleted; PodDisruptionConditions enabled",
  3320  			pod: &v1.Pod{
  3321  				Spec: desiredState,
  3322  				Status: v1.PodStatus{
  3323  					ContainerStatuses: []v1.ContainerStatus{
  3324  						runningState("containerA"),
  3325  						runningState("containerB"),
  3326  					},
  3327  					Conditions: []v1.PodCondition{{
  3328  						Type:               v1.DisruptionTarget,
  3329  						Status:             v1.ConditionTrue,
  3330  						LastTransitionTime: normalized_now,
  3331  					}},
  3332  				},
  3333  				ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
  3334  			},
  3335  			currentStatus: sandboxReadyStatus,
  3336  			previousStatus: v1.PodStatus{
  3337  				ContainerStatuses: []v1.ContainerStatus{
  3338  					runningState("containerA"),
  3339  					runningState("containerB"),
  3340  				},
  3341  				Conditions: []v1.PodCondition{{
  3342  					Type:               v1.DisruptionTarget,
  3343  					Status:             v1.ConditionTrue,
  3344  					LastTransitionTime: normalized_now,
  3345  				}},
  3346  			},
  3347  			isPodTerminal:                 true,
  3348  			enablePodDisruptionConditions: true,
  3349  			expected: v1.PodStatus{
  3350  				Phase:    v1.PodFailed,
  3351  				HostIP:   "127.0.0.1",
  3352  				QOSClass: v1.PodQOSBestEffort,
  3353  				Conditions: []v1.PodCondition{
  3354  					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
  3355  					{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodFailed"},
  3356  					{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodFailed"},
  3357  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3358  				},
  3359  				ContainerStatuses: []v1.ContainerStatus{
  3360  					ready(waitingWithLastTerminationUnknown("containerA", 0)),
  3361  					ready(waitingWithLastTerminationUnknown("containerB", 0)),
  3362  				},
  3363  			},
  3364  			expectedPodDisruptionCondition: v1.PodCondition{
  3365  				Type:               v1.DisruptionTarget,
  3366  				Status:             v1.ConditionTrue,
  3367  				LastTransitionTime: normalizedNow,
  3368  			},
  3369  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3370  				Type:   v1.PodReadyToStartContainers,
  3371  				Status: v1.ConditionTrue,
  3372  			},
  3373  		},
  3374  		{
  3375  			name: "current status ready, with previous statuses and deletion",
  3376  			pod: &v1.Pod{
  3377  				Spec: desiredState,
  3378  				Status: v1.PodStatus{
  3379  					ContainerStatuses: []v1.ContainerStatus{
  3380  						runningState("containerA"),
  3381  						runningState("containerB"),
  3382  					},
  3383  				},
  3384  				ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
  3385  			},
  3386  			currentStatus: sandboxReadyStatus,
  3387  			previousStatus: v1.PodStatus{
  3388  				ContainerStatuses: []v1.ContainerStatus{
  3389  					runningState("containerA"),
  3390  					runningState("containerB"),
  3391  				},
  3392  			},
  3393  			enablePodHostIPs: true,
  3394  			expected: v1.PodStatus{
  3395  				Phase:    v1.PodRunning,
  3396  				HostIP:   "127.0.0.1",
  3397  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3398  				QOSClass: v1.PodQOSBestEffort,
  3399  				Conditions: []v1.PodCondition{
  3400  					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
  3401  					{Type: v1.PodReady, Status: v1.ConditionTrue},
  3402  					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
  3403  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3404  				},
  3405  				ContainerStatuses: []v1.ContainerStatus{
  3406  					ready(waitingWithLastTerminationUnknown("containerA", 0)),
  3407  					ready(waitingWithLastTerminationUnknown("containerB", 0)),
  3408  				},
  3409  			},
  3410  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3411  				Type:   v1.PodReadyToStartContainers,
  3412  				Status: v1.ConditionTrue,
  3413  			},
  3414  		},
  3415  		{
  3416  			name: "current status ready, with previous statuses and no deletion",
  3417  			pod: &v1.Pod{
  3418  				Spec: desiredState,
  3419  				Status: v1.PodStatus{
  3420  					ContainerStatuses: []v1.ContainerStatus{
  3421  						runningState("containerA"),
  3422  						runningState("containerB"),
  3423  					},
  3424  				},
  3425  			},
  3426  			currentStatus: sandboxReadyStatus,
  3427  			previousStatus: v1.PodStatus{
  3428  				ContainerStatuses: []v1.ContainerStatus{
  3429  					runningState("containerA"),
  3430  					runningState("containerB"),
  3431  				},
  3432  			},
  3433  			enablePodHostIPs: true,
  3434  			expected: v1.PodStatus{
  3435  				Phase:    v1.PodRunning,
  3436  				HostIP:   "127.0.0.1",
  3437  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3438  				QOSClass: v1.PodQOSBestEffort,
  3439  				Conditions: []v1.PodCondition{
  3440  					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
  3441  					{Type: v1.PodReady, Status: v1.ConditionTrue},
  3442  					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
  3443  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3444  				},
  3445  				ContainerStatuses: []v1.ContainerStatus{
  3446  					ready(waitingWithLastTerminationUnknown("containerA", 1)),
  3447  					ready(waitingWithLastTerminationUnknown("containerB", 1)),
  3448  				},
  3449  			},
  3450  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3451  				Type:   v1.PodReadyToStartContainers,
  3452  				Status: v1.ConditionTrue,
  3453  			},
  3454  		},
  3455  		{
  3456  			name: "terminal phase cannot be changed (apiserver previous is succeeded)",
  3457  			pod: &v1.Pod{
  3458  				Spec: desiredState,
  3459  				Status: v1.PodStatus{
  3460  					Phase: v1.PodSucceeded,
  3461  					ContainerStatuses: []v1.ContainerStatus{
  3462  						runningState("containerA"),
  3463  						runningState("containerB"),
  3464  					},
  3465  				},
  3466  			},
  3467  			currentStatus: &kubecontainer.PodStatus{},
  3468  			previousStatus: v1.PodStatus{
  3469  				ContainerStatuses: []v1.ContainerStatus{
  3470  					runningState("containerA"),
  3471  					runningState("containerB"),
  3472  				},
  3473  			},
  3474  			enablePodHostIPs: true,
  3475  			expected: v1.PodStatus{
  3476  				Phase:    v1.PodSucceeded,
  3477  				HostIP:   "127.0.0.1",
  3478  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3479  				QOSClass: v1.PodQOSBestEffort,
  3480  				Conditions: []v1.PodCondition{
  3481  					{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
  3482  					{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
  3483  					{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
  3484  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3485  				},
  3486  				ContainerStatuses: []v1.ContainerStatus{
  3487  					ready(waitingWithLastTerminationUnknown("containerA", 1)),
  3488  					ready(waitingWithLastTerminationUnknown("containerB", 1)),
  3489  				},
  3490  			},
  3491  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3492  				Type:   v1.PodReadyToStartContainers,
  3493  				Status: v1.ConditionFalse,
  3494  			},
  3495  		},
  3496  		{
  3497  			name: "terminal phase from previous status must remain terminal, restartAlways",
  3498  			pod: &v1.Pod{
  3499  				Spec: desiredState,
  3500  				Status: v1.PodStatus{
  3501  					Phase: v1.PodRunning,
  3502  					ContainerStatuses: []v1.ContainerStatus{
  3503  						runningState("containerA"),
  3504  						runningState("containerB"),
  3505  					},
  3506  				},
  3507  			},
  3508  			currentStatus: &kubecontainer.PodStatus{},
  3509  			previousStatus: v1.PodStatus{
  3510  				Phase: v1.PodSucceeded,
  3511  				ContainerStatuses: []v1.ContainerStatus{
  3512  					runningState("containerA"),
  3513  					runningState("containerB"),
  3514  				},
  3515  				// Reason and message should be preserved
  3516  				Reason:  "Test",
  3517  				Message: "test",
  3518  			},
  3519  			enablePodHostIPs: true,
  3520  			expected: v1.PodStatus{
  3521  				Phase:    v1.PodSucceeded,
  3522  				HostIP:   "127.0.0.1",
  3523  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3524  				QOSClass: v1.PodQOSBestEffort,
  3525  				Conditions: []v1.PodCondition{
  3526  					{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
  3527  					{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
  3528  					{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
  3529  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3530  				},
  3531  				ContainerStatuses: []v1.ContainerStatus{
  3532  					ready(waitingWithLastTerminationUnknown("containerA", 1)),
  3533  					ready(waitingWithLastTerminationUnknown("containerB", 1)),
  3534  				},
  3535  				Reason:  "Test",
  3536  				Message: "test",
  3537  			},
  3538  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3539  				Type:   v1.PodReadyToStartContainers,
  3540  				Status: v1.ConditionFalse,
  3541  			},
  3542  		},
  3543  		{
  3544  			name: "terminal phase from previous status must remain terminal, restartNever",
  3545  			pod: &v1.Pod{
  3546  				Spec: v1.PodSpec{
  3547  					NodeName: "machine",
  3548  					Containers: []v1.Container{
  3549  						{Name: "containerA"},
  3550  						{Name: "containerB"},
  3551  					},
  3552  					RestartPolicy: v1.RestartPolicyNever,
  3553  				},
  3554  				Status: v1.PodStatus{
  3555  					Phase: v1.PodRunning,
  3556  					ContainerStatuses: []v1.ContainerStatus{
  3557  						runningState("containerA"),
  3558  						runningState("containerB"),
  3559  					},
  3560  				},
  3561  			},
  3562  			currentStatus: &kubecontainer.PodStatus{},
  3563  			previousStatus: v1.PodStatus{
  3564  				Phase: v1.PodSucceeded,
  3565  				ContainerStatuses: []v1.ContainerStatus{
  3566  					succeededState("containerA"),
  3567  					succeededState("containerB"),
  3568  				},
  3569  				// Reason and message should be preserved
  3570  				Reason:  "Test",
  3571  				Message: "test",
  3572  			},
  3573  			enablePodHostIPs: true,
  3574  			expected: v1.PodStatus{
  3575  				Phase:    v1.PodSucceeded,
  3576  				HostIP:   "127.0.0.1",
  3577  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3578  				QOSClass: v1.PodQOSBestEffort,
  3579  				Conditions: []v1.PodCondition{
  3580  					{Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
  3581  					{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
  3582  					{Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
  3583  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3584  				},
  3585  				ContainerStatuses: []v1.ContainerStatus{
  3586  					ready(succeededState("containerA")),
  3587  					ready(succeededState("containerB")),
  3588  				},
  3589  				Reason:  "Test",
  3590  				Message: "test",
  3591  			},
  3592  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3593  				Type:   v1.PodReadyToStartContainers,
  3594  				Status: v1.ConditionFalse,
  3595  			},
  3596  		},
  3597  		{
  3598  			name: "running can revert to pending",
  3599  			pod: &v1.Pod{
  3600  				Spec: desiredState,
  3601  				Status: v1.PodStatus{
  3602  					Phase: v1.PodRunning,
  3603  					ContainerStatuses: []v1.ContainerStatus{
  3604  						runningState("containerA"),
  3605  						runningState("containerB"),
  3606  					},
  3607  				},
  3608  			},
  3609  			currentStatus: sandboxReadyStatus,
  3610  			previousStatus: v1.PodStatus{
  3611  				ContainerStatuses: []v1.ContainerStatus{
  3612  					waitingState("containerA"),
  3613  					waitingState("containerB"),
  3614  				},
  3615  			},
  3616  			enablePodHostIPs: true,
  3617  			expected: v1.PodStatus{
  3618  				Phase:    v1.PodPending,
  3619  				HostIP:   "127.0.0.1",
  3620  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3621  				QOSClass: v1.PodQOSBestEffort,
  3622  				Conditions: []v1.PodCondition{
  3623  					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
  3624  					{Type: v1.PodReady, Status: v1.ConditionTrue},
  3625  					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
  3626  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3627  				},
  3628  				ContainerStatuses: []v1.ContainerStatus{
  3629  					ready(waitingStateWithReason("containerA", "ContainerCreating")),
  3630  					ready(waitingStateWithReason("containerB", "ContainerCreating")),
  3631  				},
  3632  			},
  3633  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3634  				Type:   v1.PodReadyToStartContainers,
  3635  				Status: v1.ConditionTrue,
  3636  			},
  3637  		},
  3638  		{
  3639  			name: "reason and message are preserved when phase doesn't change",
  3640  			pod: &v1.Pod{
  3641  				Spec: desiredState,
  3642  				Status: v1.PodStatus{
  3643  					Phase: v1.PodRunning,
  3644  					ContainerStatuses: []v1.ContainerStatus{
  3645  						waitingState("containerA"),
  3646  						waitingState("containerB"),
  3647  					},
  3648  				},
  3649  			},
  3650  			currentStatus: &kubecontainer.PodStatus{
  3651  				SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
  3652  				ContainerStatuses: []*kubecontainer.Status{
  3653  					{
  3654  						ID:        kubecontainer.ContainerID{ID: "foo"},
  3655  						Name:      "containerB",
  3656  						StartedAt: time.Unix(1, 0).UTC(),
  3657  						State:     kubecontainer.ContainerStateRunning,
  3658  					},
  3659  				},
  3660  			},
  3661  			previousStatus: v1.PodStatus{
  3662  				Phase:   v1.PodPending,
  3663  				Reason:  "Test",
  3664  				Message: "test",
  3665  				ContainerStatuses: []v1.ContainerStatus{
  3666  					waitingState("containerA"),
  3667  					runningState("containerB"),
  3668  				},
  3669  			},
  3670  			enablePodHostIPs: true,
  3671  			expected: v1.PodStatus{
  3672  				Phase:    v1.PodPending,
  3673  				Reason:   "Test",
  3674  				Message:  "test",
  3675  				HostIP:   "127.0.0.1",
  3676  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3677  				QOSClass: v1.PodQOSBestEffort,
  3678  				Conditions: []v1.PodCondition{
  3679  					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
  3680  					{Type: v1.PodReady, Status: v1.ConditionTrue},
  3681  					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
  3682  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3683  				},
  3684  				ContainerStatuses: []v1.ContainerStatus{
  3685  					ready(waitingStateWithReason("containerA", "ContainerCreating")),
  3686  					ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://foo")),
  3687  				},
  3688  			},
  3689  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3690  				Type:   v1.PodReadyToStartContainers,
  3691  				Status: v1.ConditionTrue,
  3692  			},
  3693  		},
  3694  		{
  3695  			name: "reason and message are cleared when phase changes",
  3696  			pod: &v1.Pod{
  3697  				Spec: desiredState,
  3698  				Status: v1.PodStatus{
  3699  					Phase: v1.PodPending,
  3700  					ContainerStatuses: []v1.ContainerStatus{
  3701  						waitingState("containerA"),
  3702  						waitingState("containerB"),
  3703  					},
  3704  				},
  3705  			},
  3706  			currentStatus: &kubecontainer.PodStatus{
  3707  				SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
  3708  				ContainerStatuses: []*kubecontainer.Status{
  3709  					{
  3710  						ID:        kubecontainer.ContainerID{ID: "c1"},
  3711  						Name:      "containerA",
  3712  						StartedAt: time.Unix(1, 0).UTC(),
  3713  						State:     kubecontainer.ContainerStateRunning,
  3714  					},
  3715  					{
  3716  						ID:        kubecontainer.ContainerID{ID: "c2"},
  3717  						Name:      "containerB",
  3718  						StartedAt: time.Unix(2, 0).UTC(),
  3719  						State:     kubecontainer.ContainerStateRunning,
  3720  					},
  3721  				},
  3722  			},
  3723  			previousStatus: v1.PodStatus{
  3724  				Phase:   v1.PodPending,
  3725  				Reason:  "Test",
  3726  				Message: "test",
  3727  				ContainerStatuses: []v1.ContainerStatus{
  3728  					runningState("containerA"),
  3729  					runningState("containerB"),
  3730  				},
  3731  			},
  3732  			enablePodHostIPs: true,
  3733  			expected: v1.PodStatus{
  3734  				Phase:    v1.PodRunning,
  3735  				HostIP:   "127.0.0.1",
  3736  				HostIPs:  []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
  3737  				QOSClass: v1.PodQOSBestEffort,
  3738  				Conditions: []v1.PodCondition{
  3739  					{Type: v1.PodInitialized, Status: v1.ConditionTrue},
  3740  					{Type: v1.PodReady, Status: v1.ConditionTrue},
  3741  					{Type: v1.ContainersReady, Status: v1.ConditionTrue},
  3742  					{Type: v1.PodScheduled, Status: v1.ConditionTrue},
  3743  				},
  3744  				ContainerStatuses: []v1.ContainerStatus{
  3745  					ready(withID(runningStateWithStartedAt("containerA", time.Unix(1, 0).UTC()), "://c1")),
  3746  					ready(withID(runningStateWithStartedAt("containerB", time.Unix(2, 0).UTC()), "://c2")),
  3747  				},
  3748  			},
  3749  			expectedPodReadyToStartContainersCondition: v1.PodCondition{
  3750  				Type:   v1.PodReadyToStartContainers,
  3751  				Status: v1.ConditionTrue,
  3752  			},
  3753  		},
  3754  	}
  3755  	for _, test := range tests {
  3756  		for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
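        			// Each case runs twice: with the PodReadyToStartContainersCondition gate off and on.
        			// When the gate is on, the expected condition is prepended to the expected list below.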
  3757  			t.Run(test.name, func(t *testing.T) {
  3758  				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
  3759  				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodHostIPs, test.enablePodHostIPs)()
  3760  				defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)()
  3761  				testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  3762  				defer testKubelet.Cleanup()
  3763  				kl := testKubelet.kubelet
  3764  				kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
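        				// Seed the status manager with the previous status; generateAPIPodStatus consults it
        				// when computing the new status, e.g. to keep a terminal phase terminal.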
  3765  				for _, name := range test.unreadyContainer {
  3766  					kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
  3767  				}
  3768  				expected := test.expected.DeepCopy()
  3769  				actual := kl.generateAPIPodStatus(test.pod, test.currentStatus, test.isPodTerminal)
  3770  				if enablePodReadyToStartContainersCondition {
  3771  					expected.Conditions = append([]v1.PodCondition{test.expectedPodReadyToStartContainersCondition}, expected.Conditions...)
  3772  				}
  3773  				if test.enablePodDisruptionConditions {
  3774  					expected.Conditions = append([]v1.PodCondition{test.expectedPodDisruptionCondition}, expected.Conditions...)
  3775  				}
  3776  				if !apiequality.Semantic.DeepEqual(*expected, actual) {
  3777  					t.Fatalf("Unexpected status: %s", cmp.Diff(*expected, actual))
  3778  				}
  3779  			})
  3780  		}
  3781  	}
  3782  }
  3783  
  3784  func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
  3785  	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
  3786  	testContainerName := "ctr0"
  3787  	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
  3788  
  3789  	CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
  3790  	CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
  3791  	CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
  3792  	CPU1AndMem1GAndStorage2GAndCustomResource := CPU1AndMem1GAndStorage2G.DeepCopy()
  3793  	CPU1AndMem1GAndStorage2GAndCustomResource["unknown-resource"] = resource.MustParse("1")
  3794  
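        	// The fake CRI status reports exactly the allocated cpu/memory (ephemeral storage and
        	// custom resources are never reported by the runtime), so any in-flight Resize should be
        	// treated as complete and cleared by generateAPIPodStatus.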
  3795  	testKubecontainerPodStatus := kubecontainer.PodStatus{
  3796  		ContainerStatuses: []*kubecontainer.Status{
  3797  			{
  3798  				ID:   testContainerID,
  3799  				Name: testContainerName,
  3800  				Resources: &kubecontainer.ContainerResources{
  3801  					CPURequest:    CPU1AndMem1G.Cpu(),
  3802  					MemoryRequest: CPU1AndMem1G.Memory(),
  3803  					CPULimit:      CPU1AndMem1G.Cpu(),
  3804  					MemoryLimit:   CPU1AndMem1G.Memory(),
  3805  				},
  3806  			},
  3807  		},
  3808  	}
  3809  
  3810  	tests := []struct {
  3811  		name      string
  3812  		pod       *v1.Pod
  3813  		oldStatus *v1.PodStatus
  3814  	}{
  3815  		{
  3816  			name: "custom resource in ResourcesAllocated, resize should be null",
  3817  			pod: &v1.Pod{
  3818  				ObjectMeta: metav1.ObjectMeta{
  3819  					UID:       "1234560",
  3820  					Name:      "foo0",
  3821  					Namespace: "bar0",
  3822  				},
  3823  				Spec: v1.PodSpec{
  3824  					NodeName: "machine",
  3825  					Containers: []v1.Container{
  3826  						{
  3827  							Name:      testContainerName,
  3828  							Image:     "img",
  3829  							Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2GAndCustomResource, Requests: CPU1AndMem1GAndStorage2GAndCustomResource},
  3830  						},
  3831  					},
  3832  					RestartPolicy: v1.RestartPolicyAlways,
  3833  				},
  3834  				Status: v1.PodStatus{
  3835  					ContainerStatuses: []v1.ContainerStatus{
  3836  						{
  3837  							Name:               testContainerName,
  3838  							Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
  3839  							AllocatedResources: CPU1AndMem1GAndStorage2GAndCustomResource,
  3840  						},
  3841  					},
  3842  					Resize: "InProgress",
  3843  				},
  3844  			},
  3845  		},
  3846  		{
  3847  			name: "cpu/memory resource in ResourcesAllocated, resize should be null",
  3848  			pod: &v1.Pod{
  3849  				ObjectMeta: metav1.ObjectMeta{
  3850  					UID:       "1234560",
  3851  					Name:      "foo0",
  3852  					Namespace: "bar0",
  3853  				},
  3854  				Spec: v1.PodSpec{
  3855  					NodeName: "machine",
  3856  					Containers: []v1.Container{
  3857  						{
  3858  							Name:      testContainerName,
  3859  							Image:     "img",
  3860  							Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
  3861  						},
  3862  					},
  3863  					RestartPolicy: v1.RestartPolicyAlways,
  3864  				},
  3865  				Status: v1.PodStatus{
  3866  					ContainerStatuses: []v1.ContainerStatus{
  3867  						{
  3868  							Name:               testContainerName,
  3869  							Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
  3870  							AllocatedResources: CPU1AndMem1GAndStorage2G,
  3871  						},
  3872  					},
  3873  					Resize: "InProgress",
  3874  				},
  3875  			},
  3876  		},
  3877  	}
  3878  	for _, test := range tests {
  3879  		t.Run(test.name, func(t *testing.T) {
  3880  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  3881  			defer testKubelet.Cleanup()
  3882  			kl := testKubelet.kubelet
  3883  
  3884  			oldStatus := test.pod.Status
  3885  			kl.statusManager = status.NewFakeManager()
  3886  			kl.statusManager.SetPodStatus(test.pod, oldStatus)
  3887  			actual := kl.generateAPIPodStatus(test.pod, &testKubecontainerPodStatus /* criStatus */, false /* test.isPodTerminal */)
  3888  
  3889  			if actual.Resize != "" {
  3890  				t.Fatalf("Unexpected Resize status: %s", actual.Resize)
  3891  			}
  3892  		})
  3893  	}
  3894  }
  3895  
  3896  func findContainerStatusByName(status v1.PodStatus, name string) *v1.ContainerStatus {
  3897  	for i, c := range status.InitContainerStatuses {
  3898  		if c.Name == name {
  3899  			return &status.InitContainerStatuses[i]
  3900  		}
  3901  	}
  3902  	for i, c := range status.ContainerStatuses {
  3903  		if c.Name == name {
  3904  			return &status.ContainerStatuses[i]
  3905  		}
  3906  	}
  3907  	for i, c := range status.EphemeralContainerStatuses {
  3908  		if c.Name == name {
  3909  			return &status.EphemeralContainerStatuses[i]
  3910  		}
  3911  	}
  3912  	return nil
  3913  }
  3914  
  3915  func TestGetExec(t *testing.T) {
  3916  	const (
  3917  		podName                = "podFoo"
  3918  		podNamespace           = "nsFoo"
  3919  		podUID       types.UID = "12345678"
  3920  		containerID            = "containerFoo"
  3921  		tty                    = true
  3922  	)
  3923  	var (
  3924  		podFullName = kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, podName, podNamespace))
  3925  	)
  3926  
  3927  	testcases := []struct {
  3928  		description string
  3929  		podFullName string
  3930  		container   string
  3931  		command     []string
  3932  		expectError bool
  3933  	}{{
  3934  		description: "success case",
  3935  		podFullName: podFullName,
  3936  		container:   containerID,
  3937  		command:     []string{"ls"},
  3938  		expectError: false,
  3939  	}, {
  3940  		description: "no such pod",
  3941  		podFullName: "bar" + podFullName,
  3942  		container:   containerID,
  3943  		command:     []string{"ls"},
  3944  		expectError: true,
  3945  	}, {
  3946  		description: "no such container",
  3947  		podFullName: podFullName,
  3948  		container:   "containerBar",
  3949  		command:     []string{"ls"},
  3950  		expectError: true,
  3951  	}, {
  3952  		description: "null exec command",
  3953  		podFullName: podFullName,
  3954  		container:   containerID,
  3955  		expectError: false,
  3956  	}, {
  3957  		description: "multi exec commands",
  3958  		podFullName: podFullName,
  3959  		container:   containerID,
  3960  		command:     []string{"bash", "-c", "ls"},
  3961  		expectError: false,
  3962  	}}
  3963  
  3964  	for _, tc := range testcases {
  3965  		t.Run(tc.description, func(t *testing.T) {
  3966  			ctx := context.Background()
  3967  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  3968  			defer testKubelet.Cleanup()
  3969  			kubelet := testKubelet.kubelet
  3970  			testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
  3971  				{Pod: &kubecontainer.Pod{
  3972  					ID:        podUID,
  3973  					Name:      podName,
  3974  					Namespace: podNamespace,
  3975  					Containers: []*kubecontainer.Container{
  3976  						{Name: containerID,
  3977  							ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
  3978  						},
  3979  					},
  3980  				}},
  3981  			}
  3982  
  3983  			description := "streaming - " + tc.description
  3984  			fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime}
  3985  			kubelet.containerRuntime = fakeRuntime
  3986  			kubelet.streamingRuntime = fakeRuntime
  3987  
  3988  			redirect, err := kubelet.GetExec(ctx, tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
  3989  			if tc.expectError {
  3990  				assert.Error(t, err, description)
  3991  			} else {
  3992  				assert.NoError(t, err, description)
  3993  				assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect")
  3994  			}
  3995  		})
  3996  	}
  3997  }
  3998  
  3999  func TestGetPortForward(t *testing.T) {
  4000  	const (
  4001  		podName                = "podFoo"
  4002  		podNamespace           = "nsFoo"
  4003  		podUID       types.UID = "12345678"
  4004  		port         int32     = 5000
  4005  	)
  4006  
  4007  	testcases := []struct {
  4008  		description string
  4009  		podName     string
  4010  		expectError bool
  4011  	}{{
  4012  		description: "success case",
  4013  		podName:     podName,
  4014  	}, {
  4015  		description: "no such pod",
  4016  		podName:     "bar",
  4017  		expectError: true,
  4018  	}}
  4019  
  4020  	for _, tc := range testcases {
        		t.Run(tc.description, func(t *testing.T) {
  4021  			ctx := context.Background()
  4022  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  4023  			defer testKubelet.Cleanup()
  4024  			kubelet := testKubelet.kubelet
  4025  			testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
  4026  				{Pod: &kubecontainer.Pod{
  4027  					ID:        podUID,
  4028  					Name:      podName,
  4029  					Namespace: podNamespace,
  4030  					Containers: []*kubecontainer.Container{
  4031  						{Name: "foo",
  4032  							ID: kubecontainer.ContainerID{Type: "test", ID: "foo"},
  4033  						},
  4034  					},
  4035  				}},
  4036  			}
  4037  
  4038  			description := "streaming - " + tc.description
  4039  			fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime}
  4040  			kubelet.containerRuntime = fakeRuntime
  4041  			kubelet.streamingRuntime = fakeRuntime
  4042  
  4043  			redirect, err := kubelet.GetPortForward(ctx, tc.podName, podNamespace, podUID, portforward.V4Options{})
  4044  			if tc.expectError {
  4045  				assert.Error(t, err, description)
  4046  			} else {
  4047  				assert.NoError(t, err, description)
  4048  				assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect")
  4049  			}
        		})
  4050  	}
  4051  }
  4052  
  4053  func TestTruncatePodHostname(t *testing.T) {
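        	// Pod hostnames must fit in a DNS-1123 label (63 characters); truncation must also not
        	// leave a trailing "." or "-", as the cases below verify.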
  4054  	for c, test := range map[string]struct {
  4055  		input  string
  4056  		output string
  4057  	}{
  4058  		"valid hostname": {
  4059  			input:  "test.pod.hostname",
  4060  			output: "test.pod.hostname",
  4061  		},
  4062  		"too long hostname": {
  4063  			input:  "1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567.", // 8*9=72 chars
  4064  			output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567",          // 8*8-1=63 chars
  4065  		},
  4066  		"hostname end with .": {
  4067  			input:  "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456.1234567.", // 8*9-1=71 chars
  4068  			output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456",          // 8*8-2=62 chars
  4069  		},
  4070  		"hostname end with -": {
  4071  			input:  "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456-1234567.", // 8*9-1=71 chars
  4072  			output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456",          // 8*8-2=62 chars
  4073  		},
  4074  	} {
  4075  		t.Logf("TestCase: %q", c)
  4076  		output, err := truncatePodHostnameIfNeeded("test-pod", test.input)
  4077  		assert.NoError(t, err)
  4078  		assert.Equal(t, test.output, output)
  4079  	}
  4080  }
  4081  
  4082  func TestGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
  4083  	testcases := []struct {
  4084  		name          string
  4085  		nodeAddresses []v1.NodeAddress
  4086  		criPodIPs     []string
  4087  		podIPs        []v1.PodIP
  4088  	}{
  4089  		{
  4090  			name: "Simple",
  4091  			nodeAddresses: []v1.NodeAddress{
  4092  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4093  			},
  4094  			podIPs: []v1.PodIP{
  4095  				{IP: "10.0.0.1"},
  4096  			},
  4097  		},
  4098  		{
  4099  			name: "InternalIP is preferred over ExternalIP",
  4100  			nodeAddresses: []v1.NodeAddress{
  4101  				{Type: v1.NodeExternalIP, Address: "192.168.0.1"},
  4102  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4103  			},
  4104  			podIPs: []v1.PodIP{
  4105  				{IP: "10.0.0.1"},
  4106  			},
  4107  		},
  4108  		{
  4109  			name: "Single-stack addresses in dual-stack cluster",
  4110  			nodeAddresses: []v1.NodeAddress{
  4111  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4112  			},
  4113  			podIPs: []v1.PodIP{
  4114  				{IP: "10.0.0.1"},
  4115  			},
  4116  		},
  4117  		{
  4118  			name: "Multiple single-stack addresses in dual-stack cluster",
  4119  			nodeAddresses: []v1.NodeAddress{
  4120  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4121  				{Type: v1.NodeInternalIP, Address: "10.0.0.2"},
  4122  				{Type: v1.NodeExternalIP, Address: "192.168.0.1"},
  4123  			},
  4124  			podIPs: []v1.PodIP{
  4125  				{IP: "10.0.0.1"},
  4126  			},
  4127  		},
  4128  		{
  4129  			name: "Dual-stack addresses in dual-stack cluster",
  4130  			nodeAddresses: []v1.NodeAddress{
  4131  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4132  				{Type: v1.NodeInternalIP, Address: "fd01::1234"},
  4133  			},
  4134  			podIPs: []v1.PodIP{
  4135  				{IP: "10.0.0.1"},
  4136  				{IP: "fd01::1234"},
  4137  			},
  4138  		},
  4139  		{
  4140  			name: "CRI PodIPs override NodeAddresses",
  4141  			nodeAddresses: []v1.NodeAddress{
  4142  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4143  				{Type: v1.NodeInternalIP, Address: "fd01::1234"},
  4144  			},
  4145  			criPodIPs: []string{"192.168.0.1"},
  4146  			podIPs: []v1.PodIP{
  4147  				{IP: "192.168.0.1"},
  4148  				{IP: "fd01::1234"},
  4149  			},
  4150  		},
  4151  		{
  4152  			name: "CRI dual-stack PodIPs override NodeAddresses",
  4153  			nodeAddresses: []v1.NodeAddress{
  4154  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4155  				{Type: v1.NodeInternalIP, Address: "fd01::1234"},
  4156  			},
  4157  			criPodIPs: []string{"192.168.0.1", "2001:db8::2"},
  4158  			podIPs: []v1.PodIP{
  4159  				{IP: "192.168.0.1"},
  4160  				{IP: "2001:db8::2"},
  4161  			},
  4162  		},
  4163  		{
  4164  			// by default the cluster prefers IPv4
  4165  			name: "CRI dual-stack PodIPs override NodeAddresses prefer IPv4",
  4166  			nodeAddresses: []v1.NodeAddress{
  4167  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4168  				{Type: v1.NodeInternalIP, Address: "fd01::1234"},
  4169  			},
  4170  			criPodIPs: []string{"2001:db8::2", "192.168.0.1"},
  4171  			podIPs: []v1.PodIP{
  4172  				{IP: "192.168.0.1"},
  4173  				{IP: "2001:db8::2"},
  4174  			},
  4175  		},
  4176  	}
  4177  
  4178  	for _, tc := range testcases {
  4179  		t.Run(tc.name, func(t *testing.T) {
  4180  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  4181  			defer testKubelet.Cleanup()
  4182  			kl := testKubelet.kubelet
  4183  
  4184  			kl.nodeLister = testNodeLister{nodes: []*v1.Node{
  4185  				{
  4186  					ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
  4187  					Status: v1.NodeStatus{
  4188  						Addresses: tc.nodeAddresses,
  4189  					},
  4190  				},
  4191  			}}
  4192  
  4193  			pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
  4194  			pod.Spec.HostNetwork = true
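        			// For host-network pods the kubelet derives pod IPs from the node's addresses,
        			// unless the CRI reports IPs for the sandbox, which override the node-derived
        			// ones per IP family, as the cases below show.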
  4195  
  4196  			criStatus := &kubecontainer.PodStatus{
  4197  				ID:        pod.UID,
  4198  				Name:      pod.Name,
  4199  				Namespace: pod.Namespace,
  4200  				IPs:       tc.criPodIPs,
  4201  			}
  4202  
  4203  			status := kl.generateAPIPodStatus(pod, criStatus, false)
  4204  			if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
  4205  				t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
  4206  			}
  4207  			if tc.criPodIPs == nil && status.HostIP != status.PodIPs[0].IP {
  4208  				t.Fatalf("Expected HostIP %q to equal PodIPs[0].IP %q", status.HostIP, status.PodIPs[0].IP)
  4209  			}
  4210  		})
  4211  	}
  4212  }
  4213  
  4214  func TestNodeAddressUpdatesGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
  4215  	testcases := []struct {
  4216  		name           string
  4217  		nodeIPs        []string
  4218  		nodeAddresses  []v1.NodeAddress
  4219  		expectedPodIPs []v1.PodIP
  4220  	}{
  4222  		{
  4223  			name:    "Immutable after update node addresses single-stack",
  4224  			nodeIPs: []string{"10.0.0.1"},
  4225  			nodeAddresses: []v1.NodeAddress{
  4226  				{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
  4227  			},
  4228  			expectedPodIPs: []v1.PodIP{
  4229  				{IP: "10.0.0.1"},
  4230  			},
  4231  		},
  4232  		{
  4233  			name:    "Immutable after update node addresses dual-stack - primary address",
  4234  			nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
  4235  			nodeAddresses: []v1.NodeAddress{
  4236  				{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
  4237  				{Type: v1.NodeInternalIP, Address: "2001:db8::2"},
  4238  			},
  4239  			expectedPodIPs: []v1.PodIP{
  4240  				{IP: "10.0.0.1"},
  4241  				{IP: "2001:db8::2"},
  4242  			},
  4243  		},
  4244  		{
  4245  			name:    "Immutable after update node addresses dual-stack - secondary address",
  4246  			nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
  4247  			nodeAddresses: []v1.NodeAddress{
  4248  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4249  				{Type: v1.NodeInternalIP, Address: "2001:db8:1:2:3::2"},
  4250  			},
  4251  			expectedPodIPs: []v1.PodIP{
  4252  				{IP: "10.0.0.1"},
  4253  				{IP: "2001:db8::2"},
  4254  			},
  4255  		},
  4256  		{
  4257  			name:    "Immutable after update node addresses dual-stack - primary and secondary address",
  4258  			nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
  4259  			nodeAddresses: []v1.NodeAddress{
  4260  				{Type: v1.NodeInternalIP, Address: "1.1.1.1"},
  4261  				{Type: v1.NodeInternalIP, Address: "2001:db8:1:2:3::2"},
  4262  			},
  4263  			expectedPodIPs: []v1.PodIP{
  4264  				{IP: "10.0.0.1"},
  4265  				{IP: "2001:db8::2"},
  4266  			},
  4267  		},
  4268  		{
  4269  			name:    "Update secondary after new secondary address dual-stack",
  4270  			nodeIPs: []string{"10.0.0.1"},
  4271  			nodeAddresses: []v1.NodeAddress{
  4272  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4273  				{Type: v1.NodeInternalIP, Address: "2001:db8::2"},
  4274  			},
  4275  			expectedPodIPs: []v1.PodIP{
  4276  				{IP: "10.0.0.1"},
  4277  				{IP: "2001:db8::2"},
  4278  			},
  4279  		},
  4280  		{
  4281  			name:    "Update secondary after new secondary address dual-stack - reverse order",
  4282  			nodeIPs: []string{"2001:db8::2"},
  4283  			nodeAddresses: []v1.NodeAddress{
  4284  				{Type: v1.NodeInternalIP, Address: "10.0.0.1"},
  4285  				{Type: v1.NodeInternalIP, Address: "2001:db8::2"},
  4286  			},
  4287  			expectedPodIPs: []v1.PodIP{
  4288  				{IP: "2001:db8::2"},
  4289  			},
  4290  		},
  4291  	}
  4292  
  4293  	for _, tc := range testcases {
  4294  		t.Run(tc.name, func(t *testing.T) {
  4295  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  4296  			defer testKubelet.Cleanup()
  4297  			kl := testKubelet.kubelet
  4298  			for _, ip := range tc.nodeIPs {
  4299  				kl.nodeIPs = append(kl.nodeIPs, netutils.ParseIPSloppy(ip))
  4300  			}
  4301  			kl.nodeLister = testNodeLister{nodes: []*v1.Node{
  4302  				{
  4303  					ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
  4304  					Status: v1.NodeStatus{
  4305  						Addresses: tc.nodeAddresses,
  4306  					},
  4307  				},
  4308  			}}
  4309  
  4310  			pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
  4311  			pod.Spec.HostNetwork = true
  4312  			for _, ip := range tc.nodeIPs {
  4313  				pod.Status.PodIPs = append(pod.Status.PodIPs, v1.PodIP{IP: ip})
  4314  			}
  4315  			if len(pod.Status.PodIPs) > 0 {
  4316  				pod.Status.PodIP = pod.Status.PodIPs[0].IP
  4317  			}
  4318  
  4319  			// Seed the runtime pod status with the pod's existing IPs so they read as already assigned.
  4320  			podStatus := &kubecontainer.PodStatus{
  4321  				ID:        pod.UID,
  4322  				Name:      pod.Name,
  4323  				Namespace: pod.Namespace,
  4324  			}
  4325  			podStatus.IPs = tc.nodeIPs
  4326  
  4327  			status := kl.generateAPIPodStatus(pod, podStatus, false)
  4328  			if !reflect.DeepEqual(status.PodIPs, tc.expectedPodIPs) {
  4329  				t.Fatalf("Expected PodIPs %#v, got %#v", tc.expectedPodIPs, status.PodIPs)
  4330  			}
  4331  			if kl.nodeIPs[0].String() != status.PodIPs[0].IP {
  4332  				t.Fatalf("Expected primary node IP %q to equal PodIPs[0].IP %q", kl.nodeIPs[0].String(), status.PodIPs[0].IP)
  4333  			}
  4334  		})
  4335  	}
  4336  }
  4337  
  4338  func TestGenerateAPIPodStatusPodIPs(t *testing.T) {
  4339  	testcases := []struct {
  4340  		name      string
  4341  		nodeIP    string
  4342  		criPodIPs []string
  4343  		podIPs    []v1.PodIP
  4344  	}{
  4345  		{
  4346  			name:      "Simple",
  4347  			nodeIP:    "",
  4348  			criPodIPs: []string{"10.0.0.1"},
  4349  			podIPs: []v1.PodIP{
  4350  				{IP: "10.0.0.1"},
  4351  			},
  4352  		},
  4353  		{
  4354  			name:      "Dual-stack",
  4355  			nodeIP:    "",
  4356  			criPodIPs: []string{"10.0.0.1", "fd01::1234"},
  4357  			podIPs: []v1.PodIP{
  4358  				{IP: "10.0.0.1"},
  4359  				{IP: "fd01::1234"},
  4360  			},
  4361  		},
  4362  		{
  4363  			name:      "Dual-stack with explicit node IP",
  4364  			nodeIP:    "192.168.1.1",
  4365  			criPodIPs: []string{"10.0.0.1", "fd01::1234"},
  4366  			podIPs: []v1.PodIP{
  4367  				{IP: "10.0.0.1"},
  4368  				{IP: "fd01::1234"},
  4369  			},
  4370  		},
  4371  		{
  4372  			name:      "Dual-stack with CRI returning wrong family first",
  4373  			nodeIP:    "",
  4374  			criPodIPs: []string{"fd01::1234", "10.0.0.1"},
  4375  			podIPs: []v1.PodIP{
  4376  				{IP: "10.0.0.1"},
  4377  				{IP: "fd01::1234"},
  4378  			},
  4379  		},
  4380  		{
  4381  			name:      "Dual-stack with explicit node IP with CRI returning wrong family first",
  4382  			nodeIP:    "192.168.1.1",
  4383  			criPodIPs: []string{"fd01::1234", "10.0.0.1"},
  4384  			podIPs: []v1.PodIP{
  4385  				{IP: "10.0.0.1"},
  4386  				{IP: "fd01::1234"},
  4387  			},
  4388  		},
  4389  		{
  4390  			name:      "Dual-stack with IPv6 node IP",
  4391  			nodeIP:    "fd00::5678",
  4392  			criPodIPs: []string{"10.0.0.1", "fd01::1234"},
  4393  			podIPs: []v1.PodIP{
  4394  				{IP: "fd01::1234"},
  4395  				{IP: "10.0.0.1"},
  4396  			},
  4397  		},
  4398  		{
  4399  			name:      "Dual-stack with IPv6 node IP, other CRI order",
  4400  			nodeIP:    "fd00::5678",
  4401  			criPodIPs: []string{"fd01::1234", "10.0.0.1"},
  4402  			podIPs: []v1.PodIP{
  4403  				{IP: "fd01::1234"},
  4404  				{IP: "10.0.0.1"},
  4405  			},
  4406  		},
  4407  		{
  4408  			name:      "No Pod IP matching Node IP",
  4409  			nodeIP:    "fd00::5678",
  4410  			criPodIPs: []string{"10.0.0.1"},
  4411  			podIPs: []v1.PodIP{
  4412  				{IP: "10.0.0.1"},
  4413  			},
  4414  		},
  4415  		{
  4416  			name:      "No Pod IP matching (unspecified) Node IP",
  4417  			nodeIP:    "",
  4418  			criPodIPs: []string{"fd01::1234"},
  4419  			podIPs: []v1.PodIP{
  4420  				{IP: "fd01::1234"},
  4421  			},
  4422  		},
  4423  		{
  4424  			name:      "Multiple IPv4 IPs",
  4425  			nodeIP:    "",
  4426  			criPodIPs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
  4427  			podIPs: []v1.PodIP{
  4428  				{IP: "10.0.0.1"},
  4429  			},
  4430  		},
  4431  		{
  4432  			name:      "Multiple Dual-Stack IPs",
  4433  			nodeIP:    "",
  4434  			criPodIPs: []string{"10.0.0.1", "10.0.0.2", "fd01::1234", "10.0.0.3", "fd01::5678"},
  4435  			podIPs: []v1.PodIP{
  4436  				{IP: "10.0.0.1"},
  4437  				{IP: "fd01::1234"},
  4438  			},
  4439  		},
  4440  	}
  4441  
  4442  	for _, tc := range testcases {
  4443  		t.Run(tc.name, func(t *testing.T) {
  4444  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  4445  			defer testKubelet.Cleanup()
  4446  			kl := testKubelet.kubelet
  4447  			if tc.nodeIP != "" {
  4448  				kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
  4449  			}
  4450  
  4451  			pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
  4452  
  4453  			criStatus := &kubecontainer.PodStatus{
  4454  				ID:        pod.UID,
  4455  				Name:      pod.Name,
  4456  				Namespace: pod.Namespace,
  4457  				IPs:       tc.criPodIPs,
  4458  			}
  4459  
  4460  			status := kl.generateAPIPodStatus(pod, criStatus, false)
  4461  			if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
  4462  				t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
  4463  			}
  4464  			if status.PodIP != status.PodIPs[0].IP {
  4465  				t.Fatalf("Expected PodIP %q to equal PodIPs[0].IP %q", status.PodIP, status.PodIPs[0].IP)
  4466  			}
  4467  		})
  4468  	}
  4469  }
  4470  
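        // TestSortPodIPs verifies that sortPodIPs reduces the CRI-reported list to at most one IP
        // per family, ordered so the family of the node's primary IP comes first.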
  4471  func TestSortPodIPs(t *testing.T) {
  4472  	testcases := []struct {
  4473  		name        string
  4474  		nodeIP      string
  4475  		podIPs      []string
  4476  		expectedIPs []string
  4477  	}{
  4478  		{
  4479  			name:        "Simple",
  4480  			nodeIP:      "",
  4481  			podIPs:      []string{"10.0.0.1"},
  4482  			expectedIPs: []string{"10.0.0.1"},
  4483  		},
  4484  		{
  4485  			name:        "Dual-stack",
  4486  			nodeIP:      "",
  4487  			podIPs:      []string{"10.0.0.1", "fd01::1234"},
  4488  			expectedIPs: []string{"10.0.0.1", "fd01::1234"},
  4489  		},
  4490  		{
  4491  			name:        "Dual-stack with explicit node IP",
  4492  			nodeIP:      "192.168.1.1",
  4493  			podIPs:      []string{"10.0.0.1", "fd01::1234"},
  4494  			expectedIPs: []string{"10.0.0.1", "fd01::1234"},
  4495  		},
  4496  		{
  4497  			name:        "Dual-stack with CRI returning wrong family first",
  4498  			nodeIP:      "",
  4499  			podIPs:      []string{"fd01::1234", "10.0.0.1"},
  4500  			expectedIPs: []string{"10.0.0.1", "fd01::1234"},
  4501  		},
  4502  		{
  4503  			name:        "Dual-stack with explicit node IP with CRI returning wrong family first",
  4504  			nodeIP:      "192.168.1.1",
  4505  			podIPs:      []string{"fd01::1234", "10.0.0.1"},
  4506  			expectedIPs: []string{"10.0.0.1", "fd01::1234"},
  4507  		},
  4508  		{
  4509  			name:        "Dual-stack with IPv6 node IP",
  4510  			nodeIP:      "fd00::5678",
  4511  			podIPs:      []string{"10.0.0.1", "fd01::1234"},
  4512  			expectedIPs: []string{"fd01::1234", "10.0.0.1"},
  4513  		},
  4514  		{
  4515  			name:        "Dual-stack with IPv6 node IP, other CRI order",
  4516  			nodeIP:      "fd00::5678",
  4517  			podIPs:      []string{"fd01::1234", "10.0.0.1"},
  4518  			expectedIPs: []string{"fd01::1234", "10.0.0.1"},
  4519  		},
  4520  		{
  4521  			name:        "No Pod IP matching Node IP",
  4522  			nodeIP:      "fd00::5678",
  4523  			podIPs:      []string{"10.0.0.1"},
  4524  			expectedIPs: []string{"10.0.0.1"},
  4525  		},
  4526  		{
  4527  			name:        "No Pod IP matching (unspecified) Node IP",
  4528  			nodeIP:      "",
  4529  			podIPs:      []string{"fd01::1234"},
  4530  			expectedIPs: []string{"fd01::1234"},
  4531  		},
  4532  		{
  4533  			name:        "Multiple IPv4 IPs",
  4534  			nodeIP:      "",
  4535  			podIPs:      []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
  4536  			expectedIPs: []string{"10.0.0.1"},
  4537  		},
  4538  		{
  4539  			name:        "Multiple Dual-Stack IPs",
  4540  			nodeIP:      "",
  4541  			podIPs:      []string{"10.0.0.1", "10.0.0.2", "fd01::1234", "10.0.0.3", "fd01::5678"},
  4542  			expectedIPs: []string{"10.0.0.1", "fd01::1234"},
  4543  		},
  4544  	}
  4545  
  4546  	for _, tc := range testcases {
  4547  		t.Run(tc.name, func(t *testing.T) {
  4548  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  4549  			defer testKubelet.Cleanup()
  4550  			kl := testKubelet.kubelet
  4551  			if tc.nodeIP != "" {
  4552  				kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
  4553  			}
  4554  
  4555  			podIPs := kl.sortPodIPs(tc.podIPs)
  4556  			if !reflect.DeepEqual(podIPs, tc.expectedIPs) {
  4557  				t.Fatalf("Expected PodIPs %#v, got %#v", tc.expectedIPs, podIPs)
  4558  			}
  4559  		})
  4560  	}
  4561  }
  4562  
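        // Uncomment to turn on verbose klog output (-v=5) when debugging these tests locally.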
  4563  // func init() {
  4564  // 	klog.InitFlags(flag.CommandLine)
  4565  // 	flag.CommandLine.Lookup("v").Value.Set("5")
  4566  // }
  4567  
  4568  func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
  4569  	pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
  4570  
  4571  	testTimestamp := time.Unix(123456789, 987654321)
  4572  
  4573  	criStatus := &kubecontainer.PodStatus{
  4574  		ID:        pod.UID,
  4575  		Name:      pod.Name,
  4576  		Namespace: pod.Namespace,
  4577  		ContainerStatuses: []*kubecontainer.Status{
  4578  			{Name: "containerA", CreatedAt: testTimestamp},
  4579  			{Name: "containerB", CreatedAt: testTimestamp.Add(1)},
  4580  		},
  4581  	}
  4582  
  4583  	testKubelet := newTestKubelet(t, false)
  4584  	defer testKubelet.Cleanup()
  4585  	kl := testKubelet.kubelet
  4586  
  4587  	// convertToAPIContainerStatuses is purely transformative and shouldn't alter the state of the
  4588  	// kubelet: there are no synchronization events in that function (no locks, no channels, ...), so
  4589  	// each test goroutine advances its own vector clock independently. The Go race detector uses
  4590  	// pure happens-before detection, so it would catch a race condition consistently despite only
        	// two goroutines being spawned.
  4591  	done := make(chan struct{}, 2)
  4592  	for i := 0; i < 2; i++ {
  4593  		go func() {
  4594  			kl.convertToAPIContainerStatuses(pod, criStatus, []v1.ContainerStatus{}, []v1.Container{}, false, false)
  4595  			done <- struct{}{}
        		}()
        	}
        	// Wait for both goroutines so the race detector observes their entire execution
        	// before the test returns.
        	for i := 0; i < 2; i++ {
        		<-done
        	}
  4596  }
  4597  
  4598  func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
  4599  	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
  4600  	nowTime := time.Now()
  4601  	testContainerName := "ctr0"
  4602  	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
  4603  	testContainer := v1.Container{
  4604  		Name:  testContainerName,
  4605  		Image: "img",
  4606  	}
  4607  	testContainerStatus := v1.ContainerStatus{
  4608  		Name: testContainerName,
  4609  	}
  4610  	testPod := &v1.Pod{
  4611  		ObjectMeta: metav1.ObjectMeta{
  4612  			UID:       "123456",
  4613  			Name:      "foo",
  4614  			Namespace: "bar",
  4615  		},
  4616  		Spec: v1.PodSpec{
  4617  			Containers: []v1.Container{testContainer},
  4618  		},
  4619  		Status: v1.PodStatus{
  4620  			ContainerStatuses: []v1.ContainerStatus{testContainerStatus},
  4621  		},
  4622  	}
  4623  	testKubeContainerStatus := kubecontainer.Status{
  4624  		Name:      testContainerName,
  4625  		ID:        testContainerID,
  4626  		Image:     "img",
  4627  		ImageID:   "img1234",
  4628  		State:     kubecontainer.ContainerStateRunning,
  4629  		StartedAt: nowTime,
  4630  	}
  4631  	testPodStatus := &kubecontainer.PodStatus{
  4632  		ID:                testPod.UID,
  4633  		Name:              testPod.Name,
  4634  		Namespace:         testPod.Namespace,
  4635  		ContainerStatuses: []*kubecontainer.Status{&testKubeContainerStatus},
  4636  	}
  4637  	CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
  4638  	CPU2AndMem2G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi")}
  4639  	CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
  4640  	CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
  4641  	CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
  4642  	CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
  4643  
  4644  	testKubelet := newTestKubelet(t, false)
  4645  	defer testKubelet.Cleanup()
  4646  	kubelet := testKubelet.kubelet
  4647  	kubelet.statusManager = status.NewFakeManager()
  4648  
  4649  	idx := 0
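        	// Each case overwrites the pod's spec resources and the fake CRI-reported resources,
        	// then checks that convertToAPIContainerStatuses surfaces the allocated values.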
  4650  	for tdesc, tc := range map[string]struct {
  4651  		Resources []v1.ResourceRequirements
  4652  		OldStatus []v1.ContainerStatus
  4653  		Expected  []v1.ContainerStatus
  4654  	}{
  4655  		"GuaranteedQoSPod with CPU and memory CRI status": {
  4656  			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
  4657  			OldStatus: []v1.ContainerStatus{
  4658  				{
  4659  					Name:      testContainerName,
  4660  					Image:     "img",
  4661  					ImageID:   "img1234",
  4662  					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  4663  					Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
  4664  				},
  4665  			},
  4666  			Expected: []v1.ContainerStatus{
  4667  				{
  4668  					Name:               testContainerName,
  4669  					ContainerID:        testContainerID.String(),
  4670  					Image:              "img",
  4671  					ImageID:            "img1234",
  4672  					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
  4673  					AllocatedResources: CPU1AndMem1G,
  4674  					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
  4675  				},
  4676  			},
  4677  		},
  4678  		"BurstableQoSPod with CPU and memory CRI status": {
  4679  			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
  4680  			OldStatus: []v1.ContainerStatus{
  4681  				{
  4682  					Name:      testContainerName,
  4683  					Image:     "img",
  4684  					ImageID:   "img1234",
  4685  					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  4686  					Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
  4687  				},
  4688  			},
  4689  			Expected: []v1.ContainerStatus{
  4690  				{
  4691  					Name:               testContainerName,
  4692  					ContainerID:        testContainerID.String(),
  4693  					Image:              "img",
  4694  					ImageID:            "img1234",
  4695  					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
  4696  					AllocatedResources: CPU1AndMem1G,
  4697  					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
  4698  				},
  4699  			},
  4700  		},
  4701  		"GuaranteedQoSPod with CPU and memory CRI status, with ephemeral storage": {
  4702  			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
  4703  			OldStatus: []v1.ContainerStatus{
  4704  				{
  4705  					Name:      testContainerName,
  4706  					Image:     "img",
  4707  					ImageID:   "img1234",
  4708  					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  4709  					Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
  4710  				},
  4711  			},
  4712  			Expected: []v1.ContainerStatus{
  4713  				{
  4714  					Name:               testContainerName,
  4715  					ContainerID:        testContainerID.String(),
  4716  					Image:              "img",
  4717  					ImageID:            "img1234",
  4718  					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
  4719  					AllocatedResources: CPU1AndMem1GAndStorage2G,
  4720  					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
  4721  				},
  4722  			},
  4723  		},
  4724  		"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage": {
  4725  			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
  4726  			OldStatus: []v1.ContainerStatus{
  4727  				{
  4728  					Name:      testContainerName,
  4729  					Image:     "img",
  4730  					ImageID:   "img1234",
  4731  					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  4732  					Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
  4733  				},
  4734  			},
  4735  			Expected: []v1.ContainerStatus{
  4736  				{
  4737  					Name:               testContainerName,
  4738  					ContainerID:        testContainerID.String(),
  4739  					Image:              "img",
  4740  					ImageID:            "img1234",
  4741  					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
  4742  					AllocatedResources: CPU1AndMem1GAndStorage2G,
  4743  					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
  4744  				},
  4745  			},
  4746  		},
  4747  		"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage, nil resources in OldStatus": {
  4748  			Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
  4749  			OldStatus: []v1.ContainerStatus{
  4750  				{
  4751  					Name:    testContainerName,
  4752  					Image:   "img",
  4753  					ImageID: "img1234",
  4754  					State:   v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  4755  				},
  4756  			},
  4757  			Expected: []v1.ContainerStatus{
  4758  				{
  4759  					Name:               testContainerName,
  4760  					ContainerID:        testContainerID.String(),
  4761  					Image:              "img",
  4762  					ImageID:            "img1234",
  4763  					State:              v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
  4764  					AllocatedResources: CPU1AndMem1GAndStorage2G,
  4765  					Resources:          &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
  4766  				},
  4767  			},
  4768  		},
  4769  		"BestEffortQoSPod": {
  4770  			OldStatus: []v1.ContainerStatus{
  4771  				{
  4772  					Name:      testContainerName,
  4773  					Image:     "img",
  4774  					ImageID:   "img1234",
  4775  					State:     v1.ContainerState{Running: &v1.ContainerStateRunning{}},
  4776  					Resources: &v1.ResourceRequirements{},
  4777  				},
  4778  			},
  4779  			Expected: []v1.ContainerStatus{
  4780  				{
  4781  					Name:        testContainerName,
  4782  					ContainerID: testContainerID.String(),
  4783  					Image:       "img",
  4784  					ImageID:     "img1234",
  4785  					State:       v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
  4786  					Resources:   &v1.ResourceRequirements{},
  4787  				},
  4788  			},
  4789  		},
  4790  	} {
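        		// Apply the case's resources to the pod spec, record the allocation with the
        		// status manager, and mirror the limits/requests into the CRI container status
        		// before converting to API container statuses.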
  4791  		tPod := testPod.DeepCopy()
  4792  		tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
  4793  		for i := range tPod.Spec.Containers {
  4794  			if tc.Resources != nil {
  4795  				tPod.Spec.Containers[i].Resources = tc.Resources[i]
  4796  			}
  4797  			kubelet.statusManager.SetPodAllocation(tPod)
  4798  			if tc.Resources != nil {
  4799  				tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
  4800  				testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
  4801  					MemoryLimit: tc.Resources[i].Limits.Memory(),
  4802  					CPULimit:    tc.Resources[i].Limits.Cpu(),
  4803  					CPURequest:  tc.Resources[i].Requests.Cpu(),
  4804  				}
  4805  			}
  4806  		}
  4807  
  4808  		t.Logf("TestCase: %q", tdesc)
  4809  		cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
  4810  		assert.Equal(t, tc.Expected, cStatuses)
  4811  	}
  4812  }
  4813  
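        // TestKubelet_HandlePodCleanups exercises HandlePodCleanups across combinations of
        // config pods, runtime pods, and pre-seeded pod worker state, asserting on the pod
        // worker's sync statuses, the per-pod sync records, and the lifecycle metrics the
        // kubelet emits.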
  4814  func TestKubelet_HandlePodCleanups(t *testing.T) {
  4815  	one := int64(1)
  4816  	two := int64(2)
  4817  	deleted := metav1.NewTime(time.Unix(2, 0).UTC())
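        	// rejectedPod names a pod UID that the test marks as rejected, along with the
        	// reason and message recorded for the rejection.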
  4818  	type rejectedPod struct {
  4819  		uid     types.UID
  4820  		reason  string
  4821  		message string
  4822  	}
  4823  	simplePod := func() *v1.Pod {
  4824  		return &v1.Pod{
  4825  			ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1", UID: types.UID("1")},
  4826  			Spec: v1.PodSpec{
  4827  				Containers: []v1.Container{
  4828  					{Name: "container-1"},
  4829  				},
  4830  			},
  4831  		}
  4832  	}
  4833  	withPhase := func(pod *v1.Pod, phase v1.PodPhase) *v1.Pod {
  4834  		pod.Status.Phase = phase
  4835  		return pod
  4836  	}
  4837  	staticPod := func() *v1.Pod {
  4838  		return &v1.Pod{
  4839  			ObjectMeta: metav1.ObjectMeta{
  4840  				Name:      "pod1",
  4841  				Namespace: "ns1",
  4842  				UID:       types.UID("1"),
  4843  				Annotations: map[string]string{
  4844  					kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
  4845  				},
  4846  			},
  4847  			Spec: v1.PodSpec{
  4848  				Containers: []v1.Container{
  4849  					{Name: "container-1"},
  4850  				},
  4851  			},
  4852  		}
  4853  	}
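        	// runtimePod builds the runtime's view of a pod. Note that it seeds the container
        	// list with a fixed "container-1"/"c1" entry and then appends one entry per spec
        	// container (IDs "c0", "c1", ...), so even a single-container pod is reported with
        	// two runtime containers.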
  4854  	runtimePod := func(pod *v1.Pod) *kubecontainer.Pod {
  4855  		runningPod := &kubecontainer.Pod{
  4856  			ID:        pod.UID,
  4857  			Name:      pod.Name,
  4858  			Namespace: pod.Namespace,
  4859  			Containers: []*kubecontainer.Container{
  4860  				{Name: "container-1", ID: kubecontainer.ContainerID{Type: "test", ID: "c1"}},
  4861  			},
  4862  		}
  4863  		for i, container := range pod.Spec.Containers {
  4864  			runningPod.Containers = append(runningPod.Containers, &kubecontainer.Container{
  4865  				Name: container.Name,
  4866  				ID:   kubecontainer.ContainerID{Type: "test", ID: fmt.Sprintf("c%d", i)},
  4867  			})
  4868  		}
  4869  		return runningPod
  4870  	}
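        	// mirrorPod builds the API mirror of a static pod: it copies the pod, stamps the
        	// mirror annotation from the pod's config hash, and sets the owning Node as its
        	// controller reference.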
  4871  	mirrorPod := func(pod *v1.Pod, nodeName string, nodeUID types.UID) *v1.Pod {
  4872  		copied := pod.DeepCopy()
  4873  		if copied.Annotations == nil {
  4874  			copied.Annotations = make(map[string]string)
  4875  		}
  4876  		copied.Annotations[kubetypes.ConfigMirrorAnnotationKey] = pod.Annotations[kubetypes.ConfigHashAnnotationKey]
  4877  		isTrue := true
  4878  		copied.OwnerReferences = append(copied.OwnerReferences, metav1.OwnerReference{
  4879  			APIVersion: v1.SchemeGroupVersion.String(),
  4880  			Kind:       "Node",
  4881  			Name:       nodeName,
  4882  			UID:        nodeUID,
  4883  			Controller: &isTrue,
  4884  		})
  4885  		return copied
  4886  	}
  4887  
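        	// Each case may seed config pods (pods), the fake runtime (runtimePods), and the pod
        	// worker (prepareWorker) before HandlePodCleanups runs. wantWorker asserts on worker
        	// state and recorded sync updates afterwards; the *AfterRetry hooks assert on a second
        	// cleanup pass once the induced termination error is lifted.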
  4888  	tests := []struct {
  4889  		name                    string
  4890  		pods                    []*v1.Pod
  4891  		runtimePods             []*containertest.FakePod
  4892  		rejectedPods            []rejectedPod
  4893  		terminatingErr          error
  4894  		prepareWorker           func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
  4895  		wantWorker              func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
  4896  		wantWorkerAfterRetry    func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
  4897  		wantErr                 bool
  4898  		expectMetrics           map[string]string
  4899  		expectMetricsAfterRetry map[string]string
  4900  	}{
  4901  		{
  4902  			name:    "missing pod is requested for termination with short grace period",
  4903  			wantErr: false,
  4904  			runtimePods: []*containertest.FakePod{
  4905  				{
  4906  					Pod: runtimePod(staticPod()),
  4907  				},
  4908  			},
  4909  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  4910  				drainAllWorkers(w)
  4911  				uid := types.UID("1")
  4912  				// we expect runtime pods to be cleared from the status history as soon as they
  4913  				// reach completion
  4914  				if len(w.podSyncStatuses) != 0 {
  4915  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  4916  				}
  4917  				r, ok := records[uid]
  4918  				if !ok || len(r) != 1 || r[0].updateType != kubetypes.SyncPodKill || r[0].terminated || r[0].runningPod == nil || r[0].gracePeriod != nil {
  4919  					t.Fatalf("unexpected pod sync records: %#v", r)
  4920  				}
  4921  			},
  4922  			expectMetrics: map[string]string{
  4923  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  4924  				# TYPE kubelet_orphaned_runtime_pods_total counter
  4925  				kubelet_orphaned_runtime_pods_total 1
  4926  				`,
  4927  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  4928  				# TYPE kubelet_working_pods gauge
  4929  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
  4930  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  4931  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  4932  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  4933  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  4934  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  4935  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  4936  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  4937  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  4938  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  4939  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  4940  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  4941  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  4942  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  4943  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 1
  4944  				`,
  4945  			},
  4946  		},
  4947  		{
  4948  			name:    "terminating pod that errored and is not in config is notified by the cleanup",
  4949  			wantErr: false,
  4950  			runtimePods: []*containertest.FakePod{
  4951  				{
  4952  					Pod: runtimePod(simplePod()),
  4953  				},
  4954  			},
  4955  			terminatingErr: errors.New("unable to terminate"),
  4956  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  4957  				// send a create
  4958  				pod := simplePod()
  4959  				w.UpdatePod(UpdatePodOptions{
  4960  					UpdateType: kubetypes.SyncPodCreate,
  4961  					StartTime:  time.Unix(1, 0).UTC(),
  4962  					Pod:        pod,
  4963  				})
  4964  				drainAllWorkers(w)
  4965  
  4966  				// send a delete update
  4967  				two := int64(2)
  4968  				deleted := metav1.NewTime(time.Unix(2, 0).UTC())
  4969  				updatedPod := &v1.Pod{
  4970  					ObjectMeta: metav1.ObjectMeta{
  4971  						Name:                       "pod1",
  4972  						Namespace:                  "ns1",
  4973  						UID:                        types.UID("1"),
  4974  						DeletionGracePeriodSeconds: &two,
  4975  						DeletionTimestamp:          &deleted,
  4976  					},
  4977  					Spec: v1.PodSpec{
  4978  						TerminationGracePeriodSeconds: &two,
  4979  						Containers: []v1.Container{
  4980  							{Name: "container-1"},
  4981  						},
  4982  					},
  4983  				}
  4984  				w.UpdatePod(UpdatePodOptions{
  4985  					UpdateType: kubetypes.SyncPodKill,
  4986  					StartTime:  time.Unix(3, 0).UTC(),
  4987  					Pod:        updatedPod,
  4988  				})
  4989  				drainAllWorkers(w)
  4990  				r, ok := records[updatedPod.UID]
  4991  				if !ok || len(r) != 2 || r[1].gracePeriod == nil || *r[1].gracePeriod != 2 {
  4992  					t.Fatalf("unexpected records: %#v", records)
  4993  				}
  4994  				// pod worker thinks pod1 exists, but the kubelet will not have it in the pod manager
  4995  			},
  4996  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  4997  				uid := types.UID("1")
  4998  				if len(w.podSyncStatuses) != 1 {
  4999  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5000  				}
  5001  				s, ok := w.podSyncStatuses[uid]
  5002  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5003  					t.Fatalf("unexpected requested pod termination: %#v", s)
  5004  				}
  5005  				// expect a pod sync record for the kill with the same grace period as before (2), but no
  5006  				// running pod, because the SyncKnownPods method killed it
  5007  				if actual, expected := records[uid], []syncPodRecord{
  5008  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5009  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5010  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5011  				}; !reflect.DeepEqual(expected, actual) {
  5012  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5013  				}
  5014  			},
  5015  			expectMetrics: map[string]string{
  5016  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5017  				# TYPE kubelet_desired_pods gauge
  5018  				kubelet_desired_pods{static=""} 0
  5019  				kubelet_desired_pods{static="true"} 0
  5020  				`,
  5021  				metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
  5022  				# TYPE kubelet_active_pods gauge
  5023  				kubelet_active_pods{static=""} 0
  5024  				kubelet_active_pods{static="true"} 0
  5025  				`,
  5026  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  5027  				# TYPE kubelet_orphaned_runtime_pods_total counter
  5028  				kubelet_orphaned_runtime_pods_total 0
  5029  				`,
  5030  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5031  				# TYPE kubelet_working_pods gauge
  5032  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
  5033  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  5034  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5035  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5036  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5037  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  5038  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5039  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5040  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5041  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5042  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 1
  5043  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5044  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5045  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5046  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5047  				`,
  5048  			},
  5049  			wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5050  				uid := types.UID("1")
  5051  				if len(w.podSyncStatuses) != 1 {
  5052  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5053  				}
  5054  				s, ok := w.podSyncStatuses[uid]
  5055  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || !s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5056  					t.Fatalf("unexpected requested pod termination: %#v", s)
  5057  				}
  5058  				// expect a pod sync record for the kill with the same grace period as before (2), but no
  5059  				// running pod, because the SyncKnownPods method killed it
  5060  				if actual, expected := records[uid], []syncPodRecord{
  5061  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5062  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5063  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5064  					// after the second attempt
  5065  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5066  					// from termination
  5067  					{name: "pod1", terminated: true},
  5068  				}; !reflect.DeepEqual(expected, actual) {
  5069  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5070  				}
  5071  			},
  5072  		},
  5073  		{
  5074  			name:    "terminating pod that errored and is not in config or worker is force killed by the cleanup",
  5075  			wantErr: false,
  5076  			runtimePods: []*containertest.FakePod{
  5077  				{
  5078  					Pod: runtimePod(simplePod()),
  5079  				},
  5080  			},
  5081  			terminatingErr: errors.New("unable to terminate"),
  5082  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5083  				uid := types.UID("1")
  5084  				if len(w.podSyncStatuses) != 1 {
  5085  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5086  				}
  5087  				s, ok := w.podSyncStatuses[uid]
  5088  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5089  					t.Fatalf("unexpected requested pod termination: %#v", s)
  5090  				}
  5091  
  5092  				// ensure that we recorded the appropriate state for replays
  5093  				expectedRunningPod := runtimePod(simplePod())
  5094  				if actual, expected := s.activeUpdate, (&UpdatePodOptions{
  5095  					RunningPod:     expectedRunningPod,
  5096  					KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
  5097  				}); !reflect.DeepEqual(expected, actual) {
  5098  					t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
  5099  				}
  5100  
  5101  				// expect that a pod the pod worker does not recognize is force killed with grace period 1
  5102  				if actual, expected := records[uid], []syncPodRecord{
  5103  					{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
  5104  				}; !reflect.DeepEqual(expected, actual) {
  5105  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5106  				}
  5107  			},
  5108  			wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5109  				uid := types.UID("1")
  5110  				if len(w.podSyncStatuses) != 0 {
  5111  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5112  				}
  5113  
  5114  				// expect that a pod the pod worker does not recognize is force killed with grace period 1
  5115  				expectedRunningPod := runtimePod(simplePod())
  5116  				if actual, expected := records[uid], []syncPodRecord{
  5117  					// first attempt, did not succeed
  5118  					{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
  5119  					// second attempt, should succeed
  5120  					{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
  5121  					// because this is a runtime pod, we don't have enough info to invoke syncTerminatedPod and so
  5122  					// we exit after the retry succeeds
  5123  				}; !reflect.DeepEqual(expected, actual) {
  5124  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5125  				}
  5126  			},
  5127  		},
  5128  		{
  5129  			name:    "pod is added to worker by sync method",
  5130  			wantErr: false,
  5131  			pods: []*v1.Pod{
  5132  				simplePod(),
  5133  			},
  5134  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5135  				uid := types.UID("1")
  5136  				if len(w.podSyncStatuses) != 1 {
  5137  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5138  				}
  5139  				s, ok := w.podSyncStatuses[uid]
  5140  				if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() {
  5141  					t.Fatalf("unexpected requested pod termination: %#v", s)
  5142  				}
  5143  
  5144  				// pod was synced once
  5145  				if actual, expected := records[uid], []syncPodRecord{
  5146  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5147  				}; !reflect.DeepEqual(expected, actual) {
  5148  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5149  				}
  5150  			},
  5151  			expectMetrics: map[string]string{
  5152  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5153  				# TYPE kubelet_desired_pods gauge
  5154  				kubelet_desired_pods{static=""} 1
  5155  				kubelet_desired_pods{static="true"} 0
  5156  				`,
  5157  				metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
  5158  				# TYPE kubelet_active_pods gauge
  5159  				kubelet_active_pods{static=""} 1
  5160  				kubelet_active_pods{static="true"} 0
  5161  				`,
  5162  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  5163  				# TYPE kubelet_orphaned_runtime_pods_total counter
  5164  				kubelet_orphaned_runtime_pods_total 0
  5165  				`,
  5166  				// Note that this test simulates a net-new pod being discovered during HandlePodCleanups that was not
  5167  				// delivered to the pod worker via HandlePodAdditions - there is no *known* scenario in which that can
  5168  				// happen, but we want to capture it in the metric. The more likely scenario is that a static pod with a
  5169  				// predefined UID is updated, which causes pod config to deliver DELETE -> ADD while the old pod is still
  5170  				// shutting down and the pod worker to ignore the ADD. The HandlePodCleanups method is then responsible
  5171  				// for syncing that pod to the pod worker so that it restarts.
  5172  				metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
  5173  				# TYPE kubelet_restarted_pods_total counter
  5174  				kubelet_restarted_pods_total{static=""} 1
  5175  				kubelet_restarted_pods_total{static="true"} 0
  5176  				`,
  5177  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5178  				# TYPE kubelet_working_pods gauge
  5179  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 1
  5180  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  5181  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5182  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5183  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5184  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  5185  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5186  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5187  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5188  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5189  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  5190  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5191  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5192  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5193  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5194  				`,
  5195  			},
  5196  		},
  5197  		{
  5198  			name:    "pod is not added to worker by sync method because it is in a terminal phase",
  5199  			wantErr: false,
  5200  			pods: []*v1.Pod{
  5201  				withPhase(simplePod(), v1.PodFailed),
  5202  			},
  5203  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5204  				uid := types.UID("1")
  5205  				if len(w.podSyncStatuses) != 0 {
  5206  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5207  				}
  5208  				// no pod sync record was delivered
  5209  				if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
  5210  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5211  				}
  5212  			},
  5213  			expectMetrics: map[string]string{
  5214  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5215  				# TYPE kubelet_desired_pods gauge
  5216  				kubelet_desired_pods{static=""} 1
  5217  				kubelet_desired_pods{static="true"} 0
  5218  				`,
  5219  				metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
  5220  				# TYPE kubelet_active_pods gauge
  5221  				kubelet_active_pods{static=""} 0
  5222  				kubelet_active_pods{static="true"} 0
  5223  				`,
  5224  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  5225  				# TYPE kubelet_orphaned_runtime_pods_total counter
  5226  				kubelet_orphaned_runtime_pods_total 0
  5227  				`,
  5228  				// Note that this test simulates a net-new pod being discovered during HandlePodCleanups that was not
  5229  				// delivered to the pod worker via HandlePodAdditions - there is no *known* scenario in which that can
  5230  				// happen, but we want to capture it in the metric. The more likely scenario is that a static pod with a
  5231  				// predefined UID is updated, which causes pod config to deliver DELETE -> ADD while the old pod is still
  5232  				// shutting down and the pod worker to ignore the ADD. The HandlePodCleanups method is then responsible
  5233  				// for syncing that pod to the pod worker so that it restarts.
  5234  				metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
  5235  				# TYPE kubelet_restarted_pods_total counter
  5236  				kubelet_restarted_pods_total{static=""} 0
  5237  				kubelet_restarted_pods_total{static="true"} 0
  5238  				`,
  5239  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5240  				# TYPE kubelet_working_pods gauge
  5241  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
  5242  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  5243  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5244  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5245  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5246  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  5247  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5248  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5249  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5250  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5251  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  5252  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5253  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5254  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5255  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5256  				`,
  5257  			},
  5258  		},
  5259  		{
  5260  			name:    "pod is not added to worker by sync method because it has been rejected",
  5261  			wantErr: false,
  5262  			pods: []*v1.Pod{
  5263  				simplePod(),
  5264  			},
  5265  			rejectedPods: []rejectedPod{
  5266  				{uid: "1", reason: "Test", message: "rejected"},
  5267  			},
  5268  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5269  				uid := types.UID("1")
  5270  				if len(w.podSyncStatuses) != 0 {
  5271  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5272  				}
  5273  				// no pod sync record was delivered
  5274  				if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
  5275  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5276  				}
  5277  			},
  5278  			expectMetrics: map[string]string{
  5279  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5280  				# TYPE kubelet_desired_pods gauge
  5281  				kubelet_desired_pods{static=""} 1
  5282  				kubelet_desired_pods{static="true"} 0
  5283  				`,
  5284  				metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
  5285  				# TYPE kubelet_active_pods gauge
  5286  				kubelet_active_pods{static=""} 0
  5287  				kubelet_active_pods{static="true"} 0
  5288  				`,
  5289  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  5290  				# TYPE kubelet_orphaned_runtime_pods_total counter
  5291  				kubelet_orphaned_runtime_pods_total 0
  5292  				`,
  5293  				// Note that this test simulates a net-new pod being discovered during HandlePodCleanups that was not
  5294  				// delivered to the pod worker via HandlePodAdditions - there is no *known* scenario in which that can
  5295  				// happen, but we want to capture it in the metric. The more likely scenario is that a static pod with a
  5296  				// predefined UID is updated, which causes pod config to deliver DELETE -> ADD while the old pod is still
  5297  				// shutting down and the pod worker to ignore the ADD. The HandlePodCleanups method is then responsible
  5298  				// for syncing that pod to the pod worker so that it restarts.
  5299  				metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
  5300  				# TYPE kubelet_restarted_pods_total counter
  5301  				kubelet_restarted_pods_total{static=""} 0
  5302  				kubelet_restarted_pods_total{static="true"} 0
  5303  				`,
  5304  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5305  				# TYPE kubelet_working_pods gauge
  5306  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
  5307  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  5308  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5309  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5310  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5311  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  5312  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5313  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5314  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5315  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5316  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  5317  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5318  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5319  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5320  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5321  				`,
  5322  			},
  5323  		},
  5324  		{
  5325  			name:    "terminating pod that is known to the config gets no update during pod cleanup",
  5326  			wantErr: false,
  5327  			pods: []*v1.Pod{
  5328  				{
  5329  					ObjectMeta: metav1.ObjectMeta{
  5330  						Name:                       "pod1",
  5331  						Namespace:                  "ns1",
  5332  						UID:                        types.UID("1"),
  5333  						DeletionGracePeriodSeconds: &two,
  5334  						DeletionTimestamp:          &deleted,
  5335  					},
  5336  					Spec: v1.PodSpec{
  5337  						TerminationGracePeriodSeconds: &two,
  5338  						Containers: []v1.Container{
  5339  							{Name: "container-1"},
  5340  						},
  5341  					},
  5342  				},
  5343  			},
  5344  			runtimePods: []*containertest.FakePod{
  5345  				{
  5346  					Pod: runtimePod(simplePod()),
  5347  				},
  5348  			},
  5349  			terminatingErr: errors.New("unable to terminate"),
  5350  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5351  				// send a create
  5352  				pod := &v1.Pod{
  5353  					ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1", UID: types.UID("1")},
  5354  					Spec: v1.PodSpec{
  5355  						Containers: []v1.Container{
  5356  							{Name: "container-1"},
  5357  						},
  5358  					},
  5359  				}
  5360  				w.UpdatePod(UpdatePodOptions{
  5361  					UpdateType: kubetypes.SyncPodCreate,
  5362  					StartTime:  time.Unix(1, 0).UTC(),
  5363  					Pod:        pod,
  5364  				})
  5365  				drainAllWorkers(w)
  5366  
  5367  				// send a delete update
  5368  				updatedPod := &v1.Pod{
  5369  					ObjectMeta: metav1.ObjectMeta{
  5370  						Name:                       "pod1",
  5371  						Namespace:                  "ns1",
  5372  						UID:                        types.UID("1"),
  5373  						DeletionGracePeriodSeconds: &two,
  5374  						DeletionTimestamp:          &deleted,
  5375  					},
  5376  					Spec: v1.PodSpec{
  5377  						TerminationGracePeriodSeconds: &two,
  5378  						Containers: []v1.Container{
  5379  							{Name: "container-1"},
  5380  						},
  5381  					},
  5382  				}
  5383  				w.UpdatePod(UpdatePodOptions{
  5384  					UpdateType: kubetypes.SyncPodKill,
  5385  					StartTime:  time.Unix(3, 0).UTC(),
  5386  					Pod:        updatedPod,
  5387  				})
  5388  				drainAllWorkers(w)
  5389  
  5390  				// the pod worker now has pod1 in a terminating state, and pod1 remains visible to config
  5391  				if actual, expected := records[updatedPod.UID], []syncPodRecord{
  5392  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5393  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5394  				}; !reflect.DeepEqual(expected, actual) {
  5395  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5396  				}
  5397  			},
  5398  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5399  				uid := types.UID("1")
  5400  				if len(w.podSyncStatuses) != 1 {
  5401  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5402  				}
  5403  				s, ok := w.podSyncStatuses[uid]
  5404  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5405  					t.Fatalf("unexpected requested pod termination: %#v", s)
  5406  				}
  5407  
  5408  				// no additional pod sync record was delivered during cleanup
  5409  				if actual, expected := records[uid], []syncPodRecord{
  5410  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5411  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
  5412  				}; !reflect.DeepEqual(expected, actual) {
  5413  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5414  				}
  5415  			},
  5416  		},
  5417  		{
  5418  			name:    "pod that could not start and is not in config is force terminated during pod cleanup",
  5419  			wantErr: false,
  5420  			runtimePods: []*containertest.FakePod{
  5421  				{
  5422  					Pod: runtimePod(simplePod()),
  5423  				},
  5424  			},
  5425  			terminatingErr: errors.New("unable to terminate"),
  5426  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5427  				// send a create of a static pod
  5428  				pod := staticPod()
  5429  				// block startup of the static pod due to full name collision
  5430  				w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
  5431  
  5432  				w.UpdatePod(UpdatePodOptions{
  5433  					UpdateType: kubetypes.SyncPodCreate,
  5434  					StartTime:  time.Unix(1, 0).UTC(),
  5435  					Pod:        pod,
  5436  				})
  5437  				drainAllWorkers(w)
  5438  
  5439  				if _, ok := records[pod.UID]; ok {
  5440  					t.Fatalf("unexpected records: %#v", records)
  5441  				}
  5442  				// the pod worker holds pod1 as a pending update that never started, and the kubelet will not have it in the pod manager
  5443  			},
  5444  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5445  				// the pod never started and is cleaned up, but the runtime state causes us to reenter
  5446  				// and perform a direct termination (we never observed the pod being started by
  5447  				// us, so it is safe to tear it down completely)
  5448  				uid := types.UID("1")
  5449  				if len(w.podSyncStatuses) != 1 {
  5450  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5451  				}
  5452  
  5453  				s, ok := w.podSyncStatuses[uid]
  5454  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5455  					t.Errorf("unexpected requested pod termination: %#v", s)
  5456  				}
  5457  
  5458  				// ensure that we recorded the appropriate state for replays
  5459  				expectedRunningPod := runtimePod(simplePod())
  5460  				if actual, expected := s.activeUpdate, (&UpdatePodOptions{
  5461  					RunningPod:     expectedRunningPod,
  5462  					KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
  5463  				}); !reflect.DeepEqual(expected, actual) {
  5464  					t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
  5465  				}
  5466  
  5467  				// sync is never invoked
  5468  				if actual, expected := records[uid], []syncPodRecord{
  5469  					{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
  5470  					// this pod is detected as an orphaned running pod and will exit
  5471  				}; !reflect.DeepEqual(expected, actual) {
  5472  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5473  				}
  5474  			},
  5475  			wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5476  				uid := types.UID("1")
  5477  				if len(w.podSyncStatuses) != 0 {
  5478  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5479  				}
  5480  
  5481  				// expect a kill record from each cleanup attempt against the orphaned runtime pod
  5482  				expectedRunningPod := runtimePod(simplePod())
  5483  				if actual, expected := records[uid], []syncPodRecord{
  5484  					// first attempt, syncTerminatingPod failed with an error
  5485  					{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
  5486  					// second attempt
  5487  					{name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
  5488  				}; !reflect.DeepEqual(expected, actual) {
  5489  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5490  				}
  5491  			},
  5492  		},
  5493  		{
  5494  			name:    "pod that could not start still has a pending update and is tracked in metrics",
  5495  			wantErr: false,
  5496  			pods: []*v1.Pod{
  5497  				staticPod(),
  5498  			},
  5499  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5500  				// send a create of a static pod
  5501  				pod := staticPod()
  5502  				// block startup of the static pod due to full name collision
  5503  				w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
  5504  
  5505  				w.UpdatePod(UpdatePodOptions{
  5506  					UpdateType: kubetypes.SyncPodCreate,
  5507  					StartTime:  time.Unix(1, 0).UTC(),
  5508  					Pod:        pod,
  5509  				})
  5510  				drainAllWorkers(w)
  5511  
  5512  				if _, ok := records[pod.UID]; ok {
  5513  					t.Fatalf("unexpected records: %#v", records)
  5514  				}
  5515  				// the pod worker holds pod1 as a pending update that never started
  5516  			},
  5517  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5518  				uid := types.UID("1")
  5519  				if len(w.podSyncStatuses) != 1 {
  5520  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5521  				}
  5522  				s, ok := w.podSyncStatuses[uid]
  5523  				if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() || s.restartRequested || s.activeUpdate != nil || s.pendingUpdate == nil {
  5524  					t.Errorf("unexpected requested pod termination: %#v", s)
  5525  				}
  5526  
  5527  				// expect that no sync calls are made, since the pod doesn't ever start
  5528  				if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
  5529  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5530  				}
  5531  			},
  5532  			expectMetrics: map[string]string{
  5533  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5534  				# TYPE kubelet_desired_pods gauge
  5535  				kubelet_desired_pods{static=""} 0
  5536  				kubelet_desired_pods{static="true"} 1
  5537  				`,
  5538  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5539  				# TYPE kubelet_working_pods gauge
  5540  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
  5541  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 1
  5542  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5543  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5544  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5545  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  5546  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5547  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5548  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5549  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5550  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  5551  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5552  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5553  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5554  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5555  				`,
  5556  			},
  5557  		},
  5558  		{
  5559  			name:           "pod that could not start and is not in config is force terminated without runtime during pod cleanup",
  5560  			wantErr:        false,
  5561  			terminatingErr: errors.New("unable to terminate"),
  5562  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5563  				// send a create of a static pod
  5564  				pod := staticPod()
  5565  				// block startup of the static pod due to full name collision
  5566  				w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
  5567  
  5568  				w.UpdatePod(UpdatePodOptions{
  5569  					UpdateType: kubetypes.SyncPodCreate,
  5570  					StartTime:  time.Unix(1, 0).UTC(),
  5571  					Pod:        pod,
  5572  				})
  5573  				drainAllWorkers(w)
  5574  
  5575  				if _, ok := records[pod.UID]; ok {
  5576  					t.Fatalf("unexpected records: %#v", records)
  5577  				}
  5578  				// the pod worker holds pod1 as a pending update that never started, and the kubelet will not have it in the pod manager
  5579  			},
  5580  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5581  				uid := types.UID("1")
  5582  				if len(w.podSyncStatuses) != 0 {
  5583  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5584  				}
  5585  
  5586  				// expect that no sync calls are made, since the pod doesn't ever start
  5587  				if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
  5588  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5589  				}
  5590  			},
  5591  		},
  5592  		{
  5593  			name:    "pod that is terminating is recreated by config with the same UID",
  5594  			wantErr: false,
  5595  			pods: []*v1.Pod{
  5596  				func() *v1.Pod {
  5597  					pod := staticPod()
  5598  					pod.Annotations["version"] = "2"
  5599  					return pod
  5600  				}(),
  5601  			},
  5603  			runtimePods: []*containertest.FakePod{
  5604  				{
  5605  					Pod: runtimePod(staticPod()),
  5606  				},
  5607  			},
  5608  			terminatingErr: errors.New("unable to terminate"),
  5609  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5610  				// send a create of a static pod
  5611  				pod := staticPod()
  5612  
  5613  				w.UpdatePod(UpdatePodOptions{
  5614  					UpdateType: kubetypes.SyncPodCreate,
  5615  					StartTime:  time.Unix(1, 0).UTC(),
  5616  					Pod:        pod,
  5617  				})
  5618  				drainAllWorkers(w)
  5619  
  5620  				// terminate the pod (which won't complete, because termination errors) and then deliver a recreation with the same UID
  5621  				w.UpdatePod(UpdatePodOptions{
  5622  					UpdateType: kubetypes.SyncPodKill,
  5623  					StartTime:  time.Unix(2, 0).UTC(),
  5624  					Pod:        pod,
  5625  				})
  5626  				pod = staticPod()
  5627  				pod.Annotations["version"] = "2"
  5628  				w.UpdatePod(UpdatePodOptions{
  5629  					UpdateType: kubetypes.SyncPodCreate,
  5630  					StartTime:  time.Unix(3, 0).UTC(),
  5631  					Pod:        pod,
  5632  				})
  5633  				drainAllWorkers(w)
  5634  
  5635  				// expect a pod sync record for the kill carrying the default grace period (1)
  5636  				if actual, expected := records[pod.UID], []syncPodRecord{
  5637  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5638  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
  5639  				}; !reflect.DeepEqual(expected, actual) {
  5640  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5641  				}
  5642  				// the pod worker is aware of pod1; the recreated pod is also visible to the kubelet via config
  5643  			},
  5644  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5645  				uid := types.UID("1")
  5646  				if len(w.podSyncStatuses) != 1 {
  5647  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5648  				}
  5649  				s, ok := w.podSyncStatuses[uid]
  5650  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() || !s.restartRequested {
  5651  					t.Errorf("unexpected requested pod termination: %#v", s)
  5652  				}
  5653  
  5654  				// expect a pod sync record for the kill carrying the default grace period (1)
  5655  				if actual, expected := records[uid], []syncPodRecord{
  5656  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5657  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
  5658  				}; !reflect.DeepEqual(expected, actual) {
  5659  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5660  				}
  5661  			},
  5662  			expectMetrics: map[string]string{
  5663  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5664  				# TYPE kubelet_desired_pods gauge
  5665  				kubelet_desired_pods{static=""} 0
  5666  				kubelet_desired_pods{static="true"} 1
  5667  				`,
  5668  				metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
  5669  				# TYPE kubelet_active_pods gauge
  5670  				kubelet_active_pods{static=""} 0
  5671  				kubelet_active_pods{static="true"} 1
  5672  				`,
  5673  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  5674  				# TYPE kubelet_orphaned_runtime_pods_total counter
  5675  				kubelet_orphaned_runtime_pods_total 0
  5676  				`,
  5677  				metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
  5678  				# TYPE kubelet_restarted_pods_total counter
  5679  				kubelet_restarted_pods_total{static=""} 0
  5680  				kubelet_restarted_pods_total{static="true"} 0
  5681  				`,
  5682  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5683  				# TYPE kubelet_working_pods gauge
  5684  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
  5685  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  5686  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5687  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5688  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5689  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 1
  5690  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5691  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5692  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5693  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5694  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  5695  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5696  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5697  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5698  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5699  				`,
  5700  			},
  5701  			expectMetricsAfterRetry: map[string]string{
  5702  				metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
  5703  				# TYPE kubelet_restarted_pods_total counter
  5704  				kubelet_restarted_pods_total{static=""} 0
  5705  				kubelet_restarted_pods_total{static="true"} 1
  5706  				`,
  5707  			},
  5708  		},
  5709  		{
  5710  			name:    "started pod that is not in config is force terminated during pod cleanup",
  5711  			wantErr: false,
  5712  			runtimePods: []*containertest.FakePod{
  5713  				{
  5714  					Pod: runtimePod(simplePod()),
  5715  				},
  5716  			},
  5717  			terminatingErr: errors.New("unable to terminate"),
  5718  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5719  				// send a create of a static pod
  5720  				pod := staticPod()
  5721  
  5722  				w.UpdatePod(UpdatePodOptions{
  5723  					UpdateType: kubetypes.SyncPodCreate,
  5724  					StartTime:  time.Unix(1, 0).UTC(),
  5725  					Pod:        pod,
  5726  				})
  5727  				drainAllWorkers(w)
  5728  
  5729  				// expect only the create record so far; the kill is delivered later, during pod cleanup
  5730  				if actual, expected := records[pod.UID], []syncPodRecord{
  5731  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5732  				}; !reflect.DeepEqual(expected, actual) {
  5733  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5734  				}
  5735  				// pod worker is aware of pod1, but the kubelet will not have it in the pod manager
  5736  			},
  5737  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5738  				uid := types.UID("1")
  5739  				if len(w.podSyncStatuses) != 1 {
  5740  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5741  				}
  5742  				s, ok := w.podSyncStatuses[uid]
  5743  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5744  					t.Errorf("unexpected requested pod termination: %#v", s)
  5745  				}
  5746  
  5747  				// expect a kill record to follow the create, delivered by pod cleanup
  5748  				if actual, expected := records[uid], []syncPodRecord{
  5749  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5750  					{name: "pod1", updateType: kubetypes.SyncPodKill},
  5751  				}; !reflect.DeepEqual(expected, actual) {
  5752  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5753  				}
  5754  			},
  5755  		},
  5756  		{
  5757  			name:           "started pod that is not in config or runtime is force terminated during pod cleanup",
  5758  			wantErr:        false,
  5759  			runtimePods:    []*containertest.FakePod{},
  5760  			terminatingErr: errors.New("unable to terminate"),
  5761  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5762  				// send a create of a static pod together with its mirror pod
  5763  				pod := staticPod()
  5764  
  5765  				w.UpdatePod(UpdatePodOptions{
  5766  					UpdateType: kubetypes.SyncPodCreate,
  5767  					StartTime:  time.Unix(1, 0).UTC(),
  5768  					Pod:        pod,
  5769  					MirrorPod:  mirrorPod(pod, "node-1", "node-uid-1"),
  5770  				})
  5771  				drainAllWorkers(w)
  5772  
  5773  				// expect only the create record at this point; the kill is exercised later during cleanup
  5774  				if actual, expected := records[pod.UID], []syncPodRecord{
  5775  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5776  				}; !reflect.DeepEqual(expected, actual) {
  5777  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5778  				}
  5779  				// pod worker is aware of pod1, but the kubelet will not have it in the pod manager
  5780  			},
  5781  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5782  				uid := types.UID("1")
  5783  				if len(w.podSyncStatuses) != 1 {
  5784  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5785  				}
  5786  				s, ok := w.podSyncStatuses[uid]
  5787  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5788  					t.Errorf("unexpected requested pod termination: %#v", s)
  5789  				}
  5790  
  5791  				// ensure that we recorded the appropriate state for replays
  5792  				expectedPod := staticPod()
  5793  				if actual, expected := s.activeUpdate, (&UpdatePodOptions{
  5794  					Pod:       expectedPod,
  5795  					MirrorPod: mirrorPod(expectedPod, "node-1", "node-uid-1"),
  5796  				}); !reflect.DeepEqual(expected, actual) {
  5797  					t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
  5798  				}
  5799  
  5800  				// expect we get a pod sync record for kill that should have the default grace period
  5801  				if actual, expected := records[uid], []syncPodRecord{
  5802  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5803  					{name: "pod1", updateType: kubetypes.SyncPodKill},
  5804  				}; !reflect.DeepEqual(expected, actual) {
  5805  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5806  				}
  5807  			},
  5808  			wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5809  				uid := types.UID("1")
  5810  				if len(w.podSyncStatuses) != 1 {
  5811  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5812  				}
  5813  				s, ok := w.podSyncStatuses[uid]
  5814  				if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || !s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
  5815  					t.Errorf("unexpected requested pod termination: %#v", s)
  5816  				}
  5817  
  5818  				// ensure that we recorded the appropriate state for replays
  5819  				expectedPod := staticPod()
  5820  				if actual, expected := s.activeUpdate, (&UpdatePodOptions{
  5821  					Pod:       expectedPod,
  5822  					MirrorPod: mirrorPod(expectedPod, "node-1", "node-uid-1"),
  5823  				}); !reflect.DeepEqual(expected, actual) {
  5824  					t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
  5825  				}
  5826  
  5827  				// expect we get a pod sync record for kill that should have the default grace period
  5828  				if actual, expected := records[uid], []syncPodRecord{
  5829  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5830  					{name: "pod1", updateType: kubetypes.SyncPodKill},
  5831  					// second attempt at kill
  5832  					{name: "pod1", updateType: kubetypes.SyncPodKill},
  5833  					{name: "pod1", terminated: true},
  5834  				}; !reflect.DeepEqual(expected, actual) {
  5835  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5836  				}
  5837  			},
  5838  		},
  5839  		{
  5840  			name:    "terminated pod is restarted in the same invocation that it is detected",
  5841  			wantErr: false,
  5842  			pods: []*v1.Pod{
  5843  				func() *v1.Pod {
  5844  					pod := staticPod()
  5845  					pod.Annotations = map[string]string{"version": "2"}
  5846  					return pod
  5847  				}(),
  5848  			},
  5849  			prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5850  				// simulate a delete and recreate of a pod with the same UID
  5851  				pod := simplePod()
  5852  				w.UpdatePod(UpdatePodOptions{
  5853  					UpdateType: kubetypes.SyncPodCreate,
  5854  					StartTime:  time.Unix(1, 0).UTC(),
  5855  					Pod:        pod,
  5856  				})
  5857  				drainAllWorkers(w)
  5858  				w.UpdatePod(UpdatePodOptions{
  5859  					UpdateType: kubetypes.SyncPodKill,
  5860  					Pod:        pod,
  5861  				})
  5862  				pod2 := simplePod()
  5863  				pod2.Annotations = map[string]string{"version": "2"}
  5864  				w.UpdatePod(UpdatePodOptions{
  5865  					UpdateType: kubetypes.SyncPodCreate,
  5866  					Pod:        pod2,
  5867  				})
  5868  				drainAllWorkers(w)
  5869  			},
  5870  			wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
  5871  				uid := types.UID("1")
  5872  				if len(w.podSyncStatuses) != 1 {
  5873  					t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
  5874  				}
  5875  				s, ok := w.podSyncStatuses[uid]
  5876  				if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() {
  5877  					t.Fatalf("unexpected requested pod termination: %#v", s)
  5878  				}
  5879  				if s.pendingUpdate != nil || s.activeUpdate == nil || s.activeUpdate.Pod == nil || s.activeUpdate.Pod.Annotations["version"] != "2" {
  5880  					t.Fatalf("unexpected restarted pod: %#v", s.activeUpdate.Pod)
  5881  				}
  5882  				// expect we get a pod sync record for kill that should use the pod's one second grace period, but no
  5883  				// running pod because the SyncKnownPods method killed it
  5884  				if actual, expected := records[uid], []syncPodRecord{
  5885  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5886  					{name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
  5887  					{name: "pod1", terminated: true},
  5888  					{name: "pod1", updateType: kubetypes.SyncPodCreate},
  5889  				}; !reflect.DeepEqual(expected, actual) {
  5890  					t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
  5891  				}
  5892  			},
  5893  			expectMetrics: map[string]string{
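        				// Expected metrics in Prometheus exposition format, compared
        				// verbatim against the gathered output (see testMetric below).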
  5894  				metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
  5895  				# TYPE kubelet_desired_pods gauge
  5896  				kubelet_desired_pods{static=""} 1
  5897  				kubelet_desired_pods{static="true"} 0
  5898  				`,
  5899  				metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
  5900  				# TYPE kubelet_active_pods gauge
  5901  				kubelet_active_pods{static=""} 1
  5902  				kubelet_active_pods{static="true"} 0
  5903  				`,
  5904  				metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
  5905  				# TYPE kubelet_orphaned_runtime_pods_total counter
  5906  				kubelet_orphaned_runtime_pods_total 0
  5907  				`,
  5908  				metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
  5909  				# TYPE kubelet_restarted_pods_total counter
  5910  				kubelet_restarted_pods_total{static=""} 1
  5911  				kubelet_restarted_pods_total{static="true"} 0
  5912  				`,
  5913  				metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
  5914  				# TYPE kubelet_working_pods gauge
  5915  				kubelet_working_pods{config="desired",lifecycle="sync",static=""} 1
  5916  				kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
  5917  				kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
  5918  				kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
  5919  				kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
  5920  				kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
  5921  				kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
  5922  				kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
  5923  				kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
  5924  				kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
  5925  				kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
  5926  				kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
  5927  				kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
  5928  				kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
  5929  				kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
  5930  				`,
  5931  			},
  5932  		},
  5933  	}
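        	// Each scenario follows the same flow: reset the relevant metrics, build a
        	// test kubelet whose pod workers record every sync call, optionally inject
        	// a termination failure, run HandlePodCleanups, and then assert on the
        	// resulting pod worker state and metrics.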
  5934  	for _, tt := range tests {
  5935  		t.Run(tt.name, func(t *testing.T) {
  5936  			// clear the metrics for testing
  5937  			metrics.Register()
  5938  			for _, metric := range []interface{ Reset() }{
  5939  				metrics.DesiredPodCount,
  5940  				metrics.ActivePodCount,
  5941  				metrics.RestartedPodTotal,
  5942  				metrics.OrphanedRuntimePodTotal,
  5943  				metrics.WorkingPodCount,
  5944  			} {
  5945  				metric.Reset()
  5946  			}
  5947  			metrics.MirrorPodCount.Set(0)
  5948  
  5949  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  5950  			defer testKubelet.Cleanup()
  5951  			kl := testKubelet.kubelet
  5952  
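        			// Swap in instrumented pod workers so that every sync operation is
        			// recorded per pod UID in the processed map.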
  5953  			podWorkers, _, processed := createPodWorkers()
  5954  			kl.podWorkers = podWorkers
  5955  			originalPodSyncer := podWorkers.podSyncer
  5956  			syncFuncs := newPodSyncerFuncs(originalPodSyncer)
  5957  			podWorkers.podSyncer = &syncFuncs
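        			// If the scenario injects a termination error, wrap the real syncer:
        			// the underlying termination logic still runs, but the pod worker
        			// observes a failure and must retry on a subsequent cleanup pass.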
  5958  			if tt.terminatingErr != nil {
  5959  				syncFuncs.syncTerminatingPod = func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
  5960  					t.Logf("called syncTerminatingPod")
  5961  					if err := originalPodSyncer.SyncTerminatingPod(ctx, pod, podStatus, gracePeriod, podStatusFn); err != nil {
  5962  						// this runs on a pod worker goroutine, so use Errorf rather than Fatalf
  5962  						t.Errorf("unexpected error in syncTerminatingPodFn: %v", err)
  5963  					}
  5964  					return tt.terminatingErr
  5965  				}
  5966  				syncFuncs.syncTerminatingRuntimePod = func(ctx context.Context, runningPod *kubecontainer.Pod) error {
  5967  					if err := originalPodSyncer.SyncTerminatingRuntimePod(ctx, runningPod); err != nil {
  5968  						// this runs on a pod worker goroutine, so use Errorf rather than Fatalf
  5968  						t.Errorf("unexpected error in syncTerminatingRuntimePodFn: %v", err)
  5969  					}
  5970  					return tt.terminatingErr
  5971  				}
  5972  			}
  5973  			if tt.prepareWorker != nil {
  5974  				tt.prepareWorker(t, podWorkers, processed)
  5975  			}
  5976  
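        			// Seed the fake runtime and the pod manager with the scenario's
        			// runtime and desired pods before cleanup runs.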
  5977  			testKubelet.fakeRuntime.PodList = tt.runtimePods
  5978  			kl.podManager.SetPods(tt.pods)
  5979  
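        			// Reject any pods the scenario calls for so that cleanup observes
        			// them in a terminal state.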
  5980  			for _, reject := range tt.rejectedPods {
  5981  				pod, ok := kl.podManager.GetPodByUID(reject.uid)
  5982  				if !ok {
  5983  					t.Fatalf("unable to reject pod by UID %v", reject.uid)
  5984  				}
  5985  				kl.rejectPod(pod, reject.reason, reject.message)
  5986  			}
  5987  
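        			// Exercise the cleanup path under test, then wait for all pod
        			// workers to go idle before asserting.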
  5988  			if err := kl.HandlePodCleanups(context.Background()); (err != nil) != tt.wantErr {
  5989  				t.Errorf("Kubelet.HandlePodCleanups() error = %v, wantErr %v", err, tt.wantErr)
  5990  			}
  5991  			drainAllWorkers(podWorkers)
  5992  			if tt.wantWorker != nil {
  5993  				tt.wantWorker(t, podWorkers, processed)
  5994  			}
  5995  
  5996  			for k, v := range tt.expectMetrics {
  5997  				testMetric(t, k, v)
  5998  			}
  5999  
  6000  			// restore the real syncer and run cleanup again to verify recovery once the terminating error clears
  6001  			if tt.wantWorkerAfterRetry != nil {
  6002  				podWorkers.podSyncer = originalPodSyncer
  6003  				if err := kl.HandlePodCleanups(context.Background()); (err != nil) != tt.wantErr {
  6004  					t.Errorf("Kubelet.HandlePodCleanups() second error = %v, wantErr %v", err, tt.wantErr)
  6005  				}
  6006  				drainAllWorkers(podWorkers)
  6007  				tt.wantWorkerAfterRetry(t, podWorkers, processed)
  6008  
  6009  				for k, v := range tt.expectMetricsAfterRetry {
  6010  					testMetric(t, k, v)
  6011  				}
  6012  			}
  6013  		})
  6014  	}
  6015  }
  6016  
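        // testMetric gathers the named metric from the kubelet metrics registry and
        // fails the test if its rendered exposition output differs from expectedMetric.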
  6017  func testMetric(t *testing.T, metricName string, expectedMetric string) {
  6018  	t.Helper()
  6019  	err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(expectedMetric), metricName)
  6020  	if err != nil {
  6021  		t.Error(err)
  6022  	}
  6023  }
  6024  
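        // TestGetNonExistentImagePullSecret verifies that a pod referencing a pull
        // secret the secret manager cannot retrieve yields no pull secrets and emits
        // exactly one warning event.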
  6025  func TestGetNonExistentImagePullSecret(t *testing.T) {
  6026  	secrets := make([]*v1.Secret, 0)
  6027  	fakeRecorder := record.NewFakeRecorder(1)
  6028  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  6029  	testKubelet.kubelet.recorder = fakeRecorder
  6030  	testKubelet.kubelet.secretManager = secret.NewFakeManagerWithSecrets(secrets)
  6031  	defer testKubelet.Cleanup()
  6032  
  6033  	expectedEvent := "Warning FailedToRetrieveImagePullSecret Unable to retrieve some image pull secrets (secretFoo); attempting to pull the image may not succeed."
  6034  
  6035  	testPod := &v1.Pod{
  6036  		ObjectMeta: metav1.ObjectMeta{
  6037  			Namespace:   "nsFoo",
  6038  			Name:        "podFoo",
  6039  			Annotations: map[string]string{},
  6040  		},
  6041  		Spec: v1.PodSpec{
  6042  			ImagePullSecrets: []v1.LocalObjectReference{
  6043  				{Name: "secretFoo"},
  6044  			},
  6045  		},
  6046  	}
  6047  
  6048  	pullSecrets := testKubelet.kubelet.getPullSecretsForPod(testPod)
  6049  	assert.Len(t, pullSecrets, 0)
  6050  
  6051  	assert.Len(t, fakeRecorder.Events, 1)
  6052  	event := <-fakeRecorder.Events
  6053  	assert.Equal(t, expectedEvent, event)
  6054  }