k8s.io/kubernetes@v1.29.3/test/e2e/node/kubelet.go

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package node
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"fmt"
    23  	"io"
    24  	"os/exec"
    25  	"path/filepath"
    26  	"strings"
    27  	"time"
    28  
    29  	"github.com/onsi/gomega"
    30  	v1 "k8s.io/api/core/v1"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/util/sets"
    33  	"k8s.io/apimachinery/pkg/util/uuid"
    34  	"k8s.io/apimachinery/pkg/util/wait"
    35  	clientset "k8s.io/client-go/kubernetes"
    36  	"k8s.io/kubernetes/test/e2e/feature"
    37  	"k8s.io/kubernetes/test/e2e/framework"
    38  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    39  	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
    40  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    41  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    42  	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
    43  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    44  	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
    45  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    46  	testutils "k8s.io/kubernetes/test/utils"
    47  	imageutils "k8s.io/kubernetes/test/utils/image"
    48  	admissionapi "k8s.io/pod-security-admission/api"
    49  
    50  	"github.com/onsi/ginkgo/v2"
    51  )
    52  
    53  const (
    54  	// Interval to framework.Poll /runningpods on a node
    55  	pollInterval = 1 * time.Second
    56  	// Interval to framework.Poll /stats/container on a node
    57  	containerStatsPollingInterval = 5 * time.Second
    58  	// Maximum number of nodes that we constrain the test to
    59  	maxNodesToCheck = 10
    60  )
    61  
    62  // getPodMatches returns the set of pod names on the given node that match the
    63  // given podNamePrefix and namespace.
    64  func getPodMatches(ctx context.Context, c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
    65  	matches := sets.NewString()
    66  	framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
    67  	runningPods, err := e2ekubelet.GetKubeletPods(ctx, c, nodeName)
    68  	if err != nil {
    69  		framework.Logf("Error checking running pods on %v: %v", nodeName, err)
    70  		return matches
    71  	}
    72  	for _, pod := range runningPods.Items {
    73  		if pod.Namespace == namespace && strings.HasPrefix(pod.Name, podNamePrefix) {
    74  			matches.Insert(pod.Name)
    75  		}
    76  	}
    77  	return matches
    78  }
    79  
    80  // waitTillNPodsRunningOnNodes polls the /runningpods endpoint on kubelet until
    81  // it finds targetNumPods pods that match the given criteria (namespace and
    82  // podNamePrefix). Note that we usually use a label selector to filter pods that
    83  // belong to the same RC. However, we use podNamePrefix with namespace here
    84  // because pods returned from /runningpods do not contain the original label
    85  // information; they are reconstructed by examining the container runtime. In
    86  // the scope of this test, we do not expect pod naming conflicts so
    87  // podNamePrefix should be sufficient to identify the pods.
    88  func waitTillNPodsRunningOnNodes(ctx context.Context, c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
    89  	return wait.PollWithContext(ctx, pollInterval, timeout, func(ctx context.Context) (bool, error) {
    90  		matchCh := make(chan sets.String, len(nodeNames))
    91  		for _, item := range nodeNames.List() {
    92  			// Launch a goroutine per node to check the pods running on that node.
    93  			nodeName := item
    94  			go func() {
    95  				matchCh <- getPodMatches(ctx, c, nodeName, podNamePrefix, namespace)
    96  			}()
    97  		}
    98  
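        		// Fan-in: read exactly one result per node from the buffered channel and
        		// union them into a single set of matching pod names.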
    99  		seen := sets.NewString()
   100  		for i := 0; i < len(nodeNames.List()); i++ {
   101  			seen = seen.Union(<-matchCh)
   102  		}
   103  		if seen.Len() == targetNumPods {
   104  			return true, nil
   105  		}
   106  		framework.Logf("Waiting for %d pods to be running on the nodes; %d are currently running", targetNumPods, seen.Len())
   107  		return false, nil
   108  	})
   109  }
   110  
   111  // Restart the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 1` command in the
   112  // pod's (only) container. This command changes the number of nfs server threads from
   113  // (presumably) zero back to 1, and therefore allows nfs to open connections again.
   114  func restartNfsServer(serverPod *v1.Pod) {
   115  	const startcmd = "/usr/sbin/rpc.nfsd 1"
   116  	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
   117  	e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
   118  }
   119  
   120  // Stop the passed-in nfs-server by issuing a `/usr/sbin/rpc.nfsd 0` command in the
   121  // pod's (only) container. This command changes the number of nfs server threads to 0,
   122  // thus closing all open nfs connections.
   123  func stopNfsServer(serverPod *v1.Pod) {
   124  	const stopcmd = "/usr/sbin/rpc.nfsd 0"
   125  	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
   126  	e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
   127  }
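
        // For illustration only: both helpers above boil down to a kubectl invocation of roughly the form
        //
        //	kubectl exec --namespace=<server-namespace> <nfs-server-pod> -- /bin/sh -c "/usr/sbin/rpc.nfsd <n>"
        //
        // with <n> = 1 to resume serving NFS and <n> = 0 to stop it.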
   128  
   129  // createPodUsingNfs creates a pod that mounts an NFS volume served by the nfs-server pod. The
   130  // container executes the passed-in shell cmd. Waits for the pod to be running and ready.
   131  // Note: the nfs plugin is defined inline, no PV or PVC.
   132  func createPodUsingNfs(ctx context.Context, f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
   133  	ginkgo.By("create pod using nfs volume")
   134  
   135  	isPrivileged := true
   136  	cmdLine := []string{"-c", cmd}
   137  	pod := &v1.Pod{
   138  		TypeMeta: metav1.TypeMeta{
   139  			Kind:       "Pod",
   140  			APIVersion: "v1",
   141  		},
   142  		ObjectMeta: metav1.ObjectMeta{
   143  			GenerateName: "pod-nfs-vol-",
   144  			Namespace:    ns,
   145  		},
   146  		Spec: v1.PodSpec{
   147  			Containers: []v1.Container{
   148  				{
   149  					Name:    "pod-nfs-vol",
   150  					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   151  					Command: []string{"/bin/sh"},
   152  					Args:    cmdLine,
   153  					VolumeMounts: []v1.VolumeMount{
   154  						{
   155  							Name:      "nfs-vol",
   156  							MountPath: "/mnt",
   157  						},
   158  					},
   159  					SecurityContext: &v1.SecurityContext{
   160  						Privileged: &isPrivileged,
   161  					},
   162  				},
   163  			},
   164  			RestartPolicy: v1.RestartPolicyNever, // don't restart the pod
   165  			Volumes: []v1.Volume{
   166  				{
   167  					Name: "nfs-vol",
   168  					VolumeSource: v1.VolumeSource{
   169  						NFS: &v1.NFSVolumeSource{
   170  							Server:   nfsIP,
   171  							Path:     "/exports",
   172  							ReadOnly: false,
   173  						},
   174  					},
   175  				},
   176  			},
   177  		},
   178  	}
   179  	rtnPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
   180  	framework.ExpectNoError(err)
   181  
   182  	err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, rtnPod.Name, f.Namespace.Name, framework.PodStartTimeout) // running & ready
   183  	framework.ExpectNoError(err)
   184  
   185  	rtnPod, err = c.CoreV1().Pods(ns).Get(ctx, rtnPod.Name, metav1.GetOptions{}) // return fresh pod
   186  	framework.ExpectNoError(err)
   187  	return rtnPod
   188  }
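
        // Typical usage, as in the host-cleanup tests below, where the command keeps the client pod busy:
        //
        //	pod := createPodUsingNfs(ctx, f, c, ns, nfsIP, "sleep 6000")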
   189  
   190  // getHostExternalAddress gets the node for a pod and returns the first External
   191  // address. Returns an error if the node the pod is on doesn't have an External
   192  // address.
   193  func getHostExternalAddress(ctx context.Context, client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
   194  	node, err := client.CoreV1().Nodes().Get(ctx, p.Spec.NodeName, metav1.GetOptions{})
   195  	if err != nil {
   196  		return "", err
   197  	}
   198  	for _, address := range node.Status.Addresses {
   199  		if address.Type == v1.NodeExternalIP {
   200  			if address.Address != "" {
   201  				externalAddress = address.Address
   202  				break
   203  			}
   204  		}
   205  	}
   206  	if externalAddress == "" {
   207  		err = fmt.Errorf("no external address for pod %v on node %v",
   208  			p.Name, p.Spec.NodeName)
   209  	}
   210  	return
   211  }
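
        // Note: the lookup above requires the node to report a NodeExternalIP; the tests that rely on it
        // are therefore restricted to SSH-capable providers (see the SkipUnlessProviderIs calls below).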
   212  
   213  // Checks for a lingering nfs mount and/or uid directory on the pod's host. The host IP is used
   214  // so that this test runs in GCE, where it appears that SSH cannot resolve the hostname.
   215  // If expectClean is true then we expect the node to be cleaned up and thus commands like
   216  // `ls <uid-dir>` should fail (since that dir was removed). If expectClean is false then we expect
   217  // the node not to be cleaned up, and thus commands like `ls <uid-dir>` should succeed. We wait up
   218  // to the timeout for the expected state; if it is not reached, an error is reported.
   219  func checkPodCleanup(ctx context.Context, c clientset.Interface, pod *v1.Pod, expectClean bool) {
   220  	timeout := 5 * time.Minute
   221  	poll := 20 * time.Second
   222  	podDir := filepath.Join("/var/lib/kubelet/pods", string(pod.UID))
   223  	mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
   224  	// use ip rather than hostname in GCE
   225  	nodeIP, err := getHostExternalAddress(ctx, c, pod)
   226  	framework.ExpectNoError(err)
   227  
   228  	condMsg := "deleted"
   229  	if !expectClean {
   230  		condMsg = "present"
   231  	}
   232  
   233  	// table of host tests to perform (order may matter so not using a map)
   234  	type testT struct {
   235  		feature string // feature to test
   236  		cmd     string // remote command to execute on node
   237  	}
   238  	tests := []testT{
   239  		{
   240  			feature: "pod UID directory",
   241  			cmd:     fmt.Sprintf("sudo ls %v", podDir),
   242  		},
   243  		{
   244  			feature: "pod nfs mount",
   245  			cmd:     fmt.Sprintf("sudo mount | grep %v", mountDir),
   246  		},
   247  	}
   248  
   249  	for _, test := range tests {
   250  		framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
   251  		err = wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
   252  			result, err := e2essh.NodeExec(ctx, nodeIP, test.cmd, framework.TestContext.Provider)
   253  			framework.ExpectNoError(err)
   254  			e2essh.LogResult(result)
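        			// "ok" means the remote command succeeded and produced output, i.e. the pod's
        			// UID directory or NFS mount is still present on the host.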
   255  			ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
   256  			if expectClean && ok { // keep trying
   257  				return false, nil
   258  			}
   259  			if !expectClean && !ok { // stop wait loop
   260  				return true, fmt.Errorf("%v is gone but expected to exist", test.feature)
   261  			}
   262  			return true, nil // done, host is as expected
   263  		})
   264  		framework.ExpectNoError(err, fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
   265  	}
   266  
   267  	if expectClean {
   268  		framework.Logf("Pod's host has been cleaned up")
   269  	} else {
   270  		framework.Logf("Pod's host has not been cleaned up (per expectation)")
   271  	}
   272  }
   273  
   274  var _ = SIGDescribe("kubelet", func() {
   275  	var (
   276  		c  clientset.Interface
   277  		ns string
   278  	)
   279  	f := framework.NewDefaultFramework("kubelet")
   280  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   281  
   282  	ginkgo.BeforeEach(func() {
   283  		c = f.ClientSet
   284  		ns = f.Namespace.Name
   285  	})
   286  
   287  	ginkgo.Describe("Clean up pods on node", func() {
   288  		var (
   289  			numNodes        int
   290  			nodeNames       sets.String
   291  			nodeLabels      map[string]string
   292  			resourceMonitor *e2ekubelet.ResourceMonitor
   293  		)
   294  		type DeleteTest struct {
   295  			podsPerNode int
   296  			timeout     time.Duration
   297  		}
   298  
   299  		deleteTests := []DeleteTest{
   300  			{podsPerNode: 10, timeout: 1 * time.Minute},
   301  		}
   302  
   303  		// Must be called in each It with the context of the test.
   304  		start := func(ctx context.Context) {
   305  			// Use node labels to restrict the pods to be assigned only to the
   306  			// nodes we observe initially.
   307  			nodeLabels = make(map[string]string)
   308  			nodeLabels["kubelet_cleanup"] = "true"
   309  			nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, c, maxNodesToCheck)
   310  			framework.ExpectNoError(err)
   311  			numNodes = len(nodes.Items)
   312  			nodeNames = sets.NewString()
   313  			for i := 0; i < len(nodes.Items); i++ {
   314  				nodeNames.Insert(nodes.Items[i].Name)
   315  			}
   316  			for nodeName := range nodeNames {
   317  				for k, v := range nodeLabels {
   318  					e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v)
   319  					ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, k)
   320  				}
   321  			}
   322  
   323  			// While the test itself uses only a bounded number of nodes, we need to know
   324  			// the actual number of nodes in the cluster to avoid running resourceMonitor
   325  			// against large clusters.
   326  			actualNodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
   327  			framework.ExpectNoError(err)
   328  
   329  			// Start resourceMonitor only in small clusters.
   330  			if len(actualNodes.Items) <= maxNodesToCheck {
   331  				resourceMonitor = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingInterval)
   332  				resourceMonitor.Start(ctx)
   333  				ginkgo.DeferCleanup(resourceMonitor.Stop)
   334  			}
   335  		}
   336  
   337  		for _, itArg := range deleteTests {
   338  			name := fmt.Sprintf(
   339  				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
   340  			itArg := itArg
   341  			ginkgo.It(name, func(ctx context.Context) {
   342  				start(ctx)
   343  				totalPods := itArg.podsPerNode * numNodes
   344  				ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
   345  				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
   346  
   347  				err := e2erc.RunRC(ctx, testutils.RCConfig{
   348  					Client:       f.ClientSet,
   349  					Name:         rcName,
   350  					Namespace:    f.Namespace.Name,
   351  					Image:        imageutils.GetPauseImageName(),
   352  					Replicas:     totalPods,
   353  					NodeSelector: nodeLabels,
   354  				})
   355  				framework.ExpectNoError(err)
   356  				// Perform a sanity check so that we know all desired pods are
   357  				// running on the nodes according to kubelet. The timeout is set to
   358  				// only 30 seconds here because e2erc.RunRC already waited for all pods to
   359  				// transition to the running status.
   360  				err = waitTillNPodsRunningOnNodes(ctx, f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
   361  				framework.ExpectNoError(err)
   362  				if resourceMonitor != nil {
   363  					resourceMonitor.LogLatest()
   364  				}
   365  
   366  				ginkgo.By("Deleting the RC")
   367  				e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, rcName)
   368  				// Check that the pods really are gone by querying /runningpods on the
   369  				// node. The /runningpods handler checks the container runtime (or its
   370  				// cache) and returns a list of running pods. Some possible causes of
   371  				// failures are:
   372  				//   - kubelet deadlock
   373  				//   - a bug in graceful termination (if it is enabled)
   374  				//   - the container runtime being slow to delete pods (or resource problems causing slowness)
   375  				start := time.Now()
   376  				err = waitTillNPodsRunningOnNodes(ctx, f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
   377  				framework.ExpectNoError(err)
   378  				framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
   379  					time.Since(start))
   380  				if resourceMonitor != nil {
   381  					resourceMonitor.LogCPUSummary()
   382  				}
   383  			})
   384  		}
   385  	})
   386  
   387  	// Test host cleanup when disrupting the volume environment.
   388  	ginkgo.Describe("host cleanup with volume mounts [HostCleanup][Flaky]", func() {
   389  
   390  		type hostCleanupTest struct {
   391  			itDescr string
   392  			podCmd  string
   393  		}
   394  
   395  		// Disrupt the nfs-server pod after a client pod accesses the nfs volume.
   396  		// Note: the nfs-server is stopped, NOT deleted. This is done to preserve its IP address.
   397  		//       If the nfs-server pod is deleted, the client pod's mount cannot be unmounted.
   398  		//       If the nfs-server pod is deleted and re-created, it gets a different IP address,
   399  		//       and the client pod's mount still cannot be unmounted.
   400  		ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
   401  			// issue #31272
   402  			var (
   403  				nfsServerPod *v1.Pod
   404  				nfsIP        string
   405  				pod          *v1.Pod // client pod
   406  			)
   407  
   408  			// fill in test slice for this context
   409  			testTbl := []hostCleanupTest{
   410  				{
   411  					itDescr: "after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed.",
   412  					podCmd:  "sleep 6000", // keep pod running
   413  				},
   414  				{
   415  					itDescr: "after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed.",
   416  					podCmd:  "while true; do echo FeFieFoFum >>/mnt/SUCCESS; sleep 1; cat /mnt/SUCCESS; done",
   417  				},
   418  			}
   419  
   420  			ginkgo.BeforeEach(func(ctx context.Context) {
   421  				e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
   422  				_, nfsServerPod, nfsIP = e2evolume.NewNFSServer(ctx, c, ns, []string{"-G", "777", "/exports"})
   423  			})
   424  
   425  			ginkgo.AfterEach(func(ctx context.Context) {
   426  				err := e2epod.DeletePodWithWait(ctx, c, pod)
   427  				framework.ExpectNoError(err, "AfterEach: Failed to delete client pod ", pod.Name)
   428  				err = e2epod.DeletePodWithWait(ctx, c, nfsServerPod)
   429  				framework.ExpectNoError(err, "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
   430  			})
   431  
   432  			// execute It blocks from above table of tests
   433  			for _, t := range testTbl {
   434  				t := t
   435  				ginkgo.It(t.itDescr, func(ctx context.Context) {
   436  					pod = createPodUsingNfs(ctx, f, c, ns, nfsIP, t.podCmd)
   437  
   438  					ginkgo.By("Stop the NFS server")
   439  					stopNfsServer(nfsServerPod)
   440  
   441  					ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
   442  					err := e2epod.DeletePodWithWait(ctx, c, pod)
   443  					gomega.Expect(err).To(gomega.HaveOccurred())
   444  					// pod object is now stale, but is intentionally not nil
   445  
   446  					ginkgo.By("Check if pod's host has been cleaned up -- expect not")
   447  					checkPodCleanup(ctx, c, pod, false)
   448  
   449  					ginkgo.By("Restart the nfs server")
   450  					restartNfsServer(nfsServerPod)
   451  
   452  					ginkgo.By("Verify that the deleted client pod is now cleaned up")
   453  					checkPodCleanup(ctx, c, pod, true)
   454  				})
   455  			}
   456  		})
   457  	})
   458  
   459  	// Tests for NodeLogQuery feature
   460  	f.Describe("kubectl get --raw \"/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=/<insert-log-file-name-here>\"", feature.NodeLogQuery, "[LinuxOnly]", func() {
   461  		var linuxNodeName string
   462  
   463  		ginkgo.BeforeEach(func(ctx context.Context) {
   464  			nodes, err := e2enode.GetReadyNodesIncludingTainted(ctx, c)
   465  			framework.ExpectNoError(err)
   466  			if len(nodes.Items) == 0 {
   467  				framework.Fail("Expected at least one Linux node to be present")
   468  			}
   469  			linuxNodeName = nodes.Items[0].Name
   470  		})
   471  
   472  		/*
   473  			Test if kubectl get --raw "/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query"
   474  			returns an error!
   475  		*/
   476  
   477  		ginkgo.It("should return the error with an empty --query option", func() {
   478  			ginkgo.By("Starting the command")
   479  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
   480  
   481  			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query", linuxNodeName)
   482  			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
   483  			_, _, err := framework.StartCmdAndStreamOutput(cmd)
   484  			if err != nil {
   485  				framework.Failf("Failed to start kubectl command! Error: %v", err)
   486  			}
   487  			err = cmd.Wait()
   488  			gomega.Expect(err).To(gomega.HaveOccurred(), "Command kubectl get --raw "+queryCommand+" was expected to return an error!")
   489  		})
   490  
   491  		/*
   492  			Test if kubectl get --raw "/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=kubelet"
   493  			returns the kubelet logs
   494  		*/
   495  
   496  		ginkgo.It("should return the kubelet logs ", func(ctx context.Context) {
   497  			ginkgo.By("Starting the command")
   498  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
   499  
   500  			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet", linuxNodeName)
   501  			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
   502  			result := runKubectlCommand(cmd)
   503  			assertContains("kubelet", result)
   504  		})
   505  
   506  		/*
   507  			Test if kubectl get --raw "/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=kubelet&boot=0"
   508  			returns kubelet logs from the current boot
   509  		*/
   510  
   511  		ginkgo.It("should return the kubelet logs for the current boot", func(ctx context.Context) {
   512  			ginkgo.By("Starting the command")
   513  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
   514  
   515  			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&boot=0", linuxNodeName)
   516  			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
   517  			result := runKubectlCommand(cmd)
   518  			assertContains("kubelet", result)
   519  		})
   520  
   521  		/*
   522  			Test if kubectl get --raw "/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=kubelet&tailLines=3"
   523  			returns the last three lines of the kubelet log
   524  		*/
   525  
   526  		ginkgo.It("should return the last three lines of the kubelet logs", func(ctx context.Context) {
   527  			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
   528  			ginkgo.By("Starting the command")
   529  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
   530  
   531  			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&tailLines=3", linuxNodeName)
   532  			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
   533  			result := runKubectlCommand(cmd)
   534  			logs := journalctlCommandOnNode(linuxNodeName, "-u kubelet -n 3")
   535  			if result != logs {
   536  				framework.Failf("Failed to receive the correct kubelet logs or the correct amount of lines of logs")
   537  			}
   538  		})
   539  
   540  		/*
   541  			Test if kubectl get --raw "/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=kubelet&boot=0&pattern=container"
   542  			returns kubelet logs from the current boot that match the pattern "container"
   543  		*/
   544  
   545  		ginkgo.It("should return the kubelet logs for the current boot with the pattern container", func(ctx context.Context) {
   546  			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
   547  			ginkgo.By("Starting the command")
   548  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
   549  
   550  			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&boot=0&pattern=container", linuxNodeName)
   551  			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
   552  			result := runKubectlCommand(cmd)
   553  			logs := journalctlCommandOnNode(linuxNodeName, "-u kubelet --grep container --boot 0")
   554  			if result != logs {
   555  				framework.Failf("Failed to receive the correct kubelet logs")
   556  			}
   557  		})
   558  
   559  		/*
   560  			Test if kubectl get --raw "/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=kubelet&sinceTime=<now>"
   561  			returns the kubelet logs since the current date and time. This can be "-- No entries --" which is correct.
   562  		*/
   563  
   564  		ginkgo.It("should return the kubelet logs since the current date and time", func() {
   565  			ginkgo.By("Starting the command")
   566  			start := time.Now().UTC()
   567  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
   568  
   569  			currentTime := start.Format(time.RFC3339)
   570  			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&sinceTime=%s", linuxNodeName, currentTime)
   571  			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
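        			// Go's reference time (Jan 2 15:04:05 2006) written as the layout journalctl's --since flag accepts.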
   572  			journalctlDateLayout := "2006-1-2 15:4:5"
   573  			result := runKubectlCommand(cmd)
   574  			logs := journalctlCommandOnNode(linuxNodeName, fmt.Sprintf("-u kubelet --since \"%s\"", start.Format(journalctlDateLayout)))
   575  			if result != logs {
   576  				framework.Failf("Failed to receive the correct kubelet logs or the correct amount of lines of logs")
   577  			}
   578  		})
   579  	})
   580  })
   581  
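        // runKubectlCommand starts the given kubectl command, streams its stdout into a buffer,
        // and returns the captured output as a string. It fails the test if the command cannot be
        // started or its output cannot be read.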
   582  func runKubectlCommand(cmd *exec.Cmd) (result string) {
   583  	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
   584  	var buf bytes.Buffer
   585  	if err != nil {
   586  		framework.Failf("Failed to start kubectl command! Stderr: %v, error: %v", stderr, err)
   587  	}
   588  	defer stdout.Close()
   589  	defer stderr.Close()
   590  	defer framework.TryKill(cmd)
   591  
   592  	bytesRead, err := io.Copy(&buf, stdout)
   593  	if err != nil {
   594  		framework.Failf("Failed to read output from kubectl command %s: %v\n Stderr: %v", cmd.Args, err, stderr)
   595  	}
   596  	out := ""
   597  	if bytesRead >= 0 {
   598  		out = buf.String()
   599  	}
   600  
   601  	framework.Logf("Kubectl output: %s", out)
   602  	return out
   603  }
   604  
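        // assertContains fails the test unless expectedString occurs somewhere in result.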
   605  func assertContains(expectedString string, result string) {
   606  	if strings.Contains(result, expectedString) {
   607  		return
   608  	}
   609  	framework.Failf("Failed to find \"%s\"", expectedString)
   610  }
   611  
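        // journalctlCommandOnNode runs journalctl on the given node over SSH with the given
        // extra arguments and returns its stdout.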
   612  func journalctlCommandOnNode(nodeName string, args string) string {
   613  	result, err := e2essh.NodeExec(context.Background(), nodeName,
   614  		"journalctl --utc --no-pager --output=short-precise "+args, framework.TestContext.Provider)
   615  	framework.ExpectNoError(err)
   616  	e2essh.LogResult(result)
   617  	return result.Stdout
   618  }