k8s.io/kubernetes@v1.29.3/test/e2e/storage/csi_mock/csi_selinux_mount.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package csi_mock

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"sync/atomic"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

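// This suite verifies kubelet's SELinux mount support with a mock CSI driver:
// when the driver advertises SELinuxMount support and a pod on a
// ReadWriteOncePod volume sets an SELinux context, kubelet is expected to pass
// a matching "context=..." mount option to NodeStage / NodePublish, so the
// volume is mounted with the right label instead of being recursively
// relabeled.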
var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
	f := framework.NewDefaultFramework("csi-mock-volumes-selinux")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	m := newMockDriverSetup(f)

	f.Context("SELinuxMount [LinuxOnly]", feature.SELinux, func() {
		// Make sure all options are set so that system-specific defaults are not used.
		seLinuxOpts1 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c0,c1",
		}
		seLinuxMountOption1 := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
		seLinuxOpts2 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c98,c99",
		}
		seLinuxMountOption2 := "context=\"system_u:object_r:container_file_t:s0:c98,c99\""
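		// Note the translation from process to file labels: the expected mount
		// options keep the SELinux user and MLS level from the pod's
		// SELinuxOptions above, but substitute the file-level role (object_r)
		// and type (container_file_t).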

		tests := []struct {
			name                       string
			csiDriverSELinuxEnabled    bool
			firstPodSELinuxOpts        *v1.SELinuxOptions
			startSecondPod             bool
			secondPodSELinuxOpts       *v1.SELinuxOptions
			mountOptions               []string
			volumeMode                 v1.PersistentVolumeAccessMode
			expectedFirstMountOptions  []string
			expectedSecondMountOptions []string
			expectedUnstage            bool
		}{
			// Start just a single pod and check that its volume is mounted correctly.
			{
				name:                      "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: []string{seLinuxMountOption1},
			},
			{
				name:                      "should add SELinux mount option to existing mount options",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				mountOptions:              []string{"noexec", "noatime"},
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: []string{"noexec", "noatime", seLinuxMountOption1},
			},
			{
				name:                      "should not pass SELinux mount option for RWO volume",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOnce,
				expectedFirstMountOptions: nil,
			},
			{
				name:                      "should not pass SELinux mount option for Pod without SELinux context",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       nil,
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: nil,
			},
			{
				name:                      "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
				csiDriverSELinuxEnabled:   false,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: nil,
			},
			// Start two pods in sequence and check whether the volume is unmounted in between.
			{
				name:                       "should not unstage volume when starting a second pod with the same SELinux context",
				csiDriverSELinuxEnabled:    true,
				firstPodSELinuxOpts:        &seLinuxOpts1,
				startSecondPod:             true,
				secondPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                 v1.ReadWriteOncePod,
				expectedFirstMountOptions:  []string{seLinuxMountOption1},
				expectedSecondMountOptions: []string{seLinuxMountOption1},
				expectedUnstage:            false,
			},
			{
				name:                       "should unstage volume when starting a second pod with different SELinux context",
				csiDriverSELinuxEnabled:    true,
				firstPodSELinuxOpts:        &seLinuxOpts1,
				startSecondPod:             true,
				secondPodSELinuxOpts:       &seLinuxOpts2,
				volumeMode:                 v1.ReadWriteOncePod,
				expectedFirstMountOptions:  []string{seLinuxMountOption1},
				expectedSecondMountOptions: []string{seLinuxMountOption2},
				expectedUnstage:            true,
			},
		}
		for _, t := range tests {
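			// Capture the range variable: each ginkgo.It closure below must
			// see its own copy of t (Go versions before 1.22 reuse the loop
			// variable across iterations).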
			t := t
			ginkgo.It(t.name, func(ctx context.Context) {
				if framework.NodeOSDistroIs("windows") {
					e2eskipper.Skipf("SELinuxMount is only applied on Linux nodes -- skipping")
				}
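				// The pre-hook below records the MountFlags that kubelet sends
				// in NodeStageVolume / NodePublishVolume requests and counts
				// stage/unstage and publish/unpublish calls to the mock
				// driver, so the test can assert on them later.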
				var nodeStageMountOpts, nodePublishMountOpts []string
				var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32
				m.init(ctx, testParameters{
					disableAttach:      true,
					registerDriver:     true,
					enableSELinuxMount: &t.csiDriverSELinuxEnabled,
					hooks:              createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls),
				})
				ginkgo.DeferCleanup(m.cleanup)

				// Act
				ginkgo.By("Starting the initial pod")
				accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
				_, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts)
				err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "starting the initial pod")

				// Assert
				ginkgo.By("Checking the initial pod mount options")
				gomega.Expect(nodeStageMountOpts).To(gomega.Equal(t.expectedFirstMountOptions), "NodeStage MountFlags for the initial pod")
				gomega.Expect(nodePublishMountOpts).To(gomega.Equal(t.expectedFirstMountOptions), "NodePublish MountFlags for the initial pod")

				ginkgo.By("Checking the CSI driver calls for the initial pod")
				gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage call count for the initial pod")
				gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish call count for the initial pod")
				gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage call count for the initial pod")
				gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish call count for the initial pod")

				if !t.startSecondPod {
					return
				}

				// Arrange: second part of the test
				ginkgo.By("Starting the second pod to check if a volume used by the initial pod is / is not unmounted based on SELinux context")

				// Skip the scheduler; it would refuse to schedule the second pod with a ReadWriteOncePod PV.
				pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "getting the initial pod")
				nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
				pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts)
				framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
				m.pods = append(m.pods, pod2)

				// Delete the initial pod only after kubelet processes the second pod and adds its volumes to
				// DesiredStateOfWorld. At that point, any volume NodeUnpublish / NodeUnstage must happen because
				// of the SELinux context mismatch and not because of a race while the second pod's volumes are
				// not yet in DesiredStateOfWorld.
				ginkgo.By("Waiting for the second pod to fail to start because of ReadWriteOncePod.")
				eventSelector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.name":      pod2.Name,
					"involvedObject.namespace": pod2.Namespace,
					"reason":                   events.FailedMountVolume,
				}.AsSelector().String()
				var msg string
				if t.expectedUnstage {
					// This message is emitted before kubelet checks for ReadWriteOncePod.
					msg = "conflicting SELinux labels of volume"
				} else {
					msg = "volume uses the ReadWriteOncePod access mode and is already in use by another pod"
				}
				err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart)
				framework.ExpectNoError(err, "waiting for event %q in the second test pod", msg)

				// Count fresh CSI driver calls between the first and the second pod.
				nodeStageMountOpts = nil
				nodePublishMountOpts = nil
				unstageCalls.Store(0)
				unpublishCalls.Store(0)
				stageCalls.Store(0)
				publishCalls.Store(0)

				// Act: second part of the test
				ginkgo.By("Deleting the initial pod")
				err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
				framework.ExpectNoError(err, "deleting the initial pod")

				// Assert: second part of the test
				ginkgo.By("Waiting for the second pod to start")
				err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
				framework.ExpectNoError(err, "starting the second pod")

				ginkgo.By("Checking CSI driver calls for the second pod")
				if t.expectedUnstage {
					// The volume must be fully unstaged between the first and the second pod.
					gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage calls after the first pod is deleted")
					gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage calls for the second pod")
					// The second pod got the right mount option.
					gomega.Expect(nodeStageMountOpts).To(gomega.Equal(t.expectedSecondMountOptions), "NodeStage MountFlags for the second pod")
				} else {
					// The volume must not be fully unstaged between the first and the second pod.
					gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage calls after the first pod is deleted")
					gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage calls for the second pod")
				}
				// In both cases, NodeUnpublish and NodePublish are called with the right mount options.
				gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish calls after the first pod is deleted")
				gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish calls for the second pod")
				gomega.Expect(nodePublishMountOpts).To(gomega.Equal(t.expectedSecondMountOptions), "NodePublish MountFlags for the second pod")
			})
		}
	})
})

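// This suite verifies kubelet's volume_manager_selinux_* metrics: they must
// grow only when pods with mismatching SELinux contexts share a volume, and
// they must stay flat otherwise.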
var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics", func() {
	f := framework.NewDefaultFramework("csi-mock-volumes-selinux-metrics")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	m := newMockDriverSetup(f)

	// [Serial]: the tests read global kubelet metrics, so no other test may change them in parallel.
	f.Context("SELinuxMount metrics [LinuxOnly]", feature.SELinux, feature.SELinuxMountReadWriteOncePod, f.WithSerial(), func() {
		// Make sure all options are set so that system-specific defaults are not used.
		seLinuxOpts1 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c0,c1",
		}
		seLinuxOpts2 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c98,c99",
		}

		tests := []struct {
			name                    string
			csiDriverSELinuxEnabled bool
			firstPodSELinuxOpts     *v1.SELinuxOptions
			secondPodSELinuxOpts    *v1.SELinuxOptions
			volumeMode              v1.PersistentVolumeAccessMode
			waitForSecondPodStart   bool
			secondPodFailureEvent   string
			expectIncreases         sets.String
		}{
			{
				name:                    "warning is not bumped on two Pods with the same context on RWO volume",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts1,
				volumeMode:              v1.ReadWriteOnce,
				waitForSecondPodStart:   true,
				expectIncreases:         sets.NewString( /* no metric is increased, admitted_total was already increased when the first pod started */ ),
			},
			{
				name:                    "warning is bumped on two Pods with a different context on RWO volume",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts2,
				volumeMode:              v1.ReadWriteOnce,
				waitForSecondPodStart:   true,
				expectIncreases:         sets.NewString("volume_manager_selinux_volume_context_mismatch_warnings_total"),
			},
			{
				name:                    "error is bumped on two Pods with a different context on RWOP volume",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts2,
				secondPodFailureEvent:   "conflicting SELinux labels of volume",
				volumeMode:              v1.ReadWriteOncePod,
				waitForSecondPodStart:   false,
				expectIncreases:         sets.NewString("volume_manager_selinux_volume_context_mismatch_errors_total"),
			},
		}
		for _, t := range tests {
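			// Capture the range variable: each ginkgo.It closure below must
			// see its own copy of t (Go versions before 1.22 reuse the loop
			// variable across iterations).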
			t := t
			ginkgo.It(t.name, func(ctx context.Context) {
				// Some metrics use the CSI driver name as a label, which is "csi-mock-" + the namespace name.
				volumePluginLabel := "{volume_plugin=\"kubernetes.io/csi/csi-mock-" + f.Namespace.Name + "\"}"

				// All SELinux metrics. Unless explicitly mentioned in test.expectIncreases, these metrics must not
				// grow during a test.
				allMetrics := sets.NewString(
					"volume_manager_selinux_container_errors_total",
					"volume_manager_selinux_container_warnings_total",
					"volume_manager_selinux_pod_context_mismatch_errors_total",
					"volume_manager_selinux_pod_context_mismatch_warnings_total",
					"volume_manager_selinux_volume_context_mismatch_errors_total"+volumePluginLabel,
					"volume_manager_selinux_volume_context_mismatch_warnings_total"+volumePluginLabel,
					"volume_manager_selinux_volumes_admitted_total"+volumePluginLabel,
				)
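				// For illustration, a labeled entry in allMetrics then reads
				// (namespace name abbreviated):
				//   volume_manager_selinux_volumes_admitted_total{volume_plugin="kubernetes.io/csi/csi-mock-<ns>"}
				// which matches the Metric.String() output compared against in
				// grabMetrics() below.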

				if framework.NodeOSDistroIs("windows") {
					e2eskipper.Skipf("SELinuxMount is only applied on Linux nodes -- skipping")
				}
				grabber, err := e2emetrics.NewMetricsGrabber(ctx, f.ClientSet, nil, f.ClientConfig(), true, false, false, false, false, false)
				framework.ExpectNoError(err, "creating the metrics grabber")

				var nodeStageMountOpts, nodePublishMountOpts []string
				var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32
				m.init(ctx, testParameters{
					disableAttach:      true,
					registerDriver:     true,
					enableSELinuxMount: &t.csiDriverSELinuxEnabled,
					hooks:              createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls),
				})
				ginkgo.DeferCleanup(m.cleanup)

				ginkgo.By("Starting the first pod")
				accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
				_, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts)
				err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "starting the initial pod")

				ginkgo.By("Grabbing initial metrics")
				pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "getting the initial pod")
				metrics, err := grabMetrics(ctx, grabber, pod.Spec.NodeName, allMetrics)
				framework.ExpectNoError(err, "collecting the initial metrics")
				dumpMetrics(metrics)

				// Act
				ginkgo.By("Starting the second pod")
				// Skip the scheduler; it would refuse to schedule the second pod with a ReadWriteOncePod PV.
				nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
				pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts)
				framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
				m.pods = append(m.pods, pod2)

				if t.waitForSecondPodStart {
					err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
					framework.ExpectNoError(err, "starting the second pod")
				} else {
					ginkgo.By("Waiting for the second pod to fail to start")
					eventSelector := fields.Set{
						"involvedObject.kind":      "Pod",
						"involvedObject.name":      pod2.Name,
						"involvedObject.namespace": pod2.Namespace,
						"reason":                   events.FailedMountVolume,
					}.AsSelector().String()
					err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, t.secondPodFailureEvent, f.Timeouts.PodStart)
					framework.ExpectNoError(err, "waiting for event %q in the second test pod", t.secondPodFailureEvent)
				}

				// Assert: count the metrics
				ginkgo.By("Waiting for expected metric changes")
				err = waitForMetricIncrease(ctx, grabber, pod.Spec.NodeName, allMetrics, t.expectIncreases, metrics, framework.PodStartShortTimeout)
				framework.ExpectNoError(err, "waiting for metrics %s to increase", t.expectIncreases)
			})
		}
	})
})

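// grabMetrics reads the current kubelet metrics from the given node and
// returns the values of the metrics listed in metricNames, keyed by metric
// name including its labels (the output of Metric.String()).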
func grabMetrics(ctx context.Context, grabber *e2emetrics.Grabber, nodeName string, metricNames sets.String) (map[string]float64, error) {
	response, err := grabber.GrabFromKubelet(ctx, nodeName)
	if err != nil {
		return nil, err
	}

	metrics := map[string]float64{}
	for _, samples := range response {
		if len(samples) == 0 {
			continue
		}
		// Find the *last* sample that has the label we are interested in.
		for i := len(samples) - 1; i >= 0; i-- {
			metricNameWithLabels := samples[i].Metric.String()
			if metricNames.Has(metricNameWithLabels) {
				metrics[metricNameWithLabels] = float64(samples[i].Value)
				break
			}
		}
	}

	return metrics, nil
}

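// waitForMetricIncrease polls kubelet metrics on the given node until every
// metric in expectedIncreaseNames has grown above its value in initialValues.
// All other metrics in allMetricNames must stay unchanged; any unexpected
// change fails the wait immediately.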
func waitForMetricIncrease(ctx context.Context, grabber *e2emetrics.Grabber, nodeName string, allMetricNames, expectedIncreaseNames sets.String, initialValues map[string]float64, timeout time.Duration) error {
	var noIncreaseMetrics sets.String
	var metrics map[string]float64

	err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
		var err error
		metrics, err = grabMetrics(ctx, grabber, nodeName, allMetricNames)
		if err != nil {
			return false, err
		}

		noIncreaseMetrics = sets.NewString()
		// Always evaluate all SELinux metrics to check that the other metrics are not unexpectedly increased.
		for name := range allMetricNames {
			expectIncrease := false

			// allMetricNames can include a {volume_plugin="XXX"} label, while expectedIncreaseNames does not.
			// Compare them properly. The value of volume_plugin="XXX" was already checked in grabMetrics(),
			// so we can ignore it here.
			for expectedIncreaseMetricName := range expectedIncreaseNames {
				if strings.HasPrefix(name, expectedIncreaseMetricName) {
					expectIncrease = true
					break
				}
			}
			if expectIncrease {
				if metrics[name] <= initialValues[name] {
					noIncreaseMetrics.Insert(name)
				}
			} else {
				// Expect the metric to be stable.
				if initialValues[name] != metrics[name] {
					return false, fmt.Errorf("metric %s unexpectedly changed to %v", name, metrics[name])
				}
			}
		}
		return noIncreaseMetrics.Len() == 0, nil
	})

	ginkgo.By("Dumping final metrics")
	dumpMetrics(metrics)

	if wait.Interrupted(err) {
		return fmt.Errorf("timed out waiting for metrics %v", noIncreaseMetrics.List())
	}
	return err
}

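// dumpMetrics logs the given metric values, sorted by metric name.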
func dumpMetrics(metrics map[string]float64) {
	// Print the metrics sorted by metric name for better readability.
	keys := make([]string, 0, len(metrics))
	for key := range metrics {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	for _, key := range keys {
		framework.Logf("Metric %s: %v", key, metrics[key])
	}
}