//go:build linux
// +build linux

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2enode

import (
	"context"
	"fmt"
	"os"
	"path"
	"sort"
	"strconv"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
	"k8s.io/kubernetes/test/e2e/perftype"
	nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"
)

// Markers bracketing the time-series JSON emitted to the build log so that
// downstream log scrapers can locate and extract the payload.
const (
	// TimeSeriesTag is the tag for time series.
	TimeSeriesTag = "[Result:TimeSeries]"
	// TimeSeriesEnd is the end tag for time series.
	TimeSeriesEnd = "[Finish:TimeSeries]"
)

// dumpDataToFile inserts the current timestamp into the labels and writes the
// data for the test into the file with the specified prefix.
49 func dumpDataToFile(data interface{}, labels map[string]string, prefix string) { 50 testName := labels["test"] 51 fileName := path.Join(framework.TestContext.ReportDir, fmt.Sprintf("%s-%s-%s.json", prefix, framework.TestContext.ReportPrefix, testName)) 52 labels["timestamp"] = strconv.FormatInt(time.Now().UTC().Unix(), 10) 53 framework.Logf("Dumping perf data for test %q to %q.", testName, fileName) 54 if err := os.WriteFile(fileName, []byte(framework.PrettyPrintJSON(data)), 0644); err != nil { 55 framework.Logf("Failed to write perf data for test %q to %q: %v", testName, fileName, err) 56 } 57 } 58 59 // logPerfData writes the perf data to a standalone json file if the 60 // framework.TestContext.ReportDir is non-empty, or to the general build log 61 // otherwise. The perfType identifies which type of the perf data it is, such 62 // as "cpu" and "memory". If an error occurs, no perf data will be logged. 63 func logPerfData(p *perftype.PerfData, perfType string) { 64 if framework.TestContext.ReportDir == "" { 65 printPerfData(p) 66 return 67 } 68 dumpDataToFile(p, p.Labels, "performance-"+perfType) 69 } 70 71 // logDensityTimeSeries writes the time series data of operation and resource 72 // usage to a standalone json file if the framework.TestContext.ReportDir is 73 // non-empty, or to the general build log otherwise. If an error occurs, 74 // no perf data will be logged. 75 func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]metav1.Time, testInfo map[string]string) { 76 timeSeries := &nodeperftype.NodeTimeSeries{ 77 Labels: testInfo, 78 Version: e2eperf.CurrentKubeletPerfMetricsVersion, 79 } 80 // Attach operation time series. 81 timeSeries.OperationData = map[string][]int64{ 82 "create": getCumulatedPodTimeSeries(create), 83 "running": getCumulatedPodTimeSeries(watch), 84 } 85 // Attach resource time series. 
86 timeSeries.ResourceData = rc.GetResourceTimeSeries() 87 88 if framework.TestContext.ReportDir == "" { 89 framework.Logf("%s %s\n%s", TimeSeriesTag, framework.PrettyPrintJSON(timeSeries), TimeSeriesEnd) 90 return 91 } 92 dumpDataToFile(timeSeries, timeSeries.Labels, "time_series") 93 } 94 95 type int64arr []int64 96 97 func (a int64arr) Len() int { return len(a) } 98 func (a int64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 99 func (a int64arr) Less(i, j int) bool { return a[i] < a[j] } 100 101 // getCumulatedPodTimeSeries gets the cumulative pod number time series. 102 func getCumulatedPodTimeSeries(timePerPod map[string]metav1.Time) []int64 { 103 timeSeries := make(int64arr, 0) 104 for _, ts := range timePerPod { 105 timeSeries = append(timeSeries, ts.Time.UnixNano()) 106 } 107 // Sort all timestamps. 108 sort.Sort(timeSeries) 109 return timeSeries 110 } 111 112 // getLatencyPerfData returns perf data of pod startup latency. 113 func getLatencyPerfData(latency e2emetrics.LatencyMetric, testInfo map[string]string) *perftype.PerfData { 114 return &perftype.PerfData{ 115 Version: e2eperf.CurrentKubeletPerfMetricsVersion, 116 DataItems: []perftype.DataItem{ 117 { 118 Data: map[string]float64{ 119 "Perc50": float64(latency.Perc50) / 1000000, 120 "Perc90": float64(latency.Perc90) / 1000000, 121 "Perc99": float64(latency.Perc99) / 1000000, 122 "Perc100": float64(latency.Perc100) / 1000000, 123 }, 124 Unit: "ms", 125 Labels: map[string]string{ 126 "datatype": "latency", 127 "latencytype": "create-pod", 128 }, 129 }, 130 }, 131 Labels: testInfo, 132 } 133 } 134 135 // getThroughputPerfData returns perf data of pod creation startup throughput. 
136 func getThroughputPerfData(batchLag time.Duration, e2eLags []e2emetrics.PodLatencyData, podsNr int, testInfo map[string]string) *perftype.PerfData { 137 return &perftype.PerfData{ 138 Version: e2eperf.CurrentKubeletPerfMetricsVersion, 139 DataItems: []perftype.DataItem{ 140 { 141 Data: map[string]float64{ 142 "batch": float64(podsNr) / batchLag.Minutes(), 143 "single-worst": 1.0 / e2eLags[len(e2eLags)-1].Latency.Minutes(), 144 }, 145 Unit: "pods/min", 146 Labels: map[string]string{ 147 "datatype": "throughput", 148 "latencytype": "create-pod", 149 }, 150 }, 151 }, 152 Labels: testInfo, 153 } 154 } 155 156 // getTestNodeInfo returns a label map containing the test name and 157 // description, the name of the node on which the test will be run, the image 158 // name of the node, and the node capacities. 159 func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string { 160 nodeName := framework.TestContext.NodeName 161 node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}) 162 framework.ExpectNoError(err) 163 164 cpu, ok := node.Status.Capacity[v1.ResourceCPU] 165 if !ok { 166 framework.Failf("Fail to fetch CPU capacity value of test node.") 167 } 168 169 memory, ok := node.Status.Capacity[v1.ResourceMemory] 170 if !ok { 171 framework.Failf("Fail to fetch Memory capacity value of test node.") 172 } 173 174 cpuValue, ok := cpu.AsInt64() 175 if !ok { 176 framework.Failf("Fail to fetch CPU capacity value as Int64.") 177 } 178 179 memoryValue, ok := memory.AsInt64() 180 if !ok { 181 framework.Failf("Fail to fetch Memory capacity value as Int64.") 182 } 183 184 image := node.Status.NodeInfo.OSImage 185 if framework.TestContext.ImageDescription != "" { 186 image = fmt.Sprintf("%s (%s)", image, framework.TestContext.ImageDescription) 187 } 188 return map[string]string{ 189 "node": nodeName, 190 "test": testName, 191 "image": image, 192 "machine": fmt.Sprintf("cpu:%dcore,memory:%.1fGB", cpuValue, 
float32(memoryValue)/(1024*1024*1024)), 193 "desc": testDesc, 194 } 195 } 196 197 // printPerfData prints the perfdata in json format with PerfResultTag prefix. 198 // If an error occurs, nothing will be printed. 199 func printPerfData(p *perftype.PerfData) { 200 // Notice that we must make sure the perftype.PerfResultEnd is in a new line. 201 if str := framework.PrettyPrintJSON(p); str != "" { 202 framework.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd) 203 } 204 }