/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"errors"
	"io"
	"io/fs"
	"log/slog"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/go-logr/logr"
	"go.uber.org/zap/zapcore"
	"k8s.io/component-base/featuregate"
	logsapi "k8s.io/component-base/logs/api/v1"
	_ "k8s.io/component-base/logs/json/register"
	"k8s.io/klog/v2"
)

// BenchmarkEncoding measures the cost of encoding log entries in the
// supported output styles: printf-style text, structured text (klog), and
// JSON. It replays the log messages stored under data/ and reports msgs/s
// instead of ns/op, because the number of messages emitted per benchmark
// "op" varies from file to file.
func BenchmarkEncoding(b *testing.B) {
	// Tracks which per-file statistics have already been logged, keyed by
	// their string representation, so identical stats are printed once.
	seen := map[string]bool{}

	// Each "data/(v[0-9]/)?*.log" file is expected to contain JSON log
	// messages. We generate one sub-benchmark for each file where logging
	// is tested with the log level from the directory.
	if err := filepath.Walk("data", func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !strings.HasSuffix(path, ".log") {
			return nil
		}
		// loadLog is a project-local helper (defined elsewhere in this
		// package); it parses the file into replayable messages plus
		// aggregate statistics.
		messages, stats, err := loadLog(path)
		if err != nil {
			return err
		}
		// Only print unique file statistics. They get shown for the
		// first file where new statistics are encountered. The
		// assumption here is that the there are no files with
		// different content and exactly the same statistics.
		statsStr := stats.String()
		if !seen[statsStr] {
			b.Log(path + "\n" + statsStr)
			seen[statsStr] = true
		}
		b.Run(strings.TrimSuffix(strings.TrimPrefix(path, "data/"), ".log"), func(b *testing.B) {
			// Take verbosity threshold from directory, if present.
			// Messages above this threshold are skipped during replay.
			vMatch := regexp.MustCompile(`/v(\d+)/`).FindStringSubmatch(path)
			v := 0
			if vMatch != nil {
				v, _ = strconv.Atoi(vMatch[1])
			}

			// Bytes written per sub-benchmark (keyed by sub-benchmark
			// name), reported together after all formats have run.
			fileSizes := map[string]int{}

			// test configures logging for the given format, replays all
			// messages at or below the verbosity threshold via print,
			// and reports throughput. Logging state is captured and
			// restored so each invocation starts clean.
			test := func(b *testing.B, format string, print func(logger klog.Logger, item logMessage)) {
				state := klog.CaptureState()
				defer state.Restore()

				// To make the tests a bit more realistic, at
				// least do system calls during each write.
				// newBytesWritten (project-local) also counts the bytes
				// written, which feeds the fileSizes report below.
				output := newBytesWritten(b, "/dev/null")
				c := logsapi.NewLoggingConfiguration()
				c.Format = format
				o := logsapi.LoggingOptions{
					ErrorStream: output,
					InfoStream:  output,
				}
				klog.SetOutput(output)
				// Undo the global logging configuration applied below,
				// even if the benchmark fails.
				defer func() {
					if err := logsapi.ResetForTest(nil); err != nil {
						b.Errorf("error resetting logsapi: %v", err)
					}
				}()
				if err := logsapi.ValidateAndApplyWithOptions(c, &o, nil); err != nil {
					b.Fatalf("Unexpected error configuring logging: %v", err)
				}
				logger := klog.Background()

				// Edit and run with this if branch enabled to use slog instead of zapr for JSON.
				// The "&& false" keeps this experimental path compiled but dead.
				if format == "json" && false {
					var level slog.LevelVar
					level.Set(slog.Level(-3)) // hack
					logger = logr.FromSlogHandler(slog.NewJSONHandler(output, &slog.HandlerOptions{
						AddSource: true,
						Level:     &level,
						// ReplaceAttr rewrites slog's default attributes so
						// the JSON output matches the zapr-based format
						// (ts/v/caller keys) as closely as possible.
						ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
							switch a.Key {
							case slog.TimeKey:
								// Could be user-supplied "time".
								if a.Value.Kind() == slog.KindTime {
									// zapr emits fractional milliseconds since epoch.
									return slog.Float64("ts", float64(a.Value.Time().UnixMicro())/1000)
								}
							case slog.LevelKey:
								level := a.Value.Any().(slog.Level)
								if level >= slog.LevelError {
									// No verbosity on errors.
									return slog.Attr{}
								}
								if level >= 0 {
									return slog.Int("v", 0)
								}
								// Negative slog levels map back to klog verbosity.
								return slog.Int("v", int(-level))
							case slog.SourceKey:
								// Reuse zap's path shortening for the "caller" key.
								caller := zapcore.EntryCaller{
									Defined: true,
									File:    a.Value.String(),
								}
								return slog.String("caller", caller.TrimmedPath())
							}
							return a
						},
					}))
				}

				b.ResetTimer()
				start := time.Now()
				total := int64(0)
				for i := 0; i < b.N; i++ {
					for _, item := range messages {
						if item.verbosity <= v {
							print(logger, item)
							total++
						}
					}
				}
				end := time.Now()
				duration := end.Sub(start)

				// Report messages/s instead of ns/op because "op" varies.
				b.ReportMetric(0, "ns/op")
				b.ReportMetric(float64(total)/duration.Seconds(), "msgs/s")
				fileSizes[filepath.Base(b.Name())] = int(output.bytesWritten)
			}

			// printf and prints are project-local replay helpers
			// (defined elsewhere in this package).
			b.Run("printf", func(b *testing.B) {
				test(b, "text", func(_ klog.Logger, item logMessage) {
					printf(item)
				})
			})
			b.Run("structured", func(b *testing.B) {
				test(b, "text", prints)
			})
			b.Run("JSON", func(b *testing.B) {
				test(b, "json", prints)
			})

			b.Logf("%s: file sizes: %v\n", path, fileSizes)
		})
		return nil
	}); err != nil {
		b.Fatalf("reading 'data' directory: %v", err)
	}
}

// loadGeneratorConfig controls the synthetic log load produced by
// generateOutput for BenchmarkWriting.
type loadGeneratorConfig struct {
	// Length of the message written in each log entry.
	messageLength int

	// Percentage of error log entries written.
	errorPercentage float64

	// Number of concurrent goroutines that generate log entries.
	workers int
}

// BenchmarkWriting simulates writing of a stream which mixes info and error log
// messages at a certain ratio. In contrast to BenchmarkEncoding, this stresses
// the output handling and includes the usual additional information (caller,
// time stamp).
//
// See https://github.com/kubernetes/kubernetes/issues/107029 for the
// motivation.
func BenchmarkWriting(b *testing.B) {
	// This could be made configurable and/or we could benchmark different
	// configurations automatically.
	config := loadGeneratorConfig{
		messageLength:   300,
		errorPercentage: 1.0,
		workers:         100,
	}

	benchmarkWriting(b, config)
}

// benchmarkWriting runs the load generator twice: once with output discarded
// (isolates encoding and locking overhead) and once writing into temporary
// files (adds real file I/O).
func benchmarkWriting(b *testing.B, config loadGeneratorConfig) {
	b.Run("discard", func(b *testing.B) {
		benchmarkOutputFormats(b, config, true)
	})
	b.Run("tmp-files", func(b *testing.B) {
		benchmarkOutputFormats(b, config, false)
	})
}

// benchmarkOutputFormats covers both supported output formats, structured
// text and JSON.
func benchmarkOutputFormats(b *testing.B, config loadGeneratorConfig, discard bool) {
	b.Run("structured", func(b *testing.B) {
		benchmarkOutputFormat(b, config, discard, "text")
	})
	b.Run("JSON", func(b *testing.B) {
		benchmarkOutputFormat(b, config, discard, "json")
	})
}

// benchmarkOutputFormat covers writing info and error messages into a single
// shared stream vs. splitting them into separate streams.
func benchmarkOutputFormat(b *testing.B, config loadGeneratorConfig, discard bool, format string) {
	b.Run("single-stream", func(b *testing.B) {
		benchmarkOutputFormatStream(b, config, discard, format, false)
	})
	b.Run("split-stream", func(b *testing.B) {
		benchmarkOutputFormatStream(b, config, discard, format, true)
	})
}

// benchmarkOutputFormatStream configures logging via logsapi exactly as a
// normal component would, with the requested format and stream splitting,
// directs output either to io.Discard or to files in a per-benchmark temp
// directory, and then runs the load generator. Global logging state is
// captured up front and restored when the benchmark finishes.
func benchmarkOutputFormatStream(b *testing.B, config loadGeneratorConfig, discard bool, format string, splitStreams bool) {
	tmpDir := b.TempDir()
	state := klog.CaptureState()
	defer state.Restore()

	// Split-stream buffering is gated behind alpha/beta logging options,
	// so enable those gates explicitly.
	featureGate := featuregate.NewFeatureGate()
	// NOTE(review): a possible error return from AddFeatureGates is ignored
	// here — confirm it cannot fail for these gates.
	logsapi.AddFeatureGates(featureGate)
	if err := featureGate.SetFromMap(map[string]bool{
		string(logsapi.LoggingAlphaOptions): true,
		string(logsapi.LoggingBetaOptions):  true,
	}); err != nil {
		b.Fatalf("Set feature gates: %v", err)
	}

	// Create a logging configuration using the exact same code as a normal
	// component. In order to redirect output, we provide a LoggingOptions
	// instance.
	var o logsapi.LoggingOptions
	c := logsapi.NewLoggingConfiguration()
	c.Format = format
	if splitStreams {
		// Buffer info messages (64Ki) for both formats so the split
		// streams behave as they would in production.
		c.Options.JSON.SplitStream = true
		if err := c.Options.JSON.InfoBufferSize.Set("64Ki"); err != nil {
			b.Fatalf("Error setting buffer size: %v", err)
		}
		c.Options.Text.SplitStream = true
		if err := c.Options.Text.InfoBufferSize.Set("64Ki"); err != nil {
			b.Fatalf("Error setting buffer size: %v", err)
		}
	}
	// files collects only the streams that actually receive output, so the
	// per-file report at the end matches what was written.
	var files []*os.File
	if discard {
		o.ErrorStream = io.Discard
		o.InfoStream = io.Discard
	} else {
		out1, err := os.Create(filepath.Join(tmpDir, "stream-1.log"))
		if err != nil {
			b.Fatal(err)
		}
		defer out1.Close()
		out2, err := os.Create(filepath.Join(tmpDir, "stream-2.log"))
		if err != nil {
			b.Fatal(err)
		}
		defer out2.Close()

		if splitStreams {
			files = append(files, out1, out2)
			o.ErrorStream = out1
			o.InfoStream = out2
		} else {
			// Both streams share one file; out2 stays empty and is
			// not reported.
			files = append(files, out1)
			o.ErrorStream = out1
			o.InfoStream = out1
		}
	}

	klog.SetOutput(o.ErrorStream)
	// Undo the global logging configuration applied below, even if the
	// benchmark fails.
	defer func() {
		if err := logsapi.ResetForTest(nil); err != nil {
			b.Errorf("error resetting logsapi: %v", err)
		}
	}()
	if err := logsapi.ValidateAndApplyWithOptions(c, &o, featureGate); err != nil {
		b.Fatalf("Unexpected error configuring logging: %v", err)
	}

	generateOutput(b, config, files...)
}

// generateOutput starts config.workers goroutines which each emit b.N*1000
// log entries of config.messageLength bytes, mixing klog.ErrorS and
// klog.InfoS so that roughly config.errorPercentage percent of the entries
// are errors. It reports overall throughput and, for each provided file,
// its final size and the first bytes of its content.
func generateOutput(b *testing.B, config loadGeneratorConfig, files ...*os.File) {
	msg := strings.Repeat("X", config.messageLength)
	err := errors.New("fail")
	start := time.Now()

	// Scale by 1000 because "go test -bench" starts with b.N == 1, which is very low.
	n := b.N * 1000
	total := config.workers * n

	b.ResetTimer()
	var wg sync.WaitGroup
	for i := 0; i < config.workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			// acc implements the error/info ratio: each iteration adds
			// errorPercentage points and one error entry is written
			// whenever the accumulated value crosses 100.
			acc := 0.0
			for i := 0; i < n; i++ {
				if acc > 100 {
					klog.ErrorS(err, msg, "key", "value")
					acc -= 100
				} else {
					klog.InfoS(msg, "key", "value")
				}
				acc += config.errorPercentage
			}
		}()
	}
	wg.Wait()
	// Flush before stopping the timer so buffered split-stream output is
	// included in the measurement.
	klog.Flush()
	b.StopTimer()
	end := time.Now()
	duration := end.Sub(start)

	// Report messages/s instead of ns/op because "op" varies.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(float64(total)/duration.Seconds(), "msgs/s")

	// Print some information about the result.
	b.Logf("Wrote %d log entries in %s -> %.1f/s", total, duration, float64(total)/duration.Seconds())
	for i, file := range files {
		if file != nil {
			// File size = current write position (seek to end), then
			// rewind so the first bytes can be read back.
			pos, err := file.Seek(0, io.SeekEnd)
			if err != nil {
				b.Fatal(err)
			}
			if _, err := file.Seek(0, io.SeekStart); err != nil {
				b.Fatal(err)
			}
			// Echo at most 50 bytes per file, marking truncation with
			// "...". Note: this local max shadows the Go 1.21 builtin.
			max := 50
			buffer := make([]byte, max)
			actual, err := file.Read(buffer)
			if err != nil {
				if err != io.EOF {
					b.Fatal(err)
				}
				// Empty file: nothing to echo.
				buffer = nil
			}
			if actual == max {
				buffer[max-3] = '.'
				buffer[max-2] = '.'
				buffer[max-1] = '.'
			}
			b.Logf("   %d bytes to file #%d -> %.1fMiB/s (starts with: %s)", pos, i, float64(pos)/duration.Seconds()/1024/1024, string(buffer))
		}
	}
}