k8s.io/kubernetes@v1.29.3/test/integration/logs/benchmark/benchmark_test.go

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"errors"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"k8s.io/component-base/featuregate"
	logsapi "k8s.io/component-base/logs/api/v1"
	_ "k8s.io/component-base/logs/json/register"
	"k8s.io/klog/v2"
)

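// BenchmarkEncoding replays pre-recorded log messages from the data
// directory and measures how fast the different log formats encode them.
// A typical invocation might look like this (the flags are only an
// example, not prescribed by this file):
//
//	go test -bench=BenchmarkEncoding -benchtime=10x .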
func BenchmarkEncoding(b *testing.B) {
	seen := map[string]bool{}

	// Each "data/(v[0-9]/)?*.log" file is expected to contain JSON log
	// messages. We generate one sub-benchmark per file; logging is tested
	// with the verbosity threshold taken from the directory name.
	if err := filepath.Walk("data", func(path string, info fs.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !strings.HasSuffix(path, ".log") {
			return nil
		}
		messages, stats, err := loadLog(path)
		if err != nil {
			return err
		}
		// Only print unique file statistics. They get shown for the
		// first file where new statistics are encountered. The
		// assumption here is that there are no files with different
		// content but exactly the same statistics.
		statsStr := stats.String()
		if !seen[statsStr] {
			b.Log(path + "\n" + statsStr)
			seen[statsStr] = true
		}
		b.Run(strings.TrimSuffix(strings.TrimPrefix(path, "data/"), ".log"), func(b *testing.B) {
			// Take verbosity threshold from directory, if present.
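			// For example, a file stored under "data/v5/" is benchmarked
			// with verbosity threshold 5.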
			vMatch := regexp.MustCompile(`/v(\d+)/`).FindStringSubmatch(path)
			v := 0
			if vMatch != nil {
				v, _ = strconv.Atoi(vMatch[1])
			}

			fileSizes := map[string]int{}
			test := func(b *testing.B, format string, print func(logger klog.Logger, item logMessage)) {
				state := klog.CaptureState()
				defer state.Restore()

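				// output counts the bytes emitted by the selected log
				// backend; bytesWritten is a small byte-counting io.Writer
				// helper defined elsewhere in this package.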
				var output bytesWritten
				c := logsapi.NewLoggingConfiguration()
				c.Format = format
				o := logsapi.LoggingOptions{
					ErrorStream: &output,
					InfoStream:  &output,
				}
				klog.SetOutput(&output)
				if err := logsapi.ValidateAndApplyWithOptions(c, &o, nil); err != nil {
					b.Fatalf("Unexpected error configuring logging: %v", err)
				}
				logger := klog.Background()
				b.ResetTimer()
				start := time.Now()
				total := int64(0)
				for i := 0; i < b.N; i++ {
					for _, item := range messages {
						if item.verbosity <= v {
							print(logger, item)
							total++
						}
					}
				}
				end := time.Now()
				duration := end.Sub(start)

				// Report messages/s instead of ns/op because "op" varies.
				b.ReportMetric(0, "ns/op")
				b.ReportMetric(float64(total)/duration.Seconds(), "msgs/s")
				fileSizes[filepath.Base(b.Name())] = int(output)
			}

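			// Compare three ways of emitting the same messages: classic
			// printf-style logging, structured logging with the text
			// format, and structured logging with the JSON format.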
			b.Run("printf", func(b *testing.B) {
				test(b, "text", func(_ klog.Logger, item logMessage) {
					printf(item)
				})
			})
			b.Run("structured", func(b *testing.B) {
				test(b, "text", prints)
			})
			b.Run("JSON", func(b *testing.B) {
				test(b, "json", prints)
			})

			b.Logf("%s: file sizes: %v\n", path, fileSizes)
		})
		return nil
	}); err != nil {
		b.Fatalf("reading 'data' directory: %v", err)
	}
}

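// loadGeneratorConfig describes the log load that BenchmarkWriting simulates.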
type loadGeneratorConfig struct {
	// Length of the message written in each log entry.
	messageLength int

	// Percentage of error log entries written.
	errorPercentage float64

	// Number of concurrent goroutines that generate log entries.
	workers int
}

// BenchmarkWriting simulates writing a stream that mixes info and error log
// messages at a certain ratio. In contrast to BenchmarkEncoding, this stresses
// the output handling and includes the usual additional information (caller,
// timestamp).
//
// See https://github.com/kubernetes/kubernetes/issues/107029 for the
// motivation.
func BenchmarkWriting(b *testing.B) {
	// This could be made configurable and/or we could benchmark different
	// configurations automatically.
	config := loadGeneratorConfig{
		messageLength:   300,
		errorPercentage: 1.0,
		workers:         100,
	}

	benchmarkWriting(b, config)
}

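// benchmarkWriting runs the load generator twice: once with output discarded
// and once writing to temporary files.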
func benchmarkWriting(b *testing.B, config loadGeneratorConfig) {
	b.Run("discard", func(b *testing.B) {
		benchmarkOutputFormats(b, config, true)
	})
	b.Run("tmp-files", func(b *testing.B) {
		benchmarkOutputFormats(b, config, false)
	})
}

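// benchmarkOutputFormats covers the structured text format and the JSON format.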
func benchmarkOutputFormats(b *testing.B, config loadGeneratorConfig, discard bool) {
	b.Run("structured", func(b *testing.B) {
		benchmarkOutputFormat(b, config, discard, "text")
	})
	b.Run("JSON", func(b *testing.B) {
		benchmarkOutputFormat(b, config, discard, "json")
	})
}

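// benchmarkOutputFormat covers writing to a single stream and splitting info
// and error messages into separate streams.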
func benchmarkOutputFormat(b *testing.B, config loadGeneratorConfig, discard bool, format string) {
	b.Run("single-stream", func(b *testing.B) {
		benchmarkOutputFormatStream(b, config, discard, format, false)
	})
	b.Run("split-stream", func(b *testing.B) {
		benchmarkOutputFormatStream(b, config, discard, format, true)
	})
}

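// benchmarkOutputFormatStream configures logging the same way a real
// component would, redirects the output as requested, and then runs the
// load generator.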
func benchmarkOutputFormatStream(b *testing.B, config loadGeneratorConfig, discard bool, format string, splitStreams bool) {
	tmpDir := b.TempDir()
	state := klog.CaptureState()
	defer state.Restore()

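	// Options like JSON stream splitting are still alpha or beta, so the
	// corresponding logging feature gates have to be enabled explicitly.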
	featureGate := featuregate.NewFeatureGate()
	logsapi.AddFeatureGates(featureGate)
	if err := featureGate.SetFromMap(map[string]bool{
		string(logsapi.LoggingAlphaOptions): true,
		string(logsapi.LoggingBetaOptions):  true,
	}); err != nil {
		b.Fatalf("Set feature gates: %v", err)
	}

	// Create a logging configuration using the exact same code as a normal
	// component. In order to redirect output, we provide a LoggingOptions
	// instance.
	var o logsapi.LoggingOptions
	c := logsapi.NewLoggingConfiguration()
	c.Format = format
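	// When splitting streams, the JSON backend writes error messages to one
	// stream and info messages to another, buffered stream.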
	if splitStreams {
		c.Options.JSON.SplitStream = true
		if err := c.Options.JSON.InfoBufferSize.Set("64Ki"); err != nil {
			b.Fatalf("Error setting buffer size: %v", err)
		}
	}
	var files []*os.File
	if discard {
		o.ErrorStream = io.Discard
		o.InfoStream = io.Discard
	} else {
		out1, err := os.Create(filepath.Join(tmpDir, "stream-1.log"))
		if err != nil {
			b.Fatal(err)
		}
		defer out1.Close()
		out2, err := os.Create(filepath.Join(tmpDir, "stream-2.log"))
		if err != nil {
			b.Fatal(err)
		}
		defer out2.Close()

		if splitStreams {
			files = append(files, out1, out2)
			o.ErrorStream = out1
			o.InfoStream = out2
		} else {
			files = append(files, out1)
			o.ErrorStream = out1
			o.InfoStream = out1
		}
	}

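	// Also redirect traditional klog output so that nothing from the
	// benchmark ends up on stderr.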
	klog.SetOutput(o.ErrorStream)
	if err := logsapi.ValidateAndApplyWithOptions(c, &o, featureGate); err != nil {
		b.Fatalf("Unexpected error configuring logging: %v", err)
	}

	generateOutput(b, config, files...)
}

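// generateOutput emits log entries from the configured number of worker
// goroutines, reports the achieved throughput, and prints a sample of what
// was written to each file.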
func generateOutput(b *testing.B, config loadGeneratorConfig, files ...*os.File) {
	msg := strings.Repeat("X", config.messageLength)
	err := errors.New("fail")
	start := time.Now()

	// Scale by 1000 because "go test -bench" starts with b.N == 1, which is very low.
	n := b.N * 1000
	total := config.workers * n

	b.ResetTimer()
	var wg sync.WaitGroup
	for i := 0; i < config.workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

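			// The accumulator spreads error entries evenly across the run:
			// each iteration adds errorPercentage, and an error entry is
			// written whenever the accumulated value exceeds 100.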
			acc := 0.0
			for i := 0; i < n; i++ {
				if acc > 100 {
					klog.ErrorS(err, msg, "key", "value")
					acc -= 100
				} else {
					klog.InfoS(msg, "key", "value")
				}
				acc += config.errorPercentage
			}
		}()
	}
	wg.Wait()
	klog.Flush()
	b.StopTimer()
	end := time.Now()
	duration := end.Sub(start)

	// Report messages/s instead of ns/op because "op" varies.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(float64(total)/duration.Seconds(), "msgs/s")

	// Print some information about the result.
	b.Logf("Wrote %d log entries in %s -> %.1f/s", total, duration, float64(total)/duration.Seconds())
	for i, file := range files {
		if file != nil {
			pos, err := file.Seek(0, io.SeekEnd)
			if err != nil {
				b.Fatal(err)
			}
			if _, err := file.Seek(0, io.SeekStart); err != nil {
				b.Fatal(err)
			}
			max := 50
			buffer := make([]byte, max)
			actual, err := file.Read(buffer)
			if err != nil {
				if err != io.EOF {
					b.Fatal(err)
				}
				buffer = nil
			}
			if actual == max {
				buffer[max-3] = '.'
				buffer[max-2] = '.'
				buffer[max-1] = '.'
			}
			b.Logf("      %d bytes to file #%d -> %.1fMiB/s (starts with: %s)", pos, i, float64(pos)/duration.Seconds()/1024/1024, string(buffer))
		}
	}
}