gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/state/statefile/statefile_test.go (about)

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package statefile
    16  
    17  import (
    18  	"bytes"
    19  	crand "crypto/rand"
    20  	"encoding/base64"
    21  	"io"
    22  	"math/rand"
    23  	"runtime"
    24  	"testing"
    25  	"time"
    26  
    27  	"gvisor.dev/gvisor/pkg/compressio"
    28  )
    29  
    30  func randomKey() ([]byte, error) {
    31  	r := make([]byte, base64.RawStdEncoding.DecodedLen(keySize))
    32  	if _, err := io.ReadFull(crand.Reader, r); err != nil {
    33  		return nil, err
    34  	}
    35  	key := make([]byte, keySize)
    36  	base64.RawStdEncoding.Encode(key, r)
    37  	return key, nil
    38  }
    39  
// testCase describes one statefile round-trip scenario.
type testCase struct {
	name     string            // subtest name.
	data     []byte            // payload to write through the statefile.
	metadata map[string]string // extra metadata to attach; nil means none.
}
    45  
    46  func TestStatefile(t *testing.T) {
    47  	rand.Seed(time.Now().Unix())
    48  
    49  	compression := map[string]CompressionLevel{
    50  		"none":       CompressionLevelNone,
    51  		"compressed": CompressionLevelFlateBestSpeed,
    52  	}
    53  
    54  	cases := []testCase{
    55  		// Various data sizes.
    56  		{"nil", nil, nil},
    57  		{"empty", []byte(""), nil},
    58  		{"some", []byte("_"), nil},
    59  		{"one", []byte("0"), nil},
    60  		{"two", []byte("01"), nil},
    61  		{"three", []byte("012"), nil},
    62  		{"four", []byte("0123"), nil},
    63  		{"five", []byte("01234"), nil},
    64  		{"six", []byte("012356"), nil},
    65  		{"seven", []byte("0123567"), nil},
    66  		{"eight", []byte("01235678"), nil},
    67  
    68  		// Make sure we have one longer than the hash length.
    69  		{"longer than hash", []byte("012356asdjflkasjlk3jlk23j4lkjaso0d789f0aujw3lkjlkxsdf78asdful2kj3ljka78"), nil},
    70  
    71  		// Make sure we have one longer than the chunk size.
    72  		{"chunks", make([]byte, 3*compressionChunkSize), nil},
    73  		{"large", make([]byte, 30*compressionChunkSize), nil},
    74  
    75  		// Different metadata.
    76  		{"one metadata", []byte("data"), map[string]string{"foo": "bar"}},
    77  		{"two metadata", []byte("data"), map[string]string{"foo": "bar", "one": "two"}},
    78  	}
    79  
    80  	for cKey, compress := range compression {
    81  		t.Run(cKey, func(t *testing.T) {
    82  			for _, c := range cases {
    83  				// Generate a key.
    84  				integrityKey, err := randomKey()
    85  				if err != nil {
    86  					t.Errorf("can't generate key: got %v, excepted nil", err)
    87  					continue
    88  				}
    89  
    90  				// Save compression state
    91  				if c.metadata == nil {
    92  					c.metadata = map[string]string{}
    93  				}
    94  
    95  				c.metadata[compressionKey] = string(compress)
    96  
    97  				t.Run(c.name, func(t *testing.T) {
    98  					for _, key := range [][]byte{nil, integrityKey} {
    99  						t.Run("key="+string(key), func(t *testing.T) {
   100  							// Encoding happens via a buffer.
   101  							var bufEncoded bytes.Buffer
   102  							var bufDecoded bytes.Buffer
   103  
   104  							// Do all the writing.
   105  							w, err := NewWriter(&bufEncoded, key, c.metadata)
   106  							if err != nil {
   107  								t.Fatalf("error creating writer: got %v, expected nil", err)
   108  							}
   109  							if _, err := io.Copy(w, bytes.NewBuffer(c.data)); err != nil {
   110  								t.Fatalf("error during write: got %v, expected nil", err)
   111  							}
   112  
   113  							// Finish the sum.
   114  							if err := w.Close(); err != nil {
   115  								t.Fatalf("error during close: got %v, expected nil", err)
   116  							}
   117  
   118  							t.Logf("original data: %d bytes, encoded: %d bytes.",
   119  								len(c.data), len(bufEncoded.Bytes()))
   120  
   121  							// Do all the reading.
   122  							r, metadata, err := NewReader(bytes.NewReader(bufEncoded.Bytes()), key)
   123  							if err != nil {
   124  								t.Fatalf("error creating reader: got %v, expected nil", err)
   125  							}
   126  							if _, err := io.Copy(&bufDecoded, r); err != nil {
   127  								t.Fatalf("error during read: got %v, expected nil", err)
   128  							}
   129  
   130  							// Check that the data matches.
   131  							if !bytes.Equal(c.data, bufDecoded.Bytes()) {
   132  								t.Fatalf("data didn't match (%d vs %d bytes)", len(bufDecoded.Bytes()), len(c.data))
   133  							}
   134  
   135  							// Check that the metadata matches.
   136  							for k, v := range c.metadata {
   137  								nv, ok := metadata[k]
   138  								if !ok {
   139  									t.Fatalf("missing metadata: %s", k)
   140  								}
   141  								if v != nv {
   142  									t.Fatalf("mismatched metadata for %s: got %s, expected %s", k, nv, v)
   143  								}
   144  							}
   145  
   146  							// Change the data and verify that it fails.
   147  							if key != nil {
   148  								b := append([]byte(nil), bufEncoded.Bytes()...)
   149  								i := rand.Intn(len(b))
   150  								b[i]++
   151  								bufDecoded.Reset()
   152  								r, _, err = NewReader(bytes.NewReader(b), key)
   153  								if err == nil {
   154  									_, err = io.Copy(&bufDecoded, r)
   155  								}
   156  								if err == nil {
   157  									t.Errorf("got no error: expected error on data corruption in byte [%d] = %x", i, b[i])
   158  								}
   159  							}
   160  
   161  							// Change the key and verify that it fails.
   162  							newKey := integrityKey
   163  							if len(key) > 0 {
   164  								newKey = append([]byte{}, key...)
   165  								newKey[rand.Intn(len(newKey))]++
   166  							}
   167  							bufDecoded.Reset()
   168  							r, _, err = NewReader(bytes.NewReader(bufEncoded.Bytes()), newKey)
   169  							if err == nil {
   170  								_, err = io.Copy(&bufDecoded, r)
   171  							}
   172  							if err != compressio.ErrHashMismatch {
   173  								t.Errorf("got error: %v, expected ErrHashMismatch on key mismatch", err)
   174  							}
   175  						})
   176  					}
   177  				})
   178  			}
   179  		})
   180  	}
   181  }
   182  
// benchmarkDataSize is the total amount of data pushed through the
// statefile per benchmark iteration; it is also passed to b.SetBytes so
// results are reported as throughput.
const benchmarkDataSize = 100 * 1024 * 1024

// benchmark measures statefile write or read performance.
//
// size is the chunk size used for each individual Write/Read call,
// write selects writing (true) vs. reading (false), and compressible
// selects all-zero source data vs. base64-encoded random data (which
// compresses to roughly 75% at best).
func benchmark(b *testing.B, size int, write bool, compressible bool) {
	b.StopTimer()
	b.SetBytes(benchmarkDataSize)

	// Generate source data.
	var source []byte
	if compressible {
		// For compressible data, we use essentially all zeros.
		source = make([]byte, benchmarkDataSize)
	} else {
		// For non-compressible data, we use random base64 data (to
		// make it marginally compressible, a ratio of 75%).
		var sourceBuf bytes.Buffer
		bufW := base64.NewEncoder(base64.RawStdEncoding, &sourceBuf)
		// Seeded with 0 so the "random" data is identical across runs.
		bufR := rand.New(rand.NewSource(0))
		if _, err := io.CopyN(bufW, bufR, benchmarkDataSize); err != nil {
			b.Fatalf("unable to seed random data: %v", err)
		}
		source = sourceBuf.Bytes()
	}

	// Generate a random key for integrity check.
	key, err := randomKey()
	if err != nil {
		b.Fatalf("error generating key: %v", err)
	}

	// Define our benchmark functions. Prior to running the readState
	// function here, you must execute the writeState function at least
	// once (done below).
	var stateBuf bytes.Buffer
	writeState := func() {
		stateBuf.Reset()
		w, err := NewWriter(&stateBuf, key, Options{Compression: CompressionLevelFlateBestSpeed}.WriteToMetadata(map[string]string{}))
		if err != nil {
			b.Fatalf("error creating writer: %v", err)
		}
		// Write the source in size-limited chunks.
		for done := 0; done < len(source); {
			chunk := size // limit size.
			if done+chunk > len(source) {
				chunk = len(source) - done
			}
			n, err := w.Write(source[done : done+chunk])
			done += n
			// Only treat an error as fatal if no progress was made;
			// short writes with a non-nil error are retried.
			if n == 0 && err != nil {
				b.Fatalf("error during write: %v", err)
			}
		}
		if err := w.Close(); err != nil {
			b.Fatalf("error closing writer: %v", err)
		}
	}
	readState := func() {
		tmpBuf := bytes.NewBuffer(stateBuf.Bytes())
		r, _, err := NewReader(tmpBuf, key)
		if err != nil {
			b.Fatalf("error creating reader: %v", err)
		}
		// Read back into source itself to avoid allocating a second
		// 100MiB buffer; the decoded bytes are expected to equal the
		// bytes that were written, so source is effectively unchanged.
		for done := 0; done < len(source); {
			chunk := size // limit size.
			if done+chunk > len(source) {
				chunk = len(source) - done
			}
			n, err := r.Read(source[done : done+chunk])
			done += n
			// As above: fatal only when no progress was made.
			if n == 0 && err != nil {
				b.Fatalf("error during read: %v", err)
			}
		}
	}
	// Generate the state once without timing to ensure that buffers have
	// been appropriately allocated (and that readState has a state to read).
	writeState()
	if write {
		b.StartTimer()
		for i := 0; i < b.N; i++ {
			writeState()
		}
		b.StopTimer()
	} else {
		b.StartTimer()
		for i := 0; i < b.N; i++ {
			readState()
		}
		b.StopTimer()
	}
}
   272  
// The benchmarks below cover the full {write,read} x {4K,1M chunk} x
// {compressible,non-compressible} matrix; see benchmark for the
// parameter meanings.

func BenchmarkWrite4KCompressible(b *testing.B) {
	benchmark(b, 4096, true, true)
}

func BenchmarkWrite4KNoncompressible(b *testing.B) {
	benchmark(b, 4096, true, false)
}

func BenchmarkWrite1MCompressible(b *testing.B) {
	benchmark(b, 1024*1024, true, true)
}

func BenchmarkWrite1MNoncompressible(b *testing.B) {
	benchmark(b, 1024*1024, true, false)
}

func BenchmarkRead4KCompressible(b *testing.B) {
	benchmark(b, 4096, false, true)
}

func BenchmarkRead4KNoncompressible(b *testing.B) {
	benchmark(b, 4096, false, false)
}

func BenchmarkRead1MCompressible(b *testing.B) {
	benchmark(b, 1024*1024, false, true)
}

func BenchmarkRead1MNoncompressible(b *testing.B) {
	benchmark(b, 1024*1024, false, false)
}
   304  
func init() {
	// Use all available CPUs for the (parallel) compression work.
	// NOTE(review): GOMAXPROCS has defaulted to NumCPU since Go 1.5, so
	// this is a no-op on modern toolchains; kept for explicitness.
	runtime.GOMAXPROCS(runtime.NumCPU())
}