github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/fs/proc/seqfile/seqfile_test.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package seqfile

import (
	"bytes"
	"fmt"
	"io"
	"testing"

	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/sentry/contexttest"
	"github.com/SagerNet/gvisor/pkg/sentry/fs"
	"github.com/SagerNet/gvisor/pkg/sentry/fs/ramfs"
	"github.com/SagerNet/gvisor/pkg/usermem"
)

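// seqTest implements SeqSource for the tests in this file. The update field
// controls what NeedsUpdate reports to the seqfile code.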
type seqTest struct {
	actual []SeqData
	update bool
}

func (s *seqTest) Init() {
	var sq []SeqData
	// Create some SeqData: ten 10-byte rows, each filled with its row index,
	// so file offset off falls in row off/10 at column off%10.
	for i := 0; i < 10; i++ {
		var b []byte
		for j := 0; j < 10; j++ {
			b = append(b, byte(i))
		}
		sq = append(sq, SeqData{
			Buf:    b,
			Handle: &testHandle{i: i},
		})
	}
	s.actual = sq
}

// NeedsUpdate reports whether we need to update the data we've previously read.
func (s *seqTest) NeedsUpdate(int64) bool {
	return s.update
}

// ReadSeqFileData returns the SeqData entries whose handles are greater than
// the given handle, or all entries if handle is nil. The second return value
// is always 0.
func (s *seqTest) ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64) {
	if handle == nil {
		return s.actual, 0
	}
	h := *handle.(*testHandle)
	var ret []SeqData
	for _, b := range s.actual {
		// We want the next one.
		h2 := *b.Handle.(*testHandle)
		if h2.i > h.i {
			ret = append(ret, b)
		}
	}
	return ret, 0
}

// flatten concatenates a slice of byte slices into one slice.
func flatten(buf ...[]byte) []byte {
	var flat []byte
	for _, b := range buf {
		flat = append(flat, b...)
	}
	return flat
}

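// testHandle is the SeqHandle attached to each row: it records the row's index
// so that ReadSeqFileData can resume from the entry after it.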
type testHandle struct {
	i int
}

type testTable struct {
	offset         int64
	readBufferSize int
	expectedData   []byte
	expectedError  error
}

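// runTableTests opens the file read-only for each entry and issues a single
// Preadv of readBufferSize bytes at the absolute offset, checking both the
// returned data and the returned error.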
func runTableTests(ctx context.Context, table []testTable, dirent *fs.Dirent) error {
	for _, tt := range table {
		file, err := dirent.Inode.InodeOperations.GetFile(ctx, dirent, fs.FileFlags{Read: true})
		if err != nil {
			return fmt.Errorf("GetFile returned error: %v", err)
		}

		data := make([]byte, tt.readBufferSize)
		resultLen, err := file.Preadv(ctx, usermem.BytesIOSequence(data), tt.offset)
		if err != tt.expectedError {
			return fmt.Errorf("t.Preadv(len: %v, offset: %v) (error) => %v expected %v", tt.readBufferSize, tt.offset, err, tt.expectedError)
		}
		expectedLen := int64(len(tt.expectedData))
		if resultLen != expectedLen {
			// Report the size mismatch so the caller can log which read failed.
			return fmt.Errorf("t.Preadv(len: %v, offset: %v) (size) => %v expected %v", tt.readBufferSize, tt.offset, resultLen, expectedLen)
		}
		if !bytes.Equal(data[:expectedLen], tt.expectedData) {
			return fmt.Errorf("t.Preadv(len: %v, offset: %v) (data) => %v expected %v", tt.readBufferSize, tt.offset, data[:expectedLen], tt.expectedData)
		}
	}
	return nil
}

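// TestSeqFile reads the file at a range of offsets and buffer sizes, first
// with NeedsUpdate reporting true and then with it reporting false.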
func TestSeqFile(t *testing.T) {
	testSource := &seqTest{}
	testSource.Init()

	// Create a file that can be R/W.
	ctx := contexttest.Context(t)
	m := fs.NewPseudoMountSource(ctx)
	contents := map[string]*fs.Inode{
		"foo": NewSeqFileInode(ctx, testSource, m),
	}
	root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))

	// How about opening it?
	inode := fs.NewInode(ctx, root, m, fs.StableAttr{Type: fs.Directory})
	dirent2, err := root.Lookup(ctx, inode, "foo")
	if err != nil {
		t.Fatalf("failed to walk to foo for n2: %v", err)
	}
	n2 := dirent2.Inode.InodeOperations
	file2, err := n2.GetFile(ctx, dirent2, fs.FileFlags{Read: true, Write: true})
	if err != nil {
		t.Fatalf("GetFile returned error: %v", err)
	}

	// Writing? The seqfile does not support writes, so this must fail.
	if _, err := file2.Writev(ctx, usermem.BytesIOSequence([]byte("test"))); err == nil {
		t.Fatalf("managed to write to n2: %v", err)
	}

	// How about reading?
	dirent3, err := root.Lookup(ctx, inode, "foo")
	if err != nil {
		t.Fatalf("failed to walk to foo: %v", err)
	}
	n3 := dirent3.Inode.InodeOperations
	if n2 != n3 {
		t.Error("got n2 != n3, want same")
	}

	// Have NeedsUpdate report true so the data is regenerated on every read.
	testSource.update = true

	table := []testTable{
		// Read past the end.
		{100, 4, []byte{}, io.EOF},
		{110, 4, []byte{}, io.EOF},
		{200, 4, []byte{}, io.EOF},
		// Read a truncated first line.
		{0, 4, testSource.actual[0].Buf[:4], nil},
		// Read the whole first line.
		{0, 10, testSource.actual[0].Buf, nil},
		// Read the whole first line + 5 bytes of the second line.
		{0, 15, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:5]), nil},
		// First 4 bytes of the second line.
		{10, 4, testSource.actual[1].Buf[:4], nil},
		// Read the first two lines.
		{0, 20, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf), nil},
		// Read three lines.
		{0, 30, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf), nil},
		// Read everything, but use a bigger buffer than necessary.
		{0, 150, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf, testSource.actual[2].Buf, testSource.actual[3].Buf, testSource.actual[4].Buf, testSource.actual[5].Buf, testSource.actual[6].Buf, testSource.actual[7].Buf, testSource.actual[8].Buf, testSource.actual[9].Buf), nil},
		// Read the last 3 bytes.
		{97, 10, testSource.actual[9].Buf[7:], nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
	}

	// Disable updates and do it again.
	testSource.update = false
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed with testSource.update = %v: %v", testSource.update, err)
	}
}

// Test that we behave correctly when the file is updated.
func TestSeqFileFileUpdated(t *testing.T) {
	testSource := &seqTest{}
	testSource.Init()
	testSource.update = true

	// Create a file that can be R/W.
	ctx := contexttest.Context(t)
	m := fs.NewPseudoMountSource(ctx)
	contents := map[string]*fs.Inode{
		"foo": NewSeqFileInode(ctx, testSource, m),
	}
	root := ramfs.NewDir(ctx, contents, fs.RootOwner, fs.FilePermsFromMode(0777))

	// How about opening it?
	inode := fs.NewInode(ctx, root, m, fs.StableAttr{Type: fs.Directory})
	dirent2, err := root.Lookup(ctx, inode, "foo")
	if err != nil {
		t.Fatalf("failed to walk to foo for dirent2: %v", err)
	}

	table := []testTable{
		{0, 16, flatten(testSource.actual[0].Buf, testSource.actual[1].Buf[:6]), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed: %v", err)
	}
	// Delete the first entry.
	cut := testSource.actual[0].Buf
	testSource.actual = testSource.actual[1:]

	table = []testTable{
		// Try reading buffer 0 with an offset. This will not delete the old data.
		{1, 5, cut[1:6], nil},
		// Reset our file by reading at offset 0.
		{0, 10, testSource.actual[0].Buf, nil},
		{16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
		// Read the same data a second time.
		{16, 14, flatten(testSource.actual[1].Buf[6:], testSource.actual[2].Buf), nil},
		// Read the following two lines.
		{30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after removing first entry: %v", err)
	}

	// Add a new duplicate line in the middle (6666...).
	after := testSource.actual[5:]
	testSource.actual = testSource.actual[:4]
	// Note that the list must remain sorted.
	testSource.actual = append(testSource.actual, after[0])
	testSource.actual = append(testSource.actual, after...)

	table = []testTable{
		{50, 20, flatten(testSource.actual[4].Buf, testSource.actual[5].Buf), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after adding middle entry: %v", err)
	}
	// This will be used in later checks.
	oldTestData := testSource.actual

	// Delete everything.
	testSource.actual = testSource.actual[:0]
	table = []testTable{
		{20, 20, []byte{}, io.EOF},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after removing all entries: %v", err)
	}
	// Restore some of the data.
	testSource.actual = oldTestData[:1]
	table = []testTable{
		{6, 20, testSource.actual[0].Buf[6:], nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after adding first entry back: %v", err)
	}

	// Re-extend the data.
	testSource.actual = oldTestData
	table = []testTable{
		{30, 20, flatten(testSource.actual[3].Buf, testSource.actual[4].Buf), nil},
	}
	if err := runTableTests(ctx, table, dirent2); err != nil {
		t.Errorf("runTableTest failed after extending testSource: %v", err)
	}
}