gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/renter/streambuffer_test.go

package renter

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"sync"
	"testing"
	"time"

	"github.com/opentracing/opentracing-go"
	"gitlab.com/SkynetLabs/skyd/skymodules"
	"go.sia.tech/siad/crypto"
	"go.sia.tech/siad/types"

	"gitlab.com/NebulousLabs/fastrand"
	"gitlab.com/NebulousLabs/threadgroup"
)

// mockDataSource implements a stream buffer data source that can be used to
// test the stream buffer. It's a simple in-memory buffer.
//
// staticDataLen is a separate field so we can have a sanity check on the reads
// in ReadSection of the mockDataSource without having a race condition if
// ReadSection is called after the stream is closed.
type mockDataSource struct {
	data              []byte
	staticEC          skymodules.ErasureCoder
	staticDataLen     uint64
	staticRequestSize uint64
	mu                sync.Mutex
}

// newMockDataSource will return a data source that is ready to use.
func newMockDataSource(data []byte, requestSize uint64) *mockDataSource {
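	// Record the unpadded length up front; DataSize and the bounds check in
	// ReadSection operate on this logical size even though the backing slice
	// may be padded below.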
	dataLen := uint64(len(data))

	// Add padding if necessary.
	if mod := len(data) % crypto.SegmentSize; mod != 0 {
		data = append(data, make([]byte, crypto.SegmentSize-mod)...)
	}
	return &mockDataSource{
		data:              data,
		staticEC:          skymodules.NewRSSubCodeDefault(),
		staticDataLen:     dataLen,
		staticRequestSize: requestSize,
	}
}

// DataSize implements streamBufferDataSource
func (mds *mockDataSource) DataSize() uint64 {
	mds.mu.Lock()
	defer mds.mu.Unlock()
	return mds.staticDataLen
}

// ID implements streamBufferDataSource
func (mds *mockDataSource) ID() skymodules.DataSourceID {
	mds.mu.Lock()
	defer mds.mu.Unlock()
	return skymodules.DataSourceID(crypto.HashObject(mds.data))
}

// HasRecursiveFanout returns 'false' for the mocked data source.
func (mds *mockDataSource) HasRecursiveFanout() bool {
	return false
}

// Metadata implements streamBufferDataSource
func (mds *mockDataSource) Metadata() skymodules.SkyfileMetadata {
	return skymodules.SkyfileMetadata{}
}

// RawMetadata implements streamBufferDataSource
func (mds *mockDataSource) RawMetadata() []byte {
	return nil
}

// ReadFanout implements streamBufferDataSource
func (mds *mockDataSource) ReadFanout(chunkIndex, pieceIndex uint64) ([]byte, []crypto.Hash, uint32, error) {
	panic("not implemented")
}

// ReadBaseSectorPayload implements the streamBufferDataSource interface.
func (mds *mockDataSource) ReadBaseSectorPayload(_, _ uint64) (*downloadResponse, error) {
	return nil, nil
}

// Layout implements streamBufferDataSource
func (mds *mockDataSource) Layout() skymodules.SkyfileLayout {
	return skymodules.SkyfileLayout{}
}

// RawLayout implements streamBufferDataSource
func (mds *mockDataSource) RawLayout() (skymodules.SkyfileLayout, []byte, []crypto.Hash) {
	return skymodules.SkyfileLayout{}, nil, nil
}

// RequestSize implements streamBufferDataSource.
func (mds *mockDataSource) RequestSize() uint64 {
	return mds.staticRequestSize
}

// ReadSection implements streamBufferDataSource.
func (mds *mockDataSource) ReadSection(ctx context.Context, index uint64, pricePerMS types.Currency) (<-chan *downloadResponse, error) {
	mds.mu.Lock()
	defer mds.mu.Unlock()

	// Panic if this check fails during testing - the error being returned may
	// not make it all the way back to the test program because of failovers
	// and such, but this is an incorrect call that should never be made by the
	// stream buffer.
	offset := index * mds.staticRequestSize
	if offset > mds.staticDataLen {
		panic("bad call to mocked ReadSection")
	}

	// Adjust fetchSize if necessary.
	fetchSize := mds.staticRequestSize
	if offset+mds.staticRequestSize > mds.staticDataLen {
		fetchSize = mds.staticDataLen - offset
	}

	// Create the response.
	dr, err := newTestDownloadResponse(mds.staticEC, mds.data, offset, fetchSize)
	if err != nil {
		return nil, err
	}

	// Send it.
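	// The channel is buffered so the response can be queued immediately
	// without waiting for a reader.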
	responseChan := make(chan *downloadResponse, 1)
	responseChan <- dr
	return responseChan, nil
}

// SilentClose implements streamBufferDataSource.
func (mds *mockDataSource) SilentClose() {
	mds.mu.Lock()
	mds.data = nil
	mds.mu.Unlock()
}

// Skylink implements streamBufferDataSource
func (mds *mockDataSource) Skylink() skymodules.Skylink {
	return skymodules.Skylink{}
}

// TestStreamSmoke checks basic logic on the stream to see that reading,
// seeking, and closing work.
func TestStreamSmoke(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}

	// Create a background context with a testSpan.
	ctx := opentracing.ContextWithSpan(context.Background(), testSpan())

	// Create a usable stream, starting at offset 0.
	var tg threadgroup.ThreadGroup
	data := fastrand.Bytes(15999) // 1 byte short of 1000 data sections.
	dataSectionSize := uint64(16)
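	// With 16-byte sections, the 15999 bytes of data span sections 0 through
	// 999; the final section holds only 15 bytes.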
	dataSource := newMockDataSource(data, dataSectionSize)
	dt := skymodules.NewDistributionTrackerStandard()
	sbs := newStreamBufferSet(dt, &tg, newNoOpLRU())
	stream := sbs.callNewStream(ctx, dataSource, 0, 0, types.ZeroCurrency, 0, false)

	// Check that there is one reference in the stream buffer.
	sbs.mu.Lock()
	refs := stream.staticStreamBuffer.externRefCount
	sbs.mu.Unlock()
	if refs != 1 {
		t.Fatal("bad")
	}
	// Create a new stream from an id, check that the ref count goes up.
	streamFromID, exists := sbs.callNewStreamFromID(ctx, dataSource.ID(), 0, 0, 0, false)
	if !exists {
		t.Fatal("bad")
	}
	sbs.mu.Lock()
	refs = stream.staticStreamBuffer.externRefCount
	sbs.mu.Unlock()
	if refs != 2 {
		t.Fatal("bad")
	}
	// Create a second, different data source with the same id and try to use
	// that.
	dataSource2 := newMockDataSource(data, dataSectionSize)
	repeatStream := sbs.callNewStream(ctx, dataSource2, 0, 0, types.ZeroCurrency, 0, false)
	sbs.mu.Lock()
	refs = stream.staticStreamBuffer.externRefCount
	sbs.mu.Unlock()
	if refs != 3 {
		t.Fatal("bad")
	}
	err := repeatStream.Close()
	if err != nil {
		t.Fatal(err)
	}
	err = streamFromID.Close()
	if err != nil {
		t.Fatal(err)
	}
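	// Wait out keepOldBuffersDuration (plus some slack) so the two closed
	// streams get garbage collected and the reference count drops back to 1.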
	time.Sleep(keepOldBuffersDuration)
	time.Sleep(keepOldBuffersDuration / 3)
	sbs.mu.Lock()
	refs = stream.staticStreamBuffer.externRefCount
	sbs.mu.Unlock()
	if refs != 1 {
		t.Fatal("bad")
	}

	// Perform the ritual that the http.ResponseWriter performs - seek to front,
	// seek to back, read 512 bytes, seek to front, read a bigger chunk of data.
	offset, err := stream.Seek(0, io.SeekStart)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 0 {
		t.Fatal("bad")
	}
	offset, err = stream.Seek(0, io.SeekEnd)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 15999 {
		t.Fatal("bad", offset)
	}
	offset, err = stream.Seek(0, io.SeekStart)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 0 {
		t.Fatal("bad")
	}
	// Check that the right pieces have been buffered. 4 sections total should
	// be in the buffer.
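	// After seeking back to offset 0, the buffer should be prefetching from
	// the front of the file, so sections 0-3 should be present and section 4
	// should not.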
	streamBuf := stream.staticStreamBuffer
	streamBuf.mu.Lock()
	_, exists0 := streamBuf.dataSections[0]
	_, exists1 := streamBuf.dataSections[1]
	_, exists2 := streamBuf.dataSections[2]
	_, exists3 := streamBuf.dataSections[3]
	_, exists4 := streamBuf.dataSections[4]
	streamBuf.mu.Unlock()
	if !exists0 || !exists1 || !exists2 || !exists3 || exists4 {
		t.Fatal("bad")
	}
	// Check every data section has a unique identifier
	streamBuf.mu.Lock()
	seen := make(map[string]struct{})
	for _, ds := range streamBuf.dataSections {
		if ds.staticID == "" {
			t.Fatal("bad")
		}
		if _, exists := seen[ds.staticID]; exists {
			t.Fatal("bad")
		}
		seen[ds.staticID] = struct{}{}
	}
	if len(seen) < 2 {
		t.Fatal("bad") // quickly assert there was more than one data section
	}
	streamBuf.mu.Unlock()

	buf := make([]byte, 512)
	bytesRead, err := io.ReadFull(stream, buf)
	if err != nil {
		t.Fatal(err)
	}
	if bytesRead != 512 {
		t.Fatal("bad", bytesRead)
	}
	if !bytes.Equal(buf, data[:512]) {
		t.Fatal("bad")
	}
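	// Reading 512 bytes at 16 bytes per section advances the read position to
	// section 32, so sections 32-35 should now be buffered and section 36
	// should not.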
	streamBuf.mu.Lock()
	_, exists0 = streamBuf.dataSections[32]
	_, exists1 = streamBuf.dataSections[33]
	_, exists2 = streamBuf.dataSections[34]
	_, exists3 = streamBuf.dataSections[35]
	_, exists4 = streamBuf.dataSections[36]
	streamBuf.mu.Unlock()
	if !exists0 || !exists1 || !exists2 || !exists3 || exists4 {
		t.Fatal("bad")
	}
	offset, err = stream.Seek(0, io.SeekStart)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 0 {
		t.Fatal("bad")
	}
	streamBuf.mu.Lock()
	_, exists0 = streamBuf.dataSections[0]
	_, exists1 = streamBuf.dataSections[1]
	_, exists2 = streamBuf.dataSections[2]
	_, exists3 = streamBuf.dataSections[3]
	_, exists4 = streamBuf.dataSections[4]
	streamBuf.mu.Unlock()
	if !exists0 || !exists1 || !exists2 || !exists3 || exists4 {
		t.Fatal("bad")
	}
	buf = make([]byte, 1000)
	bytesRead, err = io.ReadFull(stream, buf)
	if err != nil {
		t.Fatal(err)
	}
	if bytesRead != 1000 {
		t.Fatal("bad")
	}
	if !bytes.Equal(buf, data[:1000]) {
		t.Fatal("bad")
	}
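	// 1000 bytes from offset 0 ends partway through section 62 (1000/16 =
	// 62.5), so sections 62-65 should be buffered and section 66 should not.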
	streamBuf.mu.Lock()
	_, exists0 = streamBuf.dataSections[62]
	_, exists1 = streamBuf.dataSections[63]
	_, exists2 = streamBuf.dataSections[64]
	_, exists3 = streamBuf.dataSections[65]
	_, exists4 = streamBuf.dataSections[66]
	streamBuf.mu.Unlock()
	if !exists0 || !exists1 || !exists2 || !exists3 || exists4 {
		t.Fatal("bad")
	}
	// Seek to near the end to see that the buffer caches the correct
	// sections.
	offset, err = stream.Seek(35, io.SeekEnd)
	if err != nil {
		t.Fatal(err)
	}
	streamBuf.mu.Lock()
	_, existsi := streamBuf.dataSections[996] // Should not be buffered
	_, exists0 = streamBuf.dataSections[997]  // Up to byte 15968
	_, exists1 = streamBuf.dataSections[998]  // Up to byte 15984
	_, exists2 = streamBuf.dataSections[999]  // Up to byte 16000
	_, exists3 = streamBuf.dataSections[1000] // Beyond end of file.
	streamBuf.mu.Unlock()
	if existsi || !exists0 || !exists1 || !exists2 || exists3 {
		t.Fatal("bad")
	}
	// Seek back to the beginning one more time to do a full read of the data.
	offset, err = stream.Seek(0, io.SeekStart)
	if err != nil {
		t.Fatal(err)
	}
	if offset != 0 {
		t.Fatal("bad")
	}
	// Read all of the data.
	fullRead, err := ioutil.ReadAll(stream)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(fullRead, data) {
		t.Fatal("bad")
	}

	// The reads should have exceeded the cache size. Check that the number of
	// nodes in the stream buffer and lru match what is expected.
	expectedNodes := int(bytesBufferedPerStream / dataSectionSize)
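	// expectedNodes is the per-stream cache capacity measured in data
	// sections; after the full read both the shared buffer and this stream's
	// lru should have been trimmed back to exactly that many entries.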
	if len(stream.staticStreamBuffer.dataSections) != expectedNodes {
		t.Fatal("bad")
	}
	if len(stream.lru.nodes) != expectedNodes {
		t.Fatal("bad")
	}

	// Open up a second stream and read the front of the data. This should
	// cause the stream buffer to have a full cache for each of stream and
	// stream2, since there is no overlap between their lrus.
	//
	// NOTE: Need to use a second data source, because it'll be closed when
	// it's not used. The stream buffer expects that when multiple data sources
	// have the same ID, they are actually separate objects which need to be
	// closed individually.
	dataSource3 := newMockDataSource(data, dataSectionSize)
	stream2 := sbs.callNewStream(ctx, dataSource3, 0, 0, types.ZeroCurrency, 0, false)
	bytesRead, err = io.ReadFull(stream2, buf)
	if err != nil {
		t.Fatal(err)
	}
	if bytesRead != len(buf) {
		t.Fatal("bad")
	}
	if !bytes.Equal(buf, data[:len(buf)]) {
		t.Fatal("bad")
	}
	if len(stream.staticStreamBuffer.dataSections) != (expectedNodes * 2) {
		t.Fatal("bad")
	}
	if len(stream2.staticStreamBuffer.dataSections) != (expectedNodes * 2) {
		t.Fatal("bad")
	}
	if len(stream.lru.nodes) != expectedNodes {
		t.Fatal("bad")
	}
	if len(stream2.lru.nodes) != expectedNodes {
		t.Fatal("bad")
	}

	// Read the full data on stream2. This should cause the lrus to match, and
	// therefore the stream buffer should only hold one set of data.
	fullRead, err = ioutil.ReadAll(stream2)
	if err != nil {
		t.Fatal(err)
	}
	if len(stream.staticStreamBuffer.dataSections) != expectedNodes {
		t.Fatal("bad")
	}
	if len(stream2.staticStreamBuffer.dataSections) != expectedNodes {
		t.Fatal("bad")
	}
	if len(stream.lru.nodes) != expectedNodes {
		t.Fatal("bad")
	}
	if len(stream2.lru.nodes) != expectedNodes {
		t.Fatal("bad")
	}

	// Close the stream and see that all resources for the stream are dropped,
	// but that the stream buffer sticks around.
	err = stream.Close()
	if err != nil {
		t.Fatal(err)
	}
	time.Sleep(keepOldBuffersDuration / 3)
	// Check that the stream hangs around a bit after close.
	stream.lru.mu.Lock()
	if len(stream.lru.nodes) == 0 {
		stream.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream.lru.head == nil {
		stream.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream.lru.tail == nil {
		stream.lru.mu.Unlock()
		t.Fatal("bad")
	}
	stream.lru.mu.Unlock()
	// Sleep until the stream is cleared. The data source should still be open
	// because stream2 still references the stream buffer.
	time.Sleep(keepOldBuffersDuration)
	dataSource.mu.Lock()
	if dataSource.data == nil {
		dataSource.mu.Unlock()
		t.Fatal("bad")
	}
	dataSource.mu.Unlock()
	stream.lru.mu.Lock()
	if len(stream.lru.nodes) != 0 {
		stream.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream.lru.head != nil {
		stream.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream.lru.tail != nil {
		stream.lru.mu.Unlock()
		t.Fatal("bad")
	}
	stream.lru.mu.Unlock()
	if len(sbs.streams) != 1 {
		t.Fatal("bad")
	}
	if len(stream.staticStreamBuffer.dataSections) != expectedNodes {
		t.Fatal("bad")
	}

	// Close the second stream and see that all resources are dropped.
	err = stream2.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Sleep until the stream is cleared.
	time.Sleep(keepOldBuffersDuration / 3)
	time.Sleep(keepOldBuffersDuration)
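	// With no streams referencing the buffer anymore, the data source should
	// have been silently closed, which sets its data to nil.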
	dataSource.mu.Lock()
	if dataSource.data != nil {
		dataSource.mu.Unlock()
		t.Fatal("bad")
	}
	dataSource.mu.Unlock()
	stream2.lru.mu.Lock()
	if len(stream2.lru.nodes) != 0 {
		stream2.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream2.lru.head != nil {
		stream2.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream2.lru.tail != nil {
		stream2.lru.mu.Unlock()
		t.Fatal("bad")
	}
	stream2.lru.mu.Unlock()
	if len(sbs.streams) != 0 {
		t.Fatal("bad")
	}
	if len(stream2.staticStreamBuffer.dataSections) != 0 {
		t.Fatal("bad")
	}

	// Check that if the tg is stopped, the stream closes immediately.
	dataSource4 := newMockDataSource(data, dataSectionSize)
	stream3 := sbs.callNewStream(ctx, dataSource4, 0, 0, types.ZeroCurrency, 0, false)
	bytesRead, err = io.ReadFull(stream3, buf)
	if err != nil {
		t.Fatal(err)
	}
	err = stream3.Close()
	if err != nil {
		t.Fatal(err)
	}
	err = tg.Stop()
	if err != nil {
		t.Fatal(err)
	}
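	// Only sleep for a fraction of keepOldBuffersDuration; stopping the
	// threadgroup should tear everything down immediately instead of waiting
	// out the usual grace period.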
	time.Sleep(keepOldBuffersDuration / 5)
	// dataSource4 backs stream3 and should have been silently closed.
	dataSource4.mu.Lock()
	if dataSource4.data != nil {
		dataSource4.mu.Unlock()
		t.Fatal("bad")
	}
	dataSource4.mu.Unlock()
	stream3.lru.mu.Lock()
	if len(stream3.lru.nodes) != 0 {
		stream3.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream3.lru.head != nil {
		stream3.lru.mu.Unlock()
		t.Fatal("bad")
	}
	if stream3.lru.tail != nil {
		stream3.lru.mu.Unlock()
		t.Fatal("bad")
	}
	stream3.lru.mu.Unlock()
	sbs.mu.Lock()
	if len(sbs.streams) != 0 {
		sbs.mu.Unlock()
		t.Fatal("bad")
	}
	sbs.mu.Unlock()
	if len(stream3.staticStreamBuffer.dataSections) != 0 {
		t.Fatal("bad")
	}
}