github.com/susy-go/susy-graviton@v0.0.0-20190614130430-36cddae42305/swarm/storage/localstore/subscription_pull_test.go

// Copyleft 2019 The susy-graviton Authors
// This file is part of the susy-graviton library.
//
// The susy-graviton library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The susy-graviton library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the susy-graviton library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/susy-go/susy-graviton/swarm/storage"
)

// TestDB_SubscribePull uploads some chunks before and after
// the pull syncing subscription is created and validates that
// all addresses are received in the right order
// for the expected proximity order bins.
func TestDB_SubscribePull(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

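	// addrs collects the expected chunk addresses for each proximity order bin
	// and is shared, under addrsMu, between the uploading code and the
	// subscription readers; wantedChunksCount tracks how many addresses the
	// subscriptions are expected to deliver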
	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// prepopulate database with some chunks
	// before the subscription
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		ch, stop := db.SubscribePull(ctx, bin, nil, nil)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)

	time.Sleep(200 * time.Millisecond)

	// upload some chunks after a short time
	// to ensure that the subscription will include them
	// in a dynamic environment
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_multiple uploads chunks before and after
// multiple pull syncing subscriptions are created and
// validates that all addresses are received in the right order
// for the expected proximity order bins.
func TestDB_SubscribePull_multiple(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// prepopulate database with some chunks
	// before the subscription
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	subsCount := 10

	// start a number of subscriptions,
	// each of which writes every address error to errChan
	for j := 0; j < subsCount; j++ {
		for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
			ch, stop := db.SubscribePull(ctx, bin, nil, nil)
			defer stop()

			// receive and validate addresses from the subscription
			go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
		}
	}

	// upload some chunks just after subscribing
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)

	time.Sleep(200 * time.Millisecond)

	// upload some chunks after a short time
	// to ensure that the subscription will include them
	// in a dynamic environment
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)

	checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
}

// TestDB_SubscribePull_since uploads chunks before and after
// pull syncing subscriptions are created with a since argument
// and validates that all expected addresses are received in the
// right order for the expected proximity order bins.
func TestDB_SubscribePull_since(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

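	// override the package's now function so that every stored chunk gets a
	// strictly increasing store timestamp; the function returned by setNow
	// restores the previous behaviour and is deferred until the test ends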
	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

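	// uploadRandomChunks uploads count random chunks and returns the
	// ChunkDescriptor of the last chunk stored in each bin; when wanted is
	// true, the uploaded addresses are also added to the expected addrs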
	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			chunk := generateRandomChunk()

			err := uploader.Put(chunk)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(chunk.Address())

			addrsMu.Lock()
			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]storage.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], chunk.Address())
				wantedChunksCount++
			}
			addrsMu.Unlock()

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        chunk.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// prepopulate database with some chunks
	// before the subscription
	last := uploadRandomChunks(30, false)

	uploadRandomChunks(25, true)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		var since *ChunkDescriptor
		if c, ok := last[bin]; ok {
			since = &c
		}
		ch, stop := db.SubscribePull(ctx, bin, since, nil)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunks(15, true)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_until uploads chunks before and after
// pull syncing subscriptions are created with an until argument
// and validates that all expected addresses are received in the
// right order for the expected proximity order bins.
func TestDB_SubscribePull_until(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			chunk := generateRandomChunk()

			err := uploader.Put(chunk)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(chunk.Address())

			addrsMu.Lock()
			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]storage.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], chunk.Address())
				wantedChunksCount++
			}
			addrsMu.Unlock()

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        chunk.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// prepopulate database with some chunks
	// before the subscription
	last := uploadRandomChunks(30, true)

	uploadRandomChunks(25, false)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		until, ok := last[bin]
		if !ok {
			continue
		}
		ch, stop := db.SubscribePull(ctx, bin, nil, &until)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunks(15, false)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_sinceAndUntil uploads chunks before and
// after pull syncing subscriptions are created with since
// and until arguments, and validates that all expected addresses
// are received in the right order for the expected proximity order bins.
func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			chunk := generateRandomChunk()

			err := uploader.Put(chunk)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(chunk.Address())

			addrsMu.Lock()
			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]storage.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], chunk.Address())
				wantedChunksCount++
			}
			addrsMu.Unlock()

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        chunk.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// none of the chunks from upload1 are expected, as the last upload1
	// chunk in each bin is used as the since argument for the subscriptions
	upload1 := uploadRandomChunks(100, false)

	// all chunks from upload2 are expected, as the last upload2
	// chunk in each bin is used as the until argument for the subscriptions
	upload2 := uploadRandomChunks(100, true)

	// upload some chunks before subscribing but after
	// the wanted chunks
	uploadRandomChunks(8, false)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		var since *ChunkDescriptor
		if c, ok := upload1[bin]; ok {
			since = &c
		}
		until, ok := upload2[bin]
		if !ok {
			// no chunks in this bin were uploaded in upload2,
			// skip this bin from testing
			continue
		}
		ch, stop := db.SubscribePull(ctx, bin, since, &until)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunks(15, false)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// uploadRandomChunksBin uploads random chunks to the database and adds them to
// the map of addresses per bin.
func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uint8][]storage.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
	for i := 0; i < count; i++ {
		chunk := generateRandomChunk()

		err := uploader.Put(chunk)
		if err != nil {
			t.Fatal(err)
		}

		addrsMu.Lock()
		bin := db.po(chunk.Address())
		if _, ok := addrs[bin]; !ok {
			addrs[bin] = make([]storage.Address, 0)
		}
		addrs[bin] = append(addrs[bin], chunk.Address())
		addrsMu.Unlock()

		*wantedChunksCount++
	}
}

// readPullSubscriptionBin is a helper function that reads all ChunkDescriptors from a channel and
// sends an error to errChan, even if it is nil, to count the number of ChunkDescriptors
// returned by the channel.
func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDescriptor, addrs map[uint8][]storage.Address, addrsMu *sync.Mutex, errChan chan error) {
	var i int // address index
	for {
		select {
		case got, ok := <-ch:
			if !ok {
				return
			}
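			// the expected addresses for this bin may still be appended by
			// concurrent uploads, so look up the current slice under the
			// lock for every received descriptor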
			addrsMu.Lock()
			if i+1 > len(addrs[bin]) {
				// more addresses were received than are expected so far;
				// report it and skip the out-of-range index
				wantLen := len(addrs[bin])
				addrsMu.Unlock()
				errChan <- fmt.Errorf("got more chunk addresses %v, than expected %v, for bin %v", i+1, wantLen, bin)
				i++
				continue
			}
			want := addrs[bin][i]
			addrsMu.Unlock()
			var err error
			if !bytes.Equal(got.Address, want) {
				err = fmt.Errorf("got chunk address %v in bin %v: %s, want %s", i, bin, got.Address.Hex(), want)
			}
			i++
			// send one and only one error per received address
			errChan <- err
		case <-ctx.Done():
			return
		}
	}
}

// checkErrChan expects wantedChunksCount errors from errChan
// and calls t.Error for the ones that are not nil.
func checkErrChan(ctx context.Context, t *testing.T, errChan chan error, wantedChunksCount int) {
	t.Helper()

	for i := 0; i < wantedChunksCount; i++ {
		select {
		case err := <-errChan:
			if err != nil {
				t.Error(err)
			}
		case <-ctx.Done():
			t.Fatal(ctx.Err())
		}
	}
}