github.com/codingfuture/orig-energi3@v0.8.4/swarm/storage/localstore/subscription_pull_test.go

// Copyright 2019 The Energi Core Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the Energi Core library.
//
// The Energi Core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Energi Core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Energi Core library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/swarm/storage"
)
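
// The tests in this file exercise DB.SubscribePull(ctx, bin, since, until),
// which streams ChunkDescriptors for chunks stored in a single proximity
// order bin. A minimal consumer sketch, following how the subscription is
// used in these tests:
//
//	ch, stop := db.SubscribePull(ctx, bin, nil, nil) // nil bounds: no since/until limit
//	defer stop()
//	for c := range ch {
//		_ = c.Address // address of the next synced chunk
//	}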

// TestDB_SubscribePull uploads some chunks before and after
// a pull syncing subscription is created, and validates that
// all addresses are received in the right order
// for the expected proximity order bins.
func TestDB_SubscribePull(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// prepopulate database with some chunks
	// before the subscription
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses; nil errors are sent
	// as well, so the number of addresses received by the subscription
	// can be validated
	errChan := make(chan error)

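	// subscribe to every proximity order bin; nil since and until
	// bounds mean the subscription is not limited on either side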
	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		ch, stop := db.SubscribePull(ctx, bin, nil, nil)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribe
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)

	time.Sleep(200 * time.Millisecond)

	// upload some chunks after a short time
	// to ensure that the subscription includes them
	// in a dynamic environment
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_multiple uploads chunks before and after
// multiple pull syncing subscriptions are created, and
// validates that all addresses are received in the right order
// for the expected proximity order bins.
func TestDB_SubscribePull_multiple(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// prepopulate database with some chunks
	// before the subscription
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses; nil errors are sent
	// as well, so the number of addresses received by the subscriptions
	// can be validated
	errChan := make(chan error)

	subsCount := 10

	// start a number of subscriptions,
	// each of which writes every address validation error to errChan
	for j := 0; j < subsCount; j++ {
		for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
			ch, stop := db.SubscribePull(ctx, bin, nil, nil)
			defer stop()

			// receive and validate addresses from the subscription
			go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
		}
	}

	// upload some chunks just after subscribe
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)

	time.Sleep(200 * time.Millisecond)

	// upload some chunks after a short time
	// to ensure that the subscriptions include them
	// in a dynamic environment
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)

	checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
}

// TestDB_SubscribePull_since uploads chunks before and after
// pull syncing subscriptions are created with a since argument,
// and validates that all expected addresses are received in the
// right order for the expected proximity order bins.
func TestDB_SubscribePull_since(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
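	// replace the clock used for store timestamps with a counter that
	// is incremented on every call, so every stored chunk gets a unique,
	// strictly increasing timestamp; setNow returns a function that
	// restores the original clock, which is deferred here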
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

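	// uploadRandomChunks uploads count random chunks and returns,
	// per bin, the descriptor of the last chunk stored; wanted
	// controls whether the uploaded addresses are expected to be
	// delivered by the subscriptions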
	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			chunk := generateRandomChunk()

			err := uploader.Put(chunk)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(chunk.Address())

			addrsMu.Lock()
			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]storage.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], chunk.Address())
				wantedChunksCount++
			}
			addrsMu.Unlock()

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        chunk.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// prepopulate database with some chunks
	// before the subscription
	last := uploadRandomChunks(30, false)

	uploadRandomChunks(25, true)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses; nil errors are sent
	// as well, so the number of addresses received by the subscription
	// can be validated
	errChan := make(chan error)

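	// subscribe with the last chunk of the first (unwanted) upload as
	// since: only chunks stored after that descriptor are expected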
	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		var since *ChunkDescriptor
		if c, ok := last[bin]; ok {
			since = &c
		}
		ch, stop := db.SubscribePull(ctx, bin, since, nil)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribe
	uploadRandomChunks(15, true)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_until uploads chunks before and after
// pull syncing subscriptions are created with an until argument,
// and validates that all expected addresses are received in the
// right order for the expected proximity order bins.
func TestDB_SubscribePull_until(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			chunk := generateRandomChunk()

			err := uploader.Put(chunk)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(chunk.Address())

			addrsMu.Lock()
			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]storage.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], chunk.Address())
				wantedChunksCount++
			}
			addrsMu.Unlock()

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        chunk.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// prepopulate database with some chunks
	// before the subscription
	last := uploadRandomChunks(30, true)

	uploadRandomChunks(25, false)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses; nil errors are sent
	// as well, so the number of addresses received by the subscription
	// can be validated
	errChan := make(chan error)

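	// subscribe with the last chunk of the first (wanted) upload as
	// until: chunks stored up to and including that descriptor are
	// expected, later ones are not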
	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		until, ok := last[bin]
		if !ok {
			continue
		}
		ch, stop := db.SubscribePull(ctx, bin, nil, &until)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribe
	uploadRandomChunks(15, false)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_sinceAndUntil uploads chunks before and
// after pull syncing subscriptions are created with since
// and until arguments, and validates that all expected addresses
// are received in the right order for the expected proximity order bins.
func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]storage.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			chunk := generateRandomChunk()

			err := uploader.Put(chunk)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(chunk.Address())

			addrsMu.Lock()
			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]storage.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], chunk.Address())
				wantedChunksCount++
			}
			addrsMu.Unlock()

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        chunk.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// no chunks from upload1 are expected, as the last upload1
	// chunk in each bin is used as since for the subscriptions
	upload1 := uploadRandomChunks(100, false)

	// all chunks from upload2 are expected, as the last upload2
	// chunk in each bin is used as until for the subscriptions
	upload2 := uploadRandomChunks(100, true)

	// upload some chunks before subscribing, but after
	// the wanted chunks
	uploadRandomChunks(8, false)

	// set a timeout on subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses; nil errors are sent
	// as well, so the number of addresses received by the subscription
	// can be validated
	errChan := make(chan error)

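	// subscribe with the last upload1 chunk per bin as the exclusive
	// since bound and the last upload2 chunk per bin as the inclusive
	// until bound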
	for bin := uint8(0); bin <= uint8(storage.MaxPO); bin++ {
		var since *ChunkDescriptor
		if c, ok := upload1[bin]; ok {
			since = &c
		}
		until, ok := upload2[bin]
		if !ok {
			// no chunks in this bin were uploaded in upload2,
			// skip this bin from testing
			continue
		}
		ch, stop := db.SubscribePull(ctx, bin, since, &until)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribe
	uploadRandomChunks(15, false)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// uploadRandomChunksBin uploads random chunks to the database and adds them to
// the map of addresses per bin.
func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uint8][]storage.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
	for i := 0; i < count; i++ {
		chunk := generateRandomChunk()

		err := uploader.Put(chunk)
		if err != nil {
			t.Fatal(err)
		}

		addrsMu.Lock()
		bin := db.po(chunk.Address())
		if _, ok := addrs[bin]; !ok {
			addrs[bin] = make([]storage.Address, 0)
		}
		addrs[bin] = append(addrs[bin], chunk.Address())
		addrsMu.Unlock()

		*wantedChunksCount++
	}
}

// readPullSubscriptionBin is a helper function that reads all ChunkDescriptors from a channel and
// sends an error to errChan for each one, even if the error is nil, to count the number of
// ChunkDescriptors returned by the channel.
func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDescriptor, addrs map[uint8][]storage.Address, addrsMu *sync.Mutex, errChan chan error) {
	var i int // address index
	for {
		select {
		case got, ok := <-ch:
			if !ok {
				return
			}
			addrsMu.Lock()
			if i+1 > len(addrs[bin]) {
				wantCount := len(addrs[bin])
				addrsMu.Unlock()
				// more addresses received than expected for this bin;
				// report once and stop before indexing out of range
				errChan <- fmt.Errorf("got more chunk addresses %v than expected %v for bin %v", i+1, wantCount, bin)
				return
			}
			want := addrs[bin][i]
			addrsMu.Unlock()
			var err error
			if !bytes.Equal(got.Address, want) {
				err = fmt.Errorf("got chunk address %s at index %v in bin %v, want %s", got.Address.Hex(), i, bin, want)
			}
			i++
			// send one and only one error per received address
			errChan <- err
		case <-ctx.Done():
			return
		}
	}
}

// checkErrChan expects wantedChunksCount errors from errChan
// and calls t.Error for each one that is not nil.
func checkErrChan(ctx context.Context, t *testing.T, errChan chan error, wantedChunksCount int) {
	t.Helper()

	for i := 0; i < wantedChunksCount; i++ {
		select {
		case err := <-errChan:
			if err != nil {
				t.Error(err)
			}
		case <-ctx.Done():
			t.Fatal(ctx.Err())
		}
	}
}