github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/swarm/storage/localstore/subscription_pull_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
	"bytes"
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/swarm/chunk"
)

// TestDB_SubscribePull uploads some chunks before and after
// a pull syncing subscription is created and validates that
// all addresses are received in the correct order
// for the expected proximity order bins.
func TestDB_SubscribePull(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]chunk.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// prepopulate database with some chunks
	// before the subscription
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)

	// set a timeout on the subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
		ch, stop := db.SubscribePull(ctx, bin, nil, nil)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)

	time.Sleep(200 * time.Millisecond)

	// upload some chunks after a short delay
	// to ensure that the subscription includes them
	// in a dynamic environment
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}
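
// consumePullBin is a minimal sketch, not exercised by the tests in this
// file, of how a caller might drive a single pull subscription: it reads
// chunk descriptors for one bin until the channel is closed or the context
// is canceled. consumePullBin and its handle callback are hypothetical
// names added for illustration; the SubscribePull signature is the one
// used by the tests above.
func consumePullBin(ctx context.Context, db *DB, bin uint8, handle func(ChunkDescriptor) error) error {
	// subscribe from the beginning of the bin, with no upper bound
	ch, stop := db.SubscribePull(ctx, bin, nil, nil)
	defer stop()

	for {
		select {
		case c, ok := <-ch:
			if !ok {
				// the subscription channel was closed by the database
				return nil
			}
			if err := handle(c); err != nil {
				return err
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}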

// TestDB_SubscribePull_multiple uploads chunks before and after
// multiple pull syncing subscriptions are created and
// validates that all addresses are received in the correct order
// for the expected proximity order bins.
func TestDB_SubscribePull_multiple(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]chunk.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// prepopulate database with some chunks
	// before the subscription
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 10)

	// set a timeout on the subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscriptions
	errChan := make(chan error)

	subsCount := 10

	// start a number of subscriptions,
	// all of which write every address validation error to errChan
	for j := 0; j < subsCount; j++ {
		for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
			ch, stop := db.SubscribePull(ctx, bin, nil, nil)
			defer stop()

			// receive and validate addresses from the subscription
			go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
		}
	}

	// upload some chunks just after subscribing
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 5)

	time.Sleep(200 * time.Millisecond)

	// upload some chunks after a short delay
	// to ensure that the subscriptions include them
	// in a dynamic environment
	uploadRandomChunksBin(t, db, uploader, addrs, &addrsMu, &wantedChunksCount, 3)

	// every chunk is expected once per subscription
	checkErrChan(ctx, t, errChan, wantedChunksCount*subsCount)
}

// TestDB_SubscribePull_since uploads chunks before and after
// pull syncing subscriptions are created with a since argument
// and validates that all expected addresses are received in the
// correct order for the expected proximity order bins.
func TestDB_SubscribePull_since(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]chunk.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

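	// Replace the package-level clock with a deterministic counter so that
	// every stored chunk gets a strictly increasing store timestamp, and
	// defer the restore function that setNow returns. This keeps the since
	// and until cursors unambiguous regardless of wall-clock resolution.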
	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		addrsMu.Lock()
		defer addrsMu.Unlock()

		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			ch := generateTestRandomChunk()

			err := uploader.Put(ch)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(ch.Address())

			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]chunk.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], ch.Address())
				wantedChunksCount++
			}

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        ch.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// prepopulate database with some chunks
	// before the subscription
	last := uploadRandomChunks(30, false)

	uploadRandomChunks(25, true)

	// set a timeout on the subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
		var since *ChunkDescriptor
		if c, ok := last[bin]; ok {
			since = &c
		}
		ch, stop := db.SubscribePull(ctx, bin, since, nil)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunks(15, true)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_until uploads chunks before and after
// pull syncing subscriptions are created with an until argument
// and validates that all expected addresses are received in the
// correct order for the expected proximity order bins.
func TestDB_SubscribePull_until(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]chunk.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// deterministic store timestamps, as in TestDB_SubscribePull_since
	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		addrsMu.Lock()
		defer addrsMu.Unlock()

		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			ch := generateTestRandomChunk()

			err := uploader.Put(ch)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(ch.Address())

			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]chunk.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], ch.Address())
				wantedChunksCount++
			}

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        ch.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// prepopulate database with some chunks
	// before the subscription
	last := uploadRandomChunks(30, true)

	uploadRandomChunks(25, false)

	// set a timeout on the subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
		until, ok := last[bin]
		if !ok {
			continue
		}
		ch, stop := db.SubscribePull(ctx, bin, nil, &until)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunks(15, false)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}

// TestDB_SubscribePull_sinceAndUntil uploads chunks before and
// after pull syncing subscriptions are created with since
// and until arguments, and validates that all expected addresses
// are received in the correct order for the expected proximity order bins.
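// As exercised below, the since bound is exclusive (the since chunk itself
// is not delivered) and the until bound is inclusive (the until chunk is
// the last one delivered).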
func TestDB_SubscribePull_sinceAndUntil(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]chunk.Address)
	var addrsMu sync.Mutex
	var wantedChunksCount int

	// deterministic store timestamps, as in TestDB_SubscribePull_since
	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

	uploadRandomChunks := func(count int, wanted bool) (last map[uint8]ChunkDescriptor) {
		addrsMu.Lock()
		defer addrsMu.Unlock()

		last = make(map[uint8]ChunkDescriptor)
		for i := 0; i < count; i++ {
			ch := generateTestRandomChunk()

			err := uploader.Put(ch)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(ch.Address())

			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]chunk.Address, 0)
			}
			if wanted {
				addrs[bin] = append(addrs[bin], ch.Address())
				wantedChunksCount++
			}

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        ch.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}
		return last
	}

	// no chunks from upload1 are expected, as the last chunk descriptor
	// from upload1 in each bin is used as the since argument for subscriptions
	upload1 := uploadRandomChunks(100, false)

	// all chunks from upload2 are expected, as the last chunk descriptor
	// from upload2 in each bin is used as the until argument for subscriptions
	upload2 := uploadRandomChunks(100, true)

	// upload some chunks before subscribing, but after
	// the wanted chunks
	uploadRandomChunks(8, false)

	// set a timeout on the subscription
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// collect all errors from validating addresses, even nil ones,
	// to validate the number of addresses received by the subscription
	errChan := make(chan error)

	for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
		var since *ChunkDescriptor
		if c, ok := upload1[bin]; ok {
			since = &c
		}
		until, ok := upload2[bin]
		if !ok {
			// no chunks in this bin were uploaded in upload2,
			// skip this bin from testing
			continue
		}
		ch, stop := db.SubscribePull(ctx, bin, since, &until)
		defer stop()

		// receive and validate addresses from the subscription
		go readPullSubscriptionBin(ctx, bin, ch, addrs, &addrsMu, errChan)
	}

	// upload some chunks just after subscribing
	uploadRandomChunks(15, false)

	checkErrChan(ctx, t, errChan, wantedChunksCount)
}
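
// collectPullWindow is a minimal sketch, not used by the tests, of reading a
// bounded window from a pull subscription: the descriptors stored after since
// (exclusive) up to until (inclusive), matching the bounds validated by
// TestDB_SubscribePull_sinceAndUntil. collectPullWindow is a hypothetical
// name added for illustration; it stops on its own once the until address is
// seen, without assuming that the subscription closes the channel first.
func collectPullWindow(ctx context.Context, db *DB, bin uint8, since, until *ChunkDescriptor) (addrs []chunk.Address, err error) {
	ch, stop := db.SubscribePull(ctx, bin, since, until)
	defer stop()

	for {
		select {
		case c, ok := <-ch:
			if !ok {
				return addrs, nil
			}
			addrs = append(addrs, c.Address)
			if until != nil && bytes.Equal(c.Address, until.Address) {
				// the until chunk is included in the window
				return addrs, nil
			}
		case <-ctx.Done():
			return addrs, ctx.Err()
		}
	}
}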

// uploadRandomChunksBin uploads random chunks to the database and adds them to
// the map of addresses per bin.
func uploadRandomChunksBin(t *testing.T, db *DB, uploader *Putter, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, wantedChunksCount *int, count int) {
	addrsMu.Lock()
	defer addrsMu.Unlock()

	for i := 0; i < count; i++ {
		ch := generateTestRandomChunk()

		err := uploader.Put(ch)
		if err != nil {
			t.Fatal(err)
		}

		bin := db.po(ch.Address())
		if _, ok := addrs[bin]; !ok {
			addrs[bin] = make([]chunk.Address, 0)
		}
		addrs[bin] = append(addrs[bin], ch.Address())

		*wantedChunksCount++
	}
}

// readPullSubscriptionBin is a helper function that reads all ChunkDescriptors
// from a channel and sends an error to errChan, even if it is nil, to count
// the number of ChunkDescriptors returned by the channel.
func readPullSubscriptionBin(ctx context.Context, bin uint8, ch <-chan ChunkDescriptor, addrs map[uint8][]chunk.Address, addrsMu *sync.Mutex, errChan chan error) {
	var i int // address index
	for {
		select {
		case got, ok := <-ch:
			if !ok {
				return
			}
			var err error
			addrsMu.Lock()
			if i+1 > len(addrs[bin]) {
				err = fmt.Errorf("got more chunk addresses %v than expected %v for bin %v", i+1, len(addrs[bin]), bin)
			} else {
				want := addrs[bin][i]
				if !bytes.Equal(got.Address, want) {
					err = fmt.Errorf("got chunk address %v in bin %v: %s, want %s", i, bin, got.Address.Hex(), want)
				}
			}
			addrsMu.Unlock()
			i++
			// send one and only one error per received address
			select {
			case errChan <- err:
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}

// checkErrChan expects to receive wantedChunksCount errors from errChan
// and calls t.Error for the ones that are not nil.
func checkErrChan(ctx context.Context, t *testing.T, errChan chan error, wantedChunksCount int) {
	t.Helper()

	for i := 0; i < wantedChunksCount; i++ {
		select {
		case err := <-errChan:
			if err != nil {
				t.Error(err)
			}
		case <-ctx.Done():
			t.Fatal(ctx.Err())
		}
	}
}

// TestDB_LastPullSubscriptionChunk validates that LastPullSubscriptionChunk
// returns the last chunk descriptor for proximity order bins by
// doing a few rounds of chunk uploads.
func TestDB_LastPullSubscriptionChunk(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)

	addrs := make(map[uint8][]chunk.Address)

	// deterministic store timestamps, as in TestDB_SubscribePull_since
	lastTimestamp := time.Now().UTC().UnixNano()
	var lastTimestampMu sync.RWMutex
	defer setNow(func() (t int64) {
		lastTimestampMu.Lock()
		defer lastTimestampMu.Unlock()
		lastTimestamp++
		return lastTimestamp
	})()

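	// last tracks the expected most recent chunk descriptor in each bin,
	// updated on every upload below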
	last := make(map[uint8]ChunkDescriptor)

	// do a few rounds of uploads and check if
	// the last pull subscription chunk is correct
	for _, count := range []int{1, 3, 10, 11, 100, 120} {
		// upload
		for i := 0; i < count; i++ {
			ch := generateTestRandomChunk()

			err := uploader.Put(ch)
			if err != nil {
				t.Fatal(err)
			}

			bin := db.po(ch.Address())

			if _, ok := addrs[bin]; !ok {
				addrs[bin] = make([]chunk.Address, 0)
			}
			addrs[bin] = append(addrs[bin], ch.Address())

			lastTimestampMu.RLock()
			storeTimestamp := lastTimestamp
			lastTimestampMu.RUnlock()

			last[bin] = ChunkDescriptor{
				Address:        ch.Address(),
				StoreTimestamp: storeTimestamp,
			}
		}

		// check
		for bin := uint8(0); bin <= uint8(chunk.MaxPO); bin++ {
			want, ok := last[bin]
			got, err := db.LastPullSubscriptionChunk(bin)
			if ok {
				if err != nil {
					t.Errorf("got unexpected error value %v", err)
				}
				if !bytes.Equal(got.Address, want.Address) {
					t.Errorf("got last address %s, want %s", got.Address.Hex(), want.Address.Hex())
				}
			} else {
				if err != chunk.ErrChunkNotFound {
					t.Errorf("got unexpected error value %v, want %v", err, chunk.ErrChunkNotFound)
				}
			}
		}
	}
}