github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/swarm/network/stream/intervals_test.go (about)

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 12:09:48</date>
    10  //</624342675616698368>
    11  
// NOTE: the original license header of this file was stripped to empty
// comment lines in this copy; see the upstream go-ethereum repository
// for the canonical LGPL license text.
    27  
    28  package stream
    29  
    30  import (
    31  	"context"
    32  	crand "crypto/rand"
    33  	"encoding/binary"
    34  	"fmt"
    35  	"io"
    36  	"os"
    37  	"sync"
    38  	"testing"
    39  	"time"
    40  
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/node"
    43  	"github.com/ethereum/go-ethereum/p2p"
    44  	"github.com/ethereum/go-ethereum/p2p/discover"
    45  	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
    46  	"github.com/ethereum/go-ethereum/swarm/network"
    47  	"github.com/ethereum/go-ethereum/swarm/network/simulation"
    48  	"github.com/ethereum/go-ethereum/swarm/state"
    49  	"github.com/ethereum/go-ethereum/swarm/storage"
    50  )
    51  
    52  func TestIntervals(t *testing.T) {
    53  	testIntervals(t, true, nil, false)
    54  	testIntervals(t, false, NewRange(9, 26), false)
    55  	testIntervals(t, true, NewRange(9, 26), false)
    56  
    57  	testIntervals(t, true, nil, true)
    58  	testIntervals(t, false, NewRange(9, 26), true)
    59  	testIntervals(t, true, NewRange(9, 26), true)
    60  }
    61  
    62  func testIntervals(t *testing.T, live bool, history *Range, skipCheck bool) {
    63  	nodes := 2
    64  	chunkCount := dataChunkCount
    65  	externalStreamName := "externalStream"
    66  	externalStreamSessionAt := uint64(50)
    67  	externalStreamMaxKeys := uint64(100)
    68  
    69  	sim := simulation.New(map[string]simulation.ServiceFunc{
    70  		"intervalsStreamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {
    71  
    72  			id := ctx.Config.ID
    73  			addr := network.NewAddrFromNodeID(id)
    74  			store, datadir, err := createTestLocalStorageForID(id, addr)
    75  			if err != nil {
    76  				return nil, nil, err
    77  			}
    78  			bucket.Store(bucketKeyStore, store)
    79  			cleanup = func() {
    80  				store.Close()
    81  				os.RemoveAll(datadir)
    82  			}
    83  			localStore := store.(*storage.LocalStore)
    84  			db := storage.NewDBAPI(localStore)
    85  			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
    86  			delivery := NewDelivery(kad, db)
    87  
    88  			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
    89  				SkipCheck: skipCheck,
    90  			})
    91  			bucket.Store(bucketKeyRegistry, r)
    92  
    93  			r.RegisterClientFunc(externalStreamName, func(p *Peer, t string, live bool) (Client, error) {
    94  				return newTestExternalClient(db), nil
    95  			})
    96  			r.RegisterServerFunc(externalStreamName, func(p *Peer, t string, live bool) (Server, error) {
    97  				return newTestExternalServer(t, externalStreamSessionAt, externalStreamMaxKeys, nil), nil
    98  			})
    99  
   100  			fileStore := storage.NewFileStore(localStore, storage.NewFileStoreParams())
   101  			bucket.Store(bucketKeyFileStore, fileStore)
   102  
   103  			return r, cleanup, nil
   104  
   105  		},
   106  	})
   107  	defer sim.Close()
   108  
   109  	log.Info("Adding nodes to simulation")
   110  	_, err := sim.AddNodesAndConnectChain(nodes)
   111  	if err != nil {
   112  		t.Fatal(err)
   113  	}
   114  
   115  	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Second)
   116  	defer cancel()
   117  
   118  	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
   119  		nodeIDs := sim.UpNodeIDs()
   120  		storer := nodeIDs[0]
   121  		checker := nodeIDs[1]
   122  
   123  		item, ok := sim.NodeItem(storer, bucketKeyFileStore)
   124  		if !ok {
   125  			return fmt.Errorf("No filestore")
   126  		}
   127  		fileStore := item.(*storage.FileStore)
   128  
   129  		size := chunkCount * chunkSize
   130  		_, wait, err := fileStore.Store(ctx, io.LimitReader(crand.Reader, int64(size)), int64(size), false)
   131  		if err != nil {
   132  			log.Error("Store error: %v", "err", err)
   133  			t.Fatal(err)
   134  		}
   135  		err = wait(ctx)
   136  		if err != nil {
   137  			log.Error("Wait error: %v", "err", err)
   138  			t.Fatal(err)
   139  		}
   140  
   141  		item, ok = sim.NodeItem(checker, bucketKeyRegistry)
   142  		if !ok {
   143  			return fmt.Errorf("No registry")
   144  		}
   145  		registry := item.(*Registry)
   146  
   147  		liveErrC := make(chan error)
   148  		historyErrC := make(chan error)
   149  
   150  		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
   151  			log.Error("WaitKademlia error: %v", "err", err)
   152  			return err
   153  		}
   154  
   155  		log.Debug("Watching for disconnections")
   156  		disconnections := sim.PeerEvents(
   157  			context.Background(),
   158  			sim.NodeIDs(),
   159  			simulation.NewPeerEventsFilter().Type(p2p.PeerEventTypeDrop),
   160  		)
   161  
   162  		go func() {
   163  			for d := range disconnections {
   164  				if d.Error != nil {
   165  					log.Error("peer drop", "node", d.NodeID, "peer", d.Event.Peer)
   166  					t.Fatal(d.Error)
   167  				}
   168  			}
   169  		}()
   170  
   171  		go func() {
   172  			if !live {
   173  				close(liveErrC)
   174  				return
   175  			}
   176  
   177  			var err error
   178  			defer func() {
   179  				liveErrC <- err
   180  			}()
   181  
   182  //
   183  			var liveHashesChan chan []byte
   184  			liveHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", true))
   185  			if err != nil {
   186  				log.Error("Subscription error: %v", "err", err)
   187  				return
   188  			}
   189  			i := externalStreamSessionAt
   190  
   191  //
   192  			err = enableNotifications(registry, storer, NewStream(externalStreamName, "", true))
   193  			if err != nil {
   194  				return
   195  			}
   196  
   197  			for {
   198  				select {
   199  				case hash := <-liveHashesChan:
   200  					h := binary.BigEndian.Uint64(hash)
   201  					if h != i {
   202  						err = fmt.Errorf("expected live hash %d, got %d", i, h)
   203  						return
   204  					}
   205  					i++
   206  					if i > externalStreamMaxKeys {
   207  						return
   208  					}
   209  				case <-ctx.Done():
   210  					return
   211  				}
   212  			}
   213  		}()
   214  
   215  		go func() {
   216  			if live && history == nil {
   217  				close(historyErrC)
   218  				return
   219  			}
   220  
   221  			var err error
   222  			defer func() {
   223  				historyErrC <- err
   224  			}()
   225  
   226  //
   227  			var historyHashesChan chan []byte
   228  			historyHashesChan, err = getHashes(ctx, registry, storer, NewStream(externalStreamName, "", false))
   229  			if err != nil {
   230  				return
   231  			}
   232  
   233  			var i uint64
   234  			historyTo := externalStreamMaxKeys
   235  			if history != nil {
   236  				i = history.From
   237  				if history.To != 0 {
   238  					historyTo = history.To
   239  				}
   240  			}
   241  
   242  //
   243  			err = enableNotifications(registry, storer, NewStream(externalStreamName, "", false))
   244  			if err != nil {
   245  				return
   246  			}
   247  
   248  			for {
   249  				select {
   250  				case hash := <-historyHashesChan:
   251  					h := binary.BigEndian.Uint64(hash)
   252  					if h != i {
   253  						err = fmt.Errorf("expected history hash %d, got %d", i, h)
   254  						return
   255  					}
   256  					i++
   257  					if i > historyTo {
   258  						return
   259  					}
   260  				case <-ctx.Done():
   261  					return
   262  				}
   263  			}
   264  		}()
   265  
   266  		err = registry.Subscribe(storer, NewStream(externalStreamName, "", live), history, Top)
   267  		if err != nil {
   268  			return err
   269  		}
   270  		if err := <-liveErrC; err != nil {
   271  			return err
   272  		}
   273  		if err := <-historyErrC; err != nil {
   274  			return err
   275  		}
   276  
   277  		return nil
   278  	})
   279  
   280  	if result.Error != nil {
   281  		t.Fatal(result.Error)
   282  	}
   283  }
   284  
   285  func getHashes(ctx context.Context, r *Registry, peerID discover.NodeID, s Stream) (chan []byte, error) {
   286  	peer := r.getPeer(peerID)
   287  
   288  	client, err := peer.getClient(ctx, s)
   289  	if err != nil {
   290  		return nil, err
   291  	}
   292  
   293  	c := client.Client.(*testExternalClient)
   294  
   295  	return c.hashes, nil
   296  }
   297  
   298  func enableNotifications(r *Registry, peerID discover.NodeID, s Stream) error {
   299  	peer := r.getPeer(peerID)
   300  
   301  	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
   302  	defer cancel()
   303  
   304  	client, err := peer.getClient(ctx, s)
   305  	if err != nil {
   306  		return err
   307  	}
   308  
   309  	close(client.Client.(*testExternalClient).enableNotificationsC)
   310  
   311  	return nil
   312  }
   313  
// testExternalClient is a stream Client implementation used by the
// intervals test: it reports every hash it needs on an unbuffered channel
// so the test can verify the exact sequence of keys received.
type testExternalClient struct {
	hashes               chan []byte // hashes reported by NeedData; read by the test goroutines
	db                   *storage.DBAPI
	enableNotificationsC chan struct{} // closed by enableNotifications to allow reporting
}
   319  
   320  func newTestExternalClient(db *storage.DBAPI) *testExternalClient {
   321  	return &testExternalClient{
   322  		hashes:               make(chan []byte),
   323  		db:                   db,
   324  		enableNotificationsC: make(chan struct{}),
   325  	}
   326  }
   327  
// NeedData checks with the backing DBAPI whether the chunk for hash is
// already stored. If no request channel exists the chunk is present and nil
// is returned; otherwise the hash is forwarded on c.hashes for the test to
// verify. The send is on an unbuffered channel, so it blocks until a test
// goroutine reads it. The returned func is always nil: no wait-for-delivery
// callback is provided.
func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func() {
	// Error intentionally ignored in this test helper; assumes
	// GetOrCreateRequest never returns a nil chunk together with a nil
	// error, otherwise the field access below would panic — TODO confirm
	// against storage.DBAPI.
	chunk, _ := c.db.GetOrCreateRequest(ctx, hash)
	if chunk.ReqC == nil {
		return nil
	}
	c.hashes <- hash
	// NOTE(review): the comment block that originally followed here was
	// stripped to empty lines in this copy; the behavior is simply to
	// report the hash and return nil without waiting for delivery.

	return nil
}
   346  
// BatchDone is a no-op for the test client; returning nil means no
// takeover proof is produced when a batch completes.
func (c *testExternalClient) BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) {
	return nil
}
   350  
   351  func (c *testExternalClient) Close() {}
   352  
// testExternalServerBatchSize limits how many keys SetNextBatch hands out
// in a single batch.
const testExternalServerBatchSize = 10

// testExternalServer is a stream Server implementation that serves
// deterministically generated keys up to maxKeys, with the live session
// starting at sessionAt.
type testExternalServer struct {
	t         string                         // stream key this server was registered for
	keyFunc   func(key []byte, index uint64) // writes the key for index into the key slice
	sessionAt uint64                         // first index of the live (session) batch
	maxKeys   uint64                         // highest index this server will serve
}
   361  
   362  func newTestExternalServer(t string, sessionAt, maxKeys uint64, keyFunc func(key []byte, index uint64)) *testExternalServer {
   363  	if keyFunc == nil {
   364  		keyFunc = binary.BigEndian.PutUint64
   365  	}
   366  	return &testExternalServer{
   367  		t:         t,
   368  		keyFunc:   keyFunc,
   369  		sessionAt: sessionAt,
   370  		maxKeys:   maxKeys,
   371  	}
   372  }
   373  
   374  func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, uint64, *HandoverProof, error) {
   375  	if from == 0 && to == 0 {
   376  		from = s.sessionAt
   377  		to = s.sessionAt + testExternalServerBatchSize
   378  	}
   379  	if to-from > testExternalServerBatchSize {
   380  		to = from + testExternalServerBatchSize - 1
   381  	}
   382  	if from >= s.maxKeys && to > s.maxKeys {
   383  		return nil, 0, 0, nil, io.EOF
   384  	}
   385  	if to > s.maxKeys {
   386  		to = s.maxKeys
   387  	}
   388  	b := make([]byte, HashSize*(to-from+1))
   389  	for i := from; i <= to; i++ {
   390  		s.keyFunc(b[(i-from)*HashSize:(i-from+1)*HashSize], i)
   391  	}
   392  	return b, from, to, nil, nil
   393  }
   394  
   395  func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) {
   396  	return make([]byte, 4096), nil
   397  }
   398  
   399  func (s *testExternalServer) Close() {}
   400