github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/swarm/network/stream/snapshot_retrieval_test.go


//<developer>
//    <name>linapex 曹一峰</name>
//    <email>linapex@163.com</email>
//    <wx>superexc</wx>
//    <qqgroup>128148617</qqgroup>
//    <url>https://jsq.ink</url>
//    <role>pku engineer</role>
//    <date>2019-03-16 12:09:48</date>
//</624342675872550912>

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package stream

import (
	"context"
	"fmt"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/p2p/discover"
	"github.com/ethereum/go-ethereum/p2p/simulations/adapters"
	"github.com/ethereum/go-ethereum/swarm/log"
	"github.com/ethereum/go-ethereum/swarm/network"
	"github.com/ethereum/go-ethereum/swarm/network/simulation"
	"github.com/ethereum/go-ethereum/swarm/state"
	"github.com/ethereum/go-ethereum/swarm/storage"
)

//constants for random file generation
const (
	minFileSize = 2
	maxFileSize = 40
)

//This test is a retrieval test for nodes.
//A configurable number of nodes can be
//provided to the test.
//Files are uploaded to nodes, other nodes try to retrieve the files.
//The number of nodes can also be provided via the command line.
func TestFileRetrieval(t *testing.T) {
	if *nodes != 0 {
		err := runFileRetrievalTest(*nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		nodeCnt := []int{16}
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			nodeCnt = append(nodeCnt, 32, 64, 128)
		}
		for _, n := range nodeCnt {
			err := runFileRetrievalTest(n)
			if err != nil {
				t.Fatal(err)
			}
		}
	}
}

//This test is a retrieval test for nodes.
//One node is randomly selected to be the pivot node.
//A configurable number of chunks and nodes can be
//provided to the test; the chunks are uploaded to the
//pivot node and the other nodes try to retrieve them.
//The number of chunks and nodes can also be provided via the command line.
func TestRetrieval(t *testing.T) {
	//if nodes/chunks have been provided via the command line,
	//run the test with those values
	if *nodes != 0 && *chunks != 0 {
		err := runRetrievalTest(*chunks, *nodes)
		if err != nil {
			t.Fatal(err)
		}
	} else {
		var nodeCnt []int
		var chnkCnt []int
		//if the `longrunning` flag has been provided,
		//run more test combinations
		if *longrunning {
			nodeCnt = []int{16, 32, 128}
			chnkCnt = []int{4, 32, 256}
		} else {
			//default test
			nodeCnt = []int{16}
			chnkCnt = []int{32}
		}
		for _, n := range nodeCnt {
			for _, c := range chnkCnt {
				err := runRetrievalTest(c, n)
				if err != nil {
					t.Fatal(err)
				}
			}
		}
	}
}

/*
The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy kademlia
network; a health check nevertheless runs in the simulation's
action function. The snapshot should have 'streamer' in its
service list.

A file is uploaded to every node; every node then tries to
retrieve all of the uploaded files.
*/

func runFileRetrievalTest(nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

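			// each simulated node gets its own local store and stream registry;
			// shared objects are put into the bucket so that the simulation's
			// action function can look them up later via sim.NodeItem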
			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

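			// the registry is created with syncing enabled (DoSync) and a 3s
			// sync update delay, so content uploaded to one node is propagated
			// to its peers in the background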
			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 3 * time.Second,
			})

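			// the FileStore is what the retrieval check below uses to fetch
			// files by their root hash on every node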
			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	log.Info("Initializing test config")

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

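	// the whole simulation (upload, health check and retrieval checks)
	// must complete within one minute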
	ctx, cancelSimRun := context.WithTimeout(context.Background(), 1*time.Minute)
	defer cancelSimRun()

	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay addr,
			//but the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//an array for the random files
		var randomFiles []string
		//upload a file to every node: uploadFilesToNodes generates one
		//random file per node and returns the root hashes of the uploads
		//together with the generated file contents

		conf.hashes, randomFiles, err = uploadFilesToNodes(sim)
		if err != nil {
			return err
		}
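		// wait until every node reports a healthy kademlia table before
		// checking retrieval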
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		//check that all files are retrievable from all nodes;
		//this loops until all retrievals succeed
		allSuccess := false
		for !allSuccess {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					//check on the node's FileStore (netstore)
					item, ok := sim.NodeItem(id, bucketKeyFileStore)
					if !ok {
						return fmt.Errorf("No registry")
					}
					fileStore := item.(*storage.FileStore)
					//check all file hashes
					for i, hash := range conf.hashes {
						reader, _ := fileStore.Retrieve(context.TODO(), hash)
						//check that the size of the retrieved file matches the generated file
						if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
							allSuccess = false
							log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						} else {
							log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
						}
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
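				// note that allSuccess takes the value computed for the most
				// recently checked node only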
				allSuccess = localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}

/*
The test generates the given number of chunks.

The test loads a snapshot file to construct the swarm network,
assuming that the snapshot file identifies a healthy kademlia
network; a health check nevertheless runs in the simulation's
action function. The snapshot should have 'streamer' in its
service list.

The chunks are uploaded to the local store of a single, randomly
selected node; all other nodes then try to retrieve them.
*/

func runRetrievalTest(chunkCount int, nodeCount int) error {
	sim := simulation.New(map[string]simulation.ServiceFunc{
		"streamer": func(ctx *adapters.ServiceContext, bucket *sync.Map) (s node.Service, cleanup func(), err error) {

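			// the per-node setup mirrors runFileRetrievalTest above, except for
			// the zero sync update delay and the explicit bucket key below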
			id := ctx.Config.ID
			addr := network.NewAddrFromNodeID(id)
			store, datadir, err := createTestLocalStorageForID(id, addr)
			if err != nil {
				return nil, nil, err
			}
			bucket.Store(bucketKeyStore, store)
			cleanup = func() {
				os.RemoveAll(datadir)
				store.Close()
			}
			localStore := store.(*storage.LocalStore)
			db := storage.NewDBAPI(localStore)
			kad := network.NewKademlia(addr.Over(), network.NewKadParams())
			delivery := NewDelivery(kad, db)

			r := NewRegistry(addr, delivery, db, state.NewInmemoryStore(), &RegistryOptions{
				DoSync:          true,
				SyncUpdateDelay: 0,
			})

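			// store the FileStore under the bucket key so the retrieval
			// check below can access it via sim.NodeItem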
			fileStore := storage.NewFileStore(storage.NewNetStore(localStore, nil), storage.NewFileStoreParams())
			bucketKeyFileStore = simulation.BucketKey("filestore")
			bucket.Store(bucketKeyFileStore, fileStore)

			return r, cleanup, nil

		},
	})
	defer sim.Close()

	conf := &synctestConfig{}
	//map of discover ID to indexes of chunks expected at that ID
	conf.idToChunksMap = make(map[discover.NodeID][]int)
	//map of overlay address to discover ID
	conf.addrToIDMap = make(map[string]discover.NodeID)
	//array where the generated chunk hashes will be stored
	conf.hashes = make([]storage.Address, 0)

	err := sim.UploadSnapshot(fmt.Sprintf("testing/snapshot_%d.json", nodeCount))
	if err != nil {
		return err
	}

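	// unlike runFileRetrievalTest, this simulation runs without a timeout;
	// the retrieval check below loops until every fetch succeeds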
	ctx := context.Background()
	result := sim.Run(ctx, func(ctx context.Context, sim *simulation.Simulation) error {
		nodeIDs := sim.UpNodeIDs()
		for _, n := range nodeIDs {
			//get the kademlia overlay address from this ID
			a := network.ToOverlayAddr(n.Bytes())
			//append it to the array of all overlay addresses
			conf.addrs = append(conf.addrs, a)
			//the proximity calculation is on the overlay addr,
			//but the p2p/simulations check func triggers on discover.NodeID,
			//so we need to know which overlay addr maps to which nodeID
			conf.addrToIDMap[string(a)] = n
		}

		//no random files are generated in this test; the slice only
		//feeds the shared retrieval check below
		var randomFiles []string
		//this is the node selected for upload
		node := sim.RandomUpNode()
		item, ok := sim.NodeItem(node.ID, bucketKeyStore)
		if !ok {
			return fmt.Errorf("No localstore")
		}
		lstore := item.(*storage.LocalStore)
		conf.hashes, err = uploadFileToSingleNodeStore(node.ID, chunkCount, lstore)
		if err != nil {
			return err
		}
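		// wait until every node reports a healthy kademlia table before
		// checking retrieval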
		if _, err := sim.WaitTillHealthy(ctx, 2); err != nil {
			return err
		}

		//check that the uploaded chunks are retrievable from all nodes;
		//this loops until all retrievals succeed
		allSuccess := false
		for !allSuccess {
			for _, id := range nodeIDs {
				//for each expected chunk, check if it is in the local store
				localChunks := conf.idToChunksMap[id]
				localSuccess := true
				for _, ch := range localChunks {
					//get the real chunk by the index in the index array
					chunk := conf.hashes[ch]
					log.Trace(fmt.Sprintf("node has chunk: %s:", chunk))
					//check if the expected chunk is indeed in the localstore
					var err error
					//check on the node's FileStore (netstore)
					item, ok := sim.NodeItem(id, bucketKeyFileStore)
					if !ok {
						return fmt.Errorf("No registry")
					}
					fileStore := item.(*storage.FileStore)
					//check all hashes
					for i, hash := range conf.hashes {
						reader, _ := fileStore.Retrieve(context.TODO(), hash)
						//check that the size of the retrieved content matches the generated size
						if s, err := reader.Size(ctx, nil); err != nil || s != int64(len(randomFiles[i])) {
							allSuccess = false
							log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id)
						} else {
							log.Debug(fmt.Sprintf("File with root hash %x successfully retrieved", hash))
						}
					}
					if err != nil {
						log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id))
						localSuccess = false
					} else {
						log.Debug(fmt.Sprintf("Chunk %s IS FOUND for id %s", chunk, id))
					}
				}
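				// note that allSuccess takes the value computed for the most
				// recently checked node only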
				allSuccess = localSuccess
			}
		}
		if !allSuccess {
			return fmt.Errorf("Not all chunks succeeded!")
		}
		return nil
	})

	if result.Error != nil {
		return result.Error
	}

	return nil
}