github.com/daeglee/go-ethereum@v0.0.0-20190504220456-cad3e8d18e9b/cmd/swarm/swarm-smoke/sliding_window.go

// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"fmt"
	"math/rand"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/swarm/testutil"
	"github.com/pborman/uuid"

	cli "gopkg.in/urfave/cli.v1"
)

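// uploadResult pairs the Swarm hash returned by an upload with the digest of
// the uploaded content, so that fetched data can later be verified against it.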
type uploadResult struct {
	hash   string
	digest []byte
}

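// slidingWindowCmd runs the sliding window test and bumps the per-command
// failure counter if the test returns an error.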
func slidingWindowCmd(ctx *cli.Context, tuid string) error {
	errc := make(chan error)

	go func() {
		errc <- slidingWindow(ctx, tuid)
	}()

	err := <-errc
	if err != nil {
		metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
	}
	return err
}

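// slidingWindow repeatedly uploads a random file of filesize KB to the first
// host, then re-fetches every hash uploaded so far from randomly chosen other
// hosts. The loop runs until a fetch times out; the number of files still
// retrievable at that point is the measured network depth.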
func slidingWindow(ctx *cli.Context, tuid string) error {
	var hashes []uploadResult // Swarm hashes of the uploads
	nodes := len(hosts)
	log.Info("sliding window test started", "tuid", tuid, "nodes", nodes, "filesize(kb)", filesize, "timeout", timeout)
	uploadedBytes := 0
	networkDepth := 0
	errored := false

	// q is a one-slot semaphore admitting a single fetch goroutine at a time;
	// d signals that the current fetch finished. Creating them once, outside
	// the loop, keeps the deferred closes from piling up on every iteration.
	q := make(chan struct{}, 1)
	d := make(chan struct{})
	defer close(q)
	defer close(d)

outer:
	for {
		seed = int(time.Now().UTC().UnixNano())
		log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "seed", seed)

		t1 := time.Now()

		// filesize is given in KB, so scale it up to bytes
		randomBytes := testutil.RandomBytes(seed, filesize*1000)

		hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
		if err != nil {
			log.Error(err.Error())
			return err
		}

		metrics.GetOrRegisterResettingTimer("sliding-window.upload-time", nil).UpdateSince(t1)
		metrics.GetOrRegisterGauge("sliding-window.upload-depth", nil).Update(int64(len(hashes)))

		fhash, err := digest(bytes.NewReader(randomBytes))
		if err != nil {
			log.Error(err.Error())
			return err
		}

		log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash), "sleeping", syncDelay)
		hashes = append(hashes, uploadResult{hash: hash, digest: fhash})
		// give the network time to sync the new chunks before fetching
		time.Sleep(time.Duration(syncDelay) * time.Second)
		uploadedBytes += filesize * 1000
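		// verification pass: walk the whole window of uploads so far and
		// confirm that each hash is still retrievable within the timeout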
		for i, v := range hashes {
			timeoutC := time.After(time.Duration(timeout) * time.Second)
			errored = false

		task:
			for {
				select {
				case q <- struct{}{}:
					// the semaphore slot was free; launch a fetch for this hash
					go func() {
						var start time.Time
						done := false
						for !done {
							log.Info("trying to retrieve hash", "hash", v.hash)
							// pick a random host other than the one uploaded to
							idx := 1 + rand.Intn(len(hosts)-1)
							ruid := uuid.New()[:8]
							start = time.Now()
							// fetch hangs when swarm dies out, so we have to jump through
							// a few extra hoops to actually catch the timeout while still
							// allowing this retry logic
							err := fetch(v.hash, httpEndpoint(hosts[idx]), v.digest, ruid, "")
							if err != nil {
								log.Error("error fetching hash", "err", err)
								continue
							}
							done = true
						}
						metrics.GetOrRegisterResettingTimer("sliding-window.single.fetch-time", nil).UpdateSince(start)
						d <- struct{}{}
					}()
				case <-d:
					// fetch succeeded; release the semaphore and move to the next hash
					<-q
					break task
				case <-timeoutC:
					// the fetch goroutine is stuck; record the failure and stop the test
					errored = true
					log.Error("error retrieving hash. timeout", "hash idx", i)
					metrics.GetOrRegisterCounter("sliding-window.single.error", nil).Inc(1)
					break outer
				default:
					// none of the channels is ready yet; poll again
				}
			}

			networkDepth = i
			metrics.GetOrRegisterGauge("sliding-window.network-depth", nil).Update(int64(networkDepth))
			log.Info("sliding window test successfully fetched file", "currentDepth", networkDepth)
			// this test might take a long time to finish, but we'd like to see
			// metrics while they accumulate rather than only when the test
			// finishes, so emit them on every iteration
			emitMetrics(ctx)
		}
	}

	log.Info("sliding window test finished", "errored?", errored, "networkDepth", networkDepth, "networkDepth(kb)", networkDepth*filesize)
	log.Info("stats", "uploadedFiles", len(hashes), "uploadedKb", uploadedBytes/1000, "filesizeKb", filesize)

	return nil
}
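
// For context, a minimal sketch of how this command might be wired into the
// package's urfave/cli app. The real registration lives in main.go of this
// package; the usage string and the inline tuid generation below are
// illustrative assumptions, not the actual wiring:
//
//	app.Commands = append(app.Commands, cli.Command{
//		Name:  "sliding_window",
//		Usage: "upload files and measure how many remain retrievable (illustrative)",
//		Action: func(ctx *cli.Context) error {
//			// hypothetical: derive a short test uuid and delegate to slidingWindowCmd
//			return slidingWindowCmd(ctx, uuid.New()[:8])
//		},
//	})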