github.com/Blockdaemon/celo-blockchain@v0.0.0-20200129231733-e667f6b08419/cmd/swarm/swarm-smoke/upload_and_sync.go

// Copyright 2018 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rpc"
	"github.com/ethereum/go-ethereum/swarm/api"
	"github.com/ethereum/go-ethereum/swarm/storage"
	"github.com/ethereum/go-ethereum/swarm/testutil"
	"github.com/pborman/uuid"
	cli "gopkg.in/urfave/cli.v1"
)

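// uploadAndSyncCmd generates a pseudo-random test payload, uploads it to the
// first host and waits for it to sync across the cluster, failing the run if
// the configured timeout elapses. On timeout it also runs the chunk debug
// sequence to report which hosts are missing which chunks.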
func uploadAndSyncCmd(ctx *cli.Context, tuid string) error {
	randomBytes := testutil.RandomBytes(seed, filesize*1000)

	errc := make(chan error)

	go func() {
		errc <- uploadAndSync(ctx, randomBytes, tuid)
	}()

	select {
	case err := <-errc:
		if err != nil {
			metrics.GetOrRegisterCounter(fmt.Sprintf("%s.fail", commandName), nil).Inc(1)
		}
		return err
	case <-time.After(time.Duration(timeout) * time.Second):
		metrics.GetOrRegisterCounter(fmt.Sprintf("%s.timeout", commandName), nil).Inc(1)

		e := fmt.Errorf("timeout after %v sec", timeout)
		// trigger debug functionality on randomBytes
		err := trackChunks(randomBytes)
		if err != nil {
			e = fmt.Errorf("%v; trackChunks failed: %v", e, err)
		}

		return e
	}
}

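// trackChunks asks every host, over its websocket RPC endpoint, whether it
// holds each chunk of the test data and logs any chunks a host is missing.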
func trackChunks(testData []byte) error {
	log.Warn("Test timed out; running chunk debug sequence")

	addrs, err := getAllRefs(testData)
	if err != nil {
		return err
	}
	log.Trace("All references retrieved")

	// has-chunks
	for _, host := range hosts {
		wsEndpoint := fmt.Sprintf("ws://%s:%d", host, 8546)
		log.Trace("Calling `Has` on host", "wsEndpoint", wsEndpoint)
		rpcClient, err := rpc.Dial(wsEndpoint)
		if err != nil {
			log.Trace("Error dialing host", "err", err)
			return err
		}
		log.Trace("rpc dial ok")
		var hasInfo []api.HasInfo
		err = rpcClient.Call(&hasInfo, "bzz_has", addrs)
		if err != nil {
			log.Trace("Error calling host", "err", err)
			return err
		}
		log.Trace("rpc call ok")
		count := 0
		for _, info := range hasInfo {
			if !info.Has {
				count++
				log.Error("Host does not have chunk", "host", wsEndpoint, "chunk", info.Addr)
			}
		}
		if count == 0 {
			log.Info("Host reported to have all chunks", "host", wsEndpoint)
		}
	}
	return nil
}

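// getAllRefs chunks the test data through a throwaway local file store and
// returns the addresses of every chunk the data resolves to.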
func getAllRefs(testData []byte) (storage.AddressCollection, error) {
	log.Trace("Getting all references for given root hash")
	datadir, err := ioutil.TempDir("", "chunk-debug")
	if err != nil {
		return nil, fmt.Errorf("unable to create temp dir: %v", err)
	}
	defer os.RemoveAll(datadir)
	fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32))
	if err != nil {
		return nil, err
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(trackTimeout)*time.Second)
	defer cancel()

	reader := bytes.NewReader(testData)
	return fileStore.GetAllReferences(ctx, reader, false)
}

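// uploadAndSync uploads the random payload to the first host, records the
// returned swarm hash and the payload digest, waits for the sync delay, and
// then fetches the file from the remaining hosts (or from a single random
// host) until every fetch succeeds.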
func uploadAndSync(c *cli.Context, randomBytes []byte, tuid string) error {
	log.Info("uploading to "+httpEndpoint(hosts[0])+" and syncing", "tuid", tuid, "seed", seed)

	t1 := time.Now()
	hash, err := upload(randomBytes, httpEndpoint(hosts[0]))
	if err != nil {
		log.Error(err.Error())
		return err
	}
	t2 := time.Since(t1)
	metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).Update(t2)

	fhash, err := digest(bytes.NewReader(randomBytes))
	if err != nil {
		log.Error(err.Error())
		return err
	}

	log.Info("uploaded successfully", "tuid", tuid, "hash", hash, "took", t2, "digest", fmt.Sprintf("%x", fhash))

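	// give the cluster time to sync the uploaded chunks before fetching them back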
	time.Sleep(time.Duration(syncDelay) * time.Second)

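	// fetch the file back from either one random host or every host except the
	// uploader, retrying each fetch until it succeeds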
	wg := sync.WaitGroup{}
	if single {
		randIndex := 1 + rand.Intn(len(hosts)-1)
		ruid := uuid.New()[:8]
		wg.Add(1)
		go func(endpoint string, ruid string) {
			for {
				start := time.Now()
				err := fetch(hash, endpoint, fhash, ruid, tuid)
				if err != nil {
					continue
				}
				ended := time.Since(start)

				metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).Update(ended)
				log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
				wg.Done()
				return
			}
		}(httpEndpoint(hosts[randIndex]), ruid)
	} else {
		for _, endpoint := range hosts[1:] {
			ruid := uuid.New()[:8]
			wg.Add(1)
			go func(endpoint string, ruid string) {
				for {
					start := time.Now()
					err := fetch(hash, endpoint, fhash, ruid, tuid)
					if err != nil {
						continue
					}
					ended := time.Since(start)

					metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).Update(ended)
					log.Info("fetch successful", "tuid", tuid, "ruid", ruid, "took", ended, "endpoint", endpoint)
					wg.Done()
					return
				}
			}(httpEndpoint(endpoint), ruid)
		}
	}
	wg.Wait()
	log.Info("all hosts synced the random file successfully")

	return nil
}