github.com/linapex/ethereum-go-chinese@v0.0.0-20190316121929-f8b7a73c3fa1/cmd/swarm/swarm-smoke/upload_and_sync.go (about)

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 19:16:33</date>
    10  //</624450071651422208>
    11  
    12  
    13  package main
    14  
    15  import (
    16  	"bytes"
    17  	"context"
    18  	"crypto/md5"
    19  	crand "crypto/rand"
    20  	"errors"
    21  	"fmt"
    22  	"io"
    23  	"io/ioutil"
    24  	"math/rand"
    25  	"net/http"
    26  	"net/http/httptrace"
    27  	"os"
    28  	"sync"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum/log"
    32  	"github.com/ethereum/go-ethereum/metrics"
    33  	"github.com/ethereum/go-ethereum/swarm/api"
    34  	"github.com/ethereum/go-ethereum/swarm/api/client"
    35  	"github.com/ethereum/go-ethereum/swarm/spancontext"
    36  	"github.com/ethereum/go-ethereum/swarm/testutil"
    37  	opentracing "github.com/opentracing/opentracing-go"
    38  	"github.com/pborman/uuid"
    39  
    40  	cli "gopkg.in/urfave/cli.v1"
    41  )
    42  
    43  func generateEndpoints(scheme string, cluster string, app string, from int, to int) {
    44  	if cluster == "prod" {
    45  		for port := from; port < to; port++ {
    46  endpoints = append(endpoints, fmt.Sprintf("%s://%v.swarm-gateways.net“,方案,端口)
    47  		}
    48  	} else {
    49  		for port := from; port < to; port++ {
    50  endpoints = append(endpoints, fmt.Sprintf("%s://%s-%v-%s.stg.swarm gateways.net“,方案,应用程序,端口,群集)
    51  		}
    52  	}
    53  
    54  	if includeLocalhost {
    55  endpoints = append(endpoints, "http://本地主机:8500“)
    56  	}
    57  }
    58  
    59  func cliUploadAndSync(c *cli.Context) error {
    60  	log.PrintOrigins(true)
    61  	log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(verbosity), log.StreamHandler(os.Stdout, log.TerminalFormat(true))))
    62  
    63  	metrics.GetOrRegisterCounter("upload-and-sync", nil).Inc(1)
    64  
    65  	errc := make(chan error)
    66  	go func() {
    67  		errc <- uploadAndSync(c)
    68  	}()
    69  
    70  	select {
    71  	case err := <-errc:
    72  		if err != nil {
    73  			metrics.GetOrRegisterCounter("upload-and-sync.fail", nil).Inc(1)
    74  		}
    75  		return err
    76  	case <-time.After(time.Duration(timeout) * time.Second):
    77  		metrics.GetOrRegisterCounter("upload-and-sync.timeout", nil).Inc(1)
    78  		return fmt.Errorf("timeout after %v sec", timeout)
    79  	}
    80  }
    81  
// uploadAndSync generates a deterministic pseudo-random file, uploads it to
// the first endpoint, waits syncDelay seconds for swarm syncing, then
// verifies the file can be fetched (and its MD5 matches) from either one
// random endpoint (single mode) or every other endpoint. Fetch failures are
// retried forever; the caller's timeout (cliUploadAndSync) bounds the run.
func uploadAndSync(c *cli.Context) error {
	// Record total wall-clock time regardless of outcome.
	defer func(now time.Time) {
		totalTime := time.Since(now)
		log.Info("total time", "time", totalTime, "kb", filesize)
		metrics.GetOrRegisterResettingTimer("upload-and-sync.total-time", nil).Update(totalTime)
	}(time.Now())

	generateEndpoints(scheme, cluster, appName, from, to)
	// Millisecond timestamp doubles as the RNG seed so runs are reproducible
	// from the logged value.
	seed := int(time.Now().UnixNano() / 1e6)
	log.Info("uploading to "+endpoints[0]+" and syncing", "seed", seed)

	// filesize is in KB, hence the *1000.
	randomBytes := testutil.RandomBytes(seed, filesize*1000)

	t1 := time.Now()
	hash, err := upload(&randomBytes, endpoints[0])
	if err != nil {
		log.Error(err.Error())
		return err
	}
	metrics.GetOrRegisterResettingTimer("upload-and-sync.upload-time", nil).UpdateSince(t1)

	// MD5 of the generated data; fetched copies are checked against this.
	fhash, err := digest(bytes.NewReader(randomBytes))
	if err != nil {
		log.Error(err.Error())
		return err
	}

	log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash))

	// Give the cluster time to sync the chunks before fetching.
	time.Sleep(time.Duration(syncDelay) * time.Second)

	wg := sync.WaitGroup{}
	if single {
		// Fetch from one randomly chosen endpoint other than the upload
		// node (index 0 is excluded by the 1+ offset).
		rand.Seed(time.Now().UTC().UnixNano())
		randIndex := 1 + rand.Intn(len(endpoints)-1)
		ruid := uuid.New()[:8]
		wg.Add(1)
		go func(endpoint string, ruid string) {
			// Retry until the fetch succeeds; the outer timeout in
			// cliUploadAndSync terminates the process if it never does.
			for {
				start := time.Now()
				err := fetch(hash, endpoint, fhash, ruid)
				if err != nil {
					continue
				}

				metrics.GetOrRegisterResettingTimer("upload-and-sync.single.fetch-time", nil).UpdateSince(start)
				wg.Done()
				return
			}
		}(endpoints[randIndex], ruid)
	} else {
		// Fetch concurrently from every endpoint except the upload node.
		for _, endpoint := range endpoints[1:] {
			ruid := uuid.New()[:8]
			wg.Add(1)
			go func(endpoint string, ruid string) {
				// Same retry-until-success loop as the single-endpoint case.
				for {
					start := time.Now()
					err := fetch(hash, endpoint, fhash, ruid)
					if err != nil {
						continue
					}

					metrics.GetOrRegisterResettingTimer("upload-and-sync.each.fetch-time", nil).UpdateSince(start)
					wg.Done()
					return
				}
			}(endpoint, ruid)
		}
	}
	wg.Wait()
	log.Info("all endpoints synced random file successfully")

	return nil
}
   156  
   157  //
   158  func fetch(hash string, endpoint string, original []byte, ruid string) error {
   159  	ctx, sp := spancontext.StartSpan(context.Background(), "upload-and-sync.fetch")
   160  	defer sp.Finish()
   161  
   162  	log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash)
   163  
   164  	var tn time.Time
   165  	reqUri := endpoint + "/bzz:/" + hash + "/"
   166  	req, _ := http.NewRequest("GET", reqUri, nil)
   167  
   168  	opentracing.GlobalTracer().Inject(
   169  		sp.Context(),
   170  		opentracing.HTTPHeaders,
   171  		opentracing.HTTPHeadersCarrier(req.Header))
   172  
   173  	trace := client.GetClientTrace("upload-and-sync - http get", "upload-and-sync", ruid, &tn)
   174  
   175  	req = req.WithContext(httptrace.WithClientTrace(ctx, trace))
   176  	transport := http.DefaultTransport
   177  
   178  //transport.tlsclientconfig=&tls.config不安全的skipverify:true
   179  
   180  	tn = time.Now()
   181  	res, err := transport.RoundTrip(req)
   182  	if err != nil {
   183  		log.Error(err.Error(), "ruid", ruid)
   184  		return err
   185  	}
   186  	log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength)
   187  
   188  	if res.StatusCode != 200 {
   189  		err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode)
   190  		log.Warn(err.Error(), "ruid", ruid)
   191  		return err
   192  	}
   193  
   194  	defer res.Body.Close()
   195  
   196  	rdigest, err := digest(res.Body)
   197  	if err != nil {
   198  		log.Warn(err.Error(), "ruid", ruid)
   199  		return err
   200  	}
   201  
   202  	if !bytes.Equal(rdigest, original) {
   203  		err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original)
   204  		log.Warn(err.Error(), "ruid", ruid)
   205  		return err
   206  	}
   207  
   208  	log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength)
   209  
   210  	return nil
   211  }
   212  
   213  //upload正在通过“swarm up”命令将文件“f”上载到“endpoint”
   214  func upload(dataBytes *[]byte, endpoint string) (string, error) {
   215  	swarm := client.NewClient(endpoint)
   216  	f := &client.File{
   217  		ReadCloser: ioutil.NopCloser(bytes.NewReader(*dataBytes)),
   218  		ManifestEntry: api.ManifestEntry{
   219  			ContentType: "text/plain",
   220  			Mode:        0660,
   221  			Size:        int64(len(*dataBytes)),
   222  		},
   223  	}
   224  
   225  //将数据上载到bzz://并检索内容寻址清单哈希,十六进制编码。
   226  	return swarm.Upload(f, "", false)
   227  }
   228  
   229  func digest(r io.Reader) ([]byte, error) {
   230  	h := md5.New()
   231  	_, err := io.Copy(h, r)
   232  	if err != nil {
   233  		return nil, err
   234  	}
   235  	return h.Sum(nil), nil
   236  }
   237  
   238  //在堆缓冲区中生成随机数据
   239  func generateRandomData(datasize int) ([]byte, error) {
   240  	b := make([]byte, datasize)
   241  	c, err := crand.Read(b)
   242  	if err != nil {
   243  		return nil, err
   244  	} else if c != datasize {
   245  		return nil, errors.New("short read")
   246  	}
   247  	return b, nil
   248  }
   249