github.com/jbendotnet/noms@v0.0.0-20190904222105-c43e4293ea92/cmd/noms/noms_sync.go

// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/attic-labs/kingpin"
	"github.com/attic-labs/noms/cmd/util"
	"github.com/attic-labs/noms/go/config"
	"github.com/attic-labs/noms/go/d"
	"github.com/attic-labs/noms/go/datas"
	"github.com/attic-labs/noms/go/types"
	"github.com/attic-labs/noms/go/util/profile"
	"github.com/attic-labs/noms/go/util/status"
	humanize "github.com/dustin/go-humanize"
)

func nomsSync(noms *kingpin.Application) (*kingpin.CmdClause, util.KingpinHandler) {
	cmd := noms.Command("sync", "Efficiently moves values between databases.")
	source := cmd.Arg("source-value", "see Spelling Values at https://github.com/attic-labs/noms/blob/master/doc/spelling.md").Required().String()
	dest := cmd.Arg("dest-dataset", "see Spelling Datasets at https://github.com/attic-labs/noms/blob/master/doc/spelling.md").Required().String()

	return cmd, func(_ string) int {
		cfg := config.NewResolver()
		sourceStore, sourceObj, err := cfg.GetPath(*source)
		d.CheckError(err)
		defer sourceStore.Close()

		if sourceObj == nil {
			d.CheckErrorNoUsage(fmt.Errorf("Object not found: %s", *source))
		}

		sinkDB, sinkDataset, err := cfg.GetDataset(*dest)
		d.CheckError(err)
		defer sinkDB.Close()

		start := time.Now()
		progressCh := make(chan datas.PullProgress)
		lastProgressCh := make(chan datas.PullProgress)

		go func() {
			var last datas.PullProgress

			for info := range progressCh {
				last = info
				if info.KnownCount == 1 {
					// It's better to print "up to date" than "0% (0/1); 100% (1/1)".
					continue
				}

				if status.WillPrint() {
					pct := 100.0 * float64(info.DoneCount) / float64(info.KnownCount)
					status.Printf("Syncing - %.2f%% (%s/s)", pct, bytesPerSec(info.ApproxWrittenBytes, start))
				}
			}
			lastProgressCh <- last
		}()

		sourceRef := types.NewRef(sourceObj)
		sinkRef, sinkExists := sinkDataset.MaybeHeadRef()
		nonFF := false
		err = d.Try(func() {
			defer profile.MaybeStartProfile().Stop()
			datas.Pull(sourceStore, sinkDB, sourceRef, progressCh)

			var err error
			sinkDataset, err = sinkDB.FastForward(sinkDataset, sourceRef)
			if err == datas.ErrMergeNeeded {
				sinkDataset, err = sinkDB.SetHead(sinkDataset, sourceRef)
				nonFF = true
			}
			d.PanicIfError(err)
		})

		if err != nil {
			log.Fatal(err)
		}

		close(progressCh)
		if last := <-lastProgressCh; last.DoneCount > 0 {
			status.Printf("Done - Synced %s in %s (%s/s)",
				humanize.Bytes(last.ApproxWrittenBytes), since(start), bytesPerSec(last.ApproxWrittenBytes, start))
			status.Done()
		} else if !sinkExists {
			fmt.Printf("All chunks already exist at destination! Created new dataset %s.\n", *dest)
		} else if nonFF && !sourceRef.Equals(sinkRef) {
			fmt.Printf("Abandoning %s; new head is %s\n", sinkRef.TargetHash(), sourceRef.TargetHash())
		} else {
			fmt.Printf("Dataset %s is already up to date.\n", *dest)
		}

		return 0
	}
}

func bytesPerSec(bytes uint64, start time.Time) string {
	bps := float64(bytes) / float64(time.Since(start).Seconds())
	return humanize.Bytes(uint64(bps))
}

func since(start time.Time) string {
	round := time.Second / 100
	now := time.Now().Round(round)
	return now.Sub(start.Round(round)).String()
}
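
For context, a minimal sketch of how the (*kingpin.CmdClause, util.KingpinHandler) pair returned by nomsSync could be wired into a kingpin application and dispatched. The main function, application description string, and single-command switch below are illustrative assumptions, not the actual cmd/noms/main.go.

package main

import (
	"os"

	"github.com/attic-labs/kingpin"
)

func main() {
	// Hypothetical wiring: register the sync command and dispatch on the
	// parsed command name. The real noms binary registers many commands.
	app := kingpin.New("noms", "Noms command-line tool (illustrative description).")
	sync, handler := nomsSync(app)

	switch kingpin.MustParse(app.Parse(os.Args[1:])) {
	case sync.FullCommand():
		// The handler returns a process exit code.
		os.Exit(handler(sync.FullCommand()))
	}
}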