github.com/attic-labs/noms@v0.0.0-20210827224422-e5fa29d95e8b/go/nbs/benchmarks/main.go

// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"os"
	"regexp"
	"sort"
	"time"

	"github.com/attic-labs/kingpin"
	"github.com/attic-labs/noms/go/chunks"
	"github.com/attic-labs/noms/go/d"
	"github.com/attic-labs/noms/go/nbs"
	"github.com/attic-labs/noms/go/util/profile"
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/dustin/go-humanize"
	"github.com/stretchr/testify/assert"
)

// Command-line flags: iteration count, data and memtable sizes, and which
// chunk store to benchmark against.
var (
	count    = kingpin.Flag("c", "Number of iterations to run").Default("10").Int()
	dataSize = kingpin.Flag("data", "MiB of data to test with").Default("4096").Uint64()
	mtMiB    = kingpin.Flag("mem", "Size in MiB of memTable").Default("64").Uint64()
	useNBS   = kingpin.Flag("useNBS", "Existing Database to use for not-WriteNovel benchmarks").String()
	toNBS    = kingpin.Flag("toNBS", "Write to an NBS store in the given directory").String()
	useAWS   = kingpin.Flag("useAWS", "Name of existing Database to use for not-WriteNovel benchmarks").String()
	toAWS    = kingpin.Flag("toAWS", "Write to an NBS store in AWS").String()
	toFile   = kingpin.Flag("toFile", "Write to a file in the given directory").String()
)

const s3Bucket = "attic-nbs"
const dynamoTable = "attic-nbs"

// panickingBencher provides the minimal bencher interface used by the
// benchmark helpers; any reported error panics instead of failing a test.
type panickingBencher struct {
	n int
}

func (pb panickingBencher) Errorf(format string, args ...interface{}) {
	panic(fmt.Sprintf(format, args...))
}

func (pb panickingBencher) N() int {
	return pb.n
}

func (pb panickingBencher) ResetTimer() {}
func (pb panickingBencher) StartTimer() {}
func (pb panickingBencher) StopTimer()  {}

func main() {
	profile.RegisterProfileFlags(kingpin.CommandLine)
	kingpin.Parse()

	pb := panickingBencher{*count}

	src, err := getInput((*dataSize) * humanize.MiByte)
	d.PanicIfError(err)
	defer src.Close()

	bufSize := (*mtMiB) * humanize.MiByte

	// Pick how to open (and, for the write benchmarks, reset) the chunk
	// store under test, based on the flags.
	open := newNullBlockStore
	wrote := false
	var writeDB func()
	var refresh func() chunks.ChunkStore
	if *toNBS != "" || *toFile != "" || *toAWS != "" {
		var reset func()
		if *toNBS != "" {
			dir := makeTempDir(*toNBS, pb)
			defer os.RemoveAll(dir)
			open = func() chunks.ChunkStore { return nbs.NewLocalStore(dir, bufSize) }
			reset = func() { os.RemoveAll(dir); os.MkdirAll(dir, 0777) }

		} else if *toFile != "" {
			dir := makeTempDir(*toFile, pb)
			defer os.RemoveAll(dir)
			open = func() chunks.ChunkStore {
				f, err := ioutil.TempFile(dir, "")
				d.Chk.NoError(err)
				return newFileBlockStore(f)
			}
			reset = func() { os.RemoveAll(dir); os.MkdirAll(dir, 0777) }

		} else if *toAWS != "" {
			sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
			open = func() chunks.ChunkStore {
				return nbs.NewAWSStore(dynamoTable, *toAWS, s3Bucket, s3.New(sess), dynamodb.New(sess), bufSize)
			}
			reset = func() {
				ddb := dynamodb.New(sess)
				_, err := ddb.DeleteItem(&dynamodb.DeleteItemInput{
					TableName: aws.String(dynamoTable),
					Key: map[string]*dynamodb.AttributeValue{
						"db": {S: toAWS},
					},
				})
				d.PanicIfError(err)
			}
		}

		writeDB = func() { wrote = ensureNovelWrite(wrote, open, src, pb) }
		refresh = func() chunks.ChunkStore {
			reset()
			return open()
		}
	} else {
		if *useNBS != "" {
			open = func() chunks.ChunkStore { return nbs.NewLocalStore(*useNBS, bufSize) }
		} else if *useAWS != "" {
			sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
			open = func() chunks.ChunkStore {
				return nbs.NewAWSStore(dynamoTable, *useAWS, s3Bucket, s3.New(sess), dynamodb.New(sess), bufSize)
			}
		}
		writeDB = func() {}
		refresh = func() chunks.ChunkStore { panic("WriteNovel unsupported with --useNBS and --useAWS") }
	}

	// Each benchmark pairs an optional setup step with the timed body.
	benchmarks := []struct {
		name  string
		setup func()
		run   func()
	}{
		{"WriteNovel", func() {}, func() { wrote = benchmarkNovelWrite(refresh, src, pb) }},
		{"WriteDuplicate", writeDB, func() { benchmarkNoRefreshWrite(open, src, pb) }},
		{"ReadSequential", writeDB, func() {
			benchmarkRead(open, src.GetHashes(), src, pb)
		}},
		{"ReadHashOrder", writeDB, func() {
			ordered := src.GetHashes()
			sort.Sort(ordered)
			benchmarkRead(open, ordered, src, pb)
		}},
		{"ReadManySequential", writeDB, func() { benchmarkReadMany(open, src.GetHashes(), src, 1<<8, 6, pb) }},
		{"ReadManyHashOrder", writeDB, func() {
			ordered := src.GetHashes()
			sort.Sort(ordered)
			benchmarkReadMany(open, ordered, src, 1<<8, 6, pb)
		}},
	}

	// Width of the longest benchmark name, used to align the output.
	w := 0
	for _, bm := range benchmarks {
		if len(bm.name) > w {
			w = len(bm.name)
		}
	}

	defer profile.MaybeStartProfile().Stop()

	// Run every benchmark whose name matches the regexp given as the first
	// positional argument, timing each of the requested iterations.
	for _, bm := range benchmarks {
		if matched, _ := regexp.MatchString(flag.Arg(0), bm.name); matched {
			trialName := fmt.Sprintf("%dMiB/%sbuffer/%-[3]*s", *dataSize, humanize.IBytes(bufSize), w, bm.name)
			bm.setup()
			dur := time.Duration(0)
			var trials []time.Duration
			for i := 0; i < *count; i++ {
				d.Chk.NoError(dropCache())
				src.PrimeFilesystemCache()

				t := time.Now()
				bm.run()
				trialDur := time.Since(t)
				trials = append(trials, trialDur)
				dur += trialDur
			}
			fmt.Printf("%s\t%d\t%ss/iter %v\n", trialName, *count, humanize.FormatFloat("", (dur/time.Duration(*count)).Seconds()), formatTrials(trials))
		}
	}
}

func makeTempDir(tmpdir string, t assert.TestingT) (dir string) {
	dir, err := ioutil.TempDir(tmpdir, "")
	assert.NoError(t, err)
	return
}

func formatTrials(trials []time.Duration) (formatted []string) {
	for _, trial := range trials {
		formatted = append(formatted, humanize.FormatFloat("", trial.Seconds()))
	}
	return
}