github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/ccl/storageccl/bench_test.go

// Copyright 2017 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt

package storageccl_test

import (
	"context"
	"fmt"
	"path/filepath"
	"strconv"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/ccl/utilccl/sampledataccl"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/storage"
	"github.com/cockroachdb/cockroach/pkg/storage/cloud"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/protoutil"
	"github.com/cockroachdb/cockroach/pkg/workload/bank"
)

// BenchmarkAddSSTable measures ingestion throughput of the AddSSTable
// request: each iteration builds an SSTable of bank-workload KVs in memory
// with the timer stopped, then links it into the store, timing only the
// AddSSTable call itself.
func BenchmarkAddSSTable(b *testing.B) {
	tempDir, dirCleanupFn := testutils.TempDir(b)
	defer dirCleanupFn()

	for _, numEntries := range []int{100, 1000, 10000, 300000} {
		b.Run(fmt.Sprintf("numEntries=%d", numEntries), func(b *testing.B) {
			bankData := bank.FromRows(numEntries).Tables()[0]
			backupDir := filepath.Join(tempDir, strconv.Itoa(numEntries))
			backup, err := sampledataccl.ToBackup(b, bankData, backupDir)
			if err != nil {
				b.Fatalf("%+v", err)
			}

			ctx := context.Background()
			tc := testcluster.StartTestCluster(b, 3, base.TestClusterArgs{})
			defer tc.Stopper().Stop(ctx)
			kvDB := tc.Server(0).DB()

			id := sqlbase.ID(keys.MinUserDescID)

			var totalLen int64
			b.StopTimer()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				sstFile := &storage.MemFile{}
				sst := storage.MakeBackupSSTWriter(sstFile)

				id++
				backup.ResetKeyValueIteration()
				kvs, span, err := backup.NextKeyValues(numEntries, id)
				if err != nil {
					b.Fatalf("%+v", err)
				}
				for _, kv := range kvs {
					if err := sst.Put(kv.Key, kv.Value); err != nil {
						b.Fatalf("%+v", err)
					}
				}
				if err := sst.Finish(); err != nil {
					b.Fatalf("%+v", err)
				}
				sst.Close()
				data := sstFile.Data()
				totalLen += int64(len(data))

				b.StartTimer()
				if err := kvDB.AddSSTable(
					ctx, span.Key, span.EndKey, data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
				); err != nil {
					b.Fatalf("%+v", err)
				}
				b.StopTimer()
			}
			b.SetBytes(totalLen / int64(b.N))
		})
	}
}
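// The benchmarks in this file compare three bulk-ingestion paths for the
// same bank-workload data: AddSSTable, WriteBatch, and Import. As a rough
// sketch (exact flags and package path depend on the local checkout), an
// individual benchmark can be run with something like:
//
//	go test -run=- -bench=BenchmarkAddSSTable ./pkg/ccl/storageccl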
// BenchmarkWriteBatch measures ingestion throughput of the WriteBatch
// request: each iteration encodes the bank-workload KVs as a RocksDB batch
// repr with the timer stopped, then applies it, timing only the WriteBatch
// call itself.
func BenchmarkWriteBatch(b *testing.B) {
	tempDir, dirCleanupFn := testutils.TempDir(b)
	defer dirCleanupFn()

	for _, numEntries := range []int{100, 1000, 10000} {
		b.Run(fmt.Sprintf("numEntries=%d", numEntries), func(b *testing.B) {
			bankData := bank.FromRows(numEntries).Tables()[0]
			backupDir := filepath.Join(tempDir, strconv.Itoa(numEntries))
			backup, err := sampledataccl.ToBackup(b, bankData, backupDir)
			if err != nil {
				b.Fatalf("%+v", err)
			}

			ctx := context.Background()
			tc := testcluster.StartTestCluster(b, 3, base.TestClusterArgs{})
			defer tc.Stopper().Stop(ctx)
			kvDB := tc.Server(0).DB()

			id := sqlbase.ID(keys.MinUserDescID)
			var batch storage.RocksDBBatchBuilder

			var totalLen int64
			b.StopTimer()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				id++
				backup.ResetKeyValueIteration()
				kvs, span, err := backup.NextKeyValues(numEntries, id)
				if err != nil {
					b.Fatalf("%+v", err)
				}
				for _, kv := range kvs {
					batch.Put(kv.Key, kv.Value)
				}
				repr := batch.Finish()
				totalLen += int64(len(repr))

				b.StartTimer()
				if err := kvDB.WriteBatch(ctx, span.Key, span.EndKey, repr); err != nil {
					b.Fatalf("%+v", err)
				}
				b.StopTimer()
			}
			b.SetBytes(totalLen / int64(b.N))
		})
	}
}

// BenchmarkImport measures end-to-end throughput of the Import request,
// which reads backup files from external storage, rekeys them to a new
// table ID, and ingests the result.
func BenchmarkImport(b *testing.B) {
	tempDir, dirCleanupFn := testutils.TempDir(b)
	defer dirCleanupFn()

	args := base.TestClusterArgs{}
	args.ServerArgs.ExternalIODir = tempDir

	for _, numEntries := range []int{1, 100, 10000, 300000} {
		b.Run(fmt.Sprintf("numEntries=%d", numEntries), func(b *testing.B) {
			bankData := bank.FromRows(numEntries).Tables()[0]
			subdir := strconv.Itoa(numEntries)
			backupDir := filepath.Join(tempDir, subdir)
			backup, err := sampledataccl.ToBackup(b, bankData, backupDir)
			if err != nil {
				b.Fatalf("%+v", err)
			}
			storage, err := cloud.ExternalStorageConfFromURI(`nodelocal://0/` + subdir)
			if err != nil {
				b.Fatalf("%+v", err)
			}

			ctx := context.Background()
			tc := testcluster.StartTestCluster(b, 3, args)
			defer tc.Stopper().Stop(ctx)
			kvDB := tc.Server(0).DB()

			id := sqlbase.ID(keys.MinUserDescID)

			var totalLen int64
			b.StopTimer()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				id++
				var rekeys []roachpb.ImportRequest_TableRekey
				var oldStartKey roachpb.Key
				{
					// TODO(dan): The following should probably make it into
					// dataccl.Backup somehow.
					tableDesc := backup.Desc.Descriptors[len(backup.Desc.Descriptors)-1].Table(hlc.Timestamp{})
					if tableDesc == nil || tableDesc.ParentID == keys.SystemDatabaseID {
						b.Fatalf("bad table descriptor: %+v", tableDesc)
					}
					oldStartKey = sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, tableDesc, tableDesc.PrimaryIndex.ID)
					newDesc := *tableDesc
					newDesc.ID = id
					newDescBytes, err := protoutil.Marshal(sqlbase.WrapDescriptor(&newDesc))
					if err != nil {
						panic(err)
					}
					rekeys = append(rekeys, roachpb.ImportRequest_TableRekey{
						OldID: uint32(tableDesc.ID), NewDesc: newDescBytes,
					})
				}
				newStartKey := keys.SystemSQLCodec.TablePrefix(uint32(id))

				b.StartTimer()
				var files []roachpb.ImportRequest_File
				for _, file := range backup.Desc.Files {
					files = append(files, roachpb.ImportRequest_File{Dir: storage, Path: file.Path})
				}
				req := &roachpb.ImportRequest{
					// Import is a point request because we don't want DistSender to split
					// it. Assume (but don't require) the entire post-rewrite span is on the
					// same range.
					RequestHeader: roachpb.RequestHeader{Key: newStartKey},
					DataSpan:      roachpb.Span{Key: oldStartKey, EndKey: oldStartKey.PrefixEnd()},
					Files:         files,
					Rekeys:        rekeys,
				}
				res, pErr := kv.SendWrapped(ctx, kvDB.NonTransactionalSender(), req)
				if pErr != nil {
					b.Fatalf("%+v", pErr.GoError())
				}
				totalLen += res.(*roachpb.ImportResponse).Imported.DataSize
				b.StopTimer()
			}
			b.SetBytes(totalLen / int64(b.N))
		})
	}
}
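// A note on interpreting the output: each benchmark calls b.SetBytes with
// the average per-iteration payload it recorded (SST bytes for AddSSTable,
// batch repr bytes for WriteBatch, and the ImportResponse's DataSize for
// Import), so the reported MB/s compares request-payload throughput rather
// than on-disk or replicated data sizes.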