github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/rowexec/tablereader_test.go

// Copyright 2016 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package rowexec

import (
	"context"
	"fmt"
	"regexp"
	"sort"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
	"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/testutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/distsqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/util/encoding"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/log"
	"github.com/cockroachdb/cockroach/pkg/util/tracing"
	"github.com/gogo/protobuf/types"
	"github.com/opentracing/opentracing-go"
)

func TestTableReader(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)

	// Create a table where each row is:
	//
	//  | a        | b        | sum                 | s                   |
	//  |-----------------------------------------------------------------|
	//  | rowId/10 | rowId%10 | rowId/10 + rowId%10 | IntToEnglish(rowId) |

	aFn := func(row int) tree.Datum {
		return tree.NewDInt(tree.DInt(row / 10))
	}
	bFn := func(row int) tree.Datum {
		return tree.NewDInt(tree.DInt(row % 10))
	}
	sumFn := func(row int) tree.Datum {
		return tree.NewDInt(tree.DInt(row/10 + row%10))
	}

	sqlutils.CreateTable(t, sqlDB, "t",
		"a INT, b INT, sum INT, s STRING, PRIMARY KEY (a,b), INDEX bs (b,s)",
		99,
		sqlutils.ToRowFn(aFn, bFn, sumFn, sqlutils.RowEnglishFn))

	td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t")

	makeIndexSpan := func(start, end int) execinfrapb.TableReaderSpan {
		var span roachpb.Span
		prefix := roachpb.Key(sqlbase.MakeIndexKeyPrefix(keys.SystemSQLCodec, td, td.Indexes[0].ID))
		span.Key = append(prefix, encoding.EncodeVarintAscending(nil, int64(start))...)
		span.EndKey = append(span.EndKey, prefix...)
		span.EndKey = append(span.EndKey, encoding.EncodeVarintAscending(nil, int64(end))...)
		return execinfrapb.TableReaderSpan{Span: span}
	}
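
	// The cases below scan either the primary index or the secondary index bs
	// (selected via IndexIdx: 1). For example, makeIndexSpan(4, 6) covers the
	// bs entries whose first column b is 4 or 5; the reverse-scan case reads
	// them from b=5 down to b=4, as its expected output shows.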
	testCases := []struct {
		spec     execinfrapb.TableReaderSpec
		post     execinfrapb.PostProcessSpec
		expected string
	}{
		{
			spec: execinfrapb.TableReaderSpec{
				Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}},
			},
			post: execinfrapb.PostProcessSpec{
				Filter:        execinfrapb.Expression{Expr: "@3 < 5 AND @2 != 3"}, // sum < 5 && b != 3
				Projection:    true,
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[0 1] [0 2] [0 4] [1 0] [1 1] [1 2] [2 0] [2 1] [2 2] [3 0] [3 1] [4 0]]",
		},
		{
			spec: execinfrapb.TableReaderSpec{
				Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}},
			},
			post: execinfrapb.PostProcessSpec{
				Filter:        execinfrapb.Expression{Expr: "@3 < 5 AND @2 != 3"},
				Projection:    true,
				OutputColumns: []uint32{3}, // s
				Limit:         4,
			},
			expected: "[['one'] ['two'] ['four'] ['one-zero']]",
		},
		{
			spec: execinfrapb.TableReaderSpec{
				IndexIdx:  1,
				Reverse:   true,
				Spans:     []execinfrapb.TableReaderSpan{makeIndexSpan(4, 6)},
				LimitHint: 1,
			},
			post: execinfrapb.PostProcessSpec{
				Filter:        execinfrapb.Expression{Expr: "@1 < 3"}, // a < 3
				Projection:    true,
				OutputColumns: []uint32{0, 1},
			},
			expected: "[[2 5] [1 5] [0 5] [2 4] [1 4] [0 4]]",
		},
	}

	for _, c := range testCases {
		t.Run("", func(t *testing.T) {
			testutils.RunTrueAndFalse(t, "row-source", func(t *testing.T, rowSource bool) {
				ts := c.spec
				ts.Table = *td

				evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings())
				defer evalCtx.Stop(ctx)
				flowCtx := execinfra.FlowCtx{
					EvalCtx: &evalCtx,
					Cfg:     &execinfra.ServerConfig{Settings: s.ClusterSettings()},
					Txn:     kv.NewTxn(ctx, s.DB(), s.NodeID()),
					NodeID:  evalCtx.NodeID,
				}

				var out execinfra.RowReceiver
				var buf *distsqlutils.RowBuffer
				if !rowSource {
					buf = &distsqlutils.RowBuffer{}
					out = buf
				}
				tr, err := newTableReader(&flowCtx, 0 /* processorID */, &ts, &c.post, out)
				if err != nil {
					t.Fatal(err)
				}

				var results execinfra.RowSource
				if rowSource {
					tr.Start(ctx)
					results = tr
				} else {
					tr.Run(ctx)
					if !buf.ProducerClosed() {
						t.Fatalf("output RowReceiver not closed")
					}
					buf.Start(ctx)
					results = buf
				}

				var res sqlbase.EncDatumRows
				for {
					row, meta := results.Next()
					if meta != nil && meta.LeafTxnFinalState == nil && meta.Metrics == nil {
						t.Fatalf("unexpected metadata: %+v", meta)
					}
					if row == nil {
						break
					}
					res = append(res, row.Copy())
				}
				if result := res.String(tr.OutputTypes()); result != c.expected {
					t.Errorf("invalid results: %s, expected %s", result, c.expected)
				}
			})
		})
	}
}

// Test that a TableReader outputs metadata about non-local ranges that it read.
func TestMisplannedRangesMetadata(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	tc := serverutils.StartTestCluster(t, 3, /* numNodes */
		base.TestClusterArgs{
			ReplicationMode: base.ReplicationManual,
			ServerArgs: base.TestServerArgs{
				UseDatabase: "test",
			},
		})
	defer tc.Stopper().Stop(ctx)

	db := tc.ServerConn(0)
	sqlutils.CreateTable(t, db, "t",
		"num INT PRIMARY KEY",
		3, /* numRows */
		sqlutils.ToRowFn(sqlutils.RowIdxFn))

	_, err := db.Exec(`
ALTER TABLE t SPLIT AT VALUES (1), (2), (3);
ALTER TABLE t EXPERIMENTAL_RELOCATE VALUES (ARRAY[2], 1), (ARRAY[1], 2), (ARRAY[3], 3);
`)
	if err != nil {
		t.Fatal(err)
	}
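
	// The flow below is planned on node 1 (server 0), while the relocation
	// above moves the ranges containing rows 1 and 3 (and their leases) to
	// nodes 2 and 3, so reading the whole table should produce metadata about
	// exactly two misplanned ranges.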
	kvDB := tc.Server(0).DB()
	td := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t")

	st := tc.Server(0).ClusterSettings()
	evalCtx := tree.MakeTestingEvalContext(st)
	defer evalCtx.Stop(ctx)

	flowCtx := execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg:     &execinfra.ServerConfig{Settings: st},
		Txn:     kv.NewTxn(ctx, tc.Server(0).DB(), tc.Server(0).NodeID()),
		NodeID:  evalCtx.NodeID,
	}
	spec := execinfrapb.TableReaderSpec{
		Spans: []execinfrapb.TableReaderSpan{{Span: td.PrimaryIndexSpan(keys.SystemSQLCodec)}},
		Table: *td,
	}
	post := execinfrapb.PostProcessSpec{
		Projection:    true,
		OutputColumns: []uint32{0},
	}

	testutils.RunTrueAndFalse(t, "row-source", func(t *testing.T, rowSource bool) {
		var out execinfra.RowReceiver
		var buf *distsqlutils.RowBuffer
		if !rowSource {
			buf = &distsqlutils.RowBuffer{}
			out = buf
		}
		tr, err := newTableReader(&flowCtx, 0 /* processorID */, &spec, &post, out)
		if err != nil {
			t.Fatal(err)
		}

		var results execinfra.RowSource
		if rowSource {
			tr.Start(ctx)
			results = tr
		} else {
			tr.Run(ctx)
			if !buf.ProducerClosed() {
				t.Fatalf("output RowReceiver not closed")
			}
			buf.Start(ctx)
			results = buf
		}

		var res sqlbase.EncDatumRows
		var metas []*execinfrapb.ProducerMetadata
		for {
			row, meta := results.Next()
			if meta != nil {
				metas = append(metas, meta)
				continue
			}
			if row == nil {
				break
			}
			res = append(res, row)
		}

		if len(res) != 3 {
			t.Fatalf("expected 3 rows, got: %d", len(res))
		}
		var misplannedRanges []roachpb.RangeInfo
		for _, m := range metas {
			if len(m.Ranges) > 0 {
				misplannedRanges = m.Ranges
			} else if m.LeafTxnFinalState == nil && m.Metrics == nil {
				t.Fatalf("expected only txn coord meta, metrics, or misplanned ranges, got: %+v", metas)
			}
		}
		if len(misplannedRanges) != 2 {
			t.Fatalf("expected 2 misplanned ranges, got: %+v", misplannedRanges)
		}
		// The metadata about misplanned ranges can come in any order (it depends
		// on the order in which parallel sub-batches complete after having been
		// split by DistSender).
		sort.Slice(misplannedRanges, func(i, j int) bool {
			return misplannedRanges[i].Lease.Replica.NodeID < misplannedRanges[j].Lease.Replica.NodeID
		})
		if misplannedRanges[0].Lease.Replica.NodeID != 2 ||
			misplannedRanges[1].Lease.Replica.NodeID != 3 {
			t.Fatalf("expected misplanned ranges from nodes 2 and 3, got: %+v", metas[0])
		}
	})
}

// Test that a scan with a limit doesn't touch more ranges than necessary (i.e.
// we properly set the limit on the underlying Fetcher/KVFetcher).
func TestLimitScans(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{
		UseDatabase: "test",
	})
	defer s.Stopper().Stop(ctx)

	sqlutils.CreateTable(t, sqlDB, "t",
		"num INT PRIMARY KEY",
		100, /* numRows */
		sqlutils.ToRowFn(sqlutils.RowIdxFn))

	if _, err := sqlDB.Exec("ALTER TABLE t SPLIT AT VALUES (5)"); err != nil {
		t.Fatal(err)
	}
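
	// The split leaves the table in two ranges (keys below num=5 and keys at or
	// above it), so a scan limited to 3 rows should be satisfiable from the
	// first range alone.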
	tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t")

	evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings())
	defer evalCtx.Stop(ctx)
	flowCtx := execinfra.FlowCtx{
		EvalCtx: &evalCtx,
		Cfg:     &execinfra.ServerConfig{Settings: s.ClusterSettings()},
		Txn:     kv.NewTxn(ctx, kvDB, s.NodeID()),
		NodeID:  evalCtx.NodeID,
	}
	spec := execinfrapb.TableReaderSpec{
		Table: *tableDesc,
		Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}},
	}
	// We're going to ask for 3 rows, all contained in the first range.
	const limit = 3
	post := execinfrapb.PostProcessSpec{Limit: limit}

	// Now we're going to run the tableReader and trace it.
	tracer := tracing.NewTracer()
	sp := tracer.StartSpan("root", tracing.Recordable)
	tracing.StartRecording(sp, tracing.SnowballRecording)
	ctx = opentracing.ContextWithSpan(ctx, sp)
	flowCtx.EvalCtx.Context = ctx

	tr, err := newTableReader(&flowCtx, 0 /* processorID */, &spec, &post, nil /* output */)
	if err != nil {
		t.Fatal(err)
	}

	tr.Start(ctx)
	rows := 0
	for {
		row, meta := tr.Next()
		if row != nil {
			rows++
		}

		// Simulate what the DistSQLReceiver does and ingest the trace.
		if meta != nil && len(meta.TraceData) > 0 {
			if err := tracing.ImportRemoteSpans(sp, meta.TraceData); err != nil {
				t.Fatal(err)
			}
		}

		if row == nil && meta == nil {
			break
		}
	}
	if rows != limit {
		t.Fatalf("expected %d rows, got: %d", limit, rows)
	}
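
	// The tableReader attaches its execution stats to its tracing span, so the
	// recording collected above should report exactly `limit` rows read.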
	// We're now going to count how many distinct scans we've done. This regex is
	// specific so that we don't count range resolving requests, and we dedupe
	// scans from the same key as the DistSender retries scans when it detects
	// splits.
	re := regexp.MustCompile(fmt.Sprintf(`querying next range at /Table/%d/1(\S.*)?`, tableDesc.ID))
	spans := tracing.GetRecording(sp)
	ranges := make(map[string]struct{})
	for _, span := range spans {
		if span.Operation == tableReaderProcName {
			// Verify that stat collection lines up with results.
			trs := TableReaderStats{}
			if err := types.UnmarshalAny(span.Stats, &trs); err != nil {
				t.Fatal(err)
			}
			if trs.InputStats.NumRows != limit {
				t.Fatalf("read %d rows, but stats only counted: %d", limit, trs.InputStats.NumRows)
			}
		}
		for _, l := range span.Logs {
			for _, f := range l.Fields {
				match := re.FindStringSubmatch(f.Value)
				if match == nil {
					continue
				}
				ranges[match[1]] = struct{}{}
			}
		}
	}
	if len(ranges) != 1 {
		t.Fatalf("expected one range scanned, got: %d (%+v)", len(ranges), ranges)
	}
}

func BenchmarkTableReader(b *testing.B) {
	defer leaktest.AfterTest(b)()
	logScope := log.Scope(b)
	defer logScope.Close(b)
	ctx := context.Background()

	s, sqlDB, kvDB := serverutils.StartServer(b, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)

	evalCtx := tree.MakeTestingEvalContext(s.ClusterSettings())
	defer evalCtx.Stop(ctx)

	const numCols = 2
	for _, numRows := range []int{1 << 4, 1 << 8, 1 << 12, 1 << 16} {
		tableName := fmt.Sprintf("t%d", numRows)
		sqlutils.CreateTable(
			b, sqlDB, tableName,
			"k INT PRIMARY KEY, v INT",
			numRows,
			sqlutils.ToRowFn(sqlutils.RowIdxFn, sqlutils.RowModuloFn(42)),
		)
		tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName)
		flowCtx := execinfra.FlowCtx{
			EvalCtx: &evalCtx,
			Cfg:     &execinfra.ServerConfig{Settings: s.ClusterSettings()},
			Txn:     kv.NewTxn(ctx, s.DB(), s.NodeID()),
			NodeID:  evalCtx.NodeID,
		}

		b.Run(fmt.Sprintf("rows=%d", numRows), func(b *testing.B) {
			spec := execinfrapb.TableReaderSpec{
				Table: *tableDesc,
				Spans: []execinfrapb.TableReaderSpan{{Span: tableDesc.PrimaryIndexSpan(keys.SystemSQLCodec)}},
			}
			post := execinfrapb.PostProcessSpec{}

			b.SetBytes(int64(numRows * numCols * 8))
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				tr, err := newTableReader(&flowCtx, 0 /* processorID */, &spec, &post, nil /* output */)
				if err != nil {
					b.Fatal(err)
				}
				tr.Start(ctx)
				count := 0
				for {
					row, meta := tr.Next()
					if meta != nil && meta.LeafTxnFinalState == nil && meta.Metrics == nil {
						b.Fatalf("unexpected metadata: %+v", meta)
					}
					if row != nil {
						count++
					} else if meta == nil {
						break
					}
				}
				if count != numRows {
					b.Fatalf("found %d rows, expected %d", count, numRows)
				}
			}
		})
	}
}
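
// A typical way to run only the benchmark above with the standard Go tooling
// (Cockroach's own build tooling may wrap or replace this invocation):
//
//	go test ./pkg/sql/rowexec -run '^$' -bench BenchmarkTableReader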