github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/crdb_internal.go (about) 1 // Copyright 2017 The Cockroach Authors. 2 // 3 // Use of this software is governed by the Business Source License 4 // included in the file licenses/BSL.txt. 5 // 6 // As of the Change Date specified in that file, in accordance with 7 // the Business Source License, use of this software will be governed 8 // by the Apache License, Version 2.0, included in the file 9 // licenses/APL.txt. 10 11 package sql 12 13 import ( 14 "bytes" 15 "context" 16 "fmt" 17 "net" 18 "net/url" 19 "sort" 20 "strings" 21 "time" 22 23 "github.com/cockroachdb/cockroach/pkg/base" 24 "github.com/cockroachdb/cockroach/pkg/build" 25 "github.com/cockroachdb/cockroach/pkg/clusterversion" 26 "github.com/cockroachdb/cockroach/pkg/config/zonepb" 27 "github.com/cockroachdb/cockroach/pkg/gossip" 28 "github.com/cockroachdb/cockroach/pkg/jobs" 29 "github.com/cockroachdb/cockroach/pkg/keys" 30 "github.com/cockroachdb/cockroach/pkg/kv" 31 "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverpb" 32 "github.com/cockroachdb/cockroach/pkg/roachpb" 33 "github.com/cockroachdb/cockroach/pkg/security" 34 "github.com/cockroachdb/cockroach/pkg/server/serverpb" 35 "github.com/cockroachdb/cockroach/pkg/server/status/statuspb" 36 "github.com/cockroachdb/cockroach/pkg/server/telemetry" 37 "github.com/cockroachdb/cockroach/pkg/settings" 38 "github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror" 39 "github.com/cockroachdb/cockroach/pkg/sql/schemaexpr" 40 "github.com/cockroachdb/cockroach/pkg/sql/sem/builtins" 41 "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" 42 "github.com/cockroachdb/cockroach/pkg/sql/sessiondata" 43 "github.com/cockroachdb/cockroach/pkg/sql/sqlbase" 44 "github.com/cockroachdb/cockroach/pkg/sql/types" 45 "github.com/cockroachdb/cockroach/pkg/util/hlc" 46 "github.com/cockroachdb/cockroach/pkg/util/json" 47 "github.com/cockroachdb/cockroach/pkg/util/log" 48 "github.com/cockroachdb/cockroach/pkg/util/protoutil" 49 "github.com/cockroachdb/cockroach/pkg/util/timeutil" 50 "github.com/cockroachdb/errors" 51 ) 52 53 const crdbInternalName = sessiondata.CRDBInternalSchemaName 54 55 // Naming convention: 56 // - if the response is served from memory, prefix with node_ 57 // - if the response is served via a kv request, prefix with kv_ 58 // - if the response is not from kv requests but is cluster-wide (i.e. the 59 // answer isn't specific to the sql connection being used, prefix with cluster_. 60 // 61 // Adding something new here will require an update to `pkg/cli` for inclusion in 62 // a `debug zip`; the unit tests will guide you. 63 // 64 // Many existing tables don't follow the conventions above, but please apply 65 // them to future additions. 
66 var crdbInternal = virtualSchema{ 67 name: crdbInternalName, 68 tableDefs: map[sqlbase.ID]virtualSchemaDef{ 69 sqlbase.CrdbInternalBackwardDependenciesTableID: crdbInternalBackwardDependenciesTable, 70 sqlbase.CrdbInternalBuildInfoTableID: crdbInternalBuildInfoTable, 71 sqlbase.CrdbInternalBuiltinFunctionsTableID: crdbInternalBuiltinFunctionsTable, 72 sqlbase.CrdbInternalClusterQueriesTableID: crdbInternalClusterQueriesTable, 73 sqlbase.CrdbInternalClusterTransactionsTableID: crdbInternalClusterTxnsTable, 74 sqlbase.CrdbInternalClusterSessionsTableID: crdbInternalClusterSessionsTable, 75 sqlbase.CrdbInternalClusterSettingsTableID: crdbInternalClusterSettingsTable, 76 sqlbase.CrdbInternalCreateStmtsTableID: crdbInternalCreateStmtsTable, 77 sqlbase.CrdbInternalCreateTypeStmtsTableID: crdbInternalCreateTypeStmtsTable, 78 sqlbase.CrdbInternalFeatureUsageID: crdbInternalFeatureUsage, 79 sqlbase.CrdbInternalForwardDependenciesTableID: crdbInternalForwardDependenciesTable, 80 sqlbase.CrdbInternalGossipNodesTableID: crdbInternalGossipNodesTable, 81 sqlbase.CrdbInternalGossipAlertsTableID: crdbInternalGossipAlertsTable, 82 sqlbase.CrdbInternalGossipLivenessTableID: crdbInternalGossipLivenessTable, 83 sqlbase.CrdbInternalGossipNetworkTableID: crdbInternalGossipNetworkTable, 84 sqlbase.CrdbInternalIndexColumnsTableID: crdbInternalIndexColumnsTable, 85 sqlbase.CrdbInternalJobsTableID: crdbInternalJobsTable, 86 sqlbase.CrdbInternalKVNodeStatusTableID: crdbInternalKVNodeStatusTable, 87 sqlbase.CrdbInternalKVStoreStatusTableID: crdbInternalKVStoreStatusTable, 88 sqlbase.CrdbInternalLeasesTableID: crdbInternalLeasesTable, 89 sqlbase.CrdbInternalLocalQueriesTableID: crdbInternalLocalQueriesTable, 90 sqlbase.CrdbInternalLocalTransactionsTableID: crdbInternalLocalTxnsTable, 91 sqlbase.CrdbInternalLocalSessionsTableID: crdbInternalLocalSessionsTable, 92 sqlbase.CrdbInternalLocalMetricsTableID: crdbInternalLocalMetricsTable, 93 sqlbase.CrdbInternalPartitionsTableID: crdbInternalPartitionsTable, 94 sqlbase.CrdbInternalPredefinedCommentsTableID: crdbInternalPredefinedCommentsTable, 95 sqlbase.CrdbInternalRangesNoLeasesTableID: crdbInternalRangesNoLeasesTable, 96 sqlbase.CrdbInternalRangesViewID: crdbInternalRangesView, 97 sqlbase.CrdbInternalRuntimeInfoTableID: crdbInternalRuntimeInfoTable, 98 sqlbase.CrdbInternalSchemaChangesTableID: crdbInternalSchemaChangesTable, 99 sqlbase.CrdbInternalSessionTraceTableID: crdbInternalSessionTraceTable, 100 sqlbase.CrdbInternalSessionVariablesTableID: crdbInternalSessionVariablesTable, 101 sqlbase.CrdbInternalStmtStatsTableID: crdbInternalStmtStatsTable, 102 sqlbase.CrdbInternalTableColumnsTableID: crdbInternalTableColumnsTable, 103 sqlbase.CrdbInternalTableIndexesTableID: crdbInternalTableIndexesTable, 104 sqlbase.CrdbInternalTablesTableID: crdbInternalTablesTable, 105 sqlbase.CrdbInternalTxnStatsTableID: crdbInternalTxnStatsTable, 106 sqlbase.CrdbInternalZonesTableID: crdbInternalZonesTable, 107 }, 108 validWithNoDatabaseContext: true, 109 } 110 111 var crdbInternalBuildInfoTable = virtualSchemaTable{ 112 comment: `detailed identification strings (RAM, local node only)`, 113 schema: ` 114 CREATE TABLE crdb_internal.node_build_info ( 115 node_id INT NOT NULL, 116 field STRING NOT NULL, 117 value STRING NOT NULL 118 )`, 119 populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 120 execCfg := p.ExecCfg() 121 nodeID, _ := execCfg.NodeID.OptionalNodeID() // zero if not available 122 123 info := build.GetInfo() 124 for 
k, v := range map[string]string{ 125 "Name": "CockroachDB", 126 "ClusterID": execCfg.ClusterID().String(), 127 "Organization": execCfg.Organization(), 128 "Build": info.Short(), 129 "Version": info.Tag, 130 "Channel": info.Channel, 131 } { 132 if err := addRow( 133 tree.NewDInt(tree.DInt(nodeID)), 134 tree.NewDString(k), 135 tree.NewDString(v), 136 ); err != nil { 137 return err 138 } 139 } 140 return nil 141 }, 142 } 143 144 var crdbInternalRuntimeInfoTable = virtualSchemaTable{ 145 comment: `server parameters, useful to construct connection URLs (RAM, local node only)`, 146 schema: ` 147 CREATE TABLE crdb_internal.node_runtime_info ( 148 node_id INT NOT NULL, 149 component STRING NOT NULL, 150 field STRING NOT NULL, 151 value STRING NOT NULL 152 )`, 153 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 154 if err := p.RequireAdminRole(ctx, "access the node runtime information"); err != nil { 155 return err 156 } 157 158 node := p.ExecCfg().NodeInfo 159 160 nodeID := tree.NewDInt(tree.DInt(int64(node.NodeID.Get()))) 161 dbURL, err := node.PGURL(url.User(security.RootUser)) 162 if err != nil { 163 return err 164 } 165 166 for _, item := range []struct { 167 component string 168 url *url.URL 169 }{ 170 {"DB", dbURL}, {"UI", node.AdminURL()}, 171 } { 172 var user string 173 if item.url.User != nil { 174 user = item.url.User.String() 175 } 176 host, port, err := net.SplitHostPort(item.url.Host) 177 if err != nil { 178 return err 179 } 180 for _, kv := range [][2]string{ 181 {"URL", item.url.String()}, 182 {"Scheme", item.url.Scheme}, 183 {"User", user}, 184 {"Host", host}, 185 {"Port", port}, 186 {"URI", item.url.RequestURI()}, 187 } { 188 k, v := kv[0], kv[1] 189 if err := addRow( 190 nodeID, 191 tree.NewDString(item.component), 192 tree.NewDString(k), 193 tree.NewDString(v), 194 ); err != nil { 195 return err 196 } 197 } 198 } 199 return nil 200 }, 201 } 202 203 // TODO(tbg): prefix with kv_. 204 var crdbInternalTablesTable = virtualSchemaTable{ 205 comment: `table descriptors accessible by current user, including non-public and virtual (KV scan; expensive!)`, 206 schema: ` 207 CREATE TABLE crdb_internal.tables ( 208 table_id INT NOT NULL, 209 parent_id INT NOT NULL, 210 name STRING NOT NULL, 211 database_name STRING, 212 version INT NOT NULL, 213 mod_time TIMESTAMP NOT NULL, 214 mod_time_logical DECIMAL NOT NULL, 215 format_version STRING NOT NULL, 216 state STRING NOT NULL, 217 sc_lease_node_id INT, 218 sc_lease_expiration_time TIMESTAMP, 219 drop_time TIMESTAMP, 220 audit_mode STRING NOT NULL, 221 schema_name STRING NOT NULL 222 )`, 223 generator: func(ctx context.Context, p *planner, dbDesc *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { 224 row := make(tree.Datums, 14) 225 worker := func(pusher rowPusher) error { 226 descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) 227 if err != nil { 228 return err 229 } 230 dbNames := make(map[sqlbase.ID]string) 231 // Record database descriptors for name lookups. 
232 for _, desc := range descs { 233 db, ok := desc.(*sqlbase.DatabaseDescriptor) 234 if ok { 235 dbNames[db.ID] = db.Name 236 } 237 } 238 239 addDesc := func(table *sqlbase.TableDescriptor, dbName tree.Datum, scName string) error { 240 leaseNodeDatum := tree.DNull 241 leaseExpDatum := tree.DNull 242 if table.Lease != nil { 243 leaseNodeDatum = tree.NewDInt(tree.DInt(int64(table.Lease.NodeID))) 244 leaseExpDatum, err = tree.MakeDTimestamp( 245 timeutil.Unix(0, table.Lease.ExpirationTime), time.Nanosecond, 246 ) 247 if err != nil { 248 return err 249 } 250 } 251 dropTimeDatum := tree.DNull 252 if table.DropTime != 0 { 253 dropTimeDatum, err = tree.MakeDTimestamp( 254 timeutil.Unix(0, table.DropTime), time.Nanosecond, 255 ) 256 if err != nil { 257 return err 258 } 259 } 260 row = row[:0] 261 row = append(row, 262 tree.NewDInt(tree.DInt(int64(table.ID))), 263 tree.NewDInt(tree.DInt(int64(table.GetParentID()))), 264 tree.NewDString(table.Name), 265 dbName, 266 tree.NewDInt(tree.DInt(int64(table.Version))), 267 tree.TimestampToInexactDTimestamp(table.ModificationTime), 268 tree.TimestampToDecimal(table.ModificationTime), 269 tree.NewDString(table.FormatVersion.String()), 270 tree.NewDString(table.State.String()), 271 leaseNodeDatum, 272 leaseExpDatum, 273 dropTimeDatum, 274 tree.NewDString(table.AuditMode.String()), 275 tree.NewDString(scName), 276 ) 277 return pusher.pushRow(row...) 278 } 279 280 // Note: we do not use forEachTableDesc() here because we want to 281 // include added and dropped descriptors. 282 for _, desc := range descs { 283 table, ok := desc.(*sqlbase.TableDescriptor) 284 if !ok || p.CheckAnyPrivilege(ctx, table) != nil { 285 continue 286 } 287 dbName := dbNames[table.GetParentID()] 288 if dbName == "" { 289 // The parent database was deleted. This is possible e.g. when 290 // a database is dropped with CASCADE, and someone queries 291 // this virtual table before the dropped table descriptors are 292 // effectively deleted. 293 dbName = fmt.Sprintf("[%d]", table.GetParentID()) 294 } 295 if err := addDesc(table, tree.NewDString(dbName), "public"); err != nil { 296 return err 297 } 298 } 299 300 // Also add all the virtual descriptors. 301 vt := p.getVirtualTabler() 302 vEntries := vt.getEntries() 303 for _, virtSchemaName := range vt.getSchemaNames() { 304 e := vEntries[virtSchemaName] 305 for _, tName := range e.orderedDefNames { 306 vTableEntry := e.defs[tName] 307 if err := addDesc(vTableEntry.desc, tree.DNull, virtSchemaName); err != nil { 308 return err 309 } 310 } 311 } 312 return nil 313 } 314 next, cleanup := setupGenerator(ctx, worker) 315 return next, cleanup, nil 316 }, 317 } 318 319 // TODO(tbg): prefix with kv_. 320 var crdbInternalSchemaChangesTable = virtualSchemaTable{ 321 comment: `ongoing schema changes, across all descriptors accessible by current user (KV scan; expensive!)`, 322 schema: ` 323 CREATE TABLE crdb_internal.schema_changes ( 324 table_id INT NOT NULL, 325 parent_id INT NOT NULL, 326 name STRING NOT NULL, 327 type STRING NOT NULL, 328 target_id INT, 329 target_name STRING, 330 state STRING NOT NULL, 331 direction STRING NOT NULL 332 )`, 333 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 334 descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) 335 if err != nil { 336 return err 337 } 338 // Note: we do not use forEachTableDesc() here because we want to 339 // include added and dropped descriptors. 
340 for _, desc := range descs { 341 table, ok := desc.(*sqlbase.TableDescriptor) 342 if !ok || p.CheckAnyPrivilege(ctx, table) != nil { 343 continue 344 } 345 tableID := tree.NewDInt(tree.DInt(int64(table.ID))) 346 parentID := tree.NewDInt(tree.DInt(int64(table.GetParentID()))) 347 tableName := tree.NewDString(table.Name) 348 for _, mut := range table.Mutations { 349 mutType := "UNKNOWN" 350 targetID := tree.DNull 351 targetName := tree.DNull 352 switch d := mut.Descriptor_.(type) { 353 case *sqlbase.DescriptorMutation_Column: 354 mutType = "COLUMN" 355 targetID = tree.NewDInt(tree.DInt(int64(d.Column.ID))) 356 targetName = tree.NewDString(d.Column.Name) 357 case *sqlbase.DescriptorMutation_Index: 358 mutType = "INDEX" 359 targetID = tree.NewDInt(tree.DInt(int64(d.Index.ID))) 360 targetName = tree.NewDString(d.Index.Name) 361 case *sqlbase.DescriptorMutation_Constraint: 362 mutType = "CONSTRAINT VALIDATION" 363 targetName = tree.NewDString(d.Constraint.Name) 364 } 365 if err := addRow( 366 tableID, 367 parentID, 368 tableName, 369 tree.NewDString(mutType), 370 targetID, 371 targetName, 372 tree.NewDString(mut.State.String()), 373 tree.NewDString(mut.Direction.String()), 374 ); err != nil { 375 return err 376 } 377 } 378 } 379 return nil 380 }, 381 } 382 383 // TODO(tbg): prefix with node_. 384 var crdbInternalLeasesTable = virtualSchemaTable{ 385 comment: `acquired table leases (RAM; local node only)`, 386 schema: ` 387 CREATE TABLE crdb_internal.leases ( 388 node_id INT NOT NULL, 389 table_id INT NOT NULL, 390 name STRING NOT NULL, 391 parent_id INT NOT NULL, 392 expiration TIMESTAMP NOT NULL, 393 deleted BOOL NOT NULL 394 )`, 395 populate: func( 396 ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error, 397 ) (err error) { 398 nodeID := tree.NewDInt(tree.DInt(int64(p.execCfg.NodeID.Get()))) 399 p.LeaseMgr().VisitLeases(func(desc sqlbase.TableDescriptor, dropped bool, _ int, expiration tree.DTimestamp) (wantMore bool) { 400 if p.CheckAnyPrivilege(ctx, &desc) != nil { 401 // TODO(ajwerner): inspect what type of error got returned. 402 return true 403 } 404 405 err = addRow( 406 nodeID, 407 tree.NewDInt(tree.DInt(int64(desc.ID))), 408 tree.NewDString(desc.Name), 409 tree.NewDInt(tree.DInt(int64(desc.ParentID))), 410 &expiration, 411 tree.MakeDBool(tree.DBool(dropped)), 412 ) 413 return err == nil 414 }) 415 return err 416 }, 417 } 418 419 func tsOrNull(micros int64) (tree.Datum, error) { 420 if micros == 0 { 421 return tree.DNull, nil 422 } 423 ts := timeutil.Unix(0, micros*time.Microsecond.Nanoseconds()) 424 return tree.MakeDTimestamp(ts, time.Microsecond) 425 } 426 427 // TODO(tbg): prefix with kv_. 
428 var crdbInternalJobsTable = virtualSchemaTable{ 429 schema: ` 430 CREATE TABLE crdb_internal.jobs ( 431 job_id INT, 432 job_type STRING, 433 description STRING, 434 statement STRING, 435 user_name STRING, 436 descriptor_ids INT[], 437 status STRING, 438 running_status STRING, 439 created TIMESTAMP, 440 started TIMESTAMP, 441 finished TIMESTAMP, 442 modified TIMESTAMP, 443 fraction_completed FLOAT, 444 high_water_timestamp DECIMAL, 445 error STRING, 446 coordinator_id INT 447 )`, 448 comment: `decoded job metadata from system.jobs (KV scan)`, 449 generator: func(ctx context.Context, p *planner, _ *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { 450 currentUser := p.SessionData().User 451 isAdmin, err := p.HasAdminRole(ctx) 452 if err != nil { 453 return nil, nil, err 454 } 455 456 // Beware: we're querying system.jobs as root; we need to be careful to filter 457 // out results that the current user is not able to see. 458 query := `SELECT id, status, created, payload, progress FROM system.jobs` 459 rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.QueryEx( 460 ctx, "crdb-internal-jobs-table", p.txn, 461 sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser}, 462 query) 463 if err != nil { 464 return nil, nil, err 465 } 466 467 // Attempt to account for the memory of the retrieved rows and the data 468 // we're going to unmarshal and keep bufferred in RAM. 469 // 470 // TODO(ajwerner): This is a pretty terrible hack. Instead the internal 471 // executor should be hooked into the memory monitor associated with this 472 // conn executor. If we did that we would still want to account for the 473 // unmarshaling. Additionally, it's probably a good idea to paginate this 474 // and other virtual table queries but that's a bigger task. 475 ba := p.ExtendedEvalContext().Mon.MakeBoundAccount() 476 defer ba.Close(ctx) 477 var totalMem int64 478 for _, r := range rows { 479 for _, d := range r { 480 totalMem += int64(d.Size()) 481 } 482 } 483 if err := ba.Grow(ctx, totalMem); err != nil { 484 return nil, nil, err 485 } 486 487 // We'll reuse this container on each loop. 488 container := make(tree.Datums, 0, 16) 489 return func() (datums tree.Datums, e error) { 490 // Loop while we need to skip a row. 491 for { 492 if len(rows) == 0 { 493 return nil, nil 494 } 495 r := rows[0] 496 rows = rows[1:] 497 id, status, created, payloadBytes, progressBytes := r[0], r[1], r[2], r[3], r[4] 498 499 var jobType, description, statement, username, descriptorIDs, started, runningStatus, 500 finished, modified, fractionCompleted, highWaterTimestamp, errorStr, leaseNode = tree.DNull, 501 tree.DNull, tree.DNull, tree.DNull, tree.DNull, tree.DNull, tree.DNull, tree.DNull, 502 tree.DNull, tree.DNull, tree.DNull, tree.DNull, tree.DNull 503 504 // Extract data from the payload. 505 payload, err := jobs.UnmarshalPayload(payloadBytes) 506 507 // We filter out masked rows before we allocate all the 508 // datums. Needless allocate when not necessary. 509 sameUser := payload != nil && payload.Username == currentUser 510 if canAccess := isAdmin || sameUser; !canAccess { 511 // This user is neither an admin nor the user who created the 512 // job. They cannot see this row. 
513 continue 514 } 515 516 if err != nil { 517 errorStr = tree.NewDString(fmt.Sprintf("error decoding payload: %v", err)) 518 } else { 519 jobType = tree.NewDString(payload.Type().String()) 520 description = tree.NewDString(payload.Description) 521 statement = tree.NewDString(payload.Statement) 522 username = tree.NewDString(payload.Username) 523 descriptorIDsArr := tree.NewDArray(types.Int) 524 for _, descID := range payload.DescriptorIDs { 525 if err := descriptorIDsArr.Append(tree.NewDInt(tree.DInt(int(descID)))); err != nil { 526 return nil, err 527 } 528 } 529 descriptorIDs = descriptorIDsArr 530 started, err = tsOrNull(payload.StartedMicros) 531 if err != nil { 532 return nil, err 533 } 534 finished, err = tsOrNull(payload.FinishedMicros) 535 if err != nil { 536 return nil, err 537 } 538 if payload.Lease != nil { 539 leaseNode = tree.NewDInt(tree.DInt(payload.Lease.NodeID)) 540 } 541 errorStr = tree.NewDString(payload.Error) 542 } 543 544 // Extract data from the progress field. 545 if progressBytes != tree.DNull { 546 progress, err := jobs.UnmarshalProgress(progressBytes) 547 if err != nil { 548 baseErr := "" 549 if s, ok := errorStr.(*tree.DString); ok { 550 baseErr = string(*s) 551 if baseErr != "" { 552 baseErr += "\n" 553 } 554 } 555 errorStr = tree.NewDString(fmt.Sprintf("%serror decoding progress: %v", baseErr, err)) 556 } else { 557 // Progress contains either fractionCompleted for traditional jobs, 558 // or the highWaterTimestamp for change feeds. 559 if highwater := progress.GetHighWater(); highwater != nil { 560 highWaterTimestamp = tree.TimestampToDecimal(*highwater) 561 } else { 562 fractionCompleted = tree.NewDFloat(tree.DFloat(progress.GetFractionCompleted())) 563 } 564 modified, err = tsOrNull(progress.ModifiedMicros) 565 if err != nil { 566 return nil, err 567 } 568 569 if len(progress.RunningStatus) > 0 { 570 if s, ok := status.(*tree.DString); ok { 571 if jobs.Status(string(*s)) == jobs.StatusRunning { 572 runningStatus = tree.NewDString(progress.RunningStatus) 573 } 574 } 575 } 576 } 577 } 578 579 container = container[:0] 580 container = append(container, 581 id, 582 jobType, 583 description, 584 statement, 585 username, 586 descriptorIDs, 587 status, 588 runningStatus, 589 created, 590 started, 591 finished, 592 modified, 593 fractionCompleted, 594 highWaterTimestamp, 595 errorStr, 596 leaseNode, 597 ) 598 return container, nil 599 } 600 }, nil, nil 601 }, 602 } 603 604 type stmtList []stmtKey 605 606 func (s stmtList) Len() int { 607 return len(s) 608 } 609 func (s stmtList) Swap(i, j int) { 610 s[i], s[j] = s[j], s[i] 611 } 612 func (s stmtList) Less(i, j int) bool { 613 return s[i].stmt < s[j].stmt 614 } 615 616 var crdbInternalStmtStatsTable = virtualSchemaTable{ 617 comment: `statement statistics (in-memory, not durable; local node only). 
` + 618 `This table is wiped periodically (by default, at least every two hours)`, 619 schema: ` 620 CREATE TABLE crdb_internal.node_statement_statistics ( 621 node_id INT NOT NULL, 622 application_name STRING NOT NULL, 623 flags STRING NOT NULL, 624 key STRING NOT NULL, 625 anonymized STRING, 626 count INT NOT NULL, 627 first_attempt_count INT NOT NULL, 628 max_retries INT NOT NULL, 629 last_error STRING, 630 rows_avg FLOAT NOT NULL, 631 rows_var FLOAT NOT NULL, 632 parse_lat_avg FLOAT NOT NULL, 633 parse_lat_var FLOAT NOT NULL, 634 plan_lat_avg FLOAT NOT NULL, 635 plan_lat_var FLOAT NOT NULL, 636 run_lat_avg FLOAT NOT NULL, 637 run_lat_var FLOAT NOT NULL, 638 service_lat_avg FLOAT NOT NULL, 639 service_lat_var FLOAT NOT NULL, 640 overhead_lat_avg FLOAT NOT NULL, 641 overhead_lat_var FLOAT NOT NULL, 642 bytes_read INT NOT NULL, 643 rows_read INT NOT NULL, 644 implicit_txn BOOL NOT NULL 645 )`, 646 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 647 if err := p.RequireAdminRole(ctx, "access application statistics"); err != nil { 648 return err 649 } 650 651 sqlStats := p.extendedEvalCtx.sqlStatsCollector.sqlStats 652 if sqlStats == nil { 653 return errors.AssertionFailedf( 654 "cannot access sql statistics from this context") 655 } 656 657 nodeID, _ := p.execCfg.NodeID.OptionalNodeID() // zero if not available 658 659 // Retrieve the application names and sort them to ensure the 660 // output is deterministic. 661 var appNames []string 662 sqlStats.Lock() 663 for n := range sqlStats.apps { 664 appNames = append(appNames, n) 665 } 666 sqlStats.Unlock() 667 sort.Strings(appNames) 668 669 // Now retrieve the application stats proper. 670 for _, appName := range appNames { 671 appStats := sqlStats.getStatsForApplication(appName) 672 673 // Retrieve the statement keys and sort them to ensure the 674 // output is deterministic. 675 var stmtKeys stmtList 676 appStats.Lock() 677 for k := range appStats.stmts { 678 stmtKeys = append(stmtKeys, k) 679 } 680 appStats.Unlock() 681 sort.Sort(stmtKeys) 682 683 // Now retrieve the per-stmt stats proper. 
684 for _, stmtKey := range stmtKeys { 685 anonymized := tree.DNull 686 anonStr, ok := scrubStmtStatKey(p.getVirtualTabler(), stmtKey.stmt) 687 if ok { 688 anonymized = tree.NewDString(anonStr) 689 } 690 691 s := appStats.getStatsForStmtWithKey(stmtKey, true /* createIfNonexistent */) 692 693 s.Lock() 694 errString := tree.DNull 695 if s.data.SensitiveInfo.LastErr != "" { 696 errString = tree.NewDString(s.data.SensitiveInfo.LastErr) 697 } 698 err := addRow( 699 tree.NewDInt(tree.DInt(nodeID)), 700 tree.NewDString(appName), 701 tree.NewDString(stmtKey.flags()), 702 tree.NewDString(stmtKey.stmt), 703 anonymized, 704 tree.NewDInt(tree.DInt(s.data.Count)), 705 tree.NewDInt(tree.DInt(s.data.FirstAttemptCount)), 706 tree.NewDInt(tree.DInt(s.data.MaxRetries)), 707 errString, 708 tree.NewDFloat(tree.DFloat(s.data.NumRows.Mean)), 709 tree.NewDFloat(tree.DFloat(s.data.NumRows.GetVariance(s.data.Count))), 710 tree.NewDFloat(tree.DFloat(s.data.ParseLat.Mean)), 711 tree.NewDFloat(tree.DFloat(s.data.ParseLat.GetVariance(s.data.Count))), 712 tree.NewDFloat(tree.DFloat(s.data.PlanLat.Mean)), 713 tree.NewDFloat(tree.DFloat(s.data.PlanLat.GetVariance(s.data.Count))), 714 tree.NewDFloat(tree.DFloat(s.data.RunLat.Mean)), 715 tree.NewDFloat(tree.DFloat(s.data.RunLat.GetVariance(s.data.Count))), 716 tree.NewDFloat(tree.DFloat(s.data.ServiceLat.Mean)), 717 tree.NewDFloat(tree.DFloat(s.data.ServiceLat.GetVariance(s.data.Count))), 718 tree.NewDFloat(tree.DFloat(s.data.OverheadLat.Mean)), 719 tree.NewDFloat(tree.DFloat(s.data.OverheadLat.GetVariance(s.data.Count))), 720 tree.NewDInt(tree.DInt(s.data.BytesRead)), 721 tree.NewDInt(tree.DInt(s.data.RowsRead)), 722 tree.MakeDBool(tree.DBool(stmtKey.implicitTxn)), 723 ) 724 s.Unlock() 725 if err != nil { 726 return err 727 } 728 } 729 } 730 return nil 731 }, 732 } 733 734 var crdbInternalTxnStatsTable = virtualSchemaTable{ 735 comment: `per-application transaction statistics (in-memory, not durable; local node only). ` + 736 `This table is wiped periodically (by default, at least every two hours)`, 737 schema: ` 738 CREATE TABLE crdb_internal.node_txn_stats ( 739 node_id INT NOT NULL, 740 application_name STRING NOT NULL, 741 txn_count INT NOT NULL, 742 txn_time_avg_sec FLOAT NOT NULL, 743 txn_time_var_sec FLOAT NOT NULL, 744 committed_count INT NOT NULL, 745 implicit_count INT NOT NULL 746 )`, 747 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 748 if err := p.RequireAdminRole(ctx, "access application statistics"); err != nil { 749 return err 750 } 751 752 sqlStats := p.extendedEvalCtx.sqlStatsCollector.sqlStats 753 if sqlStats == nil { 754 return errors.AssertionFailedf( 755 "cannot access sql statistics from this context") 756 } 757 758 nodeID, _ := p.execCfg.NodeID.OptionalNodeID() // zero if not available 759 760 // Retrieve the application names and sort them to ensure the 761 // output is deterministic. 
762 var appNames []string 763 sqlStats.Lock() 764 for n := range sqlStats.apps { 765 appNames = append(appNames, n) 766 } 767 sqlStats.Unlock() 768 sort.Strings(appNames) 769 770 for _, appName := range appNames { 771 appStats := sqlStats.getStatsForApplication(appName) 772 txnCount, txnTimeAvg, txnTimeVar, committedCount, implicitCount := appStats.txns.getStats() 773 err := addRow( 774 tree.NewDInt(tree.DInt(nodeID)), 775 tree.NewDString(appName), 776 tree.NewDInt(tree.DInt(txnCount)), 777 tree.NewDFloat(tree.DFloat(txnTimeAvg)), 778 tree.NewDFloat(tree.DFloat(txnTimeVar)), 779 tree.NewDInt(tree.DInt(committedCount)), 780 tree.NewDInt(tree.DInt(implicitCount)), 781 ) 782 if err != nil { 783 return err 784 } 785 } 786 return nil 787 }, 788 } 789 790 // crdbInternalSessionTraceTable exposes the latest trace collected on this 791 // session (via SET TRACING={ON/OFF}) 792 // 793 // TODO(tbg): prefix with node_. 794 var crdbInternalSessionTraceTable = virtualSchemaTable{ 795 comment: `session trace accumulated so far (RAM)`, 796 schema: ` 797 CREATE TABLE crdb_internal.session_trace ( 798 span_idx INT NOT NULL, -- The span's index. 799 message_idx INT NOT NULL, -- The message's index within its span. 800 timestamp TIMESTAMPTZ NOT NULL,-- The message's timestamp. 801 duration INTERVAL, -- The span's duration. Set only on the first 802 -- (dummy) message on a span. 803 -- NULL if the span was not finished at the time 804 -- the trace has been collected. 805 operation STRING NULL, -- The span's operation. 806 loc STRING NOT NULL, -- The file name / line number prefix, if any. 807 tag STRING NOT NULL, -- The logging tag, if any. 808 message STRING NOT NULL, -- The logged message. 809 age INTERVAL NOT NULL -- The age of this message relative to the beginning of the trace. 810 )`, 811 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 812 rows, err := p.ExtendedEvalContext().Tracing.getSessionTrace() 813 if err != nil { 814 return err 815 } 816 for _, r := range rows { 817 if err := addRow(r[:]...); err != nil { 818 return err 819 } 820 } 821 return nil 822 }, 823 } 824 825 // crdbInternalClusterSettingsTable exposes the list of current 826 // cluster settings. 827 // 828 // TODO(tbg): prefix with node_. 829 var crdbInternalClusterSettingsTable = virtualSchemaTable{ 830 comment: `cluster settings (RAM)`, 831 schema: ` 832 CREATE TABLE crdb_internal.cluster_settings ( 833 variable STRING NOT NULL, 834 value STRING NOT NULL, 835 type STRING NOT NULL, 836 public BOOL NOT NULL, -- whether the setting is documented, which implies the user can expect support. 837 description STRING NOT NULL 838 )`, 839 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 840 if err := p.RequireAdminRole(ctx, "read crdb_internal.cluster_settings"); err != nil { 841 return err 842 } 843 for _, k := range settings.Keys() { 844 setting, _ := settings.Lookup(k, settings.LookupForLocalAccess) 845 strVal := setting.String(&p.ExecCfg().Settings.SV) 846 isPublic := setting.Visibility() == settings.Public 847 desc := setting.Description() 848 if err := addRow( 849 tree.NewDString(k), 850 tree.NewDString(strVal), 851 tree.NewDString(setting.Typ()), 852 tree.MakeDBool(tree.DBool(isPublic)), 853 tree.NewDString(desc), 854 ); err != nil { 855 return err 856 } 857 } 858 return nil 859 }, 860 } 861 862 // crdbInternalSessionVariablesTable exposes the session variables. 
863 var crdbInternalSessionVariablesTable = virtualSchemaTable{ 864 comment: `session variables (RAM)`, 865 schema: ` 866 CREATE TABLE crdb_internal.session_variables ( 867 variable STRING NOT NULL, 868 value STRING NOT NULL, 869 hidden BOOL NOT NULL 870 )`, 871 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 872 for _, vName := range varNames { 873 gen := varGen[vName] 874 value := gen.Get(&p.extendedEvalCtx) 875 if err := addRow( 876 tree.NewDString(vName), 877 tree.NewDString(value), 878 tree.MakeDBool(tree.DBool(gen.Hidden)), 879 ); err != nil { 880 return err 881 } 882 } 883 return nil 884 }, 885 } 886 887 const txnsSchemaPattern = ` 888 CREATE TABLE crdb_internal.%s ( 889 id UUID, -- the unique ID of the transaction 890 node_id INT, -- the ID of the node running the transaction 891 session_id STRING, -- the ID of the session 892 start TIMESTAMP, -- the start time of the transaction 893 txn_string STRING, -- the string representation of the transcation 894 application_name STRING -- the name of the application as per SET application_name 895 )` 896 897 var crdbInternalLocalTxnsTable = virtualSchemaTable{ 898 comment: "running user transactions visible by the current user (RAM; local node only)", 899 schema: fmt.Sprintf(txnsSchemaPattern, "node_transactions"), 900 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 901 if err := p.RequireAdminRole(ctx, "read crdb_internal.node_transactions"); err != nil { 902 return err 903 } 904 req := p.makeSessionsRequest(ctx) 905 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 906 if err != nil { 907 return err 908 } 909 response, err := ss.ListLocalSessions(ctx, &req) 910 if err != nil { 911 return err 912 } 913 return populateTransactionsTable(ctx, addRow, response) 914 }, 915 } 916 917 var crdbInternalClusterTxnsTable = virtualSchemaTable{ 918 comment: "running user transactions visible by the current user (cluster RPC; expensive!)", 919 schema: fmt.Sprintf(txnsSchemaPattern, "cluster_transactions"), 920 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 921 if err := p.RequireAdminRole(ctx, "read crdb_internal.cluster_transactions"); err != nil { 922 return err 923 } 924 req := p.makeSessionsRequest(ctx) 925 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 926 if err != nil { 927 return err 928 } 929 response, err := ss.ListSessions(ctx, &req) 930 if err != nil { 931 return err 932 } 933 return populateTransactionsTable(ctx, addRow, response) 934 }, 935 } 936 937 func populateTransactionsTable( 938 ctx context.Context, addRow func(...tree.Datum) error, response *serverpb.ListSessionsResponse, 939 ) error { 940 for _, session := range response.Sessions { 941 sessionID := getSessionID(session) 942 if txn := session.ActiveTxn; txn != nil { 943 ts, err := tree.MakeDTimestamp(txn.Start, time.Microsecond) 944 if err != nil { 945 return err 946 } 947 if err := addRow( 948 tree.NewDUuid(tree.DUuid{UUID: txn.ID}), 949 tree.NewDInt(tree.DInt(session.NodeID)), 950 sessionID, 951 ts, 952 tree.NewDString(txn.TxnDescription), 953 tree.NewDString(session.ApplicationName), 954 ); err != nil { 955 return err 956 } 957 } 958 } 959 for _, rpcErr := range response.Errors { 960 log.Warningf(ctx, "%v", rpcErr.Message) 961 if rpcErr.NodeID != 0 { 962 // Add a row with this node ID, the error for the txn string, 963 // and nulls for all other columns. 
964 if err := addRow( 965 tree.DNull, // txn ID 966 tree.NewDInt(tree.DInt(rpcErr.NodeID)), // node ID 967 tree.DNull, // session ID 968 tree.DNull, // start 969 tree.NewDString("-- "+rpcErr.Message), // txn string 970 tree.DNull, // application name 971 ); err != nil { 972 return err 973 } 974 } 975 } 976 return nil 977 } 978 979 const queriesSchemaPattern = ` 980 CREATE TABLE crdb_internal.%s ( 981 query_id STRING, -- the cluster-unique ID of the query 982 txn_id UUID, -- the unique ID of the query's transaction 983 node_id INT NOT NULL, -- the node on which the query is running 984 session_id STRING, -- the ID of the session 985 user_name STRING, -- the user running the query 986 start TIMESTAMP, -- the start time of the query 987 query STRING, -- the SQL code of the query 988 client_address STRING, -- the address of the client that issued the query 989 application_name STRING, -- the name of the application as per SET application_name 990 distributed BOOL, -- whether the query is running distributed 991 phase STRING -- the current execution phase 992 )` 993 994 func (p *planner) makeSessionsRequest(ctx context.Context) serverpb.ListSessionsRequest { 995 req := serverpb.ListSessionsRequest{Username: p.SessionData().User} 996 if err := p.RequireAdminRole(ctx, "list sessions"); err == nil { 997 // The root user can see all sessions. 998 req.Username = "" 999 } 1000 return req 1001 } 1002 1003 func getSessionID(session serverpb.Session) tree.Datum { 1004 // TODO(knz): serverpb.Session is always constructed with an ID 1005 // set from a 16-byte session ID. Yet we get crash reports 1006 // that fail in BytesToClusterWideID() with a byte slice that's 1007 // too short. See #32517. 1008 var sessionID tree.Datum 1009 if session.ID == nil { 1010 // TODO(knz): NewInternalTrackingError is misdesigned. Change to 1011 // not use this. See the other facilities in 1012 // pgerror/internal_errors.go. 1013 telemetry.RecordError( 1014 pgerror.NewInternalTrackingError(32517 /* issue */, "null")) 1015 sessionID = tree.DNull 1016 } else if len(session.ID) != 16 { 1017 // TODO(knz): ditto above. 1018 telemetry.RecordError( 1019 pgerror.NewInternalTrackingError(32517 /* issue */, fmt.Sprintf("len=%d", len(session.ID)))) 1020 sessionID = tree.NewDString("<invalid>") 1021 } else { 1022 clusterSessionID := BytesToClusterWideID(session.ID) 1023 sessionID = tree.NewDString(clusterSessionID.String()) 1024 } 1025 return sessionID 1026 } 1027 1028 // crdbInternalLocalQueriesTable exposes the list of running queries 1029 // on the current node. The results are dependent on the current user. 1030 var crdbInternalLocalQueriesTable = virtualSchemaTable{ 1031 comment: "running queries visible by current user (RAM; local node only)", 1032 schema: fmt.Sprintf(queriesSchemaPattern, "node_queries"), 1033 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1034 req := p.makeSessionsRequest(ctx) 1035 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 1036 if err != nil { 1037 return err 1038 } 1039 response, err := ss.ListLocalSessions(ctx, &req) 1040 if err != nil { 1041 return err 1042 } 1043 return populateQueriesTable(ctx, addRow, response) 1044 }, 1045 } 1046 1047 // crdbInternalClusterQueriesTable exposes the list of running queries 1048 // on the entire cluster. The result is dependent on the current user. 
1049 var crdbInternalClusterQueriesTable = virtualSchemaTable{ 1050 comment: "running queries visible by current user (cluster RPC; expensive!)", 1051 schema: fmt.Sprintf(queriesSchemaPattern, "cluster_queries"), 1052 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1053 req := p.makeSessionsRequest(ctx) 1054 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 1055 if err != nil { 1056 return err 1057 } 1058 response, err := ss.ListSessions(ctx, &req) 1059 if err != nil { 1060 return err 1061 } 1062 return populateQueriesTable(ctx, addRow, response) 1063 }, 1064 } 1065 1066 func populateQueriesTable( 1067 ctx context.Context, addRow func(...tree.Datum) error, response *serverpb.ListSessionsResponse, 1068 ) error { 1069 for _, session := range response.Sessions { 1070 sessionID := getSessionID(session) 1071 for _, query := range session.ActiveQueries { 1072 isDistributedDatum := tree.DNull 1073 phase := strings.ToLower(query.Phase.String()) 1074 if phase == "executing" { 1075 isDistributedDatum = tree.DBoolFalse 1076 if query.IsDistributed { 1077 isDistributedDatum = tree.DBoolTrue 1078 } 1079 } 1080 1081 if query.Progress > 0 { 1082 phase = fmt.Sprintf("%s (%.2f%%)", phase, query.Progress*100) 1083 } 1084 1085 var txnID tree.Datum 1086 // query.TxnID and query.TxnStart were only added in 20.1. In case this 1087 // is a mixed cluster setting, report NULL if these values were not filled 1088 // out by the remote session. 1089 if query.ID == "" { 1090 txnID = tree.DNull 1091 } else { 1092 txnID = tree.NewDUuid(tree.DUuid{UUID: query.TxnID}) 1093 } 1094 1095 ts, err := tree.MakeDTimestamp(query.Start, time.Microsecond) 1096 if err != nil { 1097 return err 1098 } 1099 if err := addRow( 1100 tree.NewDString(query.ID), 1101 txnID, 1102 tree.NewDInt(tree.DInt(session.NodeID)), 1103 sessionID, 1104 tree.NewDString(session.Username), 1105 ts, 1106 tree.NewDString(query.Sql), 1107 tree.NewDString(session.ClientAddress), 1108 tree.NewDString(session.ApplicationName), 1109 isDistributedDatum, 1110 tree.NewDString(phase), 1111 ); err != nil { 1112 return err 1113 } 1114 } 1115 } 1116 1117 for _, rpcErr := range response.Errors { 1118 log.Warningf(ctx, "%v", rpcErr.Message) 1119 if rpcErr.NodeID != 0 { 1120 // Add a row with this node ID, the error for query, and 1121 // nulls for all other columns. 
1122 if err := addRow( 1123 tree.DNull, // query ID 1124 tree.DNull, // txn ID 1125 tree.NewDInt(tree.DInt(rpcErr.NodeID)), // node ID 1126 tree.DNull, // session ID 1127 tree.DNull, // username 1128 tree.DNull, // start 1129 tree.NewDString("-- "+rpcErr.Message), // query 1130 tree.DNull, // client_address 1131 tree.DNull, // application_name 1132 tree.DNull, // distributed 1133 tree.DNull, // phase 1134 ); err != nil { 1135 return err 1136 } 1137 } 1138 } 1139 return nil 1140 } 1141 1142 const sessionsSchemaPattern = ` 1143 CREATE TABLE crdb_internal.%s ( 1144 node_id INT NOT NULL, -- the node on which the query is running 1145 session_id STRING, -- the ID of the session 1146 user_name STRING, -- the user running the query 1147 client_address STRING, -- the address of the client that issued the query 1148 application_name STRING, -- the name of the application as per SET application_name 1149 active_queries STRING, -- the currently running queries as SQL 1150 last_active_query STRING, -- the query that finished last on this session as SQL 1151 session_start TIMESTAMP, -- the time when the session was opened 1152 oldest_query_start TIMESTAMP, -- the time when the oldest query in the session was started 1153 kv_txn STRING, -- the ID of the current KV transaction 1154 alloc_bytes INT, -- the number of bytes allocated by the session 1155 max_alloc_bytes INT -- the high water mark of bytes allocated by the session 1156 ) 1157 ` 1158 1159 // crdbInternalLocalSessionsTable exposes the list of running sessions 1160 // on the current node. The results are dependent on the current user. 1161 var crdbInternalLocalSessionsTable = virtualSchemaTable{ 1162 comment: "running sessions visible by current user (RAM; local node only)", 1163 schema: fmt.Sprintf(sessionsSchemaPattern, "node_sessions"), 1164 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1165 req := p.makeSessionsRequest(ctx) 1166 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 1167 if err != nil { 1168 return err 1169 } 1170 response, err := ss.ListLocalSessions(ctx, &req) 1171 if err != nil { 1172 return err 1173 } 1174 return populateSessionsTable(ctx, addRow, response) 1175 }, 1176 } 1177 1178 // crdbInternalClusterSessionsTable exposes the list of running sessions 1179 // on the entire cluster. The result is dependent on the current user. 
1180 var crdbInternalClusterSessionsTable = virtualSchemaTable{ 1181 comment: "running sessions visible to current user (cluster RPC; expensive!)", 1182 schema: fmt.Sprintf(sessionsSchemaPattern, "cluster_sessions"), 1183 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1184 req := p.makeSessionsRequest(ctx) 1185 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 1186 if err != nil { 1187 return err 1188 } 1189 response, err := ss.ListSessions(ctx, &req) 1190 if err != nil { 1191 return err 1192 } 1193 return populateSessionsTable(ctx, addRow, response) 1194 }, 1195 } 1196 1197 func populateSessionsTable( 1198 ctx context.Context, addRow func(...tree.Datum) error, response *serverpb.ListSessionsResponse, 1199 ) error { 1200 for _, session := range response.Sessions { 1201 // Generate active_queries and oldest_query_start 1202 var activeQueries bytes.Buffer 1203 var oldestStart time.Time 1204 var oldestStartDatum tree.Datum 1205 1206 for idx, query := range session.ActiveQueries { 1207 if idx > 0 { 1208 activeQueries.WriteString("; ") 1209 } 1210 activeQueries.WriteString(query.Sql) 1211 1212 if oldestStart.IsZero() || query.Start.Before(oldestStart) { 1213 oldestStart = query.Start 1214 } 1215 } 1216 1217 var err error 1218 if oldestStart.IsZero() { 1219 oldestStartDatum = tree.DNull 1220 } else { 1221 oldestStartDatum, err = tree.MakeDTimestamp(oldestStart, time.Microsecond) 1222 if err != nil { 1223 return err 1224 } 1225 } 1226 1227 kvTxnIDDatum := tree.DNull 1228 if session.KvTxnID != nil { 1229 kvTxnIDDatum = tree.NewDString(session.KvTxnID.String()) 1230 } 1231 1232 sessionID := getSessionID(session) 1233 startTSDatum, err := tree.MakeDTimestamp(session.Start, time.Microsecond) 1234 if err != nil { 1235 return err 1236 } 1237 if err := addRow( 1238 tree.NewDInt(tree.DInt(session.NodeID)), 1239 sessionID, 1240 tree.NewDString(session.Username), 1241 tree.NewDString(session.ClientAddress), 1242 tree.NewDString(session.ApplicationName), 1243 tree.NewDString(activeQueries.String()), 1244 tree.NewDString(session.LastActiveQuery), 1245 startTSDatum, 1246 oldestStartDatum, 1247 kvTxnIDDatum, 1248 tree.NewDInt(tree.DInt(session.AllocBytes)), 1249 tree.NewDInt(tree.DInt(session.MaxAllocBytes)), 1250 ); err != nil { 1251 return err 1252 } 1253 } 1254 1255 for _, rpcErr := range response.Errors { 1256 log.Warningf(ctx, "%v", rpcErr.Message) 1257 if rpcErr.NodeID != 0 { 1258 // Add a row with this node ID, error in active queries, and nulls 1259 // for all other columns. 1260 if err := addRow( 1261 tree.NewDInt(tree.DInt(rpcErr.NodeID)), // node ID 1262 tree.DNull, // session ID 1263 tree.DNull, // username 1264 tree.DNull, // client address 1265 tree.DNull, // application name 1266 tree.NewDString("-- "+rpcErr.Message), // active queries 1267 tree.DNull, // last active query 1268 tree.DNull, // session start 1269 tree.DNull, // oldest_query_start 1270 tree.DNull, // kv_txn 1271 tree.DNull, // alloc_bytes 1272 tree.DNull, // max_alloc_bytes 1273 ); err != nil { 1274 return err 1275 } 1276 } 1277 } 1278 1279 return nil 1280 } 1281 1282 // crdbInternalLocalMetricsTable exposes a snapshot of the metrics on the 1283 // current node. 
1284 var crdbInternalLocalMetricsTable = virtualSchemaTable{ 1285 comment: "current values for metrics (RAM; local node only)", 1286 schema: `CREATE TABLE crdb_internal.node_metrics ( 1287 store_id INT NULL, -- the store, if any, for this metric 1288 name STRING NOT NULL, -- name of the metric 1289 value FLOAT NOT NULL -- value of the metric 1290 )`, 1291 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1292 if err := p.RequireAdminRole(ctx, "read crdb_internal.node_metrics"); err != nil { 1293 return err 1294 } 1295 1296 mr := p.ExecCfg().MetricsRecorder 1297 if mr == nil { 1298 return nil 1299 } 1300 nodeStatus := mr.GenerateNodeStatus(ctx) 1301 for i := 0; i <= len(nodeStatus.StoreStatuses); i++ { 1302 storeID := tree.DNull 1303 mtr := nodeStatus.Metrics 1304 if i > 0 { 1305 storeID = tree.NewDInt(tree.DInt(nodeStatus.StoreStatuses[i-1].Desc.StoreID)) 1306 mtr = nodeStatus.StoreStatuses[i-1].Metrics 1307 } 1308 for name, value := range mtr { 1309 if err := addRow( 1310 storeID, 1311 tree.NewDString(name), 1312 tree.NewDFloat(tree.DFloat(value)), 1313 ); err != nil { 1314 return err 1315 } 1316 } 1317 } 1318 return nil 1319 }, 1320 } 1321 1322 // crdbInternalBuiltinFunctionsTable exposes the built-in function 1323 // metadata. 1324 var crdbInternalBuiltinFunctionsTable = virtualSchemaTable{ 1325 comment: "built-in functions (RAM/static)", 1326 schema: ` 1327 CREATE TABLE crdb_internal.builtin_functions ( 1328 function STRING NOT NULL, 1329 signature STRING NOT NULL, 1330 category STRING NOT NULL, 1331 details STRING NOT NULL 1332 )`, 1333 populate: func(ctx context.Context, _ *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1334 for _, name := range builtins.AllBuiltinNames { 1335 props, overloads := builtins.GetBuiltinProperties(name) 1336 for _, f := range overloads { 1337 if err := addRow( 1338 tree.NewDString(name), 1339 tree.NewDString(f.Signature(false /* simplify */)), 1340 tree.NewDString(props.Category), 1341 tree.NewDString(f.Info), 1342 ); err != nil { 1343 return err 1344 } 1345 } 1346 } 1347 return nil 1348 }, 1349 } 1350 1351 var crdbInternalCreateTypeStmtsTable = virtualSchemaTable{ 1352 comment: "CREATE statements for all user defined types accessible by the current user in current database (KV scan)", 1353 schema: ` 1354 CREATE TABLE crdb_internal.create_type_statements ( 1355 database_id INT, 1356 database_name STRING, 1357 schema_name STRING, 1358 descriptor_id INT, 1359 descriptor_name STRING, 1360 create_statement STRING, 1361 INDEX (descriptor_id) 1362 ) 1363 `, 1364 populate: func(ctx context.Context, p *planner, db *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1365 return forEachTypeDesc(ctx, p, db, func(db *DatabaseDescriptor, sc string, typeDesc *TypeDescriptor) error { 1366 switch typeDesc.Kind { 1367 case sqlbase.TypeDescriptor_ENUM: 1368 var enumLabels []string 1369 for i := range typeDesc.EnumMembers { 1370 enumLabels = append(enumLabels, typeDesc.EnumMembers[i].LogicalRepresentation) 1371 } 1372 name, err := tree.NewUnresolvedObjectName(3, [3]string{typeDesc.Name, sc, db.Name}, 0) 1373 if err != nil { 1374 return err 1375 } 1376 node := &tree.CreateType{ 1377 Variety: tree.Enum, 1378 TypeName: name, 1379 EnumLabels: enumLabels, 1380 } 1381 if err := addRow( 1382 tree.NewDInt(tree.DInt(db.ID)), // database_id 1383 tree.NewDString(db.Name), // database_name 1384 tree.NewDString(sc), // schema_name 1385 tree.NewDInt(tree.DInt(typeDesc.ID)), // 
descriptor_id 1386 tree.NewDString(typeDesc.Name), // descriptor_name 1387 tree.NewDString(tree.AsString(node)), // create_statement 1388 ); err != nil { 1389 return err 1390 } 1391 case sqlbase.TypeDescriptor_ALIAS: 1392 // Alias types are created implicitly, so we don't have create 1393 // statements for them. 1394 default: 1395 return errors.AssertionFailedf("unknown type descriptor kind %s", typeDesc.Kind.String()) 1396 } 1397 return nil 1398 }) 1399 }, 1400 } 1401 1402 // Prepare the row populate function. 1403 var typeView = tree.NewDString("view") 1404 var typeTable = tree.NewDString("table") 1405 var typeSequence = tree.NewDString("sequence") 1406 1407 // crdbInternalCreateStmtsTable exposes the CREATE TABLE/CREATE VIEW 1408 // statements. 1409 // 1410 // TODO(tbg): prefix with kv_. 1411 var crdbInternalCreateStmtsTable = makeAllRelationsVirtualTableWithDescriptorIDIndex( 1412 `CREATE and ALTER statements for all tables accessible by current user in current database (KV scan)`, 1413 ` 1414 CREATE TABLE crdb_internal.create_statements ( 1415 database_id INT, 1416 database_name STRING, 1417 schema_name STRING NOT NULL, 1418 descriptor_id INT, 1419 descriptor_type STRING NOT NULL, 1420 descriptor_name STRING NOT NULL, 1421 create_statement STRING NOT NULL, 1422 state STRING NOT NULL, 1423 create_nofks STRING NOT NULL, 1424 alter_statements STRING[] NOT NULL, 1425 validate_statements STRING[] NOT NULL, 1426 has_partitions BOOL NOT NULL, 1427 INDEX(descriptor_id) 1428 ) 1429 `, virtualOnce, false, /* includesIndexEntries */ 1430 func(ctx context.Context, p *planner, h oidHasher, db *sqlbase.DatabaseDescriptor, scName string, 1431 table *sqlbase.TableDescriptor, lookup simpleSchemaResolver, addRow func(...tree.Datum) error) error { 1432 contextName := "" 1433 parentNameStr := tree.DNull 1434 if db != nil { 1435 contextName = db.Name 1436 parentNameStr = tree.NewDString(db.Name) 1437 } 1438 scNameStr := tree.NewDString(scName) 1439 1440 var descType tree.Datum 1441 var stmt, createNofk string 1442 alterStmts := tree.NewDArray(types.String) 1443 validateStmts := tree.NewDArray(types.String) 1444 var err error 1445 if table.IsView() { 1446 descType = typeView 1447 stmt, err = ShowCreateView(ctx, (*tree.Name)(&table.Name), table) 1448 } else if table.IsSequence() { 1449 descType = typeSequence 1450 stmt, err = ShowCreateSequence(ctx, (*tree.Name)(&table.Name), table) 1451 } else { 1452 descType = typeTable 1453 tn := (*tree.Name)(&table.Name) 1454 displayOptions := ShowCreateDisplayOptions{ 1455 FKDisplayMode: OmitFKClausesFromCreate, 1456 } 1457 createNofk, err = ShowCreateTable(ctx, p, tn, contextName, table, lookup, displayOptions) 1458 if err != nil { 1459 return err 1460 } 1461 if err := showAlterStatementWithInterleave(ctx, tn, contextName, lookup, table.Indexes, table, alterStmts, 1462 validateStmts); err != nil { 1463 return err 1464 } 1465 displayOptions.FKDisplayMode = IncludeFkClausesInCreate 1466 stmt, err = ShowCreateTable(ctx, p, tn, contextName, table, lookup, displayOptions) 1467 } 1468 if err != nil { 1469 return err 1470 } 1471 1472 descID := tree.NewDInt(tree.DInt(table.ID)) 1473 dbDescID := tree.NewDInt(tree.DInt(table.GetParentID())) 1474 if createNofk == "" { 1475 createNofk = stmt 1476 } 1477 hasPartitions := false 1478 for i := range table.Indexes { 1479 if table.Indexes[i].Partitioning.NumColumns != 0 { 1480 hasPartitions = true 1481 break 1482 } 1483 } 1484 hasPartitions = hasPartitions || table.PrimaryIndex.Partitioning.NumColumns != 0 1485 return addRow( 1486 
dbDescID, 1487 parentNameStr, 1488 scNameStr, 1489 descID, 1490 descType, 1491 tree.NewDString(table.Name), 1492 tree.NewDString(stmt), 1493 tree.NewDString(table.State.String()), 1494 tree.NewDString(createNofk), 1495 alterStmts, 1496 validateStmts, 1497 tree.MakeDBool(tree.DBool(hasPartitions)), 1498 ) 1499 }) 1500 1501 func showAlterStatementWithInterleave( 1502 ctx context.Context, 1503 tn *tree.Name, 1504 contextName string, 1505 lCtx simpleSchemaResolver, 1506 allIdx []sqlbase.IndexDescriptor, 1507 table *sqlbase.TableDescriptor, 1508 alterStmts *tree.DArray, 1509 validateStmts *tree.DArray, 1510 ) error { 1511 for i := range table.OutboundFKs { 1512 fk := &table.OutboundFKs[i] 1513 f := tree.NewFmtCtx(tree.FmtSimple) 1514 f.WriteString("ALTER TABLE ") 1515 f.FormatNode(tn) 1516 f.WriteString(" ADD CONSTRAINT ") 1517 f.FormatNameP(&fk.Name) 1518 f.WriteByte(' ') 1519 if err := showForeignKeyConstraint(&f.Buffer, contextName, table, fk, lCtx); err != nil { 1520 return err 1521 } 1522 if err := alterStmts.Append(tree.NewDString(f.CloseAndGetString())); err != nil { 1523 return err 1524 } 1525 1526 f = tree.NewFmtCtx(tree.FmtSimple) 1527 f.WriteString("ALTER TABLE ") 1528 f.FormatNode(tn) 1529 f.WriteString(" VALIDATE CONSTRAINT ") 1530 f.FormatNameP(&fk.Name) 1531 1532 if err := validateStmts.Append(tree.NewDString(f.CloseAndGetString())); err != nil { 1533 return err 1534 } 1535 } 1536 1537 for i := range allIdx { 1538 idx := &allIdx[i] 1539 // Create CREATE INDEX commands for INTERLEAVE tables. These commands 1540 // are included in the ALTER TABLE statements. 1541 if len(idx.Interleave.Ancestors) > 0 { 1542 f := tree.NewFmtCtx(tree.FmtSimple) 1543 intl := idx.Interleave 1544 parentTableID := intl.Ancestors[len(intl.Ancestors)-1].TableID 1545 var err error 1546 var parentName tree.TableName 1547 if lCtx != nil { 1548 parentName, err = getParentAsTableName(lCtx, parentTableID, contextName) 1549 if err != nil { 1550 return err 1551 } 1552 } else { 1553 parentName = tree.MakeTableName(tree.Name(""), tree.Name(fmt.Sprintf("[%d as parent]", parentTableID))) 1554 parentName.ExplicitCatalog = false 1555 parentName.ExplicitSchema = false 1556 } 1557 1558 var tableName tree.TableName 1559 if lCtx != nil { 1560 tableName, err = getTableAsTableName(lCtx, table, contextName) 1561 if err != nil { 1562 return err 1563 } 1564 } else { 1565 tableName = tree.MakeTableName(tree.Name(""), tree.Name(fmt.Sprintf("[%d as parent]", table.ID))) 1566 tableName.ExplicitCatalog = false 1567 tableName.ExplicitSchema = false 1568 } 1569 var sharedPrefixLen int 1570 for _, ancestor := range intl.Ancestors { 1571 sharedPrefixLen += int(ancestor.SharedPrefixLen) 1572 } 1573 // Write the CREATE INDEX statements. 1574 showCreateIndexWithInterleave(f, idx, tableName, parentName, sharedPrefixLen) 1575 if err := alterStmts.Append(tree.NewDString(f.CloseAndGetString())); err != nil { 1576 return err 1577 } 1578 } 1579 } 1580 return nil 1581 } 1582 1583 func showCreateIndexWithInterleave( 1584 f *tree.FmtCtx, 1585 idx *sqlbase.IndexDescriptor, 1586 tableName tree.TableName, 1587 parentName tree.TableName, 1588 sharedPrefixLen int, 1589 ) { 1590 f.WriteString("CREATE ") 1591 f.WriteString(idx.SQLString(&tableName)) 1592 f.WriteString(" INTERLEAVE IN PARENT ") 1593 parentName.Format(f) 1594 f.WriteString(" (") 1595 // Get all of the columns and write them. 
1596 comma := "" 1597 for _, name := range idx.ColumnNames[:sharedPrefixLen] { 1598 f.WriteString(comma) 1599 f.FormatNameP(&name) 1600 comma = ", " 1601 } 1602 f.WriteString(")") 1603 } 1604 1605 // crdbInternalTableColumnsTable exposes the column descriptors. 1606 // 1607 // TODO(tbg): prefix with kv_. 1608 var crdbInternalTableColumnsTable = virtualSchemaTable{ 1609 comment: "details for all columns accessible by current user in current database (KV scan)", 1610 schema: ` 1611 CREATE TABLE crdb_internal.table_columns ( 1612 descriptor_id INT, 1613 descriptor_name STRING NOT NULL, 1614 column_id INT NOT NULL, 1615 column_name STRING NOT NULL, 1616 column_type STRING NOT NULL, 1617 nullable BOOL NOT NULL, 1618 default_expr STRING, 1619 hidden BOOL NOT NULL 1620 ) 1621 `, 1622 generator: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { 1623 row := make(tree.Datums, 8) 1624 worker := func(pusher rowPusher) error { 1625 return forEachTableDescAll(ctx, p, dbContext, hideVirtual, 1626 func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { 1627 tableID := tree.NewDInt(tree.DInt(table.ID)) 1628 tableName := tree.NewDString(table.Name) 1629 for i := range table.Columns { 1630 col := &table.Columns[i] 1631 defStr := tree.DNull 1632 if col.DefaultExpr != nil { 1633 def, err := schemaexpr.DeserializeTableDescExpr(ctx, &p.semaCtx, table, *col.DefaultExpr) 1634 if err != nil { 1635 return err 1636 } 1637 defStr = tree.NewDString(tree.SerializeForDisplay(def)) 1638 } 1639 row = row[:0] 1640 row = append(row, 1641 tableID, 1642 tableName, 1643 tree.NewDInt(tree.DInt(col.ID)), 1644 tree.NewDString(col.Name), 1645 tree.NewDString(col.Type.DebugString()), 1646 tree.MakeDBool(tree.DBool(col.Nullable)), 1647 defStr, 1648 tree.MakeDBool(tree.DBool(col.Hidden)), 1649 ) 1650 if err := pusher.pushRow(row...); err != nil { 1651 return err 1652 } 1653 } 1654 return nil 1655 }, 1656 ) 1657 } 1658 next, cleanup := setupGenerator(ctx, worker) 1659 return next, cleanup, nil 1660 }, 1661 } 1662 1663 // crdbInternalTableIndexesTable exposes the index descriptors. 1664 // 1665 // TODO(tbg): prefix with kv_. 
1666 var crdbInternalTableIndexesTable = virtualSchemaTable{ 1667 comment: "indexes accessible by current user in current database (KV scan)", 1668 schema: ` 1669 CREATE TABLE crdb_internal.table_indexes ( 1670 descriptor_id INT, 1671 descriptor_name STRING NOT NULL, 1672 index_id INT NOT NULL, 1673 index_name STRING NOT NULL, 1674 index_type STRING NOT NULL, 1675 is_unique BOOL NOT NULL, 1676 is_inverted BOOL NOT NULL 1677 ) 1678 `, 1679 generator: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { 1680 primary := tree.NewDString("primary") 1681 secondary := tree.NewDString("secondary") 1682 row := make(tree.Datums, 7) 1683 worker := func(pusher rowPusher) error { 1684 return forEachTableDescAll(ctx, p, dbContext, hideVirtual, 1685 func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { 1686 tableID := tree.NewDInt(tree.DInt(table.ID)) 1687 tableName := tree.NewDString(table.Name) 1688 row = row[:0] 1689 row = append(row, 1690 tableID, 1691 tableName, 1692 tree.NewDInt(tree.DInt(table.PrimaryIndex.ID)), 1693 tree.NewDString(table.PrimaryIndex.Name), 1694 primary, 1695 tree.MakeDBool(tree.DBool(table.PrimaryIndex.Unique)), 1696 tree.MakeDBool(table.PrimaryIndex.Type == sqlbase.IndexDescriptor_INVERTED), 1697 ) 1698 if err := pusher.pushRow(row...); err != nil { 1699 return err 1700 } 1701 for _, idx := range table.Indexes { 1702 row = row[:0] 1703 row = append(row, 1704 tableID, 1705 tableName, 1706 tree.NewDInt(tree.DInt(idx.ID)), 1707 tree.NewDString(idx.Name), 1708 secondary, 1709 tree.MakeDBool(tree.DBool(idx.Unique)), 1710 tree.MakeDBool(idx.Type == sqlbase.IndexDescriptor_INVERTED), 1711 ) 1712 if err := pusher.pushRow(row...); err != nil { 1713 return err 1714 } 1715 } 1716 return nil 1717 }, 1718 ) 1719 } 1720 next, cleanup := setupGenerator(ctx, worker) 1721 return next, cleanup, nil 1722 }, 1723 } 1724 1725 // crdbInternalIndexColumnsTable exposes the index columns. 1726 // 1727 // TODO(tbg): prefix with kv_. 1728 var crdbInternalIndexColumnsTable = virtualSchemaTable{ 1729 comment: "index columns for all indexes accessible by current user in current database (KV scan)", 1730 schema: ` 1731 CREATE TABLE crdb_internal.index_columns ( 1732 descriptor_id INT, 1733 descriptor_name STRING NOT NULL, 1734 index_id INT NOT NULL, 1735 index_name STRING NOT NULL, 1736 column_type STRING NOT NULL, 1737 column_id INT NOT NULL, 1738 column_name STRING, 1739 column_direction STRING 1740 ) 1741 `, 1742 populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1743 key := tree.NewDString("key") 1744 storing := tree.NewDString("storing") 1745 extra := tree.NewDString("extra") 1746 composite := tree.NewDString("composite") 1747 idxDirMap := map[sqlbase.IndexDescriptor_Direction]tree.Datum{ 1748 sqlbase.IndexDescriptor_ASC: tree.NewDString(sqlbase.IndexDescriptor_ASC.String()), 1749 sqlbase.IndexDescriptor_DESC: tree.NewDString(sqlbase.IndexDescriptor_DESC.String()), 1750 } 1751 1752 return forEachTableDescAll(ctx, p, dbContext, hideVirtual, 1753 func(parent *DatabaseDescriptor, _ string, table *TableDescriptor) error { 1754 tableID := tree.NewDInt(tree.DInt(table.ID)) 1755 parentName := parent.Name 1756 tableName := tree.NewDString(table.Name) 1757 1758 reportIndex := func(idx *sqlbase.IndexDescriptor) error { 1759 idxID := tree.NewDInt(tree.DInt(idx.ID)) 1760 idxName := tree.NewDString(idx.Name) 1761 1762 // Report the main (key) columns. 
1763 for i, c := range idx.ColumnIDs { 1764 colName := tree.DNull 1765 colDir := tree.DNull 1766 if i >= len(idx.ColumnNames) { 1767 // We log an error here, instead of reporting an error 1768 // to the user, because we really want to see the 1769 // erroneous data in the virtual table. 1770 log.Errorf(ctx, "index descriptor for [%d@%d] (%s.%s@%s) has more key column IDs (%d) than names (%d) (corrupted schema?)", 1771 table.ID, idx.ID, parentName, table.Name, idx.Name, 1772 len(idx.ColumnIDs), len(idx.ColumnNames)) 1773 } else { 1774 colName = tree.NewDString(idx.ColumnNames[i]) 1775 } 1776 if i >= len(idx.ColumnDirections) { 1777 // See comment above. 1778 log.Errorf(ctx, "index descriptor for [%d@%d] (%s.%s@%s) has more key column IDs (%d) than directions (%d) (corrupted schema?)", 1779 table.ID, idx.ID, parentName, table.Name, idx.Name, 1780 len(idx.ColumnIDs), len(idx.ColumnDirections)) 1781 } else { 1782 colDir = idxDirMap[idx.ColumnDirections[i]] 1783 } 1784 1785 if err := addRow( 1786 tableID, tableName, idxID, idxName, 1787 key, tree.NewDInt(tree.DInt(c)), colName, colDir, 1788 ); err != nil { 1789 return err 1790 } 1791 } 1792 1793 // Report the stored columns. 1794 for _, c := range idx.StoreColumnIDs { 1795 if err := addRow( 1796 tableID, tableName, idxID, idxName, 1797 storing, tree.NewDInt(tree.DInt(c)), tree.DNull, tree.DNull, 1798 ); err != nil { 1799 return err 1800 } 1801 } 1802 1803 // Report the extra columns. 1804 for _, c := range idx.ExtraColumnIDs { 1805 if err := addRow( 1806 tableID, tableName, idxID, idxName, 1807 extra, tree.NewDInt(tree.DInt(c)), tree.DNull, tree.DNull, 1808 ); err != nil { 1809 return err 1810 } 1811 } 1812 1813 // Report the composite columns 1814 for _, c := range idx.CompositeColumnIDs { 1815 if err := addRow( 1816 tableID, tableName, idxID, idxName, 1817 composite, tree.NewDInt(tree.DInt(c)), tree.DNull, tree.DNull, 1818 ); err != nil { 1819 return err 1820 } 1821 } 1822 1823 return nil 1824 } 1825 1826 if err := reportIndex(&table.PrimaryIndex); err != nil { 1827 return err 1828 } 1829 for i := range table.Indexes { 1830 if err := reportIndex(&table.Indexes[i]); err != nil { 1831 return err 1832 } 1833 } 1834 return nil 1835 }) 1836 }, 1837 } 1838 1839 // crdbInternalBackwardDependenciesTable exposes the backward 1840 // inter-descriptor dependencies. 1841 // 1842 // TODO(tbg): prefix with kv_. 
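// Illustrative query (the table name is hypothetical) listing what a given
// table depends on, using the schema below:
//
//   SELECT dependson_type, dependson_id, dependson_name
//     FROM crdb_internal.backward_dependencies
//    WHERE descriptor_name = 'orders';
//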
1843 var crdbInternalBackwardDependenciesTable = virtualSchemaTable{ 1844 comment: "backward inter-descriptor dependencies starting from tables accessible by current user in current database (KV scan)", 1845 schema: ` 1846 CREATE TABLE crdb_internal.backward_dependencies ( 1847 descriptor_id INT, 1848 descriptor_name STRING NOT NULL, 1849 index_id INT, 1850 column_id INT, 1851 dependson_id INT NOT NULL, 1852 dependson_type STRING NOT NULL, 1853 dependson_index_id INT, 1854 dependson_name STRING, 1855 dependson_details STRING 1856 ) 1857 `, 1858 populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1859 fkDep := tree.NewDString("fk") 1860 viewDep := tree.NewDString("view") 1861 sequenceDep := tree.NewDString("sequence") 1862 interleaveDep := tree.NewDString("interleave") 1863 return forEachTableDescAllWithTableLookup(ctx, p, dbContext, hideVirtual, 1864 /* virtual tables have no backward/forward dependencies*/ 1865 func(db *DatabaseDescriptor, _ string, table *TableDescriptor, tableLookup tableLookupFn) error { 1866 tableID := tree.NewDInt(tree.DInt(table.ID)) 1867 tableName := tree.NewDString(table.Name) 1868 1869 reportIdxDeps := func(idx *sqlbase.IndexDescriptor) error { 1870 for _, interleaveParent := range idx.Interleave.Ancestors { 1871 if err := addRow( 1872 tableID, tableName, 1873 tree.NewDInt(tree.DInt(idx.ID)), 1874 tree.DNull, 1875 tree.NewDInt(tree.DInt(interleaveParent.TableID)), 1876 interleaveDep, 1877 tree.NewDInt(tree.DInt(interleaveParent.IndexID)), 1878 tree.DNull, 1879 tree.NewDString(fmt.Sprintf("SharedPrefixLen: %d", 1880 interleaveParent.SharedPrefixLen)), 1881 ); err != nil { 1882 return err 1883 } 1884 } 1885 return nil 1886 } 1887 1888 for i := range table.OutboundFKs { 1889 fk := &table.OutboundFKs[i] 1890 refTbl, err := tableLookup.getTableByID(fk.ReferencedTableID) 1891 if err != nil { 1892 return err 1893 } 1894 refIdx, err := sqlbase.FindFKReferencedIndex(refTbl, fk.ReferencedColumnIDs) 1895 if err != nil { 1896 return err 1897 } 1898 if err := addRow( 1899 tableID, tableName, 1900 tree.DNull, 1901 tree.DNull, 1902 tree.NewDInt(tree.DInt(fk.ReferencedTableID)), 1903 fkDep, 1904 tree.NewDInt(tree.DInt(refIdx.ID)), 1905 tree.NewDString(fk.Name), 1906 tree.DNull, 1907 ); err != nil { 1908 return err 1909 } 1910 } 1911 1912 // Record the backward references of the primary index. 1913 if err := reportIdxDeps(&table.PrimaryIndex); err != nil { 1914 return err 1915 } 1916 1917 // Record the backward references of secondary indexes. 1918 for i := range table.Indexes { 1919 if err := reportIdxDeps(&table.Indexes[i]); err != nil { 1920 return err 1921 } 1922 } 1923 1924 // Record the view dependencies. 1925 for _, tIdx := range table.DependsOn { 1926 if err := addRow( 1927 tableID, tableName, 1928 tree.DNull, 1929 tree.DNull, 1930 tree.NewDInt(tree.DInt(tIdx)), 1931 viewDep, 1932 tree.DNull, 1933 tree.DNull, 1934 tree.DNull, 1935 ); err != nil { 1936 return err 1937 } 1938 } 1939 1940 // Record sequence dependencies. 
1941 for i := range table.Columns { 1942 col := &table.Columns[i] 1943 for _, sequenceID := range col.UsesSequenceIds { 1944 if err := addRow( 1945 tableID, tableName, 1946 tree.DNull, 1947 tree.NewDInt(tree.DInt(col.ID)), 1948 tree.NewDInt(tree.DInt(sequenceID)), 1949 sequenceDep, 1950 tree.DNull, 1951 tree.DNull, 1952 tree.DNull, 1953 ); err != nil { 1954 return err 1955 } 1956 } 1957 } 1958 return nil 1959 }) 1960 }, 1961 } 1962 1963 // crdbInternalFeatureUsage exposes the telemetry counters. 1964 var crdbInternalFeatureUsage = virtualSchemaTable{ 1965 comment: "telemetry counters (RAM; local node only)", 1966 schema: ` 1967 CREATE TABLE crdb_internal.feature_usage ( 1968 feature_name STRING NOT NULL, 1969 usage_count INT NOT NULL 1970 ) 1971 `, 1972 populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 1973 for feature, count := range telemetry.GetFeatureCounts(telemetry.Raw, telemetry.ReadOnly) { 1974 if count == 0 { 1975 // Skip over empty counters to avoid polluting the output. 1976 continue 1977 } 1978 if err := addRow( 1979 tree.NewDString(feature), 1980 tree.NewDInt(tree.DInt(int64(count))), 1981 ); err != nil { 1982 return err 1983 } 1984 } 1985 return nil 1986 }, 1987 } 1988 1989 // crdbInternalForwardDependenciesTable exposes the forward 1990 // inter-descriptor dependencies. 1991 // 1992 // TODO(tbg): prefix with kv_. 1993 var crdbInternalForwardDependenciesTable = virtualSchemaTable{ 1994 comment: "forward inter-descriptor dependencies starting from tables accessible by current user in current database (KV scan)", 1995 schema: ` 1996 CREATE TABLE crdb_internal.forward_dependencies ( 1997 descriptor_id INT, 1998 descriptor_name STRING NOT NULL, 1999 index_id INT, 2000 dependedonby_id INT NOT NULL, 2001 dependedonby_type STRING NOT NULL, 2002 dependedonby_index_id INT, 2003 dependedonby_name STRING, 2004 dependedonby_details STRING 2005 ) 2006 `, 2007 populate: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 2008 fkDep := tree.NewDString("fk") 2009 viewDep := tree.NewDString("view") 2010 interleaveDep := tree.NewDString("interleave") 2011 sequenceDep := tree.NewDString("sequence") 2012 return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no backward/forward dependencies*/ 2013 func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { 2014 tableID := tree.NewDInt(tree.DInt(table.ID)) 2015 tableName := tree.NewDString(table.Name) 2016 2017 reportIdxDeps := func(idx *sqlbase.IndexDescriptor) error { 2018 for _, interleaveRef := range idx.InterleavedBy { 2019 if err := addRow( 2020 tableID, tableName, 2021 tree.NewDInt(tree.DInt(idx.ID)), 2022 tree.NewDInt(tree.DInt(interleaveRef.Table)), 2023 interleaveDep, 2024 tree.NewDInt(tree.DInt(interleaveRef.Index)), 2025 tree.DNull, 2026 tree.NewDString(fmt.Sprintf("SharedPrefixLen: %d", 2027 interleaveRef.SharedPrefixLen)), 2028 ); err != nil { 2029 return err 2030 } 2031 } 2032 return nil 2033 } 2034 2035 for i := range table.InboundFKs { 2036 fk := &table.InboundFKs[i] 2037 if err := addRow( 2038 tableID, tableName, 2039 tree.DNull, 2040 tree.NewDInt(tree.DInt(fk.OriginTableID)), 2041 fkDep, 2042 tree.DNull, 2043 tree.DNull, 2044 tree.DNull, 2045 ); err != nil { 2046 return err 2047 } 2048 } 2049 2050 // Record the backward references of the primary index. 
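// reportIdxDeps walks idx.InterleavedBy, so the rows emitted for this index
// describe other tables' indexes that are interleaved into it, that is,
// dependents of this table rather than tables it depends on.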
2051 if err := reportIdxDeps(&table.PrimaryIndex); err != nil { 2052 return err 2053 } 2054 2055 // Record the backward references of secondary indexes. 2056 for i := range table.Indexes { 2057 if err := reportIdxDeps(&table.Indexes[i]); err != nil { 2058 return err 2059 } 2060 } 2061 2062 if table.IsTable() || table.IsView() { 2063 // Record the view dependencies. 2064 for _, dep := range table.DependedOnBy { 2065 if err := addRow( 2066 tableID, tableName, 2067 tree.DNull, 2068 tree.NewDInt(tree.DInt(dep.ID)), 2069 viewDep, 2070 tree.NewDInt(tree.DInt(dep.IndexID)), 2071 tree.DNull, 2072 tree.NewDString(fmt.Sprintf("Columns: %v", dep.ColumnIDs)), 2073 ); err != nil { 2074 return err 2075 } 2076 } 2077 } else if table.IsSequence() { 2078 // Record the sequence dependencies. 2079 for _, dep := range table.DependedOnBy { 2080 if err := addRow( 2081 tableID, tableName, 2082 tree.DNull, 2083 tree.NewDInt(tree.DInt(dep.ID)), 2084 sequenceDep, 2085 tree.NewDInt(tree.DInt(dep.IndexID)), 2086 tree.DNull, 2087 tree.NewDString(fmt.Sprintf("Columns: %v", dep.ColumnIDs)), 2088 ); err != nil { 2089 return err 2090 } 2091 } 2092 } 2093 return nil 2094 }) 2095 }, 2096 } 2097 2098 // crdbInternalRangesView exposes system ranges. 2099 var crdbInternalRangesView = virtualSchemaView{ 2100 schema: ` 2101 CREATE VIEW crdb_internal.ranges AS SELECT 2102 range_id, 2103 start_key, 2104 start_pretty, 2105 end_key, 2106 end_pretty, 2107 database_name, 2108 table_name, 2109 index_name, 2110 replicas, 2111 replica_localities, 2112 learner_replicas, 2113 split_enforced_until, 2114 crdb_internal.lease_holder(start_key) AS lease_holder, 2115 (crdb_internal.range_stats(start_key)->>'key_bytes')::INT + 2116 (crdb_internal.range_stats(start_key)->>'val_bytes')::INT AS range_size 2117 FROM crdb_internal.ranges_no_leases 2118 `, 2119 resultColumns: sqlbase.ResultColumns{ 2120 {Name: "range_id", Typ: types.Int}, 2121 {Name: "start_key", Typ: types.Bytes}, 2122 {Name: "start_pretty", Typ: types.String}, 2123 {Name: "end_key", Typ: types.Bytes}, 2124 {Name: "end_pretty", Typ: types.String}, 2125 {Name: "database_name", Typ: types.String}, 2126 {Name: "table_name", Typ: types.String}, 2127 {Name: "index_name", Typ: types.String}, 2128 {Name: "replicas", Typ: types.Int2Vector}, 2129 {Name: "replica_localities", Typ: types.StringArray}, 2130 {Name: "learner_replicas", Typ: types.Int2Vector}, 2131 {Name: "split_enforced_until", Typ: types.Timestamp}, 2132 {Name: "lease_holder", Typ: types.Int}, 2133 {Name: "range_size", Typ: types.Int}, 2134 }, 2135 } 2136 2137 // crdbInternalRangesNoLeasesTable exposes all ranges in the system without the 2138 // `lease_holder` information. 2139 // 2140 // TODO(tbg): prefix with kv_. 
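// Illustrative query (requires the admin role, as enforced by
// RequireAdminRole below; the predicate is hypothetical):
//
//   SELECT range_id, start_pretty, end_pretty, replicas
//     FROM crdb_internal.ranges_no_leases
//    WHERE table_name = 'users';
//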
2141 var crdbInternalRangesNoLeasesTable = virtualSchemaTable{ 2142 comment: `range metadata without leaseholder details (KV join; expensive!)`, 2143 schema: ` 2144 CREATE TABLE crdb_internal.ranges_no_leases ( 2145 range_id INT NOT NULL, 2146 start_key BYTES NOT NULL, 2147 start_pretty STRING NOT NULL, 2148 end_key BYTES NOT NULL, 2149 end_pretty STRING NOT NULL, 2150 database_name STRING NOT NULL, 2151 table_name STRING NOT NULL, 2152 index_name STRING NOT NULL, 2153 replicas INT[] NOT NULL, 2154 replica_localities STRING[] NOT NULL, 2155 learner_replicas INT[] NOT NULL, 2156 split_enforced_until TIMESTAMP 2157 ) 2158 `, 2159 generator: func(ctx context.Context, p *planner, _ *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { 2160 if err := p.RequireAdminRole(ctx, "read crdb_internal.ranges_no_leases"); err != nil { 2161 return nil, nil, err 2162 } 2163 descs, err := p.Tables().GetAllDescriptors(ctx, p.txn) 2164 if err != nil { 2165 return nil, nil, err 2166 } 2167 // TODO(knz): maybe this could use internalLookupCtx. 2168 dbNames := make(map[uint32]string) 2169 tableNames := make(map[uint32]string) 2170 indexNames := make(map[uint32]map[uint32]string) 2171 parents := make(map[uint32]uint32) 2172 for _, desc := range descs { 2173 id := uint32(desc.GetID()) 2174 switch desc := desc.(type) { 2175 case *sqlbase.TableDescriptor: 2176 parents[id] = uint32(desc.ParentID) 2177 tableNames[id] = desc.GetName() 2178 indexNames[id] = make(map[uint32]string) 2179 for _, idx := range desc.Indexes { 2180 indexNames[id][uint32(idx.ID)] = idx.Name 2181 } 2182 case *sqlbase.DatabaseDescriptor: 2183 dbNames[id] = desc.GetName() 2184 } 2185 } 2186 ranges, err := ScanMetaKVs(ctx, p.txn, roachpb.Span{ 2187 Key: keys.MinKey, 2188 EndKey: keys.MaxKey, 2189 }) 2190 if err != nil { 2191 return nil, nil, err 2192 } 2193 2194 // Map node descriptors to localities 2195 descriptors, err := getAllNodeDescriptors(p) 2196 if err != nil { 2197 return nil, nil, err 2198 } 2199 nodeIDToLocality := make(map[roachpb.NodeID]roachpb.Locality) 2200 for _, desc := range descriptors { 2201 nodeIDToLocality[desc.NodeID] = desc.Locality 2202 } 2203 2204 var desc roachpb.RangeDescriptor 2205 2206 i := 0 2207 2208 return func() (tree.Datums, error) { 2209 if i >= len(ranges) { 2210 return nil, nil 2211 } 2212 2213 r := ranges[i] 2214 i++ 2215 2216 if err := r.ValueProto(&desc); err != nil { 2217 return nil, err 2218 } 2219 2220 voterReplicas := append([]roachpb.ReplicaDescriptor(nil), desc.Replicas().Voters()...) 
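// Appending to a nil slice above copies the voter descriptors, so sorting
// them below does not mutate the slice held by the range descriptor.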
2221 var learnerReplicaStoreIDs []int 2222 for _, rd := range desc.Replicas().Learners() { 2223 learnerReplicaStoreIDs = append(learnerReplicaStoreIDs, int(rd.StoreID)) 2224 } 2225 sort.Slice(voterReplicas, func(i, j int) bool { 2226 return voterReplicas[i].StoreID < voterReplicas[j].StoreID 2227 }) 2228 sort.Ints(learnerReplicaStoreIDs) 2229 votersArr := tree.NewDArray(types.Int) 2230 for _, replica := range voterReplicas { 2231 if err := votersArr.Append(tree.NewDInt(tree.DInt(replica.StoreID))); err != nil { 2232 return nil, err 2233 } 2234 } 2235 learnersArr := tree.NewDArray(types.Int) 2236 for _, replica := range learnerReplicaStoreIDs { 2237 if err := learnersArr.Append(tree.NewDInt(tree.DInt(replica))); err != nil { 2238 return nil, err 2239 } 2240 } 2241 2242 replicaLocalityArr := tree.NewDArray(types.String) 2243 for _, replica := range voterReplicas { 2244 replicaLocality := nodeIDToLocality[replica.NodeID].String() 2245 if err := replicaLocalityArr.Append(tree.NewDString(replicaLocality)); err != nil { 2246 return nil, err 2247 } 2248 } 2249 2250 var dbName, tableName, indexName string 2251 if _, tableID, err := p.ExecCfg().Codec.DecodeTablePrefix(desc.StartKey.AsRawKey()); err == nil { 2252 parent := parents[tableID] 2253 if parent != 0 { 2254 tableName = tableNames[tableID] 2255 dbName = dbNames[parent] 2256 if _, _, idxID, err := p.ExecCfg().Codec.DecodeIndexPrefix(desc.StartKey.AsRawKey()); err == nil { 2257 indexName = indexNames[tableID][idxID] 2258 } 2259 } else { 2260 dbName = dbNames[tableID] 2261 } 2262 } 2263 2264 splitEnforcedUntil := tree.DNull 2265 if (desc.GetStickyBit() != hlc.Timestamp{}) { 2266 splitEnforcedUntil = tree.TimestampToInexactDTimestamp(*desc.StickyBit) 2267 } 2268 2269 return tree.Datums{ 2270 tree.NewDInt(tree.DInt(desc.RangeID)), 2271 tree.NewDBytes(tree.DBytes(desc.StartKey)), 2272 tree.NewDString(keys.PrettyPrint(nil /* valDirs */, desc.StartKey.AsRawKey())), 2273 tree.NewDBytes(tree.DBytes(desc.EndKey)), 2274 tree.NewDString(keys.PrettyPrint(nil /* valDirs */, desc.EndKey.AsRawKey())), 2275 tree.NewDString(dbName), 2276 tree.NewDString(tableName), 2277 tree.NewDString(indexName), 2278 votersArr, 2279 replicaLocalityArr, 2280 learnersArr, 2281 splitEnforcedUntil, 2282 }, nil 2283 }, nil, nil 2284 }, 2285 } 2286 2287 // NamespaceKey represents a key from the namespace table. 2288 type NamespaceKey struct { 2289 ParentID sqlbase.ID 2290 // ParentSchemaID is not populated for rows under system.deprecated_namespace. 2291 // This table will no longer exist on 20.2 or later. 2292 ParentSchemaID sqlbase.ID 2293 Name string 2294 } 2295 2296 // getAllNames returns a map from ID to namespaceKey for every entry in 2297 // system.namespace. 2298 func (p *planner) getAllNames(ctx context.Context) (map[sqlbase.ID]NamespaceKey, error) { 2299 return getAllNames(ctx, p.txn, p.ExtendedEvalContext().ExecCfg.InternalExecutor) 2300 } 2301 2302 // TestingGetAllNames is a wrapper for getAllNames. 2303 func TestingGetAllNames( 2304 ctx context.Context, txn *kv.Txn, executor *InternalExecutor, 2305 ) (map[sqlbase.ID]NamespaceKey, error) { 2306 return getAllNames(ctx, txn, executor) 2307 } 2308 2309 // getAllNames is the testable implementation of getAllNames. 2310 // It is public so that it can be tested outside the sql package. 
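// The returned map is keyed by descriptor ID; an entry looks like the
// following (illustrative values only):
//
//   52: NamespaceKey{ParentID: 50, ParentSchemaID: 29, Name: "users"}
//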
2311 func getAllNames( 2312 ctx context.Context, txn *kv.Txn, executor *InternalExecutor, 2313 ) (map[sqlbase.ID]NamespaceKey, error) { 2314 namespace := map[sqlbase.ID]NamespaceKey{} 2315 if executor.s.cfg.Settings.Version.IsActive(ctx, clusterversion.VersionNamespaceTableWithSchemas) { 2316 rows, err := executor.Query( 2317 ctx, "get-all-names", txn, 2318 `SELECT id, "parentID", "parentSchemaID", name FROM system.namespace`, 2319 ) 2320 if err != nil { 2321 return nil, err 2322 } 2323 for _, r := range rows { 2324 id, parentID, parentSchemaID, name := tree.MustBeDInt(r[0]), tree.MustBeDInt(r[1]), tree.MustBeDInt(r[2]), tree.MustBeDString(r[3]) 2325 namespace[sqlbase.ID(id)] = NamespaceKey{ 2326 ParentID: sqlbase.ID(parentID), 2327 ParentSchemaID: sqlbase.ID(parentSchemaID), 2328 Name: string(name), 2329 } 2330 } 2331 } 2332 2333 // Also get all rows from namespace_deprecated, and add to the namespace map 2334 // if it is not already there yet. 2335 // If a row exists in both here and namespace, only use the one from namespace. 2336 // TODO(sqlexec): In 20.2, this can be removed. 2337 deprecatedRows, err := executor.Query( 2338 ctx, "get-all-names-deprecated-namespace", txn, 2339 fmt.Sprintf(`SELECT id, "parentID", name FROM [%d as namespace]`, keys.DeprecatedNamespaceTableID), 2340 ) 2341 if err != nil { 2342 return nil, err 2343 } 2344 for _, r := range deprecatedRows { 2345 id, parentID, name := tree.MustBeDInt(r[0]), tree.MustBeDInt(r[1]), tree.MustBeDString(r[2]) 2346 if _, ok := namespace[sqlbase.ID(id)]; !ok { 2347 namespace[sqlbase.ID(id)] = NamespaceKey{ 2348 ParentID: sqlbase.ID(parentID), 2349 Name: string(name), 2350 } 2351 } 2352 } 2353 2354 return namespace, nil 2355 } 2356 2357 // crdbInternalZonesTable decodes and exposes the zone configs in the 2358 // system.zones table. 2359 // 2360 // TODO(tbg): prefix with kv_. 2361 var crdbInternalZonesTable = virtualSchemaTable{ 2362 comment: "decoded zone configurations from system.zones (KV scan)", 2363 schema: ` 2364 CREATE TABLE crdb_internal.zones ( 2365 zone_id INT NOT NULL, 2366 subzone_id INT NOT NULL, 2367 target STRING, 2368 range_name STRING, 2369 database_name STRING, 2370 table_name STRING, 2371 index_name STRING, 2372 partition_name STRING, 2373 raw_config_yaml STRING NOT NULL, 2374 raw_config_sql STRING, -- this column can be NULL if there is no specifier syntax 2375 -- possible (e.g. the object was deleted). 
2376 raw_config_protobuf BYTES NOT NULL, 2377 full_config_yaml STRING NOT NULL, 2378 full_config_sql STRING 2379 ) 2380 `, 2381 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 2382 namespace, err := p.getAllNames(ctx) 2383 if err != nil { 2384 return err 2385 } 2386 resolveID := func(id uint32) (parentID uint32, name string, err error) { 2387 if entry, ok := namespace[sqlbase.ID(id)]; ok { 2388 return uint32(entry.ParentID), entry.Name, nil 2389 } 2390 return 0, "", errors.AssertionFailedf( 2391 "object with ID %d does not exist", errors.Safe(id)) 2392 } 2393 2394 getKey := func(key roachpb.Key) (*roachpb.Value, error) { 2395 kv, err := p.txn.Get(ctx, key) 2396 if err != nil { 2397 return nil, err 2398 } 2399 return kv.Value, nil 2400 } 2401 2402 rows, err := p.ExtendedEvalContext().ExecCfg.InternalExecutor.Query( 2403 ctx, "crdb-internal-zones-table", p.txn, `SELECT id, config FROM system.zones`) 2404 if err != nil { 2405 return err 2406 } 2407 values := make(tree.Datums, len(showZoneConfigColumns)) 2408 for _, r := range rows { 2409 id := uint32(tree.MustBeDInt(r[0])) 2410 2411 var zoneSpecifier *tree.ZoneSpecifier 2412 zs, err := zonepb.ZoneSpecifierFromID(id, resolveID) 2413 if err != nil { 2414 // We can have valid zoneSpecifiers whose table/database has been 2415 // deleted because zoneSpecifiers are collected asynchronously. 2416 // In this case, just don't show the zoneSpecifier in the 2417 // output of the table. 2418 continue 2419 } else { 2420 zoneSpecifier = &zs 2421 } 2422 2423 configBytes := []byte(*r[1].(*tree.DBytes)) 2424 var configProto zonepb.ZoneConfig 2425 if err := protoutil.Unmarshal(configBytes, &configProto); err != nil { 2426 return err 2427 } 2428 subzones := configProto.Subzones 2429 2430 // Inherit full information about this zone. 2431 fullZone := configProto 2432 if err := completeZoneConfig(&fullZone, uint32(tree.MustBeDInt(r[0])), getKey); err != nil { 2433 return err 2434 } 2435 2436 var table *TableDescriptor 2437 if zs.Database != "" { 2438 database, err := sqlbase.GetDatabaseDescFromID(ctx, p.txn, p.ExecCfg().Codec, sqlbase.ID(id)) 2439 if err != nil { 2440 return err 2441 } 2442 if p.CheckAnyPrivilege(ctx, database) != nil { 2443 continue 2444 } 2445 } else if zoneSpecifier.TableOrIndex.Table.ObjectName != "" { 2446 table, err = sqlbase.GetTableDescFromID(ctx, p.txn, p.ExecCfg().Codec, sqlbase.ID(id)) 2447 if err != nil { 2448 return err 2449 } 2450 if p.CheckAnyPrivilege(ctx, table) != nil { 2451 continue 2452 } 2453 } 2454 2455 // Write down information about the zone in the table. 2456 // TODO (rohany): We would like to just display information about these 2457 // subzone placeholders, but there are a few tests that depend on this 2458 // behavior, so leave it in for now. 2459 if !configProto.IsSubzonePlaceholder() { 2460 // Ensure subzones don't infect the value of the config_proto column. 
2461 configProto.Subzones = nil 2462 configProto.SubzoneSpans = nil 2463 2464 if err := generateZoneConfigIntrospectionValues( 2465 values, 2466 r[0], 2467 tree.NewDInt(tree.DInt(0)), 2468 zoneSpecifier, 2469 &configProto, 2470 &fullZone, 2471 ); err != nil { 2472 return err 2473 } 2474 2475 if err := addRow(values...); err != nil { 2476 return err 2477 } 2478 } 2479 2480 if len(subzones) > 0 { 2481 if table == nil { 2482 return errors.AssertionFailedf( 2483 "object id %d with #subzones %d is not a table", 2484 id, 2485 len(subzones), 2486 ) 2487 } 2488 2489 for i, s := range subzones { 2490 index := table.FindActiveIndexByID(sqlbase.IndexID(s.IndexID)) 2491 if index == nil { 2492 // If we can't find an active index that corresponds to this index 2493 // ID then continue, as the index is being dropped, or is already 2494 // dropped and in the GC queue. 2495 continue 2496 } 2497 if zoneSpecifier != nil { 2498 zs := zs 2499 zs.TableOrIndex.Index = tree.UnrestrictedName(index.Name) 2500 zs.Partition = tree.Name(s.PartitionName) 2501 zoneSpecifier = &zs 2502 } 2503 2504 // Generate information about full / inherited constraints. 2505 // There are two cases -- the subzone we are looking at refers 2506 // to an index, or to a partition. 2507 subZoneConfig := s.Config 2508 2509 // In this case, we have an index. Inherit from the parent zone. 2510 if s.PartitionName == "" { 2511 subZoneConfig.InheritFromParent(&fullZone) 2512 } else { 2513 // We have a partition. Get the parent index partition from the zone and 2514 // have it inherit constraints. 2515 if indexSubzone := fullZone.GetSubzone(uint32(index.ID), ""); indexSubzone != nil { 2516 subZoneConfig.InheritFromParent(&indexSubzone.Config) 2517 } 2518 // Inherit remaining fields from the full parent zone. 2519 subZoneConfig.InheritFromParent(&fullZone) 2520 } 2521 2522 if err := generateZoneConfigIntrospectionValues( 2523 values, 2524 r[0], 2525 tree.NewDInt(tree.DInt(i+1)), 2526 zoneSpecifier, 2527 &s.Config, 2528 &subZoneConfig, 2529 ); err != nil { 2530 return err 2531 } 2532 2533 if err := addRow(values...); err != nil { 2534 return err 2535 } 2536 } 2537 } 2538 } 2539 return nil 2540 }, 2541 } 2542 2543 func getAllNodeDescriptors(p *planner) ([]roachpb.NodeDescriptor, error) { 2544 g, err := p.ExecCfg().Gossip.OptionalErr(47899) 2545 if err != nil { 2546 return nil, err 2547 } 2548 var descriptors []roachpb.NodeDescriptor 2549 if err := g.IterateInfos(gossip.KeyNodeIDPrefix, func(key string, i gossip.Info) error { 2550 bytes, err := i.Value.GetBytes() 2551 if err != nil { 2552 return errors.NewAssertionErrorWithWrappedErrf(err, 2553 "failed to extract bytes for key %q", key) 2554 } 2555 2556 var d roachpb.NodeDescriptor 2557 if err := protoutil.Unmarshal(bytes, &d); err != nil { 2558 return errors.NewAssertionErrorWithWrappedErrf(err, 2559 "failed to parse value for key %q", key) 2560 } 2561 2562 // Don't use node descriptors with NodeID 0, because that's meant to 2563 // indicate that the node has been removed from the cluster. 2564 if d.NodeID != 0 { 2565 descriptors = append(descriptors, d) 2566 } 2567 return nil 2568 }); err != nil { 2569 return nil, err 2570 } 2571 return descriptors, nil 2572 } 2573 2574 // crdbInternalGossipNodesTable exposes local information about the cluster nodes. 
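// Illustrative query (gossip-backed, so local and possibly stale; requires
// the admin role as enforced below):
//
//   SELECT node_id, address, is_live, ranges, leases
//     FROM crdb_internal.gossip_nodes;
//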
2575 var crdbInternalGossipNodesTable = virtualSchemaTable{ 2576 comment: "locally known gossiped node details (RAM; local node only)", 2577 schema: ` 2578 CREATE TABLE crdb_internal.gossip_nodes ( 2579 node_id INT NOT NULL, 2580 network STRING NOT NULL, 2581 address STRING NOT NULL, 2582 advertise_address STRING NOT NULL, 2583 sql_network STRING NOT NULL, 2584 sql_address STRING NOT NULL, 2585 advertise_sql_address STRING NOT NULL, 2586 attrs JSON NOT NULL, 2587 locality STRING NOT NULL, 2588 cluster_name STRING NOT NULL, 2589 server_version STRING NOT NULL, 2590 build_tag STRING NOT NULL, 2591 started_at TIMESTAMP NOT NULL, 2592 is_live BOOL NOT NULL, 2593 ranges INT NOT NULL, 2594 leases INT NOT NULL 2595 ) 2596 `, 2597 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 2598 if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_nodes"); err != nil { 2599 return err 2600 } 2601 2602 g, err := p.ExecCfg().Gossip.OptionalErr(47899) 2603 if err != nil { 2604 return err 2605 } 2606 2607 descriptors, err := getAllNodeDescriptors(p) 2608 if err != nil { 2609 return err 2610 } 2611 2612 alive := make(map[roachpb.NodeID]tree.DBool) 2613 for _, d := range descriptors { 2614 if _, err := g.GetInfo(gossip.MakeGossipClientsKey(d.NodeID)); err == nil { 2615 alive[d.NodeID] = true 2616 } 2617 } 2618 2619 sort.Slice(descriptors, func(i, j int) bool { 2620 return descriptors[i].NodeID < descriptors[j].NodeID 2621 }) 2622 2623 type nodeStats struct { 2624 ranges int32 2625 leases int32 2626 } 2627 2628 stats := make(map[roachpb.NodeID]nodeStats) 2629 if err := g.IterateInfos(gossip.KeyStorePrefix, func(key string, i gossip.Info) error { 2630 bytes, err := i.Value.GetBytes() 2631 if err != nil { 2632 return errors.NewAssertionErrorWithWrappedErrf(err, 2633 "failed to extract bytes for key %q", key) 2634 } 2635 2636 var desc roachpb.StoreDescriptor 2637 if err := protoutil.Unmarshal(bytes, &desc); err != nil { 2638 return errors.NewAssertionErrorWithWrappedErrf(err, 2639 "failed to parse value for key %q", key) 2640 } 2641 2642 s := stats[desc.Node.NodeID] 2643 s.ranges += desc.Capacity.RangeCount 2644 s.leases += desc.Capacity.LeaseCount 2645 stats[desc.Node.NodeID] = s 2646 return nil 2647 }); err != nil { 2648 return err 2649 } 2650 2651 for _, d := range descriptors { 2652 attrs := json.NewArrayBuilder(len(d.Attrs.Attrs)) 2653 for _, a := range d.Attrs.Attrs { 2654 attrs.Add(json.FromString(a)) 2655 } 2656 2657 listenAddrRPC := d.Address 2658 listenAddrSQL := d.SQLAddress 2659 if listenAddrSQL.IsEmpty() { 2660 // Pre-19.2 node or same address for both. 
2661 listenAddrSQL = listenAddrRPC 2662 } 2663 2664 advAddrRPC, err := g.GetNodeIDAddress(d.NodeID) 2665 if err != nil { 2666 return err 2667 } 2668 advAddrSQL, err := g.GetNodeIDSQLAddress(d.NodeID) 2669 if err != nil { 2670 return err 2671 } 2672 2673 startTSDatum, err := tree.MakeDTimestamp(timeutil.Unix(0, d.StartedAt), time.Microsecond) 2674 if err != nil { 2675 return err 2676 } 2677 if err := addRow( 2678 tree.NewDInt(tree.DInt(d.NodeID)), 2679 tree.NewDString(listenAddrRPC.NetworkField), 2680 tree.NewDString(listenAddrRPC.AddressField), 2681 tree.NewDString(advAddrRPC.String()), 2682 tree.NewDString(listenAddrSQL.NetworkField), 2683 tree.NewDString(listenAddrSQL.AddressField), 2684 tree.NewDString(advAddrSQL.String()), 2685 tree.NewDJSON(attrs.Build()), 2686 tree.NewDString(d.Locality.String()), 2687 tree.NewDString(d.ClusterName), 2688 tree.NewDString(d.ServerVersion.String()), 2689 tree.NewDString(d.BuildTag), 2690 startTSDatum, 2691 tree.MakeDBool(alive[d.NodeID]), 2692 tree.NewDInt(tree.DInt(stats[d.NodeID].ranges)), 2693 tree.NewDInt(tree.DInt(stats[d.NodeID].leases)), 2694 ); err != nil { 2695 return err 2696 } 2697 } 2698 return nil 2699 }, 2700 } 2701 2702 // crdbInternalGossipLivenessTable exposes local information about the nodes' 2703 // liveness. The data exposed in this table can be stale/incomplete because 2704 // gossip doesn't provide guarantees around freshness or consistency. 2705 var crdbInternalGossipLivenessTable = virtualSchemaTable{ 2706 comment: "locally known gossiped node liveness (RAM; local node only)", 2707 schema: ` 2708 CREATE TABLE crdb_internal.gossip_liveness ( 2709 node_id INT NOT NULL, 2710 epoch INT NOT NULL, 2711 expiration STRING NOT NULL, 2712 draining BOOL NOT NULL, 2713 decommissioning BOOL NOT NULL, 2714 updated_at TIMESTAMP 2715 ) 2716 `, 2717 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 2718 // ATTENTION: The contents of this table should only access gossip data 2719 // which is highly available. DO NOT CALL functions which require the 2720 // cluster to be healthy, such as StatusServer.Nodes(). 
2721 2722 if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_liveness"); err != nil { 2723 return err 2724 } 2725 2726 g, err := p.ExecCfg().Gossip.OptionalErr(47899) 2727 if err != nil { 2728 return err 2729 } 2730 2731 type nodeInfo struct { 2732 liveness kvserverpb.Liveness 2733 updatedAt int64 2734 } 2735 2736 var nodes []nodeInfo 2737 if err := g.IterateInfos(gossip.KeyNodeLivenessPrefix, func(key string, i gossip.Info) error { 2738 bytes, err := i.Value.GetBytes() 2739 if err != nil { 2740 return errors.NewAssertionErrorWithWrappedErrf(err, 2741 "failed to extract bytes for key %q", key) 2742 } 2743 2744 var l kvserverpb.Liveness 2745 if err := protoutil.Unmarshal(bytes, &l); err != nil { 2746 return errors.NewAssertionErrorWithWrappedErrf(err, 2747 "failed to parse value for key %q", key) 2748 } 2749 nodes = append(nodes, nodeInfo{ 2750 liveness: l, 2751 updatedAt: i.OrigStamp, 2752 }) 2753 return nil 2754 }); err != nil { 2755 return err 2756 } 2757 2758 sort.Slice(nodes, func(i, j int) bool { 2759 return nodes[i].liveness.NodeID < nodes[j].liveness.NodeID 2760 }) 2761 2762 for i := range nodes { 2763 n := &nodes[i] 2764 l := &n.liveness 2765 updatedTSDatum, err := tree.MakeDTimestamp(timeutil.Unix(0, n.updatedAt), time.Microsecond) 2766 if err != nil { 2767 return err 2768 } 2769 if err := addRow( 2770 tree.NewDInt(tree.DInt(l.NodeID)), 2771 tree.NewDInt(tree.DInt(l.Epoch)), 2772 tree.NewDString(l.Expiration.String()), 2773 tree.MakeDBool(tree.DBool(l.Draining)), 2774 tree.MakeDBool(tree.DBool(l.Decommissioning)), 2775 updatedTSDatum, 2776 ); err != nil { 2777 return err 2778 } 2779 } 2780 return nil 2781 }, 2782 } 2783 2784 // crdbInternalGossipAlertsTable exposes current health alerts in the cluster. 2785 var crdbInternalGossipAlertsTable = virtualSchemaTable{ 2786 comment: "locally known gossiped health alerts (RAM; local node only)", 2787 schema: ` 2788 CREATE TABLE crdb_internal.gossip_alerts ( 2789 node_id INT NOT NULL, 2790 store_id INT NULL, -- null for alerts not associated to a store 2791 category STRING NOT NULL, -- type of alert, usually by subsystem 2792 description STRING NOT NULL, -- name of the alert (depends on subsystem) 2793 value FLOAT NOT NULL -- value of the alert (depends on subsystem, can be NaN) 2794 ) 2795 `, 2796 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 2797 if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_alerts"); err != nil { 2798 return err 2799 } 2800 2801 g, err := p.ExecCfg().Gossip.OptionalErr(47899) 2802 if err != nil { 2803 return err 2804 } 2805 2806 type resultWithNodeID struct { 2807 roachpb.NodeID 2808 statuspb.HealthCheckResult 2809 } 2810 var results []resultWithNodeID 2811 if err := g.IterateInfos(gossip.KeyNodeHealthAlertPrefix, func(key string, i gossip.Info) error { 2812 bytes, err := i.Value.GetBytes() 2813 if err != nil { 2814 return errors.NewAssertionErrorWithWrappedErrf(err, 2815 "failed to extract bytes for key %q", key) 2816 } 2817 2818 var d statuspb.HealthCheckResult 2819 if err := protoutil.Unmarshal(bytes, &d); err != nil { 2820 return errors.NewAssertionErrorWithWrappedErrf(err, 2821 "failed to parse value for key %q", key) 2822 } 2823 nodeID, err := gossip.NodeIDFromKey(key, gossip.KeyNodeHealthAlertPrefix) 2824 if err != nil { 2825 return errors.NewAssertionErrorWithWrappedErrf(err, 2826 "failed to parse node ID from key %q", key) 2827 } 2828 results = append(results, resultWithNodeID{nodeID, d}) 2829 return nil 2830 }); err != 
nil { 2831 return err 2832 } 2833 2834 for _, result := range results { 2835 for _, alert := range result.Alerts { 2836 storeID := tree.DNull 2837 if alert.StoreID != 0 { 2838 storeID = tree.NewDInt(tree.DInt(alert.StoreID)) 2839 } 2840 if err := addRow( 2841 tree.NewDInt(tree.DInt(result.NodeID)), 2842 storeID, 2843 tree.NewDString(strings.ToLower(alert.Category.String())), 2844 tree.NewDString(alert.Description), 2845 tree.NewDFloat(tree.DFloat(alert.Value)), 2846 ); err != nil { 2847 return err 2848 } 2849 } 2850 } 2851 return nil 2852 }, 2853 } 2854 2855 // crdbInternalGossipNetwork exposes the local view of the gossip network (i.e 2856 // the gossip client connections from source_id node to target_id node). 2857 var crdbInternalGossipNetworkTable = virtualSchemaTable{ 2858 comment: "locally known edges in the gossip network (RAM; local node only)", 2859 schema: ` 2860 CREATE TABLE crdb_internal.gossip_network ( 2861 source_id INT NOT NULL, -- source node of a gossip connection 2862 target_id INT NOT NULL -- target node of a gossip connection 2863 ) 2864 `, 2865 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 2866 if err := p.RequireAdminRole(ctx, "read crdb_internal.gossip_network"); err != nil { 2867 return err 2868 } 2869 2870 g, err := p.ExecCfg().Gossip.OptionalErr(47899) 2871 if err != nil { 2872 return err 2873 } 2874 2875 c := g.Connectivity() 2876 for _, conn := range c.ClientConns { 2877 if err := addRow( 2878 tree.NewDInt(tree.DInt(conn.SourceID)), 2879 tree.NewDInt(tree.DInt(conn.TargetID)), 2880 ); err != nil { 2881 return err 2882 } 2883 } 2884 return nil 2885 }, 2886 } 2887 2888 // addPartitioningRows adds the rows in crdb_internal.partitions for each partition. 2889 // None of the arguments can be nil, and it is used recursively when a list partition 2890 // has subpartitions. In that case, the colOffset argument is incremented to represent 2891 // how many columns of the index have been partitioned already. 2892 func addPartitioningRows( 2893 ctx context.Context, 2894 p *planner, 2895 database string, 2896 table *sqlbase.TableDescriptor, 2897 index *sqlbase.IndexDescriptor, 2898 partitioning *sqlbase.PartitioningDescriptor, 2899 parentName tree.Datum, 2900 colOffset int, 2901 addRow func(...tree.Datum) error, 2902 ) error { 2903 tableID := tree.NewDInt(tree.DInt(table.ID)) 2904 indexID := tree.NewDInt(tree.DInt(index.ID)) 2905 numColumns := tree.NewDInt(tree.DInt(partitioning.NumColumns)) 2906 2907 var buf bytes.Buffer 2908 for i := uint32(colOffset); i < uint32(colOffset)+partitioning.NumColumns; i++ { 2909 if i != uint32(colOffset) { 2910 buf.WriteString(`, `) 2911 } 2912 buf.WriteString(index.ColumnNames[i]) 2913 } 2914 colNames := tree.NewDString(buf.String()) 2915 2916 var datumAlloc sqlbase.DatumAlloc 2917 2918 // We don't need real prefixes in the DecodePartitionTuple calls because we 2919 // only use the tree.Datums part of the output. 2920 fakePrefixDatums := make([]tree.Datum, colOffset) 2921 for i := range fakePrefixDatums { 2922 fakePrefixDatums[i] = tree.DNull 2923 } 2924 2925 // This produces the list_value column. 
2926 for _, l := range partitioning.List { 2927 var buf bytes.Buffer 2928 for j, values := range l.Values { 2929 if j != 0 { 2930 buf.WriteString(`, `) 2931 } 2932 tuple, _, err := sqlbase.DecodePartitionTuple( 2933 &datumAlloc, p.ExecCfg().Codec, table, index, partitioning, values, fakePrefixDatums, 2934 ) 2935 if err != nil { 2936 return err 2937 } 2938 buf.WriteString(tuple.String()) 2939 } 2940 2941 partitionValue := tree.NewDString(buf.String()) 2942 name := tree.NewDString(l.Name) 2943 2944 // Figure out which zone and subzone this partition should correspond to. 2945 zoneID, zone, subzone, err := GetZoneConfigInTxn( 2946 ctx, p.txn, uint32(table.ID), index, l.Name, false /* getInheritedDefault */) 2947 if err != nil { 2948 return err 2949 } 2950 subzoneID := base.SubzoneID(0) 2951 if subzone != nil { 2952 for i, s := range zone.Subzones { 2953 if s.IndexID == subzone.IndexID && s.PartitionName == subzone.PartitionName { 2954 subzoneID = base.SubzoneIDFromIndex(i) 2955 } 2956 } 2957 } 2958 2959 if err := addRow( 2960 tableID, 2961 indexID, 2962 parentName, 2963 name, 2964 numColumns, 2965 colNames, 2966 partitionValue, 2967 tree.DNull, /* null value for partition range */ 2968 tree.NewDInt(tree.DInt(zoneID)), 2969 tree.NewDInt(tree.DInt(subzoneID)), 2970 ); err != nil { 2971 return err 2972 } 2973 err = addPartitioningRows(ctx, p, database, table, index, &l.Subpartitioning, name, 2974 colOffset+int(partitioning.NumColumns), addRow) 2975 if err != nil { 2976 return err 2977 } 2978 } 2979 2980 // This produces the range_value column. 2981 for _, r := range partitioning.Range { 2982 var buf bytes.Buffer 2983 fromTuple, _, err := sqlbase.DecodePartitionTuple( 2984 &datumAlloc, p.ExecCfg().Codec, table, index, partitioning, r.FromInclusive, fakePrefixDatums, 2985 ) 2986 if err != nil { 2987 return err 2988 } 2989 buf.WriteString(fromTuple.String()) 2990 buf.WriteString(" TO ") 2991 toTuple, _, err := sqlbase.DecodePartitionTuple( 2992 &datumAlloc, p.ExecCfg().Codec, table, index, partitioning, r.ToExclusive, fakePrefixDatums, 2993 ) 2994 if err != nil { 2995 return err 2996 } 2997 buf.WriteString(toTuple.String()) 2998 partitionRange := tree.NewDString(buf.String()) 2999 3000 // Figure out which zone and subzone this partition should correspond to. 3001 zoneID, zone, subzone, err := GetZoneConfigInTxn( 3002 ctx, p.txn, uint32(table.ID), index, r.Name, false /* getInheritedDefault */) 3003 if err != nil { 3004 return err 3005 } 3006 subzoneID := base.SubzoneID(0) 3007 if subzone != nil { 3008 for i, s := range zone.Subzones { 3009 if s.IndexID == subzone.IndexID && s.PartitionName == subzone.PartitionName { 3010 subzoneID = base.SubzoneIDFromIndex(i) 3011 } 3012 } 3013 } 3014 3015 if err := addRow( 3016 tableID, 3017 indexID, 3018 parentName, 3019 tree.NewDString(r.Name), 3020 numColumns, 3021 colNames, 3022 tree.DNull, /* null value for partition list */ 3023 partitionRange, 3024 tree.NewDInt(tree.DInt(zoneID)), 3025 tree.NewDInt(tree.DInt(subzoneID)), 3026 ); err != nil { 3027 return err 3028 } 3029 } 3030 3031 return nil 3032 } 3033 3034 // crdbInternalPartitionsTable decodes and exposes the partitions of each 3035 // table. 3036 // 3037 // TODO(tbg): prefix with cluster_. 
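// Illustrative query (the table_id value is hypothetical); list partitions
// populate list_value and range partitions populate range_value, matching
// addPartitioningRows above:
//
//   SELECT index_id, parent_name, name, list_value, range_value
//     FROM crdb_internal.partitions
//    WHERE table_id = 53;
//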
3038 var crdbInternalPartitionsTable = virtualSchemaTable{ 3039 comment: "defined partitions for all tables/indexes accessible by the current user in the current database (KV scan)", 3040 schema: ` 3041 CREATE TABLE crdb_internal.partitions ( 3042 table_id INT NOT NULL, 3043 index_id INT NOT NULL, 3044 parent_name STRING, 3045 name STRING NOT NULL, 3046 columns INT NOT NULL, 3047 column_names STRING, 3048 list_value STRING, 3049 range_value STRING, 3050 zone_id INT, -- references a zone id in the crdb_internal.zones table 3051 subzone_id INT -- references a subzone id in the crdb_internal.zones table 3052 ) 3053 `, 3054 generator: func(ctx context.Context, p *planner, dbContext *DatabaseDescriptor) (virtualTableGenerator, cleanupFunc, error) { 3055 dbName := "" 3056 if dbContext != nil { 3057 dbName = dbContext.Name 3058 } 3059 worker := func(pusher rowPusher) error { 3060 return forEachTableDescAll(ctx, p, dbContext, hideVirtual, /* virtual tables have no partitions*/ 3061 func(db *DatabaseDescriptor, _ string, table *TableDescriptor) error { 3062 return table.ForeachNonDropIndex(func(index *sqlbase.IndexDescriptor) error { 3063 return addPartitioningRows(ctx, p, dbName, table, index, &index.Partitioning, 3064 tree.DNull /* parentName */, 0 /* colOffset */, pusher.pushRow) 3065 }) 3066 }) 3067 } 3068 next, cleanup := setupGenerator(ctx, worker) 3069 return next, cleanup, nil 3070 }, 3071 } 3072 3073 // crdbInternalKVNodeStatusTable exposes information from the status server about the cluster nodes. 3074 // 3075 // TODO(tbg): s/kv_/cluster_/ 3076 var crdbInternalKVNodeStatusTable = virtualSchemaTable{ 3077 comment: "node details across the entire cluster (cluster RPC; expensive!)", 3078 schema: ` 3079 CREATE TABLE crdb_internal.kv_node_status ( 3080 node_id INT NOT NULL, 3081 network STRING NOT NULL, 3082 address STRING NOT NULL, 3083 attrs JSON NOT NULL, 3084 locality STRING NOT NULL, 3085 server_version STRING NOT NULL, 3086 go_version STRING NOT NULL, 3087 tag STRING NOT NULL, 3088 time STRING NOT NULL, 3089 revision STRING NOT NULL, 3090 cgo_compiler STRING NOT NULL, 3091 platform STRING NOT NULL, 3092 distribution STRING NOT NULL, 3093 type STRING NOT NULL, 3094 dependencies STRING NOT NULL, 3095 started_at TIMESTAMP NOT NULL, 3096 updated_at TIMESTAMP NOT NULL, 3097 metrics JSON NOT NULL, 3098 args JSON NOT NULL, 3099 env JSON NOT NULL, 3100 activity JSON NOT NULL 3101 ) 3102 `, 3103 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 3104 if err := p.RequireAdminRole(ctx, "read crdb_internal.kv_node_status"); err != nil { 3105 return err 3106 } 3107 ss, err := p.extendedEvalCtx.StatusServer.OptionalErr() 3108 if err != nil { 3109 return err 3110 } 3111 response, err := ss.Nodes(ctx, &serverpb.NodesRequest{}) 3112 if err != nil { 3113 return err 3114 } 3115 3116 for _, n := range response.Nodes { 3117 attrs := json.NewArrayBuilder(len(n.Desc.Attrs.Attrs)) 3118 for _, a := range n.Desc.Attrs.Attrs { 3119 attrs.Add(json.FromString(a)) 3120 } 3121 3122 var dependencies string 3123 if n.BuildInfo.Dependencies == nil { 3124 dependencies = "" 3125 } else { 3126 dependencies = *(n.BuildInfo.Dependencies) 3127 } 3128 3129 metrics := json.NewObjectBuilder(len(n.Metrics)) 3130 for k, v := range n.Metrics { 3131 metric, err := json.FromFloat64(v) 3132 if err != nil { 3133 return err 3134 } 3135 metrics.Add(k, metric) 3136 } 3137 3138 args := json.NewArrayBuilder(len(n.Args)) 3139 for _, a := range n.Args { 3140 
args.Add(json.FromString(a)) 3141 } 3142 3143 env := json.NewArrayBuilder(len(n.Env)) 3144 for _, v := range n.Env { 3145 env.Add(json.FromString(v)) 3146 } 3147 3148 activity := json.NewObjectBuilder(len(n.Activity)) 3149 for nodeID, values := range n.Activity { 3150 b := json.NewObjectBuilder(3) 3151 b.Add("incoming", json.FromInt64(values.Incoming)) 3152 b.Add("outgoing", json.FromInt64(values.Outgoing)) 3153 b.Add("latency", json.FromInt64(values.Latency)) 3154 activity.Add(nodeID.String(), b.Build()) 3155 } 3156 3157 startTSDatum, err := tree.MakeDTimestamp(timeutil.Unix(0, n.StartedAt), time.Microsecond) 3158 if err != nil { 3159 return err 3160 } 3161 endTSDatum, err := tree.MakeDTimestamp(timeutil.Unix(0, n.UpdatedAt), time.Microsecond) 3162 if err != nil { 3163 return err 3164 } 3165 if err := addRow( 3166 tree.NewDInt(tree.DInt(n.Desc.NodeID)), 3167 tree.NewDString(n.Desc.Address.NetworkField), 3168 tree.NewDString(n.Desc.Address.AddressField), 3169 tree.NewDJSON(attrs.Build()), 3170 tree.NewDString(n.Desc.Locality.String()), 3171 tree.NewDString(n.Desc.ServerVersion.String()), 3172 tree.NewDString(n.BuildInfo.GoVersion), 3173 tree.NewDString(n.BuildInfo.Tag), 3174 tree.NewDString(n.BuildInfo.Time), 3175 tree.NewDString(n.BuildInfo.Revision), 3176 tree.NewDString(n.BuildInfo.CgoCompiler), 3177 tree.NewDString(n.BuildInfo.Platform), 3178 tree.NewDString(n.BuildInfo.Distribution), 3179 tree.NewDString(n.BuildInfo.Type), 3180 tree.NewDString(dependencies), 3181 startTSDatum, 3182 endTSDatum, 3183 tree.NewDJSON(metrics.Build()), 3184 tree.NewDJSON(args.Build()), 3185 tree.NewDJSON(env.Build()), 3186 tree.NewDJSON(activity.Build()), 3187 ); err != nil { 3188 return err 3189 } 3190 } 3191 return nil 3192 }, 3193 } 3194 3195 // crdbInternalKVStoreStatusTable exposes information about the cluster stores. 
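// Illustrative query (cluster RPC fan-out, so potentially expensive; the
// threshold is hypothetical):
//
//   SELECT node_id, store_id, range_count, lease_count, writes_per_second
//     FROM crdb_internal.kv_store_status
//    WHERE range_count > 1000;
//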
3196 // 3197 // TODO(tbg): s/kv_/cluster_/ 3198 var crdbInternalKVStoreStatusTable = virtualSchemaTable{ 3199 comment: "store details and status (cluster RPC; expensive!)", 3200 schema: ` 3201 CREATE TABLE crdb_internal.kv_store_status ( 3202 node_id INT NOT NULL, 3203 store_id INT NOT NULL, 3204 attrs JSON NOT NULL, 3205 capacity INT NOT NULL, 3206 available INT NOT NULL, 3207 used INT NOT NULL, 3208 logical_bytes INT NOT NULL, 3209 range_count INT NOT NULL, 3210 lease_count INT NOT NULL, 3211 writes_per_second FLOAT NOT NULL, 3212 bytes_per_replica JSON NOT NULL, 3213 writes_per_replica JSON NOT NULL, 3214 metrics JSON NOT NULL 3215 ) 3216 `, 3217 populate: func(ctx context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error { 3218 if err := p.RequireAdminRole(ctx, "read crdb_internal.kv_store_status"); err != nil { 3219 return err 3220 } 3221 ss, err := p.ExecCfg().StatusServer.OptionalErr() 3222 if err != nil { 3223 return err 3224 } 3225 response, err := ss.Nodes(ctx, &serverpb.NodesRequest{}) 3226 if err != nil { 3227 return err 3228 } 3229 3230 for _, n := range response.Nodes { 3231 for _, s := range n.StoreStatuses { 3232 attrs := json.NewArrayBuilder(len(s.Desc.Attrs.Attrs)) 3233 for _, a := range s.Desc.Attrs.Attrs { 3234 attrs.Add(json.FromString(a)) 3235 } 3236 3237 metrics := json.NewObjectBuilder(len(s.Metrics)) 3238 for k, v := range s.Metrics { 3239 metric, err := json.FromFloat64(v) 3240 if err != nil { 3241 return err 3242 } 3243 metrics.Add(k, metric) 3244 } 3245 3246 percentilesToJSON := func(ps roachpb.Percentiles) (json.JSON, error) { 3247 b := json.NewObjectBuilder(5) 3248 v, err := json.FromFloat64(ps.P10) 3249 if err != nil { 3250 return nil, err 3251 } 3252 b.Add("P10", v) 3253 v, err = json.FromFloat64(ps.P25) 3254 if err != nil { 3255 return nil, err 3256 } 3257 b.Add("P25", v) 3258 v, err = json.FromFloat64(ps.P50) 3259 if err != nil { 3260 return nil, err 3261 } 3262 b.Add("P50", v) 3263 v, err = json.FromFloat64(ps.P75) 3264 if err != nil { 3265 return nil, err 3266 } 3267 b.Add("P75", v) 3268 v, err = json.FromFloat64(ps.P90) 3269 if err != nil { 3270 return nil, err 3271 } 3272 b.Add("P90", v) 3273 v, err = json.FromFloat64(ps.PMax) 3274 if err != nil { 3275 return nil, err 3276 } 3277 b.Add("PMax", v) 3278 return b.Build(), nil 3279 } 3280 3281 bytesPerReplica, err := percentilesToJSON(s.Desc.Capacity.BytesPerReplica) 3282 if err != nil { 3283 return err 3284 } 3285 writesPerReplica, err := percentilesToJSON(s.Desc.Capacity.WritesPerReplica) 3286 if err != nil { 3287 return err 3288 } 3289 3290 if err := addRow( 3291 tree.NewDInt(tree.DInt(s.Desc.Node.NodeID)), 3292 tree.NewDInt(tree.DInt(s.Desc.StoreID)), 3293 tree.NewDJSON(attrs.Build()), 3294 tree.NewDInt(tree.DInt(s.Desc.Capacity.Capacity)), 3295 tree.NewDInt(tree.DInt(s.Desc.Capacity.Available)), 3296 tree.NewDInt(tree.DInt(s.Desc.Capacity.Used)), 3297 tree.NewDInt(tree.DInt(s.Desc.Capacity.LogicalBytes)), 3298 tree.NewDInt(tree.DInt(s.Desc.Capacity.RangeCount)), 3299 tree.NewDInt(tree.DInt(s.Desc.Capacity.LeaseCount)), 3300 tree.NewDFloat(tree.DFloat(s.Desc.Capacity.WritesPerSecond)), 3301 tree.NewDJSON(bytesPerReplica), 3302 tree.NewDJSON(writesPerReplica), 3303 tree.NewDJSON(metrics.Build()), 3304 ); err != nil { 3305 return err 3306 } 3307 } 3308 } 3309 return nil 3310 }, 3311 } 3312 3313 // crdbInternalPredefinedComments exposes the predefined 3314 // comments for virtual tables. 
This is used by SHOW TABLES WITH COMMENT 3315 // as a fallback when system.comments has no entry for a virtual table. 3316 // TODO(knz): extend this with vtable column comments. 3317 // 3318 // TODO(tbg): prefix with node_. 3319 var crdbInternalPredefinedCommentsTable = virtualSchemaTable{ 3320 comment: `comments for predefined virtual tables (RAM/static)`, 3321 schema: ` 3322 CREATE TABLE crdb_internal.predefined_comments ( 3323 TYPE INT, 3324 OBJECT_ID INT, 3325 SUB_ID INT, 3326 COMMENT STRING 3327 )`, 3328 populate: func( 3329 ctx context.Context, p *planner, dbContext *DatabaseDescriptor, addRow func(...tree.Datum) error, 3330 ) error { 3331 tableCommentKey := tree.NewDInt(keys.TableCommentType) 3332 vt := p.getVirtualTabler() 3333 vEntries := vt.getEntries() 3334 vSchemaNames := vt.getSchemaNames() 3335 3336 for _, virtSchemaName := range vSchemaNames { 3337 e := vEntries[virtSchemaName] 3338 3339 for _, tName := range e.orderedDefNames { 3340 vTableEntry := e.defs[tName] 3341 table := vTableEntry.desc 3342 3343 if vTableEntry.comment != "" { 3344 if err := addRow( 3345 tableCommentKey, 3346 tree.NewDInt(tree.DInt(table.ID)), 3347 zeroVal, 3348 tree.NewDString(vTableEntry.comment)); err != nil { 3349 return err 3350 } 3351 } 3352 } 3353 } 3354 3355 return nil 3356 }, 3357 }
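// What follows is an illustrative sketch, not part of the real schema: it
// shows the minimal shape of a virtualSchemaTable using the same
// comment/schema/populate pattern as the tables above. The table name,
// columns, and row contents are hypothetical, and a real table would still
// have to be registered with the virtual schema machinery before it could be
// queried.
var crdbInternalExampleTable = virtualSchemaTable{
	comment: `illustrative example table (RAM; local node only)`,
	schema: `
CREATE TABLE crdb_internal.node_example (
  node_id INT NOT NULL,
  note    STRING NOT NULL
)`,
	populate: func(_ context.Context, p *planner, _ *DatabaseDescriptor, addRow func(...tree.Datum) error) error {
		// OptionalNodeID reports zero when the node ID is not available.
		nodeID, _ := p.ExecCfg().NodeID.OptionalNodeID()
		return addRow(
			tree.NewDInt(tree.DInt(nodeID)),
			tree.NewDString("example row"),
		)
	},
}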