github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/sql/crdb_internal_test.go

// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sql_test

import (
	"context"
	gosql "database/sql"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/gossip"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/kv"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/security"
	"github.com/cockroachdb/cockroach/pkg/server/status/statuspb"
	"github.com/cockroachdb/cockroach/pkg/sql"
	"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
	"github.com/cockroachdb/cockroach/pkg/sql/parser"
	"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
	"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
	"github.com/cockroachdb/cockroach/pkg/sql/tests"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/errors"
	"github.com/jackc/pgx/pgtype"
	"github.com/lib/pq"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestGetAllNamesInternal tests both namespace and namespace_deprecated
// entries.
func TestGetAllNamesInternal(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx := context.Background()
	params, _ := tests.CreateTestServerParams()
	s, _ /* sqlDB */, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(ctx)

	err := kvDB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		batch := txn.NewBatch()
		batch.Put(sqlbase.NewTableKey(999, 444, "bob").Key(keys.SystemSQLCodec), 9999)
		batch.Put(sqlbase.NewDeprecatedTableKey(1000, "alice").Key(keys.SystemSQLCodec), 10000)
		batch.Put(sqlbase.NewDeprecatedTableKey(999, "overwrite_me_old_value").Key(keys.SystemSQLCodec), 9999)
		return txn.CommitInBatch(ctx, batch)
	})
	require.NoError(t, err)

	names, err := sql.TestingGetAllNames(ctx, nil, s.InternalExecutor().(*sql.InternalExecutor))
	require.NoError(t, err)

	assert.Equal(t, sql.NamespaceKey{ParentID: 999, ParentSchemaID: 444, Name: "bob"}, names[9999])
	assert.Equal(t, sql.NamespaceKey{ParentID: 1000, Name: "alice"}, names[10000])
}

// TestRangeLocalityBasedOnNodeIDs tests that the replica_localities shown in
// crdb_internal.ranges are a correct reflection of the localities of the
// stores in the range descriptor, which is shown in the replicas column.
func TestRangeLocalityBasedOnNodeIDs(t *testing.T) {
	defer leaktest.AfterTest(t)()

	ctx := context.Background()

	// NodeID=1, StoreID=1
	tc := testcluster.StartTestCluster(t, 1,
		base.TestClusterArgs{
			ServerArgs: base.TestServerArgs{
				Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "1"}}},
			},
			ReplicationMode: base.ReplicationAuto,
		},
	)
	defer tc.Stopper().Stop(ctx)
	assert.EqualValues(t, 1, tc.Servers[len(tc.Servers)-1].GetFirstStoreID())

	// Set to 2 so the next store ID will be 3.
	assert.NoError(t, tc.Servers[0].DB().Put(ctx, keys.StoreIDGenerator, 2))

	// NodeID=2, StoreID=3
	tc.AddServer(t,
		base.TestServerArgs{
			Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "2"}}},
		},
	)
	assert.EqualValues(t, 3, tc.Servers[len(tc.Servers)-1].GetFirstStoreID())

	// Set to 1 so the next store ID will be 2.
	assert.NoError(t, tc.Servers[0].DB().Put(ctx, keys.StoreIDGenerator, 1))

	// NodeID=3, StoreID=2
	tc.AddServer(t,
		base.TestServerArgs{
			Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "3"}}},
		},
	)
	assert.EqualValues(t, 2, tc.Servers[len(tc.Servers)-1].GetFirstStoreID())
	assert.NoError(t, tc.WaitForFullReplication())

	sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0])
	var replicas, localities string
	sqlDB.QueryRow(t, `select replicas, replica_localities from crdb_internal.ranges limit 1`).
		Scan(&replicas, &localities)

	assert.Equal(t, "{1,2,3}", replicas)
	// If the range were represented as a tuple of node IDs, the result would
	// be {node=1,node=2,node=3}. Because it is represented as a tuple of store
	// IDs, the expected result is {node=1,node=3,node=2}.
	assert.Equal(t, "{node=1,node=3,node=2}", localities)
}

func TestGossipAlertsTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	params, _ := tests.CreateTestServerParams()
	s, _, _ := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(context.Background())
	ctx := context.Background()

	if err := s.GossipI().(*gossip.Gossip).AddInfoProto(gossip.MakeNodeHealthAlertKey(456), &statuspb.HealthCheckResult{
		Alerts: []statuspb.HealthAlert{{
			StoreID:     123,
			Category:    statuspb.HealthAlert_METRICS,
			Description: "foo",
			Value:       100.0,
		}},
	}, time.Hour); err != nil {
		t.Fatal(err)
	}

	ie := s.InternalExecutor().(*sql.InternalExecutor)
	row, err := ie.QueryRowEx(ctx, "test", nil, /* txn */
		sqlbase.InternalExecutorSessionDataOverride{User: security.RootUser},
		"SELECT * FROM crdb_internal.gossip_alerts WHERE store_id = 123")
	if err != nil {
		t.Fatal(err)
	}

	if a, e := len(row), 5; a != e {
		t.Fatalf("got %d columns, wanted %d", a, e)
	}
	a := fmt.Sprintf("%v %v %v %v %v", row[0], row[1], row[2], row[3], row[4])
	e := "456 123 'metrics' 'foo' 100.0"
	if a != e {
		t.Fatalf("got:\n%s\nexpected:\n%s", a, e)
	}
}

// TestOldBitColumnMetadata checks that a pre-2.1 BIT column
// shows up properly in metadata post-2.1.
func TestOldBitColumnMetadata(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// The descriptor changes made must have an immediate effect,
	// so disable leases on tables.
	defer lease.TestingDisableTableLeases()()

	ctx := context.Background()
	params, _ := tests.CreateTestServerParams()
	s, sqlDB, kvDB := serverutils.StartServer(t, params)
	defer s.Stopper().Stop(ctx)

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.test (k INT);
`); err != nil {
		t.Fatal(err)
	}

	// We now want to create a pre-2.1 table descriptor with an
	// old-style bit column. We're going to edit the table descriptor
	// manually, without going through SQL.
	tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")
	for i := range tableDesc.Columns {
		if tableDesc.Columns[i].Name == "k" {
			tableDesc.Columns[i].Type.InternalType.VisibleType = 4 // Pre-2.1 BIT.
			tableDesc.Columns[i].Type.InternalType.Width = 12      // Arbitrary non-std INT size.
			break
		}
	}
	// To make this test future-proof we must ensure that there isn't
	// any logic in an unrelated place which will prevent the table from
	// being committed. To verify this, we add another column and check
	// it appears in introspection afterwards.
	//
	// We also avoid the regular schema change logic entirely, because
	// this may be equipped with code to "fix" the old-style BIT column
	// we defined above.
	alterCmd, err := parser.ParseOne("ALTER TABLE t ADD COLUMN z INT")
	if err != nil {
		t.Fatal(err)
	}
	colDef := alterCmd.AST.(*tree.AlterTable).Cmds[0].(*tree.AlterTableAddColumn).ColumnDef
	col, _, _, err := sqlbase.MakeColumnDefDescs(ctx, colDef, nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	col.ID = tableDesc.NextColumnID
	tableDesc.NextColumnID++
	tableDesc.Families[0].ColumnNames = append(tableDesc.Families[0].ColumnNames, col.Name)
	tableDesc.Families[0].ColumnIDs = append(tableDesc.Families[0].ColumnIDs, col.ID)
	tableDesc.Columns = append(tableDesc.Columns, *col)

	// Write the modified descriptor.
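	// The descriptor lives in the system config span, so the transaction sets
	// the system config trigger before writing; this is what causes the
	// updated system config to be re-gossiped once the transaction commits.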
	if err := kvDB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
		if err := txn.SetSystemConfigTrigger(); err != nil {
			return err
		}
		return txn.Put(ctx, sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.ID), sqlbase.WrapDescriptor(tableDesc))
	}); err != nil {
		t.Fatal(err)
	}

	// Read the column metadata from information_schema.
	rows, err := sqlDB.Query(`
SELECT column_name, character_maximum_length, numeric_precision, numeric_precision_radix, crdb_sql_type
  FROM t.information_schema.columns
 WHERE table_catalog = 't' AND table_schema = 'public' AND table_name = 'test'
   AND column_name != 'rowid'`)
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()

	expected := 0
	for rows.Next() {
		var colName string
		var charMaxLength, numPrec, numPrecRadix pgtype.Int8
		var sqlType string
		if err := rows.Scan(&colName, &charMaxLength, &numPrec, &numPrecRadix, &sqlType); err != nil {
			t.Fatal(err)
		}
		switch colName {
		case "k":
			if charMaxLength.Status != pgtype.Null {
				t.Fatalf("x character_maximum_length: expected null, got %d", charMaxLength.Int)
			}
			if numPrec.Int != 64 {
				t.Fatalf("x numeric_precision: expected 64, got %v", numPrec.Get())
			}
			if numPrecRadix.Int != 2 {
				t.Fatalf("x numeric_precision_radix: expected 2, got %v", numPrecRadix.Get())
			}
			if sqlType != "INT8" {
				t.Fatalf("x crdb_sql_type: expected INT8, got %q", sqlType)
			}
			expected |= 2
		case "z":
			// This is just a canary to verify that the manually-modified
			// table descriptor is visible to introspection.
			expected |= 1
		default:
			t.Fatalf("unexpected col: %q", colName)
		}
	}
	if expected != 3 {
		t.Fatal("did not find both expected rows")
	}

	// Now test the workaround: using ALTER to "upgrade" the type fully to INT.
	if _, err := sqlDB.Exec(`ALTER TABLE t.test ALTER COLUMN k SET DATA TYPE INT8`); err != nil {
		t.Fatal(err)
	}

	// And verify that this has re-set the fields.
	tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test")
	found := false
	for i := range tableDesc.Columns {
		col := &tableDesc.Columns[i]
		if col.Name == "k" {
			// TODO(knz): post-2.2, visible types for integer types are gone.
			if col.Type.InternalType.VisibleType != 0 {
				t.Errorf("unexpected visible type: got %d, expected 0", col.Type.InternalType.VisibleType)
			}
			if col.Type.Width() != 64 {
				t.Errorf("unexpected width: got %d, expected 64", col.Type.Width())
			}
			found = true
			break
		}
	}
	if !found {
		t.Fatal("column disappeared")
	}
}

func TestClusterQueriesTxnData(t *testing.T) {
	defer leaktest.AfterTest(t)()
	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
	defer s.Stopper().Stop(context.Background())

	if _, err := sqlDB.Exec(`
CREATE DATABASE t;
CREATE TABLE t.t (x INT);
INSERT INTO t.t VALUES (1);
`); err != nil {
		t.Fatal(err)
	}

	if _, err := sqlDB.Exec(`BEGIN`); err != nil {
		t.Fatal(err)
	}

	// Look up the schema first so that only the read txn is recorded in
	// the kv trace logs.
	if _, err := sqlDB.Exec(`SELECT * FROM t.t`); err != nil {
		t.Fatal(err)
	}

	if _, err := sqlDB.Exec(
		`SET tracing=on,kv; SELECT * FROM t.t; SET TRACING=off`); err != nil {
		t.Fatal(err)
	}

	// The log messages we are looking for are structured like
	// [....,txn=<txnID>], so search for those and extract the ID.
	row := sqlDB.QueryRow(`
SELECT
	string_to_array(regexp_extract(tag, 'txn=[a-zA-Z0-9]*'), '=')[2]
FROM
	[SHOW KV TRACE FOR SESSION]
WHERE
	tag LIKE '%txn=%' LIMIT 1`)
	var txnID string
	if err := row.Scan(&txnID); err != nil {
		t.Fatal(err)
	}

	// Now, run a SHOW QUERIES statement in the same transaction.
	// The txn_id we find there should be the same as the one we parsed,
	// and the txn_start time should be before the start time of the statement.
	row = sqlDB.QueryRow(`
SELECT
	txn_id, start
FROM
	crdb_internal.cluster_queries
WHERE
	query LIKE '%SHOW CLUSTER QUERIES%'`)

	var (
		foundTxnID string
		txnStart   time.Time
		queryStart time.Time
	)
	if err := row.Scan(&foundTxnID, &queryStart); err != nil {
		t.Fatal(err)
	}
	if !strings.HasPrefix(foundTxnID, txnID) {
		t.Errorf("expected to find txn id with prefix %s, but found %s", txnID, foundTxnID)
	}

	// Find the transaction start time and ensure that the query started after it.
	row = sqlDB.QueryRow(`SELECT start FROM crdb_internal.node_transactions WHERE id = $1`, foundTxnID)
	if err := row.Scan(&txnStart); err != nil {
		t.Fatal(err)
	}
	if txnStart.After(queryStart) {
		t.Error("expected txn to start before query")
	}
}

// TestCrdbInternalJobsOOM verifies that the memory budget works correctly for
// crdb_internal.jobs.
func TestCrdbInternalJobsOOM(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// The budget needs to be large enough to establish the initial database
	// connection, but small enough to overflow easily. It's set comfortably
	// above what the server needs to start up, leaving only a bit of extra
	// space before it overflows.
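	// Rough arithmetic for the constants below: numRows rows, each carrying a
	// fieldSize-byte payload and a fieldSize-byte progress blob, amount to
	// about 10 * 2 * 10,000 = 200,000 bytes of job data before any overhead.
	// Together with the memory already used to start the server and serve the
	// connection, this is expected to push the 250,000-byte budget over the
	// edge while staying well under twice that budget.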
	const lowMemoryBudget = 250000
	const fieldSize = 10000
	const numRows = 10
	const statement = "SELECT count(*) FROM crdb_internal.jobs"

	insertRows := func(sqlDB *gosql.DB) {
		for i := 0; i < numRows; i++ {
			if _, err := sqlDB.Exec(`
INSERT INTO system.jobs(id, status, payload, progress)
VALUES ($1, 'StatusRunning', repeat('a', $2)::BYTES, repeat('a', $2)::BYTES)`, i, fieldSize); err != nil {
				t.Fatal(err)
			}
		}
	}

	t.Run("over budget jobs table", func(t *testing.T) {
		s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{
			SQLMemoryPoolSize: lowMemoryBudget,
		})
		defer s.Stopper().Stop(context.Background())

		insertRows(sqlDB)
		_, err := sqlDB.Exec(statement)
		if err == nil {
			t.Fatalf("Expected \"%s\" to consume too much memory, found no error", statement)
		}
		if pErr := (*pq.Error)(nil); !(errors.As(err, &pErr) &&
			pErr.Code == pgcode.OutOfMemory) {
			t.Fatalf("Expected \"%s\" to consume too much memory, found unexpected error %+v", statement, pErr)
		}
	})

	t.Run("under budget jobs table", func(t *testing.T) {
		s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{
			SQLMemoryPoolSize: 2 * lowMemoryBudget,
		})
		defer s.Stopper().Stop(context.Background())

		insertRows(sqlDB)
		if _, err := sqlDB.Exec(statement); err != nil {
			t.Fatal(err)
		}
	})
}