github.com/pingcap/tidb/parser@v0.0.0-20231013125129-93a834a6bf8d/ast/misc_test.go

// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ast_test

import (
	"fmt"
	"testing"

	"github.com/pingcap/tidb/parser"
	"github.com/pingcap/tidb/parser/ast"
	"github.com/pingcap/tidb/parser/auth"
	"github.com/pingcap/tidb/parser/mysql"
	"github.com/stretchr/testify/require"
)

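// visitor is a no-op ast.Visitor: Enter returns skipChildren = false so the
// traversal descends into every child node, and Leave reports success.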
type visitor struct{}

func (v visitor) Enter(in ast.Node) (ast.Node, bool) {
	return in, false
}

func (v visitor) Leave(in ast.Node) (ast.Node, bool) {
	return in, true
}

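// visitor1 embeds visitor but overrides Enter to return skipChildren = true,
// so the traversal stops at each node without descending into its children.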
type visitor1 struct {
	visitor
}

func (visitor1) Enter(in ast.Node) (ast.Node, bool) {
	return in, true
}

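// TestMiscVisitorCover builds a bare instance of each miscellaneous statement
// node and runs both visitors over it, exercising every Accept implementation.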
func TestMiscVisitorCover(t *testing.T) {
	valueExpr := ast.NewValueExpr(42, mysql.DefaultCharset, mysql.DefaultCollationName)
	stmts := []ast.Node{
		&ast.AdminStmt{},
		&ast.AlterUserStmt{},
		&ast.BeginStmt{},
		&ast.BinlogStmt{},
		&ast.CommitStmt{},
		&ast.CompactTableStmt{Table: &ast.TableName{}},
		&ast.CreateUserStmt{},
		&ast.DeallocateStmt{},
		&ast.DoStmt{},
		&ast.ExecuteStmt{UsingVars: []ast.ExprNode{valueExpr}},
		&ast.ExplainStmt{Stmt: &ast.ShowStmt{}},
		&ast.GrantStmt{},
		&ast.PrepareStmt{SQLVar: &ast.VariableExpr{Value: valueExpr}},
		&ast.RollbackStmt{},
		&ast.SetPwdStmt{},
		&ast.SetStmt{Variables: []*ast.VariableAssignment{
			{
				Value: valueExpr,
			},
		}},
		&ast.UseStmt{},
		&ast.AnalyzeTableStmt{
			TableNames: []*ast.TableName{
				{},
			},
		},
		&ast.FlushStmt{},
		&ast.PrivElem{},
		&ast.VariableAssignment{Value: valueExpr},
		&ast.KillStmt{},
		&ast.DropStatsStmt{
			Tables: []*ast.TableName{
				{},
			},
		},
		&ast.ShutdownStmt{},
	}

	for _, v := range stmts {
		v.Accept(visitor{})
		v.Accept(visitor1{})
	}
}

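// TestDDLVisitorCoverMisc parses a batch of DDL statements and runs both
// visitors over each resulting AST node.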
func TestDDLVisitorCoverMisc(t *testing.T) {
	sql := `
create table t (c1 smallint unsigned, c2 int unsigned);
alter table t add column a smallint unsigned after b;
alter table t add column (a int, constraint check (a > 0));
create index t_i on t (id);
create database test character set utf8;
drop database test;
drop index t_i on t;
drop table t;
truncate t;
create table t (
jobAbbr char(4) not null,
constraint foreign key (jobabbr) references ffxi_jobtype (jobabbr) on delete cascade on update cascade
);
`
	parse := parser.New()
	stmts, _, err := parse.Parse(sql, "", "")
	require.NoError(t, err)
	for _, stmt := range stmts {
		stmt.Accept(visitor{})
		stmt.Accept(visitor1{})
	}
}

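// TestDMLVisitorCover parses a batch of DML statements and runs both visitors
// over each resulting AST node.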
func TestDMLVisitorCover(t *testing.T) {
	sql := `delete from somelog where user = 'jcole' order by timestamp_column limit 1;
delete t1, t2 from t1 inner join t2 inner join t3 where t1.id=t2.id and t2.id=t3.id;
select * from t where exists(select * from t k where t.c = k.c having sum(c) = 1);
insert into t_copy select * from t where t.x > 5;
(select /*+ TIDB_INLJ(t1) */ a from t1 where a=10 and b=1) union (select /*+ TIDB_SMJ(t2) */ a from t2 where a=11 and b=2) order by a limit 10;
update t1 set col1 = col1 + 1, col2 = col1;
show create table t;
load data infile '/tmp/t.csv' into table t fields terminated by 'ab' enclosed by 'b';
import into t from '/file.csv'`

	p := parser.New()
	stmts, _, err := p.Parse(sql, "", "")
	require.NoError(t, err)
	for _, stmt := range stmts {
		stmt.Accept(visitor{})
		stmt.Accept(visitor1{})
	}
}

// TestChangeStmt tests parsing of the CHANGE PUMP / CHANGE DRAINER node-state statements.
func TestChangeStmt(t *testing.T) {
	sql := `change pump to node_state='paused' for node_id '127.0.0.1:8249';
change drainer to node_state='paused' for node_id '127.0.0.1:8249';
shutdown;`

	p := parser.New()
	stmts, _, err := p.Parse(sql, "", "")
	require.NoError(t, err)
	for _, stmt := range stmts {
		stmt.Accept(visitor{})
		stmt.Accept(visitor1{})
	}
}

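// TestSensitiveStatement checks which statement types implement
// ast.SensitiveStmtNode (and thus expose SecureText) and which do not.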
func TestSensitiveStatement(t *testing.T) {
	positive := []ast.StmtNode{
		&ast.SetPwdStmt{},
		&ast.CreateUserStmt{},
		&ast.AlterUserStmt{},
		&ast.GrantStmt{},
	}
	for i, stmt := range positive {
		_, ok := stmt.(ast.SensitiveStmtNode)
		require.Truef(t, ok, "%d, %#v fail", i, stmt)
	}

	negative := []ast.StmtNode{
		&ast.DropUserStmt{},
		&ast.RevokeStmt{},
		&ast.AlterTableStmt{},
		&ast.CreateDatabaseStmt{},
		&ast.CreateIndexStmt{},
		&ast.CreateTableStmt{},
		&ast.DropDatabaseStmt{},
		&ast.DropIndexStmt{},
		&ast.DropTableStmt{},
		&ast.RenameTableStmt{},
		&ast.TruncateTableStmt{},
	}
	for _, stmt := range negative {
		_, ok := stmt.(ast.SensitiveStmtNode)
		require.False(t, ok)
	}
}

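// TestUserSpec covers ast.UserSpec.EncodedPassword: a well-formed hash string is
// returned as-is, a malformed one fails, and with ByAuthString set the plaintext
// AuthString is hashed (an empty AuthString yields an empty password).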
func TestUserSpec(t *testing.T) {
	hashString := "*3D56A309CD04FA2EEF181462E59011F075C89548"
	u := ast.UserSpec{
		User: &auth.UserIdentity{
			Username: "test",
		},
		AuthOpt: &ast.AuthOption{
			ByAuthString: false,
			AuthString:   "xxx",
			HashString:   hashString,
		},
	}
	pwd, ok := u.EncodedPassword()
	require.True(t, ok)
	require.Equal(t, u.AuthOpt.HashString, pwd)

	u.AuthOpt.HashString = "not-good-password-format"
	_, ok = u.EncodedPassword()
	require.False(t, ok)

	u.AuthOpt.ByAuthString = true
	pwd, ok = u.EncodedPassword()
	require.True(t, ok)
	require.Equal(t, hashString, pwd)

	u.AuthOpt.AuthString = ""
	pwd, ok = u.EncodedPassword()
	require.True(t, ok)
	require.Equal(t, "", pwd)
}

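// TestTableOptimizerHintRestore checks that optimizer hints parsed from a SELECT
// are restored with back-quoted identifiers and normalized spacing.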
func TestTableOptimizerHintRestore(t *testing.T) {
	testCases := []NodeRestoreTestCase{
		{"USE_INDEX(t1 c1)", "USE_INDEX(`t1` `c1`)"},
		{"USE_INDEX(test.t1 c1)", "USE_INDEX(`test`.`t1` `c1`)"},
		{"USE_INDEX(@sel_1 t1 c1)", "USE_INDEX(@`sel_1` `t1` `c1`)"},
		{"USE_INDEX(t1@sel_1 c1)", "USE_INDEX(`t1`@`sel_1` `c1`)"},
		{"USE_INDEX(test.t1@sel_1 c1)", "USE_INDEX(`test`.`t1`@`sel_1` `c1`)"},
		{"USE_INDEX(test.t1@sel_1 partition(p0) c1)", "USE_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"},
		{"FORCE_INDEX(t1 c1)", "FORCE_INDEX(`t1` `c1`)"},
		{"FORCE_INDEX(test.t1 c1)", "FORCE_INDEX(`test`.`t1` `c1`)"},
		{"FORCE_INDEX(@sel_1 t1 c1)", "FORCE_INDEX(@`sel_1` `t1` `c1`)"},
		{"FORCE_INDEX(t1@sel_1 c1)", "FORCE_INDEX(`t1`@`sel_1` `c1`)"},
		{"FORCE_INDEX(test.t1@sel_1 c1)", "FORCE_INDEX(`test`.`t1`@`sel_1` `c1`)"},
		{"FORCE_INDEX(test.t1@sel_1 partition(p0) c1)", "FORCE_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"},
		{"IGNORE_INDEX(t1 c1)", "IGNORE_INDEX(`t1` `c1`)"},
		{"IGNORE_INDEX(@sel_1 t1 c1)", "IGNORE_INDEX(@`sel_1` `t1` `c1`)"},
		{"IGNORE_INDEX(t1@sel_1 c1)", "IGNORE_INDEX(`t1`@`sel_1` `c1`)"},
		{"IGNORE_INDEX(t1@sel_1 partition(p0, p1) c1)", "IGNORE_INDEX(`t1`@`sel_1` PARTITION(`p0`, `p1`) `c1`)"},
		{"ORDER_INDEX(t1 c1)", "ORDER_INDEX(`t1` `c1`)"},
		{"ORDER_INDEX(test.t1 c1)", "ORDER_INDEX(`test`.`t1` `c1`)"},
		{"ORDER_INDEX(@sel_1 t1 c1)", "ORDER_INDEX(@`sel_1` `t1` `c1`)"},
		{"ORDER_INDEX(t1@sel_1 c1)", "ORDER_INDEX(`t1`@`sel_1` `c1`)"},
		{"ORDER_INDEX(test.t1@sel_1 c1)", "ORDER_INDEX(`test`.`t1`@`sel_1` `c1`)"},
		{"ORDER_INDEX(test.t1@sel_1 partition(p0) c1)", "ORDER_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"},
		{"NO_ORDER_INDEX(t1 c1)", "NO_ORDER_INDEX(`t1` `c1`)"},
		{"NO_ORDER_INDEX(test.t1 c1)", "NO_ORDER_INDEX(`test`.`t1` `c1`)"},
		{"NO_ORDER_INDEX(@sel_1 t1 c1)", "NO_ORDER_INDEX(@`sel_1` `t1` `c1`)"},
		{"NO_ORDER_INDEX(t1@sel_1 c1)", "NO_ORDER_INDEX(`t1`@`sel_1` `c1`)"},
		{"NO_ORDER_INDEX(test.t1@sel_1 c1)", "NO_ORDER_INDEX(`test`.`t1`@`sel_1` `c1`)"},
		{"NO_ORDER_INDEX(test.t1@sel_1 partition(p0) c1)", "NO_ORDER_INDEX(`test`.`t1`@`sel_1` PARTITION(`p0`) `c1`)"},
		{"TIDB_SMJ(`t1`)", "TIDB_SMJ(`t1`)"},
		{"TIDB_SMJ(t1)", "TIDB_SMJ(`t1`)"},
		{"TIDB_SMJ(t1,t2)", "TIDB_SMJ(`t1`, `t2`)"},
		{"TIDB_SMJ(@sel1 t1,t2)", "TIDB_SMJ(@`sel1` `t1`, `t2`)"},
		{"TIDB_SMJ(t1@sel1,t2@sel2)", "TIDB_SMJ(`t1`@`sel1`, `t2`@`sel2`)"},
		{"TIDB_INLJ(t1,t2)", "TIDB_INLJ(`t1`, `t2`)"},
		{"TIDB_INLJ(@sel1 t1,t2)", "TIDB_INLJ(@`sel1` `t1`, `t2`)"},
		{"TIDB_INLJ(t1@sel1,t2@sel2)", "TIDB_INLJ(`t1`@`sel1`, `t2`@`sel2`)"},
		{"TIDB_HJ(t1,t2)", "TIDB_HJ(`t1`, `t2`)"},
		{"TIDB_HJ(@sel1 t1,t2)", "TIDB_HJ(@`sel1` `t1`, `t2`)"},
		{"TIDB_HJ(t1@sel1,t2@sel2)", "TIDB_HJ(`t1`@`sel1`, `t2`@`sel2`)"},
		{"MERGE_JOIN(t1,t2)", "MERGE_JOIN(`t1`, `t2`)"},
		{"BROADCAST_JOIN(t1,t2)", "BROADCAST_JOIN(`t1`, `t2`)"},
		{"INL_HASH_JOIN(t1,t2)", "INL_HASH_JOIN(`t1`, `t2`)"},
		{"INL_MERGE_JOIN(t1,t2)", "INL_MERGE_JOIN(`t1`, `t2`)"},
		{"INL_JOIN(t1,t2)", "INL_JOIN(`t1`, `t2`)"},
		{"HASH_JOIN(t1,t2)", "HASH_JOIN(`t1`, `t2`)"},
		{"HASH_JOIN_BUILD(t1)", "HASH_JOIN_BUILD(`t1`)"},
		{"HASH_JOIN_PROBE(t1)", "HASH_JOIN_PROBE(`t1`)"},
		{"LEADING(t1)", "LEADING(`t1`)"},
		{"LEADING(t1, c1)", "LEADING(`t1`, `c1`)"},
		{"LEADING(t1, c1, t2)", "LEADING(`t1`, `c1`, `t2`)"},
		{"LEADING(@sel1 t1, c1)", "LEADING(@`sel1` `t1`, `c1`)"},
		{"LEADING(@sel1 t1)", "LEADING(@`sel1` `t1`)"},
		{"LEADING(@sel1 t1, c1, t2)", "LEADING(@`sel1` `t1`, `c1`, `t2`)"},
		{"LEADING(t1@sel1)", "LEADING(`t1`@`sel1`)"},
		{"LEADING(t1@sel1, c1)", "LEADING(`t1`@`sel1`, `c1`)"},
		{"LEADING(t1@sel1, c1, t2)", "LEADING(`t1`@`sel1`, `c1`, `t2`)"},
		{"MAX_EXECUTION_TIME(3000)", "MAX_EXECUTION_TIME(3000)"},
		{"MAX_EXECUTION_TIME(@sel1 3000)", "MAX_EXECUTION_TIME(@`sel1` 3000)"},
		{"USE_INDEX_MERGE(t1 c1)", "USE_INDEX_MERGE(`t1` `c1`)"},
		{"USE_INDEX_MERGE(@sel1 t1 c1)", "USE_INDEX_MERGE(@`sel1` `t1` `c1`)"},
		{"USE_INDEX_MERGE(t1@sel1 c1)", "USE_INDEX_MERGE(`t1`@`sel1` `c1`)"},
		{"USE_TOJA(TRUE)", "USE_TOJA(TRUE)"},
		{"USE_TOJA(FALSE)", "USE_TOJA(FALSE)"},
		{"USE_TOJA(@sel1 TRUE)", "USE_TOJA(@`sel1` TRUE)"},
		{"USE_CASCADES(TRUE)", "USE_CASCADES(TRUE)"},
		{"USE_CASCADES(FALSE)", "USE_CASCADES(FALSE)"},
		{"USE_CASCADES(@sel1 TRUE)", "USE_CASCADES(@`sel1` TRUE)"},
		{"QUERY_TYPE(OLAP)", "QUERY_TYPE(OLAP)"},
		{"QUERY_TYPE(OLTP)", "QUERY_TYPE(OLTP)"},
		{"QUERY_TYPE(@sel1 OLTP)", "QUERY_TYPE(@`sel1` OLTP)"},
		{"NTH_PLAN(10)", "NTH_PLAN(10)"},
		{"NTH_PLAN(@sel1 30)", "NTH_PLAN(@`sel1` 30)"},
		{"MEMORY_QUOTA(1 GB)", "MEMORY_QUOTA(1024 MB)"},
		{"MEMORY_QUOTA(@sel1 1 GB)", "MEMORY_QUOTA(@`sel1` 1024 MB)"},
		{"HASH_AGG()", "HASH_AGG()"},
		{"HASH_AGG(@sel1)", "HASH_AGG(@`sel1`)"},
		{"STREAM_AGG()", "STREAM_AGG()"},
		{"STREAM_AGG(@sel1)", "STREAM_AGG(@`sel1`)"},
		{"AGG_TO_COP()", "AGG_TO_COP()"},
		{"AGG_TO_COP(@sel_1)", "AGG_TO_COP(@`sel_1`)"},
		{"LIMIT_TO_COP()", "LIMIT_TO_COP()"},
		{"MERGE()", "MERGE()"},
		{"STRAIGHT_JOIN()", "STRAIGHT_JOIN()"},
		{"NO_INDEX_MERGE()", "NO_INDEX_MERGE()"},
		{"NO_INDEX_MERGE(@sel1)", "NO_INDEX_MERGE(@`sel1`)"},
		{"READ_CONSISTENT_REPLICA()", "READ_CONSISTENT_REPLICA()"},
		{"READ_CONSISTENT_REPLICA(@sel1)", "READ_CONSISTENT_REPLICA(@`sel1`)"},
		{"QB_NAME(sel1)", "QB_NAME(`sel1`)"},
		{"READ_FROM_STORAGE(@sel TIFLASH[t1, t2])", "READ_FROM_STORAGE(@`sel` TIFLASH[`t1`, `t2`])"},
		{"READ_FROM_STORAGE(@sel TIFLASH[t1 partition(p0)])", "READ_FROM_STORAGE(@`sel` TIFLASH[`t1` PARTITION(`p0`)])"},
		{"TIME_RANGE('2020-02-02 10:10:10','2020-02-02 11:10:10')", "TIME_RANGE('2020-02-02 10:10:10', '2020-02-02 11:10:10')"},
		{"RESOURCE_GROUP(rg1)", "RESOURCE_GROUP(`rg1`)"},
		{"RESOURCE_GROUP(`default`)", "RESOURCE_GROUP(`default`)"},
	}
	extractNodeFunc := func(node ast.Node) ast.Node {
		return node.(*ast.SelectStmt).TableHints[0]
	}
	runNodeRestoreTest(t, testCases, "select /*+ %s */ * from t1 join t2", extractNodeFunc)
}

func TestChangeStmtRestore(t *testing.T) {
	testCases := []NodeRestoreTestCase{
		{"CHANGE PUMP TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'", "CHANGE PUMP TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'"},
		{"CHANGE DRAINER TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'", "CHANGE DRAINER TO NODE_STATE ='paused' FOR NODE_ID '127.0.0.1:9090'"},
	}
	extractNodeFunc := func(node ast.Node) ast.Node {
		return node.(*ast.ChangeStmt)
	}
	runNodeRestoreTest(t, testCases, "%s", extractNodeFunc)
}

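// TestBRIESecureText checks that SecureText on BACKUP/RESTORE statements masks
// storage credentials; regexps are used because the ordering of URL query
// parameters is nondeterministic.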
func TestBRIESecureText(t *testing.T) {
	testCases := []struct {
		input   string
		secured string
	}{
		{
			input:   "restore database * from 'local:///tmp/br01' snapshot = 23333",
			secured: `^\QRESTORE DATABASE * FROM 'local:///tmp/br01' SNAPSHOT = 23333\E$`,
		},
		{
			input:   "backup database * to 's3://bucket/prefix?region=us-west-2'",
			secured: `^\QBACKUP DATABASE * TO 's3://bucket/prefix?region=us-west-2'\E$`,
		},
		{
			// We need to match with a regexp to tolerate the random ordering, since a map was used.
			// Unfortunately Go's regexp doesn't support lookahead assertions, so the test case below
			// can produce false positives.
			input:   "backup database * to 's3://bucket/prefix?access-key=abcdefghi&secret-access-key=123&force-path-style=true'",
			secured: `^\QBACKUP DATABASE * TO 's3://bucket/prefix?\E((access-key=xxxxxx|force-path-style=true|secret-access-key=xxxxxx)(&|'$)){3}`,
		},
		{
			input:   "backup database * to 'gcs://bucket/prefix?access-key=irrelevant&credentials-file=/home/user/secrets.txt'",
			secured: `^\QBACKUP DATABASE * TO 'gcs://bucket/prefix?\E((access-key=irrelevant|credentials-file=/home/user/secrets\.txt)(&|'$)){2}`,
		},
	}

	p := parser.New()
	for _, tc := range testCases {
		comment := fmt.Sprintf("input = %s", tc.input)
		node, err := p.ParseOneStmt(tc.input, "", "")
		require.NoError(t, err, comment)
		n, ok := node.(ast.SensitiveStmtNode)
		require.True(t, ok, comment)
		require.Regexp(t, tc.secured, n.SecureText(), comment)
	}
}

func TestCompactTableStmtRestore(t *testing.T) {
	testCases := []NodeRestoreTestCase{
		{"alter table abc compact tiflash replica", "ALTER TABLE `abc` COMPACT TIFLASH REPLICA"},
		{"alter table abc compact", "ALTER TABLE `abc` COMPACT"},
		{"alter table test.abc compact", "ALTER TABLE `test`.`abc` COMPACT"},
	}
	extractNodeFunc := func(node ast.Node) ast.Node {
		return node.(*ast.CompactTableStmt)
	}
	runNodeRestoreTest(t, testCases, "%s", extractNodeFunc)
}

func TestPlanReplayerStmtRestore(t *testing.T) {
	testCases := []NodeRestoreTestCase{
		{"plan replayer dump with stats as of timestamp '2023-06-28 12:34:00' explain select * from t where a > 10",
			"PLAN REPLAYER DUMP WITH STATS AS OF TIMESTAMP _UTF8MB4'2023-06-28 12:34:00' EXPLAIN SELECT * FROM `t` WHERE `a`>10"},
		{"plan replayer dump explain analyze select * from t where a > 10",
			"PLAN REPLAYER DUMP EXPLAIN ANALYZE SELECT * FROM `t` WHERE `a`>10"},
		{"plan replayer dump with stats as of timestamp 12345 explain analyze select * from t where a > 10",
			"PLAN REPLAYER DUMP WITH STATS AS OF TIMESTAMP 12345 EXPLAIN ANALYZE SELECT * FROM `t` WHERE `a`>10"},
		{"plan replayer dump explain analyze 'test'",
			"PLAN REPLAYER DUMP EXPLAIN ANALYZE 'test'"},
		{"plan replayer dump with stats as of timestamp '12345' explain analyze 'test2'",
			"PLAN REPLAYER DUMP WITH STATS AS OF TIMESTAMP _UTF8MB4'12345' EXPLAIN ANALYZE 'test2'"},
	}
	extractNodeFunc := func(node ast.Node) ast.Node {
		return node.(*ast.PlanReplayerStmt)
	}
	runNodeRestoreTest(t, testCases, "%s", extractNodeFunc)
}

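// TestRedactURL verifies that ast.RedactURL masks credential query parameters
// in s3:// URLs while leaving other schemes and parameters untouched.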
func TestRedactURL(t *testing.T) {
	type args struct {
		str string
	}
	tests := []struct {
		args args
		want string
	}{
		{args{""}, ""},
		{args{":"}, ":"},
		{args{"~/file"}, "~/file"},
		{args{"gs://bucket/file"}, "gs://bucket/file"},
		// gs URLs don't carry access-key/secret-access-key credentials, so they are NOT redacted.
		{args{"gs://bucket/file?access-key=123"}, "gs://bucket/file?access-key=123"},
		{args{"gs://bucket/file?secret-access-key=123"}, "gs://bucket/file?secret-access-key=123"},
		{args{"s3://bucket/file"}, "s3://bucket/file"},
		{args{"s3://bucket/file?other-key=123"}, "s3://bucket/file?other-key=123"},
		{args{"s3://bucket/file?access-key=123"}, "s3://bucket/file?access-key=xxxxxx"},
		{args{"s3://bucket/file?secret-access-key=123"}, "s3://bucket/file?secret-access-key=xxxxxx"},
		// underscore variants
		{args{"s3://bucket/file?access_key=123"}, "s3://bucket/file?access_key=xxxxxx"},
		{args{"s3://bucket/file?secret_access_key=123"}, "s3://bucket/file?secret_access_key=xxxxxx"},
	}
	for _, tt := range tests {
		t.Run(tt.args.str, func(t *testing.T) {
			got := ast.RedactURL(tt.args.str)
			if got != tt.want {
				t.Errorf("RedactURL() got = %v, want %v", got, tt.want)
			}
		})
	}
}