github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvserver/reports/constraint_stats_report_test.go

     1  // Copyright 2019 The Cockroach Authors.
     2  //
     3  // Use of this software is governed by the Business Source License
     4  // included in the file licenses/BSL.txt.
     5  //
     6  // As of the Change Date specified in that file, in accordance with
     7  // the Business Source License, use of this software will be governed
     8  // by the Apache License, Version 2.0, included in the file
     9  // licenses/APL.txt.
    10  
    11  package reports
    12  
    13  import (
    14  	"bytes"
    15  	"context"
    16  	"fmt"
    17  	"sort"
    18  	"strings"
    19  	"testing"
    20  	"time"
    21  
    22  	"github.com/cockroachdb/cockroach/pkg/base"
    23  	"github.com/cockroachdb/cockroach/pkg/config"
    24  	"github.com/cockroachdb/cockroach/pkg/config/zonepb"
    25  	"github.com/cockroachdb/cockroach/pkg/keys"
    26  	"github.com/cockroachdb/cockroach/pkg/roachpb"
    27  	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
    28  	"github.com/cockroachdb/cockroach/pkg/sql"
    29  	"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
    30  	"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
    31  	"github.com/cockroachdb/cockroach/pkg/sql/types"
    32  	"github.com/cockroachdb/cockroach/pkg/testutils/keysutils"
    33  	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
    34  	"github.com/cockroachdb/cockroach/pkg/util/encoding"
    35  	"github.com/cockroachdb/cockroach/pkg/util/hlc"
    36  	"github.com/cockroachdb/cockroach/pkg/util/keysutil"
    37  	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
    38  	"github.com/cockroachdb/cockroach/pkg/util/uuid"
    39  	"github.com/cockroachdb/errors"
    40  	"github.com/gogo/protobuf/proto"
    41  	"github.com/stretchr/testify/require"
    42  	yaml "gopkg.in/yaml.v2"
    43  )
    44  
    45  func TestConformanceReport(t *testing.T) {
    46  	defer leaktest.AfterTest(t)()
    47  	tests := []conformanceConstraintTestCase{
    48  		{
    49  			name: "simple no violations",
    50  			baseReportTestCase: baseReportTestCase{
    51  				defaultZone: zone{replicas: 3},
    52  				schema: []database{
    53  					{
    54  						name:   "db1",
    55  						tables: []table{{name: "t1"}, {name: "t2"}},
    56  						// The database has a zone requesting everything to be on SSDs.
    57  						zone: &zone{
    58  							replicas:    3,
    59  							constraints: "[+ssd]",
    60  						},
    61  					},
    62  					{
    63  						name:   "db2",
    64  						tables: []table{{name: "sentinel"}},
    65  					},
    66  				},
    67  				splits: []split{
    68  					{key: "/Table/t1", stores: []int{1, 2, 3}},
    69  					{key: "/Table/t1/pk", stores: []int{1, 2, 3}},
    70  					{key: "/Table/t1/pk/1", stores: []int{1, 2, 3}},
    71  					{key: "/Table/t1/pk/2", stores: []int{1, 2, 3}},
    72  					{key: "/Table/t1/pk/3", stores: []int{1, 2, 3}},
    73  					{key: "/Table/t2", stores: []int{1, 2, 3}},
    74  					{key: "/Table/t2/pk", stores: []int{1, 2, 3}},
    75  					{
    76  						// This range is not covered by db1's zone config and so it won't
    77  						// be counted.
    78  						key: "/Table/sentinel", stores: []int{1, 2, 3},
    79  					},
    80  				},
    81  				nodes: []node{
    82  					{id: 1, locality: "", stores: []store{{id: 1, attrs: "ssd"}}},
    83  					{id: 2, locality: "", stores: []store{{id: 2, attrs: "ssd"}}},
    84  					{id: 3, locality: "", stores: []store{{id: 3, attrs: "ssd"}}},
    85  				},
    86  			},
    87  			exp: []constraintEntry{{
    88  				object:         "db1",
    89  				constraint:     "+ssd",
    90  				constraintType: Constraint,
    91  				numRanges:      0,
    92  			}},
    93  		},
    94  		{
    95  			name: "violations at multiple levels",
    96  			// Test zone constraint inheritance at all levels.
    97  			baseReportTestCase: baseReportTestCase{
    98  				defaultZone: zone{replicas: 3, constraints: "[+default]"},
    99  				schema: []database{
   100  					{
   101  						name: "db1",
   102  						// All the objects will have zones asking for different tags.
   103  						zone: &zone{
   104  							replicas:    3,
   105  							constraints: "[+db1]",
   106  						},
   107  						tables: []table{
   108  							{
   109  								name: "t1",
   110  								zone: &zone{
   111  									replicas:    3,
   112  									constraints: "[+t1]",
   113  								},
   114  							},
   115  							{
   116  								name: "t2",
   117  								zone: &zone{
   118  									replicas:    3,
   119  									constraints: "[+t2]",
   120  								},
   121  							},
   122  							// Violations for this one will count towards db1.
   123  							{name: "sentinel"},
   124  						},
   125  					}, {
   126  						name: "db2",
   127  						zone: &zone{constraints: "[+db2]"},
   128  						// Violations for this one will count towards db2, except for the
   129  						// partition part.
   130  						tables: []table{{
   131  							name: "t3",
   132  							partitions: []partition{{
   133  								name:  "p1",
   134  								start: []int{100},
   135  								end:   []int{200},
   136  								zone:  &zone{constraints: "[+p1]"},
   137  							}},
   138  						}},
   139  					}, {
   140  						name: "db3",
   141  						// Violations for this one will count towards the default zone.
   142  						tables: []table{{name: "t4"}},
   143  					},
   144  				},
   145  				splits: []split{
   146  					{key: "/Table/t1", stores: []int{1, 2, 3}},
   147  					{key: "/Table/t1/pk", stores: []int{1, 2, 3}},
   148  					{key: "/Table/t1/pk/1", stores: []int{1, 2, 3}},
   149  					{key: "/Table/t1/pk/2", stores: []int{1, 2, 3}},
   150  					{key: "/Table/t1/pk/3", stores: []int{1, 2, 3}},
   151  					{key: "/Table/t2", stores: []int{1, 2, 3}},
   152  					{key: "/Table/t2/pk", stores: []int{1, 2, 3}},
   153  					{key: "/Table/sentinel", stores: []int{1, 2, 3}},
   154  					{key: "/Table/t3", stores: []int{1, 2, 3}},
   155  					{key: "/Table/t3/pk/100", stores: []int{1, 2, 3}},
   156  					{key: "/Table/t3/pk/101", stores: []int{1, 2, 3}},
   157  					{key: "/Table/t3/pk/199", stores: []int{1, 2, 3}},
   158  					{key: "/Table/t3/pk/200", stores: []int{1, 2, 3}},
   159  					{key: "/Table/t4", stores: []int{1, 2, 3}},
   160  				},
   161  				// None of the stores have any attributes.
   162  				nodes: []node{
   163  					{id: 1, locality: "", stores: []store{{id: 1}}},
   164  					{id: 2, locality: "", stores: []store{{id: 2}}},
   165  					{id: 3, locality: "", stores: []store{{id: 3}}},
   166  				},
   167  			},
   168  			exp: []constraintEntry{
   169  				{
   170  					object:         "default",
   171  					constraint:     "+default",
   172  					constraintType: Constraint,
   173  					numRanges:      1,
   174  				},
   175  				{
   176  					object:         "db1",
   177  					constraint:     "+db1",
   178  					constraintType: Constraint,
   179  					numRanges:      1,
   180  				},
   181  				{
   182  					object:         "t1",
   183  					constraint:     "+t1",
   184  					constraintType: Constraint,
   185  					numRanges:      5,
   186  				},
   187  				{
   188  					object:         "t2",
   189  					constraint:     "+t2",
   190  					constraintType: Constraint,
   191  					numRanges:      2,
   192  				},
   193  				{
   194  					object:         "db2",
   195  					constraint:     "+db2",
   196  					constraintType: Constraint,
   197  					numRanges:      2,
   198  				},
   199  				{
   200  					object:         "t3.p1",
   201  					constraint:     "+p1",
   202  					constraintType: Constraint,
   203  					numRanges:      3,
   204  				},
   205  			},
   206  		},
   207  		{
   208  			// Test that, when we have conjunctions of constraints, we report at the
   209  			// level of the whole conjunction, not for individual constraints within
   210  			// the conjunction. I.e. if we have a constraint {"+region=us,+dc=dc1":1}
   211  			// (one replica needs to be in a node with locality us,dc1), and instead
   212  			// the replica is in us,dc2, then the report has an entry for the pair
   213  			// "us,dc1" with a violation instead of one entry for "us" with no
   214  			// violation, and another entry for "dc1" with one violation.
   215  			name: "constraint conjunctions",
   216  			baseReportTestCase: baseReportTestCase{
   217  				defaultZone: zone{replicas: 3},
   218  				schema: []database{
   219  					{
   220  						name:   "db1",
   221  						tables: []table{{name: "t1"}, {name: "t2"}},
   222  						// The database's zone asks for one replica in us,dc1 and one in us,dc2.
   223  						zone: &zone{
   224  							replicas: 2,
   225  							// The first conjunction will be satisfied; the second won't.
   226  							constraints: `{"+region=us,+dc=dc1":1,"+region=us,+dc=dc2":1}`,
   227  						},
   228  					},
   229  				},
   230  				splits: []split{
   231  					{key: "/Table/t1", stores: []int{1, 2}},
   232  				},
   233  				nodes: []node{
   234  					{id: 1, locality: "region=us,dc=dc1", stores: []store{{id: 1}}},
   235  					{id: 2, locality: "region=us,dc=dc3", stores: []store{{id: 2}}},
   236  				},
   237  			},
   238  			exp: []constraintEntry{
   239  				{
   240  					object:         "db1",
   241  					constraint:     "+region=us,+dc=dc1:1",
   242  					constraintType: Constraint,
   243  					numRanges:      0,
   244  				},
   245  				{
   246  					object:         "db1",
   247  					constraint:     "+region=us,+dc=dc2:1",
   248  					constraintType: Constraint,
   249  					numRanges:      1,
   250  				},
   251  			},
   252  		},
   253  	}
   254  	for _, tc := range tests {
   255  		t.Run(tc.name, func(t *testing.T) {
   256  			runConformanceReportTest(t, tc)
   257  		})
   258  	}
   259  }
   260  
   261  // runConformanceReportTest runs one test case. It processes the input schema,
   262  // runs the reports, and verifies that the report looks as expected.
   263  func runConformanceReportTest(t *testing.T, tc conformanceConstraintTestCase) {
   264  	ctc, err := compileTestCase(tc.baseReportTestCase)
   265  	if err != nil {
   266  		t.Fatal(err)
   267  	}
   268  	rep, err := computeConstraintConformanceReport(
   269  		context.Background(), &ctc.iter, ctc.cfg, ctc.resolver)
   270  	if err != nil {
   271  		t.Fatal(err)
   272  	}
   273  
   274  	// Sort the report's keys.
   275  	gotRows := make(rows, len(rep))
   276  	i := 0
   277  	for k, v := range rep {
   278  		gotRows[i] = row{ConstraintStatusKey: k, ConstraintStatus: v}
   279  		i++
   280  	}
   281  	sort.Slice(gotRows, func(i, j int) bool {
   282  		return gotRows[i].ConstraintStatusKey.Less(gotRows[j].ConstraintStatusKey)
   283  	})
   284  
   285  	expRows := make(rows, len(tc.exp))
   286  	for i, exp := range tc.exp {
   287  		k, v, err := exp.toReportEntry(ctc.objectToZone)
   288  		if err != nil {
   289  			t.Fatalf("failed to process expected entry %d: %s", i, err)
   290  		}
   291  		expRows[i] = row{
   292  			ConstraintStatusKey: k,
   293  			ConstraintStatus:    v,
   294  		}
   295  	}
   296  	require.Equal(t, expRows, gotRows)
   297  }
   298  
   299  type zone struct {
   300  	// 0 means unset.
   301  	replicas int32
   302  	// "" means unset. "[]" means empty.
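        	// Example values, as used in the test cases below: "[+ssd]" for a simple
        	// constraint list, or `{"+region=us,+dc=dc1":1,"+region=us,+dc=dc2":1}`
        	// for per-replica constraints.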
   303  	constraints string
   304  }
   305  
   306  func (z zone) toZoneConfig() zonepb.ZoneConfig {
   307  	cfg := zonepb.NewZoneConfig()
   308  	if z.replicas != 0 {
   309  		cfg.NumReplicas = proto.Int32(z.replicas)
   310  	}
   311  	if z.constraints != "" {
   312  		var constraintsList zonepb.ConstraintsList
   313  		if err := yaml.UnmarshalStrict([]byte(z.constraints), &constraintsList); err != nil {
   314  			panic(err)
   315  		}
   316  		cfg.Constraints = constraintsList.Constraints
   317  		cfg.InheritedConstraints = false
   318  	}
   319  	return *cfg
   320  }
   321  
   322  // Note that we support multi-column partitions but we don't support
   323  // sub-partitions.
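        // For example, partition{name: "p1", start: []int{100}, end: []int{200}}
        // describes a single-column partition covering values in [100, 200), as used
        // in the test cases above.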
   324  type partition struct {
   325  	name string
   326  	// start defines the partition's start key as a sequence of int column values.
   327  	start []int
   328  	// end defines the partition's end key as a sequence of int column values.
   329  	end  []int
   330  	zone *zone
   331  }
   332  
   333  func (p partition) toPartitionDescriptor() sqlbase.PartitioningDescriptor_Range {
   334  	var startKey roachpb.Key
   335  	for _, val := range p.start {
   336  		startKey = encoding.EncodeIntValue(startKey, encoding.NoColumnID, int64(val))
   337  	}
   338  	var endKey roachpb.Key
   339  	for _, val := range p.end {
   340  		endKey = encoding.EncodeIntValue(endKey, encoding.NoColumnID, int64(val))
   341  	}
   342  	return sqlbase.PartitioningDescriptor_Range{
   343  		Name:          p.name,
   344  		FromInclusive: startKey,
   345  		ToExclusive:   endKey,
   346  	}
   347  }
   348  
   349  type partitioning []partition
   350  
   351  func (p partitioning) numCols() int {
   352  	if len(p) == 0 {
   353  		return 0
   354  	}
   355  	return len(p[0].start)
   356  }
   357  
   358  func (p partitioning) validate() error {
   359  	// Validate that all partitions (if any) have the same number of columns.
   360  	if len(p) == 0 {
   361  		return nil
   362  	}
   363  	numCols := len(p[0].start)
   364  	for _, pp := range p {
   365  		if len(pp.start) != numCols {
   366  			return errors.Errorf("partition start doesn't have expected number of columns: %v", pp.start)
   367  		}
   368  		if len(pp.end) != numCols {
   369  			return errors.Errorf("partition end doesn't have expected number of columns: %v", pp.end)
   370  		}
   371  	}
   372  	return nil
   373  }
   374  
   375  type index struct {
   376  	name       string
   377  	zone       *zone
   378  	partitions partitioning
   379  }
   380  
   381  func (idx index) toIndexDescriptor(id int) sqlbase.IndexDescriptor {
   382  	var idxDesc sqlbase.IndexDescriptor
   383  	idxDesc.ID = sqlbase.IndexID(id)
   384  	idxDesc.Name = idx.name
   385  	if len(idx.partitions) > 0 {
   386  		neededCols := idx.partitions.numCols()
   387  		for i := 0; i < neededCols; i++ {
   388  			idxDesc.ColumnIDs = append(idxDesc.ColumnIDs, sqlbase.ColumnID(i))
   389  			idxDesc.ColumnNames = append(idxDesc.ColumnNames, fmt.Sprintf("col%d", i))
   390  			idxDesc.ColumnDirections = append(idxDesc.ColumnDirections, sqlbase.IndexDescriptor_ASC)
   391  		}
   392  		idxDesc.Partitioning.NumColumns = uint32(len(idx.partitions[0].start))
   393  		for _, p := range idx.partitions {
   394  			idxDesc.Partitioning.Range = append(idxDesc.Partitioning.Range, p.toPartitionDescriptor())
   395  		}
   396  	}
   397  	return idxDesc
   398  }
   399  
   400  type table struct {
   401  	name       string
   402  	zone       *zone
   403  	indexes    []index
   404  	partitions partitioning
   405  }
   406  
   407  func (t table) validate() error {
   408  	if len(t.indexes) == 0 || t.indexes[0].name != "PK" {
   409  		return errors.Errorf("missing PK index from table: %q", t.name)
   410  	}
   411  	return t.partitions.validate()
   412  }
   413  
   414  func (t *table) addPKIdx() {
   415  	if len(t.indexes) > 0 && t.indexes[0].name == "PK" {
   416  		return
   417  	}
   418  	// Add the PK index if missing.
   419  	pkIdx := index{
   420  		name:       "PK",
   421  		zone:       nil,
   422  		partitions: t.partitions,
   423  	}
   424  	t.indexes = append([]index{pkIdx}, t.indexes...)
   425  }
   426  
   427  type database struct {
   428  	name   string
   429  	tables []table
   430  	zone   *zone
   431  }
   432  
   433  // constraintEntry represents an expected entry in the constraints conformance
   434  // report.
   435  type constraintEntry struct {
   436  	// object is the name of the table/index/partition that this entry refers to.
   437  	// The format is "<database>" or "<table>[.<index>[.<partition>]]". A
   438  	// partition on the primary key is "<table>.<partition>".
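        	// Examples, as used in the test cases above: "default", "db1", "t1", "t3.p1".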
   439  	object         string
   440  	constraint     string
   441  	constraintType ConstraintType
   442  	numRanges      int
   443  }
   444  
   445  // toReportEntry transforms the entry into the key/value format of the generated
   446  // report.
   447  func (c constraintEntry) toReportEntry(
   448  	objects map[string]ZoneKey,
   449  ) (ConstraintStatusKey, ConstraintStatus, error) {
   450  	zk, ok := objects[c.object]
   451  	if !ok {
   452  		return ConstraintStatusKey{}, ConstraintStatus{},
   453  			errors.AssertionFailedf("missing object: %s", c.object)
   454  	}
   455  	k := ConstraintStatusKey{
   456  		ZoneKey:       zk,
   457  		ViolationType: c.constraintType,
   458  		Constraint:    ConstraintRepr(c.constraint),
   459  	}
   460  	v := ConstraintStatus{FailRangeCount: c.numRanges}
   461  	return k, v, nil
   462  }
   463  
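        // split describes a split point in the test's keyspace. key is a pretty-printed
        // key (e.g. "/Table/t1/pk/1") that is parsed by the keysutils scanner, and
        // stores lists the ids of the stores holding a replica of the resulting range.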
   464  type split struct {
   465  	key    string
   466  	stores []int
   467  }
   468  
   469  type store struct {
   470  	id    int
   471  	attrs string // comma separated
   472  }
   473  
   474  // toStoreDesc transforms the store into a StoreDescriptor.
   475  //
   476  // nodeDesc is the descriptor of the parent node.
   477  func (s store) toStoreDesc(nodeDesc roachpb.NodeDescriptor) roachpb.StoreDescriptor {
   478  	desc := roachpb.StoreDescriptor{
   479  		StoreID: roachpb.StoreID(s.id),
   480  		Node:    nodeDesc,
   481  	}
   482  	desc.Attrs.Attrs = append(desc.Attrs.Attrs, strings.Split(s.attrs, ",")...)
   483  	return desc
   484  }
   485  
   486  type node struct {
   487  	id int
   488  	// locality is the node's locality, in the same format as the --locality
   489  	// flag. "" means no locality.
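        	// Example: "region=us,dc=dc1".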
   490  	locality string
   491  	stores   []store
   492  	// dead, if set, indicates that the node is to be considered dead for the
   493  	// purposes of report generation. In production, this corresponds to a node
   494  	// with an expired liveness record.
   495  	dead bool
   496  }
   497  
   498  func (n node) toDescriptors() (roachpb.NodeDescriptor, []roachpb.StoreDescriptor, error) {
   499  	nodeDesc := roachpb.NodeDescriptor{
   500  		NodeID: roachpb.NodeID(n.id),
   501  	}
   502  	if n.locality != "" {
   503  		if err := nodeDesc.Locality.Set(n.locality); err != nil {
   504  			return roachpb.NodeDescriptor{}, nil, err
   505  		}
   506  	}
   507  	storeDescs := make([]roachpb.StoreDescriptor, len(n.stores))
   508  	for i, s := range n.stores {
   509  		storeDescs[i] = s.toStoreDesc(nodeDesc)
   510  	}
   511  	return nodeDesc, storeDescs, nil
   512  }
   513  
   514  type conformanceConstraintTestCase struct {
   515  	baseReportTestCase
   516  	name string
   517  	exp  []constraintEntry
   518  }
   519  
   520  type baseReportTestCase struct {
   521  	schema      []database
   522  	splits      []split
   523  	nodes       []node
   524  	defaultZone zone
   525  }
   526  
   527  type row struct {
   528  	ConstraintStatusKey
   529  	ConstraintStatus
   530  }
   531  
   532  func (r row) String() string {
   533  	return fmt.Sprintf("%s failed:%d", r.ConstraintStatusKey, r.ConstraintStatus.FailRangeCount)
   534  }
   535  
   536  type rows []row
   537  
   538  func (r rows) String() string {
   539  	var sb strings.Builder
   540  	for _, rr := range r {
   541  		sb.WriteString(rr.String())
   542  		sb.WriteRune('\n')
   543  	}
   544  	return sb.String()
   545  }
   546  
   547  func TestConstraintReport(t *testing.T) {
   548  	defer leaktest.AfterTest(t)()
   549  
   550  	ctx := context.Background()
   551  	st := cluster.MakeTestingClusterSettings()
   552  	// This test uses the cluster as a recipient for a report saved from outside
   553  	// the cluster. We disable the cluster's own production of reports so that it
   554  	// doesn't interfere with the test.
   555  	ReporterInterval.Override(&st.SV, 0)
   556  	s, _, db := serverutils.StartServer(t, base.TestServerArgs{Settings: st})
   557  	con := s.InternalExecutor().(sqlutil.InternalExecutor)
   558  	defer s.Stopper().Stop(ctx)
   559  
   560  	// Verify that tables are empty.
   561  	require.ElementsMatch(t, TableData(ctx, "system.replication_constraint_stats", con), [][]string{})
   562  	require.ElementsMatch(t, TableData(ctx, "system.reports_meta", con), [][]string{})
   563  
   564  	// Add several replication constraint statuses.
   565  	report := make(ConstraintReport)
   566  	report.ensureEntry(MakeZoneKey(1, 3), "constraint", "+country=CH")
   567  	report.AddViolation(MakeZoneKey(2, 3), "constraint", "+country=CH")
   568  	report.ensureEntry(MakeZoneKey(2, 3), "constraint", "+country=CH")
   569  	report.AddViolation(MakeZoneKey(5, 6), "constraint", "+ssd")
   570  	report.AddViolation(MakeZoneKey(5, 6), "constraint", "+ssd")
   571  	report.AddViolation(MakeZoneKey(7, 8), "constraint", "+dc=west")
   572  	report.ensureEntry(MakeZoneKey(7, 8), "constraint", "+dc=east")
   573  	report.ensureEntry(MakeZoneKey(7, 8), "constraint", "+dc=east")
   574  	report.AddViolation(MakeZoneKey(8, 9), "constraint", "+dc=west")
   575  	report.ensureEntry(MakeZoneKey(8, 9), "constraint", "+dc=east")
   576  
   577  	time1 := time.Date(2001, 1, 1, 10, 0, 0, 0, time.UTC)
   578  	r := makeReplicationConstraintStatusReportSaver()
   579  	require.NoError(t, r.Save(ctx, report, time1, db, con))
   580  	report = make(ConstraintReport)
   581  
   582  	require.ElementsMatch(t, TableData(ctx, "system.replication_constraint_stats", con), [][]string{
   583  		{"1", "3", "'constraint'", "'+country=CH'", "1", "NULL", "0"},
   584  		{"2", "3", "'constraint'", "'+country=CH'", "1", "'2001-01-01 10:00:00+00:00'", "1"},
   585  		{"5", "6", "'constraint'", "'+ssd'", "1", "'2001-01-01 10:00:00+00:00'", "2"},
   586  		{"7", "8", "'constraint'", "'+dc=west'", "1", "'2001-01-01 10:00:00+00:00'", "1"},
   587  		{"7", "8", "'constraint'", "'+dc=east'", "1", "NULL", "0"},
   588  		{"8", "9", "'constraint'", "'+dc=west'", "1", "'2001-01-01 10:00:00+00:00'", "1"},
   589  		{"8", "9", "'constraint'", "'+dc=east'", "1", "NULL", "0"},
   590  	})
   591  	require.ElementsMatch(t, TableData(ctx, "system.reports_meta", con), [][]string{
   592  		{"1", "'2001-01-01 10:00:00+00:00'"},
   593  	})
   594  	require.Equal(t, 7, r.LastUpdatedRowCount())
   595  
   596  	// Add a new set of replication constraint statuses to the existing report and verify that the old ones are deleted.
   597  	report.AddViolation(MakeZoneKey(1, 3), "constraint", "+country=CH")
   598  	report.ensureEntry(MakeZoneKey(5, 6), "constraint", "+ssd")
   599  	report.AddViolation(MakeZoneKey(6, 8), "constraint", "+dc=east")
   600  	report.ensureEntry(MakeZoneKey(6, 8), "constraint", "+dc=west")
   601  	report.AddViolation(MakeZoneKey(7, 8), "constraint", "+dc=west")
   602  	report.AddViolation(MakeZoneKey(7, 8), "constraint", "+dc=west")
   603  	report.AddViolation(MakeZoneKey(8, 9), "constraint", "+dc=west")
   604  	report.ensureEntry(MakeZoneKey(8, 9), "constraint", "+dc=east")
   605  
   606  	time2 := time.Date(2001, 1, 1, 11, 0, 0, 0, time.UTC)
   607  	require.NoError(t, r.Save(ctx, report, time2, db, con))
   608  	report = make(ConstraintReport)
   609  
   610  	require.ElementsMatch(t, TableData(ctx, "system.replication_constraint_stats", con), [][]string{
   611  		// Wasn't violated before - is violated now.
   612  		{"1", "3", "'constraint'", "'+country=CH'", "1", "'2001-01-01 11:00:00+00:00'", "1"},
   613  		// Was violated before - isn't violated now.
   614  		{"5", "6", "'constraint'", "'+ssd'", "1", "NULL", "0"},
   615  		// Didn't exist before - new for this run and violated.
   616  		{"6", "8", "'constraint'", "'+dc=east'", "1", "'2001-01-01 11:00:00+00:00'", "1"},
   617  		// Didn't exist before - new for this run and not violated.
   618  		{"6", "8", "'constraint'", "'+dc=west'", "1", "NULL", "0"},
   619  		// Was violated before - and it still is but the range count changed.
   620  		{"7", "8", "'constraint'", "'+dc=west'", "1", "'2001-01-01 10:00:00+00:00'", "2"},
   621  		// Was violated before - and it still is but the range count didn't change.
   622  		{"8", "9", "'constraint'", "'+dc=west'", "1", "'2001-01-01 10:00:00+00:00'", "1"},
   623  		// Wasn't violated before - and is still not violated.
   624  		{"8", "9", "'constraint'", "'+dc=east'", "1", "NULL", "0"},
   625  	})
   626  	require.ElementsMatch(t, TableData(ctx, "system.reports_meta", con), [][]string{
   627  		{"1", "'2001-01-01 11:00:00+00:00'"},
   628  	})
   629  	require.Equal(t, 7, r.LastUpdatedRowCount())
   630  
   631  	time3 := time.Date(2001, 1, 1, 11, 30, 0, 0, time.UTC)
   632  	// Simulate another server taking over and doing an update.
   633  	rows, err := con.Exec(ctx, "another-updater", nil, "update system.reports_meta set generated=$1 where id=1", time3)
   634  	require.NoError(t, err)
   635  	require.Equal(t, 1, rows)
   636  	rows, err = con.Exec(ctx, "another-updater", nil, "update system.replication_constraint_stats "+
   637  		"set violation_start=null, violating_ranges=0 where zone_id=1 and subzone_id=3")
   638  	require.NoError(t, err)
   639  	require.Equal(t, 1, rows)
   640  	rows, err = con.Exec(ctx, "another-updater", nil, "update system.replication_constraint_stats "+
   641  		"set violation_start=$1, violating_ranges=5 where zone_id=5 and subzone_id=6", time3)
   642  	require.NoError(t, err)
   643  	require.Equal(t, 1, rows)
   644  	rows, err = con.Exec(ctx, "another-updater", nil, "delete from system.replication_constraint_stats "+
   645  		"where zone_id=7 and subzone_id=8")
   646  	require.NoError(t, err)
   647  	require.Equal(t, 1, rows)
   648  
   649  	// Add a new set of replication constraint statuses to the existing report and verify that everything is good.
   650  	report.AddViolation(MakeZoneKey(1, 3), "constraint", "+country=CH")
   651  	report.ensureEntry(MakeZoneKey(5, 6), "constraint", "+ssd")
   652  	report.AddViolation(MakeZoneKey(6, 8), "constraint", "+dc=east")
   653  	report.ensureEntry(MakeZoneKey(6, 8), "constraint", "+dc=west")
   654  	report.AddViolation(MakeZoneKey(7, 8), "constraint", "+dc=west")
   655  	report.AddViolation(MakeZoneKey(7, 8), "constraint", "+dc=west")
   656  	report.AddViolation(MakeZoneKey(8, 9), "constraint", "+dc=west")
   657  	report.ensureEntry(MakeZoneKey(8, 9), "constraint", "+dc=east")
   658  
   659  	time4 := time.Date(2001, 1, 1, 12, 0, 0, 0, time.UTC)
   660  	require.NoError(t, r.Save(ctx, report, time4, db, con))
   661  	report = make(ConstraintReport)
   662  
   663  	require.ElementsMatch(t, TableData(ctx, "system.replication_constraint_stats", con), [][]string{
   664  		{"1", "3", "'constraint'", "'+country=CH'", "1", "'2001-01-01 12:00:00+00:00'", "1"},
   665  		{"5", "6", "'constraint'", "'+ssd'", "1", "NULL", "0"},
   666  		{"6", "8", "'constraint'", "'+dc=east'", "1", "'2001-01-01 11:00:00+00:00'", "1"},
   667  		{"6", "8", "'constraint'", "'+dc=west'", "1", "NULL", "0"},
   668  		{"7", "8", "'constraint'", "'+dc=west'", "1", "'2001-01-01 12:00:00+00:00'", "2"},
   669  		{"8", "9", "'constraint'", "'+dc=west'", "1", "'2001-01-01 10:00:00+00:00'", "1"},
   670  		{"8", "9", "'constraint'", "'+dc=east'", "1", "NULL", "0"},
   671  	})
   672  	require.ElementsMatch(t, TableData(ctx, "system.reports_meta", con), [][]string{
   673  		{"1", "'2001-01-01 12:00:00+00:00'"},
   674  	})
   675  	require.Equal(t, 3, r.LastUpdatedRowCount())
   676  
   677  	// A brand new report (e.g. after a restart) still works.
   678  	// Add several replication constraint statuses.
   679  	r = makeReplicationConstraintStatusReportSaver()
   680  	report.AddViolation(MakeZoneKey(1, 3), "constraint", "+country=CH")
   681  
   682  	time5 := time.Date(2001, 1, 1, 12, 30, 0, 0, time.UTC)
   683  	require.NoError(t, r.Save(ctx, report, time5, db, con))
   684  
   685  	require.ElementsMatch(t, TableData(ctx, "system.replication_constraint_stats", con), [][]string{
   686  		{"1", "3", "'constraint'", "'+country=CH'", "1", "'2001-01-01 12:00:00+00:00'", "1"},
   687  	})
   688  	require.ElementsMatch(t, TableData(ctx, "system.reports_meta", con), [][]string{
   689  		{"1", "'2001-01-01 12:30:00+00:00'"},
   690  	})
   691  	require.Equal(t, 6, r.LastUpdatedRowCount())
   692  }
   693  
   694  type testRangeIter struct {
   695  	ranges []roachpb.RangeDescriptor
   696  }
   697  
   698  var _ RangeIterator = &testRangeIter{}
   699  
   700  // Next is part of the RangeIterator interface.
   701  func (t *testRangeIter) Next(ctx context.Context) (roachpb.RangeDescriptor, error) {
   702  	if len(t.ranges) == 0 {
   703  		return roachpb.RangeDescriptor{}, nil
   704  	}
   705  	first := t.ranges[0]
   706  	t.ranges = t.ranges[1:]
   707  	return first, nil
   708  }
   709  
   710  // Close is part of the RangeIterator interface.
   711  func (t *testRangeIter) Close(context.Context) {
   712  	t.ranges = nil
   713  }
   714  
   715  // compiledTestCase represents the result of calling compileTestCase(). It
   716  // contains everything needed to generate replication reports for a test case.
   717  type compiledTestCase struct {
   718  	// A collection of ranges represented as an implementation of RangeIterator.
   719  	iter testRangeIter
   720  	// The SystemConfig populated with descriptors and zone configs.
   721  	cfg *config.SystemConfig
   722  	// A collection of stores represented as an implementation of StoreResolver.
   723  	resolver StoreResolver
   724  	// A function that dictates whether a node is to be considered live or dead.
   725  	checker nodeChecker
   726  
   727  	// A map from "object names" to ZoneKeys; each table/index/partition with a
   728  	// zone is mapped to the id that the report will use for it. See
   729  	// constraintEntry.object for the key format.
   730  	objectToZone map[string]ZoneKey
   731  	// The inverse of objectToZone.
   732  	zoneToObject map[ZoneKey]string
   733  }
   734  
   735  // compileTestCase takes the input schema and turns it into a compiledTestCase.
   736  func compileTestCase(tc baseReportTestCase) (compiledTestCase, error) {
   737  	tableToID := make(map[string]int)
   738  	idxToID := make(map[string]int)
   739  	// Databases and tables share the id space, so we'll use a common counter for them.
   740  	// We also use keys in user space; otherwise we'd bump into special cases
   741  	// in the zone config lookup.
   742  	objectCounter := keys.MinUserDescID
   743  	sysCfgBuilder := makeSystemConfigBuilder()
   744  	if err := sysCfgBuilder.setDefaultZoneConfig(tc.defaultZone.toZoneConfig()); err != nil {
   745  		return compiledTestCase{}, err
   746  	}
   747  	// Assign ids to databases, tables, and indexes; create descriptors and populate
   748  	// the SystemConfig.
   749  	for _, db := range tc.schema {
   750  		dbID := objectCounter
   751  		objectCounter++
   752  		if db.zone != nil {
   753  			if err := sysCfgBuilder.addDatabaseZone(db.name, dbID, db.zone.toZoneConfig()); err != nil {
   754  				return compiledTestCase{}, err
   755  			}
   756  		}
   757  		sysCfgBuilder.addDBDesc(dbID, sqlbase.DatabaseDescriptor{
   758  			Name: db.name,
   759  			ID:   sqlbase.ID(dbID),
   760  		})
   761  
   762  		for _, table := range db.tables {
   763  			tableID := objectCounter
   764  			objectCounter++
   765  			tableToID[table.name] = tableID
   766  
   767  			// Create a table descriptor to be used for creating the zone config.
   768  			table.addPKIdx()
   769  			tableDesc, err := makeTableDesc(table, tableID, dbID)
   770  			if err != nil {
   771  				return compiledTestCase{}, errors.Wrap(err, "error creating table descriptor")
   772  			}
   773  			sysCfgBuilder.addTableDesc(tableID, tableDesc)
   774  
   775  			tableZone, err := generateTableZone(table, tableDesc)
   776  			if err != nil {
   777  				return compiledTestCase{}, err
   778  			}
   779  			if tableZone != nil {
   780  				if err := sysCfgBuilder.addTableZone(tableDesc, *tableZone); err != nil {
   781  					return compiledTestCase{}, err
   782  				}
   783  			}
   784  			// Add the indexes to idxToID.
   785  			for i, idx := range table.indexes {
   786  				idxID := i + 1 // index 1 is the PK
   787  				idxToID[fmt.Sprintf("%s.%s", table.name, idx.name)] = idxID
   788  			}
   789  		}
   790  	}
   791  
   792  	keyScanner := keysutils.MakePrettyScannerForNamedTables(tableToID, idxToID)
   793  	ranges, err := processSplits(keyScanner, tc.splits)
   794  	if err != nil {
   795  		return compiledTestCase{}, err
   796  	}
   797  
   798  	var storeDescs []roachpb.StoreDescriptor
   799  	for _, n := range tc.nodes {
   800  		_ /* nodeDesc */, sds, err := n.toDescriptors()
   801  		if err != nil {
   802  			return compiledTestCase{}, err
   803  		}
   804  		storeDescs = append(storeDescs, sds...)
   805  	}
   806  	storeResolver := func(r *roachpb.RangeDescriptor) []roachpb.StoreDescriptor {
   807  		stores := make([]roachpb.StoreDescriptor, len(r.Replicas().Voters()))
   808  		for i, rep := range r.Replicas().Voters() {
   809  			for _, desc := range storeDescs {
   810  				if rep.StoreID == desc.StoreID {
   811  					stores[i] = desc
   812  					break
   813  				}
   814  			}
   815  		}
   816  		return stores
   817  	}
   818  	nodeChecker := func(nodeID roachpb.NodeID) bool {
   819  		for _, n := range tc.nodes {
   820  			if n.id != int(nodeID) {
   821  				continue
   822  			}
   823  			return !n.dead
   824  		}
   825  		panic(fmt.Sprintf("node %d not found", nodeID))
   826  	}
   827  	cfg, zoneToObject := sysCfgBuilder.build()
   828  	objectToZone := make(map[string]ZoneKey)
   829  	for k, v := range zoneToObject {
   830  		objectToZone[v] = k
   831  	}
   832  	return compiledTestCase{
   833  		iter:         testRangeIter{ranges: ranges},
   834  		cfg:          cfg,
   835  		resolver:     storeResolver,
   836  		checker:      nodeChecker,
   837  		zoneToObject: zoneToObject,
   838  		objectToZone: objectToZone,
   839  	}, nil
   840  }
   841  
   842  func generateTableZone(t table, tableDesc sqlbase.TableDescriptor) (*zonepb.ZoneConfig, error) {
   843  	// Create the table's zone config.
   844  	var tableZone *zonepb.ZoneConfig
   845  	if t.zone != nil {
   846  		tableZone = new(zonepb.ZoneConfig)
   847  		*tableZone = t.zone.toZoneConfig()
   848  	}
   849  	// Add subzones for the PK partitions.
   850  	tableZone = addIndexSubzones(t.indexes[0], tableZone, 1 /* id of PK */)
   851  	// Add subzones for all the indexes.
   852  	for i, idx := range t.indexes {
   853  		idxID := i + 1 // index 1 is the PK
   854  		tableZone = addIndexSubzones(idx, tableZone, idxID)
   855  	}
   856  	// Fill in the SubzoneSpans.
   857  	if tableZone != nil {
   858  		var err error
   859  		tableZone.SubzoneSpans, err = sql.GenerateSubzoneSpans(
   860  			nil, uuid.UUID{} /* clusterID */, keys.SystemSQLCodec,
   861  			&tableDesc, tableZone.Subzones, false /* hasNewSubzones */)
   862  		if err != nil {
   863  			return nil, errors.Wrap(err, "error generating subzone spans")
   864  		}
   865  	}
   866  	return tableZone, nil
   867  }
   868  
   869  func processSplits(
   870  	keyScanner keysutil.PrettyScanner, splits []split,
   871  ) ([]roachpb.RangeDescriptor, error) {
   872  	ranges := make([]roachpb.RangeDescriptor, len(splits))
   873  	var lastKey roachpb.Key
   874  	for i, split := range splits {
   875  		prettyKey := splits[i].key
   876  		startKey, err := keyScanner.Scan(split.key)
   877  		if err != nil {
   878  			return nil, errors.Wrapf(err, "failed to parse key: %s", prettyKey)
   879  		}
   880  		if lastKey.Compare(startKey) != -1 {
   881  			return nil, errors.WithStack(errors.Newf(
   882  				"unexpected lower or duplicate key: %s (prev key: %s)", prettyKey, lastKey))
   883  		}
   884  		lastKey = startKey
   885  		var endKey roachpb.Key
   886  		if i < len(splits)-1 {
   887  			prettyKey := splits[i+1].key
   888  			endKey, err = keyScanner.Scan(prettyKey)
   889  			if err != nil {
   890  				return nil, errors.Wrapf(err, "failed to parse key: %s", prettyKey)
   891  			}
   892  		} else {
   893  			endKey = roachpb.KeyMax
   894  		}
   895  
   896  		rd := roachpb.RangeDescriptor{
   897  			RangeID:  roachpb.RangeID(i + 1), // IDs start at 1
   898  			StartKey: keys.MustAddr(startKey),
   899  			EndKey:   keys.MustAddr(endKey),
   900  		}
   901  		for _, storeID := range split.stores {
   902  			rd.AddReplica(roachpb.NodeID(storeID), roachpb.StoreID(storeID), roachpb.VOTER_FULL)
   903  		}
   904  		ranges[i] = rd
   905  	}
   906  	return ranges, nil
   907  }
   908  
   909  func makeTableDesc(t table, tableID int, dbID int) (sqlbase.TableDescriptor, error) {
   910  	if err := t.validate(); err != nil {
   911  		return sqlbase.TableDescriptor{}, err
   912  	}
   913  
   914  	desc := sqlbase.TableDescriptor{
   915  		ID:       sqlbase.ID(tableID),
   916  		Name:     t.name,
   917  		ParentID: sqlbase.ID(dbID),
   918  	}
   919  
   920  	for i, idx := range t.indexes {
   921  		idxID := i + 1
   922  		desc.Indexes = append(desc.Indexes, idx.toIndexDescriptor(idxID))
   923  	}
   924  	numCols := 0
   925  	for _, idx := range desc.Indexes {
   926  		c := len(idx.ColumnIDs)
   927  		if c > numCols {
   928  			numCols = c
   929  		}
   930  	}
   931  	for i := 0; i < numCols; i++ {
   932  		desc.Columns = append(desc.Columns, sqlbase.ColumnDescriptor{
   933  			Name: fmt.Sprintf("col%d", i),
   934  			ID:   sqlbase.ColumnID(i),
   935  			Type: types.Int,
   936  		})
   937  	}
   938  
   939  	return desc, nil
   940  }
   941  
   942  // addIndexSubzones creates subzones for an index and all of its partitions and
   943  // appends them to a parent table zone, returning the amended parent.
   944  // If the index and its partitions have no zones, the parent is returned
   945  // unchanged (and possibly nil).
   946  //
   947  // parent: Can be nil if the parent table doesn't have a zone of its own. In that
   948  //   case, if any subzones are created, a placeholder zone will also be created and returned.
   949  func addIndexSubzones(idx index, parent *zonepb.ZoneConfig, idxID int) *zonepb.ZoneConfig {
   950  	res := parent
   951  
   952  	ensureParent := func() {
   953  		if res != nil {
   954  			return
   955  		}
   956  		// Create a placeholder zone config.
   957  		res = zonepb.NewZoneConfig()
   958  		res.DeleteTableConfig()
   959  	}
   960  
   961  	if idx.zone != nil {
   962  		ensureParent()
   963  		res.SetSubzone(zonepb.Subzone{
   964  			IndexID:       uint32(idxID),
   965  			PartitionName: "",
   966  			Config:        idx.zone.toZoneConfig(),
   967  		})
   968  	}
   969  
   970  	for _, p := range idx.partitions {
   971  		if p.zone == nil {
   972  			continue
   973  		}
   974  		ensureParent()
   975  		res.SetSubzone(zonepb.Subzone{
   976  			IndexID:       uint32(idxID),
   977  			PartitionName: p.name,
   978  			Config:        p.zone.toZoneConfig(),
   979  		})
   980  	}
   981  	return res
   982  }
   983  
   984  // systemConfigBuilder builds a SystemConfig. Clients call some setters and then call build().
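        //
        // Typical usage, mirroring compileTestCase above: setDefaultZoneConfig(),
        // then addDBDesc()/addDatabaseZone() and addTableDesc()/addTableZone() for
        // each object in the schema, and finally build().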
   985  type systemConfigBuilder struct {
   986  	kv                []roachpb.KeyValue
   987  	defaultZoneConfig *zonepb.ZoneConfig
   988  	zoneToObject      map[ZoneKey]string
   989  }
   990  
   991  func makeSystemConfigBuilder() systemConfigBuilder {
   992  	return systemConfigBuilder{zoneToObject: make(map[ZoneKey]string)}
   993  }
   994  
   995  func (b *systemConfigBuilder) addZoneToObjectMapping(k ZoneKey, object string) error {
   996  	if s, ok := b.zoneToObject[k]; ok {
   997  		return errors.Errorf("zone %s already mapped to object %s", k, s)
   998  	}
   999  	for k, v := range b.zoneToObject {
  1000  		if v == object {
  1001  			return errors.Errorf("object %s already mapped to key %s", object, k)
  1002  		}
  1003  	}
  1004  	b.zoneToObject[k] = object
  1005  	return nil
  1006  }
  1007  
  1008  func (b *systemConfigBuilder) setDefaultZoneConfig(cfg zonepb.ZoneConfig) error {
  1009  	b.defaultZoneConfig = &cfg
  1010  	return b.addZoneInner("default", keys.RootNamespaceID, cfg)
  1011  }
  1012  
  1013  func (b *systemConfigBuilder) addZoneInner(objectName string, id int, cfg zonepb.ZoneConfig) error {
  1014  	k := config.MakeZoneKey(uint32(id))
  1015  	var v roachpb.Value
  1016  	if err := v.SetProto(&cfg); err != nil {
  1017  		panic(err)
  1018  	}
  1019  	b.kv = append(b.kv, roachpb.KeyValue{Key: k, Value: v})
  1020  	return b.addZoneToObjectMapping(MakeZoneKey(uint32(id), NoSubzone), objectName)
  1021  }
  1022  
  1023  func (b *systemConfigBuilder) addDatabaseZone(name string, id int, cfg zonepb.ZoneConfig) error {
  1024  	return b.addZoneInner(name, id, cfg)
  1025  }
  1026  
  1027  func (b *systemConfigBuilder) addTableZone(t sqlbase.TableDescriptor, cfg zonepb.ZoneConfig) error {
  1028  	if err := b.addZoneInner(t.Name, int(t.ID), cfg); err != nil {
  1029  		return err
  1030  	}
  1031  	// Figure out the mapping from all the partition names to zone keys.
  1032  	for i, subzone := range cfg.Subzones {
  1033  		var idx string
  1034  		if subzone.IndexID == 1 {
  1035  			// Index 1 is the PK.
  1036  			idx = t.Name
  1037  		} else {
  1038  			idx = fmt.Sprintf("%s.%s", t.Name, t.Indexes[subzone.IndexID-1].Name)
  1039  		}
  1040  		var object string
  1041  		if subzone.PartitionName == "" {
  1042  			object = idx
  1043  		} else {
  1044  			object = fmt.Sprintf("%s.%s", idx, subzone.PartitionName)
  1045  		}
  1046  		if err := b.addZoneToObjectMapping(
  1047  			MakeZoneKey(uint32(t.ID), base.SubzoneIDFromIndex(i)), object,
  1048  		); err != nil {
  1049  			return err
  1050  		}
  1051  	}
  1052  	return nil
  1053  }
  1054  
  1055  // build constructs a SystemConfig containing all the information accumulated in the builder.
  1056  func (b *systemConfigBuilder) build() (*config.SystemConfig, map[ZoneKey]string) {
  1057  	if b.defaultZoneConfig == nil {
  1058  		panic("default zone config not set")
  1059  	}
  1060  
  1061  	sort.Slice(b.kv, func(i, j int) bool {
  1062  		return bytes.Compare(b.kv[i].Key, b.kv[j].Key) < 0
  1063  	})
  1064  
  1065  	cfg := config.NewSystemConfig(b.defaultZoneConfig)
  1066  	cfg.SystemConfigEntries.Values = b.kv
  1067  	return cfg, b.zoneToObject
  1068  }
  1069  
  1070  // addTableDesc adds a table descriptor to the SystemConfig.
  1071  func (b *systemConfigBuilder) addTableDesc(id int, tableDesc sqlbase.TableDescriptor) {
  1072  	if tableDesc.ParentID == 0 {
  1073  		panic(fmt.Sprintf("parent not set for table %q", tableDesc.Name))
  1074  	}
  1075  	// Write the table to the SystemConfig, in the descriptors table.
  1076  	k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(id))
  1077  	desc := &sqlbase.Descriptor{
  1078  		Union: &sqlbase.Descriptor_Table{
  1079  			Table: &tableDesc,
  1080  		},
  1081  	}
  1082  	// Use a bogus timestamp for the descriptor modification time.
  1083  	ts := hlc.Timestamp{WallTime: 123}
  1084  	desc.Table(ts)
  1085  	var v roachpb.Value
  1086  	if err := v.SetProto(desc); err != nil {
  1087  		panic(err)
  1088  	}
  1089  	b.kv = append(b.kv, roachpb.KeyValue{Key: k, Value: v})
  1090  }
  1091  
  1092  // addDBDesc adds a database descriptor to the SystemConfig.
  1093  func (b *systemConfigBuilder) addDBDesc(id int, dbDesc sqlbase.DatabaseDescriptor) {
  1094  	// Write the database to the SystemConfig, in the descriptors table.
  1095  	k := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(id))
  1096  	desc := &sqlbase.Descriptor{
  1097  		Union: &sqlbase.Descriptor_Database{
  1098  			Database: &dbDesc,
  1099  		},
  1100  	}
  1101  	var v roachpb.Value
  1102  	if err := v.SetProto(desc); err != nil {
  1103  		panic(err)
  1104  	}
  1105  	b.kv = append(b.kv, roachpb.KeyValue{Key: k, Value: v})
  1106  }