github.com/zuoyebang/bitalostable@v1.0.1-0.20240229032404-e3b99a834294/internal/metamorphic/key_manager_test.go

package metamorphic

import (
	"bytes"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/zuoyebang/bitalostable/internal/randvar"
)

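// TestObjKey verifies that objKey.String renders keys as "<object>:<key>",
// e.g. "db:foo" for the DB object and "batch1:bar" for batch 1.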
func TestObjKey(t *testing.T) {
	testCases := []struct {
		key  objKey
		want string
	}{
		{
			key:  makeObjKey(makeObjID(dbTag, 0), []byte("foo")),
			want: "db:foo",
		},
		{
			key:  makeObjKey(makeObjID(batchTag, 1), []byte("bar")),
			want: "batch1:bar",
		},
	}

	for _, tc := range testCases {
		t.Run("", func(t *testing.T) {
			require.Equal(t, tc.want, tc.key.String())
		})
	}
}

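// TestGlobalStateIndicatesEligibleForSingleDelete checks the rule exercised by
// the cases below: a key is eligible for a single delete only when it has
// exactly one set and no merges, deletes, or prior single deletes recorded in
// the global key state.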
func TestGlobalStateIndicatesEligibleForSingleDelete(t *testing.T) {
	key := makeObjKey(makeObjID(dbTag, 0), []byte("foo"))
	testCases := []struct {
		meta keyMeta
		want bool
	}{
		{
			meta: keyMeta{
				objKey: key,
			},
			want: false,
		},
		{
			meta: keyMeta{
				objKey: key,
				sets:   1,
			},
			want: true,
		},
		{
			meta: keyMeta{
				objKey: key,
				sets:   2,
			},
			want: false,
		},
		{
			meta: keyMeta{
				objKey: key,
				sets:   1,
				merges: 1,
			},
			want: false,
		},
		{
			meta: keyMeta{
				objKey: key,
				sets:   1,
				dels:   1,
			},
			want: false,
		},
		{
			meta: keyMeta{
				objKey:    key,
				sets:      1,
				singleDel: true,
			},
			want: false,
		},
	}

	for _, tc := range testCases {
		k := newKeyManager()
		t.Run("", func(t *testing.T) {
			k.globalKeysMap[string(key.key)] = &tc.meta
			require.Equal(t, tc.want, k.globalStateIndicatesEligibleForSingleDelete(key.key))
		})
	}
}

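// TestKeyMeta_MergeInto verifies that mergeInto sums the per-key operation
// counts, takes the resulting del flag from the merged-in metadata, and
// appends a keyUpdate whose metaTimestamp advances with each merge on the
// shared keyManager (0, 1, 2 across the three cases below).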
func TestKeyMeta_MergeInto(t *testing.T) {
	testCases := []struct {
		existing keyMeta
		toMerge  keyMeta
		expected keyMeta
	}{
		{
			existing: keyMeta{
				sets:      1,
				merges:    0,
				singleDel: false,
			},
			toMerge: keyMeta{
				sets:      0,
				merges:    0,
				singleDel: true,
			},
			expected: keyMeta{
				sets:      1,
				merges:    0,
				singleDel: true,
				updateOps: []keyUpdate{
					{deleted: true, metaTimestamp: 0},
				},
			},
		},
		{
			existing: keyMeta{
				sets:   3,
				merges: 1,
				dels:   7,
			},
			toMerge: keyMeta{
				sets:   4,
				merges: 2,
				dels:   8,
				del:    true,
			},
			expected: keyMeta{
				sets:   7,
				merges: 3,
				dels:   15,
				del:    true,
				updateOps: []keyUpdate{
					{deleted: true, metaTimestamp: 1},
				},
			},
		},
		{
			existing: keyMeta{
				sets:   3,
				merges: 1,
				dels:   7,
				del:    true,
			},
			toMerge: keyMeta{
				sets:   1,
				merges: 0,
				dels:   8,
				del:    false,
			},
			expected: keyMeta{
				sets:   4,
				merges: 1,
				dels:   15,
				del:    false,
				updateOps: []keyUpdate{
					{deleted: false, metaTimestamp: 2},
				},
			},
		},
	}

	keyManager := newKeyManager()
	for _, tc := range testCases {
		t.Run("", func(t *testing.T) {
			tc.toMerge.mergeInto(keyManager, &tc.existing)
			require.Equal(t, tc.expected, tc.existing)
		})
	}
}

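// TestKeyManager_AddKey checks that addNewKey registers each key exactly once
// and tracks key prefixes separately: "foo@6" reuses the existing "foo" prefix
// while "bax@4" adds a new "bax" prefix, and prefixes() reports prefixes in
// insertion order.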
func TestKeyManager_AddKey(t *testing.T) {
	m := newKeyManager()
	require.Empty(t, m.globalKeys)

	k1 := []byte("foo")
	require.True(t, m.addNewKey(k1))
	require.Len(t, m.globalKeys, 1)
	require.Len(t, m.globalKeyPrefixes, 1)
	require.Contains(t, m.globalKeys, k1)
	require.Contains(t, m.globalKeyPrefixes, k1)
	require.False(t, m.addNewKey(k1))
	require.True(t, m.prefixExists([]byte("foo")))
	require.False(t, m.prefixExists([]byte("bar")))

	k2 := []byte("bar")
	require.True(t, m.addNewKey(k2))
	require.Len(t, m.globalKeys, 2)
	require.Len(t, m.globalKeyPrefixes, 2)
	require.Contains(t, m.globalKeys, k2)
	require.Contains(t, m.globalKeyPrefixes, k2)
	require.True(t, m.prefixExists([]byte("bar")))
	k3 := []byte("bax@4")
	require.True(t, m.addNewKey(k3))
	require.Len(t, m.globalKeys, 3)
	require.Len(t, m.globalKeyPrefixes, 3)
	require.Contains(t, m.globalKeys, k3)
	require.Contains(t, m.globalKeyPrefixes, []byte("bax"))
	require.True(t, m.prefixExists([]byte("bax")))
	k4 := []byte("foo@6")
	require.True(t, m.addNewKey(k4))
	require.Len(t, m.globalKeys, 4)
	require.Len(t, m.globalKeyPrefixes, 3)
	require.Contains(t, m.globalKeys, k4)
	require.True(t, m.prefixExists([]byte("foo")))

	require.Equal(t, [][]byte{
		[]byte("foo"), []byte("bar"), []byte("bax"),
	}, m.prefixes())
}

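// TestKeyManager_GetOrInit checks that getOrInit lazily creates per-object key
// metadata (the DB object is tracked from the start) and returns the same
// metadata on repeated calls for the same object/key pair.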
func TestKeyManager_GetOrInit(t *testing.T) {
	id := makeObjID(batchTag, 1)
	key := []byte("foo")
	o := makeObjKey(id, key)

	m := newKeyManager()
	require.NotContains(t, m.byObjKey, o.String())
	require.NotContains(t, m.byObj, id)
	require.Contains(t, m.byObj, makeObjID(dbTag, 0)) // Always contains the DB key.

	meta1 := m.getOrInit(id, key)
	require.Contains(t, m.byObjKey, o.String())
	require.Contains(t, m.byObj, id)

	// Idempotent.
	meta2 := m.getOrInit(id, key)
	require.Equal(t, meta1, meta2)
}

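// TestKeyManager_Contains checks that contains reports a key only after it has
// been initialized for the given object via getOrInit.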
func TestKeyManager_Contains(t *testing.T) {
	id := makeObjID(dbTag, 0)
	key := []byte("foo")

	m := newKeyManager()
	require.False(t, m.contains(id, key))

	m.getOrInit(id, key)
	require.True(t, m.contains(id, key))
}

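// TestKeyManager_MergeInto checks that mergeKeysInto moves all key metadata
// from a source object (a batch) into a destination object (the DB), merging
// metadata for keys present in both, and removes the source's entries from the
// byObjKey and byObj maps.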
func TestKeyManager_MergeInto(t *testing.T) {
	fromID := makeObjID(batchTag, 1)
	toID := makeObjID(dbTag, 0)

	m := newKeyManager()

	// Two keys in "from".
	a := m.getOrInit(fromID, []byte("foo"))
	a.sets = 1
	b := m.getOrInit(fromID, []byte("bar"))
	b.merges = 2

	// One key in "to", with same value as a key in "from", that will be merged.
	m.getOrInit(toID, []byte("foo"))

	// Before, there are two sets.
	require.Len(t, m.byObj[fromID], 2)
	require.Len(t, m.byObj[toID], 1)

	m.mergeKeysInto(fromID, toID)

	// Keys in "from" sets are moved to "to" set.
	require.Len(t, m.byObj[toID], 2)

	// Key "foo" was merged into "to".
	foo := m.getOrInit(toID, []byte("foo"))
	require.Equal(t, 1, foo.sets) // value was merged.

	// Key "bar" was merged into "to".
	bar := m.getOrInit(toID, []byte("bar"))
	require.Equal(t, 2, bar.merges) // value was unchanged.

	// Keys in "from" sets are removed from maps.
	require.NotContains(t, m.byObjKey, makeObjKey(fromID, a.key))
	require.NotContains(t, m.byObjKey, makeObjKey(fromID, b.key))
	require.NotContains(t, m.byObj, fromID)
}

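// seqFn is one step in a scripted keyManager scenario. The helpers below build
// steps that either feed an operation to the key manager or assert a key's
// read/write/single-delete eligibility at that point in the sequence.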
type seqFn func(t *testing.T, k *keyManager)

func updateForOp(op op) seqFn {
	return func(t *testing.T, k *keyManager) {
		k.update(op)
	}
}

func addKey(key []byte, expected bool) seqFn {
	return func(t *testing.T, k *keyManager) {
		require.Equal(t, expected, k.addNewKey(key))
	}
}

func eligibleRead(key []byte, val bool) seqFn {
	return func(t *testing.T, k *keyManager) {
		require.Equal(t, val, contains(key, k.eligibleReadKeys()))
	}
}

func eligibleWrite(key []byte, val bool) seqFn {
	return func(t *testing.T, k *keyManager) {
		require.Equal(t, val, contains(key, k.eligibleWriteKeys()))
	}
}

func eligibleSingleDelete(key []byte, val bool, id objID) seqFn {
	return func(t *testing.T, k *keyManager) {
		require.Equal(t, val, contains(key, k.eligibleSingleDeleteKeys(id)))
	}
}

func contains(key []byte, keys [][]byte) bool {
	for _, k := range keys {
		if bytes.Equal(key, k) {
			return true
		}
	}
	return false
}

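// TestKeyManager runs scripted sequences of sets, merges, deletes, single
// deletes, and batch applies, asserting after each step which keys the key
// manager considers eligible for reads, writes, and single deletes on the DB
// and on individual batches.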
func TestKeyManager(t *testing.T) {
	var (
		id1  = makeObjID(batchTag, 0)
		id2  = makeObjID(batchTag, 1)
		key1 = []byte("foo")
	)

	testCases := []struct {
		description string
		ops         []seqFn
		wantPanic   bool
	}{
		{
			description: "set, single del, on db",
			ops: []seqFn{
				addKey(key1, true),
				addKey(key1, false),
				eligibleRead(key1, true),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				eligibleSingleDelete(key1, false, id1),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleRead(key1, true),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, true, dbObjID),
				eligibleSingleDelete(key1, true, id1),
				updateForOp(&singleDeleteOp{writerID: dbObjID, key: key1}),
				eligibleRead(key1, true),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
			},
		},
		{
			description: "set, single del, on batch",
			ops: []seqFn{
				addKey(key1, true),
				updateForOp(&setOp{writerID: id1, key: key1}),
				eligibleRead(key1, true),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				eligibleSingleDelete(key1, true, id1),
				eligibleSingleDelete(key1, false, id2),
				updateForOp(&singleDeleteOp{writerID: id1, key: key1}),
				eligibleRead(key1, true),
				eligibleWrite(key1, false),
				eligibleSingleDelete(key1, false, dbObjID),
				eligibleSingleDelete(key1, false, id1),
				updateForOp(&applyOp{batchID: id1, writerID: dbObjID}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
			},
		},
		{
			description: "set on db, single del on batch",
			ops: []seqFn{
				addKey(key1, true),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, true, dbObjID),
				eligibleSingleDelete(key1, true, id1),
				updateForOp(&singleDeleteOp{writerID: id1, key: key1}),
				eligibleWrite(key1, false),
				eligibleSingleDelete(key1, false, dbObjID),
				eligibleSingleDelete(key1, false, id1),
				updateForOp(&applyOp{batchID: id1, writerID: dbObjID}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleSingleDelete(key1, true, dbObjID),
				eligibleSingleDelete(key1, true, id1),
			},
		},
		{
			description: "set, del, set, single del, on db",
			ops: []seqFn{
				addKey(key1, true),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				updateForOp(&deleteOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, true, dbObjID),
				updateForOp(&singleDeleteOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
			},
		},
		{
			description: "set, del, set, del, on batches",
			ops: []seqFn{
				addKey(key1, true),
				updateForOp(&setOp{writerID: id1, key: key1}),
				updateForOp(&deleteOp{writerID: id1, key: key1}),
				updateForOp(&setOp{writerID: id1, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, id1),
				updateForOp(&applyOp{batchID: id1, writerID: dbObjID}),
				eligibleWrite(key1, true),
				// Not eligible for single del since the set count is 2.
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				// Not eligible for single del since the set count is 3.
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&deleteOp{writerID: id2, key: key1}),
				updateForOp(&applyOp{batchID: id2, writerID: dbObjID}),
				// Set count is 0.
				eligibleSingleDelete(key1, false, dbObjID),
				// Set count is 1.
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleSingleDelete(key1, true, dbObjID),
			},
		},
		{
			description: "set, merge, del, set, single del, on db",
			ops: []seqFn{
				addKey(key1, true),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleSingleDelete(key1, true, dbObjID),
				updateForOp(&mergeOp{writerID: dbObjID, key: key1}),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&deleteOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, true, dbObjID),
				updateForOp(&singleDeleteOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
			},
		},
		{
			description: "set, del on db, set, single del on batch",
			ops: []seqFn{
				addKey(key1, true),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleSingleDelete(key1, true, dbObjID),
				updateForOp(&deleteOp{writerID: dbObjID, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&setOp{writerID: id1, key: key1}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				eligibleSingleDelete(key1, true, id1),
				updateForOp(&singleDeleteOp{writerID: id1, key: key1}),
				eligibleWrite(key1, false),
				eligibleSingleDelete(key1, false, id1),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&applyOp{batchID: id1, writerID: dbObjID}),
				eligibleWrite(key1, true),
				eligibleSingleDelete(key1, false, dbObjID),
				updateForOp(&setOp{writerID: dbObjID, key: key1}),
				eligibleSingleDelete(key1, true, dbObjID),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			m := newKeyManager()
			tFunc := func() {
				for _, op := range tc.ops {
					op(t, m)
				}
			}
			if tc.wantPanic {
				require.Panics(t, tFunc)
			} else {
				tFunc()
			}
		})
	}
}

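// TestOpWrittenKeys constructs every registered operation and passes it to
// opWrittenKeys; any op type that opWrittenKeys does not handle panics and
// fails its subtest.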
func TestOpWrittenKeys(t *testing.T) {
	for name, info := range methods {
		t.Run(name, func(t *testing.T) {
			// Any operations that exist in methods but are not handled in
			// opWrittenKeys will result in a panic, failing the subtest.
			opWrittenKeys(info.constructor())
		})
	}
}

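// TestLoadPrecedingKeys generates a random sequence of operations, replays it
// through loadPrecedingKeys into a fresh config and key manager, and checks
// that the recovered keys and prefixes are a subset of the originally
// generated ones (exact equality cannot be asserted, per the NB comments).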
func TestLoadPrecedingKeys(t *testing.T) {
	rng := randvar.NewRand()
	cfg := defaultConfig()
	km := newKeyManager()
	ops := generate(rng, 1000, cfg, km)

	cfg2 := defaultConfig()
	km2 := newKeyManager()
	loadPrecedingKeys(t, ops, &cfg2, km2)

	// NB: We can't assert equality, because the original run may not have
	// ever used the max of the distribution.
	require.Greater(t, cfg2.writeSuffixDist.Max(), uint64(1))

	// NB: We can't assert equality, because the original run may have generated
	// keys that it didn't end up using in operations.
	require.Subset(t, km.globalKeys, km2.globalKeys)
	require.Subset(t, km.globalKeyPrefixes, km2.globalKeyPrefixes)
}