github.com/letsencrypt/trillian@v1.1.2-0.20180615153820-ae375a99d36a/merkle/sparse_merkle_tree_test.go

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package merkle

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/base64"
	"errors"
	"flag"
	"fmt"
	"os"
	"runtime/pprof"
	"strings"
	"sync"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/google/trillian/merkle/maphasher"
	"github.com/google/trillian/storage"
	"github.com/google/trillian/testonly"
)

var (
	cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
	memprofile = flag.String("memprofile", "", "write mem profile to file")
)

// These nodes were generated randomly and reviewed to ensure node IDs do not collide with
// those fetched during the test.
var inclusionProofIncorrectTestNodes = []storage.Node{
	{NodeID: storage.NodeID{Path: []uint8{0x2c, 0x8b, 0xcf, 0xe1, 0xc5, 0x71, 0xf4, 0x2d, 0xc2, 0xe9, 0x22, 0x7d, 0x91, 0xd5, 0x93, 0x70, 0x8f, 0x8c, 0x40, 0xca, 0xf, 0xd3, 0xd8, 0x4b, 0x43, 0x6a, 0x3, 0x2f, 0xf1, 0x4, 0x7, 0x9b}, PrefixLenBits: 174}, Hash: []uint8{0x4, 0x7b, 0xe5, 0xab, 0x12, 0x2d, 0x44, 0x98, 0xd8, 0xcc, 0xc7, 0x27, 0x4d, 0xc5, 0xda, 0x59, 0x38, 0xf5, 0x4d, 0x9c, 0x98, 0x33, 0x2a, 0x95, 0xb1, 0x20, 0xe2, 0x8c, 0x7, 0x5f, 0xb5, 0x9a}, NodeRevision: 34},
	{NodeID: storage.NodeID{Path: []uint8{0x7c, 0xf5, 0x65, 0xc6, 0xd5, 0xbe, 0x2d, 0x39, 0xff, 0xf4, 0x58, 0xc2, 0x9f, 0x4f, 0x9, 0x3c, 0x54, 0x62, 0xf5, 0x35, 0x19, 0x87, 0x56, 0xb5, 0x4c, 0x6c, 0x11, 0xf3, 0xd7, 0x2, 0xc, 0x80}, PrefixLenBits: 234}, Hash: []uint8{0xbc, 0x33, 0xbe, 0x74, 0x79, 0x43, 0x59, 0x83, 0x5d, 0x93, 0x87, 0x13, 0x22, 0x98, 0xa0, 0x69, 0xed, 0xa5, 0xca, 0xfb, 0x7c, 0x16, 0x91, 0x51, 0xa2, 0xb, 0x9f, 0x17, 0xe4, 0x3f, 0xe3, 0x3}, NodeRevision: 34},
	{NodeID: storage.NodeID{Path: []uint8{0x5f, 0xc6, 0x73, 0x1c, 0x5d, 0x57, 0x23, 0xdc, 0x6a, 0xd, 0x38, 0xcb, 0x41, 0x25, 0x97, 0x2, 0x63, 0x8d, 0xa, 0x2d, 0xbe, 0x8e, 0x88, 0xff, 0x9e, 0x54, 0x5b, 0xb4, 0x5d, 0x4e, 0x6e, 0x5b}, PrefixLenBits: 223}, Hash: []uint8{0xb6, 0xd4, 0xbd, 0x76, 0x5e, 0x9b, 0x80, 0x2f, 0x71, 0x32, 0x5e, 0xf8, 0x41, 0xea, 0x47, 0xc7, 0x4, 0x7d, 0xd, 0x64, 0xa8, 0xf6, 0x22, 0xe4, 0xb4, 0xe1, 0xef, 0x2f, 0x67, 0xf8, 0x8b, 0xaa}, NodeRevision: 34},
	{NodeID: storage.NodeID{Path: []uint8{0x30, 0xe, 0x65, 0x75, 0x4d, 0xd9, 0x7a, 0x1, 0xc5, 0x2b, 0x2a, 0x6f, 0x4b, 0x59, 0x5d, 0xa8, 0xeb, 0x65, 0x25, 0x3a, 0xc5, 0xf7, 0xd2, 0x4b, 0xcc, 0x54, 0xbf, 0xe8, 0x6e, 0xe8, 0x96, 0xb7}, PrefixLenBits: 156}, Hash: []uint8{0x74, 0x93, 0x28, 0x98, 0xbc, 0xd0, 0xfd, 0x28, 0xa9, 0x39, 0xb5, 0xb5, 0xe9, 0xcc, 0x17, 0xe0, 0xe2, 0xd, 0x16, 0x14, 0xfd, 0xb1, 0x67, 0x19, 0x31, 0x3, 0x73, 0x35, 0xb4, 0x1d, 0x6d, 0x1d}, NodeRevision: 34},
	{NodeID: storage.NodeID{Path: []uint8{0x8e, 0x3b, 0x81, 0xe4, 0x2f, 0xe6, 0xd6, 0x52, 0x9b, 0xbd, 0x36, 0xc5, 0x3, 0x52, 0xe9, 0x60, 0xbb, 0xcb, 0xc9, 0xbd, 0x57, 0x96, 0xaf, 0x18, 0xd4, 0x94, 0xdd, 0x8, 0xa2, 0x43, 0x1e, 0x10}, PrefixLenBits: 157}, Hash: []uint8{0xe0, 0xb6, 0xea, 0x8a, 0xf1, 0x57, 0x1e, 0x5c, 0xbe, 0xbe, 0xd9, 0x5b, 0x29, 0x5f, 0x3, 0x7c, 0x32, 0x33, 0x77, 0xf7, 0x1c, 0x9e, 0x19, 0x4d, 0xc6, 0xdb, 0x5, 0xf7, 0x3e, 0x6c, 0xcb, 0x85}, NodeRevision: 34},
}

func maybeProfileCPU(t *testing.T) func() {
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			t.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		return pprof.StopCPUProfile
	}
	return func() {}
}

func maybeProfileMemory(t *testing.T) {
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			t.Fatal(err)
		}
		pprof.WriteHeapProfile(f)
		f.Close()
	}
}

func getSparseMerkleTreeReaderWithMockTX(ctrl *gomock.Controller, rev int64) (*SparseMerkleTreeReader, *storage.MockMapTreeTX) {
	tx := storage.NewMockMapTreeTX(ctrl)
	return NewSparseMerkleTreeReader(rev, maphasher.Default, tx), tx
}

func runOnProducer(tx storage.MapTreeTX) func(context.Context, func(context.Context, storage.MapTreeTX) error) error {
	defer tx.Close()

	return func(ctx context.Context, f func(context.Context, storage.MapTreeTX) error) error {
		return f(ctx, tx)
	}
}

func getSparseMerkleTreeWriterWithMockTX(ctx context.Context, ctrl *gomock.Controller, treeID, rev int64) (*SparseMerkleTreeWriter, *storage.MockMapTreeTX) {
	tx := storage.NewMockMapTreeTX(ctrl)
	tx.EXPECT().WriteRevision().AnyTimes().Return(rev)
	tx.EXPECT().Close().MinTimes(1)
	tree, err := NewSparseMerkleTreeWriter(ctx, treeID, rev, maphasher.Default, runOnProducer(tx))
	if err != nil {
		panic(err)
	}
	return tree, tx
}

type rootNodeMatcher struct{}

func (r rootNodeMatcher) Matches(x interface{}) bool {
	nodes, ok := x.([]storage.NodeID)
	if !ok {
		return false
	}
	return len(nodes) == 1 &&
		nodes[0].PrefixLenBits == 0
}

func (r rootNodeMatcher) String() string {
	return "is a single root node"
}

func randomBytes(t *testing.T, n int) []byte {
	r := make([]byte, n)
	g, err := rand.Read(r)
	if g != n || err != nil {
		t.Fatalf("Failed to read %d bytes of entropy for path, read %d and got error: %v", n, g, err)
	}
	return r
}

func getRandomRootNode(t *testing.T, rev int64) storage.Node {
	return storage.Node{
		NodeID:       storage.NewEmptyNodeID(0),
		Hash:         randomBytes(t, 32),
		NodeRevision: rev,
	}
}

func getRandomNonRootNode(t *testing.T, rev int64) storage.Node {
	nodeID := storage.NewNodeIDFromHash(randomBytes(t, 32))
	// Make sure it's not a root node.
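	// A node ID derived from a 32-byte hash starts out at full depth (256 bits),
	// so the prefix length is overridden below with a random value in [1, 254],
	// keeping the node strictly between the root and a full-depth leaf.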
	nodeID.PrefixLenBits = int(1 + randomBytes(t, 1)[0]%254)
	return storage.Node{
		NodeID:       nodeID,
		Hash:         randomBytes(t, 32),
		NodeRevision: rev,
	}
}

func TestRootAtRevision(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, 100)
	node := getRandomRootNode(t, 14)
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(23), rootNodeMatcher{}).Return([]storage.Node{node}, nil)
	root, err := r.RootAtRevision(ctx, 23)
	if err != nil {
		t.Fatalf("Failed when calling RootAtRevision(23): %v", err)
	}
	if got, want := root, node.Hash; !bytes.Equal(got, want) {
		t.Fatalf("Got root %v, want %v", got, want)
	}
}

func TestRootAtUnknownRevision(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, 100)
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(23), rootNodeMatcher{}).Return([]storage.Node{}, nil)
	_, err := r.RootAtRevision(ctx, 23)
	if err != ErrNoSuchRevision {
		t.Fatalf("Attempt to retrieve root at a non-existent revision did not result in ErrNoSuchRevision: %v", err)
	}
}

func TestRootAtRevisionHasMultipleRoots(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, 100)
	n1, n2 := getRandomRootNode(t, 14), getRandomRootNode(t, 15)
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(23), rootNodeMatcher{}).Return([]storage.Node{n1, n2}, nil)
	_, err := r.RootAtRevision(ctx, 23)
	if err == nil || err == ErrNoSuchRevision {
		t.Fatalf("Attempt to retrieve root at a revision with multiple root nodes did not result in an error: %v", err)
	}
}

func TestRootAtRevisionCatchesFutureRevision(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, rev)
	// Sanity checking in RootAtRevision should catch this node being incorrectly
	// returned by the storage layer.
	n1 := getRandomRootNode(t, rev+1)
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), rootNodeMatcher{}).Return([]storage.Node{n1}, nil)
	_, err := r.RootAtRevision(ctx, rev)
	if err == nil || err == ErrNoSuchRevision {
		t.Fatalf("Attempt to retrieve root with corrupt node did not result in error: %v", err)
	}
}

func TestRootAtRevisionCatchesNonRootNode(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, rev)
	// Sanity checking in RootAtRevision should catch this node being incorrectly
	// returned by the storage layer.
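	// A node with a non-zero prefix length can never be the tree root, so
	// RootAtRevision is expected to reject it rather than return its hash.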
	n1 := getRandomNonRootNode(t, rev)
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), rootNodeMatcher{}).Return([]storage.Node{n1}, nil)
	_, err := r.RootAtRevision(ctx, rev)
	if err == nil || err == ErrNoSuchRevision {
		t.Fatalf("Attempt to retrieve root with corrupt node did not result in error: %v", err)
	}
}

func TestInclusionProofForNullEntryInEmptyTree(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, rev)
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).Return([]storage.Node{}, nil)
	const key = "SomeArbitraryKey"
	proof, err := r.InclusionProof(ctx, rev, testonly.HashKey(key))
	if err != nil {
		t.Fatalf("Got error while retrieving inclusion proof: %v", err)
	}

	if expected, got := 256, len(proof); expected != got {
		t.Fatalf("Expected proof of len %d, but got len %d", expected, got)
	}

	// Verify these are null hashes
	for i := len(proof) - 1; i > 0; i-- {
		if got := proof[i]; got != nil {
			t.Errorf("proof[%d] = %v, expected nil", i, got)
		}
	}
}

// TODO(al): Add some more inclusion proof tests here

func TestInclusionProofGetsIncorrectNode(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	// This test requests inclusion proofs where storage returns a single node that should not be part
	// of the proof for the supplied key. This should not succeed.
	for _, testNode := range inclusionProofIncorrectTestNodes {
		const rev = 100
		r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, rev)
		tx.EXPECT().Commit().AnyTimes().Return(nil)
		tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).Return([]storage.Node{testNode}, nil)
		const key = "SomeArbitraryKey"
		index := testonly.HashKey(key)
		proof, err := r.InclusionProof(ctx, rev, index)
		if err == nil {
			t.Errorf("InclusionProof() = %v, nil, want error containing '1 remain(s) unused'", proof)
			continue
		}
		if !strings.Contains(err.Error(), "1 remain(s) unused") {
			t.Errorf("InclusionProof() = %v, %v, want error containing '1 remain(s) unused'", proof, err)
		}
	}
}

func TestInclusionProofPassesThroughStorageError(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, rev)
	e := errors.New("boo")
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).Return([]storage.Node{}, e)
	_, err := r.InclusionProof(ctx, rev, testonly.HashKey("Whatever"))
	if err != e {
		t.Fatalf("InclusionProof() should've returned an error '%v', but got '%v'", e, err)
	}
}

func TestInclusionProofGetsTooManyNodes(t *testing.T) {
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	r, tx := getSparseMerkleTreeReaderWithMockTX(mockCtrl, rev)
	const key = "SomeArbitraryKey"
	keyHash := testonly.HashKey(key)
	// Going to return one too many nodes.
	nodes := make([]storage.Node, 257)
	// First build a plausible looking set of proof nodes.
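	// Each of these follows the key's own path but with one bit flipped at
	// increasing depth, so the entries resemble the sibling nodes a genuine
	// 256-level proof would contain.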
	for i := 1; i < 256; i++ {
		nodes[255-i].NodeID = storage.NewNodeIDFromHash(keyHash)
		nodes[255-i].NodeID.PrefixLenBits = i + 1
		nodes[255-i].NodeID.SetBit(i, nodes[255-i].NodeID.Bit(i)^1)
	}
	// and then tack on some rubbish:
	nodes[256] = getRandomNonRootNode(t, 42)

	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).AnyTimes().Return(nodes, nil)
	_, err := r.InclusionProof(ctx, rev, testonly.HashKey(key))
	if err == nil {
		t.Fatal("InclusionProof() should've returned an error due to extra unused node")
	}
	if !strings.Contains(err.Error(), "failed to consume") {
		t.Fatalf("Saw unexpected error: %v", err)
	}
}

type sparseKeyValue struct {
	k, v string
}

type sparseTestVector struct {
	kv           []sparseKeyValue
	expectedRoot []byte
}

func testSparseTreeCalculatedRoot(ctx context.Context, t *testing.T, vec sparseTestVector) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	w, tx := getSparseMerkleTreeWriterWithMockTX(ctx, mockCtrl, treeID, rev)

	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().Close().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).AnyTimes().Return([]storage.Node{}, nil)
	tx.EXPECT().SetMerkleNodes(ctx, gomock.Any()).AnyTimes().Return(nil)

	testSparseTreeCalculatedRootWithWriter(ctx, t, rev, vec, w)
}

func testSparseTreeCalculatedRootWithWriter(ctx context.Context, t *testing.T, rev int64, vec sparseTestVector, w *SparseMerkleTreeWriter) {
	var leaves []HashKeyValue
	for _, kv := range vec.kv {
		index := testonly.HashKey(kv.k)
		leafHash, err := w.hasher.HashLeaf(treeID, index, []byte(kv.v))
		if err != nil {
			t.Fatalf("HashLeaf(): %v", err)
		}
		leaves = append(leaves, HashKeyValue{
			HashedKey:   index,
			HashedValue: leafHash,
		})
	}

	if err := w.SetLeaves(ctx, leaves); err != nil {
		t.Fatalf("Got error adding leaves: %v", err)
	}
	root, err := w.CalculateRoot()
	if err != nil {
		t.Fatalf("Failed to calculate root: %v", err)
	}
	if got, want := root, vec.expectedRoot; !bytes.Equal(got, want) {
		t.Errorf("got root: %x, want %x", got, want)
	}
}

func TestSparseMerkleTreeWriterEmptyTree(t *testing.T) {
	testSparseTreeCalculatedRoot(
		context.Background(),
		t,
		sparseTestVector{
			kv:           []sparseKeyValue{},
			expectedRoot: testonly.MustDecodeBase64("xmifEIEqCYCXbZUz2Dh1KCFmFZVn7DUVVxbBQTr1PWo="),
		})
}

func TestSparseMerkleTreeWriter(t *testing.T) {
	vec := sparseTestVector{
		[]sparseKeyValue{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}},
		testonly.MustDecodeBase64("Ms8A+VeDImofprfgq7Hoqh9cw+YrD/P/qibTmCm5JvQ="),
	}
	testSparseTreeCalculatedRoot(context.Background(), t, vec)
}

type nodeIDFuncMatcher struct {
	f func(ids []storage.NodeID) bool
}

func (f nodeIDFuncMatcher) Matches(x interface{}) bool {
	n, ok := x.([]storage.NodeID)
	if !ok {
		return false
	}
	return f.f(n)
}

func (f nodeIDFuncMatcher) String() string {
	return "matches function"
}

func testSparseTreeFetches(ctx context.Context, t *testing.T, vec sparseTestVector) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	const rev = 100
	w, tx := getSparseMerkleTreeWriterWithMockTX(ctx, mockCtrl, treeID, rev)
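
	// The expected reads and writes are tracked in the string-keyed maps set up
	// below: every expected node ID starts as "unmet", is flipped to "met" when
	// the mock sees it, and anything the mock sees that was never expected is
	// recorded as "unexpected" or "duplicate". nonMatching() reports whatever
	// did not end up "met".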
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().Close().AnyTimes().Return(nil)

	reads := make(map[string]string)
	readMutex := sync.Mutex{}
	var leafNodeIDs []storage.NodeID

	{
		readMutex.Lock()

		// calculate the set of expected node reads.
		for _, kv := range vec.kv {
			keyHash := testonly.HashKey(kv.k)
			nodeID := storage.NewNodeIDFromHash(keyHash)
			leafNodeIDs = append(leafNodeIDs, nodeID)
			sibs := nodeID.Siblings()

			// start with the set of siblings of all leaves:
			for j := range sibs {
				j := j
				id := sibs[j].String()
				pathNode := nodeID.String()[:len(id)]
				if _, ok := reads[pathNode]; ok {
					// Two keys intersect at this node, so both of its children will be
					// recalculated and neither will be read from storage. Remove the
					// previously set expectation for this node's sibling, and skip
					// adding one for this node:
					delete(reads, pathNode)
					continue
				}
				reads[sibs[j].String()] = "unmet"
			}
		}

		// Next, remove any expectations for leaf-siblings which also happen to be
		// one of the keys being set by the test vector (unlikely, but possible):
		for i := range leafNodeIDs {
			delete(reads, leafNodeIDs[i].String())
		}

		readMutex.Unlock()
	}

	// Now, set up a mock call for GetMerkleNodes for the nodeIDs in the map
	// we've just created:
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), nodeIDFuncMatcher{func(ids []storage.NodeID) bool {
		if len(ids) == 0 {
			return false
		}
		readMutex.Lock()
		defer readMutex.Unlock()

		state, ok := reads[ids[0].String()]
		reads[ids[0].String()] = "met"
		return ok && state == "unmet"
	}}).AnyTimes().Return([]storage.Node{}, nil)

	// Now add a general catch-all for any unexpected calls. If we don't do this
	// it'll panic() with an unhelpful message on the first unexpected nodeID, so
	// rather than doing that we'll make a note of all the unexpected IDs here
	// instead, and we can then print them out later on.
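	// Because this catch-all returns empty results rather than failing
	// immediately, the nonMatching() checks at the end of the test can report
	// every offending node ID in one go.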
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).AnyTimes().Do(
		func(_ context.Context, _ int64, a []storage.NodeID) {
			if a == nil {
				return
			}

			readMutex.Lock()
			defer readMutex.Unlock()

			for i := range a {
				reads[a[i].String()] = "unexpected"
			}
		}).Return([]storage.Node{}, nil)

	// Figure out which nodes should be written:
	writes := make(map[string]string)
	writeMutex := sync.Mutex{}

	{
		writeMutex.Lock()
		for i := range leafNodeIDs {
			s := leafNodeIDs[i].String()
			for x := 0; x <= len(s); x++ {
				writes[s[:x]] = "unmet"
			}
		}
		writeMutex.Unlock()
	}

	tx.EXPECT().SetMerkleNodes(ctx, gomock.Any()).AnyTimes().Do(
		func(_ context.Context, a []storage.Node) {
			writeMutex.Lock()
			defer writeMutex.Unlock()
			if a == nil {
				return
			}
			for i := range a {
				id := a[i].NodeID.String()
				state, ok := writes[id]
				switch {
				case !ok:
					writes[id] = "unexpected"
				case state == "unmet":
					writes[id] = "met"
				default:
					writes[id] = "duplicate"
				}
			}
		}).Return(nil)

	testSparseTreeCalculatedRootWithWriter(ctx, t, rev, vec, w)

	{
		readMutex.Lock()
		n, s := nonMatching(reads, "met")
		// Fail if there are any nodes which we expected to be read but weren't, or vice-versa:
		if n != 0 {
			t.Fatalf("saw unexpected/unmet calls to GetMerkleNodes for the following nodeIDs:\n%s", s)
		}
		readMutex.Unlock()
	}

	{
		writeMutex.Lock()
		n, s := nonMatching(writes, "met")
		// Fail if there are any nodes which we expected to be written but weren't, or vice-versa:
		if n != 0 {
			t.Fatalf("saw unexpected/unmet calls to SetMerkleNodes for the following nodeIDs:\n%s", s)
		}
		writeMutex.Unlock()
	}
}

func nonMatching(m map[string]string, needle string) (int, string) {
	s := ""
	n := 0
	for k, v := range m {
		if v != needle {
			s += fmt.Sprintf("%s: %s\n", k, v)
			n++
		}
	}
	return n, s
}

func TestSparseMerkleTreeWriterFetchesSingleLeaf(t *testing.T) {
	vec := sparseTestVector{
		[]sparseKeyValue{{"key1", "value1"}},
		testonly.MustDecodeBase64("PPI818D5CiUQQMZulH58LikjxeOFWw2FbnGM0AdVHWA="),
	}
	testSparseTreeFetches(context.Background(), t, vec)
}

func TestSparseMerkleTreeWriterFetchesMultipleLeaves(t *testing.T) {
	vec := sparseTestVector{
		[]sparseKeyValue{{"key1", "value1"}, {"key2", "value2"}, {"key3", "value3"}},
		testonly.MustDecodeBase64("Ms8A+VeDImofprfgq7Hoqh9cw+YrD/P/qibTmCm5JvQ="),
	}
	testSparseTreeFetches(context.Background(), t, vec)
}

func TestSparseMerkleTreeWriterBigBatch(t *testing.T) {
	t.Skip("Disabled: BigBatch takes too long")
	ctx := context.Background()

	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	defer maybeProfileCPU(t)()
	const rev = 100
	w, tx := getSparseMerkleTreeWriterWithMockTX(ctx, mockCtrl, treeID, rev)

	tx.EXPECT().Close().AnyTimes().Return(nil)
	tx.EXPECT().Commit().AnyTimes().Return(nil)
	tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).AnyTimes().Return([]storage.Node{}, nil)
	tx.EXPECT().SetMerkleNodes(ctx, gomock.Any()).AnyTimes().Return(nil)

	const batchSize = 1024
	const numBatches = 4
	for x := 0; x < numBatches; x++ {
		h := make([]HashKeyValue, batchSize)
		for y := 0; y < batchSize; y++ {
			index := testonly.HashKey(fmt.Sprintf("key-%d-%d", x, y))
			leafHash, err := w.hasher.HashLeaf(treeID, index, []byte(fmt.Sprintf("value-%d-%d", x, y)))
			if err != nil {
				t.Fatalf("HashLeaf(): %v", err)
			}
			h[y].HashedKey = index
			h[y].HashedValue = leafHash
		}
		if err := w.SetLeaves(ctx, h); err != nil {
			t.Fatalf("Failed to batch %d: %v", x, err)
		}
	}
	root, err := w.CalculateRoot()
	if err != nil {
		t.Fatalf("Failed to calculate root hash: %v", err)
	}

	// Calculated using Python code.
	const expectedRootB64 = "Av30xkERsepT6F/AgbZX3sp91TUmV1TKaXE6QPFfUZA="
	if expected, got := testonly.MustDecodeBase64(expectedRootB64), root; !bytes.Equal(expected, got) {
		// Error, not Fatal so that we get our benchmark results regardless of the
		// result - useful if you want to up the amount of data without having to
		// figure out the expected root!
		t.Errorf("Expected root %s, got root: %s", base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(got))
	}
	maybeProfileMemory(t)
}
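
// Note: maybeProfileCPU and maybeProfileMemory (defined near the top of this
// file) are only wired into TestSparseMerkleTreeWriterBigBatch, so the
// cpuprofile/memprofile flags have no effect on the other tests in this file.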