github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/core/ledger/kvledger/txmgmt/txmgr/txmgr_test.go (about) 1 /* 2 Copyright hechain. All Rights Reserved. 3 4 SPDX-License-Identifier: Apache-2.0 5 */ 6 7 package txmgr 8 9 import ( 10 "bytes" 11 "encoding/gob" 12 "encoding/json" 13 "fmt" 14 "strings" 15 "testing" 16 17 "github.com/hyperledger/fabric-protos-go/peer" 18 19 "github.com/golang/protobuf/proto" 20 "github.com/hechain20/hechain/common/ledger/testutil" 21 "github.com/hechain20/hechain/core/ledger" 22 "github.com/hechain20/hechain/core/ledger/internal/version" 23 "github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/privacyenabledstate" 24 "github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/rwsetutil" 25 btltestutil "github.com/hechain20/hechain/core/ledger/pvtdatapolicy/testutil" 26 "github.com/hechain20/hechain/core/ledger/util" 27 "github.com/hyperledger/fabric-protos-go/ledger/queryresult" 28 "github.com/hyperledger/fabric-protos-go/ledger/rwset" 29 "github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset" 30 "github.com/stretchr/testify/require" 31 ) 32 33 func TestTxSimulatorWithNoExistingData(t *testing.T) { 34 // run the tests for each environment configured in pkg_test.go 35 for _, testEnv := range testEnvs { 36 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 37 testLedgerID := "testtxsimulatorwithnoexistingdata" 38 testEnv.init(t, testLedgerID, nil) 39 testTxSimulatorWithNoExistingData(t, testEnv) 40 testEnv.cleanup() 41 } 42 } 43 44 func testTxSimulatorWithNoExistingData(t *testing.T, env testEnv) { 45 txMgr := env.getTxMgr() 46 s, _ := txMgr.NewTxSimulator("test_txid") 47 value, err := s.GetState("ns1", "key1") 48 require.NoErrorf(t, err, "Error in GetState(): %s", err) 49 require.Nil(t, value) 50 51 require.NoError(t, s.SetState("ns1", "key1", []byte("value1"))) 52 require.NoError(t, s.SetState("ns1", "key2", []byte("value2"))) 53 require.NoError(t, s.SetState("ns2", "key3", []byte("value3"))) 54 
require.NoError(t, s.SetState("ns2", "key4", []byte("value4"))) 55 56 value, _ = s.GetState("ns2", "key3") 57 require.Nil(t, value) 58 59 simulationResults, err := s.GetTxSimulationResults() 60 require.NoError(t, err) 61 require.Nil(t, simulationResults.PvtSimulationResults) 62 } 63 64 func TestTxSimulatorGetResults(t *testing.T) { 65 testEnv := testEnvsMap[levelDBtestEnvName] 66 testEnv.init(t, "testLedger", nil) 67 defer testEnv.cleanup() 68 txMgr := testEnv.getTxMgr() 69 populateCollConfigForTest(t, txMgr, 70 []collConfigkey{ 71 {"ns1", "coll1"}, 72 {"ns1", "coll3"}, 73 {"ns2", "coll2"}, 74 {"ns3", "coll3"}, 75 }, 76 version.NewHeight(1, 1), 77 ) 78 79 var err error 80 81 // Create a simulator and get/set keys in one namespace "ns1" 82 simulator, _ := testEnv.getTxMgr().NewTxSimulator("test_txid1") 83 _, err = simulator.GetState("ns1", "key1") 84 require.NoError(t, err) 85 _, err = simulator.GetPrivateData("ns1", "coll1", "key1") 86 require.NoError(t, err) 87 err = simulator.SetState("ns1", "key1", []byte("value1")) 88 require.NoError(t, err) 89 // get simulation results and verify that this contains rwset only for one namespace 90 simulationResults1, err := simulator.GetTxSimulationResults() 91 require.NoError(t, err) 92 require.Len(t, simulationResults1.PubSimulationResults.NsRwset, 1) 93 // verify the Private Read has been captured 94 expectedPrivateReads := ledger.PrivateReads{} 95 expectedPrivateReads.Add("ns1", "coll1") 96 require.Equal(t, expectedPrivateReads, simulationResults1.PrivateReads) 97 // clone freeze simulationResults1 98 buff1 := new(bytes.Buffer) 99 require.NoError(t, gob.NewEncoder(buff1).Encode(simulationResults1)) 100 frozenSimulationResults1 := &ledger.TxSimulationResults{} 101 require.NoError(t, gob.NewDecoder(buff1).Decode(&frozenSimulationResults1)) 102 103 // use the same simulator after obtaining the simulation results by get/set keys in one more namespace "ns2" 104 _, err = simulator.GetState("ns2", "key2") 105 require.NoError(t, 
err) 106 _, err = simulator.GetPrivateData("ns2", "coll2", "key2") 107 require.NoError(t, err) 108 err = simulator.SetState("ns2", "key2", []byte("value2")) 109 require.NoError(t, err) 110 // get simulation results and verify that an error is raised when obtaining the simulation results more than once 111 _, err = simulator.GetTxSimulationResults() 112 require.Error(t, err) // calling 'GetTxSimulationResults()' more than once should raise error 113 // Now, verify that the simulator operations did not have an effect on previously obtained results 114 require.Equal(t, frozenSimulationResults1, simulationResults1) 115 116 // Call 'Done' and all the data get/set operations after calling 'Done' should fail. 117 simulator.Done() 118 _, err = simulator.GetState("ns3", "key3") 119 require.Errorf(t, err, "An error is expected when using simulator to get/set data after calling `Done` function()") 120 err = simulator.SetState("ns3", "key3", []byte("value3")) 121 require.Errorf(t, err, "An error is expected when using simulator to get/set data after calling `Done` function()") 122 _, err = simulator.GetPrivateData("ns3", "coll3", "key3") 123 require.Errorf(t, err, "An error is expected when using simulator to get/set data after calling `Done` function()") 124 err = simulator.SetPrivateData("ns3", "coll3", "key3", []byte("value3")) 125 require.Errorf(t, err, "An error is expected when using simulator to get/set data after calling `Done` function()") 126 } 127 128 func TestTxSimulatorWithExistingData(t *testing.T) { 129 for _, testEnv := range testEnvs { 130 t.Run(testEnv.getName(), func(t *testing.T) { 131 testLedgerID := "testtxsimulatorwithexistingdata" 132 testEnv.init(t, testLedgerID, nil) 133 testTxSimulatorWithExistingData(t, testEnv) 134 testEnv.cleanup() 135 }) 136 } 137 } 138 139 func testTxSimulatorWithExistingData(t *testing.T, env testEnv) { 140 txMgr := env.getTxMgr() 141 txMgrHelper := newTxMgrTestHelper(t, txMgr) 142 // simulate tx1 143 s1, _ := 
txMgr.NewTxSimulator("test_tx1") 144 require.NoError(t, s1.SetState("ns1", "key1", []byte("value1"))) 145 require.NoError(t, s1.SetState("ns1", "key2", []byte("value2"))) 146 require.NoError(t, s1.SetState("ns2", "key3", []byte("value3"))) 147 require.NoError(t, s1.SetState("ns2", "key4", []byte("value4"))) 148 s1.Done() 149 // validate and commit RWset 150 txRWSet1, _ := s1.GetTxSimulationResults() 151 txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults) 152 153 // simulate tx2 that make changes to existing data 154 s2, _ := txMgr.NewTxSimulator("test_tx2") 155 value, _ := s2.GetState("ns1", "key1") 156 require.Equal(t, []byte("value1"), value) 157 require.NoError(t, s2.SetState("ns1", "key1", []byte("value1_1"))) 158 require.NoError(t, s2.DeleteState("ns2", "key3")) 159 value, err := s2.GetState("ns1", "key1") 160 require.NoError(t, err) 161 require.Equal(t, []byte("value1"), value) 162 s2.Done() 163 // validate and commit RWset for tx2 164 txRWSet2, err := s2.GetTxSimulationResults() 165 require.NoError(t, err) 166 txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults) 167 168 // simulate tx3 169 s3, err := txMgr.NewTxSimulator("test_tx3") 170 require.NoError(t, err) 171 value, err = s3.GetState("ns1", "key1") 172 require.NoError(t, err) 173 require.Equal(t, []byte("value1_1"), value) 174 value, err = s3.GetState("ns2", "key3") 175 require.NoError(t, err) 176 require.Nil(t, value) 177 s3.Done() 178 179 // verify the versions of keys in persistence 180 vv, _ := env.getVDB().GetState("ns1", "key1") 181 require.Equal(t, version.NewHeight(2, 0), vv.Version) 182 vv, _ = env.getVDB().GetState("ns1", "key2") 183 require.Equal(t, version.NewHeight(1, 0), vv.Version) 184 } 185 186 func TestTxValidation(t *testing.T) { 187 for _, testEnv := range testEnvs { 188 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 189 testLedgerID := "testtxvalidation" 190 testEnv.init(t, testLedgerID, nil) 191 testTxValidation(t, testEnv) 192 
testEnv.cleanup() 193 } 194 } 195 196 func testTxValidation(t *testing.T, env testEnv) { 197 txMgr := env.getTxMgr() 198 txMgrHelper := newTxMgrTestHelper(t, txMgr) 199 // simulate tx1 200 s1, _ := txMgr.NewTxSimulator("test_tx1") 201 require.NoError(t, s1.SetState("ns1", "key1", []byte("value1"))) 202 require.NoError(t, s1.SetState("ns1", "key2", []byte("value2"))) 203 require.NoError(t, s1.SetState("ns2", "key3", []byte("value3"))) 204 require.NoError(t, s1.SetState("ns2", "key4", []byte("value4"))) 205 s1.Done() 206 // validate and commit RWset 207 txRWSet1, _ := s1.GetTxSimulationResults() 208 txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults) 209 210 // simulate tx2 that make changes to existing data. 211 // tx2: Read/Update ns1:key1, Delete ns2:key3. 212 s2, _ := txMgr.NewTxSimulator("test_tx2") 213 value, _ := s2.GetState("ns1", "key1") 214 require.Equal(t, []byte("value1"), value) 215 216 require.NoError(t, s2.SetState("ns1", "key1", []byte("value1_2"))) 217 require.NoError(t, s2.DeleteState("ns2", "key3")) 218 s2.Done() 219 220 // simulate tx3 before committing tx2 changes. Reads and modifies the key changed by tx2. 221 // tx3: Read/Update ns1:key1 222 s3, _ := txMgr.NewTxSimulator("test_tx3") 223 _, err := s3.GetState("ns1", "key1") 224 require.NoError(t, err) 225 require.NoError(t, s3.SetState("ns1", "key1", []byte("value1_3"))) 226 s3.Done() 227 228 // simulate tx4 before committing tx2 changes. Reads and Deletes the key changed by tx2 229 // tx4: Read/Delete ns2:key3 230 s4, _ := txMgr.NewTxSimulator("test_tx4") 231 _, err = s4.GetState("ns2", "key3") 232 require.NoError(t, err) 233 require.NoError(t, s4.DeleteState("ns2", "key3")) 234 s4.Done() 235 236 // simulate tx5 before committing tx2 changes. 
Modifies and then Reads the key changed by tx2 and writes a new key 237 // tx5: Update/Read ns1:key1 238 s5, err := txMgr.NewTxSimulator("test_tx5") 239 require.NoError(t, err) 240 require.NoError(t, s5.SetState("ns1", "key1", []byte("new_value"))) 241 _, err = s5.GetState("ns1", "key1") 242 require.NoError(t, err) 243 s5.Done() 244 245 // simulate tx6 before committing tx2 changes. Only writes a new key, does not reads/writes a key changed by tx2 246 // tx6: Update ns1:new_key 247 s6, _ := txMgr.NewTxSimulator("test_tx6") 248 require.NoError(t, s6.SetState("ns1", "new_key", []byte("new_value"))) 249 s6.Done() 250 251 // Summary of simulated transactions 252 // tx2: Read/Update ns1:key1, Delete ns2:key3. 253 // tx3: Read/Update ns1:key1 254 // tx4: Read/Delete ns2:key3 255 // tx5: Update/Read ns1:key1 256 // tx6: Update ns1:new_key 257 258 // validate and commit RWset for tx2 259 txRWSet2, _ := s2.GetTxSimulationResults() 260 txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults) 261 262 // RWSet for tx3 and tx4 and tx5 should be invalid now due to read conflicts 263 txRWSet3, _ := s3.GetTxSimulationResults() 264 txMgrHelper.checkRWsetInvalid(txRWSet3.PubSimulationResults) 265 266 txRWSet4, _ := s4.GetTxSimulationResults() 267 txMgrHelper.checkRWsetInvalid(txRWSet4.PubSimulationResults) 268 269 txRWSet5, _ := s5.GetTxSimulationResults() 270 txMgrHelper.checkRWsetInvalid(txRWSet5.PubSimulationResults) 271 272 // tx6 should still be valid as it only writes a new key 273 txRWSet6, _ := s6.GetTxSimulationResults() 274 txMgrHelper.validateAndCommitRWSet(txRWSet6.PubSimulationResults) 275 } 276 277 func TestTxPhantomValidation(t *testing.T) { 278 for _, testEnv := range testEnvs { 279 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 280 testLedgerID := "testtxphantomvalidation" 281 testEnv.init(t, testLedgerID, nil) 282 testTxPhantomValidation(t, testEnv) 283 testEnv.cleanup() 284 } 285 } 286 287 func testTxPhantomValidation(t *testing.T, env 
testEnv) { 288 txMgr := env.getTxMgr() 289 txMgrHelper := newTxMgrTestHelper(t, txMgr) 290 // simulate tx1 291 s1, _ := txMgr.NewTxSimulator("test_tx1") 292 require.NoError(t, s1.SetState("ns", "key1", []byte("value1"))) 293 require.NoError(t, s1.SetState("ns", "key2", []byte("value2"))) 294 require.NoError(t, s1.SetState("ns", "key3", []byte("value3"))) 295 require.NoError(t, s1.SetState("ns", "key4", []byte("value4"))) 296 require.NoError(t, s1.SetState("ns", "key5", []byte("value5"))) 297 require.NoError(t, s1.SetState("ns", "key6", []byte("value6"))) 298 // validate and commit RWset 299 txRWSet1, _ := s1.GetTxSimulationResults() 300 s1.Done() // explicitly calling done after obtaining the results to verify FAB-10788 301 txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults) 302 303 // simulate tx2 304 s2, _ := txMgr.NewTxSimulator("test_tx2") 305 itr2, _ := s2.GetStateRangeScanIterator("ns", "key2", "key5") 306 for { 307 if result, _ := itr2.Next(); result == nil { 308 break 309 } 310 } 311 require.NoError(t, s2.DeleteState("ns", "key3")) 312 txRWSet2, err := s2.GetTxSimulationResults() 313 require.NoError(t, err) 314 s2.Done() 315 316 // simulate tx3 317 s3, err := txMgr.NewTxSimulator("test_tx3") 318 require.NoError(t, err) 319 itr3, err := s3.GetStateRangeScanIterator("ns", "key2", "key5") 320 require.NoError(t, err) 321 for { 322 if result, _ := itr3.Next(); result == nil { 323 break 324 } 325 } 326 require.NoError(t, s3.SetState("ns", "key3", []byte("value3_new"))) 327 txRWSet3, _ := s3.GetTxSimulationResults() 328 s3.Done() 329 // simulate tx4 330 s4, _ := txMgr.NewTxSimulator("test_tx4") 331 itr4, _ := s4.GetStateRangeScanIterator("ns", "key4", "key6") 332 for { 333 if result, _ := itr4.Next(); result == nil { 334 break 335 } 336 } 337 require.NoError(t, s4.SetState("ns", "key3", []byte("value3_new"))) 338 txRWSet4, _ := s4.GetTxSimulationResults() 339 s4.Done() 340 341 // txRWSet2 should be valid 342 
txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults) 343 // txRWSet2 makes txRWSet3 invalid as it deletes a key in the range 344 txMgrHelper.checkRWsetInvalid(txRWSet3.PubSimulationResults) 345 // txRWSet4 should be valid as it iterates over a different range 346 txMgrHelper.validateAndCommitRWSet(txRWSet4.PubSimulationResults) 347 } 348 349 func TestIterator(t *testing.T) { 350 for _, testEnv := range testEnvs { 351 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 352 353 testLedgerID := "testiterator.1" 354 testEnv.init(t, testLedgerID, nil) 355 testIterator(t, testEnv, 10, 2, 7) 356 testEnv.cleanup() 357 358 testLedgerID = "testiterator.2" 359 testEnv.init(t, testLedgerID, nil) 360 testIterator(t, testEnv, 10, 1, 11) 361 testEnv.cleanup() 362 363 testLedgerID = "testiterator.3" 364 testEnv.init(t, testLedgerID, nil) 365 testIterator(t, testEnv, 10, 0, 0) 366 testEnv.cleanup() 367 368 testLedgerID = "testiterator.4" 369 testEnv.init(t, testLedgerID, nil) 370 testIterator(t, testEnv, 10, 5, 0) 371 testEnv.cleanup() 372 373 testLedgerID = "testiterator.5" 374 testEnv.init(t, testLedgerID, nil) 375 testIterator(t, testEnv, 10, 0, 5) 376 testEnv.cleanup() 377 } 378 } 379 380 func testIterator(t *testing.T, env testEnv, numKeys int, startKeyNum int, endKeyNum int) { 381 cID := "cid" 382 txMgr := env.getTxMgr() 383 txMgrHelper := newTxMgrTestHelper(t, txMgr) 384 s, _ := txMgr.NewTxSimulator("test_tx1") 385 for i := 1; i <= numKeys; i++ { 386 k := createTestKey(i) 387 v := createTestValue(i) 388 t.Logf("Adding k=[%s], v=[%s]", k, v) 389 require.NoError(t, s.SetState(cID, k, v)) 390 } 391 s.Done() 392 // validate and commit RWset 393 txRWSet, _ := s.GetTxSimulationResults() 394 txMgrHelper.validateAndCommitRWSet(txRWSet.PubSimulationResults) 395 396 var startKey string 397 var endKey string 398 var begin int 399 var end int 400 401 if startKeyNum != 0 { 402 begin = startKeyNum 403 startKey = createTestKey(startKeyNum) 404 } else { 405 begin = 1 // 
first key in the db 406 startKey = "" 407 } 408 409 if endKeyNum != 0 { 410 endKey = createTestKey(endKeyNum) 411 end = endKeyNum 412 } else { 413 endKey = "" 414 end = numKeys + 1 // last key in the db 415 } 416 417 expectedCount := end - begin 418 419 queryExecuter, _ := txMgr.NewQueryExecutor("test_tx2") 420 itr, _ := queryExecuter.GetStateRangeScanIterator(cID, startKey, endKey) 421 count := 0 422 for { 423 kv, _ := itr.Next() 424 if kv == nil { 425 break 426 } 427 keyNum := begin + count 428 k := kv.(*queryresult.KV).Key 429 v := kv.(*queryresult.KV).Value 430 t.Logf("Retrieved k=%s, v=%s at count=%d start=%s end=%s", k, v, count, startKey, endKey) 431 require.Equal(t, createTestKey(keyNum), k) 432 require.Equal(t, createTestValue(keyNum), v) 433 count++ 434 } 435 require.Equal(t, expectedCount, count) 436 } 437 438 func TestIteratorPaging(t *testing.T) { 439 for _, testEnv := range testEnvs { 440 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 441 442 // test explicit paging 443 testLedgerID := "testiterator.1" 444 testEnv.init(t, testLedgerID, nil) 445 testIteratorPagingInit(t, testEnv, 10) 446 returnKeys := []string{"key_002", "key_003"} 447 nextStartKey := testIteratorPaging(t, testEnv, 10, "key_002", "key_007", int32(2), returnKeys) 448 returnKeys = []string{"key_004", "key_005"} 449 nextStartKey = testIteratorPaging(t, testEnv, 10, nextStartKey, "key_007", int32(2), returnKeys) 450 returnKeys = []string{"key_006"} 451 testIteratorPaging(t, testEnv, 10, nextStartKey, "key_007", int32(2), returnKeys) 452 testEnv.cleanup() 453 } 454 } 455 456 func testIteratorPagingInit(t *testing.T, env testEnv, numKeys int) { 457 cID := "cid" 458 txMgr := env.getTxMgr() 459 txMgrHelper := newTxMgrTestHelper(t, txMgr) 460 s, _ := txMgr.NewTxSimulator("test_tx1") 461 for i := 1; i <= numKeys; i++ { 462 k := createTestKey(i) 463 v := createTestValue(i) 464 t.Logf("Adding k=[%s], v=[%s]", k, v) 465 require.NoError(t, s.SetState(cID, k, v)) 466 } 467 s.Done() 468 // 
validate and commit RWset 469 txRWSet, _ := s.GetTxSimulationResults() 470 txMgrHelper.validateAndCommitRWSet(txRWSet.PubSimulationResults) 471 } 472 473 func testIteratorPaging(t *testing.T, env testEnv, numKeys int, startKey, endKey string, 474 pageSize int32, expectedKeys []string) string { 475 cID := "cid" 476 txMgr := env.getTxMgr() 477 478 queryExecuter, _ := txMgr.NewQueryExecutor("test_tx2") 479 itr, _ := queryExecuter.GetStateRangeScanIteratorWithPagination(cID, startKey, endKey, pageSize) 480 481 // Verify the keys returned 482 testItrWithoutClose(t, itr, expectedKeys) 483 484 returnBookmark := "" 485 if pageSize > 0 { 486 returnBookmark = itr.GetBookmarkAndClose() 487 } 488 489 return returnBookmark 490 } 491 492 // testItrWithoutClose verifies an iterator contains expected keys 493 func testItrWithoutClose(t *testing.T, itr ledger.QueryResultsIterator, expectedKeys []string) { 494 for _, expectedKey := range expectedKeys { 495 queryResult, err := itr.Next() 496 require.NoError(t, err, "An unexpected error was thrown during iterator Next()") 497 vkv := queryResult.(*queryresult.KV) 498 key := vkv.Key 499 require.Equal(t, expectedKey, key) 500 } 501 queryResult, err := itr.Next() 502 require.NoError(t, err, "An unexpected error was thrown during iterator Next()") 503 require.Nil(t, queryResult) 504 } 505 506 func TestIteratorWithDeletes(t *testing.T) { 507 for _, testEnv := range testEnvs { 508 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 509 testLedgerID := "testiteratorwithdeletes" 510 testEnv.init(t, testLedgerID, nil) 511 testIteratorWithDeletes(t, testEnv) 512 testEnv.cleanup() 513 } 514 } 515 516 func testIteratorWithDeletes(t *testing.T, env testEnv) { 517 cID := "cid" 518 txMgr := env.getTxMgr() 519 txMgrHelper := newTxMgrTestHelper(t, txMgr) 520 s, _ := txMgr.NewTxSimulator("test_tx1") 521 for i := 1; i <= 10; i++ { 522 k := createTestKey(i) 523 v := createTestValue(i) 524 t.Logf("Adding k=[%s], v=[%s]", k, v) 525 require.NoError(t, 
s.SetState(cID, k, v)) 526 } 527 s.Done() 528 // validate and commit RWset 529 txRWSet1, _ := s.GetTxSimulationResults() 530 txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults) 531 532 s, _ = txMgr.NewTxSimulator("test_tx2") 533 require.NoError(t, s.DeleteState(cID, createTestKey(4))) 534 s.Done() 535 // validate and commit RWset 536 txRWSet2, _ := s.GetTxSimulationResults() 537 txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults) 538 539 queryExecuter, _ := txMgr.NewQueryExecutor("test_tx3") 540 itr, _ := queryExecuter.GetStateRangeScanIterator(cID, createTestKey(3), createTestKey(6)) 541 defer itr.Close() 542 kv, _ := itr.Next() 543 require.Equal(t, createTestKey(3), kv.(*queryresult.KV).Key) 544 kv, _ = itr.Next() 545 require.Equal(t, createTestKey(5), kv.(*queryresult.KV).Key) 546 } 547 548 func TestTxValidationWithItr(t *testing.T) { 549 for _, testEnv := range testEnvs { 550 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 551 testLedgerID := "testtxvalidationwithitr" 552 testEnv.init(t, testLedgerID, nil) 553 testTxValidationWithItr(t, testEnv) 554 testEnv.cleanup() 555 } 556 } 557 558 func testTxValidationWithItr(t *testing.T, env testEnv) { 559 cID := "cid" 560 txMgr := env.getTxMgr() 561 txMgrHelper := newTxMgrTestHelper(t, txMgr) 562 563 // simulate tx1 564 s1, _ := txMgr.NewTxSimulator("test_tx1") 565 for i := 1; i <= 10; i++ { 566 k := createTestKey(i) 567 v := createTestValue(i) 568 t.Logf("Adding k=[%s], v=[%s]", k, v) 569 require.NoError(t, s1.SetState(cID, k, v)) 570 } 571 s1.Done() 572 // validate and commit RWset 573 txRWSet1, _ := s1.GetTxSimulationResults() 574 txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults) 575 576 // simulate tx2 that reads key_001 and key_002 577 s2, _ := txMgr.NewTxSimulator("test_tx2") 578 itr, _ := s2.GetStateRangeScanIterator(cID, createTestKey(1), createTestKey(5)) 579 // read key_001 and key_002 580 _, err := itr.Next() 581 require.NoError(t, err) 582 _, err = 
itr.Next() 583 require.NoError(t, err) 584 itr.Close() 585 s2.Done() 586 587 // simulate tx3 that reads key_004 and key_005 588 s3, _ := txMgr.NewTxSimulator("test_tx3") 589 itr, _ = s3.GetStateRangeScanIterator(cID, createTestKey(4), createTestKey(6)) 590 // read key_001 and key_002 591 _, err = itr.Next() 592 require.NoError(t, err) 593 _, err = itr.Next() 594 require.NoError(t, err) 595 itr.Close() 596 s3.Done() 597 598 // simulate tx4 before committing tx2 and tx3. Modifies a key read by tx3 599 s4, _ := txMgr.NewTxSimulator("test_tx4") 600 require.NoError(t, s4.DeleteState(cID, createTestKey(5))) 601 s4.Done() 602 603 // validate and commit RWset for tx4 604 txRWSet4, _ := s4.GetTxSimulationResults() 605 txMgrHelper.validateAndCommitRWSet(txRWSet4.PubSimulationResults) 606 607 // RWSet tx3 should be invalid now 608 txRWSet3, _ := s3.GetTxSimulationResults() 609 txMgrHelper.checkRWsetInvalid(txRWSet3.PubSimulationResults) 610 611 // tx2 should still be valid 612 txRWSet2, _ := s2.GetTxSimulationResults() 613 txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults) 614 } 615 616 func TestGetSetMultipeKeys(t *testing.T) { 617 for _, testEnv := range testEnvs { 618 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 619 testLedgerID := "testgetsetmultipekeys" 620 testEnv.init(t, testLedgerID, nil) 621 testGetSetMultipeKeys(t, testEnv) 622 testEnv.cleanup() 623 } 624 } 625 626 func testGetSetMultipeKeys(t *testing.T, env testEnv) { 627 cID := "cid" 628 txMgr := env.getTxMgr() 629 txMgrHelper := newTxMgrTestHelper(t, txMgr) 630 // simulate tx1 631 s1, _ := txMgr.NewTxSimulator("test_tx1") 632 multipleKeyMap := make(map[string][]byte) 633 for i := 1; i <= 10; i++ { 634 k := createTestKey(i) 635 v := createTestValue(i) 636 multipleKeyMap[k] = v 637 } 638 require.NoError(t, s1.SetStateMultipleKeys(cID, multipleKeyMap)) 639 s1.Done() 640 // validate and commit RWset 641 txRWSet, _ := s1.GetTxSimulationResults() 642 
txMgrHelper.validateAndCommitRWSet(txRWSet.PubSimulationResults) 643 qe, _ := txMgr.NewQueryExecutor("test_tx2") 644 defer qe.Done() 645 multipleKeys := []string{} 646 for k := range multipleKeyMap { 647 multipleKeys = append(multipleKeys, k) 648 } 649 values, _ := qe.GetStateMultipleKeys(cID, multipleKeys) 650 require.Len(t, values, 10) 651 for i, v := range values { 652 require.Equal(t, multipleKeyMap[multipleKeys[i]], v) 653 } 654 655 s2, _ := txMgr.NewTxSimulator("test_tx3") 656 defer s2.Done() 657 values, _ = s2.GetStateMultipleKeys(cID, multipleKeys[5:7]) 658 require.Len(t, values, 2) 659 for i, v := range values { 660 require.Equal(t, multipleKeyMap[multipleKeys[i+5]], v) 661 } 662 } 663 664 func createTestKey(i int) string { 665 if i == 0 { 666 return "" 667 } 668 return fmt.Sprintf("key_%03d", i) 669 } 670 671 func createTestValue(i int) []byte { 672 return []byte(fmt.Sprintf("value_%03d", i)) 673 } 674 675 // TestExecuteQuery is only tested on the CouchDB testEnv 676 func TestExecuteQuery(t *testing.T) { 677 for _, testEnv := range testEnvs { 678 // Query is only supported and tested on the CouchDB testEnv 679 if testEnv.getName() == couchDBtestEnvName { 680 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 681 testLedgerID := "testexecutequery" 682 testEnv.init(t, testLedgerID, nil) 683 testExecuteQuery(t, testEnv) 684 testEnv.cleanup() 685 } 686 } 687 } 688 689 func testExecuteQuery(t *testing.T, env testEnv) { 690 type Asset struct { 691 ID string `json:"_id"` 692 Rev string `json:"_rev"` 693 AssetName string `json:"asset_name"` 694 Color string `json:"color"` 695 Size string `json:"size"` 696 Owner string `json:"owner"` 697 } 698 699 txMgr := env.getTxMgr() 700 txMgrHelper := newTxMgrTestHelper(t, txMgr) 701 702 s1, _ := txMgr.NewTxSimulator("test_tx1") 703 704 require.NoError(t, s1.SetState("ns1", "key1", []byte("value1"))) 705 require.NoError(t, s1.SetState("ns1", "key2", []byte("value2"))) 706 require.NoError(t, s1.SetState("ns1", "key3", 
[]byte("value3"))) 707 require.NoError(t, s1.SetState("ns1", "key4", []byte("value4"))) 708 require.NoError(t, s1.SetState("ns1", "key5", []byte("value5"))) 709 require.NoError(t, s1.SetState("ns1", "key6", []byte("value6"))) 710 require.NoError(t, s1.SetState("ns1", "key7", []byte("value7"))) 711 require.NoError(t, s1.SetState("ns1", "key8", []byte("value8"))) 712 713 require.NoError(t, s1.SetState("ns1", "key9", []byte(`{"asset_name":"marble1","color":"red","size":"25","owner":"jerry"}`))) 714 require.NoError(t, s1.SetState("ns1", "key10", []byte(`{"asset_name":"marble2","color":"blue","size":"10","owner":"bob"}`))) 715 require.NoError(t, s1.SetState("ns1", "key11", []byte(`{"asset_name":"marble3","color":"blue","size":"35","owner":"jerry"}`))) 716 require.NoError(t, s1.SetState("ns1", "key12", []byte(`{"asset_name":"marble4","color":"green","size":"15","owner":"bob"}`))) 717 require.NoError(t, s1.SetState("ns1", "key13", []byte(`{"asset_name":"marble5","color":"red","size":"35","owner":"jerry"}`))) 718 require.NoError(t, s1.SetState("ns1", "key14", []byte(`{"asset_name":"marble6","color":"blue","size":"25","owner":"bob"}`))) 719 720 s1.Done() 721 722 // validate and commit RWset 723 txRWSet, _ := s1.GetTxSimulationResults() 724 txMgrHelper.validateAndCommitRWSet(txRWSet.PubSimulationResults) 725 726 queryExecuter, _ := txMgr.NewQueryExecutor("test_tx2") 727 queryString := "{\"selector\":{\"owner\": {\"$eq\": \"bob\"}},\"limit\": 10,\"skip\": 0}" 728 729 itr, err := queryExecuter.ExecuteQuery("ns1", queryString) 730 require.NoError(t, err, "Error upon ExecuteQuery()") 731 counter := 0 732 for { 733 queryRecord, _ := itr.Next() 734 if queryRecord == nil { 735 break 736 } 737 // Unmarshal the document to Asset structure 738 assetResp := &Asset{} 739 require.NoError(t, json.Unmarshal(queryRecord.(*queryresult.KV).Value, &assetResp)) 740 // Verify the owner retrieved matches 741 require.Equal(t, "bob", assetResp.Owner) 742 counter++ 743 } 744 // Ensure the query 
returns 3 documents 745 require.Equal(t, 3, counter) 746 } 747 748 // TestExecutePaginatedQuery is only tested on the CouchDB testEnv 749 func TestExecutePaginatedQuery(t *testing.T) { 750 for _, testEnv := range testEnvs { 751 // Query is only supported and tested on the CouchDB testEnv 752 if testEnv.getName() == couchDBtestEnvName { 753 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 754 testLedgerID := "testexecutepaginatedquery" 755 testEnv.init(t, testLedgerID, nil) 756 testExecutePaginatedQuery(t, testEnv) 757 testEnv.cleanup() 758 } 759 } 760 } 761 762 func testExecutePaginatedQuery(t *testing.T, env testEnv) { 763 type Asset struct { 764 ID string `json:"_id"` 765 Rev string `json:"_rev"` 766 AssetName string `json:"asset_name"` 767 Color string `json:"color"` 768 Size string `json:"size"` 769 Owner string `json:"owner"` 770 } 771 772 txMgr := env.getTxMgr() 773 txMgrHelper := newTxMgrTestHelper(t, txMgr) 774 775 s1, _ := txMgr.NewTxSimulator("test_tx1") 776 777 require.NoError(t, s1.SetState("ns1", "key1", []byte(`{"asset_name":"marble1","color":"red","size":"25","owner":"jerry"}`))) 778 require.NoError(t, s1.SetState("ns1", "key2", []byte(`{"asset_name":"marble2","color":"blue","size":"10","owner":"bob"}`))) 779 require.NoError(t, s1.SetState("ns1", "key3", []byte(`{"asset_name":"marble3","color":"blue","size":"35","owner":"jerry"}`))) 780 require.NoError(t, s1.SetState("ns1", "key4", []byte(`{"asset_name":"marble4","color":"green","size":"15","owner":"bob"}`))) 781 require.NoError(t, s1.SetState("ns1", "key5", []byte(`{"asset_name":"marble5","color":"red","size":"35","owner":"jerry"}`))) 782 require.NoError(t, s1.SetState("ns1", "key6", []byte(`{"asset_name":"marble6","color":"blue","size":"25","owner":"bob"}`))) 783 784 s1.Done() 785 786 // validate and commit RWset 787 txRWSet, _ := s1.GetTxSimulationResults() 788 txMgrHelper.validateAndCommitRWSet(txRWSet.PubSimulationResults) 789 790 queryExecuter, _ := txMgr.NewQueryExecutor("test_tx2") 
791 queryString := `{"selector":{"owner":{"$eq":"bob"}}}` 792 793 itr, err := queryExecuter.ExecuteQueryWithPagination("ns1", queryString, "", 2) 794 require.NoError(t, err, "Error upon ExecuteQueryWithMetadata()") 795 counter := 0 796 for { 797 queryRecord, _ := itr.Next() 798 if queryRecord == nil { 799 break 800 } 801 // Unmarshal the document to Asset structure 802 assetResp := &Asset{} 803 require.NoError(t, json.Unmarshal(queryRecord.(*queryresult.KV).Value, &assetResp)) 804 // Verify the owner retrieved matches 805 require.Equal(t, "bob", assetResp.Owner) 806 counter++ 807 } 808 // Ensure the query returns 2 documents 809 require.Equal(t, 2, counter) 810 811 bookmark := itr.GetBookmarkAndClose() 812 813 itr, err = queryExecuter.ExecuteQueryWithPagination("ns1", queryString, bookmark, 2) 814 require.NoError(t, err, "Error upon ExecuteQuery()") 815 counter = 0 816 for { 817 queryRecord, _ := itr.Next() 818 if queryRecord == nil { 819 break 820 } 821 // Unmarshal the document to Asset structure 822 assetResp := &Asset{} 823 require.NoError(t, json.Unmarshal(queryRecord.(*queryresult.KV).Value, &assetResp)) 824 // Verify the owner retrieved matches 825 require.Equal(t, "bob", assetResp.Owner) 826 counter++ 827 } 828 // Ensure the query returns 1 documents 829 require.Equal(t, 1, counter) 830 } 831 832 func TestValidateKey(t *testing.T) { 833 nonUTF8Key := string([]byte{0xff, 0xff}) 834 dummyValue := []byte("dummyValue") 835 for _, testEnv := range testEnvs { 836 testLedgerID := "test.validate.key" 837 testEnv.init(t, testLedgerID, nil) 838 txSimulator, _ := testEnv.getTxMgr().NewTxSimulator("test_tx1") 839 err := txSimulator.SetState("ns1", nonUTF8Key, dummyValue) 840 if testEnv.getName() == levelDBtestEnvName { 841 require.NoError(t, err) 842 } 843 if testEnv.getName() == couchDBtestEnvName { 844 require.Error(t, err) 845 } 846 testEnv.cleanup() 847 } 848 } 849 850 // TestTxSimulatorUnsupportedTx verifies that a simulation must throw an error when an 
// unsupported transaction is performed - queries on private data are supported only in a read-only transaction
func TestTxSimulatorUnsupportedTx(t *testing.T) {
	testEnv := testEnvsMap[levelDBtestEnvName]
	testEnv.init(t, "testtxsimulatorunsupportedtx", nil)
	defer testEnv.cleanup()
	txMgr := testEnv.getTxMgr()
	populateCollConfigForTest(t, txMgr,
		[]collConfigkey{
			{"ns1", "coll1"},
			{"ns1", "coll2"},
			{"ns1", "coll3"},
			{"ns1", "coll4"},
		},
		version.NewHeight(1, 1))

	// a write followed by a pvt data range query must be rejected
	// (note: "unsuppored" below matches the typo in the production error message)
	simulator, _ := txMgr.NewTxSimulator("txid1")
	err := simulator.SetState("ns", "key", []byte("value"))
	require.NoError(t, err)
	_, err = simulator.GetPrivateDataRangeScanIterator("ns1", "coll1", "startKey", "endKey")
	require.EqualError(t, err, "txid [txid1]: unsuppored transaction. Queries on pvt data is supported only in a read-only transaction")

	// a pvt data range query followed by a write must be rejected as well
	simulator, _ = txMgr.NewTxSimulator("txid2")
	_, err = simulator.GetPrivateDataRangeScanIterator("ns1", "coll1", "startKey", "endKey")
	require.NoError(t, err)
	err = simulator.SetState("ns", "key", []byte("value"))
	require.EqualError(t, err, "txid [txid2]: unsuppored transaction. Transaction has already performed queries on pvt data. Writes are not allowed")

	// a write followed by a paginated range query must be rejected
	simulator, _ = txMgr.NewTxSimulator("txid3")
	err = simulator.SetState("ns", "key", []byte("value"))
	require.NoError(t, err)
	_, err = simulator.GetStateRangeScanIteratorWithPagination("ns1", "startKey", "endKey", 2)
	require.EqualError(t, err, "txid [txid3]: unsuppored transaction. Paginated queries are supported only in a read-only transaction")

	// a paginated range query followed by a write must be rejected as well
	simulator, _ = txMgr.NewTxSimulator("txid4")
	_, err = simulator.GetStateRangeScanIteratorWithPagination("ns1", "startKey", "endKey", 2)
	require.NoError(t, err)
	err = simulator.SetState("ns", "key", []byte("value"))
	require.EqualError(t, err, "txid [txid4]: unsuppored transaction. Transaction has already performed a paginated query. Writes are not allowed")
}

// TestTxSimulatorUnsupportedTxCouchDBQuery is the couchDB counterpart of the test
// above: a paginated rich query and a write must not appear in the same simulation.
func TestTxSimulatorUnsupportedTxCouchDBQuery(t *testing.T) {
	testEnv := testEnvsMap[couchDBtestEnvName]
	testEnv.init(t, "testtxsimulatorunsupportedtxqueries", nil)
	defer testEnv.cleanup()
	txMgr := testEnv.getTxMgr()
	queryString := `{"selector":{"owner":{"$eq":"bob"}}}`

	// write first, then paginated query - rejected
	simulator, _ := txMgr.NewTxSimulator("txid1")
	err := simulator.SetState("ns1", "key1", []byte(`{"asset_name":"marble1","color":"red","size":"25","owner":"jerry"}`))
	require.NoError(t, err)
	_, err = simulator.ExecuteQueryWithPagination("ns1", queryString, "", 2)
	require.EqualError(t, err, "txid [txid1]: unsuppored transaction. Paginated queries are supported only in a read-only transaction")

	// paginated query first, then write - rejected
	simulator, _ = txMgr.NewTxSimulator("txid2")
	_, err = simulator.ExecuteQueryWithPagination("ns1", queryString, "", 2)
	require.NoError(t, err)
	err = simulator.SetState("ns1", "key1", []byte(`{"asset_name":"marble1","color":"red","size":"25","owner":"jerry"}`))
	require.EqualError(t, err, "txid [txid2]: unsuppored transaction. Transaction has already performed a paginated query. Writes are not allowed")
}

// TestConstructUniquePvtData verifies that when pvt data from several old blocks is
// combined, only the most recent write per (namespace, collection, keyhash) survives.
func TestConstructUniquePvtData(t *testing.T) {
	v1 := []byte{1}
	// ns1-coll1-key1 should be rejected as it is updated in the future by Blk2Tx1
	pvtDataBlk1Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{v1})
	// ns1-coll2-key3 should be accepted but ns1-coll1-key2 as it is updated in the future by Blk2Tx2
	pvtDataBlk1Tx2 := producePvtdata(t, 2, []string{"ns1:coll1", "ns1:coll2"}, []string{"key2", "key3"}, [][]byte{v1, v1})
	// ns1-coll2-key4 should be accepted
	pvtDataBlk1Tx3 := producePvtdata(t, 3, []string{"ns1:coll2"}, []string{"key4"}, [][]byte{v1})

	v2 := []byte{2}
	// ns1-coll1-key1 should be rejected as it is updated in the future by Blk3Tx1
	pvtDataBlk2Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{v2})
	// ns1-coll1-key2 should be accepted
	pvtDataBlk2Tx2 := producePvtdata(t, 2, []string{"ns1:coll1"}, []string{"key2"}, [][]byte{nil})

	v3 := []byte{3}
	// ns1-coll1-key1 should be accepted
	pvtDataBlk3Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{v3})

	// hand-crafted write-set with a nil value for key5; the expected result below
	// treats this entry as a delete
	pvtDataBlk3Tx2WriteSetBytes, err := proto.Marshal(
		&kvrwset.KVRWSet{
			Writes: []*kvrwset.KVWrite{
				{Key: "key5", IsDelete: false, Value: nil},
			},
		},
	)
	require.NoError(t, err)
	pvtDataBlk3Tx2 := &ledger.TxPvtData{
		SeqInBlock: 2,
		WriteSet: &rwset.TxPvtReadWriteSet{
			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
				{
					Namespace: "ns1",
					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
						{
							CollectionName: "coll1",
							Rwset:          pvtDataBlk3Tx2WriteSetBytes,
						},
					},
				},
			},
		},
	}

	blocksPvtData := map[uint64][]*ledger.TxPvtData{
		1: {
			pvtDataBlk1Tx1,
			pvtDataBlk1Tx2,
			pvtDataBlk1Tx3,
		},
		2: {
			pvtDataBlk2Tx1,
			pvtDataBlk2Tx2,
		},
		3: {
			pvtDataBlk3Tx1,
			pvtDataBlk3Tx2,
		},
	}
	// expected surviving writes, keyed by hashed composite key
	hashedCompositeKeyNs1Coll2Key3 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll2", KeyHash: string(util.ComputeStringHash("key3"))}
	pvtKVWriteNs1Coll2Key3 := &privacyenabledstate.PvtKVWrite{Key: "key3", IsDelete: false, Value: v1, Version: version.NewHeight(1, 2)}

	hashedCompositeKeyNs1Coll2Key4 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll2", KeyHash: string(util.ComputeStringHash("key4"))}
	pvtKVWriteNs1Coll2Key4 := &privacyenabledstate.PvtKVWrite{Key: "key4", IsDelete: false, Value: v1, Version: version.NewHeight(1, 3)}

	hashedCompositeKeyNs1Coll1Key2 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key2"))}
	pvtKVWriteNs1Coll1Key2 := &privacyenabledstate.PvtKVWrite{Key: "key2", IsDelete: true, Value: nil, Version: version.NewHeight(2, 2)}

	hashedCompositeKeyNs1Coll1Key1 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key1"))}
	pvtKVWriteNs1Coll1Key1 := &privacyenabledstate.PvtKVWrite{Key: "key1", IsDelete: false, Value: v3, Version: version.NewHeight(3, 1)}

	hashedCompositeKeyNs1Coll1Key5 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key5"))}
	pvtKVWriteNs1Coll1Key5 := &privacyenabledstate.PvtKVWrite{Key: "key5", IsDelete: true, Value: nil, Version: version.NewHeight(3, 2)}

	expectedUniquePvtData := uniquePvtDataMap{
		hashedCompositeKeyNs1Coll2Key3: pvtKVWriteNs1Coll2Key3,
		hashedCompositeKeyNs1Coll2Key4: pvtKVWriteNs1Coll2Key4,
		hashedCompositeKeyNs1Coll1Key2: pvtKVWriteNs1Coll1Key2,
		hashedCompositeKeyNs1Coll1Key1: pvtKVWriteNs1Coll1Key1,
		hashedCompositeKeyNs1Coll1Key5: pvtKVWriteNs1Coll1Key5,
	}

	uniquePvtData, err := constructUniquePvtData(blocksPvtData)
	require.NoError(t, err)
	require.Equal(t, expectedUniquePvtData, uniquePvtData)
}

// TestFindAndRemoveStalePvtData commits hashed updates (with no matching pvt data)
// and then checks that findAndRemoveStalePvtData drops kvwrites that are stale
// relative to the committed hash versions.
func TestFindAndRemoveStalePvtData(t *testing.T) {
	ledgerid := "TestFindAndRemoveStalePvtData"
	testEnv := testEnvsMap[levelDBtestEnvName]
	testEnv.init(t, ledgerid, nil)
	defer testEnv.cleanup()
	db := testEnv.getVDB()

	batch := privacyenabledstate.NewUpdateBatch()
	batch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key1"), util.ComputeStringHash("value_1_1_1"), version.NewHeight(1, 1))
	batch.HashUpdates.Put("ns1", "coll2", util.ComputeStringHash("key2"), util.ComputeStringHash("value_1_2_2"), version.NewHeight(1, 2))
	batch.HashUpdates.Put("ns2", "coll1", util.ComputeStringHash("key2"), util.ComputeStringHash("value_2_1_2"), version.NewHeight(2, 1))
	batch.HashUpdates.Put("ns2", "coll2", util.ComputeStringHash("key3"), util.ComputeStringHash("value_2_2_3"), version.NewHeight(10, 10))

	// all pvt data associated with the hash updates are missing
	require.NoError(t, db.ApplyPrivacyAwareUpdates(batch, version.NewHeight(11, 1)))

	// construct pvt data for some of the above missing data.
	// note that no duplicate entries are expected

	// existent keyhash - a kvwrite with lower version (than the version of existent keyhash) should be considered stale
	hashedCompositeKeyNs1Coll1Key1 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key1"))}
	pvtKVWriteNs1Coll1Key1 := &privacyenabledstate.PvtKVWrite{Key: "key1", IsDelete: false, Value: []byte("old_value_1_1_1"), Version: version.NewHeight(1, 0)}

	// existent keyhash - a kvwrite with higher version (than the version of existent keyhash) should not be considered stale
	hashedCompositeKeyNs2Coll1Key2 := privacyenabledstate.HashedCompositeKey{Namespace: "ns2", CollectionName: "coll1", KeyHash: string(util.ComputeStringHash("key2"))}
	pvtKVWriteNs2Coll1Key2 := &privacyenabledstate.PvtKVWrite{Key: "key2", IsDelete: false, Value: []byte("value_2_1_2"), Version: version.NewHeight(2, 1)}

	// non existent keyhash (because deleted earlier or expired) - a kvwrite for delete should not be considered stale
	hashedCompositeKeyNs1Coll3Key3 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll3", KeyHash: string(util.ComputeStringHash("key3"))}
	pvtKVWriteNs1Coll3Key3 := &privacyenabledstate.PvtKVWrite{Key: "key3", IsDelete: true, Value: nil, Version: version.NewHeight(2, 3)}

	// non existent keyhash (because deleted earlier or expired) - a kvwrite for value set should be considered stale
	hashedCompositeKeyNs1Coll4Key4 := privacyenabledstate.HashedCompositeKey{Namespace: "ns1", CollectionName: "coll4", KeyHash: string(util.ComputeStringHash("key4"))}
	pvtKVWriteNs1Coll4Key4 := &privacyenabledstate.PvtKVWrite{Key: "key4", Value: []byte("value_1_4_4"), Version: version.NewHeight(2, 3)}

	// there would be a version mismatch but the hash value must be the same. hence,
	// this should be accepted too
	hashedCompositeKeyNs2Coll2Key3 := privacyenabledstate.HashedCompositeKey{Namespace: "ns2", CollectionName: "coll2", KeyHash: string(util.ComputeStringHash("key3"))}
	pvtKVWriteNs2Coll2Key3 := &privacyenabledstate.PvtKVWrite{Key: "key3", IsDelete: false, Value: []byte("value_2_2_3"), Version: version.NewHeight(9, 9)}

	uniquePvtData := uniquePvtDataMap{
		hashedCompositeKeyNs1Coll1Key1: pvtKVWriteNs1Coll1Key1,
		hashedCompositeKeyNs2Coll1Key2: pvtKVWriteNs2Coll1Key2,
		hashedCompositeKeyNs1Coll3Key3: pvtKVWriteNs1Coll3Key3,
		hashedCompositeKeyNs2Coll2Key3: pvtKVWriteNs2Coll2Key3,
		hashedCompositeKeyNs1Coll4Key4: pvtKVWriteNs1Coll4Key4,
	}

	// created the expected batch from ValidateAndPrepareBatchForPvtDataofOldBlocks
	expectedBatch := privacyenabledstate.NewUpdateBatch()
	expectedBatch.PvtUpdates.Put("ns2", "coll1", "key2", []byte("value_2_1_2"), version.NewHeight(2, 1))
	expectedBatch.PvtUpdates.Delete("ns1", "coll3", "key3", version.NewHeight(2, 3))
	expectedBatch.PvtUpdates.Put("ns2", "coll2", "key3", []byte("value_2_2_3"), version.NewHeight(10, 10))

	err := uniquePvtData.findAndRemoveStalePvtData(db)
	require.NoError(t, err, "uniquePvtData.findAndRemoveStatePvtData resulted in an error")
	batch = uniquePvtData.transformToUpdateBatch()
	require.Equal(t, expectedBatch.PvtUpdates, batch.PvtUpdates)
}

// producePvtdata builds a TxPvtData for the given tx sequence number. Each entry of
// nsColls is a "ns:coll" pair that is matched positionally with keys and values.
func producePvtdata(t *testing.T, txNum uint64, nsColls []string, keys []string, values [][]byte) *ledger.TxPvtData {
	builder := rwsetutil.NewRWSetBuilder()
	for index, nsColl := range nsColls {
		nsCollSplit := strings.Split(nsColl, ":")
		ns := nsCollSplit[0]
		coll := nsCollSplit[1]
		key := keys[index]
		value := values[index]
		builder.AddToPvtAndHashedWriteSet(ns, coll, key, value)
	}
	simRes, err := builder.GetTxSimulationResults()
	require.NoError(t, err)
	return &ledger.TxPvtData{
		SeqInBlock: txNum,
		WriteSet:   simRes.PvtSimulationResults,
	}
}

// TestRemoveStaleAndCommitPvtDataOfOldBlocks runs the old-blocks pvt data
// reconciliation scenario against each configured test environment.
func TestRemoveStaleAndCommitPvtDataOfOldBlocks(t *testing.T) {
	for _, testEnv := range testEnvs {
		t.Logf("Running test for TestEnv = %s", testEnv.getName())
		testValidationAndCommitOfOldPvtData(t, testEnv)
	}
}

// testValidationAndCommitOfOldPvtData commits hashed updates over three blocks and
// then supplies the (late) pvt data for all three; only writes that are not stale
// with respect to the committed hash versions should end up in the pvt store.
func testValidationAndCommitOfOldPvtData(t *testing.T, env testEnv) {
	ledgerid := "testvalidationandcommitofoldpvtdata"
	btlPolicy := btltestutil.SampleBTLPolicy(
		map[[2]string]uint64{
			{"ns1", "coll1"}: 0,
			{"ns1", "coll2"}: 0,
		},
	)
	env.init(t, ledgerid, btlPolicy)
	defer env.cleanup()
	txMgr := env.getTxMgr()
	populateCollConfigForTest(t, txMgr,
		[]collConfigkey{
			{"ns1", "coll1"},
			{"ns1", "coll2"},
		},
		version.NewHeight(1, 1),
	)

	db := env.getVDB()
	updateBatch := privacyenabledstate.NewUpdateBatch()
	// all pvt data are missing
	updateBatch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key1"), util.ComputeStringHash("value1"), version.NewHeight(1, 1)) // E1
	updateBatch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key2"), util.ComputeStringHash("value2"), version.NewHeight(1, 2)) // E2
	updateBatch.HashUpdates.Put("ns1", "coll2", util.ComputeStringHash("key3"), util.ComputeStringHash("value3"), version.NewHeight(1, 2)) // E3
	updateBatch.HashUpdates.Put("ns1", "coll2", util.ComputeStringHash("key4"), util.ComputeStringHash("value4"), version.NewHeight(1, 3)) // E4
	require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(1, 2)))

	updateBatch = privacyenabledstate.NewUpdateBatch()
	updateBatch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key1"), util.ComputeStringHash("new-value1"), version.NewHeight(2, 1)) // E1 is updated
	updateBatch.HashUpdates.Delete("ns1", "coll1", util.ComputeStringHash("key2"), version.NewHeight(2, 2))                                    // E2 is being deleted
	require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(2, 2)))

	updateBatch = privacyenabledstate.NewUpdateBatch()
	updateBatch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key1"), util.ComputeStringHash("another-new-value1"), version.NewHeight(3, 1)) // E1 is again updated
	updateBatch.HashUpdates.Put("ns1", "coll2", util.ComputeStringHash("key3"), util.ComputeStringHash("value3"), version.NewHeight(3, 2))             // E3 gets only metadata update
	require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(3, 2)))

	v1 := []byte("value1")
	// ns1-coll1-key1 should be rejected as it is updated in the future by Blk2Tx1
	pvtDataBlk1Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{v1})
	// ns1-coll2-key3 should be accepted but ns1-coll1-key2
	// should be rejected as it is updated in the future by Blk2Tx2
	v2 := []byte("value2")
	v3 := []byte("value3")
	pvtDataBlk1Tx2 := producePvtdata(t, 2, []string{"ns1:coll1", "ns1:coll2"}, []string{"key2", "key3"}, [][]byte{v2, v3})
	// ns1-coll2-key4 should be accepted
	v4 := []byte("value4")
	pvtDataBlk1Tx3 := producePvtdata(t, 3, []string{"ns1:coll2"}, []string{"key4"}, [][]byte{v4})

	nv1 := []byte("new-value1")
	// ns1-coll1-key1 should be rejected as it is updated in the future by Blk3Tx1
	pvtDataBlk2Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{nv1})
	// ns1-coll1-key2 should be accepted -- a delete operation
	pvtDataBlk2Tx2 := producePvtdata(t, 2, []string{"ns1:coll1"}, []string{"key2"}, [][]byte{nil})

	anv1 := []byte("another-new-value1")
	// ns1-coll1-key1 should be accepted
	pvtDataBlk3Tx1 := producePvtdata(t, 1, []string{"ns1:coll1"}, []string{"key1"}, [][]byte{anv1})
	// ns1-coll2-key3 should be accepted -- assume that only metadata is being updated
	pvtDataBlk3Tx2 := producePvtdata(t, 2, []string{"ns1:coll2"}, []string{"key3"}, [][]byte{v3})

	blocksPvtData := map[uint64][]*ledger.TxPvtData{
		1: {
			pvtDataBlk1Tx1,
			pvtDataBlk1Tx2,
			pvtDataBlk1Tx3,
		},
		2: {
			pvtDataBlk2Tx1,
			pvtDataBlk2Tx2,
		},
		3: {
			pvtDataBlk3Tx1,
			pvtDataBlk3Tx2,
		},
	}

	err := txMgr.RemoveStaleAndCommitPvtDataOfOldBlocks(blocksPvtData)
	require.NoError(t, err)

	vv, err := db.GetPrivateData("ns1", "coll1", "key1")
	require.NoError(t, err)
	require.Equal(t, anv1, vv.Value) // last updated value

	vv, err = db.GetPrivateData("ns1", "coll1", "key2")
	require.NoError(t, err)
	require.Nil(t, vv) // deleted

	vv, err = db.GetPrivateData("ns1", "coll2", "key3")
	require.NoError(t, err)
	require.Equal(t, v3, vv.Value)
	require.Equal(t, version.NewHeight(3, 2), vv.Version) // though we passed with version {1,2}, we should get {3,2} due to metadata update

	vv, err = db.GetPrivateData("ns1", "coll2", "key4")
	require.NoError(t, err)
	require.Equal(t, v4, vv.Value)
}

// TestTxSimulatorMissingPvtdata exercises reads of private data whose hash exists
// but whose pvt value is missing or older than the committed hash version.
func TestTxSimulatorMissingPvtdata(t *testing.T) {
	testEnv := testEnvsMap[levelDBtestEnvName]
	testEnv.init(t, "TestTxSimulatorUnsupportedTxQueries", nil)
	defer testEnv.cleanup()

	txMgr := testEnv.getTxMgr()
	populateCollConfigForTest(t, txMgr,
		[]collConfigkey{
			{"ns1", "coll1"},
			{"ns1", "coll2"},
			{"ns1", "coll3"},
			{"ns1", "coll4"},
		},
		version.NewHeight(1, 1),
	)

	db := testEnv.getVDB()
	updateBatch := privacyenabledstate.NewUpdateBatch()
	updateBatch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key1"), util.ComputeStringHash("value1"), version.NewHeight(1, 1))
	updateBatch.PvtUpdates.Put("ns1", "coll1", "key1", []byte("value1"), version.NewHeight(1, 1))
	require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(1, 1)))
	verifyPvtKeyValue(t, txMgr, "ns1", "coll1", "key1", []byte("value1"))

	// bump the hash versions; only coll3's pvt value is kept in sync, so coll1/coll2
	// pvt values become stale relative to their hashes
	updateBatch = privacyenabledstate.NewUpdateBatch()
	updateBatch.HashUpdates.Put("ns1", "coll1", util.ComputeStringHash("key1"), util.ComputeStringHash("value1"), version.NewHeight(2, 1))
	updateBatch.HashUpdates.Put("ns1", "coll2", util.ComputeStringHash("key2"), util.ComputeStringHash("value2"), version.NewHeight(2, 1))
	updateBatch.HashUpdates.Put("ns1", "coll3", util.ComputeStringHash("key3"), util.ComputeStringHash("value3"), version.NewHeight(2, 1))
	updateBatch.PvtUpdates.Put("ns1", "coll3", "key3", []byte("value3"), version.NewHeight(2, 1))
	require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(2, 1)))

	verifyPvtKeyVersionStale(t, txMgr, "ns1", "coll1", "key1")
	verifyPvtKeyVersionStale(t, txMgr, "ns1", "coll2", "key2")
	verifyPvtKeyValue(t, txMgr, "ns1", "coll3", "key3", []byte("value3"))
	verifyPvtKeyValue(t, txMgr, "ns1", "coll4", "key4", nil)
}

// TestRemoveStaleAndCommitPvtDataOfOldBlocksWithExpiry verifies that late-arriving
// pvt data for old blocks is committed, and that it is still purged once its BTL
// (block-to-live of 1) expires on a subsequent block commit.
func TestRemoveStaleAndCommitPvtDataOfOldBlocksWithExpiry(t *testing.T) {
	ledgerid := "TestTxSimulatorMissingPvtdataExpiry"
	btlPolicy := btltestutil.SampleBTLPolicy(
		map[[2]string]uint64{
			{"ns", "coll"}: 1,
		},
	)
	testEnv := testEnvsMap[levelDBtestEnvName]
	testEnv.init(t, ledgerid, btlPolicy)
	defer testEnv.cleanup()

	txMgr := testEnv.getTxMgr()
	populateCollConfigForTest(t, txMgr,
		[]collConfigkey{
			{"ns", "coll"},
		},
		version.NewHeight(1, 1),
	)

	bg, _ := testutil.NewBlockGenerator(t, ledgerid, false)

	// storing hashed data but the pvt key is missing
	// stored pvt key would get expired and purged while committing block 3
	blkAndPvtdata := prepareNextBlockForTest(t, txMgr, bg, "txid-1",
		map[string]string{"pubkey1": "pub-value1"}, map[string]string{"pvtkey1": "pvt-value1"}, true)
	_, _, err := txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	// committing block 1
	require.NoError(t, txMgr.Commit())

	// pvt data should not exist
	verifyPvtKeyVersionStale(t, txMgr, "ns", "coll", "pvtkey1")

	// committing pvt data of block 1
	v1 := []byte("pvt-value1")
	pvtDataBlk1Tx1 := producePvtdata(t, 1, []string{"ns:coll"}, []string{"pvtkey1"}, [][]byte{v1})
	blocksPvtData := map[uint64][]*ledger.TxPvtData{
		1: {
			pvtDataBlk1Tx1,
		},
	}
	err = txMgr.RemoveStaleAndCommitPvtDataOfOldBlocks(blocksPvtData)
	require.NoError(t, err)

	// pvt data should exist
	verifyPvtKeyValue(t, txMgr, "ns", "coll", "pvtkey1", v1)

	// storing hashed data but the pvt key is missing
	// stored pvt key would get expired and purged while committing block 4
	blkAndPvtdata = prepareNextBlockForTest(t, txMgr, bg, "txid-2",
		map[string]string{"pubkey2": "pub-value2"}, map[string]string{"pvtkey2": "pvt-value2"}, true)
	_, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	// committing block 2
	require.NoError(t, txMgr.Commit())

	// pvt data should not exist
	verifyPvtKeyVersionStale(t, txMgr, "ns", "coll", "pvtkey2")

	blkAndPvtdata = prepareNextBlockForTest(t, txMgr, bg, "txid-3",
		map[string]string{"pubkey3": "pub-value3"}, nil, false)
	_, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	// committing block 3
	require.NoError(t, txMgr.Commit())

	// prepareForExpiringKey must have selected the pvtkey2 as it would
	// get expired during next block commit

	// committing pvt data of block 2
	v2 := []byte("pvt-value2")
	pvtDataBlk2Tx1 := producePvtdata(t, 1, []string{"ns:coll"}, []string{"pvtkey2"}, [][]byte{v2})
	blocksPvtData = map[uint64][]*ledger.TxPvtData{
		2: {
			pvtDataBlk2Tx1,
		},
	}

	err = txMgr.RemoveStaleAndCommitPvtDataOfOldBlocks(blocksPvtData)
	require.NoError(t, err)

	// pvt data should exist
	verifyPvtKeyValue(t, txMgr, "ns", "coll", "pvtkey2", v2)

	blkAndPvtdata = prepareNextBlockForTest(t, txMgr, bg, "txid-4",
		map[string]string{"pubkey4": "pub-value4"}, nil, false)
	_, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	// committing block 4 and should purge pvtkey2
	require.NoError(t, txMgr.Commit())
	verifyPvtKeyValue(t, txMgr, "ns", "coll", "pvtkey2", nil)
}

// verifyPvtKeyVersionStale asserts that reading (ns, coll, key) fails because the
// pvt value does not match the committed public hash version.
func verifyPvtKeyVersionStale(t *testing.T, txMgr *LockBasedTxMgr, ns, coll, key string) {
	simulator, _ := txMgr.NewTxSimulator("tx-tmp")
	defer simulator.Done()
	_, err := simulator.GetPrivateData(ns, coll, key)
	require.Contains(t, err.Error(), "private data matching public hash version is not available")
}

// verifyPvtKeyValue asserts that reading (ns, coll, key) succeeds and yields
// expectedValue (nil for a non-existing key).
func verifyPvtKeyValue(t *testing.T, txMgr *LockBasedTxMgr, ns, coll, key string, expectedValue []byte) {
	simulator, _ := txMgr.NewTxSimulator("tx-tmp")
	defer simulator.Done()
	pvtValue, err := simulator.GetPrivateData(ns, coll, key)
	require.NoError(t, err)
	require.Equal(t, expectedValue, pvtValue)
}

// TestDeleteOnCursor deletes keys while iterating over them with a range-scan
// cursor and verifies the deletions are committed.
func TestDeleteOnCursor(t *testing.T) {
	cID := "cid"
	env := testEnvsMap[levelDBtestEnvName]
	env.init(t, "TestDeleteOnCursor", nil)
	defer env.cleanup()

	txMgr := env.getTxMgr()
	txMgrHelper := newTxMgrTestHelper(t, txMgr)

	// Simulate and commit tx1 to populate sample data (key_001 through key_010)
	s, _ := txMgr.NewTxSimulator("test_tx1")
	for i := 1; i <= 10; i++ {
		k := createTestKey(i)
		v := createTestValue(i)
		t.Logf("Adding k=[%s], v=[%s]", k, v)
		require.NoError(t, s.SetState(cID, k, v))
	}
	s.Done()
	txRWSet1, _ := s.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults)

	// simulate and
	// commit tx2 that reads keys key_001 through key_004 and deletes them one by one (in a loop - itr.Next() followed by Delete())
	s2, _ := txMgr.NewTxSimulator("test_tx2")
	itr2, _ := s2.GetStateRangeScanIterator(cID, createTestKey(1), createTestKey(5))
	for i := 1; i <= 4; i++ {
		kv, err := itr2.Next()
		require.NoError(t, err)
		require.NotNil(t, kv)
		key := kv.(*queryresult.KV).Key
		require.NoError(t, s2.DeleteState(cID, key))
	}
	itr2.Close()
	s2.Done()
	txRWSet2, _ := s2.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults)

	// simulate tx3 to verify that the keys key_001 through key_004 got deleted
	s3, _ := txMgr.NewTxSimulator("test_tx3")
	itr3, _ := s3.GetStateRangeScanIterator(cID, createTestKey(1), createTestKey(10))
	kv, err := itr3.Next()
	require.NoError(t, err)
	require.NotNil(t, kv)
	key := kv.(*queryresult.KV).Key
	require.Equal(t, "key_005", key) // first surviving key after the deletions
	itr3.Close()
	s3.Done()
}

// TestTxSimulatorMissingPvtdataExpiry verifies that a committed pvt key becomes
// unreadable once its BTL (block-to-live of 1) elapses after two further blocks.
func TestTxSimulatorMissingPvtdataExpiry(t *testing.T) {
	ledgerid := "TestTxSimulatorMissingPvtdataExpiry"
	testEnv := testEnvsMap[levelDBtestEnvName]
	btlPolicy := btltestutil.SampleBTLPolicy(
		map[[2]string]uint64{
			{"ns", "coll"}: 1,
		},
	)
	testEnv.init(t, ledgerid, btlPolicy)
	defer testEnv.cleanup()

	txMgr := testEnv.getTxMgr()
	populateCollConfigForTest(t, txMgr, []collConfigkey{{"ns", "coll"}}, version.NewHeight(1, 1))

	bg, _ := testutil.NewBlockGenerator(t, ledgerid, false)

	// block 1 commits pvtkey1
	blkAndPvtdata := prepareNextBlockForTest(t, txMgr, bg, "txid-1",
		map[string]string{"pubkey1": "pub-value1"}, map[string]string{"pvtkey1": "pvt-value1"}, false)
	_, _, err := txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	require.NoError(t, txMgr.Commit())

	verifyPvtKeyValue(t, txMgr, "ns", "coll", "pvtkey1", []byte("pvt-value1"))

	// block 2: pvtkey1 still within its BTL window and remains readable
	blkAndPvtdata = prepareNextBlockForTest(t, txMgr, bg, "txid-2",
		map[string]string{"pubkey1": "pub-value2"}, map[string]string{"pvtkey2": "pvt-value2"}, false)
	_, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	require.NoError(t, txMgr.Commit())
	verifyPvtKeyValue(t, txMgr, "ns", "coll", "pvtkey1", []byte("pvt-value1"))

	// block 3: pvtkey1 is now past its BTL and should read as nil
	blkAndPvtdata = prepareNextBlockForTest(t, txMgr, bg, "txid-2",
		map[string]string{"pubkey1": "pub-value3"}, map[string]string{"pvtkey3": "pvt-value3"}, false)
	_, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata, true)
	require.NoError(t, err)
	require.NoError(t, txMgr.Commit())
	verifyPvtKeyValue(t, txMgr, "ns", "coll", "pvtkey1", nil)
}

// TestTxWithPubMetadata runs the public state metadata scenario against each
// configured test environment.
func TestTxWithPubMetadata(t *testing.T) {
	for _, testEnv := range testEnvs {
		t.Logf("Running test for TestEnv = %s", testEnv.getName())
		testLedgerID := "testtxwithpubmetadata"
		testEnv.init(t, testLedgerID, nil)
		testTxWithPubMetadata(t, testEnv)
		testEnv.cleanup()
	}
}

// testTxWithPubMetadata sets, queries, updates, and deletes metadata on public
// state keys and verifies the committed results after each transaction.
func testTxWithPubMetadata(t *testing.T, env testEnv) {
	namespace := "testns"
	txMgr := env.getTxMgr()
	txMgrHelper := newTxMgrTestHelper(t, txMgr)

	// Simulate and commit tx1 - set val and metadata for key1 and key2. Set only metadata for key3
	s1, _ := txMgr.NewTxSimulator("test_tx1")
	key1, value1, metadata1 := "key1", []byte("value1"), map[string][]byte{"entry1": []byte("meatadata1-entry1")}
	key2, value2, metadata2 := "key2", []byte("value2"), map[string][]byte{"entry1": []byte("meatadata2-entry1")}
	key3, metadata3 := "key3", map[string][]byte{"entry1": []byte("meatadata3-entry")}

	require.NoError(t, s1.SetState(namespace, key1, value1))
	require.NoError(t, s1.SetStateMetadata(namespace, key1, metadata1))
	require.NoError(t, s1.SetState(namespace, key2, value2))
	require.NoError(t, s1.SetStateMetadata(namespace, key2, metadata2))
	require.NoError(t, s1.SetStateMetadata(namespace, key3, metadata3))
	s1.Done()
	txRWSet1, _ := s1.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults)

	// Run query - key1 and key2 should return both value and metadata. Key3 should still be non-existing in db
	qe, _ := txMgr.NewQueryExecutor("test_tx2")
	checkTestQueryResults(t, qe, namespace, key1, value1, metadata1)
	checkTestQueryResults(t, qe, namespace, key2, value2, metadata2)
	checkTestQueryResults(t, qe, namespace, key3, nil, nil)
	qe.Done()

	// Simulate and commit tx3 - update metadata for key1 and delete metadata for key2
	updatedMetadata1 := map[string][]byte{"entry1": []byte("meatadata1-entry1"), "entry2": []byte("meatadata1-entry2")}
	s2, _ := txMgr.NewTxSimulator("test_tx3")
	require.NoError(t, s2.SetStateMetadata(namespace, key1, updatedMetadata1))
	require.NoError(t, s2.DeleteStateMetadata(namespace, key2))
	s2.Done()
	txRWSet2, _ := s2.GetTxSimulationResults()
	txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults)

	// Run query - key1 should return updated metadata. Key2 should return 'nil' metadata
	qe, _ = txMgr.NewQueryExecutor("test_tx4")
	checkTestQueryResults(t, qe, namespace, key1, value1, updatedMetadata1)
	checkTestQueryResults(t, qe, namespace, key2, value2, nil)
	qe.Done()
}

// TestTxWithPvtdataMetadata runs the private data metadata scenario against each
// configured test environment, with a long BTL so nothing expires mid-test.
func TestTxWithPvtdataMetadata(t *testing.T) {
	ledgerid, ns, coll := "testtxwithpvtdatametadata", "ns", "coll"
	btlPolicy := btltestutil.SampleBTLPolicy(
		map[[2]string]uint64{
			{"ns", "coll"}: 1000,
		},
	)
	for _, testEnv := range testEnvs {
		t.Logf("Running test for TestEnv = %s", testEnv.getName())
		testEnv.init(t, ledgerid, btlPolicy)
		testTxWithPvtdataMetadata(t, testEnv, ns, coll)
		testEnv.cleanup()
	}
}

// testTxWithPvtdataMetadata mirrors testTxWithPubMetadata for private data:
// it sets, queries, updates, and deletes pvt data metadata across committed blocks.
func testTxWithPvtdataMetadata(t *testing.T, env testEnv, ns, coll string) {
	ledgerid := "testtxwithpvtdatametadata"
	txMgr := env.getTxMgr()
	bg, _ := testutil.NewBlockGenerator(t, ledgerid, false)

	populateCollConfigForTest(t, txMgr, []collConfigkey{{"ns", "coll"}}, version.NewHeight(1, 1))

	// Simulate and commit tx1 - set val and metadata for key1 and key2.
	// Set only metadata for key3
	s1, _ := txMgr.NewTxSimulator("test_tx1")
	key1, value1, metadata1 := "key1", []byte("value1"), map[string][]byte{"entry1": []byte("meatadata1-entry1")}
	key2, value2, metadata2 := "key2", []byte("value2"), map[string][]byte{"entry1": []byte("meatadata2-entry1")}
	key3, metadata3 := "key3", map[string][]byte{"entry1": []byte("meatadata3-entry")}
	require.NoError(t, s1.SetPrivateData(ns, coll, key1, value1))
	require.NoError(t, s1.SetPrivateDataMetadata(ns, coll, key1, metadata1))
	require.NoError(t, s1.SetPrivateData(ns, coll, key2, value2))
	require.NoError(t, s1.SetPrivateDataMetadata(ns, coll, key2, metadata2))
	require.NoError(t, s1.SetPrivateDataMetadata(ns, coll, key3, metadata3))
	s1.Done()

	blkAndPvtdata1, _ := prepareNextBlockForTestFromSimulator(t, bg, s1)
	_, _, err := txMgr.ValidateAndPrepare(blkAndPvtdata1, true)
	require.NoError(t, err)
	require.NoError(t, txMgr.Commit())

	// Run query - key1 and key2 should return both value and metadata. Key3 should still be non-existing in db
	qe, _ := txMgr.NewQueryExecutor("test_tx2")
	checkPvtdataTestQueryResults(t, qe, ns, coll, key1, value1, metadata1)
	checkPvtdataTestQueryResults(t, qe, ns, coll, key2, value2, metadata2)
	checkPvtdataTestQueryResults(t, qe, ns, coll, key3, nil, nil)
	qe.Done()

	// Simulate and commit tx3 - update metadata for key1 and delete metadata for key2
	updatedMetadata1 := map[string][]byte{"entry1": []byte("meatadata1-entry1"), "entry2": []byte("meatadata1-entry2")}
	s2, _ := txMgr.NewTxSimulator("test_tx3")
	require.NoError(t, s2.SetPrivateDataMetadata(ns, coll, key1, updatedMetadata1))
	require.NoError(t, s2.DeletePrivateDataMetadata(ns, coll, key2))
	s2.Done()

	blkAndPvtdata2, _ := prepareNextBlockForTestFromSimulator(t, bg, s2)
	_, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata2, true)
	require.NoError(t, err)
	require.NoError(t, txMgr.Commit())

	// Run query - key1 should return updated metadata. Key2 should return 'nil' metadata
	qe, _ = txMgr.NewQueryExecutor("test_tx4")
	checkPvtdataTestQueryResults(t, qe, ns, coll, key1, value1, updatedMetadata1)
	checkPvtdataTestQueryResults(t, qe, ns, coll, key2, value2, nil)
	qe.Done()
}

// prepareNextBlockForTest simulates a transaction that writes the given public and
// private key-values and packages it into the next block. When isMissing is true,
// the block is built with the pvt data marked missing instead of included.
func prepareNextBlockForTest(t *testing.T, txMgr *LockBasedTxMgr, bg *testutil.BlockGenerator,
	txid string, pubKVs map[string]string, pvtKVs map[string]string, isMissing bool) *ledger.BlockAndPvtData {
	simulator, _ := txMgr.NewTxSimulator(txid)
	// simulating transaction
	for k, v := range pubKVs {
		require.NoError(t, simulator.SetState("ns", k, []byte(v)))
	}
	for k, v := range pvtKVs {
		require.NoError(t, simulator.SetPrivateData("ns", "coll", k, []byte(v)))
	}
	simulator.Done()
	if isMissing {
		return prepareNextBlockForTestFromSimulatorWithMissingData(t, bg, simulator, txid, 1, "ns", "coll", true)
	}
	nb, _ := prepareNextBlockForTestFromSimulator(t, bg, simulator)
	return nb
}

// prepareNextBlockForTestFromSimulator turns a finished simulation into the next
// block, attaching the pvt write-set as the pvt data of tx 0.
func prepareNextBlockForTestFromSimulator(t *testing.T, bg *testutil.BlockGenerator, simulator ledger.TxSimulator) (*ledger.BlockAndPvtData, *ledger.TxSimulationResults) {
	simRes, _ := simulator.GetTxSimulationResults()
	pubSimBytes, _ := simRes.GetPubSimulationBytes()
	block := bg.NextBlock([][]byte{pubSimBytes})
	return &ledger.BlockAndPvtData{
		Block:   block,
		PvtData: ledger.TxPvtDataMap{0: {SeqInBlock: 0, WriteSet: simRes.PvtSimulationResults}},
	}, simRes
}

// prepareNextBlockForTestFromSimulatorWithMissingData builds the next block while
// recording the (ns, coll) pvt data of txNum as missing rather than including it.
func prepareNextBlockForTestFromSimulatorWithMissingData(t *testing.T, bg *testutil.BlockGenerator, simulator ledger.TxSimulator,
	txid string, txNum uint64, ns, coll string, isEligible bool) *ledger.BlockAndPvtData {
	simRes, _ := simulator.GetTxSimulationResults()
	pubSimBytes, _ := simRes.GetPubSimulationBytes()
	block := bg.NextBlock([][]byte{pubSimBytes})
	missingData := make(ledger.TxMissingPvtData)
missingData.Add(txNum, ns, coll, isEligible) 1568 return &ledger.BlockAndPvtData{Block: block, MissingPvtData: missingData} 1569 } 1570 1571 func checkTestQueryResults(t *testing.T, qe ledger.QueryExecutor, ns, key string, 1572 expectedVal []byte, expectedMetadata map[string][]byte) { 1573 committedVal, err := qe.GetState(ns, key) 1574 require.NoError(t, err) 1575 require.Equal(t, expectedVal, committedVal) 1576 1577 committedMetadata, err := qe.GetStateMetadata(ns, key) 1578 require.NoError(t, err) 1579 require.Equal(t, expectedMetadata, committedMetadata) 1580 t.Logf("key=%s, value=%s, metadata=%s", key, committedVal, committedMetadata) 1581 } 1582 1583 func checkPvtdataTestQueryResults(t *testing.T, qe ledger.QueryExecutor, ns, coll, key string, 1584 expectedVal []byte, expectedMetadata map[string][]byte) { 1585 committedVal, err := qe.GetPrivateData(ns, coll, key) 1586 require.NoError(t, err) 1587 require.Equal(t, expectedVal, committedVal) 1588 1589 committedMetadata, err := qe.GetPrivateDataMetadata(ns, coll, key) 1590 require.NoError(t, err) 1591 require.Equal(t, expectedMetadata, committedMetadata) 1592 t.Logf("key=%s, value=%s, metadata=%s", key, committedVal, committedMetadata) 1593 } 1594 1595 func TestName(t *testing.T) { 1596 testEnv := testEnvsMap[levelDBtestEnvName] 1597 testEnv.init(t, "testLedger", nil) 1598 defer testEnv.cleanup() 1599 txMgr := testEnv.getTxMgr() 1600 require.Equal(t, "state", txMgr.Name()) 1601 } 1602 1603 func TestTxSimulatorWithStateBasedEndorsement(t *testing.T) { 1604 for _, testEnv := range testEnvs { 1605 t.Run(testEnv.getName(), func(t *testing.T) { 1606 testEnv.init(t, "testtxsimulatorwithdtstebasedendorsement", nil) 1607 testTxSimulatorWithStateBasedEndorsement(t, testEnv) 1608 testEnv.cleanup() 1609 }) 1610 } 1611 } 1612 1613 func testTxSimulatorWithStateBasedEndorsement(t *testing.T, env testEnv) { 1614 txMgr := env.getTxMgr() 1615 txMgrHelper := newTxMgrTestHelper(t, txMgr) 1616 sbe1 := 
map[string][]byte{peer.MetaDataKeys_VALIDATION_PARAMETER.String(): []byte("SBE1")} 1617 sbe2 := map[string][]byte{peer.MetaDataKeys_VALIDATION_PARAMETER.String(): []byte("SBE2")} 1618 sbe3 := map[string][]byte{peer.MetaDataKeys_VALIDATION_PARAMETER.String(): []byte("SBE3")} 1619 1620 // simulate tx1 1621 s1, _ := txMgr.NewTxSimulator("test_tx1") 1622 require.NoError(t, s1.SetState("ns1", "key1", []byte("value1"))) 1623 require.NoError(t, s1.SetState("ns1", "key2", []byte("value2"))) 1624 require.NoError(t, s1.SetStateMetadata("ns1", "key2", sbe1)) 1625 require.NoError(t, s1.SetState("ns2", "key3", []byte("value3"))) 1626 require.NoError(t, s1.SetStateMetadata("ns2", "key3", sbe2)) 1627 require.NoError(t, s1.SetState("ns2", "key4", []byte("value4"))) 1628 s1.Done() 1629 // validate and commit RWset 1630 txRWSet1, _ := s1.GetTxSimulationResults() 1631 txMgrHelper.validateAndCommitRWSet(txRWSet1.PubSimulationResults) 1632 1633 // simulate tx2 that make changes to existing data and updates a key policy 1634 s2, _ := txMgr.NewTxSimulator("test_tx2") 1635 require.NoError(t, s2.SetState("ns1", "key1", []byte("value1b"))) 1636 require.NoError(t, s2.SetState("ns1", "key2", []byte("value2b"))) 1637 require.NoError(t, s2.SetStateMetadata("ns2", "key3", sbe3)) 1638 require.NoError(t, s2.SetState("ns2", "key4", []byte("value4b"))) 1639 s2.Done() 1640 // validate and commit RWset for tx2 1641 txRWSet2, err := s2.GetTxSimulationResults() 1642 require.NoError(t, err) 1643 txMgrHelper.validateAndCommitRWSet(txRWSet2.PubSimulationResults) 1644 // check the metadata are captured 1645 metadata := ledger.WritesetMetadata{} 1646 metadata.Add("ns1", "", "key1", nil) 1647 metadata.Add("ns1", "", "key2", sbe1) 1648 metadata.Add("ns2", "", "key3", sbe2) 1649 metadata.Add("ns2", "", "key4", nil) 1650 require.Equal(t, metadata, txRWSet2.WritesetMetadata) 1651 1652 // simulate tx3 1653 s3, _ := txMgr.NewTxSimulator("test_tx3") 1654 require.NoError(t, s3.SetState("ns1", "key1", 
[]byte("value1c"))) 1655 require.NoError(t, s3.SetState("ns1", "key2", []byte("value2c"))) 1656 require.NoError(t, s3.SetState("ns2", "key3", []byte("value3c"))) 1657 require.NoError(t, s3.SetState("ns2", "key4", []byte("value4c"))) 1658 s3.Done() 1659 txRWSet3, err := s3.GetTxSimulationResults() 1660 require.NoError(t, err) 1661 1662 // check the metadata are captured 1663 metadata = ledger.WritesetMetadata{} 1664 metadata.Add("ns1", "", "key1", nil) 1665 metadata.Add("ns1", "", "key2", sbe1) 1666 metadata.Add("ns2", "", "key3", sbe3) 1667 metadata.Add("ns2", "", "key4", nil) 1668 require.Equal(t, metadata, txRWSet3.WritesetMetadata) 1669 } 1670 1671 func TestTxSimulatorWithPrivateDataStateBasedEndorsement(t *testing.T) { 1672 ledgerid, ns, coll := "testtxwithprivatedatastatebasedendorsement", "ns1", "coll1" 1673 btlPolicy := btltestutil.SampleBTLPolicy( 1674 map[[2]string]uint64{ 1675 {ns, coll}: 1000, 1676 }, 1677 ) 1678 for _, testEnv := range testEnvs { 1679 t.Logf("Running test for TestEnv = %s", testEnv.getName()) 1680 testEnv.init(t, ledgerid, btlPolicy) 1681 testTxSimulatorWithPrivateDataStateBasedEndorsement(t, testEnv, ns, coll) 1682 testEnv.cleanup() 1683 } 1684 } 1685 1686 func testTxSimulatorWithPrivateDataStateBasedEndorsement(t *testing.T, env testEnv, ns, coll string) { 1687 ledgerid := "testtxwithprivatedatastatebasedendorsement" 1688 txMgr := env.getTxMgr() 1689 bg, _ := testutil.NewBlockGenerator(t, ledgerid, false) 1690 1691 populateCollConfigForTest(t, txMgr, []collConfigkey{{ns, coll}}, version.NewHeight(1, 1)) 1692 1693 sbe1 := map[string][]byte{peer.MetaDataKeys_VALIDATION_PARAMETER.String(): []byte("SBE1")} 1694 sbe2 := map[string][]byte{peer.MetaDataKeys_VALIDATION_PARAMETER.String(): []byte("SBE2")} 1695 1696 // Simulate and commit tx1 - set private data and key policy 1697 s1, _ := txMgr.NewTxSimulator("test_tx1") 1698 require.NoError(t, s1.SetPrivateData(ns, coll, "key1", []byte("private_value1"))) 1699 require.NoError(t, 
s1.SetPrivateDataMetadata(ns, coll, "key1", sbe1)) 1700 s1.Done() 1701 1702 blkAndPvtdata1, _ := prepareNextBlockForTestFromSimulator(t, bg, s1) 1703 _, _, err := txMgr.ValidateAndPrepare(blkAndPvtdata1, true) 1704 require.NoError(t, err) 1705 require.NoError(t, txMgr.Commit()) 1706 1707 // simulate tx2 that make changes to existing data and updates a key policy 1708 s2, _ := txMgr.NewTxSimulator("test_tx2") 1709 require.NoError(t, s2.SetPrivateData(ns, coll, "key1", []byte("private_value2"))) 1710 require.NoError(t, s2.SetPrivateDataMetadata(ns, coll, "key1", sbe2)) 1711 s2.Done() 1712 1713 blkAndPvtdata2, simRes2 := prepareNextBlockForTestFromSimulator(t, bg, s2) 1714 _, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata2, true) 1715 require.NoError(t, err) 1716 require.NoError(t, txMgr.Commit()) 1717 // check the metadata are captured 1718 metadata := ledger.WritesetMetadata{} 1719 metadata.Add(ns, coll, "key1", sbe1) 1720 require.Equal(t, metadata, simRes2.WritesetMetadata) 1721 1722 // simulate tx3 that make changes to existing data 1723 s3, _ := txMgr.NewTxSimulator("test_tx3") 1724 require.NoError(t, s3.SetPrivateData(ns, coll, "key1", []byte("private_value2"))) 1725 s3.Done() 1726 1727 blkAndPvtdata3, simRes3 := prepareNextBlockForTestFromSimulator(t, bg, s3) 1728 _, _, err = txMgr.ValidateAndPrepare(blkAndPvtdata3, true) 1729 require.NoError(t, err) 1730 require.NoError(t, txMgr.Commit()) 1731 // check the metadata are captured 1732 metadata = ledger.WritesetMetadata{} 1733 metadata.Add(ns, coll, "key1", sbe2) 1734 require.Equal(t, metadata, simRes3.WritesetMetadata) 1735 }