github.com/braveheart12/just@v0.8.7/ledger/heavyclient/heavy_test.go

/*
 * Copyright 2019 Insolar Technologies
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package heavyclient_test

import (
    "bytes"
    "context"
    "fmt"
    "sort"
    "sync"
    "sync/atomic"
    "testing"
    "time"

    "github.com/dgraph-io/badger"
    "github.com/insolar/insolar/component"
    "github.com/insolar/insolar/configuration"
    "github.com/insolar/insolar/core"
    "github.com/insolar/insolar/core/message"
    "github.com/insolar/insolar/core/reply"
    "github.com/insolar/insolar/instrumentation/inslogger"
    "github.com/insolar/insolar/ledger/artifactmanager"
    "github.com/insolar/insolar/ledger/pulsemanager"
    "github.com/insolar/insolar/ledger/recentstorage"
    "github.com/insolar/insolar/ledger/storage"
    "github.com/insolar/insolar/ledger/storage/index"
    "github.com/insolar/insolar/ledger/storage/jet"
    "github.com/insolar/insolar/ledger/storage/nodes"
    "github.com/insolar/insolar/ledger/storage/record"
    "github.com/insolar/insolar/ledger/storage/storagetest"
    "github.com/insolar/insolar/platformpolicy"
    "github.com/insolar/insolar/testutils"
    "github.com/insolar/insolar/testutils/network"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/suite"
)

type heavySuite struct {
    suite.Suite

    cm      *component.Manager
    ctx     context.Context
    cleaner func()
    db      storage.DBContext

    jetStorage     storage.JetStorage
    nodeAccessor   *nodes.AccessorMock
    nodeSetter     *nodes.SetterMock
    pulseTracker   storage.PulseTracker
    replicaStorage storage.ReplicaStorage
    objectStorage  storage.ObjectStorage
    dropStorage    storage.DropStorage
    storageCleaner storage.Cleaner
}

func NewHeavySuite() *heavySuite {
    return &heavySuite{
        Suite: suite.Suite{},
    }
}

// TestHeavySuite initializes and runs the suite.
func TestHeavySuite(t *testing.T) {
    suite.Run(t, NewHeavySuite())
}

func (s *heavySuite) BeforeTest(suiteName, testName string) {
    s.cm = &component.Manager{}
    s.ctx = inslogger.TestContext(s.T())

    db, cleaner := storagetest.TmpDB(s.ctx, s.T())
    s.cleaner = cleaner
    s.db = db
    s.jetStorage = storage.NewJetStorage()
    s.nodeAccessor = nodes.NewAccessorMock(s.T())
    s.nodeSetter = nodes.NewSetterMock(s.T())
    s.pulseTracker = storage.NewPulseTracker()
    s.replicaStorage = storage.NewReplicaStorage()
    s.objectStorage = storage.NewObjectStorage()
    s.dropStorage = storage.NewDropStorage(10)
    s.storageCleaner = storage.NewCleaner()

    s.cm.Inject(
        platformpolicy.NewPlatformCryptographyScheme(),
        s.db,
        s.jetStorage,
        s.nodeAccessor,
        s.nodeSetter,
        s.pulseTracker,
        s.replicaStorage,
        s.objectStorage,
        s.dropStorage,
        s.storageCleaner,
    )

    s.nodeSetter.SetMock.Return(nil)
    s.nodeAccessor.AllMock.Return(nil, nil)

    err := s.cm.Init(s.ctx)
    if err != nil {
        s.T().Error("ComponentManager init failed", err)
    }
    err = s.cm.Start(s.ctx)
    if err != nil {
        s.T().Error("ComponentManager start failed", err)
    }
}
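
// For reference, a minimal sketch (not part of the original suite) of the
// component lifecycle that BeforeTest drives and AfterTest unwinds: Inject
// wires the dependencies, Init and Start bring them up, Stop tears them
// down. The helper name componentLifecycle is hypothetical.
func componentLifecycle(ctx context.Context, components ...interface{}) (*component.Manager, error) {
    cm := &component.Manager{}
    cm.Inject(components...)
    if err := cm.Init(ctx); err != nil {
        return nil, err
    }
    if err := cm.Start(ctx); err != nil {
        return nil, err
    }
    return cm, nil
}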

func (s *heavySuite) AfterTest(suiteName, testName string) {
    err := s.cm.Stop(s.ctx)
    if err != nil {
        s.T().Error("ComponentManager stop failed", err)
    }
    s.cleaner()
}

func (s *heavySuite) TestPulseManager_SendToHeavyHappyPath() {
    sendToHeavy(s, false)
}

func (s *heavySuite) TestPulseManager_SendToHeavyWithRetry() {
    sendToHeavy(s, true)
}

func sendToHeavy(s *heavySuite, withretry bool) {
    // TODO: test should work with any JetID (add new test?) - 14.Dec.2018 @nordicdyno
    jetID := jet.ZeroJetID
    // Mock N1: LR mock does nothing
    lrMock := testutils.NewLogicRunnerMock(s.T())
    lrMock.OnPulseMock.Return(nil)

    // Mock N2: we are a light material node
    nodeMock := network.NewNodeMock(s.T())
    nodeMock.RoleMock.Return(core.StaticRoleLightMaterial)
    nodeMock.IDMock.Return(core.RecordRef{})

    // Mock N3: node network returns the mocked node above
    // and stubs GetWorkingNodes
    nodenetMock := network.NewNodeNetworkMock(s.T())
    nodenetMock.GetWorkingNodesMock.Return(nil)
    nodenetMock.GetOriginMock.Return(nodeMock)

    // Mock N4: message bus for the Send method
    busMock := testutils.NewMessageBusMock(s.T())
    busMock.OnPulseFunc = func(context.Context, core.Pulse) error {
        return nil
    }

    // Mock N5: RecentIndexStorageMock and PendingStorageMock
    recentMock := recentstorage.NewRecentIndexStorageMock(s.T())
    recentMock.GetObjectsMock.Return(nil)
    recentMock.AddObjectMock.Return()
    recentMock.DecreaseIndexTTLMock.Return([]core.RecordID{})
    recentMock.FilterNotExistWithLockMock.Return()

    pendingStorageMock := recentstorage.NewPendingStorageMock(s.T())
    pendingStorageMock.GetRequestsMock.Return(map[core.RecordID]recentstorage.PendingObjectContext{})

    // Mock N6: JetCoordinatorMock
    jcMock := testutils.NewJetCoordinatorMock(s.T())
    jcMock.LightExecutorForJetMock.Return(&core.RecordRef{}, nil)
    jcMock.MeMock.Return(core.RecordRef{})

    // Mock N7: GIL mock
    gilMock := testutils.NewGlobalInsolarLockMock(s.T())
    gilMock.AcquireFunc = func(context.Context) {}
    gilMock.ReleaseFunc = func(context.Context) {}

    // Mock N8: Active List Swapper mock
    alsMock := testutils.NewActiveListSwapperMock(s.T())
    alsMock.MoveSyncToActiveFunc = func(context.Context) error { return nil }

    // Mock N9: cryptography mocks
    cryptoServiceMock := testutils.NewCryptographyServiceMock(s.T())
    cryptoServiceMock.SignFunc = func(p []byte) (r *core.Signature, r1 error) {
        signature := core.SignatureFromBytes(nil)
        return &signature, nil
    }
    cryptoScheme := testutils.NewPlatformCryptographyScheme()

    // Mock the bus Send method: store synced records and count the calls
    // that carry a HeavyPayload message.
    var statMutex sync.Mutex
    var synckeys []key
    var syncsent int32
    type messageStat struct {
        size int
        keys []key
    }
    syncmessagesPerMessage := map[int32]*messageStat{}
    var bussendfailed int32
    busMock.SendFunc = func(ctx context.Context, msg core.Message, ops *core.MessageSendOptions) (core.Reply, error) {
        // fmt.Printf("got msg: %T (%s)\n", msg, msg.Type())
        heavymsg, ok := msg.(*message.HeavyPayload)
        if ok {
            // on the retry path the first send fails with a retryable error
            if withretry && atomic.AddInt32(&bussendfailed, 1) < 2 {
                return &reply.HeavyError{
                    SubType: reply.ErrHeavySyncInProgress,
                    Message: "retryable error",
                }, nil
            }

            syncsentNewVal := atomic.AddInt32(&syncsent, 1)
            var size int
            var keys []key

            for _, rec := range heavymsg.Records {
                keys = append(keys, rec.K)
                size += len(rec.K) + len(rec.V)
            }

            statMutex.Lock()
            synckeys = append(synckeys, keys...)
            syncmessagesPerMessage[syncsentNewVal] = &messageStat{
                size: size,
                keys: keys,
            }
            statMutex.Unlock()
        }
        return nil, nil
    }

    // build the PulseManager
    minretry := 20 * time.Millisecond
    kb := 1 << 10
    pmconf := configuration.PulseManager{
        HeavySyncEnabled:      true,
        HeavySyncMessageLimit: 2 * kb,
        HeavyBackoff: configuration.Backoff{
            Jitter: true,
            Min:    minretry,
            Max:    minretry * 2,
            Factor: 2,
        },
        SplitThreshold: 10 * 1000 * 1000,
    }
    pm := pulsemanager.NewPulseManager(
        configuration.Ledger{
            PulseManager:    pmconf,
            LightChainLimit: 10,
        },
    )
    pm.LR = lrMock
    pm.NodeNet = nodenetMock
    pm.Bus = busMock
    pm.JetCoordinator = jcMock
    pm.GIL = gilMock
    pm.JetStorage = s.jetStorage
    pm.Nodes = s.nodeAccessor
    pm.NodeSetter = s.nodeSetter
    pm.DBContext = s.db
    pm.PulseTracker = s.pulseTracker
    pm.ReplicaStorage = s.replicaStorage
    pm.StorageCleaner = s.storageCleaner
    pm.ObjectStorage = s.objectStorage
    pm.DropStorage = s.dropStorage

    ps := storage.NewPulseStorage()
    ps.PulseTracker = s.pulseTracker
    pm.PulseStorage = ps

    pm.HotDataWaiter = artifactmanager.NewHotDataWaiterConcrete()

    providerMock := recentstorage.NewProviderMock(s.T())
    providerMock.GetIndexStorageMock.Return(recentMock)
    providerMock.GetPendingStorageMock.Return(pendingStorageMock)
    providerMock.CloneIndexStorageMock.Return()
    providerMock.ClonePendingStorageMock.Return()
    providerMock.RemovePendingStorageMock.Return()
    providerMock.DecreaseIndexesTTLMock.Return(map[core.RecordID][]core.RecordID{})
    pm.RecentStorageProvider = providerMock

    pm.ActiveListSwapper = alsMock
    pm.CryptographyService = cryptoServiceMock
    pm.PlatformCryptographyScheme = cryptoScheme

    // Actual test logic
    // start the PulseManager
    err := pm.Start(s.ctx)
    assert.NoError(s.T(), err)

    // store the last pulse as light material and set the next one
    lastpulse := core.FirstPulseNumber + 1
    err = setpulse(s.ctx, pm, lastpulse)
    require.NoError(s.T(), err)

    for i := 0; i < 2; i++ {
        // fmt.Printf("%v: call addRecords for pulse %v\n", t.Name(), lastpulse)
        addRecords(s.ctx, s.T(), s.objectStorage, jetID, core.PulseNumber(lastpulse+i))
    }

    fmt.Println("Case 1: sync after DB fill and with newly received pulses")
    for i := 0; i < 2; i++ {
        lastpulse++
        err = setpulse(s.ctx, pm, lastpulse)
        require.NoError(s.T(), err)
    }

    fmt.Println("Case 2: sync during DB fill")
    for i := 0; i < 2; i++ {
        // fill the DB with records, blobs and indexes
        addRecords(s.ctx, s.T(), s.objectStorage, jetID, core.PulseNumber(lastpulse))

        lastpulse++
        err = setpulse(s.ctx, pm, lastpulse)
        require.NoError(s.T(), err)
    }
    // set the last pulse
    lastpulse++
    err = setpulse(s.ctx, pm, lastpulse)
    require.NoError(s.T(), err)

    // give the sync a chance to complete and to start the sync loop again
    time.Sleep(2 * minretry)

    err = pm.Stop(s.ctx)
    assert.NoError(s.T(), err)

    synckeys = uniqkeys(sortkeys(synckeys))

    recs := getallkeys(s.db.GetBadgerDB())
    recs = filterkeys(recs, func(k key) bool {
        return storage.Key(k).PulseNumber() != 0
    })

    require.Equal(s.T(), len(recs), len(synckeys), "synced keys count is the same as records count in storage")
    assert.Equal(s.T(), recs, synckeys, "synced keys are the same as records in storage")
}
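
// A minimal sketch of the retry contract exercised by the test above,
// assuming the reply.HeavyError semantics shown in busMock.SendFunc: the
// sender keeps re-sending while the heavy node answers with
// ErrHeavySyncInProgress. retrySend and its sendOnce parameter are
// hypothetical and only illustrate the backoff loop.
func retrySend(
    ctx context.Context,
    sendOnce func(context.Context) (core.Reply, error),
    backoff time.Duration,
    attempts int,
) (core.Reply, error) {
    var rep core.Reply
    var err error
    for i := 0; i < attempts; i++ {
        rep, err = sendOnce(ctx)
        // retry only on the "sync in progress" sub-type; any other reply is final
        if herr, ok := rep.(*reply.HeavyError); ok && herr.SubType == reply.ErrHeavySyncInProgress {
            time.Sleep(backoff)
            continue
        }
        break
    }
    return rep, err
}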

func setpulse(ctx context.Context, pm core.PulseManager, pulsenum int) error {
    return pm.Set(ctx, core.Pulse{PulseNumber: core.PulseNumber(pulsenum)}, true)
}

// addRecords stores a record, a blob and the record's index for the given jet
// and pulse.
func addRecords(
    ctx context.Context,
    t *testing.T,
    objectStorage storage.ObjectStorage,
    jetID core.RecordID,
    pn core.PulseNumber,
) {
    // set a record
    parentID, err := objectStorage.SetRecord(
        ctx,
        jetID,
        pn,
        &record.ObjectActivateRecord{
            SideEffectRecord: record.SideEffectRecord{
                Domain: testutils.RandomRef(),
            },
        },
    )
    require.NoError(t, err)

    _, err = objectStorage.SetBlob(ctx, jetID, pn, []byte("100500"))
    require.NoError(t, err)

    // set the record's index
    err = objectStorage.SetObjectIndex(ctx, jetID, parentID, &index.ObjectLifeline{
        LatestState: parentID,
    })
    require.NoError(t, err)
}

// Scope prefixes: the first byte of a storage key identifies the record type.
var (
    scopeIDLifeline = byte(1)
    scopeIDRecord   = byte(2)
    scopeIDJetDrop  = byte(3)
    scopeIDBlob     = byte(7)
)

type key []byte

func (k key) String() string {
    return storage.Key(k).String()
}

// getallkeys collects all record, jet drop, lifeline and blob keys that carry
// a non-zero pulse number.
func getallkeys(db *badger.DB) (records []key) {
    // a read-only transaction is sufficient for iteration
    txn := db.NewTransaction(false)
    defer txn.Discard()

    it := txn.NewIterator(badger.DefaultIteratorOptions)
    defer it.Close()
    for it.Rewind(); it.Valid(); it.Next() {
        item := it.Item()
        k := item.KeyCopy(nil)
        if storage.Key(k).PulseNumber() == 0 {
            continue
        }
        switch k[0] {
        case
            scopeIDRecord,
            scopeIDJetDrop,
            scopeIDLifeline,
            scopeIDBlob:
            records = append(records, k)
        }
    }
    return
}
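
// An equivalent read-only scan using badger's View helper; a sketch kept for
// illustration only, performing the same filtering as getallkeys above.
func getallkeysView(db *badger.DB) (records []key, err error) {
    err = db.View(func(txn *badger.Txn) error {
        it := txn.NewIterator(badger.DefaultIteratorOptions)
        defer it.Close()
        for it.Rewind(); it.Valid(); it.Next() {
            k := it.Item().KeyCopy(nil)
            if storage.Key(k).PulseNumber() == 0 {
                continue
            }
            switch k[0] {
            case scopeIDRecord, scopeIDJetDrop, scopeIDLifeline, scopeIDBlob:
                records = append(records, k)
            }
        }
        return nil
    })
    return records, err
}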

// printkeys is a debug helper that prints keys with their pulse numbers.
func printkeys(keys []key, prefix string) {
    for _, k := range keys {
        sk := storage.Key(k)
        fmt.Printf("%v%v (%v)\n", prefix, sk, sk.PulseNumber())
    }
}

func filterkeys(keys []key, check func(key) bool) (keyout []key) {
    for _, k := range keys {
        if check(k) {
            keyout = append(keyout, k)
        }
    }
    return
}

// uniqkeys drops duplicate keys, preserving first-occurrence order.
func uniqkeys(keys []key) (keyout []key) {
    uniq := map[string]bool{}
    for _, k := range keys {
        if uniq[string(k)] {
            continue
        }
        uniq[string(k)] = true
        keyout = append(keyout, k)
    }
    return
}

// sortkeys sorts keys byte-wise in place and returns the same slice.
func sortkeys(keys []key) []key {
    sort.Slice(keys, func(i, j int) bool {
        return bytes.Compare(keys[i], keys[j]) < 0
    })
    return keys
}
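
// A small sanity test (an addition, not in the original file) for the key
// helpers above: sortkeys orders byte-wise, uniqkeys drops duplicates. The
// byte values are arbitrary.
func TestKeyHelpers_SortAndDedup(t *testing.T) {
    keys := []key{{0x02}, {0x01}, {0x02}}
    got := uniqkeys(sortkeys(keys))
    require.Equal(t, []key{{0x01}, {0x02}}, got)
}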