github.com/status-im/status-go@v1.1.0/services/wallet/activity/session.go

package activity

import (
	"context"
	"errors"
	"strconv"
	"time"

	eth "github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/status-im/status-go/services/wallet/async"
	"github.com/status-im/status-go/services/wallet/common"
	"github.com/status-im/status-go/services/wallet/transfer"
	"github.com/status-im/status-go/services/wallet/walletevent"
	"github.com/status-im/status-go/transactions"
)

const nilStr = "nil"

type EntryIdentity struct {
	payloadType PayloadType
	transaction *transfer.TransactionIdentity
	id          common.MultiTransactionIDType
}

func (e EntryIdentity) same(a EntryIdentity) bool {
	// Guard against a one-sided nil transaction before dereferencing either side.
	return a.payloadType == e.payloadType &&
		((a.transaction == nil && e.transaction == nil) ||
			(a.transaction != nil && e.transaction != nil &&
				a.transaction.ChainID == e.transaction.ChainID &&
				a.transaction.Hash == e.transaction.Hash &&
				a.transaction.Address == e.transaction.Address)) &&
		a.id == e.id
}

func (e EntryIdentity) key() string {
	txID := nilStr
	if e.transaction != nil {
		txID = strconv.FormatUint(uint64(e.transaction.ChainID), 10) + e.transaction.Hash.Hex() + e.transaction.Address.Hex()
	}
	return strconv.Itoa(e.payloadType) + txID + strconv.FormatInt(int64(e.id), 16)
}

type SessionID int32

// Session stores the state of a filter session.
// The typical user flow is:
// 1. StartFilterSession to get a new SessionID and have the client notified with the current state
// 2. GetMoreForFilterSession at any time to get more entries after the first page
// 3. UpdateFilterForSession to update the filter and get the new state, or to clear the filter and get the newest entries
// 4. ResetFilterSession when the client receives a SessionUpdate with HasNewOnTop = true, to get the latest state
// 5. StopFilterSession to stop the session when it is no longer used (the user navigated away from the activity screens or changed addresses and chains)
type Session struct {
	id SessionID

	// Filter info
	//
	addresses []eth.Address
	chainIDs  []common.ChainID
	filter    Filter

	// model is a mirror of the data model the presentation layer has (sent by EventActivityFilteringDone)
	model []EntryIdentity
	// noFilterModel is a mirror of the data model the presentation layer has when the filter is empty
	noFilterModel map[string]EntryIdentity
	// new holds the new entries until the user requests an update by calling ResetFilterSession
	new []EntryIdentity
}

type EntryUpdate struct {
	Pos   int    `json:"pos"`
	Entry *Entry `json:"entry"`
}

// SessionUpdate payload for EventActivitySessionUpdated
type SessionUpdate struct {
	HasNewOnTop *bool           `json:"hasNewOnTop,omitempty"`
	New         []*EntryUpdate  `json:"new,omitempty"`
	Removed     []EntryIdentity `json:"removed,omitempty"`
}

type fullFilterParams struct {
	sessionID SessionID
	addresses []eth.Address
	chainIDs  []common.ChainID
	filter    Filter
}

func (s *Service) internalFilter(f fullFilterParams, offset int, count int, processResults func(entries []Entry) (offsetOverride int)) {
	s.scheduler.Enqueue(int32(f.sessionID), filterTask, func(ctx context.Context) (interface{}, error) {
		allAddresses := s.areAllAddresses(f.addresses)
		activities, err := getActivityEntries(ctx, s.getDeps(), f.addresses, allAddresses, f.chainIDs, f.filter, offset, count)
		return activities, err
	}, func(result interface{}, taskType async.TaskType, err error) {
		res := FilterResponse{
			ErrorCode: ErrorCodeFailed,
		}

		if errors.Is(err, context.Canceled) || errors.Is(err, async.ErrTaskOverwritten) {
			res.ErrorCode = ErrorCodeTaskCanceled
		} else if err == nil {
			activities := result.([]Entry)
			res.Activities = activities
			res.HasMore = len(activities) == count
			res.ErrorCode = ErrorCodeSuccess

			res.Offset = processResults(activities)
		}

		int32SessionID := int32(f.sessionID)
		sendResponseEvent(s.eventFeed, &int32SessionID, EventActivityFilteringDone, res, err)

		s.getActivityDetailsAsync(int32SessionID, res.Activities)
	})
}

// mirrorIdentities mirrors the identities of the given entries for later update checks
func mirrorIdentities(entries []Entry) []EntryIdentity {
	model := make([]EntryIdentity, 0, len(entries))
	for _, a := range entries {
		model = append(model, EntryIdentity{
			payloadType: a.payloadType,
			transaction: a.transaction,
			id:          a.id,
		})
	}
	return model
}

func (s *Service) internalFilterForSession(session *Session, firstPageCount int) {
	s.internalFilter(
		fullFilterParams{
			sessionID: session.id,
			addresses: session.addresses,
			chainIDs:  session.chainIDs,
			filter:    session.filter,
		},
		0,
		firstPageCount,
		func(entries []Entry) (offset int) {
			s.sessionsRWMutex.Lock()
			defer s.sessionsRWMutex.Unlock()

			session.model = mirrorIdentities(entries)

			return 0
		},
	)
}

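// StartFilterSession creates a new filter session for the given addresses, chains and
// filter, and asynchronously delivers the first page of entries through
// EventActivityFilteringDone. When this is the first active session it also subscribes
// to wallet events so that new activity can be detected and reported via SessionUpdate.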
func (s *Service) StartFilterSession(addresses []eth.Address, chainIDs []common.ChainID, filter Filter, firstPageCount int) SessionID {
	sessionID := s.nextSessionID()

	session := &Session{
		id: sessionID,

		addresses: addresses,
		chainIDs:  chainIDs,
		filter:    filter,

		model: make([]EntryIdentity, 0, firstPageCount),
	}

	s.sessionsRWMutex.Lock()
	subscribeToEvents := len(s.sessions) == 0

	s.sessions[sessionID] = session

	if subscribeToEvents {
		s.subscribeToEvents()
	}
	s.sessionsRWMutex.Unlock()

	s.internalFilterForSession(session, firstPageCount)

	return sessionID
}

// UpdateFilterForSession is to be called to update the filter of a specific session.
// After a non-empty filter has been set with this method, resetting the filter back to
// empty reports the entries that were not part of the previous unfiltered model with
// Entry.isNew = true.
func (s *Service) UpdateFilterForSession(id SessionID, filter Filter, firstPageCount int) error {
	s.sessionsRWMutex.RLock()
	session, found := s.sessions[id]
	if !found {
		s.sessionsRWMutex.RUnlock()
		return errors.New("session not found")
	}

	prevFilterEmpty := session.filter.IsEmpty()
	newFilterEmpty := filter.IsEmpty()
	s.sessionsRWMutex.RUnlock()

	s.sessionsRWMutex.Lock()

	session.new = nil

	session.filter = filter

	if prevFilterEmpty && !newFilterEmpty {
		// Session is moving from an empty to a non-empty filter.
		// Take a snapshot of the current model.
		session.noFilterModel = entryIdsToMap(session.model)

		session.model = make([]EntryIdentity, 0, firstPageCount)

		// In this case there is nothing to flag, so we request the first page.
		s.internalFilterForSession(session, firstPageCount)
	} else if !prevFilterEmpty && newFilterEmpty {
		// Session is moving from a non-empty to an empty filter.
		// In this case we need to flag all the new entries that are not in the noFilterModel.
		s.internalFilter(
			fullFilterParams{
				sessionID: session.id,
				addresses: session.addresses,
				chainIDs:  session.chainIDs,
				filter:    session.filter,
			},
			0,
			firstPageCount,
			func(entries []Entry) (offset int) {
				s.sessionsRWMutex.Lock()
				defer s.sessionsRWMutex.Unlock()

				// Mark new entries
				for i, a := range entries {
					_, found := session.noFilterModel[a.getIdentity().key()]
					entries[i].isNew = !found
				}

				// Mirror identities for update use
				session.model = mirrorIdentities(entries)
				session.noFilterModel = nil
				return 0
			},
		)
	} else {
		// Otherwise act as a normal filter update
		s.internalFilterForSession(session, firstPageCount)
	}
	s.sessionsRWMutex.Unlock()

	return nil
}

// ResetFilterSession is to be called when SessionUpdate.HasNewOnTop == true to
// update the client with the latest state, including the new entries on top
func (s *Service) ResetFilterSession(id SessionID, firstPageCount int) error {
	session, found := s.sessions[id]
	if !found {
		return errors.New("session not found")
	}

	s.internalFilter(
		fullFilterParams{
			sessionID: id,
			addresses: session.addresses,
			chainIDs:  session.chainIDs,
			filter:    session.filter,
		},
		0,
		firstPageCount,
		func(entries []Entry) (offset int) {
			s.sessionsRWMutex.Lock()
			defer s.sessionsRWMutex.Unlock()

			// Mark new entries
			newMap := entryIdsToMap(session.new)
			for i, a := range entries {
				_, isNew := newMap[a.getIdentity().key()]
				entries[i].isNew = isNew
			}
			session.new = nil

			if session.noFilterModel != nil {
				// Add the reported new entries to mark them as seen
				for _, a := range newMap {
					session.noFilterModel[a.key()] = a
				}
			}

			// Mirror client identities for checking updates
			session.model = mirrorIdentities(entries)

			return 0
		},
	)
	return nil
}

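// GetMoreForFilterSession requests the next page of entries for the session. The query
// offset skips the entries already mirrored in session.model plus the new-on-top entries
// accumulated in session.new that have not been delivered to the client yet.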
func (s *Service) GetMoreForFilterSession(id SessionID, pageCount int) error {
	session, found := s.sessions[id]
	if !found {
		return errors.New("session not found")
	}

	prevModelLen := len(session.model)
	s.internalFilter(
		fullFilterParams{
			sessionID: id,
			addresses: session.addresses,
			chainIDs:  session.chainIDs,
			filter:    session.filter,
		},
		prevModelLen+len(session.new),
		pageCount,
		func(entries []Entry) (offset int) {
			s.sessionsRWMutex.Lock()
			defer s.sessionsRWMutex.Unlock()

			// Mirror client identities for checking updates
			for _, a := range entries {
				session.model = append(session.model, EntryIdentity{
					payloadType: a.payloadType,
					transaction: a.transaction,
					id:          a.id,
				})
			}

			// Overwrite the offset to account for new entries
			return prevModelLen
		},
	)
	return nil
}

// subscribeToEvents should be called with sessionsRWMutex locked for writing
func (s *Service) subscribeToEvents() {
	s.ch = make(chan walletevent.Event, 100)
	s.subscriptions = s.eventFeed.Subscribe(s.ch)
	go s.processEvents()
}

// processEvents runs while at least one session is active; it debounces the relevant
// wallet events before triggering new-activity detection
func (s *Service) processEvents() {
	eventCount := 0
	lastUpdate := time.Now().UnixMilli()
	for event := range s.ch {
		if event.Type == transactions.EventPendingTransactionUpdate ||
			event.Type == transactions.EventPendingTransactionStatusChanged ||
			event.Type == transfer.EventNewTransfers {
			eventCount++
		}
		// debounce event updates
		if eventCount > 0 &&
			(time.Duration(time.Now().UnixMilli()-lastUpdate)*time.Millisecond) >= s.debounceDuration {
			s.detectNew(eventCount)
			eventCount = 0
			lastUpdate = time.Now().UnixMilli()
		}
	}
}

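// detectNew re-queries, for every active session, the first len(session.model)+changeCount
// entries and diffs them against the identities already known to the session
// (session.new followed by session.model). A contiguous run of unknown entries at the top
// is accumulated in session.new and reported through SessionUpdate.HasNewOnTop; unknown
// entries interleaved with known ones are inserted into session.model and reported as
// SessionUpdate.New items at their position. Removed entries reported by findUpdates are
// currently ignored here.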
func (s *Service) detectNew(changeCount int) {
	for sessionID := range s.sessions {
		session := s.sessions[sessionID]

		fetchLen := len(session.model) + changeCount
		allAddresses := s.areAllAddresses(session.addresses)
		activities, err := getActivityEntries(context.Background(), s.getDeps(), session.addresses, allAddresses, session.chainIDs, session.filter, 0, fetchLen)
		if err != nil {
			log.Error("Error getting activity entries", "error", err)
			continue
		}

		s.sessionsRWMutex.RLock()
		allData := append(session.new, session.model...)
		new, _ /*removed*/ := findUpdates(allData, activities)
		s.sessionsRWMutex.RUnlock()

		s.sessionsRWMutex.Lock()
		lastProcessed := -1
		onTop := true
		var mixed []*EntryUpdate
		for i, idRes := range new {
			// Detect on top
			if onTop {
				// mixedIdentityResult.newPos includes session.new, therefore compensate for it
				if ((idRes.newPos - len(session.new)) - lastProcessed) > 1 {
					// From now on the entries are no longer contiguous at the top but mixed in between existing entries
					onTop = false
					mixed = make([]*EntryUpdate, 0, len(new)-i)
				}
				lastProcessed = idRes.newPos
			}

			if onTop {
				if session.new == nil {
					session.new = make([]EntryIdentity, 0, len(new))
				}
				session.new = append(session.new, idRes.id)
			} else {
				modelPos := idRes.newPos - len(session.new)
				entry := activities[idRes.newPos]
				entry.isNew = true
				mixed = append(mixed, &EntryUpdate{
					Pos:   modelPos,
					Entry: &entry,
				})
				// Insert into the session model at index modelPos
				session.model = append(session.model[:modelPos], append([]EntryIdentity{{payloadType: entry.payloadType, transaction: entry.transaction, id: entry.id}}, session.model[modelPos:]...)...)
			}
		}

		s.sessionsRWMutex.Unlock()

		if len(session.new) > 0 || len(mixed) > 0 {
			go notify(s.eventFeed, sessionID, len(session.new) > 0, mixed)
		}
	}
}

func notify(eventFeed *event.Feed, id SessionID, hasNewOnTop bool, mixed []*EntryUpdate) {
	payload := SessionUpdate{
		New: mixed,
	}

	if hasNewOnTop {
		payload.HasNewOnTop = &hasNewOnTop
	}

	sendResponseEvent(eventFeed, (*int32)(&id), EventActivitySessionUpdated, payload, nil)
}

// unsubscribeFromEvents should be called with sessionsRWMutex locked for writing
func (s *Service) unsubscribeFromEvents() {
	s.subscriptions.Unsubscribe()
	close(s.ch)
	s.ch = nil
	s.subscriptions = nil
}

// StopFilterSession removes the session, unsubscribes from wallet events when it was the
// last active session, and cancels any pending or ongoing filter task.
func (s *Service) StopFilterSession(id SessionID) {
	s.sessionsRWMutex.Lock()
	delete(s.sessions, id)
	if len(s.sessions) == 0 {
		s.unsubscribeFromEvents()
	}
	s.sessionsRWMutex.Unlock()

	// Cancel any pending or ongoing task
	s.scheduler.Enqueue(int32(id), filterTask, func(ctx context.Context) (interface{}, error) {
		return nil, nil
	}, func(result interface{}, taskType async.TaskType, err error) {})
}

func (s *Service) getActivityDetailsAsync(requestID int32, entries []Entry) {
	if len(entries) == 0 {
		return
	}

	ctx := context.Background()

	go func() {
		activityData, err := s.getActivityDetails(ctx, entries)
		if len(activityData) != 0 {
			sendResponseEvent(s.eventFeed, &requestID, EventActivityFilteringUpdate, activityData, err)
		}
	}()
}

type mixedIdentityResult struct {
	newPos int
	id     EntryIdentity
}

func entryIdsToMap(ids []EntryIdentity) map[string]EntryIdentity {
	idsMap := make(map[string]EntryIdentity, len(ids))
	for _, id := range ids {
		idsMap[id.key()] = id
	}
	return idsMap
}

func entriesToMap(entries []Entry) map[string]Entry {
	entryMap := make(map[string]Entry, len(entries))
	for _, entry := range entries {
		updatedIdentity := entry.getIdentity()
		entryMap[updatedIdentity.key()] = entry
	}
	return entryMap
}

// findUpdates returns the changes in the updated entries compared to the known identities.
//
// It expects both identities and entries to be sorted by timestamp.
//
// The returned new holds the entries missing from identities, together with their position in updated.
// The returned removed holds the identities that are no longer present in updated.
//
// The implementation assumes that the relative order of entries does not change between the
// old state (identities) and the new state (updated); entries are only added or removed.
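//
// For example (illustrative, assuming newest-first ordering): with identities [B, C, D]
// and updated [X, B, C], findUpdates returns new = [{newPos: 0, id: X}] and removed = [D];
// with identities [A, C] and updated [A, B, C] it returns new = [{newPos: 1, id: B}] and
// removed stays empty.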
func findUpdates(identities []EntryIdentity, updated []Entry) (new []mixedIdentityResult, removed []EntryIdentity) {
	if len(updated) == 0 {
		return
	}

	idsMap := entryIdsToMap(identities)
	updatedMap := entriesToMap(updated)

	for newIndex, entry := range updated {
		id := entry.getIdentity()
		if _, found := idsMap[id.key()]; !found {
			new = append(new, mixedIdentityResult{
				newPos: newIndex,
				id:     id,
			})
		}

		if len(identities) > 0 && entry.getIdentity().same(identities[len(identities)-1]) {
			break
		}
	}

	// Detect removed entries
	for i := 0; i < len(identities); i++ {
		id := identities[i]
		if _, found := updatedMap[id.key()]; !found {
			removed = append(removed, id)
		}
	}
	return
}
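
// The sketch below is an illustrative, non-authoritative example of how a client of this
// package might drive a filter session end to end, following the flow documented on
// Session. The function name, the placeholder address, chain ID and page size are
// assumptions, as is treating the zero-value Filter as the empty filter. The first page,
// filter results and SessionUpdate notifications arrive asynchronously on the wallet
// event feed (EventActivityFilteringDone / EventActivitySessionUpdated) and are not
// handled here.
func exampleSessionFlow(s *Service) {
	addresses := []eth.Address{eth.HexToAddress("0x0000000000000000000000000000000000000001")} // placeholder address
	chainIDs := []common.ChainID{common.ChainID(1)}                                            // placeholder chain
	firstPageCount := 20

	// 1. Start a session; the first page is delivered via EventActivityFilteringDone.
	id := s.StartFilterSession(addresses, chainIDs, Filter{}, firstPageCount)

	// 2. Request more entries after the first page.
	_ = s.GetMoreForFilterSession(id, firstPageCount)

	// 3. Optionally update the filter for the session; clearing a previously non-empty
	//    filter flags entries missing from the unfiltered snapshot with isNew = true.
	_ = s.UpdateFilterForSession(id, Filter{}, firstPageCount)

	// 4. When a SessionUpdate with HasNewOnTop = true is received, fetch the latest state.
	_ = s.ResetFilterSession(id, firstPageCount)

	// 5. Stop the session when it is no longer needed.
	s.StopFilterSession(id)
}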