github.com/cs3org/reva/v2@v2.27.7/pkg/share/manager/jsoncs3/jsoncs3.go (about) 1 // Copyright 2018-2021 CERN 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 // 15 // In applying this license, CERN does not waive the privileges and immunities 16 // granted to it by virtue of its status as an Intergovernmental Organization 17 // or submit itself to any jurisdiction. 18 19 package jsoncs3 20 21 import ( 22 "context" 23 "strings" 24 "sync" 25 "time" 26 27 gatewayv1beta1 "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1" 28 userv1beta1 "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1" 29 rpcv1beta1 "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1" 30 collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1" 31 provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1" 32 "github.com/cs3org/reva/v2/pkg/appctx" 33 ctxpkg "github.com/cs3org/reva/v2/pkg/ctx" 34 "github.com/cs3org/reva/v2/pkg/errtypes" 35 "github.com/cs3org/reva/v2/pkg/events" 36 "github.com/cs3org/reva/v2/pkg/events/stream" 37 "github.com/cs3org/reva/v2/pkg/logger" 38 "github.com/cs3org/reva/v2/pkg/rgrpc/todo/pool" 39 "github.com/cs3org/reva/v2/pkg/share" 40 "github.com/cs3org/reva/v2/pkg/share/manager/jsoncs3/providercache" 41 "github.com/cs3org/reva/v2/pkg/share/manager/jsoncs3/receivedsharecache" 42 "github.com/cs3org/reva/v2/pkg/share/manager/jsoncs3/sharecache" 43 "github.com/cs3org/reva/v2/pkg/share/manager/jsoncs3/shareid" 44 "github.com/cs3org/reva/v2/pkg/share/manager/registry" 45 "github.com/cs3org/reva/v2/pkg/storage/utils/metadata" // nolint:staticcheck // we need the legacy package to convert V1 to V2 messages 46 "github.com/cs3org/reva/v2/pkg/storagespace" 47 "github.com/cs3org/reva/v2/pkg/utils" 48 "github.com/google/uuid" 49 "github.com/mitchellh/mapstructure" 50 "github.com/pkg/errors" 51 "go.opentelemetry.io/otel/codes" 52 "golang.org/x/sync/errgroup" 53 "google.golang.org/genproto/protobuf/field_mask" 54 "google.golang.org/protobuf/types/known/fieldmaskpb" 55 ) 56 57 /* 58 The sharded json driver splits the json file per storage space. Similar to fileids shareids are prefixed with the spaceid for easier lookup. 59 In addition to the space json the share manager keeps lists for users and groups to cache their lists of created and received shares 60 and to hold the state of received shares. 61 62 FAQ 63 Q: Why not split shares by user and have a list per user? 64 A: While shares are created by users, they are persisted as grants on a file. 65 If we persist shares by their creator/owner they would vanish if a user is deprovisioned: shares 66 in project spaces could not be managed collaboratively. 67 By splitting by space, we are in fact not only splitting by user, but more granular, per space. 68 69 70 File structure in the jsoncs3 space: 71 72 /storages/{storageid}/{spaceid.json} // contains the share information of all shares in that space 73 /users/{userid}/created.json // points to the spaces the user created shares in, including the list of shares 74 /users/{userid}/received.json // holds the accepted/pending state and mount point of received shares for users 75 /groups/{groupid}/received.json // points to the spaces the group has received shares in including the list of shares 76 77 Example: 78 ├── groups 79 │ └── group1 80 │ └── received.json 81 ├── storages 82 │ └── storageid 83 │ └── spaceid.json 84 └── users 85 ├── admin 86 │ └── created.json 87 └── einstein 88 └── received.json 89 90 Whenever a share is created, the share manager has to 91 1. update the /storages/{storageid}/{spaceid}.json file, 92 2. create /users/{userid}/created.json if it doesn't exist yet and add the space/share 93 3. create /users/{userid}/received.json or /groups/{groupid}/received.json if it doesn exist yet and add the space/share 94 95 When updating shares /storages/{storageid}/{spaceid}.json is updated accordingly. The etag is used to invalidate in-memory caches: 96 - TODO the upload is tried with an if-unmodified-since header 97 - TODO when if fails, the {spaceid}.json file is downloaded, the changes are reapplied and the upload is retried with the new etag 98 99 When updating received shares the mountpoint and state are updated in /users/{userid}/received.json (for both user and group shares). 100 101 When reading the list of received shares the /users/{userid}/received.json file and the /groups/{groupid}/received.json files are statted. 102 - if the etag changed we download the file to update the local cache 103 104 When reading the list of created shares the /users/{userid}/created.json file is statted 105 - if the etag changed we download the file to update the local cache 106 */ 107 108 // TODO implement a channel based aggregation of sharing requests: every in memory cache should read as many share updates to a space that are available and update them all in one go 109 // whenever a persist operation fails we check if we can read more shares from the channel 110 111 // name is the Tracer name used to identify this instrumentation library. 112 const tracerName = "jsoncs3" 113 114 func init() { 115 registry.Register("jsoncs3", NewDefault) 116 } 117 118 var ( 119 _registeredEvents = []events.Unmarshaller{ 120 events.SpaceDeleted{}, 121 } 122 ) 123 124 type config struct { 125 GatewayAddr string `mapstructure:"gateway_addr"` 126 MaxConcurrency int `mapstructure:"max_concurrency"` 127 ProviderAddr string `mapstructure:"provider_addr"` 128 ServiceUserID string `mapstructure:"service_user_id"` 129 ServiceUserIdp string `mapstructure:"service_user_idp"` 130 MachineAuthAPIKey string `mapstructure:"machine_auth_apikey"` 131 CacheTTL int `mapstructure:"ttl"` 132 Events EventOptions `mapstructure:"events"` 133 } 134 135 // EventOptions are the configurable options for events 136 type EventOptions struct { 137 Endpoint string `mapstructure:"natsaddress"` 138 Cluster string `mapstructure:"natsclusterid"` 139 TLSInsecure bool `mapstructure:"tlsinsecure"` 140 TLSRootCACertificate string `mapstructure:"tlsrootcacertificate"` 141 EnableTLS bool `mapstructure:"enabletls"` 142 AuthUsername string `mapstructure:"authusername"` 143 AuthPassword string `mapstructure:"authpassword"` 144 } 145 146 // Manager implements a share manager using a cs3 storage backend with local caching 147 type Manager struct { 148 sync.RWMutex 149 150 Cache providercache.Cache // holds all shares, sharded by provider id and space id 151 CreatedCache sharecache.Cache // holds the list of shares a user has created, sharded by user id 152 GroupReceivedCache sharecache.Cache // holds the list of shares a group has access to, sharded by group id 153 UserReceivedStates receivedsharecache.Cache // holds the state of shares a user has received, sharded by user id 154 155 storage metadata.Storage 156 SpaceRoot *provider.ResourceId 157 158 initialized bool 159 160 MaxConcurrency int 161 162 gatewaySelector pool.Selectable[gatewayv1beta1.GatewayAPIClient] 163 eventStream events.Stream 164 } 165 166 // NewDefault returns a new manager instance with default dependencies 167 func NewDefault(m map[string]interface{}) (share.Manager, error) { 168 c := &config{} 169 if err := mapstructure.Decode(m, c); err != nil { 170 err = errors.Wrap(err, "error creating a new manager") 171 return nil, err 172 } 173 174 s, err := metadata.NewCS3Storage(c.ProviderAddr, c.ProviderAddr, c.ServiceUserID, c.ServiceUserIdp, c.MachineAuthAPIKey) 175 if err != nil { 176 return nil, err 177 } 178 179 gatewaySelector, err := pool.GatewaySelector(c.GatewayAddr) 180 if err != nil { 181 return nil, err 182 } 183 184 var es events.Stream 185 if c.Events.Endpoint != "" { 186 es, err = stream.NatsFromConfig("jsoncs3-share-manager", false, stream.NatsConfig(c.Events)) 187 if err != nil { 188 return nil, err 189 } 190 } 191 192 return New(s, gatewaySelector, c.CacheTTL, es, c.MaxConcurrency) 193 } 194 195 // New returns a new manager instance. 196 func New(s metadata.Storage, gatewaySelector pool.Selectable[gatewayv1beta1.GatewayAPIClient], ttlSeconds int, es events.Stream, maxconcurrency int) (*Manager, error) { 197 ttl := time.Duration(ttlSeconds) * time.Second 198 199 m := &Manager{ 200 Cache: providercache.New(s, ttl), 201 CreatedCache: sharecache.New(s, "users", "created.json", ttl), 202 UserReceivedStates: receivedsharecache.New(s, ttl), 203 GroupReceivedCache: sharecache.New(s, "groups", "received.json", ttl), 204 storage: s, 205 gatewaySelector: gatewaySelector, 206 eventStream: es, 207 MaxConcurrency: maxconcurrency, 208 } 209 210 // listen for events 211 if m.eventStream != nil { 212 ch, err := events.Consume(m.eventStream, "jsoncs3sharemanager", _registeredEvents...) 213 if err != nil { 214 appctx.GetLogger(context.Background()).Error().Err(err).Msg("error consuming events") 215 } 216 go m.ProcessEvents(ch) 217 } 218 219 return m, nil 220 } 221 222 func (m *Manager) initialize(ctx context.Context) error { 223 _, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "initialize") 224 defer span.End() 225 if m.initialized { 226 span.SetStatus(codes.Ok, "already initialized") 227 return nil 228 } 229 230 m.Lock() 231 defer m.Unlock() 232 233 if m.initialized { // check if initialization happened while grabbing the lock 234 span.SetStatus(codes.Ok, "initialized while grabbing lock") 235 return nil 236 } 237 238 ctx = context.Background() 239 err := m.storage.Init(ctx, "jsoncs3-share-manager-metadata") 240 if err != nil { 241 span.RecordError(err) 242 span.SetStatus(codes.Error, err.Error()) 243 return err 244 } 245 246 err = m.storage.MakeDirIfNotExist(ctx, "storages") 247 if err != nil { 248 span.RecordError(err) 249 span.SetStatus(codes.Error, err.Error()) 250 return err 251 } 252 err = m.storage.MakeDirIfNotExist(ctx, "users") 253 if err != nil { 254 span.RecordError(err) 255 span.SetStatus(codes.Error, err.Error()) 256 return err 257 } 258 err = m.storage.MakeDirIfNotExist(ctx, "groups") 259 if err != nil { 260 span.RecordError(err) 261 span.SetStatus(codes.Error, err.Error()) 262 return err 263 } 264 265 m.initialized = true 266 span.SetStatus(codes.Ok, "initialized") 267 return nil 268 } 269 270 func (m *Manager) ProcessEvents(ch <-chan events.Event) { 271 log := logger.New() 272 for event := range ch { 273 ctx := context.Background() 274 275 if err := m.initialize(ctx); err != nil { 276 log.Error().Err(err).Msg("error initializing manager") 277 } 278 279 if ev, ok := event.Event.(events.SpaceDeleted); ok { 280 log.Debug().Msgf("space deleted event: %v", ev) 281 go func() { m.purgeSpace(ctx, ev.ID) }() 282 } 283 } 284 } 285 286 // Share creates a new share 287 func (m *Manager) Share(ctx context.Context, md *provider.ResourceInfo, g *collaboration.ShareGrant) (*collaboration.Share, error) { 288 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Share") 289 defer span.End() 290 if err := m.initialize(ctx); err != nil { 291 span.RecordError(err) 292 span.SetStatus(codes.Error, err.Error()) 293 return nil, err 294 } 295 296 user := ctxpkg.ContextMustGetUser(ctx) 297 ts := utils.TSNow() 298 299 // do not allow share to myself or the owner if share is for a user 300 // TODO: should this not already be caught at the gw level? 301 if g.Grantee.Type == provider.GranteeType_GRANTEE_TYPE_USER && 302 (utils.UserEqual(g.Grantee.GetUserId(), user.Id) || utils.UserEqual(g.Grantee.GetUserId(), md.Owner)) { 303 err := errtypes.BadRequest("jsoncs3: owner/creator and grantee are the same") 304 span.RecordError(err) 305 span.SetStatus(codes.Error, err.Error()) 306 return nil, err 307 } 308 309 // check if share already exists. 310 key := &collaboration.ShareKey{ 311 // Owner: md.Owner, owner no longer matters as it belongs to the space 312 ResourceId: md.Id, 313 Grantee: g.Grantee, 314 } 315 316 _, err := m.getByKey(ctx, key) 317 if err == nil { 318 // share already exists 319 err := errtypes.AlreadyExists(key.String()) 320 span.RecordError(err) 321 span.SetStatus(codes.Error, err.Error()) 322 return nil, err 323 } 324 325 shareID := shareid.Encode(md.GetId().GetStorageId(), md.GetId().GetSpaceId(), uuid.NewString()) 326 s := &collaboration.Share{ 327 Id: &collaboration.ShareId{ 328 OpaqueId: shareID, 329 }, 330 ResourceId: md.Id, 331 Permissions: g.Permissions, 332 Grantee: g.Grantee, 333 Expiration: g.Expiration, 334 Owner: md.Owner, 335 Creator: user.Id, 336 Ctime: ts, 337 Mtime: ts, 338 } 339 340 eg, ctx := errgroup.WithContext(ctx) 341 342 eg.Go(func() error { 343 err := m.Cache.Add(ctx, md.Id.StorageId, md.Id.SpaceId, shareID, s) 344 if err != nil { 345 span.RecordError(err) 346 span.SetStatus(codes.Error, err.Error()) 347 } 348 349 return err 350 }) 351 352 eg.Go(func() error { 353 err := m.CreatedCache.Add(ctx, s.GetCreator().GetOpaqueId(), shareID) 354 if err != nil { 355 span.RecordError(err) 356 span.SetStatus(codes.Error, err.Error()) 357 } 358 359 return err 360 }) 361 362 spaceID := md.Id.StorageId + shareid.IDDelimiter + md.Id.SpaceId 363 // set flag for grantee to have access to share 364 switch g.Grantee.Type { 365 case provider.GranteeType_GRANTEE_TYPE_USER: 366 eg.Go(func() error { 367 userid := g.Grantee.GetUserId().GetOpaqueId() 368 369 rs := &collaboration.ReceivedShare{ 370 Share: s, 371 State: collaboration.ShareState_SHARE_STATE_PENDING, 372 } 373 err := m.UserReceivedStates.Add(ctx, userid, spaceID, rs) 374 if err != nil { 375 span.RecordError(err) 376 span.SetStatus(codes.Error, err.Error()) 377 } 378 379 return err 380 }) 381 case provider.GranteeType_GRANTEE_TYPE_GROUP: 382 eg.Go(func() error { 383 groupid := g.Grantee.GetGroupId().GetOpaqueId() 384 err := m.GroupReceivedCache.Add(ctx, groupid, shareID) 385 if err != nil { 386 span.RecordError(err) 387 span.SetStatus(codes.Error, err.Error()) 388 } 389 390 return err 391 }) 392 } 393 394 if err = eg.Wait(); err != nil { 395 return nil, err 396 } 397 398 span.SetStatus(codes.Ok, "") 399 400 return s, nil 401 } 402 403 // getByID must be called in a lock-controlled block. 404 func (m *Manager) getByID(ctx context.Context, id *collaboration.ShareId) (*collaboration.Share, error) { 405 storageID, spaceID, _ := shareid.Decode(id.OpaqueId) 406 407 share, err := m.Cache.Get(ctx, storageID, spaceID, id.OpaqueId, false) 408 if err != nil { 409 return nil, err 410 } 411 if share == nil { 412 return nil, errtypes.NotFound(id.String()) 413 } 414 return share, nil 415 } 416 417 // getByKey must be called in a lock-controlled block. 418 func (m *Manager) getByKey(ctx context.Context, key *collaboration.ShareKey) (*collaboration.Share, error) { 419 spaceShares, err := m.Cache.ListSpace(ctx, key.ResourceId.StorageId, key.ResourceId.SpaceId) 420 if err != nil { 421 return nil, err 422 } 423 for _, share := range spaceShares.Shares { 424 if utils.GranteeEqual(key.Grantee, share.Grantee) && utils.ResourceIDEqual(share.ResourceId, key.ResourceId) { 425 return share, nil 426 } 427 } 428 return nil, errtypes.NotFound(key.String()) 429 } 430 431 // get must be called in a lock-controlled block. 432 func (m *Manager) get(ctx context.Context, ref *collaboration.ShareReference) (s *collaboration.Share, err error) { 433 switch { 434 case ref.GetId() != nil: 435 s, err = m.getByID(ctx, ref.GetId()) 436 case ref.GetKey() != nil: 437 s, err = m.getByKey(ctx, ref.GetKey()) 438 default: 439 err = errtypes.NotFound(ref.String()) 440 } 441 return 442 } 443 444 // GetShare gets the information for a share by the given ref. 445 func (m *Manager) GetShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.Share, error) { 446 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "GetShare") 447 defer span.End() 448 sublog := appctx.GetLogger(ctx).With().Str("id", ref.GetId().GetOpaqueId()).Str("key", ref.GetKey().String()).Str("driver", "jsoncs3").Str("handler", "GetShare").Logger() 449 if err := m.initialize(ctx); err != nil { 450 return nil, err 451 } 452 453 s, err := m.get(ctx, ref) 454 if err != nil { 455 return nil, err 456 } 457 if share.IsExpired(s) { 458 if err := m.removeShare(ctx, s, false); err != nil { 459 sublog.Error().Err(err). 460 Msg("failed to unshare expired share") 461 } 462 if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ 463 ShareID: s.GetId(), 464 ShareOwner: s.GetOwner(), 465 ItemID: s.GetResourceId(), 466 ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), 467 GranteeUserID: s.GetGrantee().GetUserId(), 468 GranteeGroupID: s.GetGrantee().GetGroupId(), 469 }); err != nil { 470 sublog.Error().Err(err). 471 Msg("failed to publish share expired event") 472 } 473 } 474 // check if we are the creator or the grantee 475 // TODO allow manager to get shares in a space created by other users 476 user := ctxpkg.ContextMustGetUser(ctx) 477 if share.IsCreatedByUser(s, user) || share.IsGrantedToUser(s, user) { 478 return s, nil 479 } 480 481 req := &provider.StatRequest{ 482 Ref: &provider.Reference{ResourceId: s.ResourceId}, 483 FieldMask: &fieldmaskpb.FieldMask{ 484 Paths: []string{"permissions"}, 485 }, 486 } 487 client, err := m.gatewaySelector.Next() 488 if err != nil { 489 return nil, err 490 } 491 res, err := client.Stat(ctx, req) 492 if err == nil && 493 res.Status.Code == rpcv1beta1.Code_CODE_OK && 494 res.Info.PermissionSet.ListGrants { 495 return s, nil 496 } 497 498 // we return not found to not disclose information 499 return nil, errtypes.NotFound(ref.String()) 500 } 501 502 // Unshare deletes a share 503 func (m *Manager) Unshare(ctx context.Context, ref *collaboration.ShareReference) error { 504 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "Unshare") 505 defer span.End() 506 507 if err := m.initialize(ctx); err != nil { 508 return err 509 } 510 511 user := ctxpkg.ContextMustGetUser(ctx) 512 513 s, err := m.get(ctx, ref) 514 if err != nil { 515 return err 516 } 517 // TODO allow manager to unshare shares in a space created by other users 518 if !share.IsCreatedByUser(s, user) { 519 // TODO why not permission denied? 520 return errtypes.NotFound(ref.String()) 521 } 522 523 return m.removeShare(ctx, s, false) 524 } 525 526 // UpdateShare updates the mode of the given share. 527 func (m *Manager) UpdateShare(ctx context.Context, ref *collaboration.ShareReference, p *collaboration.SharePermissions, updated *collaboration.Share, fieldMask *field_mask.FieldMask) (*collaboration.Share, error) { 528 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "UpdateShare") 529 defer span.End() 530 531 if err := m.initialize(ctx); err != nil { 532 return nil, err 533 } 534 535 var toUpdate *collaboration.Share 536 537 if ref != nil { 538 var err error 539 toUpdate, err = m.get(ctx, ref) 540 if err != nil { 541 return nil, err 542 } 543 } else if updated != nil { 544 var err error 545 toUpdate, err = m.getByID(ctx, updated.Id) 546 if err != nil { 547 return nil, err 548 } 549 } 550 551 if fieldMask != nil { 552 for i := range fieldMask.Paths { 553 switch fieldMask.Paths[i] { 554 case "permissions": 555 toUpdate.Permissions = updated.Permissions 556 case "expiration": 557 toUpdate.Expiration = updated.Expiration 558 default: 559 return nil, errtypes.NotSupported("updating " + fieldMask.Paths[i] + " is not supported") 560 } 561 } 562 } 563 564 user := ctxpkg.ContextMustGetUser(ctx) 565 if !share.IsCreatedByUser(toUpdate, user) { 566 req := &provider.StatRequest{ 567 Ref: &provider.Reference{ResourceId: toUpdate.ResourceId}, 568 FieldMask: &fieldmaskpb.FieldMask{ 569 Paths: []string{"permissions"}, 570 }, 571 } 572 client, err := m.gatewaySelector.Next() 573 if err != nil { 574 return nil, err 575 } 576 res, err := client.Stat(ctx, req) 577 if err != nil || 578 res.Status.Code != rpcv1beta1.Code_CODE_OK || 579 !res.Info.PermissionSet.UpdateGrant { 580 return nil, errtypes.NotFound(ref.String()) 581 } 582 } 583 584 if p != nil { 585 toUpdate.Permissions = p 586 } 587 toUpdate.Mtime = utils.TSNow() 588 589 // Update provider cache 590 unlock := m.Cache.LockSpace(toUpdate.ResourceId.SpaceId) 591 defer unlock() 592 err := m.Cache.Persist(ctx, toUpdate.ResourceId.StorageId, toUpdate.ResourceId.SpaceId) 593 // when persisting fails 594 if _, ok := err.(errtypes.IsPreconditionFailed); ok { 595 // reupdate 596 toUpdate, err = m.get(ctx, ref) // does an implicit sync 597 if err != nil { 598 return nil, err 599 } 600 toUpdate.Permissions = p 601 toUpdate.Mtime = utils.TSNow() 602 603 // persist again 604 err = m.Cache.Persist(ctx, toUpdate.ResourceId.StorageId, toUpdate.ResourceId.SpaceId) 605 // TODO try more often? 606 } 607 if err != nil { 608 return nil, err 609 } 610 611 return toUpdate, nil 612 } 613 614 // ListShares returns the shares created by the user 615 func (m *Manager) ListShares(ctx context.Context, filters []*collaboration.Filter) ([]*collaboration.Share, error) { 616 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "ListShares") 617 defer span.End() 618 619 if err := m.initialize(ctx); err != nil { 620 return nil, err 621 } 622 623 user := ctxpkg.ContextMustGetUser(ctx) 624 625 if len(share.FilterFiltersByType(filters, collaboration.Filter_TYPE_RESOURCE_ID)) > 0 { 626 return m.listSharesByIDs(ctx, user, filters) 627 } 628 629 return m.listCreatedShares(ctx, user, filters) 630 } 631 632 func (m *Manager) listSharesByIDs(ctx context.Context, user *userv1beta1.User, filters []*collaboration.Filter) ([]*collaboration.Share, error) { 633 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "listSharesByIDs") 634 defer span.End() 635 sublog := appctx.GetLogger(ctx).With().Str("userid", user.GetId().GetOpaqueId()).Str("useridp", user.GetId().GetIdp()).Str("driver", "jsoncs3").Str("handler", "listSharesByIDs").Logger() 636 637 providerSpaces := make(map[string]map[string]struct{}) 638 for _, f := range share.FilterFiltersByType(filters, collaboration.Filter_TYPE_RESOURCE_ID) { 639 storageID := f.GetResourceId().GetStorageId() 640 spaceID := f.GetResourceId().GetSpaceId() 641 if providerSpaces[storageID] == nil { 642 providerSpaces[storageID] = make(map[string]struct{}) 643 } 644 providerSpaces[storageID][spaceID] = struct{}{} 645 } 646 647 statCache := make(map[string]struct{}) 648 var ss []*collaboration.Share 649 for providerID, spaces := range providerSpaces { 650 for spaceID := range spaces { 651 shares, err := m.Cache.ListSpace(ctx, providerID, spaceID) 652 if err != nil { 653 return nil, err 654 } 655 656 for _, s := range shares.Shares { 657 resourceID := s.GetResourceId() 658 sublog = sublog.With().Str("storageid", resourceID.GetStorageId()).Str("spaceid", resourceID.GetSpaceId()).Str("opaqueid", resourceID.GetOpaqueId()).Logger() 659 if share.IsExpired(s) { 660 if err := m.removeShare(ctx, s, false); err != nil { 661 sublog.Error().Err(err). 662 Msg("failed to unshare expired share") 663 } 664 if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ 665 ShareOwner: s.GetOwner(), 666 ItemID: resourceID, 667 ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), 668 GranteeUserID: s.GetGrantee().GetUserId(), 669 GranteeGroupID: s.GetGrantee().GetGroupId(), 670 }); err != nil { 671 sublog.Error().Err(err). 672 Msg("failed to publish share expired event") 673 } 674 continue 675 } 676 if !share.MatchesFilters(s, filters) { 677 continue 678 } 679 680 if !(share.IsCreatedByUser(s, user) || share.IsGrantedToUser(s, user)) { 681 key := storagespace.FormatResourceID(resourceID) 682 if _, hit := statCache[key]; !hit { 683 req := &provider.StatRequest{ 684 Ref: &provider.Reference{ResourceId: resourceID}, 685 FieldMask: &fieldmaskpb.FieldMask{ 686 Paths: []string{"permissions"}, 687 }, 688 } 689 client, err := m.gatewaySelector.Next() 690 if err != nil { 691 sublog.Error().Err(err).Msg("failed to select next gateway client") 692 continue 693 } 694 res, err := client.Stat(ctx, req) 695 if err != nil { 696 sublog.Error().Err(err).Msg("failed to make stat call") 697 continue 698 } 699 if res.Status.Code != rpcv1beta1.Code_CODE_OK { 700 sublog.Debug().Str("code", res.GetStatus().GetCode().String()).Msg(res.GetStatus().GetMessage()) 701 continue 702 } 703 if !res.Info.PermissionSet.ListGrants { 704 sublog.Debug().Msg("user has no list grants permission") 705 continue 706 } 707 sublog.Debug().Msg("listing share for non participating user") 708 statCache[key] = struct{}{} 709 } 710 } 711 712 ss = append(ss, s) 713 } 714 } 715 } 716 span.SetStatus(codes.Ok, "") 717 return ss, nil 718 } 719 720 func (m *Manager) listCreatedShares(ctx context.Context, user *userv1beta1.User, filters []*collaboration.Filter) ([]*collaboration.Share, error) { 721 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "listCreatedShares") 722 defer span.End() 723 sublog := appctx.GetLogger(ctx).With().Str("userid", user.GetId().GetOpaqueId()).Str("useridp", user.GetId().GetIdp()).Str("driver", "jsoncs3").Str("handler", "listCreatedShares").Logger() 724 725 list, err := m.CreatedCache.List(ctx, user.Id.OpaqueId) 726 if err != nil { 727 span.RecordError(err) 728 span.SetStatus(codes.Error, err.Error()) 729 return nil, err 730 } 731 732 numWorkers := m.MaxConcurrency 733 if numWorkers == 0 || len(list) < numWorkers { 734 numWorkers = len(list) 735 } 736 737 type w struct { 738 ssid string 739 ids sharecache.SpaceShareIDs 740 } 741 work := make(chan w) 742 results := make(chan *collaboration.Share) 743 744 g, ctx := errgroup.WithContext(ctx) 745 746 // Distribute work 747 g.Go(func() error { 748 defer close(work) 749 for ssid, ids := range list { 750 select { 751 case work <- w{ssid, ids}: 752 case <-ctx.Done(): 753 return ctx.Err() 754 } 755 } 756 return nil 757 }) 758 // Spawn workers that'll concurrently work the queue 759 for i := 0; i < numWorkers; i++ { 760 g.Go(func() error { 761 for w := range work { 762 storageID, spaceID, _ := shareid.Decode(w.ssid) 763 // fetch all shares from space with one request 764 _, err := m.Cache.ListSpace(ctx, storageID, spaceID) 765 if err != nil { 766 sublog.Error().Err(err). 767 Str("storageid", storageID). 768 Str("spaceid", spaceID). 769 Msg("failed to list shares in space") 770 continue 771 } 772 for shareID := range w.ids.IDs { 773 s, err := m.Cache.Get(ctx, storageID, spaceID, shareID, true) 774 if err != nil || s == nil { 775 continue 776 } 777 if share.IsExpired(s) { 778 if err := m.removeShare(ctx, s, false); err != nil { 779 sublog.Error().Err(err). 780 Msg("failed to unshare expired share") 781 } 782 if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ 783 ShareOwner: s.GetOwner(), 784 ItemID: s.GetResourceId(), 785 ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), 786 GranteeUserID: s.GetGrantee().GetUserId(), 787 GranteeGroupID: s.GetGrantee().GetGroupId(), 788 }); err != nil { 789 sublog.Error().Err(err). 790 Msg("failed to publish share expired event") 791 } 792 continue 793 } 794 if utils.UserEqual(user.GetId(), s.GetCreator()) { 795 if share.MatchesFilters(s, filters) { 796 select { 797 case results <- s: 798 case <-ctx.Done(): 799 return ctx.Err() 800 } 801 } 802 } 803 } 804 } 805 return nil 806 }) 807 } 808 809 // Wait for things to settle down, then close results chan 810 go func() { 811 _ = g.Wait() // error is checked later 812 close(results) 813 }() 814 815 ss := []*collaboration.Share{} 816 for n := range results { 817 ss = append(ss, n) 818 } 819 820 if err := g.Wait(); err != nil { 821 span.RecordError(err) 822 span.SetStatus(codes.Error, err.Error()) 823 return nil, err 824 } 825 826 span.SetStatus(codes.Ok, "") 827 return ss, nil 828 } 829 830 // ListReceivedShares returns the list of shares the user has access to. 831 func (m *Manager) ListReceivedShares(ctx context.Context, filters []*collaboration.Filter, forUser *userv1beta1.UserId) ([]*collaboration.ReceivedShare, error) { 832 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "ListReceivedShares") 833 defer span.End() 834 sublog := appctx.GetLogger(ctx).With().Str("driver", "jsoncs3").Str("handler", "ListReceivedShares").Logger() 835 836 if err := m.initialize(ctx); err != nil { 837 return nil, err 838 } 839 840 user := ctxpkg.ContextMustGetUser(ctx) 841 if user.GetId().GetType() == userv1beta1.UserType_USER_TYPE_SERVICE { 842 client, err := m.gatewaySelector.Next() 843 if err != nil { 844 return nil, err 845 } 846 u, err := utils.GetUser(forUser, client) 847 if err != nil { 848 return nil, errtypes.BadRequest("user not found") 849 } 850 user = u 851 } 852 853 ssids := map[string]*receivedsharecache.Space{} 854 855 // first collect all spaceids the user has access to as a group member 856 for _, group := range user.Groups { 857 list, err := m.GroupReceivedCache.List(ctx, group) 858 if err != nil { 859 continue // ignore error, cache will be updated on next read 860 } 861 for ssid, spaceShareIDs := range list { 862 // add a pending entry, the state will be updated 863 // when reading the received shares below if they have already been accepted or denied 864 var rs *receivedsharecache.Space 865 var ok bool 866 if rs, ok = ssids[ssid]; !ok { 867 rs = &receivedsharecache.Space{ 868 States: make(map[string]*receivedsharecache.State, len(spaceShareIDs.IDs)), 869 } 870 ssids[ssid] = rs 871 } 872 873 for shareid := range spaceShareIDs.IDs { 874 rs.States[shareid] = &receivedsharecache.State{ 875 State: collaboration.ShareState_SHARE_STATE_PENDING, 876 } 877 } 878 } 879 } 880 881 // add all spaces the user has receved shares for, this includes mount points and share state for groups 882 spaces, err := m.UserReceivedStates.List(ctx, user.Id.OpaqueId) 883 if err != nil { 884 return nil, err 885 } 886 for ssid, rspace := range spaces { 887 if rs, ok := ssids[ssid]; ok { 888 for shareid, state := range rspace.States { 889 // overwrite state 890 rs.States[shareid] = state 891 } 892 } else { 893 ssids[ssid] = rspace 894 } 895 } 896 897 numWorkers := m.MaxConcurrency 898 if numWorkers == 0 || len(ssids) < numWorkers { 899 numWorkers = len(ssids) 900 } 901 902 type w struct { 903 ssid string 904 rspace *receivedsharecache.Space 905 } 906 work := make(chan w) 907 results := make(chan *collaboration.ReceivedShare) 908 909 g, ctx := errgroup.WithContext(ctx) 910 911 // Distribute work 912 g.Go(func() error { 913 defer close(work) 914 for ssid, rspace := range ssids { 915 select { 916 case work <- w{ssid, rspace}: 917 case <-ctx.Done(): 918 return ctx.Err() 919 } 920 } 921 return nil 922 }) 923 924 // Spawn workers that'll concurrently work the queue 925 for i := 0; i < numWorkers; i++ { 926 g.Go(func() error { 927 for w := range work { 928 storageID, spaceID, _ := shareid.Decode(w.ssid) 929 sublogr := sublog.With().Str("storageid", storageID).Str("spaceid", spaceID).Logger() 930 // fetch all shares from space with one request 931 _, err := m.Cache.ListSpace(ctx, storageID, spaceID) 932 if err != nil { 933 sublogr.Error().Err(err). 934 Msg("failed to list shares in space") 935 continue 936 } 937 for shareID, state := range w.rspace.States { 938 s, err := m.Cache.Get(ctx, storageID, spaceID, shareID, true) 939 if err != nil { 940 sublogr.Error().Err(err).Msg("could not retrieve share") 941 continue 942 } 943 if s == nil { 944 sublogr.Warn().Str("shareid", shareID).Msg("share not found. cleaning up") 945 _ = m.UserReceivedStates.Remove(ctx, user.Id.OpaqueId, w.ssid, shareID) 946 continue 947 } 948 sublogr = sublogr.With().Str("shareid", shareID).Logger() 949 if share.IsExpired(s) { 950 if err := m.removeShare(ctx, s, false); err != nil { 951 sublogr.Error().Err(err). 952 Msg("failed to unshare expired share") 953 } 954 if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ 955 ShareOwner: s.GetOwner(), 956 ItemID: s.GetResourceId(), 957 ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), 958 GranteeUserID: s.GetGrantee().GetUserId(), 959 GranteeGroupID: s.GetGrantee().GetGroupId(), 960 }); err != nil { 961 sublogr.Error().Err(err). 962 Msg("failed to publish share expired event") 963 } 964 continue 965 } 966 967 if share.IsGrantedToUser(s, user) { 968 if share.MatchesFiltersWithState(s, state.State, filters) { 969 rs := &collaboration.ReceivedShare{ 970 Share: s, 971 State: state.State, 972 MountPoint: state.MountPoint, 973 Hidden: state.Hidden, 974 } 975 select { 976 case results <- rs: 977 case <-ctx.Done(): 978 return ctx.Err() 979 } 980 } 981 } 982 } 983 } 984 return nil 985 }) 986 } 987 988 // Wait for things to settle down, then close results chan 989 go func() { 990 _ = g.Wait() // error is checked later 991 close(results) 992 }() 993 994 rss := []*collaboration.ReceivedShare{} 995 for n := range results { 996 rss = append(rss, n) 997 } 998 999 if err := g.Wait(); err != nil { 1000 span.RecordError(err) 1001 span.SetStatus(codes.Error, err.Error()) 1002 return nil, err 1003 } 1004 1005 span.SetStatus(codes.Ok, "") 1006 return rss, nil 1007 } 1008 1009 // convert must be called in a lock-controlled block. 1010 func (m *Manager) convert(ctx context.Context, userID string, s *collaboration.Share) *collaboration.ReceivedShare { 1011 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "convert") 1012 defer span.End() 1013 1014 rs := &collaboration.ReceivedShare{ 1015 Share: s, 1016 State: collaboration.ShareState_SHARE_STATE_PENDING, 1017 } 1018 1019 storageID, spaceID, _ := shareid.Decode(s.Id.OpaqueId) 1020 1021 state, err := m.UserReceivedStates.Get(ctx, userID, storageID+shareid.IDDelimiter+spaceID, s.Id.GetOpaqueId()) 1022 if err == nil && state != nil { 1023 rs.State = state.State 1024 rs.MountPoint = state.MountPoint 1025 rs.Hidden = state.Hidden 1026 } 1027 return rs 1028 } 1029 1030 // GetReceivedShare returns the information for a received share. 1031 func (m *Manager) GetReceivedShare(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.ReceivedShare, error) { 1032 if err := m.initialize(ctx); err != nil { 1033 return nil, err 1034 } 1035 1036 return m.getReceived(ctx, ref) 1037 } 1038 1039 func (m *Manager) getReceived(ctx context.Context, ref *collaboration.ShareReference) (*collaboration.ReceivedShare, error) { 1040 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "getReceived") 1041 defer span.End() 1042 sublog := appctx.GetLogger(ctx).With().Str("id", ref.GetId().GetOpaqueId()).Str("key", ref.GetKey().String()).Str("driver", "jsoncs3").Str("handler", "getReceived").Logger() 1043 1044 s, err := m.get(ctx, ref) 1045 if err != nil { 1046 return nil, err 1047 } 1048 user := ctxpkg.ContextMustGetUser(ctx) 1049 if user.GetId().GetType() != userv1beta1.UserType_USER_TYPE_SERVICE && !share.IsGrantedToUser(s, user) { 1050 return nil, errtypes.NotFound(ref.String()) 1051 } 1052 if share.IsExpired(s) { 1053 if err := m.removeShare(ctx, s, false); err != nil { 1054 sublog.Error().Err(err). 1055 Msg("failed to unshare expired share") 1056 } 1057 if err := events.Publish(ctx, m.eventStream, events.ShareExpired{ 1058 ShareOwner: s.GetOwner(), 1059 ItemID: s.GetResourceId(), 1060 ExpiredAt: time.Unix(int64(s.GetExpiration().GetSeconds()), int64(s.GetExpiration().GetNanos())), 1061 GranteeUserID: s.GetGrantee().GetUserId(), 1062 GranteeGroupID: s.GetGrantee().GetGroupId(), 1063 }); err != nil { 1064 sublog.Error().Err(err). 1065 Msg("failed to publish share expired event") 1066 } 1067 } 1068 return m.convert(ctx, user.Id.GetOpaqueId(), s), nil 1069 } 1070 1071 // UpdateReceivedShare updates the received share with share state. 1072 func (m *Manager) UpdateReceivedShare(ctx context.Context, receivedShare *collaboration.ReceivedShare, fieldMask *field_mask.FieldMask, forUser *userv1beta1.UserId) (*collaboration.ReceivedShare, error) { 1073 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "UpdateReceivedShare") 1074 defer span.End() 1075 1076 if err := m.initialize(ctx); err != nil { 1077 return nil, err 1078 } 1079 1080 rs, err := m.getReceived(ctx, &collaboration.ShareReference{Spec: &collaboration.ShareReference_Id{Id: receivedShare.Share.Id}}) 1081 if err != nil { 1082 return nil, err 1083 } 1084 1085 for i := range fieldMask.Paths { 1086 switch fieldMask.Paths[i] { 1087 case "state": 1088 rs.State = receivedShare.State 1089 case "mount_point": 1090 rs.MountPoint = receivedShare.MountPoint 1091 case "hidden": 1092 rs.Hidden = receivedShare.Hidden 1093 default: 1094 return nil, errtypes.NotSupported("updating " + fieldMask.Paths[i] + " is not supported") 1095 } 1096 } 1097 1098 // write back 1099 u := ctxpkg.ContextMustGetUser(ctx) 1100 uid := u.GetId().GetOpaqueId() 1101 if u.GetId().GetType() == userv1beta1.UserType_USER_TYPE_SERVICE { 1102 uid = forUser.GetOpaqueId() 1103 } 1104 1105 err = m.UserReceivedStates.Add(ctx, uid, rs.Share.ResourceId.StorageId+shareid.IDDelimiter+rs.Share.ResourceId.SpaceId, rs) 1106 if err != nil { 1107 return nil, err 1108 } 1109 1110 return rs, nil 1111 } 1112 1113 func shareIsRoutable(share *collaboration.Share) bool { 1114 return strings.Contains(share.Id.OpaqueId, shareid.IDDelimiter) 1115 } 1116 1117 func updateShareID(share *collaboration.Share) { 1118 share.Id.OpaqueId = shareid.Encode(share.ResourceId.StorageId, share.ResourceId.SpaceId, share.Id.OpaqueId) 1119 } 1120 1121 // Load imports shares and received shares from channels (e.g. during migration) 1122 func (m *Manager) Load(ctx context.Context, shareChan <-chan *collaboration.Share, receivedShareChan <-chan share.ReceivedShareWithUser) error { 1123 log := appctx.GetLogger(ctx) 1124 if err := m.initialize(ctx); err != nil { 1125 return err 1126 } 1127 1128 var wg sync.WaitGroup 1129 wg.Add(2) 1130 go func() { 1131 for s := range shareChan { 1132 if s == nil { 1133 continue 1134 } 1135 if !shareIsRoutable(s) { 1136 updateShareID(s) 1137 } 1138 if err := m.Cache.Add(context.Background(), s.GetResourceId().GetStorageId(), s.GetResourceId().GetSpaceId(), s.Id.OpaqueId, s); err != nil { 1139 log.Error().Err(err).Interface("share", s).Msg("error persisting share") 1140 } else { 1141 log.Debug().Str("storageid", s.GetResourceId().GetStorageId()).Str("spaceid", s.GetResourceId().GetSpaceId()).Str("shareid", s.Id.OpaqueId).Msg("imported share") 1142 } 1143 if err := m.CreatedCache.Add(ctx, s.GetCreator().GetOpaqueId(), s.Id.OpaqueId); err != nil { 1144 log.Error().Err(err).Interface("share", s).Msg("error persisting created cache") 1145 } else { 1146 log.Debug().Str("creatorid", s.GetCreator().GetOpaqueId()).Str("shareid", s.Id.OpaqueId).Msg("updated created cache") 1147 } 1148 } 1149 wg.Done() 1150 }() 1151 go func() { 1152 for s := range receivedShareChan { 1153 if s.ReceivedShare != nil { 1154 if !shareIsRoutable(s.ReceivedShare.GetShare()) { 1155 updateShareID(s.ReceivedShare.GetShare()) 1156 } 1157 switch s.ReceivedShare.Share.Grantee.Type { 1158 case provider.GranteeType_GRANTEE_TYPE_USER: 1159 if err := m.UserReceivedStates.Add(context.Background(), s.ReceivedShare.GetShare().GetGrantee().GetUserId().GetOpaqueId(), s.ReceivedShare.GetShare().GetResourceId().GetSpaceId(), s.ReceivedShare); err != nil { 1160 log.Error().Err(err).Interface("received share", s).Msg("error persisting received share for user") 1161 } else { 1162 log.Debug().Str("userid", s.ReceivedShare.GetShare().GetGrantee().GetUserId().GetOpaqueId()).Str("spaceid", s.ReceivedShare.GetShare().GetResourceId().GetSpaceId()).Str("shareid", s.ReceivedShare.GetShare().Id.OpaqueId).Msg("updated received share userdata") 1163 } 1164 case provider.GranteeType_GRANTEE_TYPE_GROUP: 1165 if err := m.GroupReceivedCache.Add(context.Background(), s.ReceivedShare.GetShare().GetGrantee().GetGroupId().GetOpaqueId(), s.ReceivedShare.GetShare().GetId().GetOpaqueId()); err != nil { 1166 log.Error().Err(err).Interface("received share", s).Msg("error persisting received share to group cache") 1167 } else { 1168 log.Debug().Str("groupid", s.ReceivedShare.GetShare().GetGrantee().GetGroupId().GetOpaqueId()).Str("shareid", s.ReceivedShare.GetShare().Id.OpaqueId).Msg("updated received share group cache") 1169 } 1170 } 1171 } 1172 } 1173 wg.Done() 1174 }() 1175 wg.Wait() 1176 1177 return nil 1178 } 1179 1180 func (m *Manager) purgeSpace(ctx context.Context, id *provider.StorageSpaceId) { 1181 log := appctx.GetLogger(ctx) 1182 storageID, spaceID := storagespace.SplitStorageID(id.OpaqueId) 1183 1184 shares, err := m.Cache.ListSpace(ctx, storageID, spaceID) 1185 if err != nil { 1186 log.Error().Err(err).Msg("error listing shares in space") 1187 return 1188 } 1189 1190 // iterate over all shares in the space and remove them 1191 for _, share := range shares.Shares { 1192 err := m.removeShare(ctx, share, true) 1193 if err != nil { 1194 log.Error().Err(err).Msg("error removing share") 1195 } 1196 } 1197 1198 // remove all shares in the space 1199 err = m.Cache.PurgeSpace(ctx, storageID, spaceID) 1200 if err != nil { 1201 log.Error().Err(err).Msg("error purging space") 1202 } 1203 } 1204 1205 func (m *Manager) removeShare(ctx context.Context, s *collaboration.Share, skipSpaceCache bool) error { 1206 ctx, span := appctx.GetTracerProvider(ctx).Tracer(tracerName).Start(ctx, "removeShare") 1207 defer span.End() 1208 1209 eg, ctx := errgroup.WithContext(ctx) 1210 if !skipSpaceCache { 1211 eg.Go(func() error { 1212 storageID, spaceID, _ := shareid.Decode(s.Id.OpaqueId) 1213 err := m.Cache.Remove(ctx, storageID, spaceID, s.Id.OpaqueId) 1214 1215 return err 1216 }) 1217 } 1218 1219 eg.Go(func() error { 1220 // remove from created cache 1221 return m.CreatedCache.Remove(ctx, s.GetCreator().GetOpaqueId(), s.Id.OpaqueId) 1222 }) 1223 1224 eg.Go(func() error { 1225 // remove from user received states 1226 if s.GetGrantee().Type == provider.GranteeType_GRANTEE_TYPE_USER { 1227 return m.UserReceivedStates.Remove(ctx, s.GetGrantee().GetUserId().GetOpaqueId(), s.GetResourceId().GetStorageId()+shareid.IDDelimiter+s.GetResourceId().GetSpaceId(), s.Id.OpaqueId) 1228 } else if s.GetGrantee().Type == provider.GranteeType_GRANTEE_TYPE_GROUP { 1229 return m.GroupReceivedCache.Remove(ctx, s.GetGrantee().GetGroupId().GetOpaqueId(), s.Id.OpaqueId) 1230 } 1231 return nil 1232 }) 1233 1234 return eg.Wait() 1235 } 1236 1237 func (m *Manager) CleanupStaleShares(ctx context.Context) { 1238 log := appctx.GetLogger(ctx) 1239 1240 if err := m.initialize(ctx); err != nil { 1241 return 1242 } 1243 1244 // list all shares 1245 providers, err := m.Cache.All(ctx) 1246 if err != nil { 1247 log.Error().Err(err).Msg("error listing all shares") 1248 return 1249 } 1250 1251 client, err := m.gatewaySelector.Next() 1252 if err != nil { 1253 log.Error().Err(err).Msg("could not get gateway client") 1254 } 1255 1256 providers.Range(func(storage string, spaces *providercache.Spaces) bool { 1257 log.Info().Str("storage", storage).Interface("spaceCount", spaces.Spaces.Count()).Msg("checking storage") 1258 1259 spaces.Spaces.Range(func(space string, shares *providercache.Shares) bool { 1260 log.Info().Str("storage", storage).Str("space", space).Interface("shareCount", len(shares.Shares)).Msg("checking space") 1261 1262 for _, s := range shares.Shares { 1263 req := &provider.StatRequest{ 1264 Ref: &provider.Reference{ResourceId: s.ResourceId, Path: "."}, 1265 } 1266 res, err := client.Stat(ctx, req) 1267 if err != nil { 1268 log.Error().Err(err).Str("storage", storage).Str("space", space).Msg("could not stat shared resource") 1269 } 1270 if res.Status.Code == rpcv1beta1.Code_CODE_NOT_FOUND { 1271 log.Info().Str("storage", storage).Str("space", space).Msg("shared resource does not exist anymore. cleaning up shares") 1272 if err := m.removeShare(ctx, s, false); err != nil { 1273 log.Error().Err(err).Str("storage", storage).Str("space", space).Msg("could not remove share") 1274 } 1275 } 1276 } 1277 1278 return true 1279 }) 1280 1281 return true 1282 }) 1283 }