github.com/cozy/cozy-stack@v0.0.0-20240603063001-31110fa4cae1/model/sharing/indexer.go

package sharing

import (
	"encoding/hex"
	"fmt"

	"github.com/cozy/cozy-stack/model/instance"
	"github.com/cozy/cozy-stack/model/vfs"
	"github.com/cozy/cozy-stack/pkg/consts"
	"github.com/cozy/cozy-stack/pkg/couchdb"
	"github.com/cozy/cozy-stack/pkg/couchdb/revision"
	"github.com/cozy/cozy-stack/pkg/crypto"
	"github.com/cozy/cozy-stack/pkg/logger"
	"github.com/cozy/cozy-stack/pkg/prefixer"
	"github.com/cozy/cozy-stack/pkg/realtime"
)

type bulkRevs struct {
	Rev       string
	Revisions RevsStruct
}

type sharingIndexer struct {
	db       prefixer.Prefixer
	indexer  vfs.Indexer
	bulkRevs *bulkRevs
	shared   *SharedRef
	log      *logger.Entry
}

// newSharingIndexer creates an Indexer for the special purpose of the sharing.
// It intercepts some requests to force the id and revisions of some documents,
// and proxies the other requests (reads) to the normal couchdbIndexer.
func newSharingIndexer(inst *instance.Instance, bulkRevs *bulkRevs, shared *SharedRef) *sharingIndexer {
	return &sharingIndexer{
		db:       inst,
		indexer:  vfs.NewCouchdbIndexer(inst),
		bulkRevs: bulkRevs,
		shared:   shared,
		log:      inst.Logger().WithNamespace("sharing-indexer"),
	}
}

// IncrementRevision is used when a conflict between 2 files/folders arises: to
// resolve the conflict, the file/folder is given a new name, and to ensure
// that this change is propagated to the other cozy instances, we add a new
// revision to the chain.
func (s *sharingIndexer) IncrementRevision() {
	if s.bulkRevs == nil {
		return
	}

	start := s.bulkRevs.Revisions.Start
	start++
	generated := hex.EncodeToString(crypto.GenerateRandomBytes(16))
	s.bulkRevs.Rev = fmt.Sprintf("%d-%s", start, generated)
	s.bulkRevs.Revisions.Start = start
	s.bulkRevs.Revisions.IDs = append([]string{generated}, s.bulkRevs.Revisions.IDs...)
}

// WillResolveConflict is used when a conflict on a file/folder has been detected.
// There are 2 cases:
//  1. the file/folder has a revision on one side with a generation strictly
//     higher than the revision on the other side => we can use this revision
//     on both sides (but the chain of parents will not be the same)
//  2. the two revisions for the file/folder are at the same generation => we
//     have to create a new revision that will be propagated to the other cozy.
func (s *sharingIndexer) WillResolveConflict(rev string, chain []string) {
	last := chain[len(chain)-1]
	if revision.Generation(last) == revision.Generation(rev) {
		s.bulkRevs = nil
		return
	}

	altered := MixupChainToResolveConflict(rev, chain)
	s.bulkRevs.Revisions = revsChainToStruct(altered)
	s.bulkRevs.Rev = last
}

// StashRevision is a way to not use the last revision for the next operation,
// and to keep it for later.
//
// For a new file, at least 2 revisions are needed: one to stash, and the other
// for the next operation. For an existing file, at least 3 revisions are
// needed for this to work (the first revision is the one that is already in
// CouchDB, the second is the revision for the next operation, and the third is
// the stash). If we don't have them, we fall back to revisions generated by
// CouchDB.
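//
// As a purely illustrative example (with hypothetical revision IDs): if
// bulkRevs is at rev "4-ccc" with Revisions.IDs = [ccc, bbb, aaa], then
// StashRevision(false) returns "ccc" and leaves bulkRevs at "3-bbb"; calling
// UnstashRevision("ccc") afterwards restores the chain to "4-ccc".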
func (s *sharingIndexer) StashRevision(newFile bool) string {
	minRevs := 3
	if newFile {
		minRevs = 2
	}
	if s.bulkRevs == nil || len(s.bulkRevs.Revisions.IDs) < minRevs {
		s.bulkRevs = nil
		return ""
	}
	stash := s.bulkRevs.Revisions.IDs[0]
	s.bulkRevs.Revisions.IDs = s.bulkRevs.Revisions.IDs[1:]
	s.bulkRevs.Revisions.Start--
	s.bulkRevs.Rev = fmt.Sprintf("%d-%s", s.bulkRevs.Revisions.Start, s.bulkRevs.Revisions.IDs[0])
	return stash
}

// UnstashRevision takes back a stash returned by StashRevision after the
// intermediate operation has been done.
func (s *sharingIndexer) UnstashRevision(stash string) {
	if s.bulkRevs == nil {
		return
	}
	s.bulkRevs.Revisions.Start++
	s.bulkRevs.Revisions.IDs = append([]string{stash}, s.bulkRevs.Revisions.IDs...)
	s.bulkRevs.Rev = fmt.Sprintf("%d-%s", s.bulkRevs.Revisions.Start, stash)
}

// CreateBogusPrevRev creates a fake revision that can be used for an operation
// that comes before the revision of bulkRevs.
func (s *sharingIndexer) CreateBogusPrevRev() {
	if s.bulkRevs == nil {
		return
	}
	bogus := hex.EncodeToString(crypto.GenerateRandomBytes(16))
	s.bulkRevs.Revisions.IDs = append(s.bulkRevs.Revisions.IDs, bogus)
}

func (s *sharingIndexer) InitIndex() error {
	s.log.Errorf("Unexpected call to InitIndex")
	return ErrInternalServerError
}

func (s *sharingIndexer) DiskUsage() (int64, error) {
	return s.indexer.DiskUsage()
}

func (s *sharingIndexer) FilesUsage() (int64, error) {
	return s.indexer.FilesUsage()
}

func (s *sharingIndexer) VersionsUsage() (int64, error) {
	return s.indexer.VersionsUsage()
}

func (s *sharingIndexer) TrashUsage() (int64, error) {
	return s.indexer.TrashUsage()
}

func (s *sharingIndexer) DirSize(doc *vfs.DirDoc) (int64, error) {
	return s.indexer.DirSize(doc)
}

func (s *sharingIndexer) CreateFileDoc(doc *vfs.FileDoc) error {
	s.log.Errorf("Unexpected call to CreateFileDoc")
	return ErrInternalServerError
}

func (s *sharingIndexer) CreateNamedFileDoc(doc *vfs.FileDoc) error {
	if s.bulkRevs == nil {
		return s.indexer.CreateNamedFileDoc(doc)
	}

	// If the VFS creates the file by omitting the fake first revision with
	// trashed=true, it is easy: we can insert the doc as is, and trigger the
	// realtime event.
	if !doc.Trashed {
		// Ensure that fullpath is filled because it's used in realtime/@events
		if _, err := doc.Path(s); err != nil {
			s.log.Errorf("Cannot compute fullpath for %#v: %s", doc, err)
			return err
		}
		if err := s.bulkForceUpdateDoc(nil, doc); err != nil {
			return err
		}
		couchdb.RTEvent(s.db, realtime.EventCreate, doc, nil)
		return nil
	}

	// But if the VFS creates a first fake revision, it will also create
	// another revision after that to clear the trashed attribute when the
	// upload completes. That means using 2 revision numbers. So, we have to
	// stash the target revision during the first write to keep it for the
	// second write.
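	// With a single revision in the chain there is nothing to stash
	// (StashRevision needs at least 2 IDs for a new file), so we first append
	// a bogus ancestor revision with CreateBogusPrevRev.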
	if len(s.bulkRevs.Revisions.IDs) == 1 {
		s.CreateBogusPrevRev()
	}
	stash := s.StashRevision(true)
	err := s.bulkForceUpdateDoc(nil, doc)
	s.UnstashRevision(stash)
	return err
}

func (s *sharingIndexer) UpdateFileDoc(olddoc, doc *vfs.FileDoc) error {
	if s.bulkRevs == nil {
		return s.indexer.UpdateFileDoc(olddoc, doc)
	}

	if err := s.bulkForceUpdateDoc(olddoc, doc); err != nil {
		return err
	}

	if s.shared != nil {
		if err := UpdateFileShared(s.db, s.shared, s.bulkRevs.Revisions); err != nil {
			return err
		}
	}

	// Ensure that fullpath is filled because it's used in realtime/@events
	if _, err := doc.Path(s); err != nil {
		return err
	}
	if olddoc != nil {
		if _, err := olddoc.Path(s); err != nil {
			return err
		}
		couchdb.RTEvent(s.db, realtime.EventUpdate, doc, olddoc)
	} else {
		couchdb.RTEvent(s.db, realtime.EventUpdate, doc, nil)
	}
	return nil
}

func (s *sharingIndexer) setDirOrFileRevisions(oldDocDir *vfs.DirDoc, oldDocFile *vfs.FileDoc, doc map[string]interface{}) {
	doc["_rev"] = s.bulkRevs.Rev
	var oldDocRev string
	if oldDocDir != nil {
		oldDocRev = oldDocDir.DocRev
	} else if oldDocFile != nil {
		oldDocRev = oldDocFile.DocRev
	}
	if oldDocRev != "" {
		// XXX We cannot directly apply the revision chain as received from the
		// remote instance because it might create a CouchDB conflict if a rev
		// does not exist on the local instance. Therefore, we start from the
		// last local rev and apply all the higher revs received from the
		// remote. The modified revisions in s.bulkRevs will also be used to
		// update the io.cozy.shared document.
		chain := revsStructToChain(s.bulkRevs.Revisions)
		newChain := MixupChainToResolveConflict(oldDocRev, chain)
		s.bulkRevs.Revisions = revsChainToStruct(newChain)
	}
	doc["_revisions"] = s.bulkRevs.Revisions
}

func (s *sharingIndexer) bulkForceUpdateDoc(olddoc, doc *vfs.FileDoc) error {
	// XXX We need to check that the file has not been updated between the
	// time it was loaded and the call to BulkForceUpdateDocs, as the VFS lock
	// is acquired after the file has been loaded, and CouchDB could create a
	// conflict in the database if that happened (because of the new_edit:
	// false option).
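	// So we re-read the current revision from CouchDB and abort if it no
	// longer matches olddoc, rather than letting BulkForceUpdateDocs
	// introduce a conflicting branch.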
	if olddoc != nil {
		var current couchdb.JSONDoc
		err := couchdb.GetDoc(s.db, consts.Files, doc.ID(), &current)
		if err != nil {
			return err
		}
		if current.Rev() != olddoc.Rev() {
			return ErrInternalServerError
		}
	}

	docs := make([]map[string]interface{}, 1)
	docs[0] = map[string]interface{}{
		"type":       doc.Type,
		"_id":        doc.DocID,
		"name":       doc.DocName,
		"dir_id":     doc.DirID,
		"created_at": doc.CreatedAt,
		"updated_at": doc.UpdatedAt,
		"tags":       doc.Tags,
		"size":       fmt.Sprintf("%d", doc.ByteSize), // XXX size must be serialized as a string, not an int
		"md5sum":     doc.MD5Sum,
		"mime":       doc.Mime,
		"class":      doc.Class,
		"executable": doc.Executable,
		"trashed":    doc.Trashed,
	}
	if len(doc.ReferencedBy) > 0 {
		docs[0][couchdb.SelectorReferencedBy] = doc.ReferencedBy
	}
	if doc.Metadata != nil {
		docs[0]["metadata"] = doc.Metadata
	}
	if doc.CozyMetadata != nil {
		docs[0]["cozyMetadata"] = doc.CozyMetadata
	}
	if doc.InternalID != "" {
		docs[0]["internal_vfs_id"] = doc.InternalID
	}
	doc.SetRev(s.bulkRevs.Rev)
	s.setDirOrFileRevisions(nil, olddoc, docs[0])

	return couchdb.BulkForceUpdateDocs(s.db, consts.Files, docs)
}

// DeleteFileDoc is used when uploading a new file fails (invalid md5sum for example)
func (s *sharingIndexer) DeleteFileDoc(doc *vfs.FileDoc) error {
	return s.indexer.DeleteFileDoc(doc)
}

func (s *sharingIndexer) CreateDirDoc(doc *vfs.DirDoc) error {
	s.log.Errorf("Unexpected call to CreateDirDoc")
	return ErrInternalServerError
}

func (s *sharingIndexer) CreateNamedDirDoc(doc *vfs.DirDoc) error {
	return s.UpdateDirDoc(nil, doc)
}

func (s *sharingIndexer) UpdateDirDoc(olddoc, doc *vfs.DirDoc) error {
	if s.bulkRevs == nil {
		return s.indexer.UpdateDirDoc(olddoc, doc)
	}

	if olddoc != nil && doc.Fullpath != olddoc.Fullpath {
		if err := s.indexer.MoveDir(olddoc.Fullpath, doc.Fullpath); err != nil {
			return err
		}
	}

	docs := make([]map[string]interface{}, 1)
	docs[0] = map[string]interface{}{
		"type":       doc.Type,
		"_id":        doc.DocID,
		"name":       doc.DocName,
		"dir_id":     doc.DirID,
		"created_at": doc.CreatedAt,
		"updated_at": doc.UpdatedAt,
		"tags":       doc.Tags,
		"path":       doc.Fullpath,
	}
	if len(doc.ReferencedBy) > 0 {
		docs[0][couchdb.SelectorReferencedBy] = doc.ReferencedBy
	}
	if len(doc.NotSynchronizedOn) > 0 {
		docs[0]["not_synchronized_on"] = doc.NotSynchronizedOn
	}
	if doc.Metadata != nil {
		docs[0]["metadata"] = doc.Metadata
	}
	if doc.CozyMetadata != nil {
		docs[0]["cozyMetadata"] = doc.CozyMetadata
	}
	doc.SetRev(s.bulkRevs.Rev)
	s.setDirOrFileRevisions(olddoc, nil, docs[0])

	if err := couchdb.BulkForceUpdateDocs(s.db, consts.Files, docs); err != nil {
		return err
	}

	if err := UpdateFileShared(s.db, s.shared, s.bulkRevs.Revisions); err != nil {
		return err
	}

	if olddoc != nil {
		couchdb.RTEvent(s.db, realtime.EventUpdate, doc, olddoc)
	} else {
		couchdb.RTEvent(s.db, realtime.EventUpdate, doc, nil)
	}
	return nil
}

func (s *sharingIndexer) DeleteDirDoc(doc *vfs.DirDoc) error {
	s.log.Errorf("Unexpected call to DeleteDirDoc")
	return ErrInternalServerError
}

func (s *sharingIndexer) DeleteDirDocAndContent(doc *vfs.DirDoc, onlyContent bool) (files []*vfs.FileDoc, n int64, err error) {
	s.log.Errorf("Unexpected call to DeleteDirDocAndContent")
	return nil, 0, ErrInternalServerError
}

func (s *sharingIndexer) MoveDir(oldpath, newpath string) error {
	s.log.Errorf("Unexpected call to MoveDir")
	return ErrInternalServerError
}

func (s *sharingIndexer) BatchDelete(docs []couchdb.Doc) error {
	s.log.Errorf("Unexpected call to BatchDelete")
	return ErrInternalServerError
}

func (s *sharingIndexer) DirByID(fileID string) (*vfs.DirDoc, error) {
	return s.indexer.DirByID(fileID)
}

func (s *sharingIndexer) DirByPath(name string) (*vfs.DirDoc, error) {
	return s.indexer.DirByPath(name)
}

func (s *sharingIndexer) FileByID(fileID string) (*vfs.FileDoc, error) {
	return s.indexer.FileByID(fileID)
}

func (s *sharingIndexer) FileByPath(name string) (*vfs.FileDoc, error) {
	return s.indexer.FileByPath(name)
}

func (s *sharingIndexer) FilePath(doc *vfs.FileDoc) (string, error) {
	return s.indexer.FilePath(doc)
}

func (s *sharingIndexer) DirOrFileByID(fileID string) (*vfs.DirDoc, *vfs.FileDoc, error) {
	return s.indexer.DirOrFileByID(fileID)
}

func (s *sharingIndexer) DirOrFileByPath(name string) (*vfs.DirDoc, *vfs.FileDoc, error) {
	return s.indexer.DirOrFileByPath(name)
}

func (s *sharingIndexer) DirIterator(doc *vfs.DirDoc, opts *vfs.IteratorOptions) vfs.DirIterator {
	return s.indexer.DirIterator(doc, opts)
}

func (s *sharingIndexer) DirBatch(doc *vfs.DirDoc, cursor couchdb.Cursor) ([]vfs.DirOrFileDoc, error) {
	return s.indexer.DirBatch(doc, cursor)
}

func (s *sharingIndexer) DirLength(doc *vfs.DirDoc) (int, error) {
	return s.indexer.DirLength(doc)
}

func (s *sharingIndexer) DirChildExists(dirID, name string) (bool, error) {
	return s.indexer.DirChildExists(dirID, name)
}

func (s *sharingIndexer) CreateVersion(v *vfs.Version) error {
	return s.indexer.CreateVersion(v)
}

func (s *sharingIndexer) DeleteVersion(v *vfs.Version) error {
	return s.indexer.DeleteVersion(v)
}

func (s *sharingIndexer) AllVersions() ([]*vfs.Version, error) {
	return s.indexer.AllVersions()
}

func (s *sharingIndexer) BatchDeleteVersions(versions []*vfs.Version) error {
	return s.indexer.BatchDeleteVersions(versions)
}

func (s *sharingIndexer) ListNotSynchronizedOn(clientID string) ([]vfs.DirDoc, error) {
	return s.indexer.ListNotSynchronizedOn(clientID)
}

func (s *sharingIndexer) CheckIndexIntegrity(predicate func(*vfs.FsckLog), failFast bool) error {
	s.log.Errorf("Unexpected call to CheckIndexIntegrity")
	return ErrInternalServerError
}

func (s *sharingIndexer) CheckTreeIntegrity(tree *vfs.Tree, predicate func(*vfs.FsckLog), failFast bool) error {
	s.log.Errorf("Unexpected call to CheckTreeIntegrity")
	return ErrInternalServerError
}

func (s *sharingIndexer) BuildTree(each ...func(*vfs.TreeFile)) (t *vfs.Tree, err error) {
	s.log.Errorf("Unexpected call to BuildTree")
	return nil, ErrInternalServerError
}

var _ vfs.Indexer = (*sharingIndexer)(nil)
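
// The blank-identifier assignment above is a compile-time check that
// *sharingIndexer implements the vfs.Indexer interface.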