github.com/kayoticsully/syncthing@v0.8.9-0.20140724133906-c45a2fdc03f8/model/model.go

// Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.

package model

import (
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"

	"github.com/calmh/syncthing/config"
	"github.com/calmh/syncthing/events"
	"github.com/calmh/syncthing/files"
	"github.com/calmh/syncthing/lamport"
	"github.com/calmh/syncthing/protocol"
	"github.com/calmh/syncthing/scanner"
	"github.com/syndtr/goleveldb/leveldb"
)

type repoState int

const (
	RepoIdle repoState = iota
	RepoScanning
	RepoSyncing
	RepoCleaning
)

func (s repoState) String() string {
	switch s {
	case RepoIdle:
		return "idle"
	case RepoScanning:
		return "scanning"
	case RepoCleaning:
		return "cleaning"
	case RepoSyncing:
		return "syncing"
	default:
		return "unknown"
	}
}

// Somewhat arbitrary amount of bytes that we choose to let represent the size
// of an unsynchronized directory entry or a deleted file. We need it to be
// larger than zero so that it's visible that there is some amount of bytes to
// transfer to bring the systems into synchronization.
const zeroEntrySize = 128

// How many files to send in each Index/IndexUpdate message.
const indexBatchSize = 1000

type Model struct {
	indexDir string
	cfg      *config.Configuration
	db       *leveldb.DB

	clientName    string
	clientVersion string

	repoCfgs   map[string]config.RepositoryConfiguration // repo -> cfg
	repoFiles  map[string]*files.Set                     // repo -> files
	repoNodes  map[string][]protocol.NodeID              // repo -> nodeIDs
	nodeRepos  map[protocol.NodeID][]string              // nodeID -> repos
	suppressor map[string]*suppressor                    // repo -> suppressor
	rmut       sync.RWMutex                              // protects the above

	repoState        map[string]repoState // repo -> state
	repoStateChanged map[string]time.Time // repo -> time when state changed
	smut             sync.RWMutex

	protoConn map[protocol.NodeID]protocol.Connection
	rawConn   map[protocol.NodeID]io.Closer
	nodeVer   map[protocol.NodeID]string
	pmut      sync.RWMutex // protects protoConn and rawConn

	sentLocalVer map[protocol.NodeID]map[string]uint64
	slMut        sync.Mutex

	sup suppressor

	addedRepo bool
	started   bool
}

var (
	ErrNoSuchFile = errors.New("no such file")
	ErrInvalid    = errors.New("file is invalid")
)

// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local repository in any way.
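//
// A minimal construction sketch (illustrative, not part of the original
// source; assumes a loaded *config.Configuration and an open *leveldb.DB,
// and confDir is a hypothetical placeholder):
//
//	db, err := leveldb.OpenFile(filepath.Join(confDir, "index"), nil)
//	if err != nil {
//		l.Fatalln(err)
//	}
//	m := NewModel(confDir, cfg, "syncthing", "v0.8.9", db)
//	for _, repoCfg := range cfg.Repositories {
//		m.AddRepo(repoCfg)
//	}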
func NewModel(indexDir string, cfg *config.Configuration, clientName, clientVersion string, db *leveldb.DB) *Model {
	m := &Model{
		indexDir:         indexDir,
		cfg:              cfg,
		db:               db,
		clientName:       clientName,
		clientVersion:    clientVersion,
		repoCfgs:         make(map[string]config.RepositoryConfiguration),
		repoFiles:        make(map[string]*files.Set),
		repoNodes:        make(map[string][]protocol.NodeID),
		nodeRepos:        make(map[protocol.NodeID][]string),
		repoState:        make(map[string]repoState),
		repoStateChanged: make(map[string]time.Time),
		suppressor:       make(map[string]*suppressor),
		protoConn:        make(map[protocol.NodeID]protocol.Connection),
		rawConn:          make(map[protocol.NodeID]io.Closer),
		nodeVer:          make(map[protocol.NodeID]string),
		sentLocalVer:     make(map[protocol.NodeID]map[string]uint64),
		sup:              suppressor{threshold: int64(cfg.Options.MaxChangeKbps)},
	}

	var timeout = 20 * 60 // seconds
	if t := os.Getenv("STDEADLOCKTIMEOUT"); len(t) > 0 {
		it, err := strconv.Atoi(t)
		if err == nil {
			timeout = it
		}
	}
	deadlockDetect(&m.rmut, time.Duration(timeout)*time.Second)
	deadlockDetect(&m.smut, time.Duration(timeout)*time.Second)
	deadlockDetect(&m.pmut, time.Duration(timeout)*time.Second)
	return m
}

// StartRepoRW starts read/write processing on the given repository. When in
// read/write mode the model will attempt to keep in sync with the cluster by
// pulling needed files from peer nodes.
func (m *Model) StartRepoRW(repo string, threads int) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()

	if cfg, ok := m.repoCfgs[repo]; !ok {
		panic("cannot start without repo")
	} else {
		newPuller(cfg, m, threads, m.cfg)
	}
}

// StartRepoRO starts read-only processing on the given repository. When in
// read-only mode the model will announce files to the cluster but not pull
// in any external changes.
func (m *Model) StartRepoRO(repo string) {
	m.StartRepoRW(repo, 0) // zero threads => read only
}

type ConnectionInfo struct {
	protocol.Statistics
	Address       string
	ClientVersion string
	Completion    int
}

// ConnectionStats returns a map with connection statistics for each connected node.
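//
// Completion is an approximate percentage in [0, 100]: the share of the
// global byte count, across the repositories shared with the node, that the
// node already has. A usage sketch (illustrative; the "total" pseudo-entry
// carries only aggregate byte counters):
//
//	for node, info := range m.ConnectionStats() {
//		fmt.Printf("%s: %s, %d%% complete\n", node, info.Address, info.Completion)
//	}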
func (m *Model) ConnectionStats() map[string]ConnectionInfo {
	type remoteAddrer interface {
		RemoteAddr() net.Addr
	}

	m.pmut.RLock()
	m.rmut.RLock()

	var res = make(map[string]ConnectionInfo)
	for node, conn := range m.protoConn {
		ci := ConnectionInfo{
			Statistics:    conn.Statistics(),
			ClientVersion: m.nodeVer[node],
		}
		if nc, ok := m.rawConn[node].(remoteAddrer); ok {
			ci.Address = nc.RemoteAddr().String()
		}

		var tot int64
		var have int64

		for _, repo := range m.nodeRepos[node] {
			m.repoFiles[repo].WithGlobal(func(f protocol.FileInfo) bool {
				if !protocol.IsDeleted(f.Flags) {
					var size int64
					if protocol.IsDirectory(f.Flags) {
						size = zeroEntrySize
					} else {
						size = f.Size()
					}
					tot += size
					have += size
				}
				return true
			})

			m.repoFiles[repo].WithNeed(node, func(f protocol.FileInfo) bool {
				if !protocol.IsDeleted(f.Flags) {
					var size int64
					if protocol.IsDirectory(f.Flags) {
						size = zeroEntrySize
					} else {
						size = f.Size()
					}
					have -= size
				}
				return true
			})
		}

		ci.Completion = 100
		if tot != 0 {
			ci.Completion = int(100 * have / tot)
		}

		res[node.String()] = ci
	}

	m.rmut.RUnlock()
	m.pmut.RUnlock()

	in, out := protocol.TotalInOut()
	res["total"] = ConnectionInfo{
		Statistics: protocol.Statistics{
			At:            time.Now(),
			InBytesTotal:  in,
			OutBytesTotal: out,
		},
	}

	return res
}

func sizeOf(fs []protocol.FileInfo) (files, deleted int, bytes int64) {
	for _, f := range fs {
		fs, de, by := sizeOfFile(f)
		files += fs
		deleted += de
		bytes += by
	}
	return
}

func sizeOfFile(f protocol.FileInfo) (files, deleted int, bytes int64) {
	if !protocol.IsDeleted(f.Flags) {
		files++
		if !protocol.IsDirectory(f.Flags) {
			bytes += f.Size()
		} else {
			bytes += zeroEntrySize
		}
	} else {
		deleted++
		bytes += zeroEntrySize
	}
	return
}

// GlobalSize returns the number of files, deleted files and total bytes for all
// files in the global model.
func (m *Model) GlobalSize(repo string) (files, deleted int, bytes int64) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		rf.WithGlobal(func(f protocol.FileInfo) bool {
			fs, de, by := sizeOfFile(f)
			files += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// LocalSize returns the number of files, deleted files and total bytes for all
// files in the local repository.
func (m *Model) LocalSize(repo string) (files, deleted int, bytes int64) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		rf.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			fs, de, by := sizeOfFile(f)
			files += fs
			deleted += de
			bytes += by
			return true
		})
	}
	return
}

// NeedSize returns the number and total size of currently needed files.
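//
// Note that deleted entries still count toward the need, each contributing
// zeroEntrySize bytes, so a node that only needs deletions reports a small
// nonzero need.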
func (m *Model) NeedSize(repo string) (files int, bytes int64) {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			fs, de, by := sizeOfFile(f)
			files += fs + de
			bytes += by
			return true
		})
	}
	return
}

// NeedFilesRepo returns the list of currently needed files, capped at
// indexBatchSize entries.
func (m *Model) NeedFilesRepo(repo string) []protocol.FileInfo {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	if rf, ok := m.repoFiles[repo]; ok {
		fs := make([]protocol.FileInfo, 0, indexBatchSize)
		rf.WithNeed(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			fs = append(fs, f)
			return len(fs) < indexBatchSize
		})
		return fs
	}
	return nil
}

// Index is called when a new node is connected and we receive their full index.
// Implements the protocol.Model interface.
func (m *Model) Index(nodeID protocol.NodeID, repo string, fs []protocol.FileInfo) {
	if debug {
		l.Debugf("IDX(in): %s %q: %d files", nodeID, repo, len(fs))
	}

	if !m.repoSharedWith(repo, nodeID) {
		l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
		return
	}

	for i := range fs {
		lamport.Default.Tick(fs[i].Version)
	}

	m.rmut.RLock()
	r, ok := m.repoFiles[repo]
	m.rmut.RUnlock()
	if ok {
		r.Replace(nodeID, fs)
	} else {
		l.Fatalf("Index for nonexistent repo %q", repo)
	}

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"node":    nodeID.String(),
		"repo":    repo,
		"items":   len(fs),
		"version": r.LocalVersion(nodeID),
	})
}

// IndexUpdate is called for incremental updates to connected nodes' indexes.
// Implements the protocol.Model interface.
func (m *Model) IndexUpdate(nodeID protocol.NodeID, repo string, fs []protocol.FileInfo) {
	if debug {
		l.Debugf("IDXUP(in): %s / %q: %d files", nodeID, repo, len(fs))
	}

	if !m.repoSharedWith(repo, nodeID) {
		l.Warnf("Unexpected repository ID %q sent from node %q; ensure that the repository exists and that this node is selected under \"Share With\" in the repository configuration.", repo, nodeID)
		return
	}

	for i := range fs {
		lamport.Default.Tick(fs[i].Version)
	}

	m.rmut.RLock()
	r, ok := m.repoFiles[repo]
	m.rmut.RUnlock()
	if ok {
		r.Update(nodeID, fs)
	} else {
		l.Fatalf("IndexUpdate for nonexistent repo %q", repo)
	}

	events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
		"node":    nodeID.String(),
		"repo":    repo,
		"items":   len(fs),
		"version": r.LocalVersion(nodeID),
	})
}

func (m *Model) repoSharedWith(repo string, nodeID protocol.NodeID) bool {
	m.rmut.RLock()
	defer m.rmut.RUnlock()
	for _, nrepo := range m.nodeRepos[nodeID] {
		if nrepo == repo {
			return true
		}
	}
	return false
}
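// ClusterConfig is called when the given node sends its cluster configuration
// message. The received configuration is compared against our own view of the
// cluster; a mismatch is logged and the connection closed, and the remote
// client name and version are recorded.
// Implements the protocol.Model interface.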
func (m *Model) ClusterConfig(nodeID protocol.NodeID, config protocol.ClusterConfigMessage) {
	compErr := compareClusterConfig(m.clusterConfig(nodeID), config)
	if debug {
		l.Debugf("ClusterConfig: %s: %#v", nodeID, config)
		l.Debugf("  ... compare: %s: %v", nodeID, compErr)
	}

	if compErr != nil {
		l.Warnf("%s: %v", nodeID, compErr)
		m.Close(nodeID, compErr)
	}

	m.pmut.Lock()
	if config.ClientName == "syncthing" {
		m.nodeVer[nodeID] = config.ClientVersion
	} else {
		m.nodeVer[nodeID] = config.ClientName + " " + config.ClientVersion
	}
	m.pmut.Unlock()

	l.Infof(`Node %s client is "%s %s"`, nodeID, config.ClientName, config.ClientVersion)
}

// Close removes the peer from the model and closes the underlying connection if possible.
// Implements the protocol.Model interface.
func (m *Model) Close(node protocol.NodeID, err error) {
	l.Infof("Connection to %s closed: %v", node, err)
	events.Default.Log(events.NodeDisconnected, map[string]string{
		"id":    node.String(),
		"error": err.Error(),
	})

	m.pmut.Lock()
	m.rmut.RLock()
	for _, repo := range m.nodeRepos[node] {
		m.repoFiles[repo].Replace(node, nil)
	}
	m.rmut.RUnlock()

	conn, ok := m.rawConn[node]
	if ok {
		conn.Close()
	}
	delete(m.protoConn, node)
	delete(m.rawConn, node)
	delete(m.nodeVer, node)
	m.pmut.Unlock()
}

// Request returns the specified data segment by reading it from local disk.
// Implements the protocol.Model interface.
func (m *Model) Request(nodeID protocol.NodeID, repo, name string, offset int64, size int) ([]byte, error) {
	// Verify that the requested file exists in the local model.
	m.rmut.RLock()
	r, ok := m.repoFiles[repo]
	m.rmut.RUnlock()

	if !ok {
		l.Warnf("Request from %s for file %s in nonexistent repo %q", nodeID, name, repo)
		return nil, ErrNoSuchFile
	}

	lf := r.Get(protocol.LocalNodeID, name)
	if protocol.IsInvalid(lf.Flags) || protocol.IsDeleted(lf.Flags) {
		if debug {
			l.Debugf("REQ(in): %s: %q / %q o=%d s=%d; invalid: %v", nodeID, repo, name, offset, size, lf)
		}
		return nil, ErrInvalid
	}

	if offset > lf.Size() {
		if debug {
			l.Debugf("REQ(in; nonexistent): %s: %q o=%d s=%d", nodeID, name, offset, size)
		}
		return nil, ErrNoSuchFile
	}

	if debug && nodeID != protocol.LocalNodeID {
		l.Debugf("REQ(in): %s: %q / %q o=%d s=%d", nodeID, repo, name, offset, size)
	}
	m.rmut.RLock()
	fn := filepath.Join(m.repoCfgs[repo].Directory, name)
	m.rmut.RUnlock()
	fd, err := os.Open(fn) // XXX: Inefficient, should cache fd?
	if err != nil {
		return nil, err
	}
	defer fd.Close()

	buf := make([]byte, size)
	_, err = fd.ReadAt(buf, offset)
	if err != nil {
		return nil, err
	}

	return buf, nil
}

// ReplaceLocal replaces the local repository index with the given list of files.
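//
// ReplaceWithDelete, as opposed to a plain Replace, marks entries that are
// currently present locally but absent from fs as deleted, so the deletion
// propagates to the rest of the cluster.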
func (m *Model) ReplaceLocal(repo string, fs []protocol.FileInfo) {
	m.rmut.RLock()
	m.repoFiles[repo].ReplaceWithDelete(protocol.LocalNodeID, fs)
	m.rmut.RUnlock()
}

func (m *Model) CurrentRepoFile(repo string, file string) protocol.FileInfo {
	m.rmut.RLock()
	f := m.repoFiles[repo].Get(protocol.LocalNodeID, file)
	m.rmut.RUnlock()
	return f
}

func (m *Model) CurrentGlobalFile(repo string, file string) protocol.FileInfo {
	m.rmut.RLock()
	f := m.repoFiles[repo].GetGlobal(file)
	m.rmut.RUnlock()
	return f
}

type cFiler struct {
	m *Model
	r string
}

// Implements scanner.CurrentFiler
func (cf cFiler) CurrentFile(file string) protocol.FileInfo {
	return cf.m.CurrentRepoFile(cf.r, file)
}

// ConnectedTo returns true if we are connected to the named node.
func (m *Model) ConnectedTo(nodeID protocol.NodeID) bool {
	m.pmut.RLock()
	_, ok := m.protoConn[nodeID]
	m.pmut.RUnlock()
	return ok
}

// AddConnection adds a new peer connection to the model. An initial index will
// be sent to the connected peer, thereafter index updates whenever the local
// repository changes.
func (m *Model) AddConnection(rawConn io.Closer, protoConn protocol.Connection) {
	nodeID := protoConn.ID()

	m.pmut.Lock()
	if _, ok := m.protoConn[nodeID]; ok {
		panic("add existing node")
	}
	m.protoConn[nodeID] = protoConn
	if _, ok := m.rawConn[nodeID]; ok {
		panic("add existing node")
	}
	m.rawConn[nodeID] = rawConn

	cm := m.clusterConfig(nodeID)
	protoConn.ClusterConfig(cm)

	m.rmut.RLock()
	for _, repo := range m.nodeRepos[nodeID] {
		fs := m.repoFiles[repo]
		go sendIndexes(protoConn, repo, fs)
	}
	m.rmut.RUnlock()
	m.pmut.Unlock()
}
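// sendIndexes ships the local index for the given repository to the connected
// node: first a full Index message, then periodic IndexUpdates covering
// whatever changed since the last send, in batches of at most indexBatchSize
// files. The loop exits when a send fails, typically because the connection
// was closed.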
func sendIndexes(conn protocol.Connection, repo string, fs *files.Set) {
	nodeID := conn.ID()
	name := conn.Name()

	if debug {
		l.Debugf("sendIndexes for %s-%s@/%q starting", nodeID, name, repo)
	}

	initial := true
	minLocalVer := uint64(0)
	var err error

	defer func() {
		if debug {
			l.Debugf("sendIndexes for %s-%s@/%q exiting: %v", nodeID, name, repo, err)
		}
	}()

	for err == nil {
		if !initial {
			time.Sleep(5 * time.Second)
		}
		if fs.LocalVersion(protocol.LocalNodeID) <= minLocalVer {
			continue
		}

		batch := make([]protocol.FileInfo, 0, indexBatchSize)
		maxLocalVer := uint64(0)

		fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
			if f.LocalVersion <= minLocalVer {
				return true
			}

			if f.LocalVersion > maxLocalVer {
				maxLocalVer = f.LocalVersion
			}

			if len(batch) == indexBatchSize {
				if initial {
					if err = conn.Index(repo, batch); err != nil {
						return false
					}
					if debug {
						l.Debugf("sendIndexes for %s-%s/%q: %d files (initial index)", nodeID, name, repo, len(batch))
					}
					initial = false
				} else {
					if err = conn.IndexUpdate(repo, batch); err != nil {
						return false
					}
					if debug {
						l.Debugf("sendIndexes for %s-%s/%q: %d files (batched update)", nodeID, name, repo, len(batch))
					}
				}

				batch = make([]protocol.FileInfo, 0, indexBatchSize)
			}

			batch = append(batch, f)
			return true
		})

		if initial {
			err = conn.Index(repo, batch)
			if debug && err == nil {
				l.Debugf("sendIndexes for %s-%s/%q: %d files (small initial index)", nodeID, name, repo, len(batch))
			}
			initial = false
		} else if len(batch) > 0 {
			err = conn.IndexUpdate(repo, batch)
			if debug && err == nil {
				l.Debugf("sendIndexes for %s-%s/%q: %d files (last batch)", nodeID, name, repo, len(batch))
			}
		}

		minLocalVer = maxLocalVer
	}
}

func (m *Model) updateLocal(repo string, f protocol.FileInfo) {
	f.LocalVersion = 0
	m.rmut.RLock()
	m.repoFiles[repo].Update(protocol.LocalNodeID, []protocol.FileInfo{f})
	m.rmut.RUnlock()
	events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
		"repo":     repo,
		"name":     f.Name,
		"modified": time.Unix(f.Modified, 0),
		"flags":    fmt.Sprintf("0%o", f.Flags),
		"size":     f.Size(),
	})
}

func (m *Model) requestGlobal(nodeID protocol.NodeID, repo, name string, offset int64, size int, hash []byte) ([]byte, error) {
	m.pmut.RLock()
	nc, ok := m.protoConn[nodeID]
	m.pmut.RUnlock()

	if !ok {
		return nil, fmt.Errorf("requestGlobal: no such node: %s", nodeID)
	}

	if debug {
		l.Debugf("REQ(out): %s: %q / %q o=%d s=%d h=%x", nodeID, repo, name, offset, size, hash)
	}

	return nc.Request(repo, name, offset, size)
}

func (m *Model) AddRepo(cfg config.RepositoryConfiguration) {
	if m.started {
		panic("cannot add repo to started model")
	}
	if len(cfg.ID) == 0 {
		panic("cannot add empty repo id")
	}

	m.rmut.Lock()
	m.repoCfgs[cfg.ID] = cfg
	m.repoFiles[cfg.ID] = files.NewSet(cfg.ID, m.db)
	m.suppressor[cfg.ID] = &suppressor{threshold: int64(m.cfg.Options.MaxChangeKbps)}

	m.repoNodes[cfg.ID] = make([]protocol.NodeID, len(cfg.Nodes))
	for i, node := range cfg.Nodes {
		m.repoNodes[cfg.ID][i] = node.NodeID
		m.nodeRepos[node.NodeID] = append(m.nodeRepos[node.NodeID], cfg.ID)
	}

	m.addedRepo = true
	m.rmut.Unlock()
}

func (m *Model) ScanRepos() {
	m.rmut.RLock()
	var repos = make([]string, 0, len(m.repoCfgs))
	for repo := range m.repoCfgs {
		repos = append(repos, repo)
	}
	m.rmut.RUnlock()

	var wg sync.WaitGroup
	wg.Add(len(repos))
	for _, repo := range repos {
		repo := repo
		go func() {
			err := m.ScanRepo(repo)
			if err != nil {
				invalidateRepo(m.cfg, repo, err)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}

func (m *Model) CleanRepos() {
	m.rmut.RLock()
	var dirs = make([]string, 0, len(m.repoCfgs))
	for _, cfg := range m.repoCfgs {
		dirs = append(dirs, cfg.Directory)
	}
	m.rmut.RUnlock()

	var wg sync.WaitGroup
	wg.Add(len(dirs))
	for _, dir := range dirs {
		w := &scanner.Walker{
			Dir:       dir,
			TempNamer: defTempNamer,
		}
		go func() {
			w.CleanTempFiles()
			wg.Done()
		}()
	}
	wg.Wait()
}

func (m *Model) ScanRepo(repo string) error {
	m.rmut.RLock()
	fs := m.repoFiles[repo]
	dir := m.repoCfgs[repo].Directory

	w := &scanner.Walker{
		Dir:          dir,
		IgnoreFile:   ".stignore",
		BlockSize:    scanner.StandardBlockSize,
		TempNamer:    defTempNamer,
		Suppressor:   m.suppressor[repo],
		CurrentFiler: cFiler{m, repo},
		IgnorePerms:  m.repoCfgs[repo].IgnorePerms,
	}
	m.rmut.RUnlock()

	m.setState(repo, RepoScanning)
	fchan, _, err := w.Walk()

	if err != nil {
		return err
	}
	batchSize := 100
	batch := make([]protocol.FileInfo, 0, batchSize)
	for f := range fchan {
		if len(batch) == batchSize {
			fs.Update(protocol.LocalNodeID, batch)
			batch = batch[:0]
		}
		batch = append(batch, f)
	}
	if len(batch) > 0 {
		fs.Update(protocol.LocalNodeID, batch)
	}

	batch = batch[:0]
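	// Second pass: anything still listed as present locally but no longer on
	// disk is marked as deleted and given a new version.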
	fs.WithHave(protocol.LocalNodeID, func(f protocol.FileInfo) bool {
		if !protocol.IsDeleted(f.Flags) {
			if len(batch) == batchSize {
				fs.Update(protocol.LocalNodeID, batch)
				batch = batch[:0]
			}
			if _, err := os.Stat(filepath.Join(dir, f.Name)); err != nil && os.IsNotExist(err) {
				// File has been deleted
				f.Blocks = nil
				f.Flags |= protocol.FlagDeleted
				f.Version = lamport.Default.Tick(f.Version)
				f.LocalVersion = 0
				batch = append(batch, f)
			}
		}
		return true
	})
	if len(batch) > 0 {
		fs.Update(protocol.LocalNodeID, batch)
	}

	m.setState(repo, RepoIdle)
	return nil
}

// clusterConfig returns a ClusterConfigMessage that is correct for the given peer node
func (m *Model) clusterConfig(node protocol.NodeID) protocol.ClusterConfigMessage {
	cm := protocol.ClusterConfigMessage{
		ClientName:    m.clientName,
		ClientVersion: m.clientVersion,
	}

	m.rmut.RLock()
	for _, repo := range m.nodeRepos[node] {
		cr := protocol.Repository{
			ID: repo,
		}
		for _, node := range m.repoNodes[repo] {
			// TODO: Set read only bit when relevant
			cr.Nodes = append(cr.Nodes, protocol.Node{
				ID:    node[:],
				Flags: protocol.FlagShareTrusted,
			})
		}
		cm.Repositories = append(cm.Repositories, cr)
	}
	m.rmut.RUnlock()

	return cm
}

func (m *Model) setState(repo string, state repoState) {
	m.smut.Lock()
	oldState := m.repoState[repo]
	changed, ok := m.repoStateChanged[repo]
	if state != oldState {
		m.repoState[repo] = state
		m.repoStateChanged[repo] = time.Now()
		eventData := map[string]interface{}{
			"repo": repo,
			"to":   state.String(),
		}
		if ok {
			eventData["duration"] = time.Since(changed).Seconds()
			eventData["from"] = oldState.String()
		}
		events.Default.Log(events.StateChanged, eventData)
	}
	m.smut.Unlock()
}

func (m *Model) State(repo string) (string, time.Time) {
	m.smut.RLock()
	state := m.repoState[repo]
	changed := m.repoStateChanged[repo]
	m.smut.RUnlock()
	return state.String(), changed
}

func (m *Model) Override(repo string) {
	m.rmut.RLock()
	fs := m.repoFiles[repo]
	m.rmut.RUnlock()

	batch := make([]protocol.FileInfo, 0, indexBatchSize)
	fs.WithNeed(protocol.LocalNodeID, func(need protocol.FileInfo) bool {
		if len(batch) == indexBatchSize {
			fs.Update(protocol.LocalNodeID, batch)
			batch = batch[:0]
		}

		have := fs.Get(protocol.LocalNodeID, need.Name)
		if have.Name != need.Name {
			// We are missing the file
			need.Flags |= protocol.FlagDeleted
			need.Blocks = nil
		} else {
			// We have the file, replace with our version
			need = have
		}
		need.Version = lamport.Default.Tick(need.Version)
		need.LocalVersion = 0
		batch = append(batch, need)
		return true
	})
	if len(batch) > 0 {
		fs.Update(protocol.LocalNodeID, batch)
	}
}

// LocalVersion returns the change version for the given repository. This is
// guaranteed to increment if the contents of the local or global repository
// have changed.
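//
// The value is the sum of the local version counters for ourselves and every
// node sharing the repository, so a change observed from any node increments
// the total.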
func (m *Model) LocalVersion(repo string) uint64 {
	m.rmut.Lock()
	defer m.rmut.Unlock()

	fs, ok := m.repoFiles[repo]
	if !ok {
		return 0
	}

	ver := fs.LocalVersion(protocol.LocalNodeID)
	for _, n := range m.repoNodes[repo] {
		ver += fs.LocalVersion(n)
	}

	return ver
}