// Copyright 2024 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.

package simplefs

import (
	"archive/zip"
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"hash"
	"io"
	"io/fs"
	"os"
	"path"
	"path/filepath"
	"sort"
	"sync"
	"time"

	"golang.org/x/time/rate"

	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
	"gopkg.in/src-d/go-billy.v4"
)

func loadArchiveStateFromJsonGz(ctx context.Context, simpleFS *SimpleFS, filePath string) (state *keybase1.SimpleFSArchiveState, err error) {
	f, err := os.Open(filePath)
	if err != nil {
		simpleFS.log.CErrorf(ctx, "loadArchiveStateFromJsonGz: opening state file error: %v", err)
		return nil, err
	}
	defer f.Close()
	gzReader, err := gzip.NewReader(f)
	if err != nil {
		simpleFS.log.CErrorf(ctx, "loadArchiveStateFromJsonGz: creating gzip reader error: %v", err)
		return nil, err
	}
	defer gzReader.Close()
	decoder := json.NewDecoder(gzReader)
	err = decoder.Decode(&state)
	if err != nil {
		simpleFS.log.CErrorf(ctx, "loadArchiveStateFromJsonGz: decoding state file error: %v", err)
		return nil, err
	}
	return state, nil
}

func writeArchiveStateIntoJsonGz(ctx context.Context, simpleFS *SimpleFS, filePath string, s *keybase1.SimpleFSArchiveState) error {
	err := os.MkdirAll(filepath.Dir(filePath), 0755)
	if err != nil {
		simpleFS.log.CErrorf(ctx, "writeArchiveStateIntoJsonGz: os.MkdirAll error: %v", err)
		return err
	}
	f, err := os.Create(filePath)
	if err != nil {
		simpleFS.log.CErrorf(ctx, "writeArchiveStateIntoJsonGz: creating state file error: %v", err)
		return err
	}
	defer f.Close()

	gzWriter := gzip.NewWriter(f)

	encoder := json.NewEncoder(gzWriter)
	err = encoder.Encode(s)
	if err != nil {
		simpleFS.log.CErrorf(ctx, "writeArchiveStateIntoJsonGz: encoding state file error: %v", err)
		return err
	}

	// Close explicitly (rather than defer) so gzip flush errors are reported.
	err = gzWriter.Close()
	if err != nil {
		simpleFS.log.CErrorf(ctx, "writeArchiveStateIntoJsonGz: closing gzip writer error: %v", err)
		return err
	}

	return nil
}
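
// The archive state is persisted as gzip-compressed JSON. A minimal sketch
// of reading such a file outside of SimpleFS (illustrative only; error
// handling elided, file name assumed):
//
//	f, _ := os.Open("kbfs-archive-alice.json.gz")
//	defer f.Close()
//	gz, _ := gzip.NewReader(f)
//	defer gz.Close()
//	var state keybase1.SimpleFSArchiveState
//	_ = json.NewDecoder(gz).Decode(&state)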
type errorState struct {
	err       error
	nextRetry time.Time
}

type archiveManager struct {
	simpleFS *SimpleFS
	username libkb.NormalizedUsername

	// Just use a regular mutex rather than a rw one so all writes to
	// persistent storage are synchronized.
	mu               sync.Mutex
	state            *keybase1.SimpleFSArchiveState
	jobCtxCancellers map[string]func()
	// jobID -> errorState. Populated when an error has happened. It's only
	// valid for these phases:
	//
	//   keybase1.SimpleFSArchiveJobPhase_Indexing
	//   keybase1.SimpleFSArchiveJobPhase_Copying
	//   keybase1.SimpleFSArchiveJobPhase_Zipping
	//
	// When nextRetry becomes current, errorRetryWorker deletes the
	// errorState from this map and puts the job back into the previous
	// phase so the relevant worker can pick it up again.
	errors map[string]errorState

	indexingWorkerSignal      chan struct{}
	copyingWorkerSignal       chan struct{}
	zippingWorkerSignal       chan struct{}
	notifyUIStateChangeSignal chan struct{}

	ctxCancel func()
}

func (m *archiveManager) getStateFilePath(simpleFS *SimpleFS) string {
	cacheDir := simpleFS.getCacheDir()
	return filepath.Join(cacheDir, fmt.Sprintf("kbfs-archive-%s.json.gz", m.username))
}

func (m *archiveManager) flushStateFileLocked(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	err := writeArchiveStateIntoJsonGz(ctx, m.simpleFS, m.getStateFilePath(m.simpleFS), m.state)
	if err != nil {
		m.simpleFS.log.CErrorf(ctx,
			"archiveManager.flushStateFileLocked: writing state file error: %v", err)
		return err
	}
	return nil
}

func (m *archiveManager) flushStateFile(ctx context.Context) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.flushStateFileLocked(ctx)
}

func (m *archiveManager) signal(ch chan struct{}) {
	select {
	case ch <- struct{}{}:
	default:
		// There's already a signal in the chan. Skip this one.
	}
}

func (m *archiveManager) shutdown(ctx context.Context) {
	// OK to cancel before flushStateFileLocked because we'll pass in the
	// shutdown ctx there.
	if m.ctxCancel != nil {
		m.ctxCancel()
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	err := m.flushStateFileLocked(ctx)
	if err != nil {
		m.simpleFS.log.CWarningf(ctx, "m.flushStateFileLocked error: %v", err)
	}
}

func (m *archiveManager) notifyUIStateChange(ctx context.Context) {
	m.simpleFS.log.CDebugf(ctx, "+ archiveManager.notifyUIStateChange")
	defer m.simpleFS.log.CDebugf(ctx, "- archiveManager.notifyUIStateChange")
	m.mu.Lock()
	defer m.mu.Unlock()
	state, errorStates := m.getCurrentStateLocked(ctx)
	m.simpleFS.notifyUIArchiveStateChange(ctx, state, errorStates)
}

func (m *archiveManager) startJob(ctx context.Context, job keybase1.SimpleFSArchiveJobDesc) error {
	m.simpleFS.log.CDebugf(ctx, "+ archiveManager.startJob %#+v", job)
	defer m.simpleFS.log.CDebugf(ctx, "- archiveManager.startJob")

	m.mu.Lock()
	defer m.mu.Unlock()
	if _, ok := m.state.Jobs[job.JobID]; ok {
		return errors.New("job ID already exists")
	}
	m.state.Jobs[job.JobID] = keybase1.SimpleFSArchiveJobState{
		Desc:  job,
		Phase: keybase1.SimpleFSArchiveJobPhase_Queued,
	}
	m.state.LastUpdated = keybase1.ToTime(time.Now())
	m.signal(m.notifyUIStateChangeSignal)
	m.signal(m.indexingWorkerSignal)
	return m.flushStateFileLocked(ctx)
}

func (m *archiveManager) cancelOrDismissJob(ctx context.Context,
	jobID string) (err error) {
	m.simpleFS.log.CDebugf(ctx, "+ archiveManager.cancelOrDismissJob %s", jobID)
	defer m.simpleFS.log.CDebugf(ctx, "- archiveManager.cancelOrDismissJob %s", jobID)
	m.mu.Lock()
	defer m.mu.Unlock()

	if cancel, ok := m.jobCtxCancellers[jobID]; ok {
		cancel()
		delete(m.jobCtxCancellers, jobID)
	}

	job, ok := m.state.Jobs[jobID]
	if !ok {
		return errors.New("job not found")
	}
	delete(m.state.Jobs, jobID)

	err = os.RemoveAll(job.Desc.StagingPath)
	if err != nil {
		m.simpleFS.log.CWarningf(ctx, "removing staging path %q for job %s error: %v",
			job.Desc.StagingPath, jobID, err)
	}

	m.signal(m.notifyUIStateChangeSignal)
	return nil
}
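
// The four signal channels above have capacity 1, and m.signal does a
// non-blocking send, so any number of back-to-back notifications coalesce
// into at most one pending wakeup. The same pattern in isolation
// (illustrative only, not part of this file's API):
//
//	signal := make(chan struct{}, 1)
//	notify := func() {
//		select {
//		case signal <- struct{}{}:
//		default: // a wakeup is already pending; drop this one
//		}
//	}
//	go func() {
//		for range signal {
//			// ... drain all available work here ...
//		}
//	}()
//	notify()
//	notify() // coalesced with the first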
func (m *archiveManager) getCurrentStateLocked(ctx context.Context) (
	state keybase1.SimpleFSArchiveState, errorStates map[string]errorState) {
	errorStates = make(map[string]errorState)
	for jobID, errState := range m.errors {
		errorStates[jobID] = errState
	}
	return m.state.DeepCopy(), errorStates
}

func (m *archiveManager) getCurrentState(ctx context.Context) (
	state keybase1.SimpleFSArchiveState, errorStates map[string]errorState) {
	m.simpleFS.log.CDebugf(ctx, "+ archiveManager.getCurrentState")
	defer m.simpleFS.log.CDebugf(ctx, "- archiveManager.getCurrentState")
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.getCurrentStateLocked(ctx)
}

func (m *archiveManager) checkArchive(
	ctx context.Context, archiveZipFilePath string) (
	desc keybase1.SimpleFSArchiveJobDesc, pathsWithIssues map[string]string,
	err error) {
	m.simpleFS.log.CDebugf(ctx, "+ archiveManager.checkArchive %q", archiveZipFilePath)
	defer m.simpleFS.log.CDebugf(ctx, "- archiveManager.checkArchive %q", archiveZipFilePath)

	reader, err := zip.OpenReader(archiveZipFilePath)
	if err != nil {
		return keybase1.SimpleFSArchiveJobDesc{}, nil,
			fmt.Errorf("zip.OpenReader(%s) error: %v", archiveZipFilePath, err)
	}
	defer reader.Close()

	var receipt Receipt
	{
		receiptFile, err := reader.Open("receipt.json")
		if err != nil {
			return keybase1.SimpleFSArchiveJobDesc{}, nil,
				fmt.Errorf("reader.Open(receipt.json) error: %v", err)
		}
		defer receiptFile.Close()
		err = json.NewDecoder(receiptFile).Decode(&receipt)
		if err != nil {
			return keybase1.SimpleFSArchiveJobDesc{}, nil,
				fmt.Errorf("json Decode on receipt.json error: %v", err)
		}
	}

	pathsWithIssues = make(map[string]string)

loopManifest:
	for itemPath, item := range receipt.Manifest {
		f, err := reader.Open(path.Join(receipt.Desc.TargetName, itemPath))
		if err != nil {
			errDesc := fmt.Sprintf("opening %q error: %v", itemPath, err)
			m.simpleFS.log.CWarningf(ctx, errDesc)
			pathsWithIssues[itemPath] = errDesc
			continue loopManifest
		}

		{ // Check DirentType
			fstat, err := f.Stat()
			if err != nil {
				errDesc := fmt.Sprintf("f.Stat %q error: %v", itemPath, err)
				m.simpleFS.log.CWarningf(ctx, errDesc)
				pathsWithIssues[itemPath] = errDesc
				continue loopManifest
			}
			switch item.DirentType {
			case keybase1.DirentType_DIR:
				if !fstat.IsDir() {
					errDesc := fmt.Sprintf(
						"%q is a dir in manifest but not a dir in archive", itemPath)
					m.simpleFS.log.CWarningf(ctx, errDesc)
					pathsWithIssues[itemPath] = errDesc
					continue loopManifest
				}
				continue loopManifest
			case keybase1.DirentType_FILE:
				if fstat.IsDir() || fstat.Mode()&os.ModeSymlink != 0 || fstat.Mode()&0111 != 0 {
					errDesc := fmt.Sprintf(
						"%q is a normal file with no exec bit in manifest but not in archive (mode=%v)", itemPath, fstat.Mode())
					m.simpleFS.log.CWarningf(ctx, errDesc)
					pathsWithIssues[itemPath] = errDesc
					continue loopManifest
				}
			case keybase1.DirentType_SYM:
				if fstat.IsDir() || fstat.Mode()&os.ModeSymlink == 0 {
					errDesc := fmt.Sprintf(
						"%q is a symlink in manifest but not in archive (mode=%v)", itemPath, fstat.Mode())
					m.simpleFS.log.CWarningf(ctx, errDesc)
					pathsWithIssues[itemPath] = errDesc
					continue loopManifest
				}
				continue loopManifest
			case keybase1.DirentType_EXEC:
				if fstat.IsDir() || fstat.Mode()&os.ModeSymlink != 0 ||
					fstat.Mode()&0111 == 0 {
					errDesc := fmt.Sprintf(
						"%q is a normal file with exec bit in manifest but not in archive (mode=%v)", itemPath, fstat.Mode())
					m.simpleFS.log.CWarningf(ctx, errDesc)
					pathsWithIssues[itemPath] = errDesc
					continue loopManifest
				}
			}
		}

		{ // Check hash
			h := sha256.New()
			_, err = io.Copy(h, f)
			if err != nil {
				errDesc := fmt.Sprintf("hashing %q error: %v", itemPath, err)
				m.simpleFS.log.CWarningf(ctx, errDesc)
				pathsWithIssues[itemPath] = errDesc
				return keybase1.SimpleFSArchiveJobDesc{}, nil,
					fmt.Errorf("hashing %q error: %v", itemPath, err)
			}
			if hex.EncodeToString(h.Sum(nil)) != item.Sha256SumHex {
				errDesc := fmt.Sprintf("hash doesn't match %q", itemPath)
				m.simpleFS.log.CWarningf(ctx, errDesc)
				pathsWithIssues[itemPath] = errDesc
				continue loopManifest
			}
		}
	}
	return receipt.Desc, pathsWithIssues, nil
}

func (m *archiveManager) changeJobPhaseLocked(ctx context.Context,
	jobID string, newPhase keybase1.SimpleFSArchiveJobPhase) {
	copy, ok := m.state.Jobs[jobID]
	if !ok {
		m.simpleFS.log.CWarningf(ctx, "job %s not found. it might have been canceled", jobID)
		return
	}
	copy.Phase = newPhase
	m.state.Jobs[jobID] = copy
	m.signal(m.notifyUIStateChangeSignal)
}

func (m *archiveManager) changeJobPhase(ctx context.Context,
	jobID string, newPhase keybase1.SimpleFSArchiveJobPhase) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.changeJobPhaseLocked(ctx, jobID, newPhase)
}

func (m *archiveManager) startWorkerTask(ctx context.Context,
	eligiblePhase keybase1.SimpleFSArchiveJobPhase,
	newPhase keybase1.SimpleFSArchiveJobPhase) (jobID string, jobCtx context.Context, ok bool) {
	jobCtx, cancel := context.WithCancel(ctx)
	m.mu.Lock()
	defer m.mu.Unlock()
	for jobID := range m.state.Jobs {
		if m.state.Jobs[jobID].Phase == eligiblePhase {
			m.changeJobPhaseLocked(ctx, jobID, newPhase)
			m.jobCtxCancellers[jobID] = cancel
			return jobID, jobCtx, true
		}
	}
	// No eligible job; release the context we created so it doesn't leak.
	cancel()
	return "", nil, false
}

const archiveErrorRetryDuration = time.Minute

func (m *archiveManager) setJobError(
	ctx context.Context, jobID string, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	nextRetry := time.Now().Add(archiveErrorRetryDuration)
	m.simpleFS.log.CErrorf(ctx, "job %s nextRetry: %s", jobID, nextRetry)
	m.errors[jobID] = errorState{
		err:       err,
		nextRetry: nextRetry,
	}
}
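
// Jobs move through a simple phase pipeline, with each transition claimed
// by one of the workers below via startWorkerTask (a summary of this file's
// flow, not new behavior):
//
//	Queued  --(indexingWorker)--> Indexing --> Indexed
//	Indexed --(copyingWorker)---> Copying  --> Copied
//	Copied  --(zippingWorker)---> Zipping  --> Done
//
// On error, setJobError records the failure, and errorRetryWorker later
// resets the job from its in-flight phase back to the phase preceding it
// (Indexing->Queued, Copying->Indexed, Zipping->Copied) and re-signals the
// workers.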
func (m *archiveManager) doIndexing(ctx context.Context, jobID string) (err error) {
	m.simpleFS.log.CDebugf(ctx, "+ doIndexing %s", jobID)
	defer func() { m.simpleFS.log.CDebugf(ctx, "- doIndexing %s err: %v", jobID, err) }()

	jobDesc := func() keybase1.SimpleFSArchiveJobDesc {
		m.mu.Lock()
		defer m.mu.Unlock()
		return m.state.Jobs[jobID].Desc
	}()
	opid, err := m.simpleFS.SimpleFSMakeOpid(ctx)
	if err != nil {
		return err
	}
	defer m.simpleFS.SimpleFSClose(ctx, opid)
	filter := keybase1.ListFilter_NO_FILTER
	err = m.simpleFS.SimpleFSListRecursive(ctx, keybase1.SimpleFSListRecursiveArg{
		OpID:   opid,
		Path:   keybase1.NewPathWithKbfsArchived(jobDesc.KbfsPathWithRevision),
		Filter: filter,
	})
	if err != nil {
		return err
	}
	err = m.simpleFS.SimpleFSWait(ctx, opid)
	if err != nil {
		return err
	}

	listResult, err := m.simpleFS.SimpleFSReadList(ctx, opid)
	if err != nil {
		return err
	}

	var bytesTotal int64
	manifest := make(map[string]keybase1.SimpleFSArchiveFile)
	for _, e := range listResult.Entries {
		manifest[e.Name] = keybase1.SimpleFSArchiveFile{
			State:      keybase1.SimpleFSFileArchiveState_ToDo,
			DirentType: e.DirentType,
		}
		if e.DirentType == keybase1.DirentType_FILE ||
			e.DirentType == keybase1.DirentType_EXEC {
			bytesTotal += int64(e.Size)
		}
	}

	func() {
		m.mu.Lock()
		defer m.mu.Unlock()

		jobCopy, ok := m.state.Jobs[jobID]
		if !ok {
			m.simpleFS.log.CWarningf(ctx, "job %s not found. it might have been canceled", jobID)
			return
		}
		jobCopy.Manifest = manifest
		jobCopy.BytesTotal = bytesTotal
		m.state.Jobs[jobID] = jobCopy
		m.signal(m.notifyUIStateChangeSignal)
	}()
	return nil
}

func (m *archiveManager) indexingWorker(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-m.indexingWorkerSignal:
		}

		jobID, jobCtx, ok := m.startWorkerTask(ctx,
			keybase1.SimpleFSArchiveJobPhase_Queued,
			keybase1.SimpleFSArchiveJobPhase_Indexing)

		if !ok {
			continue
		}
		// We got a task. Put another token into the signal channel so we
		// check again on the next iteration.
		m.signal(m.indexingWorkerSignal)

		m.simpleFS.log.CDebugf(ctx, "indexing: %s", jobID)

		err := m.doIndexing(jobCtx, jobID)
		if err == nil {
			m.simpleFS.log.CDebugf(jobCtx, "indexing done on job %s", jobID)
			m.changeJobPhase(jobCtx, jobID, keybase1.SimpleFSArchiveJobPhase_Indexed)
			m.signal(m.copyingWorkerSignal) // Done indexing! Notify the copying worker.
		} else {
			m.simpleFS.log.CErrorf(jobCtx, "indexing error on job %s: %v", jobID, err)
			m.setJobError(ctx, jobID, err)
		}

		err = m.flushStateFile(ctx)
		if err != nil {
			m.simpleFS.log.CWarningf(ctx, "m.flushStateFile error: %v", err)
		}
	}
}
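
// sha256TeeReader below wraps an io.Reader with io.TeeReader so that every
// byte read also feeds a SHA-256 hasher; the hash of everything read so far
// is then available from getSum without a second pass over the data. The
// same pattern in isolation (illustrative only):
//
//	h := sha256.New()
//	tee := io.TeeReader(src, h)
//	n, err := io.Copy(dst, tee) // dst receives the bytes; h hashes them
//	sum := h.Sum(nil)           // SHA-256 of the n bytes copied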
type sha256TeeReader struct {
	inner          io.Reader
	innerTeeReader io.Reader
	h              hash.Hash
}

var _ io.Reader = (*sha256TeeReader)(nil)

// Read implements the io.Reader interface.
func (r *sha256TeeReader) Read(p []byte) (n int, err error) {
	return r.innerTeeReader.Read(p)
}

func (r *sha256TeeReader) getSum() []byte {
	return r.h.Sum(nil)
}

func newSHA256TeeReader(inner io.Reader) (r *sha256TeeReader) {
	r = &sha256TeeReader{
		inner: inner,
		h:     sha256.New(),
	}
	r.innerTeeReader = io.TeeReader(r.inner, r.h)
	return r
}

type bytesUpdaterFunc = func(delta int64)

func ctxAwareCopy(
	ctx context.Context, to io.Writer, from io.Reader,
	bytesUpdater bytesUpdaterFunc) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
		n, err := io.CopyN(to, from, 64*1024)
		switch err {
		case nil:
			bytesUpdater(n)
		case io.EOF:
			bytesUpdater(n)
			return nil
		default:
			return err
		}
	}
}

func (m *archiveManager) copyFileFromBeginning(ctx context.Context,
	srcDirFS billy.Filesystem, entryPathWithinJob string,
	localPath string, mode os.FileMode,
	bytesCopiedUpdater bytesUpdaterFunc) (sha256Sum []byte, err error) {
	m.simpleFS.log.CDebugf(ctx, "+ copyFileFromBeginning %s", entryPathWithinJob)
	defer func() { m.simpleFS.log.CDebugf(ctx, "- copyFileFromBeginning %s err: %v", entryPathWithinJob, err) }()

	src, err := srcDirFS.Open(entryPathWithinJob)
	if err != nil {
		return nil, fmt.Errorf("srcDirFS.Open(%s) error: %v", entryPathWithinJob, err)
	}
	defer src.Close()

	dst, err := os.OpenFile(localPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return nil, fmt.Errorf("os.OpenFile(%s) error: %v", localPath, err)
	}
	defer dst.Close()

	teeReader := newSHA256TeeReader(src)

	err = ctxAwareCopy(ctx, dst, teeReader, bytesCopiedUpdater)
	if err != nil {
		return nil, fmt.Errorf("[%s] ctxAwareCopy error: %v", entryPathWithinJob, err)
	}

	// We didn't continue from a previously interrupted copy, so don't
	// bother verifying the sha256sum and just return it.
	return teeReader.getSum(), nil
}
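
// copyFilePickupPrevious resumes a copy that a previous run left behind: it
// appends to the partial local file starting at srcSeekOffset, then re-reads
// both source and destination in full to compare SHA-256 sums, since the
// bytes written by the interrupted run were never verified. If the sums
// differ, it falls back to copyFileFromBeginning.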
func (m *archiveManager) copyFilePickupPrevious(ctx context.Context,
	srcDirFS billy.Filesystem, entryPathWithinJob string,
	localPath string, srcSeekOffset int64, mode os.FileMode,
	bytesCopiedUpdater bytesUpdaterFunc) (sha256Sum []byte, err error) {
	m.simpleFS.log.CDebugf(ctx, "+ copyFilePickupPrevious %s", entryPathWithinJob)
	defer func() { m.simpleFS.log.CDebugf(ctx, "- copyFilePickupPrevious %s err: %v", entryPathWithinJob, err) }()

	src, err := srcDirFS.Open(entryPathWithinJob)
	if err != nil {
		return nil, fmt.Errorf("srcDirFS.Open(%s) error: %v", entryPathWithinJob, err)
	}
	defer src.Close()

	_, err = src.Seek(srcSeekOffset, io.SeekStart)
	if err != nil {
		return nil, fmt.Errorf("[%s] src.Seek error: %v", entryPathWithinJob, err)
	}

	// Copy the file.
	if err = func() error {
		dst, err := os.OpenFile(localPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, mode)
		if err != nil {
			return fmt.Errorf("os.OpenFile(%s) error: %v", localPath, err)
		}
		defer dst.Close()

		err = ctxAwareCopy(ctx, dst, src, bytesCopiedUpdater)
		if err != nil {
			return fmt.Errorf("[%s] ctxAwareCopy error: %v", entryPathWithinJob, err)
		}

		return nil
	}(); err != nil {
		return nil, err
	}

	var size int64
	// Calculate and compare the sha256 of both copies since we continued
	// from a previously interrupted copy and need to verify the result.
	srcSHA256Sum, dstSHA256Sum, err := func() (srcSHA256Sum, dstSHA256Sum []byte, err error) {
		_, err = src.Seek(0, io.SeekStart)
		if err != nil {
			return nil, nil, fmt.Errorf("[%s] src.Seek error: %v", entryPathWithinJob, err)
		}
		srcSHA256SumHasher := sha256.New()
		size, err = io.Copy(srcSHA256SumHasher, src)
		if err != nil {
			return nil, nil, fmt.Errorf("[%s] io.Copy error: %v", entryPathWithinJob, err)
		}
		srcSHA256Sum = srcSHA256SumHasher.Sum(nil)

		dst, err := os.Open(localPath)
		if err != nil {
			return nil, nil, fmt.Errorf("os.Open(%s) error: %v", localPath, err)
		}
		defer dst.Close()
		dstSHA256SumHasher := sha256.New()
		_, err = io.Copy(dstSHA256SumHasher, dst)
		if err != nil {
			return nil, nil, fmt.Errorf("[%s] io.Copy error: %v", entryPathWithinJob, err)
		}
		dstSHA256Sum = dstSHA256SumHasher.Sum(nil)

		return srcSHA256Sum, dstSHA256Sum, nil
	}()
	if err != nil {
		return nil, err
	}

	if !bytes.Equal(srcSHA256Sum, dstSHA256Sum) {
		m.simpleFS.log.CInfof(ctx,
			"file corruption detected from a previous copy; will copy %s from the beginning",
			entryPathWithinJob)
		bytesCopiedUpdater(-size)
		return m.copyFileFromBeginning(ctx, srcDirFS, entryPathWithinJob, localPath, mode, bytesCopiedUpdater)
	}

	return srcSHA256Sum, nil
}

func (m *archiveManager) copyFile(ctx context.Context,
	srcDirFS billy.Filesystem, entryPathWithinJob string,
	localPath string, srcSeekOffset int64, mode os.FileMode,
	bytesCopiedUpdater bytesUpdaterFunc) (sha256Sum []byte, err error) {
	if srcSeekOffset == 0 {
		return m.copyFileFromBeginning(ctx, srcDirFS, entryPathWithinJob, localPath, mode, bytesCopiedUpdater)
	}
	return m.copyFilePickupPrevious(ctx, srcDirFS, entryPathWithinJob, localPath, srcSeekOffset, mode, bytesCopiedUpdater)
}

func getWorkspaceDir(jobDesc keybase1.SimpleFSArchiveJobDesc) string {
	return filepath.Join(jobDesc.StagingPath, "workspace")
}
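
// doCopying materializes the indexed manifest under the job's staging area.
// The on-disk layout during a job looks roughly like this (illustrative;
// the actual names depend on the job description):
//
//	<StagingPath>/workspace/<TargetName>/...copied entries...
//
// After zipping succeeds, the workspace directory is deleted, leaving only
// the zip file at ZipFilePath.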
func (m *archiveManager) doCopying(ctx context.Context, jobID string) (err error) {
	m.simpleFS.log.CDebugf(ctx, "+ doCopying %s", jobID)
	defer func() { m.simpleFS.log.CDebugf(ctx, "- doCopying %s err: %v", jobID, err) }()

	desc, manifest := func() (keybase1.SimpleFSArchiveJobDesc, map[string]keybase1.SimpleFSArchiveFile) {
		m.mu.Lock()
		defer m.mu.Unlock()
		manifest := make(map[string]keybase1.SimpleFSArchiveFile)
		for k, v := range m.state.Jobs[jobID].Manifest {
			manifest[k] = v.DeepCopy()
		}
		return m.state.Jobs[jobID].Desc, manifest
	}()

	updateManifest := func(manifest map[string]keybase1.SimpleFSArchiveFile) {
		m.mu.Lock()
		defer m.mu.Unlock()
		// Can override directly since only one worker can work on a given job at a time.
		job := m.state.Jobs[jobID]
		for k, v := range manifest {
			job.Manifest[k] = v.DeepCopy()
		}
		m.state.Jobs[jobID] = job
		m.signal(m.notifyUIStateChangeSignal)
	}

	updateBytesCopied := func(delta int64) {
		m.mu.Lock()
		defer m.mu.Unlock()
		// Can override directly since only one worker can work on a given job at a time.
		job := m.state.Jobs[jobID]
		job.BytesCopied += delta
		m.state.Jobs[jobID] = job
		m.signal(m.notifyUIStateChangeSignal)
	}

	srcContainingDirFS, finalElem, err := m.simpleFS.getFSIfExists(ctx,
		keybase1.NewPathWithKbfsArchived(desc.KbfsPathWithRevision))
	if err != nil {
		return fmt.Errorf("getFSIfExists error: %v", err)
	}
	srcDirFS, err := srcContainingDirFS.Chroot(finalElem)
	if err != nil {
		return fmt.Errorf("srcContainingDirFS.Chroot error: %v", err)
	}
	dstBase := filepath.Join(getWorkspaceDir(desc), desc.TargetName)

	err = os.MkdirAll(dstBase, 0755)
	if err != nil {
		return fmt.Errorf("os.MkdirAll(%s) error: %v", dstBase, err)
	}

	entryPaths := make([]string, 0, len(manifest))
	for entryPathWithinJob := range manifest {
		entryPaths = append(entryPaths, entryPathWithinJob)
	}
	sort.Strings(entryPaths)

loopEntryPaths:
	for _, entryPathWithinJob := range entryPaths {
		entry := manifest[entryPathWithinJob]
		entry.State = keybase1.SimpleFSFileArchiveState_InProgress
		manifest[entryPathWithinJob] = entry
		updateManifest(manifest)

		localPath := filepath.Join(dstBase, entryPathWithinJob)
		srcFI, err := srcDirFS.Lstat(entryPathWithinJob)
		if err != nil {
			return fmt.Errorf("srcDirFS.Lstat(%s) error: %v", entryPathWithinJob, err)
		}
		switch {
		case srcFI.IsDir():
			err = os.MkdirAll(localPath, 0755)
			if err != nil {
				return fmt.Errorf("os.MkdirAll(%s) error: %v", localPath, err)
			}
			err = os.Chtimes(localPath, time.Time{}, srcFI.ModTime())
			if err != nil {
				return fmt.Errorf("os.Chtimes(%s) error: %v", localPath, err)
			}
			entry.State = keybase1.SimpleFSFileArchiveState_Complete
			manifest[entryPathWithinJob] = entry
		case srcFI.Mode()&os.ModeSymlink != 0: // symlink
			err = os.MkdirAll(filepath.Dir(localPath), 0755)
			if err != nil {
				return fmt.Errorf("os.MkdirAll(filepath.Dir(%s)) error: %v", localPath, err)
			}
			// Call Stat, which follows symlinks, to make sure the link doesn't
			// escape outside the srcDirFS.
			_, err = srcDirFS.Stat(entryPathWithinJob)
			if err != nil {
				m.simpleFS.log.CWarningf(ctx, "skipping %s due to srcDirFS.Stat error: %v", entryPathWithinJob, err)
				entry.State = keybase1.SimpleFSFileArchiveState_Skipped
				manifest[entryPathWithinJob] = entry
				continue loopEntryPaths
			}

			link, err := srcDirFS.Readlink(entryPathWithinJob)
			if err != nil {
				return fmt.Errorf("srcDirFS.Readlink(%s) error: %v", entryPathWithinJob, err)
			}
			m.simpleFS.log.CInfof(ctx, "calling os.Symlink(%s, %s)", link, localPath)
			err = os.Symlink(link, localPath)
			if err != nil {
				return fmt.Errorf("os.Symlink(%s, %s) error: %v", link, localPath, err)
			}
			// Skipping Chtimes because there doesn't seem to be a way to
			// change times on symlinks.
			entry.State = keybase1.SimpleFSFileArchiveState_Complete
			manifest[entryPathWithinJob] = entry
		default:
			err = os.MkdirAll(filepath.Dir(localPath), 0755)
			if err != nil {
				return fmt.Errorf("os.MkdirAll(filepath.Dir(%s)) error: %v", localPath, err)
			}

			var mode os.FileMode = 0644
			if srcFI.Mode()&0100 != 0 {
				mode = 0755
			}

			seek := int64(0)

			dstFI, err := os.Lstat(localPath)
			switch {
			case os.IsNotExist(err): // simple copy from the start of the file
			case err == nil: // continue from a previously interrupted copy
				// Resume only when the existing local file isn't a symlink.
				// (src can't be a symlink in this branch, so check dst.)
				if dstFI.Mode()&os.ModeSymlink == 0 {
					seek = dstFI.Size()
				}
				// otherwise copy from the start of the file
			default:
				return fmt.Errorf("os.Lstat(%s) error: %v", localPath, err)
			}

			sha256Sum, err := m.copyFile(ctx,
				srcDirFS, entryPathWithinJob, localPath, seek, mode, updateBytesCopied)
			if err != nil {
				return err
			}

			err = os.Chtimes(localPath, time.Time{}, srcFI.ModTime())
			if err != nil {
				return fmt.Errorf("os.Chtimes(%s) error: %v", localPath, err)
			}

			entry.Sha256SumHex = hex.EncodeToString(sha256Sum)
			entry.State = keybase1.SimpleFSFileArchiveState_Complete
			manifest[entryPathWithinJob] = entry
		}
		updateManifest(manifest)
	}

	return nil
}
func (m *archiveManager) copyingWorker(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-m.copyingWorkerSignal:
		}

		jobID, jobCtx, ok := m.startWorkerTask(ctx,
			keybase1.SimpleFSArchiveJobPhase_Indexed,
			keybase1.SimpleFSArchiveJobPhase_Copying)

		if !ok {
			continue
		}
		// We got a task. Put another token into the signal channel so we
		// check again on the next iteration.
		m.signal(m.copyingWorkerSignal)

		m.simpleFS.log.CDebugf(ctx, "copying: %s", jobID)

		err := m.doCopying(jobCtx, jobID)
		if err == nil {
			m.simpleFS.log.CDebugf(jobCtx, "copying done on job %s", jobID)
			m.changeJobPhase(jobCtx, jobID, keybase1.SimpleFSArchiveJobPhase_Copied)
			m.signal(m.zippingWorkerSignal) // Done copying! Notify the zipping worker.
		} else {
			m.simpleFS.log.CErrorf(jobCtx, "copying error on job %s: %v", jobID, err)
			m.setJobError(ctx, jobID, err)
		}

		err = m.flushStateFile(ctx)
		if err != nil {
			m.simpleFS.log.CWarningf(ctx, "m.flushStateFile error: %v", err)
		}
	}
}

// zipWriterAddDir is adapted from zip.Writer.AddFS in the go1.22.0 source
// because 1) we're not on a Go version with that function yet; 2) Go's AddFS
// doesn't support symlinks; and 3) we need a bytesZippedUpdater here, which
// requires copying through ctxAwareCopy.
func zipWriterAddDir(ctx context.Context,
	w *zip.Writer, dirPath string, bytesZippedUpdater bytesUpdaterFunc) error {
	fsys := os.DirFS(dirPath)
	return fs.WalkDir(fsys, ".", func(name string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if !d.IsDir() && !(info.Mode() &^ fs.ModeSymlink).IsRegular() {
			return errors.New("zip: cannot add non-regular file except symlink")
		}
		h, err := zip.FileInfoHeader(info)
		if err != nil {
			return err
		}
		h.Name = name
		h.Method = zip.Deflate
		fw, err := w.CreateHeader(h)
		if err != nil {
			return err
		}
		switch {
		case d.IsDir():
			return nil
		case info.Mode()&fs.ModeSymlink != 0:
			// Store the link target as the entry's contents, the usual zip
			// convention for symlinks.
			target, err := os.Readlink(filepath.Join(dirPath, name))
			if err != nil {
				return err
			}
			_, err = fw.Write([]byte(filepath.ToSlash(target)))
			if err != nil {
				return err
			}
			return nil
		default:
			f, err := fsys.Open(name)
			if err != nil {
				return err
			}
			defer f.Close()
			return ctxAwareCopy(ctx, fw, f, bytesZippedUpdater)
		}
	})
}
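
// A usage sketch for zipWriterAddDir (illustrative only; the paths and the
// progress callback are assumptions):
//
//	out, _ := os.Create("/tmp/archive.zip")
//	zw := zip.NewWriter(out)
//	err := zipWriterAddDir(context.Background(), zw, "/tmp/workspace",
//		func(delta int64) { /* track progress here */ })
//	// Always close the writer to flush the central directory, then the file.
//	_ = zw.Close()
//	_ = out.Close()
//	_ = err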
// Receipt is serialized into receipt.json in the archive.
type Receipt struct {
	Desc     keybase1.SimpleFSArchiveJobDesc
	Manifest map[string]keybase1.SimpleFSArchiveFile
}
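
// receipt.json therefore carries the job description plus the per-entry
// manifest that checkArchive verifies against. A rough sketch of its shape
// (field names, casing, and enum encoding here are illustrative; the real
// encoding follows the keybase1 type definitions):
//
//	{
//	  "Desc": { "TargetName": "my-folder", "ZipFilePath": "...", ... },
//	  "Manifest": {
//	    "dir/file.txt": {
//	      "DirentType": 1,
//	      "State": 3,
//	      "Sha256SumHex": "9f86d08..."
//	    }
//	  }
//	}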
func (m *archiveManager) doZipping(ctx context.Context, jobID string) (err error) {
	m.simpleFS.log.CDebugf(ctx, "+ doZipping %s", jobID)
	defer func() { m.simpleFS.log.CDebugf(ctx, "- doZipping %s err: %v", jobID, err) }()

	jobDesc, receiptBytes, err := func() (keybase1.SimpleFSArchiveJobDesc, []byte, error) {
		m.mu.Lock()
		defer m.mu.Unlock()
		receiptBytes, err := json.MarshalIndent(Receipt{
			Desc:     m.state.Jobs[jobID].Desc,
			Manifest: m.state.Jobs[jobID].Manifest,
		}, "", " ")
		return m.state.Jobs[jobID].Desc, receiptBytes, err
	}()
	if err != nil {
		return fmt.Errorf(
			"getting jobDesc and receiptBytes for %s error: %v", jobID, err)
	}

	// Reset BytesZipped.
	func() {
		m.mu.Lock()
		defer m.mu.Unlock()
		// Can override directly since only one worker can work on a given job at a time.
		job := m.state.Jobs[jobID]
		job.BytesZipped = 0
		m.state.Jobs[jobID] = job
		m.signal(m.notifyUIStateChangeSignal)
	}()

	updateBytesZipped := func(delta int64) {
		m.mu.Lock()
		defer m.mu.Unlock()
		// Can override directly since only one worker can work on a given job at a time.
		job := m.state.Jobs[jobID]
		job.BytesZipped += delta
		m.state.Jobs[jobID] = job
		m.signal(m.notifyUIStateChangeSignal)
	}

	workspaceDir := getWorkspaceDir(jobDesc)

	err = os.MkdirAll(filepath.Dir(jobDesc.ZipFilePath), 0755)
	if err != nil {
		m.simpleFS.log.CErrorf(ctx, "os.MkdirAll error: %v", err)
		return err
	}

	err = func() (err error) {
		flag := os.O_WRONLY | os.O_CREATE | os.O_EXCL
		if jobDesc.OverwriteZip {
			flag = os.O_WRONLY | os.O_CREATE | os.O_TRUNC
		}
		zipFile, err := os.OpenFile(jobDesc.ZipFilePath, flag, 0666)
		if err != nil {
			return fmt.Errorf("os.OpenFile(%s) error: %v", jobDesc.ZipFilePath, err)
		}
		defer func() {
			closeErr := zipFile.Close()
			if err == nil {
				err = closeErr
			}
			if closeErr != nil {
				m.simpleFS.log.CWarningf(ctx, "zipFile.Close %s error %v", jobDesc.ZipFilePath, closeErr)
			}
			// Call Quarantine even if close failed, just in case.
			qerr := Quarantine(ctx, jobDesc.ZipFilePath)
			if err == nil {
				err = qerr
			}
			if qerr != nil {
				m.simpleFS.log.CWarningf(ctx, "Quarantine %s error %v", jobDesc.ZipFilePath, qerr)
			}
		}()

		zipWriter := zip.NewWriter(zipFile)
		defer func() {
			closeErr := zipWriter.Close()
			if err == nil {
				err = closeErr
			}
			if closeErr != nil {
				m.simpleFS.log.CWarningf(ctx, "zipWriter.Close %s error %v", jobDesc.ZipFilePath, closeErr)
			}
		}()

		err = zipWriterAddDir(ctx, zipWriter, workspaceDir, updateBytesZipped)
		if err != nil {
			return fmt.Errorf("zipWriterAddDir into %s error: %v", jobDesc.ZipFilePath, err)
		}

		{ // write the manifest and desc down
			header := &zip.FileHeader{
				Name:   "receipt.json",
				Method: zip.Deflate,
			}
			header.SetModTime(time.Now())
			w, err := zipWriter.CreateHeader(header)
			if err != nil {
				return fmt.Errorf("zipWriter.CreateHeader(receipt.json) into %s error: %v", jobDesc.ZipFilePath, err)
			}
			_, err = w.Write(receiptBytes)
			if err != nil {
				return fmt.Errorf("w.Write(receiptBytes) into %s error: %v", jobDesc.ZipFilePath, err)
			}
		}

		return nil
	}()
	if err != nil {
		return err
	}

	// Remove the workspace so we release the storage space early, before the
	// user dismisses the job.
	err = os.RemoveAll(workspaceDir)
	if err != nil {
		m.simpleFS.log.CWarningf(ctx, "removing workspace %s error %v", workspaceDir, err)
	}

	return nil
}
func (m *archiveManager) zippingWorker(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-m.zippingWorkerSignal:
		}

		jobID, jobCtx, ok := m.startWorkerTask(ctx,
			keybase1.SimpleFSArchiveJobPhase_Copied,
			keybase1.SimpleFSArchiveJobPhase_Zipping)

		if !ok {
			continue
		}
		// We got a task. Put another token into the signal channel so we
		// check again on the next iteration.
		m.signal(m.zippingWorkerSignal)

		m.simpleFS.log.CDebugf(ctx, "zipping: %s", jobID)

		err := m.doZipping(jobCtx, jobID)
		if err == nil {
			m.simpleFS.log.CDebugf(jobCtx, "zipping done on job %s", jobID)
			m.changeJobPhase(jobCtx, jobID, keybase1.SimpleFSArchiveJobPhase_Done)
		} else {
			m.simpleFS.log.CErrorf(jobCtx, "zipping error on job %s: %v", jobID, err)
			m.setJobError(ctx, jobID, err)
		}

		err = m.flushStateFile(ctx)
		if err != nil {
			m.simpleFS.log.CWarningf(ctx, "m.flushStateFile error: %v", err)
		}
	}
}

func (m *archiveManager) resetInterruptedPhaseLocked(ctx context.Context, jobID string) (changed bool) {
	switch m.state.Jobs[jobID].Phase {
	case keybase1.SimpleFSArchiveJobPhase_Indexing:
		m.simpleFS.log.CDebugf(ctx, "resetting %s phase from %s to %s", jobID,
			keybase1.SimpleFSArchiveJobPhase_Indexing,
			keybase1.SimpleFSArchiveJobPhase_Queued)
		m.changeJobPhaseLocked(ctx, jobID,
			keybase1.SimpleFSArchiveJobPhase_Queued)
		return true
	case keybase1.SimpleFSArchiveJobPhase_Copying:
		m.simpleFS.log.CDebugf(ctx, "resetting %s phase from %s to %s", jobID,
			keybase1.SimpleFSArchiveJobPhase_Copying,
			keybase1.SimpleFSArchiveJobPhase_Indexed)
		m.changeJobPhaseLocked(ctx, jobID,
			keybase1.SimpleFSArchiveJobPhase_Indexed)
		return true
	case keybase1.SimpleFSArchiveJobPhase_Zipping:
		m.simpleFS.log.CDebugf(ctx, "resetting %s phase from %s to %s", jobID,
			keybase1.SimpleFSArchiveJobPhase_Zipping,
			keybase1.SimpleFSArchiveJobPhase_Copied)
		m.changeJobPhaseLocked(ctx, jobID,
			keybase1.SimpleFSArchiveJobPhase_Copied)
		return true
	default:
		m.simpleFS.log.CDebugf(ctx, "not resetting %s phase from %s", jobID,
			m.state.Jobs[jobID].Phase)
		return false
	}
}

func (m *archiveManager) errorRetryWorker(ctx context.Context) {
	ticker := time.NewTicker(time.Second * 5)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}

		func() {
			m.mu.Lock()
			defer m.mu.Unlock()
			jobIDs := make([]string, 0, len(m.state.Jobs))
			for jobID := range m.state.Jobs {
				jobIDs = append(jobIDs, jobID)
			}
		loopJobIDs:
			for _, jobID := range jobIDs {
				errState, ok := m.errors[jobID]
				if !ok {
					continue loopJobIDs
				}
				if time.Now().Before(errState.nextRetry) {
					continue loopJobIDs
				}
				m.simpleFS.log.CDebugf(ctx, "retrying job %s", jobID)
				changed := m.resetInterruptedPhaseLocked(ctx, jobID)
				if !changed {
					m.simpleFS.log.CWarningf(ctx,
						"job %s has an error state %v but an unexpected job phase",
						jobID, errState.err)
					continue loopJobIDs
				}
				delete(m.errors, jobID)

				m.signal(m.indexingWorkerSignal)
				m.signal(m.copyingWorkerSignal)
				m.signal(m.zippingWorkerSignal)
			}
		}()
	}
}
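
// notifyUIStateChangeWorker below throttles UI notifications with
// golang.org/x/time/rate: rate.Every(time.Second/2) allows one event every
// 500ms with a burst of 1, so rapid state changes collapse into at most two
// UI updates per second. The same throttle in isolation (illustrative only):
//
//	limiter := rate.NewLimiter(rate.Every(time.Second/2), 1)
//	for range signals {
//		if err := limiter.Wait(ctx); err != nil {
//			return // ctx canceled
//		}
//		// ... push one coalesced update ...
//	}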
func (m *archiveManager) notifyUIStateChangeWorker(ctx context.Context) {
	limiter := rate.NewLimiter(rate.Every(time.Second/2), 1)
	for {
		select {
		case <-ctx.Done():
			return
		case <-m.notifyUIStateChangeSignal:
		}
		err := limiter.Wait(ctx)
		if err != nil {
			// ctx was canceled or its deadline passed.
			return
		}

		m.notifyUIStateChange(ctx)
	}
}

func (m *archiveManager) start() {
	ctx := context.Background()
	ctx, m.ctxCancel = context.WithCancel(ctx)
	go m.indexingWorker(m.simpleFS.makeContext(ctx))
	go m.copyingWorker(m.simpleFS.makeContext(ctx))
	go m.zippingWorker(m.simpleFS.makeContext(ctx))
	go m.errorRetryWorker(m.simpleFS.makeContext(ctx))
	go m.notifyUIStateChangeWorker(m.simpleFS.makeContext(ctx))
	m.signal(m.indexingWorkerSignal)
	m.signal(m.copyingWorkerSignal)
	m.signal(m.zippingWorkerSignal)
}

func (m *archiveManager) resetInterruptedPhasesLocked(ctx context.Context) {
	// We don't resume indexing or zipping work, so just reset those phases
	// here. Copying is resumable, but we have per-file state tracking, so
	// reset that phase here as well.
	for jobID := range m.state.Jobs {
		_ = m.resetInterruptedPhaseLocked(ctx, jobID)
	}
}

func newArchiveManager(simpleFS *SimpleFS, username libkb.NormalizedUsername) (
	m *archiveManager, err error) {
	ctx := context.Background()
	simpleFS.log.CDebugf(ctx, "+ newArchiveManager")
	defer simpleFS.log.CDebugf(ctx, "- newArchiveManager")
	m = &archiveManager{
		simpleFS:                  simpleFS,
		username:                  username,
		jobCtxCancellers:          make(map[string]func()),
		errors:                    make(map[string]errorState),
		indexingWorkerSignal:      make(chan struct{}, 1),
		copyingWorkerSignal:       make(chan struct{}, 1),
		zippingWorkerSignal:       make(chan struct{}, 1),
		notifyUIStateChangeSignal: make(chan struct{}, 1),
	}
	stateFilePath := m.getStateFilePath(simpleFS)
	simpleFS.log.CDebugf(ctx, "stateFilePath: %q", stateFilePath)
	m.state, err = loadArchiveStateFromJsonGz(ctx, simpleFS, stateFilePath)
	switch err {
	case nil:
		if m.state.Jobs == nil {
			m.state.Jobs = make(map[string]keybase1.SimpleFSArchiveJobState)
		}
		m.resetInterruptedPhasesLocked(ctx)
	default:
		simpleFS.log.CErrorf(ctx, "loadArchiveStateFromJsonGz error ( %v ). Creating a new state.", err)
		m.state = &keybase1.SimpleFSArchiveState{
			Jobs: make(map[string]keybase1.SimpleFSArchiveJobState),
		}
		err = writeArchiveStateIntoJsonGz(ctx, simpleFS, stateFilePath, m.state)
		if err != nil {
			simpleFS.log.CErrorf(ctx, "newArchiveManager: creating state file error: %v", err)
			return nil, err
		}
	}
	m.start()
	return m, nil
}

func (m *archiveManager) getStagingPath(ctx context.Context, jobID string) (stagingPath string) {
	cacheDir := m.simpleFS.getCacheDir()
	return filepath.Join(cacheDir, fmt.Sprintf("kbfs-archive-%s-%s", m.username, jobID))
}
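
// End-to-end, a caller drives this file roughly as follows (illustrative
// only; in the real code SimpleFS owns the manager and fills in the job
// description):
//
//	m, err := newArchiveManager(simpleFS, username) // loads state, starts workers
//	if err != nil { /* handle */ }
//	err = m.startJob(ctx, keybase1.SimpleFSArchiveJobDesc{
//		JobID:       "job-1",
//		StagingPath: m.getStagingPath(ctx, "job-1"),
//		// KbfsPathWithRevision, TargetName, ZipFilePath, ...
//	})
//	// ... later ...
//	state, errStates := m.getCurrentState(ctx)
//	_, _ = state, errStates
//	// and eventually: m.cancelOrDismissJob(ctx, "job-1"); m.shutdown(ctx)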