github.com/atlassian/git-lob@v0.0.0-20150806085256-2386a5ed291a/core/push.go

     1  package core
     2  
     3  import (
     4  	"fmt"
     5  	"io/ioutil"
     6  	"os"
     7  
     8  	"github.com/atlassian/git-lob/providers"
     9  	"github.com/atlassian/git-lob/util"
    10  )
    11  
    12  // Some temporary storage used to pre-calculate the amount of data we'll need to upload
    13  type PushCommitContentDetails struct {
    14  	CommitSHA  string      // the commit's SHA
    15  	Deltas     []*LOBDelta // delta uploads, items in here won't be in Files
    16  	Files      []string    // list of files we'll need to upload, relative path, if not doing deltas
    17  	BaseDir    string      // the base dir of the above files
    18  	FileBytes  int64       // total bytes for all files in the list
    19  	DeltaBytes int64       // total bytes for all deltas in the list
    20  	Incomplete bool        // File list is not complete because of missing local data, we shouldn't mark this commit as pushed
    21  }
    22  
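        // Push uploads the binary content (LOBs) referenced by the given refspecs to remoteName via provider.
        // Each refspec is processed completely (size calculation, upload, marking as pushed) before moving on to
        // the next. If the provider supports the smart protocol, files above the configured size threshold may be
        // uploaded as binary deltas against a base version the remote already has.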
    23  func Push(provider providers.SyncProvider, remoteName string, refspecs []*GitRefSpec, dryRun, force, recheck bool,
    24  	callback util.ProgressCallback) error {
    25  
    26  	util.LogDebugf("Pushing to %v via %v\n", remoteName, provider.TypeID())
    27  	smartProvider := providers.UpgradeToSmartSyncProvider(provider)
    28  
    29  	// Track LOB SHAs already queued in this push so we don't duplicate work (or duplicate uploads when --force is used)
    30  	shasAlreadyQueued := util.NewStringSet()
    31  
    32  	for i, refspec := range refspecs {
    33  		// We now perform a complete push per refspec before proceeding to the next;
    34  		// estimates & progress are measured within the refspec.
    35  		// This is how we mark commits as pushed anyway; it's more consistent than trying to do it for all refspecs in one go
    36  		var refCommitsToPush []*PushCommitContentDetails
    37  		var anyIncomplete bool
    38  
    39  		if util.GlobalOptions.Verbose {
    40  			callback(&util.ProgressCallbackData{util.ProgressCalculate, fmt.Sprintf("Calculating data to push for %v", refspec),
    41  				int64(i), int64(len(refspecs)), 0, 0})
    42  		}
    43  
    44  		var refFileSize, refDeltaSize int64
    45  		var deltaSavings int64
    46  
    47  		// First we walk the commits to push & build up a picture of size etc
    48  		walkFunc := func(commit *CommitLOBRef) (quit bool, err error) {
    49  			var problemSHAs []string
    50  			var allfilenamesforcommit []string
    51  			var alldeltasforcommit []*LOBDelta
    52  			var commitFileSize int64
    53  			var commitDeltaSize int64
    54  			// Always use local LOB root since files are hardlinked there in shared case
    55  			basedir := GetLocalLOBRoot()
    56  			commitIncomplete := false
    57  			for _, filelob := range commit.FileLOBs {
    58  				var err error
    59  				filesMissing := false
    60  
    61  				// We can end up duplicating work (and uploads in --force mode) when a LOB is referred
    62  				// to multiple times in one push, so skip duplicates
    63  				// We still add the commit to the list, it just might not need anything done, but
    64  				// it's important to mark it as pushed anyway
    65  				if shasAlreadyQueued.Contains(filelob.SHA) {
    66  					continue
    67  				}
    68  
    69  				// check size integrity but don't recalculate sha
    70  				filenames, filesize, err := GetLOBFilesForSHA(filelob.SHA, basedir, true, false)
    71  				if err != nil {
    72  					if IsNotFoundError(err) {
    73  						filesMissing = true
    74  						problemSHAs = append(problemSHAs, filelob.SHA)
    75  					} else {
    76  						return true, err
    77  					}
    78  				}
    79  				// Pre-check if we can/should do a delta
    80  				var delta *LOBDelta
    81  				if !filesMissing && smartProvider != nil && filesize > util.GlobalOptions.PushDeltasAboveSize {
    82  					// This will return nil if not possible
    83  					delta = preparePushDelta(filelob.SHA, filelob.Filename, smartProvider, remoteName, force)
    84  				}
    85  
    86  				if delta != nil {
    87  					// We'll try this as a delta; if it fails later then we'll fall back on normal
    88  					alldeltasforcommit = append(alldeltasforcommit, delta)
    89  					commitDeltaSize += delta.DeltaSize + ApproximateMetadataSize
    90  					deltaSavings += filesize - (delta.DeltaSize + ApproximateMetadataSize)
    91  				} else {
    92  					allfilenamesforcommit = append(allfilenamesforcommit, filenames...)
    93  					commitFileSize += filesize
    94  				}
    95  				shasAlreadyQueued.Add(filelob.SHA)
    96  
    97  			}
    98  			if len(problemSHAs) > 0 {
    99  				// If we got here it means one or more sets of files for SHAs were not available or were bad locally
   100  				// We still want to push the rest though, we want to be tolerant of partial data
   101  
   102  				// This MAY be ok to still mark as pushed - the commits may have come from someone else,
   103  				// and may just be outside of our fetch range. If all the missing ones are already present
   104  				// on the remote then we're OK
   105  
   106  				// Check the remote for the presence of missing SHA data
   107  				remoteHasOurMissingSHAs := true
   108  				for _, sha := range problemSHAs {
   109  					remoteerr := CheckRemoteLOBFilesForSHA(sha, provider, remoteName)
   110  					if remoteerr != nil {
   111  						// Damn, missing
   112  						util.LogDebug(fmt.Sprintf("Commit %v locally missing %v, not on remote: %v", commit.Commit[:7], sha, remoteerr.Error()))
   113  						remoteHasOurMissingSHAs = false
   114  						break
   115  					}
   116  				}
   117  
   118  				if !remoteHasOurMissingSHAs {
   119  					// Genuinely incomplete data in this commit that isn't present on remote
   120  					// We can't mark this (or following) commits as pushed, but we still want to
   121  					// push everything we can
   122  					commitIncomplete = true
   123  					anyIncomplete = true
   124  					util.LogDebug(fmt.Sprintf("Some content for commit %v is missing & not on remote already", commit.Commit[:7]))
   125  					callback(&util.ProgressCallbackData{util.ProgressNotFound, fmt.Sprintf("data for commit %v", commit.Commit[:7]),
   126  						int64(i + 1), int64(len(refspecs)), 0, 0})
   127  				}
   128  				// If we DID manage to find the missing data on the remote though, we treat this as
   129  				// being able to push everything
   130  			}
   131  
   132  			refCommitsToPush = append(refCommitsToPush, &PushCommitContentDetails{
   133  				CommitSHA:  commit.Commit,
   134  				Files:      allfilenamesforcommit,
   135  				BaseDir:    basedir,
   136  				FileBytes:  commitFileSize,
   137  				DeltaBytes: commitDeltaSize,
   138  				Incomplete: commitIncomplete,
   139  				Deltas:     alldeltasforcommit,
   140  			})
   141  
   142  			refDeltaSize += commitDeltaSize
   143  			refFileSize += commitFileSize
   144  
   145  			return false, nil
   146  		}
   147  
   148  		err := WalkGitCommitLOBsToPushForRefSpec(remoteName, refspec, recheck, walkFunc)
   149  		// defer delete any delta files we created so we always clean up
   150  		for _, commit := range refCommitsToPush {
   151  			for _, delta := range commit.Deltas {
   152  				if delta.DeltaFilename != "" {
   153  					defer os.Remove(delta.DeltaFilename)
   154  				}
   155  			}
   156  		}
   157  		if err != nil {
   158  			return err
   159  		}
   160  
   161  		refCommitsSize := refFileSize + refDeltaSize
   162  
   163  		if len(refCommitsToPush) == 0 {
   164  			callback(&util.ProgressCallbackData{util.ProgressCalculate, fmt.Sprintf(" * %v: Nothing to push", refspec),
   165  				int64(i), int64(len(refspecs)), 0, 0})
   166  			// if nothing to push, then mark this ref as pushed to make querying faster next time
   167  			// Only for normal ref where we've checked for all ancestors to be pushed, not a manual range
   168  			if !dryRun && !refspec.IsRange() {
   169  				commitSHA, err := GitRefToFullSHA(refspec.Ref1)
   170  				if err != nil {
   171  					return err
   172  				}
   173  				err = MarkBinariesAsPushed(remoteName, commitSHA, "")
   174  				if err != nil {
   175  					return err
   176  				}
   177  
   178  			}
   179  
   180  		} else {
   181  			if refCommitsSize > 0 {
   182  				forcenotforcemsg := "if not already on remote"
   183  				if force {
   184  					forcenotforcemsg = "forced upload"
   185  				}
   186  				callback(&util.ProgressCallbackData{util.ProgressCalculate, fmt.Sprintf(" * %v: %d commits with %v to push (%v)",
   187  					refspec, len(refCommitsToPush), util.FormatSize(refCommitsSize), forcenotforcemsg), int64(i + 1), int64(len(refspecs)), 0, 0})
   188  				if deltaSavings > 0 {
   189  					callback(&util.ProgressCallbackData{util.ProgressCalculate, fmt.Sprintf("   Saving %v by using binary deltas",
   190  						util.FormatSize(deltaSavings)), int64(i + 1), int64(len(refspecs)), 0, 0})
   191  				}
   192  			} else {
   193  				callback(&util.ProgressCallbackData{util.ProgressCalculate, fmt.Sprintf(" * %v: Nothing to push, remote is up to date", refspec),
   194  					int64(i + 1), int64(len(refspecs)), 0, 0})
   195  			}
   196  		}
   197  		if util.GlobalOptions.Verbose {
   198  			callback(&util.ProgressCallbackData{util.ProgressCalculate, fmt.Sprintf("Finished calculating data to push for %v", refspec),
   199  				int64(i + 1), int64(len(refspecs)), 0, 0})
   200  		}
   201  
   202  		if !dryRun && len(refCommitsToPush) > 0 {
   203  			// Even if size == 0 we still skim through marking them as pushed (must have been that data was on remote)
   204  			if refCommitsSize > 0 {
   205  				callback(&util.ProgressCallbackData{util.ProgressCalculate,
   206  					fmt.Sprintf("Uploading up to %v to %v via %v", util.FormatSize(refCommitsSize), remoteName, provider.TypeID()),
   207  					0, 0, 0, 0})
   208  			}
   209  
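        			// Upload commit by commit, tracking overall progress and remembering the last commit we safely
        			// marked as pushed so we can replace that entry rather than accumulate push-state records.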
   210  			var bytesDoneSoFar int64
   211  			previousCommitIncomplete := false
   212  			previousCommitSHA := ""
   213  			basedir := GetLocalLOBRoot()
   214  			for _, commit := range refCommitsToPush {
   215  
   216  				// Push this one
   217  				// Firstly, do any deltas (may be some deltas and some not in one commit)
   218  				if smartProvider != nil && len(commit.Deltas) > 0 {
   219  					// add any failed deltas back to the regular file-based upload for the next step
   220  					faileddeltas := pushCommitDeltas(commit, smartProvider, remoteName, force, bytesDoneSoFar, refCommitsSize, callback)
   221  					for _, delta := range faileddeltas {
   222  						// Add the files for failed deltas to the standard route
   223  						filenames, filesize, err := GetLOBFilesForSHA(delta.TargetSHA, basedir, true, false)
   224  						if err != nil {
   225  							// We already checked local files were there earlier so this is fatal
   226  							return fmt.Errorf("Error while trying to fall back from delta to standard push: %v", err)
   227  						}
   228  						commit.Files = append(commit.Files, filenames...)
   229  						// Just add the filesize on, don't subtract the delta size since we'll mark that as done
   230  						commit.FileBytes += filesize
   231  						refFileSize += filesize
   232  						refCommitsSize += filesize
   233  					}
   234  
   235  				}
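        				// Count all delta bytes as done (even failed ones); the fallback file sizes were added to the totals above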
   236  				bytesDoneSoFar += commit.DeltaBytes
   237  				// Then, do any regular file-based uploads (and also any delta fallbacks)
   238  				err := pushCommitStandard(commit, provider, remoteName, force, bytesDoneSoFar, refCommitsSize, callback)
   239  				if err != nil {
   240  					// stop at commit we can't push
   241  					return err
   242  				}
   243  				// in the case of a failed delta & fallback we would have uploaded more bytes but gloss over this
   244  				bytesDoneSoFar += commit.FileBytes
   245  
   246  				// Otherwise mark commit as pushed IF complete
   247  				if commit.Incomplete {
   248  					previousCommitIncomplete = true
   249  					// Any subsequent commits will also not be marked as pushed so we always go back to the incomplete commit
   250  					// until this is resolved. Our commits are in ancestor order.
   251  					// Note that in the case of multiple refs this also means other following commits aren't marked as complete either;
   252  					// this will result in longer than necessary calculations in subsequent pushes, but better to be safe.
   253  					// Sync provider will avoid any duplicate uploads anyway.
   254  				}
   255  				if !commit.Incomplete && !previousCommitIncomplete {
   256  					// replace the previous commit SHA we marked as pushed each time, IF it was the direct parent
   257  					// it's important not to just replace all because where there are merges even --topo-order will
   258  					// walk through multiple threads of development in parallel, the only constraint is that ancestors
   259  					// are always seen before descendants. Replacing a SHA in a parallel stream would give an incorrect
   260  					// result if the merge wasn't finished. Although the worst case is only that the other stream would
   261  					// think it's not pushed, it's worth avoiding.
   262  					// If we end up adding extra SHAs in this case, they'll get tidied up in CleanupPushState at the end;
   263  					// this avoids having to consolidate tons of commits later & means we generally store
   264  					// one pushed SHA per ref, before consolidation.
   265  					replaceSHA := ""
   266  					if previousCommitSHA != "" {
   267  						isancestor, err := GitIsAncestor(previousCommitSHA, commit.CommitSHA)
   268  						if err != nil {
   269  							return err
   270  						}
   271  						if isancestor {
   272  							replaceSHA = previousCommitSHA
   273  						}
   274  					}
   275  					// This writes data to disk every time and that's fine, for robustness & interruptability
   276  					err = MarkBinariesAsPushed(remoteName, commit.CommitSHA, replaceSHA)
   277  					if err != nil {
   278  						// Stop at commit we can't mark, order is important
   279  						return err
   280  					}
   281  					previousCommitSHA = commit.CommitSHA
   282  				}
   283  			}
   284  			// now perform cleanup of the push state to ensure we simplify it
   285  			// do this per ref so that subsequent refs have a simpler git log call
   286  			CleanupPushState(remoteName)
   287  		}
   288  
   289  		if anyIncomplete {
   290  			util.LogDebugf("Partial push to %v for %v\n", remoteName, refspec)
   291  		} else {
   292  			util.LogDebugf("Successfully pushed to %v for %v\n", remoteName, refspec)
   293  		}
   294  	}
   295  	return nil
   296  
   297  }
   298  
   299  // Push deltas in a commit & report those which didn't make it
   300  func pushCommitDeltas(commit *PushCommitContentDetails, provider providers.SmartSyncProvider, remoteName string,
   301  	force bool, bytesDoneSoFar, refDeltaBytes int64, callback util.ProgressCallback) []*LOBDelta {
   302  
   303  	// Accumulate any deltas which fail here so the caller can fall back to a standard upload for them
   304  	var faileddeltas []*LOBDelta
   305  
   306  	for _, delta := range commit.Deltas {
   307  		// Push metadata for this individually
   308  		metacallback := func(fileInProgress string, progressType util.ProgressCallbackType, bytesDone, totalBytes int64) (abort bool) {
   309  			// Don't bother to track partial completion, the metadata file is small
   310  			if progressType == util.ProgressSkip || progressType == util.ProgressNotFound {
   311  				// Skipped (already on remote) or not found; report the metadata as fully accounted for
   312  				return callback(&util.ProgressCallbackData{progressType, fileInProgress, totalBytes, totalBytes,
   313  					bytesDoneSoFar + ApproximateMetadataSize, refDeltaBytes})
   314  			}
   315  			return callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   316  				bytesDoneSoFar + bytesDone, refDeltaBytes})
   317  		}
   320  		metafile := GetLOBMetaRelativePath(delta.TargetSHA)
   321  		err := provider.Upload(remoteName, []string{metafile}, GetLocalLOBRoot(), force, metacallback)
   322  		if err != nil {
   323  			faileddeltas = append(faileddeltas, delta)
   324  			continue
   325  		}
   326  		bytesDoneSoFar += ApproximateMetadataSize
   327  		// Now upload delta
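        		// Track whether the provider reported 100% for this delta so we can emit a final completion callback if not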
   328  		completionSeen := false
   329  		deltacallback := func(txt string, progressType util.ProgressCallbackType, bytesDone, totalBytes int64) (abort bool) {
   330  			if bytesDone == totalBytes {
   331  				completionSeen = true
   332  			}
   333  			return callback(&util.ProgressCallbackData{util.ProgressTransferBytes, getDeltaProgressDesc(delta), bytesDone, totalBytes,
   334  				bytesDoneSoFar + bytesDone, refDeltaBytes})
   335  		}
   336  		in, err := os.OpenFile(delta.DeltaFilename, os.O_RDONLY, 0644)
   337  		if err != nil {
   338  			faileddeltas = append(faileddeltas, delta)
   339  			continue
   340  		}
   341  		err = provider.UploadDelta(remoteName, delta.BaseSHA, delta.TargetSHA, in, delta.DeltaSize, deltacallback)
   342  		in.Close() // close now rather than defer inside the loop, so we don't hold a handle open for every delta
   343  		bytesDoneSoFar += delta.DeltaSize
   344  
   345  		if err != nil {
   346  			faileddeltas = append(faileddeltas, delta)
   347  			callback(&util.ProgressCallbackData{util.ProgressError, getDeltaProgressDesc(delta), delta.DeltaSize, delta.DeltaSize,
   348  				bytesDoneSoFar, refDeltaBytes})
   349  			continue
   350  		}
   351  		if !completionSeen {
   352  			// Do a final callback to make sure 100% is there
   353  			callback(&util.ProgressCallbackData{util.ProgressTransferBytes, getDeltaProgressDesc(delta), delta.DeltaSize, delta.DeltaSize,
   354  				bytesDoneSoFar, refDeltaBytes})
   355  		}
   356  	}
   357  	return faileddeltas
   358  }
   359  
   360  // Push a single commit using the standard approach
   361  func pushCommitStandard(commit *PushCommitContentDetails, provider providers.SyncProvider, remoteName string,
   362  	force bool, bytesDoneSoFar, refCommitsSize int64, callback util.ProgressCallback) error {
   363  	// Upload now
   364  	var lastFilename string
   365  	var lastFileBytes int64
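        	// Wrap the provider's per-file progress callback so bytes are reported against the running total for the
        	// whole ref, and so skips, not-found notifications and missing 100% updates become single completion events.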
   366  	localcallback := func(fileInProgress string, progressType util.ProgressCallbackType, bytesDone, totalBytes int64) (abort bool) {
   367  		if lastFilename != fileInProgress {
   368  			// New file, always callback
   369  			if lastFilename != "" {
   370  				// we obviously never got a 100% call for previous file
   371  				bytesDoneSoFar += lastFileBytes
   372  				callback(&util.ProgressCallbackData{util.ProgressTransferBytes, lastFilename, lastFileBytes, lastFileBytes,
   373  					bytesDoneSoFar, refCommitsSize})
   374  				lastFilename = ""
   375  			}
   376  			if progressType == util.ProgressSkip || progressType == util.ProgressNotFound {
   377  				// 'not found' will have caused an error earlier anyway so just pass through
   378  				bytesDoneSoFar += totalBytes
   379  				callback(&util.ProgressCallbackData{progressType, fileInProgress, totalBytes, totalBytes,
   380  					bytesDoneSoFar, refCommitsSize})
   381  			} else {
   382  				// Start new file
   383  				callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   384  					bytesDoneSoFar + bytesDone, refCommitsSize})
   385  				lastFilename = fileInProgress
   386  				lastFileBytes = totalBytes
   387  			}
   388  		} else {
   389  			if bytesDone == totalBytes {
   390  				// finished
   391  				bytesDoneSoFar += totalBytes
   392  				callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   393  					bytesDoneSoFar, refCommitsSize})
   394  				lastFilename = ""
   395  			} else {
   396  				// Otherwise this is a progress callback
   397  				return callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   398  					bytesDoneSoFar + bytesDone, refCommitsSize})
   399  			}
   400  		}
   401  		return false
   402  	}
   403  	// It IS possible to have a commit here with no files to upload. E.g. missing data locally (see above)
   404  	// which was present on remote. We still include it in the commit list for completeness
   405  	if len(commit.Files) > 0 {
   406  		err := provider.Upload(remoteName, commit.Files, commit.BaseDir, force, localcallback)
   407  		if err != nil {
   408  			return err
   409  		}
   410  	}
   411  	if lastFilename != "" {
   412  		// We obviously never got a 100% progress update from the last file
   413  		bytesDoneSoFar += lastFileBytes
   414  		callback(&util.ProgressCallbackData{util.ProgressTransferBytes, lastFilename, lastFileBytes, lastFileBytes,
   415  			bytesDoneSoFar, refCommitsSize})
   416  		lastFilename = ""
   417  	}
   418  	return nil
   419  
   420  }
   421  
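        // preparePushDelta decides whether it's worth uploading a binary delta for this LOB rather than the full
        // content: it finds previous versions of the file that exist both locally and on the remote, asks the provider
        // to choose a base, then generates the delta to a temporary file. It returns nil if a delta can't or shouldn't
        // be used, in which case the caller falls back to a standard upload.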
   422  func preparePushDelta(lobsha, filename string, provider providers.SmartSyncProvider, remoteName string, force bool) *LOBDelta {
   423  	// Don't bother generating a delta if the LOB is already on the remote & we're not forcing; it will be skipped in the regular upload anyway
   424  	if !force {
   425  		exists, _ := provider.LOBExists(remoteName, lobsha)
   426  		if exists {
   427  			return nil
   428  		}
   429  	}
   430  
   431  	othershas, err := GetGitAllLOBHistoryForFile(filename, lobsha)
   432  	if err != nil {
   433  		util.LogErrorf("Unable to prepare delta for %v(%v): %v\n", lobsha, filename, err.Error())
   434  		return nil
   435  	}
   436  	// This is all the possible base shas, but we can only use ones we have locally too
   437  	// Right now we're not trying to cope with ordered downloads where we might have newer ones part way through fetch (too fiddly)
   438  	var localbaseshas []string
   439  	for _, sha := range othershas {
   440  		if !IsLOBMissing(sha, false) {
   441  			localbaseshas = append(localbaseshas, sha)
   442  		}
   443  	}
   444  	if len(localbaseshas) == 0 {
   445  		// no base shas, cannot do this
   446  		return nil
   447  	}
   448  	// Now ask the server to pick a sha
   449  	chosenbasesha, err := provider.GetFirstCompleteLOBFromList(remoteName, localbaseshas)
   450  	if err != nil {
   451  		util.LogErrorf("Unable to get common base for delta %v(%v): %v\n", lobsha, filename, err.Error())
   452  		return nil
   453  	}
   454  	// No common base to use
   455  	if chosenbasesha == "" {
   456  		return nil
   457  	}
   458  	// Now we calculate the delta locally
   459  	tempf, err := ioutil.TempFile("", "uploaddelta")
   460  	if err != nil {
   461  		util.LogErrorf("Unable to open temp file for delta %v(%v): %v\n", lobsha, filename, err.Error())
   462  		return nil
   463  	}
   464  	defer tempf.Close()
   465  	tempfilename := tempf.Name()
   466  	sz, err := GenerateLOBDelta(chosenbasesha, lobsha, tempf)
   467  	if err != nil {
   468  		util.LogErrorf("Error calculating delta %v(%v): %v\n", lobsha, filename, err.Error())
   469  		tempf.Close() // have to close before removing; the deferred Close would run too late
   470  		os.Remove(tempfilename)
   471  		return nil
   472  	}
   473  	return &LOBDelta{
   474  		BaseSHA:       chosenbasesha,
   475  		TargetSHA:     lobsha,
   476  		DeltaSize:     sz,
   477  		DeltaFilename: tempfilename,
   478  	}
   479  }
   480  
   481  // Push a single LOB to a remote
   482  func PushSingle(sha string, provider providers.SyncProvider, remoteName string, force bool,
   483  	callback util.ProgressCallback) error {
   484  	basedir := GetLocalLOBRoot()
   485  	filenames, totalSize, err := GetLOBFilesForSHA(sha, basedir, true, false)
   486  	if err != nil {
   487  		return err
   488  	}
   489  
   490  	var lastFilename string
   491  	var lastFileBytes int64
   492  	var bytesFromFilesDoneSoFar int64
   493  	localcallback := func(fileInProgress string, progressType util.ProgressCallbackType, bytesDone, totalBytes int64) (abort bool) {
   494  		if lastFilename != fileInProgress {
   495  			// New file, always callback
   496  			if lastFilename != "" {
   497  				// we obviously never got a 100% call for previous file
   498  				bytesFromFilesDoneSoFar += lastFileBytes
   499  				callback(&util.ProgressCallbackData{util.ProgressTransferBytes, lastFilename, lastFileBytes, lastFileBytes,
   500  					bytesFromFilesDoneSoFar, totalSize})
   501  				lastFilename = ""
   502  			}
   503  			if progressType == util.ProgressSkip || progressType == util.ProgressNotFound {
   504  				// 'not found' will have caused an error earlier anyway so just pass through
   505  				bytesFromFilesDoneSoFar += totalBytes
   506  				callback(&util.ProgressCallbackData{progressType, fileInProgress, totalBytes, totalBytes,
   507  					bytesFromFilesDoneSoFar, totalSize})
   508  			} else {
   509  				// Start new file
   510  				callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   511  					bytesFromFilesDoneSoFar + bytesDone, totalSize})
   512  				lastFilename = fileInProgress
   513  				lastFileBytes = totalBytes
   514  			}
   515  		} else {
   516  			if bytesDone == totalBytes {
   517  				// finished
   518  				bytesFromFilesDoneSoFar += totalBytes
   519  				callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   520  					bytesFromFilesDoneSoFar, totalSize})
   521  				lastFilename = ""
   522  			} else {
   523  				// Otherwise this is a progress callback
   524  				return callback(&util.ProgressCallbackData{util.ProgressTransferBytes, fileInProgress, bytesDone, totalBytes,
   525  					bytesFromFilesDoneSoFar + bytesDone, totalSize})
   526  			}
   527  		}
   528  		return false
   529  	}
   530  
   531  	return provider.Upload(remoteName, filenames, basedir, force, localcallback)
   532  }