github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/md_util.go (about)

     1  // Copyright 2016 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"fmt"
     9  	"sort"
    10  	"time"
    11  
    12  	"github.com/keybase/client/go/kbfs/data"
    13  	"github.com/keybase/client/go/kbfs/idutil"
    14  	"github.com/keybase/client/go/kbfs/kbfscodec"
    15  	"github.com/keybase/client/go/kbfs/kbfscrypto"
    16  	"github.com/keybase/client/go/kbfs/kbfsmd"
    17  	"github.com/keybase/client/go/kbfs/libkey"
    18  	"github.com/keybase/client/go/kbfs/tlf"
    19  	"github.com/keybase/client/go/kbfs/tlfhandle"
    20  	kbname "github.com/keybase/client/go/kbun"
    21  	"github.com/keybase/client/go/libkb"
    22  	"github.com/keybase/client/go/logger"
    23  	"github.com/keybase/client/go/protocol/keybase1"
    24  	"github.com/pkg/errors"
    25  	"golang.org/x/net/context"
    26  )
    27  
// mdRange describes an inclusive range of MD revisions that still
// needs to be downloaded from the server.
type mdRange struct {
	start kbfsmd.Revision
	end   kbfsmd.Revision // inclusive
}
    32  
    33  func makeRekeyReadErrorHelper(
    34  	err error, kmd libkey.KeyMetadata, resolvedHandle *tlfhandle.Handle,
    35  	uid keybase1.UID, username kbname.NormalizedUsername) error {
    36  	if resolvedHandle.Type() == tlf.Public {
    37  		panic("makeRekeyReadError called on public folder")
    38  	}
    39  	// If the user is not a legitimate reader of the folder, this is a
    40  	// normal read access error.
    41  	if !resolvedHandle.IsReader(uid) {
    42  		return tlfhandle.NewReadAccessError(
    43  			resolvedHandle, username, resolvedHandle.GetCanonicalPath())
    44  	}
    45  
    46  	// Otherwise, this folder needs to be rekeyed for this device.
    47  	tlfName := resolvedHandle.GetCanonicalName()
    48  	hasKeys, hasKeyErr := kmd.HasKeyForUser(uid)
    49  	if (hasKeyErr == nil) && hasKeys {
    50  		return NeedSelfRekeyError{tlfName, err}
    51  	}
    52  	return NeedOtherRekeyError{tlfName, err}
    53  }
    54  
    55  func makeRekeyReadError(
    56  	ctx context.Context, err error, kbpki KBPKI,
    57  	syncGetter syncedTlfGetterSetter, kmd libkey.KeyMetadata,
    58  	uid keybase1.UID, username kbname.NormalizedUsername) error {
    59  	h := kmd.GetTlfHandle()
    60  	resolvedHandle, resolveErr := h.ResolveAgain(ctx, kbpki, nil, syncGetter)
    61  	if resolveErr != nil {
    62  		// Ignore error and pretend h is already fully
    63  		// resolved.
    64  		resolvedHandle = h
    65  	}
    66  	return makeRekeyReadErrorHelper(err, kmd, resolvedHandle, uid, username)
    67  }
    68  
    69  // Helper which returns nil if the md block is uninitialized or readable by
    70  // the current user. Otherwise an appropriate read access error is returned.
    71  func isReadableOrError(
    72  	ctx context.Context, kbpki KBPKI, syncGetter syncedTlfGetterSetter,
    73  	md ReadOnlyRootMetadata) error {
    74  	if !md.IsInitialized() || md.IsReadable() {
    75  		return nil
    76  	}
    77  	// this should only be the case if we're a new device not yet
    78  	// added to the set of reader/writer keys.
    79  	session, err := kbpki.GetCurrentSession(ctx)
    80  	if err != nil {
    81  		return err
    82  	}
    83  	err = errors.Errorf("%s is not readable by %s (uid:%s)", md.TlfID(),
    84  		session.Name, session.UID)
    85  	return makeRekeyReadError(
    86  		ctx, err, kbpki, syncGetter, md, session.UID, session.Name)
    87  }
    88  
// getMDRange returns the MDs for revisions [start, end] (inclusive)
// of the given TLF and branch, consulting the MD cache first and
// downloading any cache misses from the server.  An invalid range
// (either bound below kbfsmd.RevisionInitial) returns (nil, nil),
// meaning no revisions are known yet.  The result is trimmed to the
// contiguous span of revisions actually found; a hole inside that
// span is an error.
func getMDRange(ctx context.Context, config Config, id tlf.ID, bid kbfsmd.BranchID,
	start kbfsmd.Revision, end kbfsmd.Revision, mStatus kbfsmd.MergeStatus,
	lockBeforeGet *keybase1.LockID) (rmds []ImmutableRootMetadata, err error) {
	// The range is invalid.  Don't treat as an error though; it just
	// indicates that we don't yet know about any revisions.
	if start < kbfsmd.RevisionInitial || end < kbfsmd.RevisionInitial {
		return nil, nil
	}

	mdcache := config.MDCache()
	var toDownload []mdRange

	// Fetch one at a time, and figure out what ranges to fetch as you
	// go.
	// minSlot/maxSlot track the smallest and largest indexes into
	// rmds that actually get populated, either from the cache here or
	// from the server below.
	minSlot := int(end-start) + 1
	maxSlot := -1
	for i := start; i <= end; i++ {
		irmd, err := mdcache.Get(id, i, bid)
		if err != nil {
			// Cache miss: extend the last pending download range if
			// this revision is contiguous with it; otherwise start a
			// new range.
			if len(toDownload) == 0 ||
				toDownload[len(toDownload)-1].end != i-1 {
				toDownload = append(toDownload, mdRange{i, i})
			}
			toDownload[len(toDownload)-1].end = i
			// Placeholder (zero value) for the miss; filled in below.
			irmd = ImmutableRootMetadata{}
		} else {
			slot := len(rmds)
			if slot < minSlot {
				minSlot = slot
			}
			if slot > maxSlot {
				maxSlot = slot
			}
		}
		rmds = append(rmds, irmd)
	}

	// Try to fetch the rest from the server.  TODO: parallelize me.
	for _, r := range toDownload {
		var fetchedRmds []ImmutableRootMetadata
		switch mStatus {
		case kbfsmd.Merged:
			fetchedRmds, err = config.MDOps().GetRange(
				ctx, id, r.start, r.end, lockBeforeGet)
		case kbfsmd.Unmerged:
			fetchedRmds, err = config.MDOps().GetUnmergedRange(
				ctx, id, bid, r.start, r.end)
		default:
			panic(fmt.Sprintf("Unknown merged type: %s", mStatus))
		}
		if err != nil {
			return nil, err
		}

		for _, rmd := range fetchedRmds {
			// The server may return fewer revisions than requested,
			// so compute each MD's slot from its actual revision.
			slot := int(rmd.Revision() - start)
			if slot < minSlot {
				minSlot = slot
			}
			if slot > maxSlot {
				maxSlot = slot
			}

			rmds[slot] = rmd

			// We don't cache the MD here, since it's already done in
			// `MDOpsStandard` for MDs that come from a remote server.
			// MDs that come from the local journal don't get cached
			// as part of a get, to avoid races as in KBFS-2224.
		}
	}

	// Nothing was found in the cache or on the server.
	if minSlot > maxSlot {
		return nil, nil
	}

	// Trim to the contiguous range that was actually populated.
	rmds = rmds[minSlot : maxSlot+1]
	// check to make sure there are no holes
	for i, rmd := range rmds {
		if rmd == (ImmutableRootMetadata{}) {
			return nil, fmt.Errorf("No %s MD found for revision %d",
				mStatus, int(start)+minSlot+i)
		}
	}

	return rmds, nil
}
   176  
   177  // GetSingleMD returns an MD that is required to exist.
   178  func GetSingleMD(
   179  	ctx context.Context, config Config, id tlf.ID, bid kbfsmd.BranchID,
   180  	rev kbfsmd.Revision, mStatus kbfsmd.MergeStatus,
   181  	lockBeforeGet *keybase1.LockID) (ImmutableRootMetadata, error) {
   182  	rmds, err := getMDRange(
   183  		ctx, config, id, bid, rev, rev, mStatus, lockBeforeGet)
   184  	if err != nil {
   185  		return ImmutableRootMetadata{}, err
   186  	}
   187  
   188  	if len(rmds) != 1 {
   189  		return ImmutableRootMetadata{},
   190  			fmt.Errorf("Single expected revision %d not found", rev)
   191  	}
   192  	return rmds[0], nil
   193  }
   194  
   195  // MakeCopyWithDecryptedPrivateData makes a copy of the given IRMD,
   196  // decrypting it with the given IRMD with keys.
   197  func MakeCopyWithDecryptedPrivateData(
   198  	ctx context.Context, config Config,
   199  	irmdToDecrypt, irmdWithKeys ImmutableRootMetadata, uid keybase1.UID) (
   200  	rmdDecrypted ImmutableRootMetadata, err error) {
   201  	pmd, err := decryptMDPrivateData(
   202  		ctx, config.Codec(), config.Crypto(),
   203  		config.BlockCache(), config.BlockOps(),
   204  		config.KeyManager(), config.KBPKI(), config, config.Mode(), uid,
   205  		irmdToDecrypt.GetSerializedPrivateMetadata(),
   206  		irmdToDecrypt, irmdWithKeys, config.MakeLogger(""))
   207  	if err != nil {
   208  		return ImmutableRootMetadata{}, err
   209  	}
   210  
   211  	rmdCopy, err := irmdToDecrypt.deepCopy(config.Codec())
   212  	if err != nil {
   213  		return ImmutableRootMetadata{}, err
   214  	}
   215  	rmdCopy.data = pmd
   216  	return MakeImmutableRootMetadata(rmdCopy,
   217  		irmdToDecrypt.LastModifyingWriterVerifyingKey(),
   218  		irmdToDecrypt.MdID(),
   219  		irmdToDecrypt.LocalTimestamp(),
   220  		irmdToDecrypt.putToServer), nil
   221  }
   222  
// getMergedMDUpdatesWithEnd returns all merged MDs for the given TLF
// in the revision range [startRev, endRev], fetching in batches of at
// most maxMDsAtATime.  An endRev of kbfsmd.RevisionUninitialized
// means "no upper bound".  Each new batch is verified to be a valid
// successor of the previous one, and any MD that isn't readable by
// this device is replaced (both in the result and in the MD cache)
// with a copy decrypted using the most recent MD's keys.
func getMergedMDUpdatesWithEnd(ctx context.Context, config Config, id tlf.ID,
	startRev kbfsmd.Revision, endRev kbfsmd.Revision,
	lockBeforeGet *keybase1.LockID) (
	mergedRmds []ImmutableRootMetadata, err error) {
	// We don't yet know about any revisions yet, so there's no range
	// to get.
	if startRev < kbfsmd.RevisionInitial {
		return nil, nil
	}

	start := startRev
	for {
		end := start + maxMDsAtATime - 1 // range is inclusive
		if endRev != kbfsmd.RevisionUninitialized && end > endRev {
			end = endRev
		}
		if end < start {
			break
		}
		rmds, err := getMDRange(ctx, config, id, kbfsmd.NullBranchID,
			start, end, kbfsmd.Merged, lockBeforeGet)
		if err != nil {
			return nil, err
		}

		if len(mergedRmds) > 0 && len(rmds) > 0 {
			// Make sure the first new one is a valid successor of the
			// last one.
			lastRmd := mergedRmds[len(mergedRmds)-1]
			err = lastRmd.CheckValidSuccessor(
				lastRmd.mdID, rmds[0].ReadOnlyRootMetadata)
			if err != nil {
				return nil, err
			}
		}

		mergedRmds = append(mergedRmds, rmds...)

		// TODO: limit the number of MDs we're allowed to hold in
		// memory at any one time?
		// A short batch means we've reached the end of the known
		// revisions.
		if len(rmds) < maxMDsAtATime {
			break
		}
		start = end + 1
	}

	var uid keybase1.UID
	// Check the readability of each MD.  Because rekeys can append a
	// MD revision with the new key, older revisions might not be
	// readable until the newer revision, containing the key for this
	// device, is processed.
	for i, rmd := range mergedRmds {
		if err := isReadableOrError(ctx, config.KBPKI(), config, rmd.ReadOnly()); err != nil {
			// The right secret key for the given rmd's
			// key generation may only be present in the
			// most recent rmd.
			latestRmd := mergedRmds[len(mergedRmds)-1]

			// Look up the current UID lazily, and only once.
			if uid == keybase1.UID("") {
				session, err := config.KBPKI().GetCurrentSession(ctx)
				if err != nil {
					return nil, err
				}
				uid = session.UID
			}

			irmdCopy, err := MakeCopyWithDecryptedPrivateData(
				ctx, config, rmd, latestRmd, uid)
			if err != nil {
				return nil, err
			}
			// Overwrite the cached copy with the new copy.  Unlike in
			// `getMDRange`, it's safe to put this into the cache
			// blindly, since updates coming from our local journal
			// would always be readable, and thus not subject to this
			// rewrite.
			if err := config.MDCache().Put(irmdCopy); err != nil {
				return nil, err
			}
			mergedRmds[i] = irmdCopy
		}
	}
	return mergedRmds, nil
}
   307  
   308  // getMergedMDUpdates returns a slice of all the merged MDs for a TLF,
   309  // starting from the given startRev.  The returned MDs are the same
   310  // instances that are stored in the MD cache, so they should be
   311  // modified with care.
   312  //
   313  // TODO: Accept a parameter to express that we want copies of the MDs
   314  // instead of the cached versions.
   315  func getMergedMDUpdates(ctx context.Context, config Config, id tlf.ID,
   316  	startRev kbfsmd.Revision, lockBeforeGet *keybase1.LockID) (
   317  	mergedRmds []ImmutableRootMetadata, err error) {
   318  	return getMergedMDUpdatesWithEnd(
   319  		ctx, config, id, startRev, kbfsmd.RevisionUninitialized, lockBeforeGet)
   320  }
   321  
// getUnmergedMDUpdates returns a slice of the unmerged MDs for a TLF
// and unmerged branch, between the merge point for that branch and
// startRev (inclusive).  The returned MDs are the same instances that
// are stored in the MD cache, so they should be modified with care.
// If bid is kbfsmd.NullBranchID, it returns an empty MD list.
// currHead is the revision just before the earliest returned unmerged
// revision.
//
// TODO: Accept a parameter to express that we want copies of the MDs
// instead of the cached versions.
func getUnmergedMDUpdates(ctx context.Context, config Config, id tlf.ID,
	bid kbfsmd.BranchID, startRev kbfsmd.Revision) (
	currHead kbfsmd.Revision, unmergedRmds []ImmutableRootMetadata,
	err error) {
	if bid == kbfsmd.NullBranchID {
		// We're not really unmerged, so there's nothing to do.
		// TODO: require the caller to avoid making this call if the
		// bid isn't set (and change the mdserver behavior in that
		// case as well).
		return startRev, nil, nil
	}

	// We don't yet know about any revisions yet, so there's no range
	// to get.
	if startRev < kbfsmd.RevisionInitial {
		return kbfsmd.RevisionUninitialized, nil, nil
	}

	// walk backwards until we find one that is merged
	currHead = startRev
	for {
		// first look up all unmerged MD revisions older than my current head
		// (this startRev deliberately shadows the parameter)
		startRev := currHead - maxMDsAtATime + 1 // (kbfsmd.Revision is signed)
		if startRev < kbfsmd.RevisionInitial {
			startRev = kbfsmd.RevisionInitial
		}

		rmds, err := getMDRange(ctx, config, id, bid, startRev, currHead,
			kbfsmd.Unmerged, nil)
		if err != nil {
			return kbfsmd.RevisionUninitialized, nil, err
		}

		if len(unmergedRmds) > 0 && len(rmds) > 0 {
			// Make sure the first old one is a valid successor of the
			// last new one.
			lastRmd := rmds[len(rmds)-1]
			err = lastRmd.CheckValidSuccessor(
				lastRmd.mdID, unmergedRmds[0].ReadOnlyRootMetadata)
			if err != nil {
				return kbfsmd.RevisionUninitialized, nil, err
			}
		}

		numNew := len(rmds)
		// prepend to keep the ordering correct
		unmergedRmds = append(rmds, unmergedRmds...)

		// on the next iteration, start apply the previous root
		if numNew > 0 {
			currHead = rmds[0].Revision() - 1
		}
		// Walking below the initial revision without finding the
		// branch point means there's nothing left to unstage from.
		if currHead < kbfsmd.RevisionInitial {
			return kbfsmd.RevisionUninitialized, nil,
				errors.New("ran out of MD updates to unstage")
		}
		// TODO: limit the number of MDs we're allowed to hold in
		// memory at any one time?
		// A short batch means we've walked past the branch point.
		if numNew < maxMDsAtATime {
			break
		}
	}
	return currHead, unmergedRmds, nil
}
   394  
   395  // GetMDRevisionByTime returns the revision number of the earliest
   396  // merged MD of `handle` with a server timestamp greater or equal to
   397  // `serverTime`.
   398  func GetMDRevisionByTime(
   399  	ctx context.Context, config Config, handle *tlfhandle.Handle,
   400  	serverTime time.Time) (kbfsmd.Revision, error) {
   401  	id := handle.TlfID()
   402  	if id == tlf.NullID {
   403  		return kbfsmd.RevisionUninitialized, errors.Errorf(
   404  			"No ID set in handle %s", handle.GetCanonicalPath())
   405  	}
   406  
   407  	md, err := config.MDOps().GetForTLFByTime(ctx, id, serverTime)
   408  	if err != nil {
   409  		return kbfsmd.RevisionUninitialized, err
   410  	}
   411  
   412  	return md.Revision(), nil
   413  }
   414  
// encryptMDPrivateData encrypts the private data of the given
// RootMetadata and makes other modifications to prepare it for
// signing (see signMD below). After this function is called, the
// MetadataID of the RootMetadata's BareRootMetadata can be computed.
// For public TLFs the private data is encoded in the clear rather
// than encrypted.
func encryptMDPrivateData(
	ctx context.Context, codec kbfscodec.Codec, crypto cryptoPure,
	signer kbfscrypto.Signer, ekg encryptionKeyGetter, me keybase1.UID,
	rmd *RootMetadata) error {
	// Sanity-check the private data before serializing it.
	err := rmd.data.checkValid()
	if err != nil {
		return err
	}

	brmd := rmd.bareMd
	privateData := rmd.data

	// The writer metadata only needs to be (re)serialized and signed
	// when it wasn't copied from a previous revision; public TLFs
	// always take this path.
	if brmd.TypeForKeying() == tlf.PublicKeying ||
		!brmd.IsWriterMetadataCopiedSet() {
		// Record the last writer to modify this writer metadata
		brmd.SetLastModifyingWriter(me)

		if brmd.TypeForKeying() == tlf.PublicKeying {
			// Encode the private metadata
			encodedPrivateMetadata, err := codec.Encode(privateData)
			if err != nil {
				return err
			}
			brmd.SetSerializedPrivateMetadata(encodedPrivateMetadata)
		} else if !brmd.IsWriterMetadataCopiedSet() {
			// Encrypt and encode the private metadata
			k, err := ekg.GetTLFCryptKeyForEncryption(ctx, rmd)
			if err != nil {
				return err
			}
			encryptedPrivateMetadata, err := crypto.EncryptPrivateMetadata(privateData, k)
			if err != nil {
				return err
			}
			encodedEncryptedPrivateMetadata, err := codec.Encode(encryptedPrivateMetadata)
			if err != nil {
				return err
			}
			brmd.SetSerializedPrivateMetadata(encodedEncryptedPrivateMetadata)
		}

		// Sign the writer metadata internally. This has to be
		// done here, instead of in signMD, since the
		// MetadataID may depend on it.
		err := brmd.SignWriterMetadataInternally(ctx, codec, signer)
		if err != nil {
			return err
		}
	}

	// Record the last user to modify this metadata
	brmd.SetLastModifyingUser(me)

	return nil
}
   474  
   475  func getFileBlockForMD(
   476  	ctx context.Context, bcache data.BlockCacheSimple, bops BlockOps,
   477  	ptr data.BlockPointer, tlfID tlf.ID, rmdWithKeys libkey.KeyMetadata) (
   478  	*data.FileBlock, error) {
   479  	// We don't have a convenient way to fetch the block from here via
   480  	// folderBlockOps, so just go directly via the
   481  	// BlockCache/BlockOps.  No locking around the blocks is needed
   482  	// since these change blocks are read-only.
   483  	block, err := bcache.Get(ptr)
   484  	if err != nil {
   485  		block = data.NewFileBlock()
   486  		// TODO: eventually we should plumb the correct branch name
   487  		// here, but that would impact a huge number of functions that
   488  		// fetch MD.  For now, the worst thing that can happen is that
   489  		// MD blocks for historical MD revisions sneak their way into
   490  		// the sync cache.
   491  		branch := data.MasterBranch
   492  		if err := bops.Get(
   493  			ctx, rmdWithKeys, ptr, block, data.TransientEntry,
   494  			branch); err != nil {
   495  			return nil, err
   496  		}
   497  	}
   498  
   499  	fblock, ok := block.(*data.FileBlock)
   500  	if !ok {
   501  		return nil, NotFileBlockError{ptr, data.MasterBranch, data.Path{}}
   502  	}
   503  	return fblock, nil
   504  }
   505  
// reembedBlockChanges fetches the unembedded block-change data
// pointed to by pmd.Changes.Info (if any), decodes it, and embeds it
// back into pmd.Changes, preserving the original info pointer in
// pmd.cachedChanges.  It is a no-op when the changes are already
// embedded, or when block management is disabled in this mode.
func reembedBlockChanges(ctx context.Context, codec kbfscodec.Codec,
	bcache data.BlockCacheSimple, bops BlockOps, mode InitMode, tlfID tlf.ID,
	pmd *PrivateMetadata, rmdWithKeys libkey.KeyMetadata,
	log logger.Logger) error {
	info := pmd.Changes.Info
	// A zero pointer means the block changes are already embedded.
	if info.BlockPointer == data.ZeroPtr {
		return nil
	}

	if !mode.BlockManagementEnabled() {
		// Leave the block changes unembedded -- they aren't needed in
		// minimal mode since there's no node cache, and thus there
		// are no Nodes that needs to be updated due to BlockChange
		// pointers in those blocks.
		log.CDebugf(ctx, "Skipping block change reembedding in mode: %s",
			mode.Type())
		return nil
	}

	// Treat the unembedded block change like a file so we can reuse
	// the file reading code.
	file := data.Path{
		FolderBranch: data.FolderBranch{
			Tlf:    tlfID,
			Branch: data.MasterBranch,
		},
		Path: []data.PathNode{{
			BlockPointer: info.BlockPointer,
			Name: data.NewPathPartString(
				fmt.Sprintf("<MD with block change pointer %s>",
					info.BlockPointer), nil),
		}},
	}
	// Block getter backed directly by the cache/BlockOps (see
	// getFileBlockForMD); never reports a dirty block.
	getter := func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
		p data.Path, rtype data.BlockReqType) (*data.FileBlock, bool, error) {
		block, err := getFileBlockForMD(ctx, bcache, bops, ptr, tlfID, kmd)
		if err != nil {
			return nil, false, err
		}
		return block, false, nil
	}
	// No-op cacher; nothing needs to be written back for reads.
	cacher := func(_ context.Context, ptr data.BlockPointer, block data.Block) error {
		return nil
	}
	// Reading doesn't use crypto or the block splitter, so for now
	// just pass in nil.  Also, reading doesn't depend on the UID, so
	// it's ok to be empty.
	var id keybase1.UserOrTeamID
	fd := data.NewFileData(
		file, id, nil, rmdWithKeys, getter, cacher, log,
		libkb.NewVDebugLog(log) /* one-off, short-lived, unconfigured vlog */)

	// Read the entire "file" holding the serialized changes.
	buf, err := fd.GetBytes(ctx, 0, -1)
	if err != nil {
		return err
	}

	var unembeddedChanges BlockChanges
	err = codec.Decode(buf, &unembeddedChanges)
	if err != nil {
		return err
	}

	// We rely on at most one of Info or Ops being non-empty in
	// crChains.addOps.
	if unembeddedChanges.Info.IsInitialized() {
		return errors.New("Unembedded BlockChangesInfo unexpectedly has an initialized Info")
	}

	// The changes block pointers are implicit ref blocks.
	unembeddedChanges.Ops[0].AddRefBlock(info.BlockPointer)
	iptrs, err := fd.GetIndirectFileBlockInfos(ctx)
	if err != nil {
		return err
	}
	for _, iptr := range iptrs {
		unembeddedChanges.Ops[0].AddRefBlock(iptr.BlockPointer)
	}

	pmd.Changes = unembeddedChanges
	pmd.cachedChanges.Info = info
	return nil
}
   589  
   590  func reembedBlockChangesIntoCopyIfNeeded(
   591  	ctx context.Context, codec kbfscodec.Codec,
   592  	bcache data.BlockCacheSimple, bops BlockOps, mode InitMode,
   593  	rmd ImmutableRootMetadata, log logger.Logger) (
   594  	ImmutableRootMetadata, error) {
   595  	if rmd.data.Changes.Ops != nil {
   596  		return rmd, nil
   597  	}
   598  
   599  	// This might be necessary if the MD was retrieved from the
   600  	// cache in between putting it to the server (with unembedded
   601  	// block changes), and re-loading the block changes back into
   602  	// the MD and re-inserting into the cache.
   603  	log.CDebugf(ctx,
   604  		"Reembedding block changes for revision %d", rmd.Revision())
   605  	rmdCopy, err := rmd.deepCopy(codec)
   606  	if err != nil {
   607  		return ImmutableRootMetadata{}, err
   608  	}
   609  	err = reembedBlockChanges(
   610  		ctx, codec, bcache, bops, mode, rmd.TlfID(),
   611  		&rmdCopy.data, rmd, log)
   612  	if err != nil {
   613  		return ImmutableRootMetadata{}, err
   614  	}
   615  	return MakeImmutableRootMetadata(
   616  		rmdCopy, rmd.lastWriterVerifyingKey, rmd.mdID,
   617  		rmd.localTimestamp, rmd.putToServer), nil
   618  }
   619  
   620  func getMDObfuscationSecret(
   621  	ctx context.Context, keyGetter mdDecryptionKeyGetter,
   622  	kmd libkey.KeyMetadata) (data.NodeObfuscatorSecret, error) {
   623  	if kmd.TlfID().Type() == tlf.Public {
   624  		return nil, nil
   625  	}
   626  	key, err := keyGetter.GetFirstTLFCryptKey(ctx, kmd)
   627  	if err != nil {
   628  		return nil, err
   629  	}
   630  	secret, err := key.DeriveSecret(obfuscatorDerivationString)
   631  	if err != nil {
   632  		return nil, err
   633  	}
   634  	return data.NodeObfuscatorSecret(secret), nil
   635  }
   636  
   637  func makeMDObfuscatorFromSecret(
   638  	secret data.NodeObfuscatorSecret, mode InitMode) data.Obfuscator {
   639  	if !mode.DoLogObfuscation() {
   640  		return nil
   641  	}
   642  
   643  	if secret == nil {
   644  		return nil
   645  	}
   646  	return data.NewNodeObfuscator(secret)
   647  }
   648  
// decryptMDPrivateData decodes (public TLFs) or decrypts (private
// TLFs) the given serialized private metadata, re-embeds any
// unembedded block changes, and gives each op a placeholder
// obfuscated path.  If this device lacks the crypt key but the user
// is still a legitimate reader (a rekey situation), an empty
// PrivateMetadata is returned instead of an error.
// decryptMDPrivateData does not use uid if the handle is a public one.
func decryptMDPrivateData(ctx context.Context, codec kbfscodec.Codec,
	crypto Crypto, bcache data.BlockCache, bops BlockOps,
	keyGetter mdDecryptionKeyGetter, teamChecker kbfsmd.TeamMembershipChecker,
	osg idutil.OfflineStatusGetter, mode InitMode, uid keybase1.UID,
	serializedPrivateMetadata []byte, rmdToDecrypt, rmdWithKeys libkey.KeyMetadata,
	log logger.Logger) (PrivateMetadata, error) {
	handle := rmdToDecrypt.GetTlfHandle()

	var pmd PrivateMetadata
	// Set to false below when this device can't get the crypt key.
	keyedForDevice := true
	if handle.TypeForKeying() == tlf.PublicKeying {
		if err := codec.Decode(serializedPrivateMetadata,
			&pmd); err != nil {
			return PrivateMetadata{}, err
		}
	} else {
		// decrypt the root data for non-public directories
		var encryptedPrivateMetadata kbfscrypto.EncryptedPrivateMetadata
		if err := codec.Decode(serializedPrivateMetadata,
			&encryptedPrivateMetadata); err != nil {
			return PrivateMetadata{}, err
		}

		k, err := keyGetter.GetTLFCryptKeyForMDDecryption(ctx,
			rmdToDecrypt, rmdWithKeys)

		if err != nil {
			log.CDebugf(ctx, "Couldn't get crypt key for %s (%s): %+v",
				handle.GetCanonicalPath(), rmdToDecrypt.TlfID(), err)
			isReader, readerErr := isReaderFromHandle(
				ctx, handle, teamChecker, osg, uid)
			if readerErr != nil {
				return PrivateMetadata{}, readerErr
			}
			_, isSelfRekeyError := err.(NeedSelfRekeyError)
			_, isOtherRekeyError := err.(NeedOtherRekeyError)
			if isReader && (isOtherRekeyError || isSelfRekeyError) {
				// Rekey errors are expected if this client is a
				// valid folder participant but doesn't have the
				// shared crypt key.
				keyedForDevice = false
			} else {
				return PrivateMetadata{}, err
			}
		} else {
			pmd, err = crypto.DecryptPrivateMetadata(
				encryptedPrivateMetadata, k)
			if err != nil {
				log.CDebugf(
					ctx, "Failed to decrypt MD for id=%s, keygen=%d",
					rmdToDecrypt.TlfID(), rmdToDecrypt.LatestKeyGeneration())
				return PrivateMetadata{}, err
			}
		}
	}

	// Re-embed the block changes if it's needed.
	err := reembedBlockChanges(
		ctx, codec, bcache, bops, mode, rmdWithKeys.TlfID(),
		&pmd, rmdWithKeys, log)
	if err != nil {
		log.CDebugf(
			ctx, "Failed to re-embed block changes for id=%s, keygen=%d, info pointer=%v",
			rmdToDecrypt.TlfID(), rmdToDecrypt.LatestKeyGeneration(),
			pmd.Changes.Info)
		return PrivateMetadata{}, err
	}

	var obfuscator data.Obfuscator
	if keyedForDevice {
		// Only derive an obfuscation secret when we actually hold the
		// TLF crypt key.
		secret, err := getMDObfuscationSecret(ctx, keyGetter, rmdWithKeys)
		if err != nil {
			return PrivateMetadata{}, err
		}
		obfuscator = makeMDObfuscatorFromSecret(secret, mode)
	}
	for _, op := range pmd.Changes.Ops {
		// Add a temporary path with an obfuscator.  When we
		// deserialize the ops from the raw byte buffer of the MD
		// object, they don't have proper `data.Path`s set in them yet
		// -- that's an unexported field. The path is required for
		// obfuscation, so here we're just making sure that they all
		// have one. In places where a perfectly-accurate path is
		// required (like in conflict resolution), the code there will
		// need to add a proper path. Note that here the level of the
		// obfuscator might be wrong, and so might result in
		// inconsistent suffixes for obfuscated names that conflict.
		if !op.getFinalPath().IsValid() {
			op.setFinalPath(data.Path{Path: []data.PathNode{{
				BlockPointer: data.ZeroPtr,
				Name:         data.NewPathPartString("", obfuscator),
			}}})
		}
	}

	return pmd, nil
}
   747  
   748  func getOpsSafe(config Config, id tlf.ID) (*folderBranchOps, error) {
   749  	kbfsOps := config.KBFSOps()
   750  	kbfsOpsStandard, ok := kbfsOps.(*KBFSOpsStandard)
   751  	if !ok {
   752  		return nil, errors.New("Not KBFSOpsStandard")
   753  	}
   754  
   755  	return kbfsOpsStandard.getOpsNoAdd(context.TODO(), data.FolderBranch{
   756  		Tlf:    id,
   757  		Branch: data.MasterBranch,
   758  	}), nil
   759  }
   760  
   761  func getOps(config Config, id tlf.ID) *folderBranchOps {
   762  	ops, err := getOpsSafe(config, id)
   763  	if err != nil {
   764  		panic(err)
   765  	}
   766  	return ops
   767  }
   768  
   769  // ChangeType indicates what kind of change is being referenced.
   770  type ChangeType int
   771  
   772  const (
   773  	// ChangeTypeWrite is a change to a file (could be a create or a
   774  	// write to an existing file).
   775  	ChangeTypeWrite ChangeType = iota
   776  	// ChangeTypeRename is a rename of an existing file or directory.
   777  	ChangeTypeRename
   778  	// ChangeTypeDelete is a delete of an existing file or directory.
   779  	ChangeTypeDelete
   780  )
   781  
   782  func (ct ChangeType) String() string {
   783  	switch ct {
   784  	case ChangeTypeWrite:
   785  		return "write"
   786  	case ChangeTypeRename:
   787  		return "rename"
   788  	case ChangeTypeDelete:
   789  		return "delete"
   790  	default:
   791  		return "unknown"
   792  	}
   793  }
   794  
// ChangeItem describes a single change to a file or directory between
// revisions.
type ChangeItem struct {
	Type            ChangeType
	CurrPath        data.Path           // Full path to the node created/renamed/deleted
	UnrefsForDelete []data.BlockPointer // original pointers unreferenced by the op (see addUnrefs)
	IsNew           bool                // presumably marks a newly-created node; verify against callers
	OldPtr          data.BlockPointer   // presumably the node's pre-change pointer; verify against callers
}
   804  
   805  func (ci *ChangeItem) addUnrefs(chains *crChains, op op) error {
   806  	// Find the original block pointers for each unref.
   807  	unrefs := op.Unrefs()
   808  	ci.UnrefsForDelete = make([]data.BlockPointer, len(unrefs))
   809  	for i, unref := range unrefs {
   810  		ptr, err := chains.originalFromMostRecentOrSame(unref)
   811  		if err != nil {
   812  			return err
   813  		}
   814  		ci.UnrefsForDelete[i] = ptr
   815  	}
   816  	return nil
   817  }
   818  
   819  func (ci ChangeItem) String() string {
   820  	return fmt.Sprintf(
   821  		"{type: %s, currPath: %s}", ci.Type, ci.CurrPath.CanonicalPathString())
   822  }
   823  
// GetChangesBetweenRevisions returns a list of all the changes
// between the two given revisions (after `oldRev`, up to and
// including `newRev`). Also returns the sum of all the newly ref'd
// block sizes (in bytes), as a crude estimate of how big this change
// set is.
func GetChangesBetweenRevisions(
	ctx context.Context, config Config, id tlf.ID,
	oldRev, newRev kbfsmd.Revision) (
	changes []*ChangeItem, refSize uint64, err error) {
	if newRev <= oldRev {
		return nil, 0, errors.Errorf(
			"Can't get changes between %d and %d", oldRev, newRev)
	}

	// Fetch the merged-branch MD updates in the range (oldRev, newRev].
	rmds, err := getMDRange(
		ctx, config, id, kbfsmd.NullBranchID, oldRev+1, newRev,
		kbfsmd.Merged, nil)
	if err != nil {
		return nil, 0, err
	}

	fbo, err := getOpsSafe(config, id)
	if err != nil {
		return nil, 0, err
	}

	// Build conflict-resolution chains from the MDs, then fill in the
	// full path for each chain so ops below have usable final paths.
	chains, err := newCRChainsForIRMDs(
		ctx, config.Codec(), config, rmds, &fbo.blocks, true)
	if err != nil {
		return nil, 0, err
	}
	err = fbo.blocks.populateChainPaths(
		ctx, config.MakeLogger(""), chains, true)
	if err != nil {
		return nil, 0, err
	}

	// The crChains creation process splits up a rename op into
	// a delete and a create.  Turn them back into a rename.
	opsCount := 0
	for _, rmd := range rmds {
		opsCount += len(rmd.data.Changes.Ops)
	}
	ops := make([]op, opsCount)
	soFar := 0
	for _, rmd := range rmds {
		for i, op := range rmd.data.Changes.Ops {
			// Deep-copy so revertRenames can modify the ops without
			// touching the cached MD objects.
			ops[soFar+i] = op.deepCopy()
		}
		soFar += len(rmd.data.Changes.Ops)
		// Accumulate the newly-ref'd byte total across all revisions.
		refSize += rmd.RefBytes()
	}
	err = chains.revertRenames(ops)
	if err != nil {
		return nil, 0, err
	}

	// Create the change items for each chain.  Use the following
	// simplifications:
	// * Creates become writes, and use the full path to the created file/dir.
	// * Deletes use the original blockpointer from the start of the chain
	//   for the deleted file's path.
	//
	// `items` maps a canonical path string to all the change items
	// seen for that path; `numItems` counts only the top-level (non
	// parent-directory) items, as a capacity hint for `changes`.
	items := make(map[string][]*ChangeItem)
	numItems := 0
	for _, chain := range chains.byMostRecent {
		for _, op := range chain.ops {
			newItem := true
			item := &ChangeItem{
				CurrPath: op.getFinalPath(),
			}
			switch realOp := op.(type) {
			case *createOp:
				item.Type = ChangeTypeWrite
				// Don't force there to be a pointer for the new node,
				// since it could be a symlink.
				item.CurrPath = item.CurrPath.ChildPathNoPtr(
					realOp.obfuscatedNewName(), fbo.makeObfuscator())

				// If the write was processed first, re-use that item.
				itemSlice, ok := items[item.CurrPath.CanonicalPathString()]
				if ok {
					for _, existingItem := range itemSlice {
						if existingItem.Type == ChangeTypeWrite {
							newItem = false
							item = existingItem
						}
					}
				}
				item.IsNew = true
			case *syncOp:
				// If the create was processed first, reuse that item.
				itemSlice, ok := items[item.CurrPath.CanonicalPathString()]
				if ok {
					for _, existingItem := range itemSlice {
						if existingItem.Type == ChangeTypeWrite {
							newItem = false
							item = existingItem
							// Patch the reused item's tail pointer to
							// the chain's most recent block pointer.
							item.CurrPath.Path[len(item.CurrPath.Path)-1].
								BlockPointer = chain.mostRecent
							break
						}
					}
				}
				item.Type = ChangeTypeWrite
				item.OldPtr = chain.original
			case *renameOp:
				item.Type = ChangeTypeRename
				err := item.addUnrefs(chains, op)
				if err != nil {
					return nil, 0, err
				}
				// Don't force there to be a pointer for the node,
				// since it could be a symlink.
				item.CurrPath = item.CurrPath.ChildPathNoPtr(
					realOp.obfuscatedNewName(), fbo.makeObfuscator())
			case *rmOp:
				item.Type = ChangeTypeDelete
				// Find the original block pointers for each unref.
				err := item.addUnrefs(chains, op)
				if err != nil {
					return nil, 0, err
				}
				unrefs := op.Unrefs()
				if len(unrefs) > 0 {
					// Use the first unref's original pointer as the
					// deleted node's pointer in the path.
					unref := unrefs[0]
					ptr, err := chains.originalFromMostRecentOrSame(unref)
					if err != nil {
						return nil, 0, err
					}
					item.CurrPath = item.CurrPath.ChildPath(
						realOp.obfuscatedOldName(), ptr, fbo.makeObfuscator())
				}
			}

			if newItem {
				pString := item.CurrPath.CanonicalPathString()
				items[pString] = append(items[pString], item)
				numItems++

				// Add in an update for every directory whose blockpointer
				// was updated.  Walk up toward the root, stopping early
				// at the first ancestor that already has a write item
				// (its ancestors were handled when that item was added).
				currPath := item.CurrPath
				for currPath.HasValidParent() {
					currPath = *currPath.ParentPath()
					pString := currPath.CanonicalPathString()
					itemSlice, ok := items[pString]
					needsUpdate := true
					if ok {
						for _, existingItem := range itemSlice {
							if existingItem.Type == ChangeTypeWrite {
								needsUpdate = false
								break
							}
						}
					}
					if !needsUpdate {
						break
					}
					oldPtr, err := chains.originalFromMostRecentOrSame(
						currPath.TailPointer())
					if err != nil {
						return nil, 0, err
					}
					item := &ChangeItem{
						Type:     ChangeTypeWrite,
						CurrPath: currPath,
						OldPtr:   oldPtr,
					}
					items[pString] = append(items[pString], item)
				}
			}
		}
	}

	// Flatten the per-path slices into one list.  (Map iteration
	// order is random, hence the stable sort below.)
	changes = make([]*ChangeItem, 0, numItems)
	for _, itemSlice := range items {
		changes = append(changes, itemSlice...)
	}

	// Renames should always go at the end, since if there's a pointer
	// change for the renamed thing (e.g., because it was a directory
	// that changed or a file that was written), we need to process
	// that pointer change before the rename.
	sort.SliceStable(changes, func(i, j int) bool {
		if changes[i].Type != ChangeTypeRename &&
			changes[j].Type == ChangeTypeRename {
			return true
		}
		return false
	})

	return changes, refSize, nil
}