github.com/bazelbuild/remote-apis-sdks@v0.0.0-20240425170053-8a36686a6350/go/pkg/client/tree_test.go

     1  package client_test
     2  
     3  import (
     4  	"context"
     5  	"math/rand"
     6  	"os"
     7  	"path/filepath"
     8  	"testing"
     9  
    10  	cpb "github.com/bazelbuild/remote-apis-sdks/go/api/command"
    11  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/chunker"
    12  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/client"
    13  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/command"
    14  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
    15  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/fakes"
    16  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata"
    17  	"github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo"
    18  	"github.com/google/go-cmp/cmp"
    19  	"github.com/google/go-cmp/cmp/cmpopts"
    20  	"google.golang.org/protobuf/proto"
    21  	"google.golang.org/protobuf/testing/protocmp"
    22  
    23  	repb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
    24  )
    25  
    26  var (
    27  	fooBlob, barBlob, bazBlob = []byte("foo"), []byte("bar"), []byte("baz")
    28  	fooDg, barDg, bazDg       = digest.NewFromBlob(fooBlob), digest.NewFromBlob(barBlob), digest.NewFromBlob(bazBlob)
    29  	fooDgPb, barDgPb, bazDgPb = fooDg.ToProto(), barDg.ToProto(), bazDg.ToProto()
    30  	fooProperties             = &cpb.NodeProperties{Properties: []*cpb.NodeProperty{{Name: "fooName", Value: "fooValue"}}}
    31  
    32  	fooDir    = &repb.Directory{Files: []*repb.FileNode{{Name: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)}}}
    33  	barDir    = &repb.Directory{Files: []*repb.FileNode{{Name: "bar", Digest: barDgPb}}}
    34  	bazDir    = &repb.Directory{Files: []*repb.FileNode{{Name: "baz", Digest: bazDgPb}}}
    35  	vBarDir   = &repb.Directory{Directories: []*repb.DirectoryNode{{Name: "baz", Digest: digest.Empty.ToProto()}}}
    36  	foobarDir = &repb.Directory{Files: []*repb.FileNode{
    37  		{Name: "bar", Digest: barDgPb},
    38  		{Name: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)},
    39  	}}
    40  
    41  	fooDirBlob, barDirBlob, foobarDirBlob, bazDirBlob, vBarDirBlob = mustMarshal(fooDir), mustMarshal(barDir), mustMarshal(foobarDir), mustMarshal(bazDir), mustMarshal(vBarDir)
    42  	fooDirDg, barDirDg, foobarDirDg, bazDirDg, vBarDirDg           = digest.NewFromBlob(fooDirBlob), digest.NewFromBlob(barDirBlob), digest.NewFromBlob(foobarDirBlob), digest.NewFromBlob(bazDirBlob), digest.NewFromBlob(vBarDirBlob)
    43  	fooDirDgPb, barDirDgPb, foobarDirDgPb, bazDirDgPb, vBarDirDgPb = fooDirDg.ToProto(), barDirDg.ToProto(), foobarDirDg.ToProto(), bazDirDg.ToProto(), vBarDirDg.ToProto()
    44  )
    45  
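        // mustMarshal returns the wire-format encoding of p, panicking on any
        // marshalling error; it is only used to build static test fixtures.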
    46  func mustMarshal(p proto.Message) []byte {
    47  	b, err := proto.Marshal(p)
    48  	if err != nil {
    49  		panic("error marshalling proto during test setup: " + err.Error())
    50  	}
    51  	return b
    52  }
    53  
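        // newDigest builds a digest.Digest from a hash/size pair, failing the test on error.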
    54  func newDigest(t *testing.T, hash string, size int64) digest.Digest {
    55  	dg, err := digest.New(hash, size)
    56  	if err != nil {
    57  		t.Fatalf("unexpected error while creating digest: %v", err)
    58  	}
    59  	return dg
    60  }
    61  
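        // inputPath describes one filesystem entry (regular file, empty directory,
        // or symlink) to create under a test's exec root.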
    62  type inputPath struct {
    63  	path             string
    64  	emptyDir         bool
    65  	fileContents     []byte
    66  	isExecutable     bool
    67  	isSymlink        bool
    68  	isAbsolute       bool
    69  	relSymlinkTarget string
    70  }
    71  
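        // construct materializes the given input paths under dir, creating parent
        // directories as needed.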
    72  func construct(dir string, ips []*inputPath) error {
    73  	for _, ip := range ips {
    74  		path := filepath.Join(dir, ip.path)
    75  		if ip.emptyDir {
    76  			if err := os.MkdirAll(path, 0777); err != nil {
    77  				return err
    78  			}
    79  			continue
    80  		}
    81  		if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {
    82  			return err
    83  		}
    84  		if ip.isSymlink {
    85  			target := ip.relSymlinkTarget
    86  			if ip.isAbsolute {
    87  				target = filepath.Join(dir, target)
    88  			}
    89  			if err := os.Symlink(target, path); err != nil {
    90  				return err
    91  			}
    92  			continue
    93  		}
    94  		// Regular file.
    95  		perm := os.FileMode(0666)
    96  		if ip.isExecutable {
    97  			perm = os.FileMode(0777)
    98  		}
    99  		if err := os.WriteFile(path, ip.fileContents, perm); err != nil {
   100  			return err
   101  		}
   102  	}
   103  	return nil
   104  }
   105  
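        // callCountingMetadataCache wraps a filemetadata.Cache and counts accesses
        // per exec-root-relative path, so tests can assert exactly which paths were queried.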
   106  type callCountingMetadataCache struct {
   107  	calls    map[string]int
   108  	cache    filemetadata.Cache
   109  	execRoot string
   110  	t        *testing.T
   111  }
   112  
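        // newCallCountingMetadataCache returns a callCountingMetadataCache rooted at
        // execRoot and backed by a non-caching filemetadata.Cache.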
   113  func newCallCountingMetadataCache(execRoot string, t *testing.T) *callCountingMetadataCache {
   114  	return &callCountingMetadataCache{
   115  		calls:    make(map[string]int),
   116  		cache:    filemetadata.NewNoopCache(),
   117  		execRoot: execRoot,
   118  		t:        t,
   119  	}
   120  }
   121  
   122  func (c *callCountingMetadataCache) Get(path string) *filemetadata.Metadata {
   123  	c.t.Helper()
   124  	p, err := filepath.Rel(c.execRoot, path)
   125  	if err != nil {
   126  		c.t.Errorf("expected %v to be under %v", path, c.execRoot)
   127  	}
   128  	c.calls[p]++
   129  	return c.cache.Get(path)
   130  }
   131  
   132  func (c *callCountingMetadataCache) Delete(path string) error {
   133  	c.t.Helper()
   134  	p, err := filepath.Rel(c.execRoot, path)
   135  	if err != nil {
   136  		c.t.Errorf("expected %v to be under %v", path, c.execRoot)
   137  	}
   138  	c.calls[p]++
   139  	return c.cache.Delete(path)
   140  }
   141  
   142  func (c *callCountingMetadataCache) Update(path string, ce *filemetadata.Metadata) error {
   143  	c.t.Helper()
   144  	p, err := filepath.Rel(c.execRoot, path)
   145  	if err != nil {
   146  		c.t.Errorf("expected %v to be under %v", path, c.execRoot)
   147  	}
   148  	c.calls[p]++
   149  	return c.cache.Update(path, ce)
   150  }
   151  
   152  func (c *callCountingMetadataCache) GetCacheHits() uint64 {
   153  	return 0
   154  }
   155  
   156  func (c *callCountingMetadataCache) GetCacheMisses() uint64 {
   157  	return 0
   158  }
   159  
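        // TestComputeMerkleTreeRemoteWorkingDir checks that overriding the remote
        // working directory yields the same root digest as the reference tree built
        // without an override, while the file metadata cache is still queried with local paths.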
   160  func TestComputeMerkleTreeRemoteWorkingDir(t *testing.T) {
   161  	callComputeMerkleTree := func(files, inputs []string, virtualInputs []*command.VirtualInput, localWorkingDir, remoteWorkingDir string) (digest.Digest, map[string]int) {
   162  		root := t.TempDir()
   163  		inputPaths := []*inputPath{}
   164  		for _, file := range files {
   165  			inputPaths = append(inputPaths, &inputPath{path: file, fileContents: []byte(filepath.Base(file)), isExecutable: true})
   166  		}
   167  		if err := construct(root, inputPaths); err != nil {
   168  			t.Fatalf("failed to construct input dir structure: %v", err)
   169  		}
   170  		cache := newCallCountingMetadataCache(root, t)
   171  		spec := &command.InputSpec{Inputs: inputs, VirtualInputs: virtualInputs}
   172  
   173  		e, cleanup := fakes.NewTestEnv(t)
   174  		defer cleanup()
   175  
   176  		rootDg, _, _, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), root, localWorkingDir, remoteWorkingDir, spec, cache)
   177  		if err != nil {
   178  			t.Errorf("ComputeMerkleTree(...) gave error %q, want success", err)
   179  		}
   180  		return rootDg, cache.calls
   181  	}
   182  	// Call ComputeMerkleTree with working dir = "out/bar" and the remote working dir not overridden.
   183  	// The inputs contain a file and a non-empty dir inside the working directory, and a file and a non-empty dir outside of it.
   184  	referenceDg, _ := callComputeMerkleTree([]string{"out/bar/a", "out/bar/foo/b", "c", "bar/baz/d"},
   185  		[]string{"out/bar/a", "out/bar/foo", "c", "bar/baz"}, []*command.VirtualInput{{Path: "out/bar/baz", IsEmptyDirectory: true}}, "out/bar", "")
   186  	tests := []struct {
   187  		localWorkingDir  string
   188  		remoteWorkingDir string
   189  		files            []string
   190  		inputs           []string
   191  		virtualInputs    []*command.VirtualInput
   192  		wantCacheCalls   map[string]int
   193  	}{
   194  		{
   195  			localWorkingDir:  "out/foo1",
   196  			remoteWorkingDir: "out/bar",
   197  			files:            []string{"out/foo1/a", "out/foo1/foo/b", "c", "bar/baz/d"},
   198  			inputs:           []string{"out/foo1/a", "out/foo1/foo", "c", "bar/baz"},
   199  			virtualInputs:    []*command.VirtualInput{{Path: "out/foo1/baz", IsEmptyDirectory: true}},
   200  			// ensures that file metadata cache is queried for local paths (not remote ones)
   201  			wantCacheCalls: map[string]int{"out/foo1/a": 1, "out/foo1/foo": 1, "out/foo1/foo/b": 1, "c": 1, "bar/baz": 1, "bar/baz/d": 1},
   202  		},
   203  		{
   204  			localWorkingDir:  "out/bar",
   205  			remoteWorkingDir: "out/bar",
   206  			files:            []string{"out/bar/a", "out/bar/foo/b", "c", "bar/baz/d"},
   207  			inputs:           []string{"out/bar/a", "out/bar/foo", "c", "bar/baz"},
   208  			virtualInputs:    []*command.VirtualInput{{Path: "out/bar/baz", IsEmptyDirectory: true}},
   209  			wantCacheCalls:   map[string]int{"out/bar/a": 1, "out/bar/foo": 1, "out/bar/foo/b": 1, "c": 1, "bar/baz": 1, "bar/baz/d": 1},
   210  		},
   211  	}
   212  	for _, tc := range tests {
   213  		gotDg, gotCacheCalls := callComputeMerkleTree(tc.files, tc.inputs, tc.virtualInputs, tc.localWorkingDir, tc.remoteWorkingDir)
   214  		if diff := cmp.Diff(referenceDg, gotDg); diff != "" {
   215  			t.Errorf("ComputeMerkleTree with workingDir=%q and remoteWorkingDir=%q returned a different root digest than expected (-want +got)\n%s", tc.localWorkingDir, tc.remoteWorkingDir, diff)
   216  		}
   217  		if diff := cmp.Diff(tc.wantCacheCalls, gotCacheCalls); diff != "" {
   218  			t.Errorf("ComputeMerkleTree with workingDir=%q and remoteWorkingDir=%q made unexpected file metadata cache calls (-want +got)\n%s", tc.localWorkingDir, tc.remoteWorkingDir, diff)
   219  		}
   220  	}
   221  }
   222  
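        // TestComputeMerkleTreeEmptySubdirs checks that empty directories at several
        // nesting levels are represented in the Merkle tree, the uploaded blobs, and the stats.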
   223  func TestComputeMerkleTreeEmptySubdirs(t *testing.T) {
   224  	fileBlob := []byte("bla")
   225  	fileDg := digest.NewFromBlob(fileBlob)
   226  	fileDgPb := fileDg.ToProto()
   227  	emptyDirDgPb := digest.Empty.ToProto()
   228  	cDir := &repb.Directory{
   229  		Files:       []*repb.FileNode{{Name: "file", Digest: fileDgPb}},
   230  		Directories: []*repb.DirectoryNode{{Name: "empty", Digest: emptyDirDgPb}},
   231  	}
   232  	cDirBlob := mustMarshal(cDir)
   233  	cDirDg := digest.NewFromBlob(cDirBlob)
   234  	cDirDgPb := cDirDg.ToProto()
   235  	bDir := &repb.Directory{
   236  		Directories: []*repb.DirectoryNode{
   237  			{Name: "c", Digest: cDirDgPb},
   238  			{Name: "empty", Digest: emptyDirDgPb},
   239  		},
   240  	}
   241  	bDirBlob := mustMarshal(bDir)
   242  	bDirDg := digest.NewFromBlob(bDirBlob)
   243  	bDirDgPb := bDirDg.ToProto()
   244  	aDir := &repb.Directory{
   245  		Directories: []*repb.DirectoryNode{
   246  			{Name: "b", Digest: bDirDgPb},
   247  			{Name: "empty", Digest: emptyDirDgPb},
   248  		},
   249  	}
   250  	aDirBlob := mustMarshal(aDir)
   251  	aDirDg := digest.NewFromBlob(aDirBlob)
   252  
   253  	ips := []*inputPath{
   254  		{path: "empty", emptyDir: true},
   255  		{path: "b/empty", emptyDir: true},
   256  		{path: "b/c/empty", emptyDir: true},
   257  		{path: "b/c/file", fileContents: fileBlob},
   258  	}
   259  	root := t.TempDir()
   260  	if err := construct(root, ips); err != nil {
   261  		t.Fatalf("failed to construct input dir structure: %v", err)
   262  	}
   263  	inputSpec := &command.InputSpec{Inputs: []string{"b", "empty"}}
   264  	wantBlobs := map[digest.Digest][]byte{
   265  		aDirDg:       aDirBlob,
   266  		bDirDg:       bDirBlob,
   267  		cDirDg:       cDirBlob,
   268  		fileDg:       fileBlob,
   269  		digest.Empty: []byte{},
   270  	}
   271  
   272  	gotBlobs := make(map[digest.Digest][]byte)
   273  	cache := newCallCountingMetadataCache(root, t)
   274  
   275  	e, cleanup := fakes.NewTestEnv(t)
   276  	defer cleanup()
   277  
   278  	gotRootDg, inputs, stats, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), root, "", "", inputSpec, cache)
   279  	if err != nil {
   280  		t.Errorf("ComputeMerkleTree(...) gave error %v, want success", err)
   281  	}
   282  	for _, ue := range inputs {
   283  		ch, err := chunker.New(ue, false, int(e.Client.GrpcClient.ChunkMaxSize))
   284  		if err != nil {
   285  			t.Fatalf("chunker.New(ue): failed to create chunker from UploadEntry: %v", err)
   286  		}
   287  		blob, err := ch.FullData()
   288  		if err != nil {
   289  			t.Errorf("chunker %v FullData() returned error %v", ch, err)
   290  		}
   291  		gotBlobs[ue.Digest] = blob
   292  	}
   293  	if diff := cmp.Diff(aDirDg, gotRootDg); diff != "" {
   294  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on root:\n%s", diff)
   295  		if gotRootBlob, ok := gotBlobs[gotRootDg]; ok {
   296  			gotRoot := new(repb.Directory)
   297  			if err := proto.Unmarshal(gotRootBlob, gotRoot); err != nil {
   298  				t.Errorf("  When unpacking root blob, got error: %s", err)
   299  			} else {
   300  				diff := cmp.Diff(aDir, gotRoot)
   301  				t.Errorf("  Diff between unpacked roots (-want +got):\n%s", diff)
   302  			}
   303  		} else {
   304  			t.Errorf("  Computed root digest not present in blobs map")
   305  		}
   306  	}
   307  	if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" {
   308  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on blobs:\n%s", diff)
   309  	}
   310  	wantCacheCalls := map[string]int{
   311  		"empty":     1,
   312  		"b":         1,
   313  		"b/empty":   1,
   314  		"b/c":       1,
   315  		"b/c/empty": 1,
   316  		"b/c/file":  1,
   317  	}
   318  	if diff := cmp.Diff(wantCacheCalls, cache.calls, cmpopts.EquateEmpty()); diff != "" {
   319  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on file metadata cache calls:\n%s", diff)
   320  	}
   321  	wantStats := &client.TreeStats{
   322  		InputDirectories: 6,
   323  		InputFiles:       1,
   324  		TotalInputBytes:  fileDg.Size + aDirDg.Size + bDirDg.Size + cDirDg.Size,
   325  	}
   326  	if diff := cmp.Diff(wantStats, stats); diff != "" {
   327  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on stats:\n%s", diff)
   328  	}
   329  }
   330  
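        // TestComputeMerkleTreeEmptyStructureVirtualInputs checks that empty directories
        // declared only as virtual inputs appear in the tree without any file metadata
        // cache access.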
   331  func TestComputeMerkleTreeEmptyStructureVirtualInputs(t *testing.T) {
   332  	emptyDirDgPb := digest.Empty.ToProto()
   333  	cDir := &repb.Directory{
   334  		Directories: []*repb.DirectoryNode{{Name: "empty", Digest: emptyDirDgPb}},
   335  	}
   336  	cDirBlob := mustMarshal(cDir)
   337  	cDirDg := digest.NewFromBlob(cDirBlob)
   338  	cDirDgPb := cDirDg.ToProto()
   339  	bDir := &repb.Directory{
   340  		Directories: []*repb.DirectoryNode{
   341  			{Name: "c", Digest: cDirDgPb},
   342  			{Name: "empty", Digest: emptyDirDgPb},
   343  		},
   344  	}
   345  	bDirBlob := mustMarshal(bDir)
   346  	bDirDg := digest.NewFromBlob(bDirBlob)
   347  	bDirDgPb := bDirDg.ToProto()
   348  	aDir := &repb.Directory{
   349  		Directories: []*repb.DirectoryNode{
   350  			{Name: "b", Digest: bDirDgPb},
   351  			{Name: "empty", Digest: emptyDirDgPb},
   352  		},
   353  	}
   354  	aDirBlob := mustMarshal(aDir)
   355  	aDirDg := digest.NewFromBlob(aDirBlob)
   356  
   357  	root := t.TempDir()
   358  	inputSpec := &command.InputSpec{VirtualInputs: []*command.VirtualInput{
   359  		&command.VirtualInput{Path: "b/c/empty", IsEmptyDirectory: true},
   360  		&command.VirtualInput{Path: "b/empty", IsEmptyDirectory: true},
   361  		&command.VirtualInput{Path: "empty", IsEmptyDirectory: true},
   362  	}}
   363  	wantBlobs := map[digest.Digest][]byte{
   364  		aDirDg:       aDirBlob,
   365  		bDirDg:       bDirBlob,
   366  		cDirDg:       cDirBlob,
   367  		digest.Empty: []byte{},
   368  	}
   369  
   370  	gotBlobs := make(map[digest.Digest][]byte)
   371  	cache := newCallCountingMetadataCache(root, t)
   372  
   373  	e, cleanup := fakes.NewTestEnv(t)
   374  	defer cleanup()
   375  
   376  	gotRootDg, inputs, stats, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), root, "", "", inputSpec, cache)
   377  	if err != nil {
   378  		t.Errorf("ComputeMerkleTree(...) gave error %v, want success", err)
   379  	}
   380  	for _, ue := range inputs {
   381  		ch, err := chunker.New(ue, false, int(e.Client.GrpcClient.ChunkMaxSize))
   382  		if err != nil {
   383  			t.Fatalf("chunker.New(ue): failed to create chunker from UploadEntry: %v", err)
   384  		}
   385  		blob, err := ch.FullData()
   386  		if err != nil {
   387  			t.Errorf("chunker %v FullData() returned error %v", ch, err)
   388  		}
   389  		gotBlobs[ue.Digest] = blob
   390  	}
   391  	if diff := cmp.Diff(aDirDg, gotRootDg); diff != "" {
   392  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on root:\n%s", diff)
   393  		if gotRootBlob, ok := gotBlobs[gotRootDg]; ok {
   394  			gotRoot := new(repb.Directory)
   395  			if err := proto.Unmarshal(gotRootBlob, gotRoot); err != nil {
   396  				t.Errorf("  When unpacking root blob, got error: %s", err)
   397  			} else {
   398  				diff := cmp.Diff(aDir, gotRoot)
   399  				t.Errorf("  Diff between unpacked roots (-want +got):\n%s", diff)
   400  			}
   401  		} else {
   402  			t.Errorf("  Computed root digest not present in blobs map")
   403  		}
   404  	}
   405  	if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" {
   406  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on blobs:\n%s", diff)
   407  	}
   408  	if len(cache.calls) != 0 {
   409  		t.Errorf("ComputeMerkleTree(...) made unexpected file metadata cache calls (want none, got %v)", cache.calls)
   410  	}
   411  	wantStats := &client.TreeStats{
   412  		InputDirectories: 6,
   413  		TotalInputBytes:  aDirDg.Size + bDirDg.Size + cDirDg.Size,
   414  	}
   415  	if diff := cmp.Diff(wantStats, stats); diff != "" {
   416  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on stats:\n%s", diff)
   417  	}
   418  }
   419  
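        // TestComputeMerkleTreeEmptyRoot checks that an empty exec root yields the empty
        // digest, a single empty-blob upload entry, and a one-directory stats count.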
   420  func TestComputeMerkleTreeEmptyRoot(t *testing.T) {
   421  	root := t.TempDir()
   422  	inputSpec := &command.InputSpec{
   423  		Inputs: []string{"."},
   424  	}
   425  	cache := newCallCountingMetadataCache(root, t)
   426  
   427  	e, cleanup := fakes.NewTestEnv(t)
   428  	defer cleanup()
   429  
   430  	gotRootDg, inputs, stats, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), root, "", "", inputSpec, cache)
   431  	if err != nil {
   432  		t.Errorf("ComputeMerkleTree(...) gave error %v, want success", err)
   433  	}
   434  	if diff := cmp.Diff(digest.Empty, gotRootDg); diff != "" {
   435  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on root:\n%s", diff)
   436  	}
   437  	if len(inputs) != 1 {
   438  		t.Errorf("ComputeMerkleTree(...) should only include one input:\n%v", inputs)
   439  	}
   440  	wantInput := uploadinfo.EntryFromBlob([]byte{})
   441  	if diff := cmp.Diff(wantInput, inputs[0], cmp.AllowUnexported(uploadinfo.Entry{})); diff != "" {
   442  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on input:\n%s", diff)
   443  	}
   444  	wantStats := &client.TreeStats{InputDirectories: 1}
   445  	if diff := cmp.Diff(wantStats, stats); diff != "" {
   446  		t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on stats:\n%s", diff)
   447  	}
   448  }
   449  
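        // TestComputeMerkleTree runs ComputeMerkleTree over a table of input layouts
        // covering symlinks, exclusions, virtual inputs, and de-duplication, and checks
        // the resulting root digest, uploaded blobs, metadata cache calls, and stats.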
   450  func TestComputeMerkleTree(t *testing.T) {
   451  	foobarSymDir := &repb.Directory{Symlinks: []*repb.SymlinkNode{{Name: "foobarSymDir", Target: "../foobarDir"}}}
   452  	foobarSymDirBlob := mustMarshal(foobarSymDir)
   453  	foobarSymDirDg := digest.NewFromBlob(foobarSymDirBlob)
   454  	foobarSymDirDgPb := foobarSymDirDg.ToProto()
   455  
   456  	tests := []struct {
   457  		desc  string
   458  		input []*inputPath
   459  		spec  *command.InputSpec
   460  		// The expected results are calculated by marshalling rootDir, then expecting the result to be
   461  		// the digest of rootDir plus a map containing rootDir's marshalled blob and all the additional
   462  		// blobs.
   463  		rootDir         *repb.Directory
   464  		additionalBlobs [][]byte
   465  		wantCacheCalls  map[string]int
   466  		// The expected wantStats.TotalInputBytes is calculated by adding the marshalled rootDir size.
   467  		wantStats *client.TreeStats
   468  		treeOpts  *client.TreeSymlinkOpts
   469  	}{
   470  		{
   471  			desc:            "Empty directory",
   472  			input:           nil,
   473  			spec:            &command.InputSpec{},
   474  			rootDir:         &repb.Directory{},
   475  			additionalBlobs: nil,
   476  			wantStats: &client.TreeStats{
   477  				InputDirectories: 1,
   478  			},
   479  		},
   480  		{
   481  			desc: "Files at root",
   482  			input: []*inputPath{
   483  				{path: "foo", fileContents: fooBlob, isExecutable: true},
   484  				{path: "bar", fileContents: barBlob},
   485  			},
   486  			spec: &command.InputSpec{
   487  				Inputs:              []string{"foo", "bar"},
   488  				InputNodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
   489  			},
   490  			rootDir:         foobarDir,
   491  			additionalBlobs: [][]byte{fooBlob, barBlob},
   492  			wantCacheCalls: map[string]int{
   493  				"foo": 1,
   494  				"bar": 1,
   495  			},
   496  			wantStats: &client.TreeStats{
   497  				InputDirectories: 1,
   498  				InputFiles:       2,
   499  				TotalInputBytes:  fooDg.Size + barDg.Size,
   500  			},
   501  		},
   502  		{
   503  			desc: "File below root",
   504  			input: []*inputPath{
   505  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   506  				{path: "barDir/bar", fileContents: barBlob},
   507  			},
   508  			spec: &command.InputSpec{
   509  				Inputs:              []string{"fooDir", "barDir"},
   510  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   511  			},
   512  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
   513  				{Name: "barDir", Digest: barDirDgPb},
   514  				{Name: "fooDir", Digest: fooDirDgPb},
   515  			}},
   516  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
   517  			wantCacheCalls: map[string]int{
   518  				"fooDir":     1,
   519  				"fooDir/foo": 1,
   520  				"barDir":     1,
   521  				"barDir/bar": 1,
   522  			},
   523  			wantStats: &client.TreeStats{
   524  				InputDirectories: 3,
   525  				InputFiles:       2,
   526  				TotalInputBytes:  fooDg.Size + barDg.Size + fooDirDg.Size + barDirDg.Size,
   527  			},
   528  		},
   529  		{
   530  			desc: "Normalizing input paths",
   531  			input: []*inputPath{
   532  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   533  				{path: "fooDir/otherDir/foo", fileContents: fooBlob, isExecutable: true},
   534  				{path: "barDir/bar", fileContents: barBlob},
   535  			},
   536  			spec: &command.InputSpec{
   537  				Inputs:              []string{"fooDir/../fooDir/foo", "//barDir//bar"},
   538  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   539  			},
   540  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
   541  				{Name: "barDir", Digest: barDirDgPb},
   542  				{Name: "fooDir", Digest: fooDirDgPb},
   543  			}},
   544  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
   545  			wantCacheCalls: map[string]int{
   546  				"fooDir/foo": 1,
   547  				"barDir/bar": 1,
   548  			},
   549  			wantStats: &client.TreeStats{
   550  				InputDirectories: 3,
   551  				InputFiles:       2,
   552  				TotalInputBytes:  fooDg.Size + barDg.Size + fooDirDg.Size + barDirDg.Size,
   553  			},
   554  		},
   555  		{
   556  			desc: "File absolute symlink",
   557  			input: []*inputPath{
   558  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   559  				{path: "foo", isSymlink: true, isAbsolute: true, relSymlinkTarget: "fooDir/foo"},
   560  			},
   561  			spec: &command.InputSpec{
   562  				Inputs:              []string{"fooDir", "foo"},
   563  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   564  			},
   565  			rootDir: &repb.Directory{
   566  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   567  				Files:       []*repb.FileNode{{Name: "foo", Digest: fooDgPb, IsExecutable: true}},
   568  			},
   569  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   570  			wantCacheCalls: map[string]int{
   571  				"fooDir":     1,
   572  				"fooDir/foo": 1,
   573  				"foo":        1,
   574  			},
   575  			wantStats: &client.TreeStats{
   576  				InputDirectories: 2,
   577  				InputFiles:       2,
   578  				TotalInputBytes:  2*fooDg.Size + fooDirDg.Size,
   579  			},
   580  		},
   581  		{
   582  			desc: "File relative symlink",
   583  			input: []*inputPath{
   584  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   585  				{path: "foo", isSymlink: true, relSymlinkTarget: "fooDir/foo"},
   586  			},
   587  			spec: &command.InputSpec{
   588  				Inputs:              []string{"fooDir", "foo"},
   589  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   590  			},
   591  			rootDir: &repb.Directory{
   592  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   593  				Files:       []*repb.FileNode{{Name: "foo", Digest: fooDgPb, IsExecutable: true}},
   594  			},
   595  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   596  			wantCacheCalls: map[string]int{
   597  				"fooDir":     1,
   598  				"fooDir/foo": 1,
   599  				"foo":        1,
   600  			},
   601  			wantStats: &client.TreeStats{
   602  				InputDirectories: 2,
   603  				InputFiles:       2,
   604  				TotalInputBytes:  2*fooDg.Size + fooDirDg.Size,
   605  			},
   606  		},
   607  		{
   608  			desc: "File relative symlink (preserved)",
   609  			input: []*inputPath{
   610  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   611  				{path: "fooSym", isSymlink: true, relSymlinkTarget: "fooDir/foo"},
   612  			},
   613  			spec: &command.InputSpec{
   614  				// The symlink target will be traversed recursively.
   615  				Inputs:              []string{"fooSym"},
   616  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   617  			},
   618  			rootDir: &repb.Directory{
   619  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   620  				Symlinks:    []*repb.SymlinkNode{{Name: "fooSym", Target: "fooDir/foo"}},
   621  			},
   622  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   623  			wantCacheCalls: map[string]int{
   624  				"fooDir":     1,
   625  				"fooDir/foo": 1,
   626  				"fooSym":     1,
   627  			},
   628  			wantStats: &client.TreeStats{
   629  				InputDirectories: 2,
   630  				InputFiles:       1,
   631  				InputSymlinks:    1,
   632  				TotalInputBytes:  fooDg.Size + fooDirDg.Size,
   633  			},
   634  			treeOpts: &client.TreeSymlinkOpts{
   635  				Preserved:     true,
   636  				FollowsTarget: true,
   637  			},
   638  		},
   639  		{
   640  			desc: "File relative symlink (preserved based on InputSpec)",
   641  			input: []*inputPath{
   642  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   643  				{path: "fooSym", isSymlink: true, relSymlinkTarget: "fooDir/foo"},
   644  			},
   645  			spec: &command.InputSpec{
   646  				// The symlink target will be traversed recursively.
   647  				Inputs:              []string{"fooSym"},
   648  				SymlinkBehavior:     command.PreserveSymlink,
   649  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   650  			},
   651  			rootDir: &repb.Directory{
   652  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   653  				Symlinks:    []*repb.SymlinkNode{{Name: "fooSym", Target: "fooDir/foo"}},
   654  			},
   655  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   656  			wantCacheCalls: map[string]int{
   657  				"fooDir":     1,
   658  				"fooDir/foo": 1,
   659  				"fooSym":     1,
   660  			},
   661  			wantStats: &client.TreeStats{
   662  				InputDirectories: 2,
   663  				InputFiles:       1,
   664  				InputSymlinks:    1,
   665  				TotalInputBytes:  fooDg.Size + fooDirDg.Size,
   666  			},
   667  			treeOpts: &client.TreeSymlinkOpts{
   668  				FollowsTarget: true,
   669  			},
   670  		},
   671  		{
   672  			desc: "File relative symlink (preserved but not followed)",
   673  			input: []*inputPath{
   674  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   675  				{path: "fooSym", isSymlink: true, relSymlinkTarget: "fooDir/foo"},
   676  			},
   677  			spec: &command.InputSpec{
   678  				Inputs:              []string{"fooSym"},
   679  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   680  			},
   681  			rootDir: &repb.Directory{
   682  				Directories: nil,
   683  				Symlinks:    []*repb.SymlinkNode{{Name: "fooSym", Target: "fooDir/foo"}},
   684  			},
   685  			wantCacheCalls: map[string]int{
   686  				"fooSym": 1,
   687  			},
   688  			wantStats: &client.TreeStats{
   689  				InputDirectories: 1,
   690  				InputFiles:       0,
   691  				InputSymlinks:    1,
   692  				TotalInputBytes:  0,
   693  			},
   694  			treeOpts: &client.TreeSymlinkOpts{
   695  				Preserved: true,
   696  			},
   697  		},
   698  		{
   699  			desc: "File absolute symlink (preserved)",
   700  			input: []*inputPath{
   701  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   702  				{path: "fooSym", isSymlink: true, isAbsolute: true, relSymlinkTarget: "fooDir/foo"},
   703  			},
   704  			spec: &command.InputSpec{
   705  				// The symlink target will be traversed recursively.
   706  				Inputs:              []string{"fooSym"},
   707  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   708  			},
   709  			rootDir: &repb.Directory{
   710  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   711  				Symlinks:    []*repb.SymlinkNode{{Name: "fooSym", Target: "fooDir/foo"}},
   712  			},
   713  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   714  			wantCacheCalls: map[string]int{
   715  				"fooDir":     1,
   716  				"fooDir/foo": 1,
   717  				"fooSym":     1,
   718  			},
   719  			wantStats: &client.TreeStats{
   720  				InputDirectories: 2,
   721  				InputFiles:       1,
   722  				InputSymlinks:    1,
   723  				TotalInputBytes:  fooDg.Size + fooDirDg.Size,
   724  			},
   725  			treeOpts: &client.TreeSymlinkOpts{
   726  				Preserved:     true,
   727  				FollowsTarget: true,
   728  			},
   729  		},
   730  		{
   731  			desc: "File invalid symlink",
   732  			input: []*inputPath{
   733  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   734  				{path: "foo", isSymlink: true, relSymlinkTarget: "fooDir/foo"},
   735  				{path: "bar", isSymlink: true, relSymlinkTarget: "fooDir/bar"},
   736  			},
   737  			spec: &command.InputSpec{
   738  				Inputs:              []string{"fooDir", "foo"},
   739  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   740  			},
   741  			rootDir: &repb.Directory{
   742  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   743  				Files:       []*repb.FileNode{{Name: "foo", Digest: fooDgPb, IsExecutable: true}},
   744  			},
   745  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   746  			wantCacheCalls: map[string]int{
   747  				"fooDir":     1,
   748  				"fooDir/foo": 1,
   749  				"foo":        1,
   750  			},
   751  			wantStats: &client.TreeStats{
   752  				InputDirectories: 2,
   753  				InputFiles:       2,
   754  				TotalInputBytes:  2*fooDg.Size + fooDirDg.Size,
   755  			},
   756  		},
   757  		{
   758  			desc: "Dangling symlink is preserved",
   759  			input: []*inputPath{
   760  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   761  				{path: "invalidSym", isSymlink: true, relSymlinkTarget: "fooDir/invalid"},
   762  			},
   763  			spec: &command.InputSpec{
   764  				Inputs:              []string{"fooDir", "invalidSym"},
   765  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   766  			},
   767  			rootDir: &repb.Directory{
   768  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
   769  				Files:       nil,
   770  				Symlinks:    []*repb.SymlinkNode{{Name: "invalidSym", Target: "fooDir/invalid"}},
   771  			},
   772  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
   773  			wantCacheCalls: map[string]int{
   774  				"fooDir":     2,
   775  				"fooDir/foo": 1,
   776  				"invalidSym": 1,
   777  			},
   778  			wantStats: &client.TreeStats{
   779  				InputDirectories: 2,
   780  				InputFiles:       1,
   781  				InputSymlinks:    1,
   782  				TotalInputBytes:  fooDg.Size + fooDirDg.Size,
   783  			},
   784  			treeOpts: &client.TreeSymlinkOpts{
   785  				Preserved: true,
   786  			},
   787  		},
   788  		{
   789  			desc: "Directory absolute symlink",
   790  			input: []*inputPath{
   791  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   792  				{path: "barDirTarget/bar", fileContents: barBlob},
   793  				{path: "barDir", isSymlink: true, isAbsolute: true, relSymlinkTarget: "barDirTarget"},
   794  			},
   795  			spec: &command.InputSpec{
   796  				Inputs:              []string{"fooDir", "barDir"},
   797  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   798  			},
   799  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
   800  				{Name: "barDir", Digest: barDirDgPb},
   801  				{Name: "fooDir", Digest: fooDirDgPb},
   802  			}},
   803  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
   804  			wantCacheCalls: map[string]int{
   805  				"fooDir":     1,
   806  				"fooDir/foo": 1,
   807  				"barDir":     1,
   808  				"barDir/bar": 1,
   809  			},
   810  			wantStats: &client.TreeStats{
   811  				InputDirectories: 3,
   812  				InputFiles:       2,
   813  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
   814  			},
   815  		},
   816  		{
   817  			desc: "Directory relative symlink",
   818  			input: []*inputPath{
   819  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
   820  				{path: "barDirTarget/bar", fileContents: barBlob},
   821  				{path: "barDir", isSymlink: true, relSymlinkTarget: "barDirTarget"},
   822  			},
   823  			spec: &command.InputSpec{
   824  				Inputs:              []string{"fooDir", "barDir"},
   825  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
   826  			},
   827  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
   828  				{Name: "barDir", Digest: barDirDgPb},
   829  				{Name: "fooDir", Digest: fooDirDgPb},
   830  			}},
   831  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
   832  			wantCacheCalls: map[string]int{
   833  				"fooDir":     1,
   834  				"fooDir/foo": 1,
   835  				"barDir":     1,
   836  				"barDir/bar": 1,
   837  			},
   838  			wantStats: &client.TreeStats{
   839  				InputDirectories: 3,
   840  				InputFiles:       2,
   841  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
   842  			},
   843  		},
   844  		{
   845  			desc: "Directory absolute symlink (preserved)",
   846  			input: []*inputPath{
   847  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
   848  				{path: "foobarDir/bar", fileContents: barBlob},
   849  				{path: "base/foobarSymDir", isSymlink: true, isAbsolute: true, relSymlinkTarget: "foobarDir"},
   850  			},
   851  			spec: &command.InputSpec{
   852  				// The symlink target will be traversed recursively.
   853  				Inputs:              []string{"base/foobarSymDir"},
   854  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
   855  			},
   856  			rootDir: &repb.Directory{
   857  				Directories: []*repb.DirectoryNode{{Name: "base", Digest: foobarSymDirDgPb}, {Name: "foobarDir", Digest: foobarDirDgPb}},
   858  			},
   859  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob, foobarSymDirBlob},
   860  			wantCacheCalls: map[string]int{
   861  				"base":              1,
   862  				"foobarDir":         3,
   863  				"foobarDir/foo":     1,
   864  				"foobarDir/bar":     1,
   865  				"base/foobarSymDir": 1,
   866  			},
   867  			wantStats: &client.TreeStats{
   868  				InputDirectories: 3,
   869  				InputFiles:       2,
   870  				InputSymlinks:    1,
   871  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size + foobarSymDirDg.Size,
   872  			},
   873  			treeOpts: &client.TreeSymlinkOpts{
   874  				Preserved:     true,
   875  				FollowsTarget: true,
   876  			},
   877  		},
   878  		{
   879  			desc: "Directory relative symlink (preserved)",
   880  			input: []*inputPath{
   881  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
   882  				{path: "foobarDir/bar", fileContents: barBlob},
   883  				{path: "base/foobarSymDir", isSymlink: true, relSymlinkTarget: "../foobarDir"},
   884  			},
   885  			spec: &command.InputSpec{
   886  				// The symlink target will be traversed recursively.
   887  				Inputs:              []string{"base/foobarSymDir"},
   888  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
   889  			},
   890  			rootDir: &repb.Directory{
   891  				Directories: []*repb.DirectoryNode{{Name: "base", Digest: foobarSymDirDgPb}, {Name: "foobarDir", Digest: foobarDirDgPb}},
   892  			},
   893  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob, foobarSymDirBlob},
   894  			wantCacheCalls: map[string]int{
   895  				"base":              1,
   896  				"foobarDir":         3,
   897  				"foobarDir/foo":     1,
   898  				"foobarDir/bar":     1,
   899  				"base/foobarSymDir": 1,
   900  			},
   901  			wantStats: &client.TreeStats{
   902  				InputDirectories: 3,
   903  				InputFiles:       2,
   904  				InputSymlinks:    1,
   905  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size + foobarSymDirDg.Size,
   906  			},
   907  			treeOpts: &client.TreeSymlinkOpts{
   908  				Preserved:     true,
   909  				FollowsTarget: true,
   910  			},
   911  		},
   912  		{
   913  			desc: "Directory relative symlink (materialized from outside exec root)",
   914  			input: []*inputPath{
   915  				{path: "../foo", fileContents: fooBlob, isExecutable: true},
   916  				{path: "fooSym", isSymlink: true, relSymlinkTarget: "../foo"},
   917  				{path: "barDir/bar", fileContents: barBlob},
   918  				{path: "barSym", isSymlink: true, relSymlinkTarget: "barDir/bar"},
   919  			},
   920  			spec: &command.InputSpec{
   921  				Inputs: []string{"fooSym", "barSym"},
   922  			},
   923  			rootDir: &repb.Directory{
   924  				Directories: []*repb.DirectoryNode{
   925  					{Name: "barDir", Digest: barDirDgPb},
   926  				},
   927  				Files: []*repb.FileNode{
   928  					{Name: "fooSym", Digest: fooDgPb, IsExecutable: true},
   929  				},
   930  				Symlinks: []*repb.SymlinkNode{
   931  					{Name: "barSym", Target: "barDir/bar"},
   932  				},
   933  			},
   934  			additionalBlobs: [][]byte{fooBlob, barDirBlob, barBlob},
   935  			wantCacheCalls: map[string]int{
   936  				"barDir":     1,
   937  				"fooSym":     1,
   938  				"barSym":     1,
   939  				"barDir/bar": 1,
   940  			},
   941  			wantStats: &client.TreeStats{
   942  				InputDirectories: 2,
   943  				InputFiles:       2,
   944  				InputSymlinks:    1,
   945  				TotalInputBytes:  fooDg.Size + barDirDg.Size + barDg.Size,
   946  			},
   947  			treeOpts: &client.TreeSymlinkOpts{
   948  				Preserved:                  true,
   949  				FollowsTarget:              true,
   950  				MaterializeOutsideExecRoot: true,
   951  			},
   952  		},
   953  		{
   954  			desc: "Intermediate directory relative symlink (preserved)",
   955  			input: []*inputPath{
   956  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
   957  				{path: "foobarDir/bar", fileContents: barBlob},
   958  				{path: "foobarSymDir", isSymlink: true, relSymlinkTarget: "foobarDir"},
   959  			},
   960  			spec: &command.InputSpec{
   961  				Inputs:              []string{"foobarSymDir/foo", "foobarSymDir/bar"},
   962  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
   963  			},
   964  			rootDir: &repb.Directory{
   965  				// foobarSymDir should not be a directory.
   966  				Directories: []*repb.DirectoryNode{{Name: "foobarDir", Digest: foobarDirDgPb}},
   967  				Symlinks:    []*repb.SymlinkNode{{Name: "foobarSymDir", Target: "foobarDir"}},
   968  			},
   969  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob},
   970  			wantCacheCalls: map[string]int{
   971  				"foobarDir/foo": 1, // 1 via the symlink
   972  				"foobarDir/bar": 1,
   973  				"foobarSymDir":  3, // 1 as added symlink ancestor, 2 as input ancestor
   974  				// foobarDir should not have been followed as a target of the symlink since it was not an explicit input.
   975  			},
   976  			wantStats: &client.TreeStats{
   977  				InputDirectories: 2, // Root and foobarDir
   978  				InputFiles:       2,
   979  				InputSymlinks:    1,
   980  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size,
   981  			},
   982  			treeOpts: &client.TreeSymlinkOpts{
   983  				Preserved:     true,
   984  				FollowsTarget: true,
   985  			},
   986  		},
   987  		{
   988  			desc: "Intermediate virtual directory relative symlink (preserved)",
   989  			input: []*inputPath{
   990  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
   991  				{path: "foobarDir/bar", fileContents: barBlob},
   992  				{path: "foobarSymDir", isSymlink: true, relSymlinkTarget: "foobarDir"},
   993  			},
   994  			spec: &command.InputSpec{
   995  				VirtualInputs: []*command.VirtualInput{
   996  					{Path: "foobarSymDir/foo", Contents: fooBlob, IsExecutable: true},
   997  					{Path: "foobarSymDir/bar", Contents: barBlob},
   998  				},
   999  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
  1000  			},
  1001  			rootDir: &repb.Directory{
  1002  				// foobarSymDir should not be a directory.
  1003  				Directories: []*repb.DirectoryNode{{Name: "foobarDir", Digest: foobarDirDgPb}},
  1004  				Symlinks:    []*repb.SymlinkNode{{Name: "foobarSymDir", Target: "foobarDir"}},
  1005  			},
  1006  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob},
  1007  			wantCacheCalls: map[string]int{
  1008  				"foobarSymDir": 3, // 1 as added symlink ancestor, 2 as input ancestor
  1009  				// foobarDir should not have been followed as a target of the symlink since it was not an explicit input.
  1010  			},
  1011  			wantStats: &client.TreeStats{
  1012  				InputDirectories: 2, // Root and foobarDir
  1013  				InputFiles:       2,
  1014  				InputSymlinks:    1,
  1015  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size,
  1016  			},
  1017  			treeOpts: &client.TreeSymlinkOpts{
  1018  				Preserved:     true,
  1019  				FollowsTarget: true,
  1020  			},
  1021  		},
  1022  		{
  1023  			desc: "Intermediate directory relative symlink and input (preserved)",
  1024  			input: []*inputPath{
  1025  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
  1026  				{path: "foobarDir/bar", fileContents: barBlob},
  1027  				{path: "foobarSymDir", isSymlink: true, relSymlinkTarget: "foobarDir"},
  1028  			},
  1029  			spec: &command.InputSpec{
  1030  				// The directory symlink is also an input.
  1031  				// Must appear last to cover the corner case where it is seen as an ancestor before as an input.
  1032  				Inputs:              []string{"foobarSymDir/foo", "foobarSymDir/bar", "foobarSymDir"},
  1033  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
  1034  			},
  1035  			rootDir: &repb.Directory{
  1036  				// foobarSymDir should not be a directory.
  1037  				Directories: []*repb.DirectoryNode{{Name: "foobarDir", Digest: foobarDirDgPb}},
  1038  				Symlinks:    []*repb.SymlinkNode{{Name: "foobarSymDir", Target: "foobarDir"}},
  1039  			},
  1040  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob},
  1041  			wantCacheCalls: map[string]int{
  1042  				"foobarDir/foo": 2, // 1 via the dir and 1 via the symlink
  1043  				"foobarDir/bar": 2,
  1044  				"foobarDir":     3, // 1 as input, 2 as input ancestor
  1045  				"foobarSymDir":  4, // 1 as input, 1 as added symlink ancestor, 2 as input ancestor
  1046  			},
  1047  			wantStats: &client.TreeStats{
  1048  				InputDirectories: 2, // Root and foobarDir
  1049  				InputFiles:       2,
  1050  				InputSymlinks:    1,
  1051  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size,
  1052  			},
  1053  			treeOpts: &client.TreeSymlinkOpts{
  1054  				Preserved:     true,
  1055  				FollowsTarget: true,
  1056  			},
  1057  		},
  1058  		{
  1059  			desc: "Intermediate directory relative symlink (preserved, materialize)",
  1060  			input: []*inputPath{
  1061  				{path: "../foobarDirOrig/foo", fileContents: fooBlob, isExecutable: true},
  1062  				{path: "../foobarDirOrig/bar", fileContents: barBlob},
  1063  				{path: "foobarDir", isSymlink: true, relSymlinkTarget: "../foobarDirOrig"},
  1064  			},
  1065  			spec: &command.InputSpec{
  1066  				Inputs:              []string{"foobarDir", "foobarDir/foo", "foobarDir/bar"},
  1067  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
  1068  			},
  1069  			rootDir: &repb.Directory{
  1070  				// foobarDir should be materialized as a directory.
  1071  				Directories: []*repb.DirectoryNode{{Name: "foobarDir", Digest: foobarDirDgPb}},
  1072  			},
  1073  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob},
  1074  			wantCacheCalls: map[string]int{
  1075  				"foobarDir/foo": 2,
  1076  				"foobarDir/bar": 2,
  1077  				"foobarDir":     5, // 2 as input ancestor, 1 as input, 2 as nested input ancestor
  1078  			},
  1079  			wantStats: &client.TreeStats{
  1080  				InputDirectories: 2, // Root and foobarDir
  1081  				InputFiles:       2,
  1082  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size,
  1083  			},
  1084  			treeOpts: &client.TreeSymlinkOpts{
  1085  				Preserved:                  true,
  1086  				FollowsTarget:              true,
  1087  				MaterializeOutsideExecRoot: true,
  1088  			},
  1089  		},
  1090  		{
  1091  			desc: "Intermediate directory absolute symlink and input (preserved)",
  1092  			input: []*inputPath{
  1093  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
  1094  				{path: "foobarDir/bar", fileContents: barBlob},
  1095  				{path: "foobarSymDir", isSymlink: true, isAbsolute: true, relSymlinkTarget: "foobarDir"},
  1096  			},
  1097  			spec: &command.InputSpec{
  1098  				Inputs:              []string{"foobarSymDir", "foobarSymDir/foo", "foobarSymDir/bar"},
  1099  				InputNodeProperties: map[string]*cpb.NodeProperties{"foobarDir/foo": fooProperties},
  1100  			},
  1101  			rootDir: &repb.Directory{
  1102  				// foobarSymDir should not be a directory.
  1103  				Directories: []*repb.DirectoryNode{{Name: "foobarDir", Digest: foobarDirDgPb}},
  1104  				Symlinks:    []*repb.SymlinkNode{{Name: "foobarSymDir", Target: "foobarDir"}},
  1105  			},
  1106  			additionalBlobs: [][]byte{fooBlob, barBlob, foobarDirBlob},
  1107  			wantCacheCalls: map[string]int{
  1108  				"foobarDir/foo": 2, // 1 via the dir and 1 via the symlink
  1109  				"foobarDir/bar": 2,
  1110  				"foobarDir":     3, // 1 as target of the symlink, 2 as input ancestor
  1111  				"foobarSymDir":  3, // 1 as input, 2 as input ancestor
  1112  			},
  1113  			wantStats: &client.TreeStats{
  1114  				InputDirectories: 2, // Root and foobarDir
  1115  				InputFiles:       2,
  1116  				InputSymlinks:    1,
  1117  				TotalInputBytes:  fooDg.Size + barDg.Size + foobarDirDg.Size,
  1118  			},
  1119  			treeOpts: &client.TreeSymlinkOpts{
  1120  				Preserved:     true,
  1121  				FollowsTarget: true,
  1122  			},
  1123  		},
  1124  		{
  1125  			desc: "De-duplicating files",
  1126  			input: []*inputPath{
  1127  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1128  				{path: "foobarDir/foo", fileContents: fooBlob, isExecutable: true},
  1129  				{path: "foobarDir/bar", fileContents: barBlob},
  1130  			},
  1131  			spec: &command.InputSpec{
  1132  				Inputs:              []string{"fooDir", "foobarDir"},
  1133  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties, "foobarDir/foo": fooProperties},
  1134  			},
  1135  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1136  				{Name: "fooDir", Digest: fooDirDgPb},
  1137  				{Name: "foobarDir", Digest: foobarDirDgPb},
  1138  			}},
  1139  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, foobarDirBlob},
  1140  			wantCacheCalls: map[string]int{
  1141  				"fooDir":        1,
  1142  				"fooDir/foo":    1,
  1143  				"foobarDir":     1,
  1144  				"foobarDir/foo": 1,
  1145  				"foobarDir/bar": 1,
  1146  			},
  1147  			wantStats: &client.TreeStats{
  1148  				InputDirectories: 3,
  1149  				InputFiles:       3,
  1150  				TotalInputBytes:  2*fooDg.Size + fooDirDg.Size + barDg.Size + foobarDirDg.Size,
  1151  			},
  1152  		},
  1153  		{
  1154  			desc: "De-duplicating directories",
  1155  			input: []*inputPath{
  1156  				{path: "fooDir1/foo", fileContents: fooBlob, isExecutable: true},
  1157  				{path: "fooDir2/foo", fileContents: fooBlob, isExecutable: true},
  1158  			},
  1159  			spec: &command.InputSpec{
  1160  				Inputs:              []string{"fooDir1", "fooDir2"},
  1161  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir1/foo": fooProperties, "fooDir2/foo": fooProperties},
  1162  			},
  1163  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1164  				{Name: "fooDir1", Digest: fooDirDgPb},
  1165  				{Name: "fooDir2", Digest: fooDirDgPb},
  1166  			}},
  1167  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
  1168  			wantCacheCalls: map[string]int{
  1169  				"fooDir1":     1,
  1170  				"fooDir1/foo": 1,
  1171  				"fooDir2":     1,
  1172  				"fooDir2/foo": 1,
  1173  			},
  1174  			wantStats: &client.TreeStats{
  1175  				InputDirectories: 3,
  1176  				InputFiles:       2,
  1177  				TotalInputBytes:  2*fooDg.Size + 2*fooDirDg.Size,
  1178  			},
  1179  		},
  1180  		{
  1181  			desc: "De-duplicating files with directories",
  1182  			input: []*inputPath{
  1183  				{path: "fooDirBlob", fileContents: fooDirBlob, isExecutable: true},
  1184  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1185  			},
  1186  			spec: &command.InputSpec{
  1187  				Inputs:              []string{"fooDirBlob", "fooDir"},
  1188  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1189  			},
  1190  			rootDir: &repb.Directory{
  1191  				Directories: []*repb.DirectoryNode{{Name: "fooDir", Digest: fooDirDgPb}},
  1192  				Files:       []*repb.FileNode{{Name: "fooDirBlob", Digest: fooDirDgPb, IsExecutable: true}},
  1193  			},
  1194  			additionalBlobs: [][]byte{fooBlob, fooDirBlob},
  1195  			wantCacheCalls: map[string]int{
  1196  				"fooDirBlob": 1,
  1197  				"fooDir":     1,
  1198  				"fooDir/foo": 1,
  1199  			},
  1200  			wantStats: &client.TreeStats{
  1201  				InputDirectories: 2,
  1202  				InputFiles:       2,
  1203  				TotalInputBytes:  fooDg.Size + 2*fooDirDg.Size,
  1204  			},
  1205  		},
  1206  		{
  1207  			desc: "File exclusions",
  1208  			input: []*inputPath{
  1209  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1210  				{path: "fooDir/foo.txt", fileContents: fooBlob, isExecutable: true},
  1211  				{path: "barDir/bar", fileContents: barBlob},
  1212  				{path: "barDir/bar.txt", fileContents: barBlob},
  1213  			},
  1214  			spec: &command.InputSpec{
  1215  				Inputs: []string{"fooDir", "barDir"},
  1216  				InputExclusions: []*command.InputExclusion{
  1217  					&command.InputExclusion{Regex: `txt$`, Type: command.FileInputType},
  1218  				},
  1219  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1220  			},
  1221  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1222  				{Name: "barDir", Digest: barDirDgPb},
  1223  				{Name: "fooDir", Digest: fooDirDgPb},
  1224  			}},
  1225  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
  1226  			wantCacheCalls: map[string]int{
  1227  				"fooDir":         1,
  1228  				"fooDir/foo":     1,
  1229  				"fooDir/foo.txt": 1,
  1230  				"barDir":         1,
  1231  				"barDir/bar":     1,
  1232  				"barDir/bar.txt": 1,
  1233  			},
  1234  			wantStats: &client.TreeStats{
  1235  				InputDirectories: 3,
  1236  				InputFiles:       2,
  1237  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
  1238  			},
  1239  		},
  1240  		{
  1241  			desc: "Directory exclusions",
  1242  			input: []*inputPath{
  1243  				{path: "foo", fileContents: fooBlob, isExecutable: true},
  1244  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1245  				{path: "barDir/bar", fileContents: barBlob},
  1246  			},
  1247  			spec: &command.InputSpec{
  1248  				Inputs: []string{"foo", "fooDir", "barDir"},
  1249  				InputExclusions: []*command.InputExclusion{
  1250  					&command.InputExclusion{Regex: `foo`, Type: command.DirectoryInputType},
  1251  				},
  1252  			},
  1253  			rootDir: &repb.Directory{
  1254  				Directories: []*repb.DirectoryNode{{Name: "barDir", Digest: barDirDgPb}},
  1255  				Files:       []*repb.FileNode{{Name: "foo", Digest: fooDgPb, IsExecutable: true}},
  1256  			},
  1257  			additionalBlobs: [][]byte{fooBlob, barBlob, barDirBlob},
  1258  			wantCacheCalls: map[string]int{
  1259  				"foo":        1,
  1260  				"fooDir":     1,
  1261  				"barDir":     1,
  1262  				"barDir/bar": 1,
  1263  			},
  1264  			wantStats: &client.TreeStats{
  1265  				InputDirectories: 2,
  1266  				InputFiles:       2,
  1267  				TotalInputBytes:  fooDg.Size + barDg.Size + barDirDg.Size,
  1268  			},
  1269  		},
  1270  		{
  1271  			desc: "All type exclusions",
  1272  			input: []*inputPath{
  1273  				{path: "foo", fileContents: fooBlob, isExecutable: true},
  1274  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1275  				{path: "barDir/bar", fileContents: barBlob},
  1276  			},
  1277  			spec: &command.InputSpec{
  1278  				Inputs: []string{"foo", "fooDir", "barDir"},
  1279  				InputExclusions: []*command.InputExclusion{
  1280  					&command.InputExclusion{Regex: `foo`, Type: command.UnspecifiedInputType},
  1281  				},
  1282  			},
  1283  			rootDir: &repb.Directory{
  1284  				Directories: []*repb.DirectoryNode{{Name: "barDir", Digest: barDirDgPb}},
  1285  			},
  1286  			additionalBlobs: [][]byte{barBlob, barDirBlob},
  1287  			wantCacheCalls: map[string]int{
  1288  				"foo":        1,
  1289  				"fooDir":     1,
  1290  				"barDir":     1,
  1291  				"barDir/bar": 1,
  1292  			},
  1293  			wantStats: &client.TreeStats{
  1294  				InputDirectories: 2,
  1295  				InputFiles:       1,
  1296  				TotalInputBytes:  barDg.Size + barDirDg.Size,
  1297  			},
  1298  		},
  1299  		{
  1300  			desc: "Virtual inputs",
  1301  			spec: &command.InputSpec{
  1302  				VirtualInputs: []*command.VirtualInput{
  1303  					&command.VirtualInput{Path: "fooDir/foo", Contents: fooBlob, IsExecutable: true},
  1304  					&command.VirtualInput{Path: "barDir/bar", Contents: barBlob},
  1305  				},
  1306  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1307  			},
  1308  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1309  				{Name: "barDir", Digest: barDirDgPb},
  1310  				{Name: "fooDir", Digest: fooDirDgPb},
  1311  			}},
  1312  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
  1313  			wantStats: &client.TreeStats{
  1314  				InputDirectories: 3,
  1315  				InputFiles:       2,
  1316  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
  1317  			},
  1318  		},
  1319  		{
  1320  			desc: "Physical inputs supersede virtual inputs",
  1321  			input: []*inputPath{
  1322  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1323  				{path: "barDir/bar", fileContents: barBlob},
  1324  			},
  1325  			spec: &command.InputSpec{
  1326  				Inputs: []string{"fooDir", "barDir"},
  1327  				VirtualInputs: []*command.VirtualInput{
  1328  					&command.VirtualInput{Path: "fooDir/foo", Contents: barBlob, IsExecutable: true},
  1329  					&command.VirtualInput{Path: "barDir/bar", IsEmptyDirectory: true},
  1330  				},
  1331  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1332  			},
  1333  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1334  				{Name: "barDir", Digest: barDirDgPb},
  1335  				{Name: "fooDir", Digest: fooDirDgPb},
  1336  			}},
  1337  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
  1338  			wantCacheCalls: map[string]int{
  1339  				"fooDir":     1,
  1340  				"fooDir/foo": 1,
  1341  				"barDir":     1,
  1342  				"barDir/bar": 1,
  1343  			},
  1344  			wantStats: &client.TreeStats{
  1345  				InputDirectories: 3,
  1346  				InputFiles:       2,
  1347  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
  1348  			},
  1349  		},
  1350  		{
  1351  			desc: "Virtual inputs as ancestors of physical inputs",
  1352  			input: []*inputPath{
  1353  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1354  				{path: "barDir/bar", fileContents: barBlob},
  1355  			},
  1356  			spec: &command.InputSpec{
  1357  				Inputs: []string{"fooDir", "barDir"},
  1358  				VirtualInputs: []*command.VirtualInput{
  1359  					&command.VirtualInput{Path: "barDir", IsEmptyDirectory: true},
  1360  				},
  1361  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1362  			},
  1363  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1364  				{Name: "barDir", Digest: barDirDgPb},
  1365  				{Name: "fooDir", Digest: fooDirDgPb},
  1366  			}},
  1367  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
  1368  			wantCacheCalls: map[string]int{
  1369  				"fooDir":     1,
  1370  				"fooDir/foo": 1,
  1371  				"barDir":     1,
  1372  				"barDir/bar": 1,
  1373  			},
  1374  			wantStats: &client.TreeStats{
  1375  				InputDirectories: 3,
  1376  				InputFiles:       2,
  1377  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
  1378  			},
  1379  		},
  1380  		{
  1381  			desc: "Virtual inputs as children of physical inputs",
  1382  			input: []*inputPath{
  1383  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1384  				{path: "bar", fileContents: barBlob},
  1385  			},
  1386  			spec: &command.InputSpec{
  1387  				Inputs: []string{"fooDir", "bar"},
  1388  				VirtualInputs: []*command.VirtualInput{
  1389  					&command.VirtualInput{Path: "bar/baz", IsEmptyDirectory: true},
  1390  				},
  1391  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1392  			},
  1393  			rootDir: &repb.Directory{
  1394  				Directories: []*repb.DirectoryNode{
  1395  					{Name: "bar", Digest: vBarDirDgPb},
  1396  					{Name: "fooDir", Digest: fooDirDgPb},
  1397  				},
  1398  				Files: []*repb.FileNode{{Name: "bar", Digest: barDgPb}},
  1399  			},
  1400  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, vBarDirBlob, []byte{}},
  1401  			wantCacheCalls: map[string]int{
  1402  				"fooDir":     1,
  1403  				"fooDir/foo": 1,
  1404  				"bar":        1,
  1405  			},
  1406  			wantStats: &client.TreeStats{
  1407  				InputDirectories: 4,
  1408  				InputFiles:       2,
  1409  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + vBarDirDg.Size,
  1410  			},
  1411  		},
  1412  		{
  1413  			desc: "Virtual inputs as ancestors of virtual inputs",
  1414  			input: []*inputPath{
  1415  				{path: "fooDir/foo", fileContents: fooBlob, isExecutable: true},
  1416  				{path: "bar", fileContents: barBlob},
  1417  			},
  1418  			spec: &command.InputSpec{
  1419  				Inputs: []string{"fooDir", "bar"},
  1420  				VirtualInputs: []*command.VirtualInput{
  1421  					&command.VirtualInput{Path: "bar/baz", IsEmptyDirectory: true},
  1422  					&command.VirtualInput{Path: "bar", IsEmptyDirectory: true},
  1423  				},
  1424  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1425  			},
  1426  			rootDir: &repb.Directory{
  1427  				Directories: []*repb.DirectoryNode{
  1428  					{Name: "bar", Digest: vBarDirDgPb},
  1429  					{Name: "fooDir", Digest: fooDirDgPb},
  1430  				},
  1431  				Files: []*repb.FileNode{{Name: "bar", Digest: barDgPb}},
  1432  			},
  1433  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, vBarDirBlob, []byte{}},
  1434  			wantCacheCalls: map[string]int{
  1435  				"fooDir":     1,
  1436  				"fooDir/foo": 1,
  1437  				"bar":        1,
  1438  			},
  1439  			wantStats: &client.TreeStats{
  1440  				InputDirectories: 4,
  1441  				InputFiles:       2,
  1442  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + vBarDirDg.Size,
  1443  			},
  1444  		},
  1445  		{
  1446  			desc: "Normalizing virtual input paths",
  1447  			spec: &command.InputSpec{
  1448  				VirtualInputs: []*command.VirtualInput{
  1449  					&command.VirtualInput{Path: "//fooDir/../fooDir/foo", Contents: fooBlob, IsExecutable: true},
  1450  					&command.VirtualInput{Path: "barDir///bar", Contents: barBlob},
  1451  				},
  1452  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1453  			},
  1454  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1455  				{Name: "barDir", Digest: barDirDgPb},
  1456  				{Name: "fooDir", Digest: fooDirDgPb},
  1457  			}},
  1458  			additionalBlobs: [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob},
  1459  			wantStats: &client.TreeStats{
  1460  				InputDirectories: 3,
  1461  				InputFiles:       2,
  1462  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
  1463  			},
  1464  		},
  1465  		{
  1466  			desc: "Virtual inputs with digests",
  1467  			spec: &command.InputSpec{
  1468  				VirtualInputs: []*command.VirtualInput{
  1469  					&command.VirtualInput{Path: "fooDir/foo", Digest: fooDg.String(), IsExecutable: true},
  1470  					&command.VirtualInput{Path: "barDir/bar", Digest: barDg.String()},
  1471  				},
  1472  				InputNodeProperties: map[string]*cpb.NodeProperties{"fooDir/foo": fooProperties},
  1473  			},
  1474  			rootDir: &repb.Directory{Directories: []*repb.DirectoryNode{
  1475  				{Name: "barDir", Digest: barDirDgPb},
  1476  				{Name: "fooDir", Digest: fooDirDgPb},
  1477  			}},
  1478  			additionalBlobs: [][]byte{fooDirBlob, barDirBlob},
  1479  			wantStats: &client.TreeStats{
  1480  				InputDirectories: 3,
  1481  				InputFiles:       2,
  1482  				TotalInputBytes:  fooDg.Size + fooDirDg.Size + barDg.Size + barDirDg.Size,
  1483  			},
  1484  		},
  1485  		{
  1486  			// NOTE: The use of maps in our implementation means that the traversal order is unstable. The
  1487  			// outputs are required to be in lexicographic order, so if ComputeMerkleTree is not sorting
  1488  			// correctly, this test will fail (except on the rare occasion that the traversal order happens
  1489  			// to be correct by coincidence).
  1490  			desc: "Correct sorting",
  1491  			input: []*inputPath{
  1492  				{path: "a", fileContents: fooBlob, isExecutable: true},
  1493  				{path: "b", fileContents: fooBlob, isExecutable: true},
  1494  				{path: "c", fileContents: fooBlob, isExecutable: true},
  1495  				{path: "d", fileContents: barBlob},
  1496  				{path: "e", fileContents: barBlob},
  1497  				{path: "f", fileContents: barBlob},
  1498  				{path: "g/foo", fileContents: fooBlob, isExecutable: true},
  1499  				{path: "h/foo", fileContents: fooBlob, isExecutable: true},
  1500  				{path: "i/foo", fileContents: fooBlob, isExecutable: true},
  1501  				{path: "j/bar", fileContents: barBlob},
  1502  				{path: "k/bar", fileContents: barBlob},
  1503  				{path: "l/bar", fileContents: barBlob},
  1504  			},
  1505  			spec: &command.InputSpec{
  1506  				Inputs: []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l"},
  1507  				InputNodeProperties: map[string]*cpb.NodeProperties{
  1508  					"g/foo": fooProperties,
  1509  					"h/foo": fooProperties,
  1510  					"i/foo": fooProperties,
  1511  				},
  1512  			},
  1513  			rootDir: &repb.Directory{
  1514  				Files: []*repb.FileNode{
  1515  					{Name: "a", Digest: fooDgPb, IsExecutable: true},
  1516  					{Name: "b", Digest: fooDgPb, IsExecutable: true},
  1517  					{Name: "c", Digest: fooDgPb, IsExecutable: true},
  1518  					{Name: "d", Digest: barDgPb},
  1519  					{Name: "e", Digest: barDgPb},
  1520  					{Name: "f", Digest: barDgPb},
  1521  				},
  1522  				Directories: []*repb.DirectoryNode{
  1523  					{Name: "g", Digest: fooDirDgPb},
  1524  					{Name: "h", Digest: fooDirDgPb},
  1525  					{Name: "i", Digest: fooDirDgPb},
  1526  					{Name: "j", Digest: barDirDgPb},
  1527  					{Name: "k", Digest: barDirDgPb},
  1528  					{Name: "l", Digest: barDirDgPb},
  1529  				},
  1530  			},
  1531  			additionalBlobs: [][]byte{fooBlob, fooDirBlob, barBlob, barDirBlob},
  1532  			wantCacheCalls: map[string]int{
  1533  				"a":     1,
  1534  				"b":     1,
  1535  				"c":     1,
  1536  				"d":     1,
  1537  				"e":     1,
  1538  				"f":     1,
  1539  				"g":     1,
  1540  				"h":     1,
  1541  				"i":     1,
  1542  				"j":     1,
  1543  				"k":     1,
  1544  				"l":     1,
  1545  				"g/foo": 1,
  1546  				"h/foo": 1,
  1547  				"i/foo": 1,
  1548  				"j/bar": 1,
  1549  				"k/bar": 1,
  1550  				"l/bar": 1,
  1551  			},
  1552  			wantStats: &client.TreeStats{
  1553  				InputDirectories: 7,
  1554  				InputFiles:       12,
  1555  				TotalInputBytes:  12*fooDg.Size + 3*fooDirDg.Size + 3*barDirDg.Size,
  1556  			},
  1557  		},
  1558  	}
  1559  
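        	// Each case builds its input tree under a fresh temp root, runs ComputeMerkleTree against a fake
        	// environment, and compares the returned root digest, uploadable blobs, file metadata cache
        	// accesses, and tree stats against the expectations above.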
  1560  	for _, tc := range tests {
  1561  		root := t.TempDir()
  1562  		if err := construct(root, tc.input); err != nil {
  1563  			t.Fatalf("failed to construct input dir structure: %v", err)
  1564  		}
  1565  
  1566  		t.Run(tc.desc, func(t *testing.T) {
  1567  			wantBlobs := make(map[digest.Digest][]byte)
  1568  			rootBlob := mustMarshal(tc.rootDir)
  1569  			rootDg := digest.NewFromBlob(rootBlob)
  1570  			wantBlobs[rootDg] = rootBlob
  1571  			tc.wantStats.TotalInputBytes += int64(len(rootBlob))
  1572  			for _, b := range tc.additionalBlobs {
  1573  				wantBlobs[digest.NewFromBlob(b)] = b
  1574  			}
  1575  
  1576  			gotBlobs := make(map[digest.Digest][]byte)
  1577  			cache := newCallCountingMetadataCache(root, t)
  1578  
  1579  			e, cleanup := fakes.NewTestEnv(t)
  1580  			defer cleanup()
  1581  			tc.treeOpts.Apply(e.Client.GrpcClient)
  1582  
  1583  			gotRootDg, inputs, stats, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), root, "", "", tc.spec, cache)
  1584  			if err != nil {
  1585  				t.Errorf("ComputeMerkleTree(...) gave error %q, want success", err)
  1586  			}
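        			// Reassemble each non-virtual upload entry through the chunker to recover its full
        			// contents, so the set of blobs that would be uploaded can be compared against wantBlobs.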
  1587  			for _, ue := range inputs {
  1588  				if ue.IsVirtualFile() {
  1589  					continue
  1590  				}
  1591  				ch, err := chunker.New(ue, false, int(e.Client.GrpcClient.ChunkMaxSize))
  1592  				if err != nil {
  1593  					t.Fatalf("chunker.New(ue): failed to create chunker from UploadEntry: %v", err)
  1594  				}
  1595  				blob, err := ch.FullData()
  1596  				if err != nil {
  1597  					t.Errorf("chunker %v FullData() returned error %v", ch, err)
  1598  				}
  1599  				gotBlobs[ue.Digest] = blob
  1600  			}
  1601  			if diff := cmp.Diff(rootDg, gotRootDg); diff != "" {
  1602  				t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on root:\n%s", diff)
  1603  				if gotRootBlob, ok := gotBlobs[gotRootDg]; ok {
  1604  					gotRoot := new(repb.Directory)
  1605  					if err := proto.Unmarshal(gotRootBlob, gotRoot); err != nil {
  1606  						t.Errorf("  When unpacking root blob, got error: %s", err)
  1607  					} else {
  1608  						diff := cmp.Diff(tc.rootDir, gotRoot, protocmp.Transform())
  1609  						t.Errorf("  Diff between unpacked roots (-want +got):\n%s", diff)
  1610  					}
  1611  				} else {
  1612  					t.Errorf("  Returned root digest not present in blobs map")
  1613  				}
  1614  			}
  1615  			if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" {
  1616  				t.Errorf("ComputeMerkleTree(...) gave diff (-want +got) on blobs:\n%s", diff)
  1617  			}
  1618  			if diff := cmp.Diff(tc.wantCacheCalls, cache.calls, cmpopts.EquateEmpty()); diff != "" {
  1619  				t.Errorf("ComputeMerkleTree(...) gave diff on file metadata cache access (-want +got):\n%s", diff)
  1620  			}
  1621  			if diff := cmp.Diff(tc.wantStats, stats); diff != "" {
  1622  				t.Errorf("ComputeMerkleTree(...) gave diff on stats (-want +got):\n%s", diff)
  1623  			}
  1624  		})
  1625  	}
  1626  }
  1627  
  1628  func TestComputeMerkleTreeErrors(t *testing.T) {
  1629  	tests := []struct {
  1630  		desc     string
  1631  		input    []*inputPath
  1632  		spec     *command.InputSpec
  1633  		treeOpts *client.TreeSymlinkOpts
  1634  	}{
  1635  		{
  1636  			desc: "empty input",
  1637  			spec: &command.InputSpec{Inputs: []string{""}},
  1638  		},
  1639  		{
  1640  			desc: "empty virtual input",
  1641  			spec: &command.InputSpec{
  1642  				VirtualInputs: []*command.VirtualInput{
  1643  					&command.VirtualInput{Path: "", Contents: []byte("foo")},
  1644  				},
  1645  			},
  1646  		},
  1647  		{
  1648  			desc: "virtual input specifies content and digest",
  1649  			spec: &command.InputSpec{
  1650  				VirtualInputs: []*command.VirtualInput{
  1651  					&command.VirtualInput{Path: "", Contents: fooBlob, Digest: fooDg.String()},
  1652  				},
  1653  			},
  1654  		},
  1655  		{
  1656  			desc: "virtual input has invalid digest",
  1657  			spec: &command.InputSpec{
  1658  				VirtualInputs: []*command.VirtualInput{
  1659  					&command.VirtualInput{Path: "", Digest: "Not a real digest"},
  1660  				},
  1661  			},
  1662  		},
  1663  		{
  1664  			desc: "missing input",
  1665  			spec: &command.InputSpec{Inputs: []string{"foo"}},
  1666  		},
  1667  		{
  1668  			desc: "missing nested input",
  1669  			input: []*inputPath{
  1670  				{path: "a", fileContents: []byte("a")},
  1671  				{path: "dir/a", fileContents: []byte("a")},
  1672  			},
  1673  			spec: &command.InputSpec{Inputs: []string{"a", "dir", "dir/b"}},
  1674  		},
  1675  		{
  1676  			desc: "Preserved symlink escaping exec root",
  1677  			input: []*inputPath{
  1678  				{path: "../foo", fileContents: fooBlob, isExecutable: true},
  1679  				{path: "escapingFoo", isSymlink: true, relSymlinkTarget: "../foo"},
  1680  			},
  1681  			spec: &command.InputSpec{
  1682  				Inputs: []string{"escapingFoo"},
  1683  			},
  1684  			treeOpts: &client.TreeSymlinkOpts{
  1685  				Preserved: true,
  1686  			},
  1687  		},
  1688  		{
  1689  			desc: "Materialization of dangling symlink pointing outside exec root fails",
  1690  			input: []*inputPath{
  1691  				{path: "danglingSym", isSymlink: true, relSymlinkTarget: "../doesNotExist"},
  1692  			},
  1693  			spec: &command.InputSpec{
  1694  				Inputs: []string{"danglingSym"},
  1695  			},
  1696  			treeOpts: &client.TreeSymlinkOpts{
  1697  				MaterializeOutsideExecRoot: true,
  1698  				Preserved:                  true,
  1699  			},
  1700  		},
  1701  	}
  1702  
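        	// Every case above describes an invalid input spec; ComputeMerkleTree is expected to return an error.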
  1703  	for _, tc := range tests {
  1704  		root := t.TempDir()
  1705  		if err := construct(root, tc.input); err != nil {
  1706  			t.Fatalf("failed to construct input dir structure: %v", err)
  1707  		}
  1708  		t.Run(tc.desc, func(t *testing.T) {
  1709  			e, cleanup := fakes.NewTestEnv(t)
  1710  			defer cleanup()
  1711  			tc.treeOpts.Apply(e.Client.GrpcClient)
  1712  
  1713  			if _, _, _, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), root, "", "", tc.spec, filemetadata.NewNoopCache()); err == nil {
  1714  				t.Errorf("ComputeMerkleTree(%v) succeeded, want error", tc.spec)
  1715  			}
  1716  		})
  1717  	}
  1718  }
  1719  
  1720  func TestFlattenTreeRepeated(t *testing.T) {
  1721  	// Directory structure:
  1722  	// <root>
  1723  	//  +-baz -> digest 1003/10 (rw)
  1724  	//  +-a
  1725  	//    + b
  1726  	//      + c    ## empty subdir
  1727  	//      +-foo -> digest 1001/1 (rw)
  1728  	//      +-bar -> digest 1002/2 (rwx)
  1729  	//  + b
  1730  	//    + c    ## empty subdir
  1731  	//    +-foo -> digest 1001/1 (rw)
  1732  	//    +-bar -> digest 1002/2 (rwx)
  1733  	//  + c    ## empty subdir
  1734  	fooDigest := digest.TestNew("1001", 1)
  1735  	barDigest := digest.TestNew("1002", 2)
  1736  	bazDigest := digest.TestNew("1003", 10)
  1737  	dirC := &repb.Directory{}
  1738  	cDigest := digest.TestNewFromMessage(dirC)
  1739  	dirB := &repb.Directory{
  1740  		Files: []*repb.FileNode{
  1741  			{Name: "foo", Digest: fooDigest.ToProto(), IsExecutable: false},
  1742  			{Name: "bar", Digest: barDigest.ToProto(), IsExecutable: true},
  1743  		},
  1744  		Directories: []*repb.DirectoryNode{
  1745  			{Name: "c", Digest: cDigest.ToProto()},
  1746  		},
  1747  	}
  1748  	bDigest := digest.TestNewFromMessage(dirB)
  1749  	dirA := &repb.Directory{
  1750  		Directories: []*repb.DirectoryNode{
  1751  			{Name: "b", Digest: bDigest.ToProto()},
  1752  		}}
  1753  	aDigest := digest.TestNewFromMessage(dirA)
  1754  	root := &repb.Directory{
  1755  		Directories: []*repb.DirectoryNode{
  1756  			{Name: "a", Digest: aDigest.ToProto()},
  1757  			{Name: "b", Digest: bDigest.ToProto()},
  1758  			{Name: "c", Digest: cDigest.ToProto()},
  1759  		},
  1760  		Files: []*repb.FileNode{
  1761  			{Name: "baz", Digest: bazDigest.ToProto()},
  1762  		},
  1763  	}
  1764  	tree := &repb.Tree{
  1765  		Root:     root,
  1766  		Children: []*repb.Directory{dirA, dirB, dirC},
  1767  	}
  1768  
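        	// dirB is referenced both at the root and under a, and the empty dirC appears in three places;
        	// FlattenTree should expand each shared digest into a separate output per path under the prefix "x".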
  1769  	e, cleanup := fakes.NewTestEnv(t)
  1770  	defer cleanup()
  1771  
  1772  	outputs, err := e.Client.GrpcClient.FlattenTree(tree, "x")
  1773  	if err != nil {
  1774  		t.Errorf("FlattenTree gave error %v", err)
  1775  	}
  1776  	wantOutputs := map[string]*client.TreeOutput{
  1777  		"x/baz":     &client.TreeOutput{Digest: bazDigest},
  1778  		"x/a/b/c":   &client.TreeOutput{IsEmptyDirectory: true, Digest: digest.Empty},
  1779  		"x/a/b/foo": &client.TreeOutput{Digest: fooDigest},
  1780  		"x/a/b/bar": &client.TreeOutput{Digest: barDigest, IsExecutable: true},
  1781  		"x/b/c":     &client.TreeOutput{IsEmptyDirectory: true, Digest: digest.Empty},
  1782  		"x/b/foo":   &client.TreeOutput{Digest: fooDigest},
  1783  		"x/b/bar":   &client.TreeOutput{Digest: barDigest, IsExecutable: true},
  1784  		"x/c":       &client.TreeOutput{IsEmptyDirectory: true, Digest: digest.Empty},
  1785  	}
  1786  	if len(outputs) != len(wantOutputs) {
  1787  		t.Errorf("FlattenTree gave wrong number of outputs: want %d, got %d", len(wantOutputs), len(outputs))
  1788  	}
  1789  	for path, wantOut := range wantOutputs {
  1790  		got, ok := outputs[path]
  1791  		if !ok {
  1792  			t.Errorf("expected output %s is missing", path)
        			continue // got is nil here; dereferencing it below would panic.
  1793  		}
  1794  		if got.Path != path {
  1795  			t.Errorf("FlattenTree keyed %s output with %s path", got.Path, path)
  1796  		}
  1797  		if wantOut.Digest != got.Digest {
  1798  			t.Errorf("FlattenTree gave digest diff on %s: want %v, got: %v", path, wantOut.Digest, got.Digest)
  1799  		}
  1800  		if wantOut.IsExecutable != got.IsExecutable {
  1801  			t.Errorf("FlattenTree gave IsExecutable diff on %s: want %v, got: %v", path, wantOut.IsExecutable, got.IsExecutable)
  1802  		}
  1803  	}
  1804  }
  1805  
  1806  func TestComputeOutputsToUploadFiles(t *testing.T) {
  1807  	tests := []struct {
  1808  		desc           string
  1809  		input          []*inputPath
  1810  		wd             string
  1811  		paths          []string
  1812  		nodeProperties map[string]*cpb.NodeProperties
  1813  		wantResult     *repb.ActionResult
  1814  		wantBlobs      [][]byte
  1815  		wantCacheCalls map[string]int
  1816  	}{
  1817  		{
  1818  			desc:       "Empty paths",
  1819  			input:      nil,
  1820  			wantResult: &repb.ActionResult{},
  1821  		},
  1822  		{
  1823  			desc: "Missing output",
  1824  			input: []*inputPath{
  1825  				{path: "foo", fileContents: fooBlob, isExecutable: true},
  1826  			},
  1827  			paths:          []string{"foo", "bar"},
  1828  			nodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
  1829  			wantBlobs:      [][]byte{fooBlob},
  1830  			wantResult: &repb.ActionResult{
  1831  				OutputFiles: []*repb.OutputFile{&repb.OutputFile{Path: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)}},
  1832  			},
  1833  			wantCacheCalls: map[string]int{
  1834  				"bar": 1,
  1835  				"foo": 1,
  1836  			},
  1837  		},
  1838  		{
  1839  			desc: "Two files",
  1840  			input: []*inputPath{
  1841  				{path: "foo", fileContents: fooBlob, isExecutable: true},
  1842  				{path: "bar", fileContents: barBlob},
  1843  			},
  1844  			paths:          []string{"foo", "bar"},
  1845  			nodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
  1846  			wantBlobs:      [][]byte{fooBlob, barBlob},
  1847  			wantResult: &repb.ActionResult{
  1848  				OutputFiles: []*repb.OutputFile{
  1849  					// Note the outputs are not sorted.
  1850  					&repb.OutputFile{Path: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)},
  1851  					&repb.OutputFile{Path: "bar", Digest: barDgPb},
  1852  				},
  1853  			},
  1854  			wantCacheCalls: map[string]int{
  1855  				"bar": 1,
  1856  				"foo": 1,
  1857  			},
  1858  		},
  1859  		{
  1860  			desc: "Two files under working dir",
  1861  			input: []*inputPath{
  1862  				{path: "wd/foo", fileContents: fooBlob, isExecutable: true},
  1863  				{path: "bar", fileContents: barBlob},
  1864  			},
  1865  			paths:          []string{"foo", "../bar"},
  1866  			nodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
  1867  			wd:             "wd",
  1868  			wantBlobs:      [][]byte{fooBlob, barBlob},
  1869  			wantResult: &repb.ActionResult{
  1870  				OutputFiles: []*repb.OutputFile{
  1871  					// Note the outputs are not sorted.
  1872  					&repb.OutputFile{Path: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)},
  1873  					&repb.OutputFile{Path: "../bar", Digest: barDgPb},
  1874  				},
  1875  			},
  1876  			wantCacheCalls: map[string]int{
  1877  				"bar":    1,
  1878  				"wd/foo": 1,
  1879  			},
  1880  		},
  1881  		{
  1882  			desc: "Symlink",
  1883  			input: []*inputPath{
  1884  				{path: "bar", fileContents: barBlob},
  1885  				{path: "dir1/dir2/bar", isSymlink: true, relSymlinkTarget: "../../bar"},
  1886  			},
  1887  			paths:     []string{"dir1/dir2/bar"},
  1888  			wantBlobs: [][]byte{barBlob},
  1889  			wantResult: &repb.ActionResult{
  1890  				OutputFiles: []*repb.OutputFile{
  1891  					&repb.OutputFile{Path: "dir1/dir2/bar", Digest: barDgPb},
  1892  				},
  1893  			},
  1894  			wantCacheCalls: map[string]int{
  1895  				"dir1/dir2/bar": 1,
  1896  			},
  1897  		},
  1898  		{
  1899  			desc: "Duplicate file contents",
  1900  			input: []*inputPath{
  1901  				{path: "foo", fileContents: fooBlob, isExecutable: true},
  1902  				{path: "bar", fileContents: fooBlob},
  1903  			},
  1904  			paths:          []string{"foo", "bar"},
  1905  			nodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
  1906  			wantBlobs:      [][]byte{fooBlob},
  1907  			wantResult: &repb.ActionResult{
  1908  				OutputFiles: []*repb.OutputFile{
  1909  					// Note the outputs are not sorted.
  1910  					&repb.OutputFile{Path: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)},
  1911  					&repb.OutputFile{Path: "bar", Digest: fooDgPb},
  1912  				},
  1913  			},
  1914  			wantCacheCalls: map[string]int{
  1915  				"bar": 1,
  1916  				"foo": 1,
  1917  			},
  1918  		},
  1919  	}
  1920  
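        	// Each case materializes its inputs under a temp root and runs ComputeOutputsToUpload relative to
        	// tc.wd, checking the uploaded blobs, metadata cache accesses, and the resulting ActionResult.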
  1921  	for _, tc := range tests {
  1922  		root := t.TempDir()
  1923  		if err := construct(root, tc.input); err != nil {
  1924  			t.Fatalf("failed to construct input dir structure: %v", err)
  1925  		}
  1926  
  1927  		t.Run(tc.desc, func(t *testing.T) {
  1928  			wantBlobs := make(map[digest.Digest][]byte)
  1929  			for _, b := range tc.wantBlobs {
  1930  				wantBlobs[digest.NewFromBlob(b)] = b
  1931  			}
  1932  
  1933  			gotBlobs := make(map[digest.Digest][]byte)
  1934  			cache := newCallCountingMetadataCache(root, t)
  1935  			e, cleanup := fakes.NewTestEnv(t)
  1936  			defer cleanup()
  1937  
  1938  			inputs, gotResult, err := e.Client.GrpcClient.ComputeOutputsToUpload(root, tc.wd, tc.paths, cache, command.UnspecifiedSymlinkBehavior, tc.nodeProperties)
  1939  			if err != nil {
  1940  				t.Errorf("ComputeOutputsToUpload(...) gave error %v, want success", err)
  1941  			}
  1942  			for _, ue := range inputs {
  1943  				ch, err := chunker.New(ue, false, int(e.Client.GrpcClient.ChunkMaxSize))
  1944  				if err != nil {
  1945  					t.Fatalf("chunker.New(ue): failed to create chunker from UploadEntry: %v", err)
  1946  				}
  1947  				blob, err := ch.FullData()
  1948  				if err != nil {
  1949  					t.Errorf("chunker %v FullData() returned error %v", ch, err)
  1950  				}
  1951  				gotBlobs[ue.Digest] = blob
  1952  			}
  1953  			if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" {
  1954  				t.Errorf("ComputeOutputsToUpload(...) gave diff (-want +got) on blobs:\n%s", diff)
  1955  			}
  1956  			if diff := cmp.Diff(tc.wantCacheCalls, cache.calls, cmpopts.EquateEmpty()); diff != "" {
  1957  				t.Errorf("ComputeOutputsToUpload(...) gave diff on file metadata cache access (-want +got):\n%s", diff)
  1958  			}
  1959  			if diff := cmp.Diff(tc.wantResult, gotResult, cmp.Comparer(proto.Equal)); diff != "" {
  1960  				t.Errorf("ComputeOutputsToUpload(...) gave diff on action result (-want +got):\n%s", diff)
  1961  			}
  1962  		})
  1963  	}
  1964  }
  1965  
  1966  func TestComputeOutputsToUploadDirectories(t *testing.T) {
  1967  	/*
  1968  		We want to test that the directory tree is built consistently and in lexicographical order of its paths.
  1969  		Building a directory with the following structure:
  1970  
  1971  		dirA
  1972  		  -> dirC
  1973  		  -> dirF
  1974  		dirB
  1975  		  -> dirD
  1976  		  -> dirE
  1977  
  1978  		We should get a Tree like
  1979  		{
  1980  			root: <root node>
  1981  			children: [dirA, dirC, dirF, dirB, dirD, dirE]
  1982  		}
  1983  	*/
  1984  
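        	// dirA and dirB nest the package-level fixture directories: dirC and dirF reuse barDir and fooDir,
        	// while dirD and dirE reuse bazDir and foobarDir.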
  1985  	dirA := &repb.Directory{
  1986  		Directories: []*repb.DirectoryNode{
  1987  			{Name: "dirC", Digest: barDirDgPb},
  1988  			{Name: "dirF", Digest: fooDirDgPb},
  1989  		},
  1990  	}
  1991  	dirABlob := mustMarshal(dirA)
  1992  	dirADg := digest.NewFromBlob(dirABlob)
  1993  
  1994  	dirB := &repb.Directory{
  1995  		Directories: []*repb.DirectoryNode{
  1996  			{Name: "dirD", Digest: bazDirDgPb},
  1997  			{Name: "dirE", Digest: foobarDirDgPb},
  1998  		},
  1999  	}
  2000  	dirBBlob := mustMarshal(dirB)
  2001  	dirBDg := digest.NewFromBlob(dirBBlob)
  2002  
  2003  	tests := []struct {
  2004  		desc           string
  2005  		input          []*inputPath
  2006  		nodeProperties map[string]*cpb.NodeProperties
  2007  		// The blobs are everything else outside of the Tree proto itself.
  2008  		wantBlobs        [][]byte
  2009  		wantTreeRoot     *repb.Directory
  2010  		wantTreeChildren []*repb.Directory
  2011  		wantCacheCalls   map[string]int
  2012  	}{
  2013  		{
  2014  			desc: "Two files",
  2015  			input: []*inputPath{
  2016  				{path: "a/b/fooDir/foo", fileContents: fooBlob, isExecutable: true},
  2017  				{path: "a/b/fooDir/bar", fileContents: barBlob},
  2018  			},
  2019  			nodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
  2020  			wantBlobs:      [][]byte{fooBlob, barBlob},
  2021  			wantTreeRoot:   foobarDir,
  2022  			wantCacheCalls: map[string]int{
  2023  				"a/b/fooDir":     2,
  2024  				"a/b/fooDir/bar": 1,
  2025  				"a/b/fooDir/foo": 1,
  2026  			},
  2027  		},
  2028  		{
  2029  			desc: "Duplicate file contents",
  2030  			input: []*inputPath{
  2031  				{path: "a/b/fooDir/foo", fileContents: fooBlob, isExecutable: true},
  2032  				{path: "a/b/fooDir/bar", fileContents: fooBlob, isExecutable: true},
  2033  			},
  2034  			nodeProperties: map[string]*cpb.NodeProperties{"foo": fooProperties},
  2035  			wantBlobs:      [][]byte{fooBlob, fooBlob},
  2036  			wantTreeRoot: &repb.Directory{Files: []*repb.FileNode{
  2037  				{Name: "bar", Digest: fooDgPb, IsExecutable: true},
  2038  				{Name: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)},
  2039  			}},
  2040  			wantCacheCalls: map[string]int{
  2041  				"a/b/fooDir":     2,
  2042  				"a/b/fooDir/bar": 1,
  2043  				"a/b/fooDir/foo": 1,
  2044  			},
  2045  		},
  2046  		{
  2047  			desc: "Duplicate subdirectories",
  2048  			input: []*inputPath{
  2049  				{path: "a/b/fooDir/dir1/foo", fileContents: fooBlob, isExecutable: true},
  2050  				{path: "a/b/fooDir/dir2/foo", fileContents: fooBlob, isExecutable: true},
  2051  			},
  2052  			nodeProperties: map[string]*cpb.NodeProperties{"dir1/foo": fooProperties, "dir2/foo": fooProperties},
  2053  			wantBlobs:      [][]byte{fooBlob, fooDirBlob},
  2054  			wantTreeRoot: &repb.Directory{Directories: []*repb.DirectoryNode{
  2055  				{Name: "dir1", Digest: fooDirDgPb},
  2056  				{Name: "dir2", Digest: fooDirDgPb},
  2057  			}},
  2058  			wantTreeChildren: []*repb.Directory{fooDir},
  2059  			wantCacheCalls: map[string]int{
  2060  				"a/b/fooDir":          2,
  2061  				"a/b/fooDir/dir1":     1,
  2062  				"a/b/fooDir/dir1/foo": 1,
  2063  				"a/b/fooDir/dir2":     1,
  2064  				"a/b/fooDir/dir2/foo": 1,
  2065  			},
  2066  		},
  2067  		{
  2068  			desc: "Directory tree preserves lexicographical order",
  2069  			input: []*inputPath{
  2070  				{path: "a/b/fooDir/dirA/dirC/bar", fileContents: barBlob},
  2071  				{path: "a/b/fooDir/dirA/dirF/foo", fileContents: fooBlob, isExecutable: true},
  2072  				{path: "a/b/fooDir/dirB/dirD/baz", fileContents: bazBlob},
  2073  				{path: "a/b/fooDir/dirB/dirE/foo", fileContents: fooBlob, isExecutable: true},
  2074  				{path: "a/b/fooDir/dirB/dirE/bar", fileContents: barBlob},
  2075  			},
  2076  			nodeProperties: map[string]*cpb.NodeProperties{"dirA/dirF/foo": fooProperties, "dirB/dirE/foo": fooProperties},
  2077  			wantBlobs:      [][]byte{fooBlob, barBlob, fooDirBlob, barDirBlob, dirABlob, dirBBlob, bazDirBlob, bazBlob, foobarDirBlob},
  2078  			wantTreeRoot: &repb.Directory{Directories: []*repb.DirectoryNode{
  2079  				{Name: "dirA", Digest: dirADg.ToProto()},
  2080  				{Name: "dirB", Digest: dirBDg.ToProto()},
  2081  			}},
  2082  			wantTreeChildren: []*repb.Directory{dirA, barDir, fooDir, dirB, bazDir, foobarDir},
  2083  			wantCacheCalls: map[string]int{
  2084  				"a/b/fooDir":               2,
  2085  				"a/b/fooDir/dirA":          1,
  2086  				"a/b/fooDir/dirA/dirC":     1,
  2087  				"a/b/fooDir/dirA/dirC/bar": 1,
  2088  				"a/b/fooDir/dirA/dirF":     1,
  2089  				"a/b/fooDir/dirA/dirF/foo": 1,
  2090  				"a/b/fooDir/dirB":          1,
  2091  				"a/b/fooDir/dirB/dirD":     1,
  2092  				"a/b/fooDir/dirB/dirD/baz": 1,
  2093  				"a/b/fooDir/dirB/dirE":     1,
  2094  				"a/b/fooDir/dirB/dirE/foo": 1,
  2095  				"a/b/fooDir/dirB/dirE/bar": 1,
  2096  			},
  2097  		},
  2098  	}
  2099  
  2100  	for _, tc := range tests {
  2101  		root := t.TempDir()
  2102  		if err := construct(root, tc.input); err != nil {
  2103  			t.Fatalf("failed to construct input dir structure: %v", err)
  2104  		}
  2105  
  2106  		t.Run(tc.desc, func(t *testing.T) {
  2107  			wantBlobs := make(map[digest.Digest][]byte)
  2108  			for _, b := range tc.wantBlobs {
  2109  				wantBlobs[digest.NewFromBlob(b)] = b
  2110  			}
  2111  
  2112  			gotBlobs := make(map[digest.Digest][]byte)
  2113  			cache := newCallCountingMetadataCache(root, t)
  2114  			e, cleanup := fakes.NewTestEnv(t)
  2115  			defer cleanup()
  2116  
  2117  			inputs, gotResult, err := e.Client.GrpcClient.ComputeOutputsToUpload(root, "", []string{"a/b/fooDir"}, cache, command.UnspecifiedSymlinkBehavior, tc.nodeProperties)
  2118  			if err != nil {
  2119  				t.Fatalf("ComputeOutputsToUpload(...) gave error %v, want success", err)
  2120  			}
  2121  			for _, ue := range inputs {
  2122  				ch, err := chunker.New(ue, false, int(e.Client.GrpcClient.ChunkMaxSize))
  2123  				if err != nil {
  2124  					t.Fatalf("chunker.New(ue): failed to create chunker from UploadEntry: %v", err)
  2125  				}
  2126  				blob, err := ch.FullData()
  2127  				if err != nil {
  2128  					t.Errorf("chunker %v FullData() returned error %v", ch, err)
  2129  				}
  2130  				gotBlobs[ue.Digest] = blob
  2131  			}
  2132  			if diff := cmp.Diff(tc.wantCacheCalls, cache.calls, cmpopts.EquateEmpty()); diff != "" {
  2133  				t.Errorf("ComputeOutputsToUpload(...) gave diff on file metadata cache access (-want +got):\n%s", diff)
  2134  			}
  2135  			if len(gotResult.OutputDirectories) != 1 {
  2136  				t.Fatalf("ComputeOutputsToUpload(...) expected exactly one output directory in the result, got %+v", gotResult)
  2137  			}
  2138  			dir := gotResult.OutputDirectories[0]
  2139  			if dir.Path != "a/b/fooDir" {
  2140  				t.Errorf("ComputeOutputsToUpload(...) gave result dir path %s, want a/b/fooDir", dir.Path)
  2141  			}
  2142  
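        			// Recompute the outputs several times and collect the resulting Tree digests; they must
        			// all be identical, i.e. the tree encoding is deterministic despite map iteration order.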
  2143  			digests := make(map[string]bool)
  2144  			digests[gotResult.OutputDirectories[0].TreeDigest.Hash] = true
  2145  
  2146  			for i := 0; i < 5; i++ {
  2147  				_, gotResult, err = e.Client.GrpcClient.ComputeOutputsToUpload(root, "", []string{"a/b/fooDir"}, cache, command.UnspecifiedSymlinkBehavior, tc.nodeProperties)
  2148  				if err != nil {
  2149  					t.Fatalf("ComputeOutputsToUpload(...) gave error %v, want success", err)
  2150  				}
  2151  				digests[gotResult.OutputDirectories[0].TreeDigest.Hash] = true
  2152  			}
  2153  			if len(digests) != 1 {
  2154  				dgList := []string{}
  2155  				for d := range digests {
  2156  					dgList = append(dgList, d)
  2157  				}
  2158  				t.Fatalf("ComputeOutputsToUpload(...) directory digests are not consistent, got: %v", dgList)
  2159  			}
  2160  
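        			// The Tree proto referenced by the ActionResult must itself be among the uploaded blobs;
        			// unmarshal it and compare its root and children against the expected directories.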
  2161  			dg := digest.NewFromProtoUnvalidated(dir.TreeDigest)
  2162  			treeBlob, ok := gotBlobs[dg]
  2163  			if !ok {
  2164  				t.Fatalf("ComputeOutputsToUpload(...) tree proto with digest %+v not uploaded", dg)
  2165  			}
  2166  			wantBlobs[dg] = treeBlob
  2167  			rootBlob := mustMarshal(tc.wantTreeRoot)
  2168  			wantBlobs[digest.NewFromBlob(rootBlob)] = rootBlob
  2169  			if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" {
  2170  				t.Errorf("ComputeOutputsToUpload(...) gave diff (-want +got) on blobs:\n%s", diff)
  2171  			}
  2172  			tree := &repb.Tree{}
  2173  			if err := proto.Unmarshal(treeBlob, tree); err != nil {
  2174  				t.Errorf("ComputeOutputsToUpload(...) failed unmarshalling tree blob from %v: %v", treeBlob, err)
  2175  			}
  2176  			if diff := cmp.Diff(tc.wantTreeRoot, tree.Root, cmp.Comparer(proto.Equal)); diff != "" {
  2177  				t.Errorf("ComputeOutputsToUpload(...) gave diff (-want +got) on tree root:\n%s", diff)
  2178  			}
  2179  			wantChildren := make(map[digest.Digest]*repb.Directory)
  2180  			for _, d := range tc.wantTreeChildren {
  2181  				wantChildren[digest.TestNewFromMessage(d)] = d
  2182  			}
  2183  			gotChildren := make(map[digest.Digest]*repb.Directory)
  2184  			for _, d := range tree.Children {
  2185  				gotChildren[digest.TestNewFromMessage(d)] = d
  2186  			}
  2187  			if diff := cmp.Diff(wantChildren, gotChildren, cmp.Comparer(proto.Equal)); diff != "" {
  2188  				t.Errorf("ComputeOutputsToUpload(...) gave diff (-want +got) on tree children:\n%s", diff)
  2189  			}
  2190  		})
  2191  	}
  2192  }
  2193  
  2194  func TestComputeOutputsToUploadFileNoPermissions(t *testing.T) {
  2195  	input := []*inputPath{
  2196  		{path: "wd/foo", fileContents: fooBlob, isExecutable: true},
  2197  		{path: "bar", fileContents: barBlob},
  2198  	}
  2199  	paths := []string{"foo", "../bar"}
  2200  	nodeProperties := map[string]*cpb.NodeProperties{"foo": fooProperties}
  2201  	wd := "wd"
  2202  	wantBlob := [][]byte{fooBlob}
  2203  	wantResult := &repb.ActionResult{
  2204  		OutputFiles: []*repb.OutputFile{
  2205  			&repb.OutputFile{Path: "foo", Digest: fooDgPb, IsExecutable: true, NodeProperties: command.NodePropertiesToAPI(fooProperties)},
  2206  		},
  2207  	}
  2208  	wantCacheCalls := map[string]int{
  2209  		"bar":    1,
  2210  		"wd/foo": 1,
  2211  	}
  2212  
  2213  	root := t.TempDir()
  2214  	if err := construct(root, input); err != nil {
  2215  		t.Fatalf("failed to construct input dir structure: %v", err)
  2216  	}
  2217  
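        	// Make bar execute-only so its contents cannot be read; the expectation is that it is dropped from
        	// the outputs (only foo appears in the blobs and ActionResult) while both paths still hit the cache.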
  2218  	if err := os.Chmod(filepath.Join(root, "bar"), 0100); err != nil {
  2219  		t.Fatalf("failed to set permissions of bar: %v", err)
  2220  	}
  2221  
  2222  	wantBlobs := make(map[digest.Digest][]byte)
  2223  	for _, b := range wantBlob {
  2224  		wantBlobs[digest.NewFromBlob(b)] = b
  2225  	}
  2226  
  2227  	gotBlobs := make(map[digest.Digest][]byte)
  2228  	cache := newCallCountingMetadataCache(root, t)
  2229  	e, cleanup := fakes.NewTestEnv(t)
  2230  	defer cleanup()
  2231  
  2232  	inputs, gotResult, err := e.Client.GrpcClient.ComputeOutputsToUpload(root, wd, paths, cache, command.UnspecifiedSymlinkBehavior, nodeProperties)
  2233  	if err != nil {
  2234  		t.Errorf("ComputeOutputsToUpload(...) gave error %v, want success", err)
  2235  	}
  2236  	for _, ue := range inputs {
  2237  		ch, err := chunker.New(ue, false, int(e.Client.GrpcClient.ChunkMaxSize))
  2238  		if err != nil {
  2239  			t.Fatalf("chunker.New(ue): failed to create chunker from UploadEntry: %v", err)
  2240  		}
  2241  		blob, err := ch.FullData()
  2242  		if err != nil {
  2243  			t.Errorf("chunker %v FullData() returned error %v", ch, err)
  2244  		}
  2245  		gotBlobs[ue.Digest] = blob
  2246  	}
  2247  	if diff := cmp.Diff(wantBlobs, gotBlobs); diff != "" {
  2248  		t.Errorf("ComputeOutputsToUpload(...) gave diff (-want +got) on blobs:\n%s", diff)
  2249  	}
  2250  	if diff := cmp.Diff(wantCacheCalls, cache.calls, cmpopts.EquateEmpty()); diff != "" {
  2251  		t.Errorf("ComputeOutputsToUpload(...) gave diff on file metadata cache access (-want +got):\n%s", diff)
  2252  	}
  2253  	if diff := cmp.Diff(wantResult, gotResult, cmp.Comparer(proto.Equal)); diff != "" {
  2254  		t.Errorf("ComputeOutputsToUpload(...) gave diff on action result (-want +got):\n%s", diff)
  2255  	}
  2256  }
  2257  
  2258  func randomBytes(randGen *rand.Rand, n int) []byte {
  2259  	b := make([]byte, n)
  2260  	randGen.Read(b)
  2261  	return b
  2262  }
  2263  
  2264  func BenchmarkComputeMerkleTree(b *testing.B) {
  2265  	e, cleanup := fakes.NewTestEnv(b)
  2266  	defer cleanup()
  2267  
  2268  	randGen := rand.New(rand.NewSource(0))
  2269  	if err := construct(e.ExecRoot, []*inputPath{
  2270  		{path: "a", fileContents: randomBytes(randGen, 2048)},
  2271  		{path: "b", fileContents: randomBytes(randGen, 9999)},
  2272  		{path: "c", fileContents: randomBytes(randGen, 1024)},
  2273  		{path: "d/a", fileContents: randomBytes(randGen, 4444)},
  2274  		{path: "d/b", fileContents: randomBytes(randGen, 7491)},
  2275  		{path: "d/c", emptyDir: true},
  2276  		{path: "d/d/a", fileContents: randomBytes(randGen, 5912)},
  2277  		{path: "d/d/b", fileContents: randomBytes(randGen, 9157)},
  2278  		{path: "d/d/c", isSymlink: true, relSymlinkTarget: "../../b"},
  2279  		{path: "d/d/d", fileContents: randomBytes(randGen, 5381)},
  2280  	}); err != nil {
        		b.Fatalf("failed to construct input dir structure: %v", err)
        	}
  2281  
  2282  	inputSpec := &command.InputSpec{
  2283  		Inputs: []string{"a", "b", "c", "d/a", "d/b", "d/c", "d/d/a", "d/d/b", "d/d/c", "d/d/d"},
  2284  	}
  2285  
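        	// Each iteration uses a fresh single-flight file metadata cache so that stat and digest results
        	// are not reused across benchmark runs.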
  2286  	b.ResetTimer()
  2287  	for i := 0; i < b.N; i++ {
  2288  		fmc := filemetadata.NewSingleFlightCache()
  2289  		_, _, _, err := e.Client.GrpcClient.ComputeMerkleTree(context.Background(), e.ExecRoot, "", "", inputSpec, fmc)
  2290  		if err != nil {
  2291  			b.Errorf("Failed to compute merkle tree: %v", err)
  2292  		}
  2293  	}
  2294  }