github.com/bir3/gocompiler@v0.9.2202/src/xvendor/golang.org/x/mod/sumdb/client.go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sumdb

import (
	"bytes"
	"errors"
	"fmt"
	"path"
	"strings"
	"sync"
	"sync/atomic"

	"github.com/bir3/gocompiler/src/xvendor/golang.org/x/mod/module"
	"github.com/bir3/gocompiler/src/xvendor/golang.org/x/mod/sumdb/note"
	"github.com/bir3/gocompiler/src/xvendor/golang.org/x/mod/sumdb/tlog"
)

// A ClientOps provides the external operations
// (file caching, HTTP fetches, and so on) needed by the [Client].
// The methods must be safe for concurrent use by multiple goroutines.
type ClientOps interface {
	// ReadRemote reads and returns the content served at the given path
	// on the remote database server. The path begins with "/lookup" or "/tile/",
	// and there is no need to parse the path in any way.
	// It is the implementation's responsibility to turn that path into a full URL
	// and make the HTTP request. ReadRemote should return an error for
	// any non-200 HTTP response status.
	ReadRemote(path string) ([]byte, error)

	// ReadConfig reads and returns the content of the named configuration file.
	// There is only a fixed set of configuration files.
	//
	// "key" returns a file containing the verifier key for the server.
	//
	// serverName + "/latest" returns a file containing the latest known
	// signed tree from the server.
	// To signal that the client wishes to start with an "empty" signed tree,
	// ReadConfig can return a successful empty result (0 bytes of data).
	ReadConfig(file string) ([]byte, error)

	// WriteConfig updates the content of the named configuration file,
	// changing it from the old []byte to the new []byte.
	// If the old []byte does not match the stored configuration,
	// WriteConfig must return ErrWriteConflict.
	// Otherwise, WriteConfig should atomically replace old with new.
	// The "key" configuration file is never written using WriteConfig.
	WriteConfig(file string, old, new []byte) error

	// ReadCache reads and returns the content of the named cache file.
	// Any returned error will be treated as equivalent to the file not existing.
	// There can be arbitrarily many cache files, such as:
	//	serverName/lookup/pkg@version
	//	serverName/tile/8/1/x123/456
	ReadCache(file string) ([]byte, error)

	// WriteCache writes the named cache file.
	WriteCache(file string, data []byte)

	// Log prints the given log message (such as with log.Print).
	Log(msg string)

	// SecurityError prints the given security error log message.
	// The Client returns ErrSecurity from any operation that invokes SecurityError,
	// but the return value is mainly for testing. In a real program,
	// SecurityError should typically print the message and call log.Fatal or os.Exit.
	SecurityError(msg string)
}
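
// An illustrative sketch (not part of this package's API): a minimal
// ClientOps implementation that keeps configuration and cache files under a
// local directory and fetches remote paths over HTTP. The dirOps type, its
// fields, and the directory layout are hypothetical; a real implementation
// would want finer-grained locking and better error reporting.
//
//	import (
//		"bytes"
//		"errors"
//		"fmt"
//		"io"
//		"io/fs"
//		"log"
//		"net/http"
//		"os"
//		"path/filepath"
//		"strings"
//		"sync"
//
//		"golang.org/x/mod/sumdb"
//	)
//
//	type dirOps struct {
//		mu   sync.Mutex // serializes WriteConfig's compare-and-swap
//		root string     // directory for config and cache files
//		base string     // server URL, e.g. "https://sum.golang.org"
//	}
//
//	func (d *dirOps) ReadRemote(path string) ([]byte, error) {
//		resp, err := http.Get(d.base + path)
//		if err != nil {
//			return nil, err
//		}
//		defer resp.Body.Close()
//		if resp.StatusCode != http.StatusOK {
//			return nil, fmt.Errorf("GET %s: %s", path, resp.Status)
//		}
//		return io.ReadAll(resp.Body)
//	}
//
//	func (d *dirOps) ReadConfig(file string) ([]byte, error) {
//		data, err := os.ReadFile(filepath.Join(d.root, "config", file))
//		if errors.Is(err, fs.ErrNotExist) && strings.HasSuffix(file, "/latest") {
//			return nil, nil // 0 bytes: start from an empty signed tree
//		}
//		return data, err
//	}
//
//	func (d *dirOps) WriteConfig(file string, old, new []byte) error {
//		d.mu.Lock()
//		defer d.mu.Unlock()
//		name := filepath.Join(d.root, "config", file)
//		data, _ := os.ReadFile(name)
//		if !bytes.Equal(data, old) {
//			return sumdb.ErrWriteConflict
//		}
//		os.MkdirAll(filepath.Dir(name), 0o777)
//		return os.WriteFile(name, new, 0o666)
//	}
//
//	func (d *dirOps) ReadCache(file string) ([]byte, error) {
//		return os.ReadFile(filepath.Join(d.root, "cache", file))
//	}
//
//	func (d *dirOps) WriteCache(file string, data []byte) {
//		name := filepath.Join(d.root, "cache", file)
//		os.MkdirAll(filepath.Dir(name), 0o777)
//		os.WriteFile(name, data, 0o666)
//	}
//
//	func (d *dirOps) Log(msg string)           { log.Print(msg) }
//	func (d *dirOps) SecurityError(msg string) { log.Fatal(msg) }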

// ErrWriteConflict signals a write conflict during ClientOps.WriteConfig.
var ErrWriteConflict = errors.New("write conflict")

// ErrSecurity is returned by [Client] operations that invoke ClientOps.SecurityError.
var ErrSecurity = errors.New("security error: misbehaving server")

// A Client is a client connection to a checksum database.
// All the methods are safe for simultaneous use by multiple goroutines.
type Client struct {
	ops	ClientOps	// access to operations in the external world

	didLookup	uint32

	// one-time initialized data
	initOnce	sync.Once
	initErr		error		// init error, if any
	name		string		// name of accepted verifier
	verifiers	note.Verifiers	// accepted verifiers (just one, but Verifiers for note.Open)
	tileReader	tileReader
	tileHeight	int
	nosumdb		string

	record		parCache	// cache of record lookup, keyed by path@vers
	tileCache	parCache	// cache of c.readTile, keyed by tile

	latestMu	sync.Mutex
	latest		tlog.Tree	// latest known tree head
	latestMsg	[]byte		// encoded signed note for latest

	tileSavedMu	sync.Mutex
	tileSaved	map[tlog.Tile]bool	// which tiles have been saved using c.ops.WriteCache already
}

// NewClient returns a new [Client] using the given [ClientOps].
func NewClient(ops ClientOps) *Client {
	return &Client{
		ops: ops,
	}
}
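
// A usage sketch (hypothetical; dirOps is the illustrative type above):
//
//	ops := &dirOps{root: "/tmp/sumdb", base: "https://sum.golang.org"}
//	client := sumdb.NewClient(ops)
//	lines, err := client.Lookup("golang.org/x/text", "v0.3.0")
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, line := range lines {
//		fmt.Println(line) // go.sum lines for this module version
//	}
//
// Before the first Lookup, the "key" configuration file must already hold
// the server's verifier key (for example, the published sum.golang.org key).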

// init initializes the client (if not already initialized)
// and returns any initialization error.
func (c *Client) init() error {
	c.initOnce.Do(c.initWork)
	return c.initErr
}

// initWork does the actual initialization work.
func (c *Client) initWork() {
	defer func() {
		if c.initErr != nil {
			c.initErr = fmt.Errorf("initializing sumdb.Client: %v", c.initErr)
		}
	}()

	c.tileReader.c = c
	if c.tileHeight == 0 {
		c.tileHeight = 8
	}
	c.tileSaved = make(map[tlog.Tile]bool)

	vkey, err := c.ops.ReadConfig("key")
	if err != nil {
		c.initErr = err
		return
	}
	verifier, err := note.NewVerifier(strings.TrimSpace(string(vkey)))
	if err != nil {
		c.initErr = err
		return
	}
	c.verifiers = note.VerifierList(verifier)
	c.name = verifier.Name()

	data, err := c.ops.ReadConfig(c.name + "/latest")
	if err != nil {
		c.initErr = err
		return
	}
	if err := c.mergeLatest(data); err != nil {
		c.initErr = err
		return
	}
}

// SetTileHeight sets the tile height for the Client.
// SetTileHeight can be called at most once,
// and any call must happen before the first call to [Client.Lookup].
// If SetTileHeight is not called, the Client defaults to tile height 8.
func (c *Client) SetTileHeight(height int) {
	if atomic.LoadUint32(&c.didLookup) != 0 {
		panic("SetTileHeight used after Lookup")
	}
	if height <= 0 {
		panic("invalid call to SetTileHeight")
	}
	if c.tileHeight != 0 {
		panic("multiple calls to SetTileHeight")
	}
	c.tileHeight = height
}

// SetGONOSUMDB sets the list of comma-separated GONOSUMDB patterns for the Client.
// For any module path matching one of the patterns,
// [Client.Lookup] will return ErrGONOSUMDB.
// SetGONOSUMDB can be called at most once,
// and if so it must be called before the first call to Lookup.
func (c *Client) SetGONOSUMDB(list string) {
	if atomic.LoadUint32(&c.didLookup) != 0 {
		panic("SetGONOSUMDB used after Lookup")
	}
	if c.nosumdb != "" {
		panic("multiple calls to SetGONOSUMDB")
	}
	c.nosumdb = list
}

// ErrGONOSUMDB is returned by [Client.Lookup] for paths that match
// a pattern listed in the GONOSUMDB list (set by [Client.SetGONOSUMDB],
// usually from the environment variable).
var ErrGONOSUMDB = errors.New("skipped (listed in GONOSUMDB)")

func (c *Client) skip(target string) bool {
	return globsMatchPath(c.nosumdb, target)
}

// globsMatchPath reports whether any path prefix of target
// matches one of the glob patterns (as defined by path.Match)
// in the comma-separated globs list.
// It ignores any empty or malformed patterns in the list.
func globsMatchPath(globs, target string) bool {
	for globs != "" {
		// Extract next non-empty glob in comma-separated list.
		var glob string
		if i := strings.Index(globs, ","); i >= 0 {
			glob, globs = globs[:i], globs[i+1:]
		} else {
			glob, globs = globs, ""
		}
		if glob == "" {
			continue
		}

		// A glob with N+1 path elements (N slashes) needs to be matched
		// against the first N+1 path elements of target,
		// which end just before the N+1'th slash.
		n := strings.Count(glob, "/")
		prefix := target
		// Walk target, counting slashes, truncating at the N+1'th slash.
		for i := 0; i < len(target); i++ {
			if target[i] == '/' {
				if n == 0 {
					prefix = target[:i]
					break
				}
				n--
			}
		}
		if n > 0 {
			// Not enough prefix elements.
			continue
		}
		matched, _ := path.Match(glob, prefix)
		if matched {
			return true
		}
	}
	return false
}
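
// Illustrative matches (following the semantics above; not exhaustive):
//
//	globsMatchPath("example.com", "example.com/foo/bar")    // true: first path element matches
//	globsMatchPath("*.example.com", "test.example.com/m")   // true: glob matches one element
//	globsMatchPath("example.com/foo", "example.com")        // false: target has too few elements
//	globsMatchPath("example.com,other.org", "other.org/m")  // true: second pattern matches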

// Lookup returns the go.sum lines for the given module path and version.
// The version may end in a /go.mod suffix, in which case Lookup returns
// the go.sum lines for the module's go.mod-only hash.
func (c *Client) Lookup(path, vers string) (lines []string, err error) {
	atomic.StoreUint32(&c.didLookup, 1)

	if c.skip(path) {
		return nil, ErrGONOSUMDB
	}

	defer func() {
		if err != nil {
			err = fmt.Errorf("%s@%s: %v", path, vers, err)
		}
	}()

	if err := c.init(); err != nil {
		return nil, err
	}

	// Prepare encoded cache filename / URL.
	epath, err := module.EscapePath(path)
	if err != nil {
		return nil, err
	}
	evers, err := module.EscapeVersion(strings.TrimSuffix(vers, "/go.mod"))
	if err != nil {
		return nil, err
	}
	remotePath := "/lookup/" + epath + "@" + evers
	file := c.name + remotePath

	// Fetch the data.
	// The c.record cache avoids redundant ReadCache/ReadRemote operations
	// (especially since go.sum lines tend to come in pairs for a given
	// path and version) and also avoids having multiple of the same
	// request in flight at once.
	type cached struct {
		data	[]byte
		err	error
	}
	result := c.record.Do(file, func() interface{} {
		// Try the on-disk cache, or else get from web.
		writeCache := false
		data, err := c.ops.ReadCache(file)
		if err != nil {
			data, err = c.ops.ReadRemote(remotePath)
			if err != nil {
				return cached{nil, err}
			}
			writeCache = true
		}

		// Validate the record before using it for anything.
		id, text, treeMsg, err := tlog.ParseRecord(data)
		if err != nil {
			return cached{nil, err}
		}
		if err := c.mergeLatest(treeMsg); err != nil {
			return cached{nil, err}
		}
		if err := c.checkRecord(id, text); err != nil {
			return cached{nil, err}
		}

		// Now that we've validated the record,
		// save it to the on-disk cache (unless that's where it came from).
		if writeCache {
			c.ops.WriteCache(file, data)
		}

		return cached{data, nil}
	}).(cached)
	if result.err != nil {
		return nil, result.err
	}

	// Extract the lines for the specific version we want
	// (with or without /go.mod).
	prefix := path + " " + vers + " "
	var hashes []string
	for _, line := range strings.Split(string(result.data), "\n") {
		if strings.HasPrefix(line, prefix) {
			hashes = append(hashes, line)
		}
	}
	return hashes, nil
}
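
// For illustration, a lookup response body contains go.sum lines such as
// (the hashes shown are the widely circulated ones for golang.org/x/text v0.3.0):
//
//	golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
//	golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
//
// Lookup("golang.org/x/text", "v0.3.0") returns only the first line,
// while Lookup("golang.org/x/text", "v0.3.0/go.mod") returns the second.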

// mergeLatest merges the tree head in msg
// with the Client's current latest tree head,
// ensuring the result is a consistent timeline.
// If the result is inconsistent, mergeLatest calls c.ops.SecurityError
// with a detailed security error message and then
// (only if c.ops.SecurityError does not exit the program) returns ErrSecurity.
// If the Client's current latest tree head moves forward,
// mergeLatest updates the underlying configuration file as well,
// taking care to merge any independent updates to that configuration.
func (c *Client) mergeLatest(msg []byte) error {
	// Merge msg into our in-memory copy of the latest tree head.
	when, err := c.mergeLatestMem(msg)
	if err != nil {
		return err
	}
	if when != msgFuture {
		// msg matched our present or was in the past.
		// No change to our present, so no update of config file.
		return nil
	}

	// Flush our extended timeline back out to the configuration file.
	// If the configuration file has been updated in the interim,
	// we need to merge any updates made there as well.
	// Note that c.ops.WriteConfig is an atomic compare-and-swap.
	for {
		msg, err := c.ops.ReadConfig(c.name + "/latest")
		if err != nil {
			return err
		}
		when, err := c.mergeLatestMem(msg)
		if err != nil {
			return err
		}
		if when != msgPast {
			// msg matched our present or was from the future,
			// and now our in-memory copy matches.
			return nil
		}

		// msg (== config) is in the past, so we need to update it.
		c.latestMu.Lock()
		latestMsg := c.latestMsg
		c.latestMu.Unlock()
		if err := c.ops.WriteConfig(c.name+"/latest", msg, latestMsg); err != ErrWriteConflict {
			// Success or a non-write-conflict error.
			return err
		}
	}
}

const (
	msgPast	= 1 + iota
	msgNow
	msgFuture
)

// mergeLatestMem is like mergeLatest but is only concerned with
// updating the in-memory copy of the latest tree head (c.latest),
// not the configuration file.
// The when result explains when msg happened relative to our
// previous idea of c.latest:
// msgPast means msg was from before c.latest,
// msgNow means msg was exactly c.latest, and
// msgFuture means msg was from after c.latest, which has now been updated.
func (c *Client) mergeLatestMem(msg []byte) (when int, err error) {
	if len(msg) == 0 {
		// Accept empty msg as the unsigned, empty timeline.
		c.latestMu.Lock()
		latest := c.latest
		c.latestMu.Unlock()
		if latest.N == 0 {
			return msgNow, nil
		}
		return msgPast, nil
	}

	note, err := note.Open(msg, c.verifiers)
	if err != nil {
		return 0, fmt.Errorf("reading tree note: %v\nnote:\n%s", err, msg)
	}
	tree, err := tlog.ParseTree([]byte(note.Text))
	if err != nil {
		return 0, fmt.Errorf("reading tree: %v\ntree:\n%s", err, note.Text)
	}

	// Other lookups may be calling mergeLatest with other heads,
	// so c.latest is changing underfoot. We don't want to hold the
	// c.latestMu lock during tile fetches, so loop trying to update c.latest.
	c.latestMu.Lock()
	latest := c.latest
	latestMsg := c.latestMsg
	c.latestMu.Unlock()

	for {
		// If the tree head looks old, check that it is on our timeline.
		if tree.N <= latest.N {
			if err := c.checkTrees(tree, msg, latest, latestMsg); err != nil {
				return 0, err
			}
			if tree.N < latest.N {
				return msgPast, nil
			}
			return msgNow, nil
		}

		// The tree head looks new. Check that we are on its timeline and try to move our timeline forward.
		if err := c.checkTrees(latest, latestMsg, tree, msg); err != nil {
			return 0, err
		}

		// Install our msg if possible.
		// Otherwise we will go around again.
		c.latestMu.Lock()
		installed := false
		if c.latest == latest {
			installed = true
			c.latest = tree
			c.latestMsg = msg
		} else {
			latest = c.latest
			latestMsg = c.latestMsg
		}
		c.latestMu.Unlock()

		if installed {
			return msgFuture, nil
		}
	}
}

// checkTrees checks that older (from olderNote) is contained in newer (from newerNote).
// If an error occurs, such as malformed data or a network problem, checkTrees returns that error.
// If on the other hand checkTrees finds evidence of misbehavior, it prepares a detailed
// message, passes it to c.ops.SecurityError, and returns ErrSecurity.
func (c *Client) checkTrees(older tlog.Tree, olderNote []byte, newer tlog.Tree, newerNote []byte) error {
	thr := tlog.TileHashReader(newer, &c.tileReader)
	h, err := tlog.TreeHash(older.N, thr)
	if err != nil {
		if older.N == newer.N {
			return fmt.Errorf("checking tree#%d: %v", older.N, err)
		}
		return fmt.Errorf("checking tree#%d against tree#%d: %v", older.N, newer.N, err)
	}
	if h == older.Hash {
		return nil
	}

	// Detected a fork in the tree timeline.
	// Start by reporting the inconsistent signed tree notes.
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "SECURITY ERROR\n")
	fmt.Fprintf(&buf, "go.sum database server misbehavior detected!\n\n")
	indent := func(b []byte) []byte {
		return bytes.Replace(b, []byte("\n"), []byte("\n\t"), -1)
	}
	fmt.Fprintf(&buf, "old database:\n\t%s\n", indent(olderNote))
	fmt.Fprintf(&buf, "new database:\n\t%s\n", indent(newerNote))

	// The notes alone are not enough to prove the inconsistency.
	// We also need to show that the newer note's tree hash for older.N
	// does not match older.Hash. The consumer of this report could
	// of course consult the server to try to verify the inconsistency,
	// but we are holding all the bits we need to prove it right now,
	// so we might as well print them and make the report not depend
	// on the continued availability of the misbehaving server.
	// Preparing this data only reuses the tiled hashes needed for
	// tlog.TreeHash(older.N, thr) above, so assuming thr is caching tiles,
	// there are no new accesses to the server here, and these operations cannot fail.
	fmt.Fprintf(&buf, "proof of misbehavior:\n\t%v", h)
	if p, err := tlog.ProveTree(newer.N, older.N, thr); err != nil {
		fmt.Fprintf(&buf, "\tinternal error: %v\n", err)
	} else if err := tlog.CheckTree(p, newer.N, newer.Hash, older.N, h); err != nil {
		fmt.Fprintf(&buf, "\tinternal error: generated inconsistent proof\n")
	} else {
		for _, h := range p {
			fmt.Fprintf(&buf, "\n\t%v", h)
		}
	}
	c.ops.SecurityError(buf.String())
	return ErrSecurity
}

// checkRecord checks that record #id's hash matches data.
func (c *Client) checkRecord(id int64, data []byte) error {
	c.latestMu.Lock()
	latest := c.latest
	c.latestMu.Unlock()

	if id >= latest.N {
		return fmt.Errorf("cannot validate record %d in tree of size %d", id, latest.N)
	}
	hashes, err := tlog.TileHashReader(latest, &c.tileReader).ReadHashes([]int64{tlog.StoredHashIndex(0, id)})
	if err != nil {
		return err
	}
	if hashes[0] == tlog.RecordHash(data) {
		return nil
	}
	return fmt.Errorf("cannot authenticate record data in server response")
}
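
// For background: tlog.RecordHash computes the record's content (leaf) hash
// in the RFC 6962 style, and tlog.StoredHashIndex(0, id) gives the position
// of that leaf hash within the log's stored-hash sequence, so the comparison
// above authenticates the fetched record against the proven tree.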

// tileReader is a *Client wrapper that implements tlog.TileReader.
// The separate type avoids exposing the ReadTiles and SaveTiles
// methods on Client itself.
type tileReader struct {
	c *Client
}

func (r *tileReader) Height() int {
	return r.c.tileHeight
}

// ReadTiles reads and returns the requested tiles,
// either from the on-disk cache or the server.
func (r *tileReader) ReadTiles(tiles []tlog.Tile) ([][]byte, error) {
	// Read all the tiles in parallel.
	data := make([][]byte, len(tiles))
	errs := make([]error, len(tiles))
	var wg sync.WaitGroup
	for i, tile := range tiles {
		wg.Add(1)
		go func(i int, tile tlog.Tile) {
			defer wg.Done()
			defer func() {
				if e := recover(); e != nil {
					errs[i] = fmt.Errorf("panic: %v", e)
				}
			}()
			data[i], errs[i] = r.c.readTile(tile)
		}(i, tile)
	}
	wg.Wait()

	for _, err := range errs {
		if err != nil {
			return nil, err
		}
	}

	return data, nil
}

// tileCacheKey returns the cache key for the tile.
func (c *Client) tileCacheKey(tile tlog.Tile) string {
	return c.name + "/" + tile.Path()
}

// tileRemotePath returns the remote path for the tile.
func (c *Client) tileRemotePath(tile tlog.Tile) string {
	return "/" + tile.Path()
}
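
// For example, using the tile naming from the ReadCache docs above:
// the height-8, level-1 tile with index 123456 (the index is encoded in
// three-digit groups, all but the last prefixed with "x") has tile.Path()
// "tile/8/1/x123/456", so it is cached under
//
//	serverName/tile/8/1/x123/456
//
// and fetched from the server at /tile/8/1/x123/456.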

// readTile reads a single tile, either from the on-disk cache or the server.
func (c *Client) readTile(tile tlog.Tile) ([]byte, error) {
	type cached struct {
		data	[]byte
		err	error
	}

	result := c.tileCache.Do(tile, func() interface{} {
		// Try the requested tile in on-disk cache.
		data, err := c.ops.ReadCache(c.tileCacheKey(tile))
		if err == nil {
			c.markTileSaved(tile)
			return cached{data, nil}
		}

		// Try the full tile in on-disk cache (if requested tile not already full).
		// We only save authenticated tiles to the on-disk cache,
		// so the recreated prefix is equally authenticated.
		full := tile
		full.W = 1 << uint(tile.H)
		if tile != full {
			data, err := c.ops.ReadCache(c.tileCacheKey(full))
			if err == nil {
				c.markTileSaved(tile)	// don't save tile later; we already have full
				return cached{data[:len(data)/full.W*tile.W], nil}
			}
		}

		// Try requested tile from server.
		data, err = c.ops.ReadRemote(c.tileRemotePath(tile))
		if err == nil {
			return cached{data, nil}
		}

		// Try full tile on server.
		// If the partial tile does not exist, it should be because
		// the tile has been completed and only the complete one
		// is available.
		if tile != full {
			data, err := c.ops.ReadRemote(c.tileRemotePath(full))
			if err == nil {
				// Note: We could save the full tile in the on-disk cache here,
				// but we don't know if it is valid yet, and we will only find out
				// about the partial data, not the full data. So let SaveTiles
				// save the partial tile, and we'll just refetch the full tile later
				// once we can validate more (or all) of it.
				return cached{data[:len(data)/full.W*tile.W], nil}
			}
		}

		// Nothing worked.
		// Return the error from the server fetch for the requested (not full) tile.
		return cached{nil, err}
	}).(cached)

	return result.data, result.err
}
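
// The prefix slicing above works because a tile is a flat concatenation of
// fixed-size hashes (tlog.HashSize, 32 bytes each): a full tile at height 8
// holds 1<<8 = 256 hashes, 8192 bytes in all, so a partial tile of width W
// is simply the first W*32 bytes, which is what data[:len(data)/full.W*tile.W]
// extracts.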

// markTileSaved records that tile is already present in the on-disk cache,
// so that a future SaveTiles for that tile can be ignored.
func (c *Client) markTileSaved(tile tlog.Tile) {
	c.tileSavedMu.Lock()
	c.tileSaved[tile] = true
	c.tileSavedMu.Unlock()
}

// SaveTiles saves the now validated tiles.
func (r *tileReader) SaveTiles(tiles []tlog.Tile, data [][]byte) {
	c := r.c

	// Determine which tiles need saving.
	// (Tiles that came from the cache need not be saved back.)
	save := make([]bool, len(tiles))
	c.tileSavedMu.Lock()
	for i, tile := range tiles {
		if !c.tileSaved[tile] {
			save[i] = true
			c.tileSaved[tile] = true
		}
	}
	c.tileSavedMu.Unlock()

	for i, tile := range tiles {
		if save[i] {
			// If WriteCache fails here (out of disk space? I/O error?),
			// c.tileSaved[tile] is still true, so we will not try to write
			// this tile again during this run. A future run may redownload
			// the tile and have better luck saving it.
			c.ops.WriteCache(c.name+"/"+tile.Path(), data[i])
		}
	}
}