github.com/terramate-io/tf@v0.0.0-20230830114523-fce866b4dfcd/providercache/installer.go

     1  // Copyright (c) HashiCorp, Inc.
     2  // SPDX-License-Identifier: MPL-2.0
     3  
     4  package providercache
     5  
     6  import (
     7  	"context"
     8  	"fmt"
     9  	"log"
    10  	"sort"
    11  	"strings"
    12  
    13  	"github.com/apparentlymart/go-versions/versions"
    14  
    15  	"github.com/terramate-io/tf/addrs"
    16  	copydir "github.com/terramate-io/tf/copy"
    17  	"github.com/terramate-io/tf/depsfile"
    18  	"github.com/terramate-io/tf/getproviders"
    19  )
    20  
    21  // Installer is the main type in this package, representing a provider installer
    22  // with a particular configuration-specific cache directory and an optional
    23  // global cache directory.
    24  type Installer struct {
    25  	// targetDir is the cache directory we're ultimately aiming to get the
    26  	// requested providers installed into.
    27  	targetDir *Dir
    28  
    29  	// source is the provider source that the installer will use to discover
    30  	// what provider versions are available for installation and to
    31  	// find the source locations for any versions that are not already
    32  	// available via one of the cache directories.
    33  	source getproviders.Source
    34  
    35  	// globalCacheDir is an optional additional directory that will, if
    36  	// provided, be treated as a read-through cache when retrieving new
    37  	// provider versions. That is, new packages are fetched into this
    38  	// directory first and then linked into targetDir, which allows sharing
    39  	// both the disk space and the download time for a particular provider
    40  	// version between different configurations on the same system.
    41  	globalCacheDir *Dir
    42  
    43  	// globalCacheDirMayBreakDependencyLockFile allows a temporary exception to
    44  	// the rule that an entry in globalCacheDir can normally only be used if
    45  	// its validity is already confirmed by an entry in the dependency lock
    46  	// file.
    47  	globalCacheDirMayBreakDependencyLockFile bool
    48  
    49  	// builtInProviderTypes is an optional set of types that should be
    50  	// considered valid to appear in the special terraform.io/builtin/...
    51  	// namespace, which we use for providers that are built in to Terraform
    52  	// and thus do not need any separate installation step.
    53  	builtInProviderTypes []string
    54  
    55  	// unmanagedProviderTypes is a set of provider addresses that should be
    56  	// considered implemented, but that Terraform does not manage the
    57  	// lifecycle for, and therefore does not need to worry about the
    58  	// installation of.
    59  	unmanagedProviderTypes map[addrs.Provider]struct{}
    60  }
    61  
    62  // NewInstaller constructs and returns a new installer with the given target
    63  // directory and provider source.
    64  //
    65  // A newly-created installer does not have a global cache directory configured,
    66  // but a caller can make a follow-up call to SetGlobalCacheDir to provide
    67  // one prior to taking any installation actions.
    68  //
    69  // The target directory MUST NOT also be an input consulted by the given source,
    70  // or the result is undefined.
    71  func NewInstaller(targetDir *Dir, source getproviders.Source) *Installer {
    72  	return &Installer{
    73  		targetDir: targetDir,
    74  		source:    source,
    75  	}
    76  }
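        // A minimal construction sketch, for illustration only; targetDir and
        // source are assumed to have been obtained elsewhere by the caller:
        //
        //	inst := NewInstaller(targetDir, source)
        //	// Optional follow-up configuration can be applied with the setter
        //	// methods below before any installation actions are taken.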
    77  
    78  // Clone returns a new Installer which has a new target directory but
    79  // the same optional global cache directory, the same installation sources,
    80  // and the same built-in/unmanaged providers. The result can be mutated further
    81  // using the various setter methods without affecting the original.
    82  func (i *Installer) Clone(targetDir *Dir) *Installer {
    83  	// For now all of our setter methods just overwrite field values in
    84  	// their entirety, rather than mutating things on the other side of
    85  	// the shared pointers, and so we can safely just shallow-copy the
    86  	// root. We might need to be more careful here if in future we add
    87  	// methods that allow deeper mutations through the stored pointers.
    88  	ret := *i
    89  	ret.targetDir = targetDir
    90  	return &ret
    91  }
    92  
    93  // ProviderSource returns the getproviders.Source that the installer would
    94  // use for installing any new providers.
    95  func (i *Installer) ProviderSource() getproviders.Source {
    96  	return i.source
    97  }
    98  
    99  // SetGlobalCacheDir activates a second tier of caching for the receiving
   100  // installer, with the given directory used as a read-through cache for
   101  // installation operations that need to retrieve new packages.
   102  //
   103  // The global cache directory for an installer must never be the same as its
   104  // target directory, and must not be used as one of its provider sources.
   105  // If these overlap then undefined behavior will result.
   106  func (i *Installer) SetGlobalCacheDir(cacheDir *Dir) {
   107  	// A little safety check to catch straightforward mistakes where the
   108  	// directories overlap. Better to panic early than to do
   109  	// possibly-destructive actions on the cache directory downstream.
   110  	if same, err := copydir.SameFile(i.targetDir.baseDir, cacheDir.baseDir); err == nil && same {
   111  		panic(fmt.Sprintf("global cache directory %s must not match the installation target directory %s", cacheDir.baseDir, i.targetDir.baseDir))
   112  	}
   113  	i.globalCacheDir = cacheDir
   114  }
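        // A hedged usage sketch; pluginCacheDir is a hypothetical *Dir for a
        // shared, user-level plugin cache and must not be the same directory as
        // the installer's target directory (the call panics if it is):
        //
        //	inst.SetGlobalCacheDir(pluginCacheDir)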
   115  
   116  // SetGlobalCacheDirMayBreakDependencyLockFile activates or deactivates our
   117  // temporary exception to the rule that the global cache directory can be used
   118  // only when entries are confirmed by existing entries in the dependency lock
   119  // file.
   120  //
   121  // If this is set and we install a provider for the first time from the
   122  // cache, the dependency lock file will include only the checksum from
   123  // the package in the global cache, which means the lock file won't be portable
   124  // to Terraform running on another operating system or CPU architecture.
   125  func (i *Installer) SetGlobalCacheDirMayBreakDependencyLockFile(mayBreak bool) {
   126  	i.globalCacheDirMayBreakDependencyLockFile = mayBreak
   127  }
   128  
   129  // HasGlobalCacheDir returns true if someone has previously called
   130  // SetGlobalCacheDir to configure a global cache directory for this installer.
   131  func (i *Installer) HasGlobalCacheDir() bool {
   132  	return i.globalCacheDir != nil
   133  }
   134  
   135  // SetBuiltInProviderTypes tells the receiver to consider the type names in the
   136  // given slice to be valid as providers in the special
   137  // terraform.io/builtin/... namespace that we use for providers that are
   138  // built in to Terraform and thus do not need a separate installation step.
   139  //
   140  // If a caller requests installation of a provider in that namespace, the
   141  // installer will treat it as a no-op if its name exists in this list, but
   142  // will produce an error if it does not.
   143  //
   144  // The default, if this method isn't called, is for there to be no valid
   145  // builtin providers.
   146  //
   147  // Do not modify the buffer under the given slice after passing it to this
   148  // method.
   149  func (i *Installer) SetBuiltInProviderTypes(types []string) {
   150  	i.builtInProviderTypes = types
   151  }
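        // Illustrative call, assuming the caller wants to allow only Terraform's
        // built-in "terraform" provider (terraform.io/builtin/terraform):
        //
        //	inst.SetBuiltInProviderTypes([]string{"terraform"})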
   152  
   153  // SetUnmanagedProviderTypes tells the receiver to consider the providers
   154  // indicated by the passed addrs.Providers as unmanaged. Terraform does not
   155  // need to control the lifecycle of these providers, and they are assumed to be
   156  // running already when Terraform is started. Because these are essentially
   157  // processes, not binaries, Terraform will not do any work to ensure their
   158  // presence or versioning.
   159  func (i *Installer) SetUnmanagedProviderTypes(types map[addrs.Provider]struct{}) {
   160  	i.unmanagedProviderTypes = types
   161  }
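        // Illustrative call; providerAddr is a hypothetical addrs.Provider whose
        // plugin process is assumed to be started and managed outside Terraform:
        //
        //	inst.SetUnmanagedProviderTypes(map[addrs.Provider]struct{}{
        //		providerAddr: {},
        //	})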
   162  
   163  // EnsureProviderVersions compares the given provider requirements with what
   164  // is already available in the installer's target directory and then takes
   165  // appropriate installation actions to ensure that suitable packages
   166  // are available in the target cache directory.
   167  //
   168  // The given mode modifies how the operation will treat providers that already
   169  // have acceptable versions available in the target cache directory. See the
   170  // documentation for InstallMode and the InstallMode values for more
   171  // information.
   172  //
   173  // The given context can be used to cancel the overall installation operation
   174  // (causing any operations in progress to fail with an error), and can also
   175  // include an InstallerEvents value for optional intermediate progress
   176  // notifications.
   177  //
   178  // If a given InstallerEvents subscribes to notifications about installation
   179  // failures then those notifications will be redundant with the ones included
   180  // in the final returned error value, so callers should show one or the
   181  // other, not both.
   182  func (i *Installer) EnsureProviderVersions(ctx context.Context, locks *depsfile.Locks, reqs getproviders.Requirements, mode InstallMode) (*depsfile.Locks, error) {
   183  	errs := map[addrs.Provider]error{}
   184  	evts := installerEventsForContext(ctx)
   185  
   186  	// We'll work with a copy of the given locks, so we can modify it and
   187  	// return the updated locks without affecting the caller's object.
   188  	// We'll add, replace, or remove locks in here during our work so that the
   189  	// final locks file reflects what the installer has selected.
   190  	locks = locks.DeepCopy()
   191  
   192  	if cb := evts.PendingProviders; cb != nil {
   193  		cb(reqs)
   194  	}
   195  
   196  	// Step 1: Which providers might we need to fetch a new version of?
   197  	// This produces the subset of requirements we need to ask the provider
   198  	// source about. If we're in the normal (non-upgrade) mode then we'll
   199  	// just ask the source to confirm the continued existence of what
   200  	// was locked; otherwise we'll find the newest version matching the
   201  	// configured version constraint.
   202  	mightNeed := map[addrs.Provider]getproviders.VersionSet{}
   203  	locked := map[addrs.Provider]bool{}
   204  	for provider, versionConstraints := range reqs {
   205  		if provider.IsBuiltIn() {
   206  			// Built-in providers do not require installation, but we'll still
   207  			// verify that the requested provider name is valid.
   208  			valid := false
   209  			for _, name := range i.builtInProviderTypes {
   210  				if name == provider.Type {
   211  					valid = true
   212  					break
   213  				}
   214  			}
   215  			var err error
   216  			if valid {
   217  				if len(versionConstraints) == 0 {
   218  					// Other than reporting an event for the outcome of this
   219  					// provider, we'll do nothing else with it: it's just
   220  					// automatically available for use.
   221  					if cb := evts.BuiltInProviderAvailable; cb != nil {
   222  						cb(provider)
   223  					}
   224  				} else {
   225  					// A built-in provider is not permitted to have an explicit
   226  					// version constraint, because we can only use the version
   227  					// that is built in to the current Terraform release.
   228  					err = fmt.Errorf("built-in providers do not support explicit version constraints")
   229  				}
   230  			} else {
   231  				err = fmt.Errorf("this Terraform release has no built-in provider named %q", provider.Type)
   232  			}
   233  			if err != nil {
   234  				errs[provider] = err
   235  				if cb := evts.BuiltInProviderFailure; cb != nil {
   236  					cb(provider, err)
   237  				}
   238  			}
   239  			continue
   240  		}
   241  		if _, ok := i.unmanagedProviderTypes[provider]; ok {
   242  			// unmanaged providers do not require installation
   243  			continue
   244  		}
   245  		acceptableVersions := versions.MeetingConstraints(versionConstraints)
   246  		if !mode.forceQueryAllProviders() {
   247  			// If we're not forcing potential changes of version then an
   248  			// existing selection from the lock file takes priority over
   249  			// the currently-configured version constraints.
   250  			if lock := locks.Provider(provider); lock != nil {
   251  				if !acceptableVersions.Has(lock.Version()) {
   252  					err := fmt.Errorf(
   253  						"locked provider %s %s does not match configured version constraint %s; must use terraform init -upgrade to allow selection of new versions",
   254  						provider, lock.Version(), getproviders.VersionConstraintsString(versionConstraints),
   255  					)
   256  					errs[provider] = err
   257  					// This is a funny case where we're returning an error
   258  					// before we do any querying at all. To keep the event
   259  					// stream consistent without introducing an extra event
   260  					// type, we'll emit an artificial QueryPackagesBegin for
   261  					// this provider before we indicate that it failed using
   262  					// QueryPackagesFailure.
   263  					if cb := evts.QueryPackagesBegin; cb != nil {
   264  						cb(provider, versionConstraints, true)
   265  					}
   266  					if cb := evts.QueryPackagesFailure; cb != nil {
   267  						cb(provider, err)
   268  					}
   269  					continue
   270  				}
   271  				acceptableVersions = versions.Only(lock.Version())
   272  				locked[provider] = true
   273  			}
   274  		}
   275  		mightNeed[provider] = acceptableVersions
   276  	}
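        	// (Illustration of the result, using a hypothetical provider: for a
        	// requirement like hashicorp/aws with constraint ">= 4.0" and no lock
        	// entry, mightNeed maps that provider to all versions meeting the
        	// constraint; if the lock file already selects 4.67.0 then mightNeed
        	// maps it to versions.Only(4.67.0) and locked[provider] is true.)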
   277  
   278  	// Step 2: Query the provider source for each of the providers we selected
   279  	// in the first step and select the latest available version that is
   280  	// in the set of acceptable versions.
   281  	//
   282  	// This produces a set of packages to install to our cache in the next step.
   283  	need := map[addrs.Provider]getproviders.Version{}
   284  NeedProvider:
   285  	for provider, acceptableVersions := range mightNeed {
   286  		if err := ctx.Err(); err != nil {
   287  			// If our context has been cancelled or reached a timeout then
   288  			// we'll abort early, because subsequent operations against
   289  			// that context will fail immediately anyway.
   290  			return nil, err
   291  		}
   292  
   293  		if cb := evts.QueryPackagesBegin; cb != nil {
   294  			cb(provider, reqs[provider], locked[provider])
   295  		}
   296  		available, warnings, err := i.source.AvailableVersions(ctx, provider)
   297  		if err != nil {
   298  			// TODO: Consider retrying a few times for certain types of
   299  			// source errors that seem likely to be transient.
   300  			errs[provider] = err
   301  			if cb := evts.QueryPackagesFailure; cb != nil {
   302  				cb(provider, err)
   303  			}
   304  			// We will take no further actions for this provider.
   305  			continue
   306  		}
   307  		if len(warnings) > 0 {
   308  			if cb := evts.QueryPackagesWarning; cb != nil {
   309  				cb(provider, warnings)
   310  			}
   311  		}
   312  		available.Sort()                           // put the versions in increasing order of precedence
   313  		for i := len(available) - 1; i >= 0; i-- { // walk backwards to consider newer versions first
   314  			if acceptableVersions.Has(available[i]) {
   315  				need[provider] = available[i]
   316  				if cb := evts.QueryPackagesSuccess; cb != nil {
   317  					cb(provider, available[i])
   318  				}
   319  				continue NeedProvider
   320  			}
   321  		}
   322  		// If we get here then the source has no packages that meet the given
   323  		// version constraint, which we model as a query error.
   324  		if locked[provider] {
   325  			// This situation should be a rare one: it suggests that a
   326  			// version was previously available but was yanked for some
   327  			// reason.
   328  			lock := locks.Provider(provider)
   329  			err = fmt.Errorf("the previously-selected version %s is no longer available", lock.Version())
   330  		} else {
   331  			err = fmt.Errorf("no available releases match the given constraints %s", getproviders.VersionConstraintsString(reqs[provider]))
   332  		}
   333  		errs[provider] = err
   334  		if cb := evts.QueryPackagesFailure; cb != nil {
   335  			cb(provider, err)
   336  		}
   337  	}
   338  
   339  	// Step 3: For each provider version we've decided we need to install,
   340  	// install its package into our target cache (possibly via the global cache).
   341  	authResults := map[addrs.Provider]*getproviders.PackageAuthenticationResult{} // record auth results for all successfully fetched providers
   342  	targetPlatform := i.targetDir.targetPlatform                                  // we inherit this to behave correctly in unit tests
   343  	for provider, version := range need {
   344  		if err := ctx.Err(); err != nil {
   345  			// If our context has been cancelled or reached a timeout then
   346  			// we'll abort early, because subsequent operations against
   347  			// that context will fail immediately anyway.
   348  			return nil, err
   349  		}
   350  
   351  		lock := locks.Provider(provider)
   352  		var preferredHashes []getproviders.Hash
   353  		if lock != nil && lock.Version() == version { // hash changes are expected if the version is also changing
   354  			preferredHashes = lock.PreferredHashes()
   355  		}
   356  
   357  		// If our target directory already has the provider version that fulfills the lock file, carry on
   358  		if installed := i.targetDir.ProviderVersion(provider, version); installed != nil {
   359  			if len(preferredHashes) > 0 {
   360  				if matches, _ := installed.MatchesAnyHash(preferredHashes); matches {
   361  					if cb := evts.ProviderAlreadyInstalled; cb != nil {
   362  						cb(provider, version)
   363  					}
   364  					continue
   365  				}
   366  			}
   367  		}
   368  
   369  		if i.globalCacheDir != nil {
   370  			// Step 3a: If our global cache already has this version available then
   371  			// we'll just link it in.
   372  			if cached := i.globalCacheDir.ProviderVersion(provider, version); cached != nil {
   373  				// An existing cache entry is only an acceptable choice
   374  				// if there is already a lock file entry for this provider
   375  				// and the cache entry matches its checksums.
   376  				//
   377  				// If there was no lock file entry at all then we need to
   378  				// install the package for real so that we can lock as complete
   379  				// as possible a set of checksums for all of this provider's
   380  				// packages.
   381  				//
   382  				// If there was a lock file entry but the cache doesn't match
   383  				// it then we assume that the lock file checksums were only
   384  				// partially populated (e.g. from a local mirror where we can
   385  				// only see one package to checksum) and so we'll fetch
   386  				// from upstream to see if the origin can give us a package
   387  				// that _does_ match. This might still not work out, but if
   388  				// it does then it allows us to avoid returning a checksum
   389  				// mismatch error.
   390  				acceptablePackage := false
   391  				if len(preferredHashes) != 0 {
   392  					var err error
   393  					acceptablePackage, err = cached.MatchesAnyHash(preferredHashes)
   394  					if err != nil {
   395  						// If we can't calculate the checksum for the cached
   396  						// package then we'll just treat it as a checksum failure.
   397  						acceptablePackage = false
   398  					}
   399  				}
   400  
   401  				if !acceptablePackage && i.globalCacheDirMayBreakDependencyLockFile {
   402  					// The "may break dependency lock file" setting effectively
   403  					// means that we'll accept a package for this provider version
   404  					// that's already in the cache, regardless of whether it matches
   405  					// what's in the dependency lock file.
   406  					//
   407  					// That means two less-ideal situations might occur:
   408  					// - If this provider is not currently tracked in the lock
   409  					//   file at all then after installation the lock file will
   410  					//   only accept the package that was already present in
   411  					//   the cache as a valid checksum. That means the generated
   412  					//   lock file won't be portable to other operating systems
   413  					//   or CPU architectures.
   414  					// - If the provider _is_ currently tracked in the lock file
   415  					//   but the checksums there don't match what was in the
   416  					//   cache then the LinkFromOtherCache call below will
   417  					//   fail with a checksum error, and the user will need to
   418  					//   either manually remove the entry from the lock file
   419  					//   or remove the mismatching item from the cache,
   420  					//   depending on which of these they prefer to use as the
   421  					//   source of truth for the expected contents of the
   422  					//   package.
   423  					//
   424  					// If the lock file already includes this provider and the
   425  					// cache entry matches one of the locked checksums then
   426  					// there's no problem, but in that case we wouldn't enter
   427  					// this branch because acceptablePackage would already be
   428  					// true from the check above.
   429  					log.Printf(
   430  						"[WARN] plugin_cache_may_break_dependency_lock_file: Using global cache dir package for %s v%s even though it doesn't match this configuration's dependency lock file",
   431  						provider.String(), version.String(),
   432  					)
   433  					acceptablePackage = true
   434  				}
   435  
   436  				// TODO: Should we emit an event through the events object
   437  				// for "there was an entry in the cache but we ignored it
   438  				// because the checksum didn't match"? We can't use
   439  				// LinkFromCacheFailure in that case because this isn't a
   440  				// failure. For now we'll just be quiet about it.
   441  
   442  				if acceptablePackage {
   443  					if cb := evts.LinkFromCacheBegin; cb != nil {
   444  						cb(provider, version, i.globalCacheDir.baseDir)
   445  					}
   446  					if _, err := cached.ExecutableFile(); err != nil {
   447  						err := fmt.Errorf("provider binary not found: %s", err)
   448  						errs[provider] = err
   449  						if cb := evts.LinkFromCacheFailure; cb != nil {
   450  							cb(provider, version, err)
   451  						}
   452  						continue
   453  					}
   454  
   455  					err := i.targetDir.LinkFromOtherCache(cached, preferredHashes)
   456  					if err != nil {
   457  						errs[provider] = err
   458  						if cb := evts.LinkFromCacheFailure; cb != nil {
   459  							cb(provider, version, err)
   460  						}
   461  						continue
   462  					}
   463  					// We'll fetch what we just linked to make sure it actually
   464  					// did show up there.
   465  					new := i.targetDir.ProviderVersion(provider, version)
   466  					if new == nil {
   467  						err := fmt.Errorf("after linking %s from provider cache at %s it is still not detected in the target directory; this is a bug in Terraform", provider, i.globalCacheDir.baseDir)
   468  						errs[provider] = err
   469  						if cb := evts.LinkFromCacheFailure; cb != nil {
   470  							cb(provider, version, err)
   471  						}
   472  						continue
   473  					}
   474  
   475  					// The LinkFromOtherCache call above should've verified that
   476  					// the package matches one of the hashes previously recorded,
   477  					// if any. We'll now augment those hashes with one freshly
   478  					// calculated from the package we just linked, which allows
   479  					// the lock file to gradually transition to recording newer hash
   480  					// schemes when they become available.
   481  					var priorHashes []getproviders.Hash
   482  					if lock != nil && lock.Version() == version {
   483  						// If the version we're installing is identical to the
   484  						// one we previously locked then we'll keep all of the
   485  						// hashes we saved previously and add to them. Otherwise
   486  						// we'll be starting fresh, because each version has its
   487  						// own set of packages and thus its own hashes.
   488  						priorHashes = append(priorHashes, preferredHashes...)
   489  
   490  						// NOTE: The behavior here is unfortunate when a particular
   491  						// provider version was already cached on the first time
   492  						// the current configuration requested it, because that
   493  						// means we don't currently get the opportunity to fetch
   494  						// and verify the checksums for the new package from
   495  						// upstream. That's currently unavoidable because upstream
   496  						// checksums are in the "ziphash" format and so we can't
   497  						// verify them against our cache directory's unpacked
   498  						// packages: we'd need to go fetch the package from the
   499  						// origin and compare against it, which would defeat the
   500  						// purpose of the global cache.
   501  						//
   502  						// If we fetch from upstream on the first encounter with
   503  						// a particular provider then we'll end up in the other
   504  						// codepath below where we're able to also include the
   505  						// checksums from the origin registry.
   506  					}
   507  					newHash, err := cached.Hash()
   508  					if err != nil {
   509  						err := fmt.Errorf("after linking %s from provider cache at %s, failed to compute a checksum for it: %s", provider, i.globalCacheDir.baseDir, err)
   510  						errs[provider] = err
   511  						if cb := evts.LinkFromCacheFailure; cb != nil {
   512  							cb(provider, version, err)
   513  						}
   514  						continue
   515  					}
   516  					// The hashes slice gets deduplicated in the lock file
   517  					// implementation, so we don't worry about potentially
   518  					// creating a duplicate here.
   519  					var newHashes []getproviders.Hash
   520  					newHashes = append(newHashes, priorHashes...)
   521  					newHashes = append(newHashes, newHash)
   522  					locks.SetProvider(provider, version, reqs[provider], newHashes)
   523  					if cb := evts.ProvidersLockUpdated; cb != nil {
   524  						// We want to ensure that newHash and priorHashes are
   525  						// sorted. newHash is a single value, so it's definitely
   526  						// sorted. priorHashes are pulled from the lock file, so
   527  						// are also already sorted.
   528  						cb(provider, version, []getproviders.Hash{newHash}, nil, priorHashes)
   529  					}
   530  
   531  					if cb := evts.LinkFromCacheSuccess; cb != nil {
   532  						cb(provider, version, new.PackageDir)
   533  					}
   534  					continue // Don't need to do full install, then.
   535  				}
   536  			}
   537  		}
   538  
   539  		// Step 3b: Get the package metadata for the selected version from our
   540  		// provider source.
   541  		//
   542  		// This is the step where we might detect and report that the provider
   543  		// isn't available for the current platform.
   544  		if cb := evts.FetchPackageMeta; cb != nil {
   545  			cb(provider, version)
   546  		}
   547  		meta, err := i.source.PackageMeta(ctx, provider, version, targetPlatform)
   548  		if err != nil {
   549  			errs[provider] = err
   550  			if cb := evts.FetchPackageFailure; cb != nil {
   551  				cb(provider, version, err)
   552  			}
   553  			continue
   554  		}
   555  
   556  		// Step 3c: Retrieve the package indicated by the metadata we received,
   557  		// either directly into our target directory or via the global cache
   558  		// directory.
   559  		if cb := evts.FetchPackageBegin; cb != nil {
   560  			cb(provider, version, meta.Location)
   561  		}
   562  		var installTo, linkTo *Dir
   563  		if i.globalCacheDir != nil {
   564  			installTo = i.globalCacheDir
   565  			linkTo = i.targetDir
   566  		} else {
   567  			installTo = i.targetDir
   568  			linkTo = nil // no linking needed
   569  		}
   570  
   571  		allowedHashes := preferredHashes
   572  		if mode.forceInstallChecksums() {
   573  			allowedHashes = []getproviders.Hash{}
   574  		}
   575  
   576  		authResult, err := installTo.InstallPackage(ctx, meta, allowedHashes)
   577  		if err != nil {
   578  			// TODO: Consider retrying for certain kinds of error that seem
   579  			// likely to be transient. For now, we just treat all errors equally.
   580  			errs[provider] = err
   581  			if cb := evts.FetchPackageFailure; cb != nil {
   582  				cb(provider, version, err)
   583  			}
   584  			continue
   585  		}
   586  		new := installTo.ProviderVersion(provider, version)
   587  		if new == nil {
   588  			err := fmt.Errorf("after installing %s it is still not detected in %s; this is a bug in Terraform", provider, installTo.BasePath())
   589  			errs[provider] = err
   590  			if cb := evts.FetchPackageFailure; cb != nil {
   591  				cb(provider, version, err)
   592  			}
   593  			continue
   594  		}
   595  		if _, err := new.ExecutableFile(); err != nil {
   596  			err := fmt.Errorf("provider binary not found: %s", err)
   597  			errs[provider] = err
   598  			if cb := evts.FetchPackageFailure; cb != nil {
   599  				cb(provider, version, err)
   600  			}
   601  			continue
   602  		}
   603  		if linkTo != nil {
   604  			// We skip emitting the "LinkFromCache..." events here because
   605  			// it's simpler for the caller to treat them as mutually exclusive.
   606  			// We can just subsume the linking step under the "FetchPackage..."
   607  			// series here (and that's why we use FetchPackageFailure below).
   608  			// We also don't do a hash check here because we already did that
   609  			// as part of the installTo.InstallPackage call above.
   610  			err := linkTo.LinkFromOtherCache(new, nil)
   611  			if err != nil {
   612  				errs[provider] = err
   613  				if cb := evts.FetchPackageFailure; cb != nil {
   614  					cb(provider, version, err)
   615  				}
   616  				continue
   617  			}
   618  
   619  			// We should now also find the package in the linkTo dir, which
   620  			// gives us the final value of "new" where the path points in to
   621  		// gives us the final value of "new" where the path points into
   622  			// cache directory.
   623  			new = linkTo.ProviderVersion(provider, version)
   624  			if new == nil {
   625  				err := fmt.Errorf("after installing %s it is still not detected in %s; this is a bug in Terraform", provider, linkTo.BasePath())
   626  				errs[provider] = err
   627  				if cb := evts.FetchPackageFailure; cb != nil {
   628  					cb(provider, version, err)
   629  				}
   630  				continue
   631  			}
   632  			if _, err := new.ExecutableFile(); err != nil {
   633  				err := fmt.Errorf("provider binary not found: %s", err)
   634  				errs[provider] = err
   635  				if cb := evts.FetchPackageFailure; cb != nil {
   636  					cb(provider, version, err)
   637  				}
   638  				continue
   639  			}
   640  		}
   641  		authResults[provider] = authResult
   642  
   643  		// The InstallPackage call above should've verified that
   644  		// the package matches one of the hashes previously recorded,
   645  		// if any. We'll now augment those hashes with a new set populated
   646  		// with the hashes returned by the upstream source and from the
   647  		// package we've just installed, which allows the lock file to
   648  		// gradually transition to newer hash schemes when they become
   649  		// available.
   650  		//
   651  		// This is assuming that if a package matches both a hash we saw before
   652  		// _and_ a new hash then the new hash is a valid substitute for
   653  		// the previous hash.
   654  		//
   655  		// The hashes slice gets deduplicated in the lock file
   656  		// implementation, so we don't worry about potentially
   657  		// creating duplicates here.
   658  		var priorHashes []getproviders.Hash
   659  		if lock != nil && lock.Version() == version {
   660  			// If the version we're installing is identical to the
   661  			// one we previously locked then we'll keep all of the
   662  			// hashes we saved previously and add to them. Otherwise
   663  			// we'll be starting fresh, because each version has its
   664  			// own set of packages and thus its own hashes.
   665  			priorHashes = append(priorHashes, preferredHashes...)
   666  		}
   667  		newHash, err := new.Hash()
   668  		if err != nil {
   669  			err := fmt.Errorf("after installing %s, failed to compute a checksum for it: %s", provider, err)
   670  			errs[provider] = err
   671  			if cb := evts.FetchPackageFailure; cb != nil {
   672  				cb(provider, version, err)
   673  			}
   674  			continue
   675  		}
   676  
   677  		var signedHashes []getproviders.Hash
   678  		if authResult.SignedByAnyParty() {
   679  			// We'll trust new hashes from upstream only if they were verified
   680  			// as signed by a suitable key. Otherwise, we'd record only
   681  			// a new hash we just calculated ourselves from the bytes on disk,
   682  			// and so the hashes would cover only the current platform.
   683  			signedHashes = append(signedHashes, meta.AcceptableHashes()...)
   684  		}
   685  
   686  		var newHashes []getproviders.Hash
   687  		newHashes = append(newHashes, newHash)
   688  		newHashes = append(newHashes, priorHashes...)
   689  		newHashes = append(newHashes, signedHashes...)
   690  
   691  		locks.SetProvider(provider, version, reqs[provider], newHashes)
   692  		if cb := evts.ProvidersLockUpdated; cb != nil {
   693  			// newHash and priorHashes are already sorted.
   694  			// But we do need to sort signedHashes so we can reason about it
   695  			// sensibly.
   696  			sort.Slice(signedHashes, func(i, j int) bool {
   697  				return string(signedHashes[i]) < string(signedHashes[j])
   698  			})
   699  
   700  			cb(provider, version, []getproviders.Hash{newHash}, signedHashes, priorHashes)
   701  		}
   702  
   703  		if cb := evts.FetchPackageSuccess; cb != nil {
   704  			cb(provider, version, new.PackageDir, authResult)
   705  		}
   706  	}
   707  
   708  	// Emit final event for fetching if any were successfully fetched
   709  	if cb := evts.ProvidersFetched; cb != nil && len(authResults) > 0 {
   710  		cb(authResults)
   711  	}
   712  
   713  	// Finally, if the lock structure contains locks for any providers that
   714  	// are no longer needed by this configuration, we'll remove them. This
   715  	// is important because we will not have installed those providers
   716  	// above and so a lock file still containing them would make the working
   717  	// directory invalid: not every provider in the lock file is available
   718  	// for use.
   719  	for providerAddr := range locks.AllProviders() {
   720  		if _, ok := reqs[providerAddr]; !ok {
   721  			locks.RemoveProvider(providerAddr)
   722  		}
   723  	}
   724  
   725  	if len(errs) > 0 {
   726  		return locks, InstallerError{
   727  			ProviderErrors: errs,
   728  		}
   729  	}
   730  	return locks, nil
   731  }
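        // A hedged end-to-end sketch of calling EnsureProviderVersions; ctx,
        // priorLocks (read from the dependency lock file), and reqs (derived from
        // the configuration) are hypothetical values assumed to exist already:
        //
        //	newLocks, err := inst.EnsureProviderVersions(ctx, priorLocks, reqs, InstallNewProvidersOnly)
        //	if err != nil {
        //		// err may be an InstallerError carrying one error per provider;
        //		// see the InstallerError type below.
        //	}
        //	_ = newLocks // typically persisted back to the dependency lock file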
   732  
   733  // InstallMode customizes the details of how an install operation treats
   734  // providers that have versions already cached in the target directory.
   735  type InstallMode rune
   736  
   737  const (
   738  	// InstallNewProvidersOnly is an InstallMode that causes the installer
   739  	// to accept any existing version of a requested provider that is already
   740  	// cached as long as it's in the given version sets, without checking
   741  	// whether new versions are available that are also in the given version
   742  	// sets.
   743  	InstallNewProvidersOnly InstallMode = 'N'
   744  
   745  	// InstallNewProvidersForce is an InstallMode that follows the same
   746  	// logic as InstallNewProvidersOnly except that it does not verify existing
   747  	// checksums, instead recording fresh checksums for all given providers.
   748  	InstallNewProvidersForce InstallMode = 'F'
   749  
   750  	// InstallUpgrades is an InstallMode that causes the installer to check
   751  	// all requested providers to see if new versions are available that
   752  	// are also in the given version sets, even if a suitable version of
   753  	// a given provider is already available.
   754  	InstallUpgrades InstallMode = 'U'
   755  )
   756  
   757  func (m InstallMode) forceQueryAllProviders() bool {
   758  	return m == InstallUpgrades
   759  }
   760  
   761  func (m InstallMode) forceInstallChecksums() bool {
   762  	return m == InstallNewProvidersForce
   763  }
   764  
   765  // InstallerError is an error type that may be returned (but is not guaranteed)
   766  // from Installer.EnsureProviderVersions to indicate potentially several
   767  // separate failed installation outcomes for different providers included in
   768  // the overall request.
   769  type InstallerError struct {
   770  	ProviderErrors map[addrs.Provider]error
   771  }
   772  
   773  func (err InstallerError) Error() string {
   774  	addrs := make([]addrs.Provider, 0, len(err.ProviderErrors))
   775  	for addr := range err.ProviderErrors {
   776  		addrs = append(addrs, addr)
   777  	}
   778  	sort.Slice(addrs, func(i, j int) bool {
   779  		return addrs[i].LessThan(addrs[j])
   780  	})
   781  	var b strings.Builder
   782  	b.WriteString("some providers could not be installed:\n")
   783  	for _, addr := range addrs {
   784  		providerErr := err.ProviderErrors[addr]
   785  		fmt.Fprintf(&b, "- %s: %s\n", addr, providerErr)
   786  	}
   787  	return strings.TrimSpace(b.String())
   788  }
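        // A sketch of unpacking the per-provider errors with the standard errors
        // package, assuming err was returned by EnsureProviderVersions:
        //
        //	var instErr InstallerError
        //	if errors.As(err, &instErr) {
        //		for provider, providerErr := range instErr.ProviderErrors {
        //			log.Printf("[ERROR] failed to install %s: %s", provider, providerErr)
        //		}
        //	}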