golang.org/x/tools/gopls@v0.15.3/internal/server/diagnostics.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package server
     6  
     7  import (
     8  	"context"
     9  	"crypto/sha256"
    10  	"errors"
    11  	"fmt"
    12  	"os"
    13  	"path/filepath"
    14  	"runtime"
    15  	"sort"
    16  	"strings"
    17  	"sync"
    18  	"time"
    19  
    20  	"golang.org/x/tools/gopls/internal/cache"
    21  	"golang.org/x/tools/gopls/internal/cache/metadata"
    22  	"golang.org/x/tools/gopls/internal/file"
    23  	"golang.org/x/tools/gopls/internal/golang"
    24  	"golang.org/x/tools/gopls/internal/mod"
    25  	"golang.org/x/tools/gopls/internal/protocol"
    26  	"golang.org/x/tools/gopls/internal/settings"
    27  	"golang.org/x/tools/gopls/internal/template"
    28  	"golang.org/x/tools/gopls/internal/util/maps"
    29  	"golang.org/x/tools/gopls/internal/work"
    30  	"golang.org/x/tools/internal/event"
    31  	"golang.org/x/tools/internal/event/keys"
    32  	"golang.org/x/tools/internal/event/tag"
    33  )
    34  
    35  // fileDiagnostics holds the current state of published diagnostics for a file.
    36  type fileDiagnostics struct {
    37  	publishedHash file.Hash // hash of the last set of diagnostics published for this URI
    38  	mustPublish   bool      // if set, publish diagnostics even if they haven't changed
    39  
    40  	// Orphaned file diagnostics are not necessarily associated with any *View
    41  	// (since they are orphaned). Instead, keep track of the modification ID at
    42  	// which they were orphaned (see server.lastModificationID).
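         	//
         	// For example (illustrative): a .go file that is open in the editor but
         	// not contained in any workspace module is reported via orphaned file
         	// diagnostics; recording the modification ID lets results from an older
         	// modification be recognized as stale (see updateOrphanedFileDiagnostics).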
    43  	orphanedAt              uint64 // modification ID at which this file was orphaned.
    44  	orphanedFileDiagnostics []*cache.Diagnostic
    45  
    46  	// Files may have their diagnostics computed by multiple views, and so
     47  	// diagnostics are organized by View. See the documentation for updateDiagnostics
     48  	// for more details about how the set of file diagnostics evolves over time.
    49  	byView map[*cache.View]viewDiagnostics
    50  }
    51  
    52  // viewDiagnostics holds a set of file diagnostics computed from a given View.
    53  type viewDiagnostics struct {
    54  	snapshot    uint64 // snapshot sequence ID
    55  	version     int32  // file version
    56  	diagnostics []*cache.Diagnostic
    57  }
    58  
    59  // common types; for brevity
    60  type (
    61  	viewSet = map[*cache.View]unit
    62  	diagMap = map[protocol.DocumentURI][]*cache.Diagnostic
    63  )
    64  
    65  // hashDiagnostic computes a hash to identify a diagnostic.
    66  // The hash is for deduplicating within a file,
    67  // so it need not incorporate d.URI.
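         //
         // For example (illustrative): if two views produce byte-for-byte identical
         // "undeclared name: x" diagnostics for the same range, both hash to the same
         // value, so publishFileDiagnosticsLocked publishes the diagnostic only once.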
    68  func hashDiagnostic(d *cache.Diagnostic) file.Hash {
    69  	h := sha256.New()
    70  	for _, t := range d.Tags {
    71  		fmt.Fprintf(h, "tag: %s\n", t)
    72  	}
    73  	for _, r := range d.Related {
    74  		fmt.Fprintf(h, "related: %s %s %s\n", r.Location.URI, r.Message, r.Location.Range)
    75  	}
    76  	fmt.Fprintf(h, "code: %s\n", d.Code)
    77  	fmt.Fprintf(h, "codeHref: %s\n", d.CodeHref)
    78  	fmt.Fprintf(h, "message: %s\n", d.Message)
    79  	fmt.Fprintf(h, "range: %s\n", d.Range)
    80  	fmt.Fprintf(h, "severity: %s\n", d.Severity)
    81  	fmt.Fprintf(h, "source: %s\n", d.Source)
    82  	if d.BundledFixes != nil {
    83  		fmt.Fprintf(h, "fixes: %s\n", *d.BundledFixes)
    84  	}
    85  	var hash [sha256.Size]byte
    86  	h.Sum(hash[:0])
    87  	return hash
    88  }
    89  
    90  func sortDiagnostics(d []*cache.Diagnostic) {
    91  	sort.Slice(d, func(i int, j int) bool {
    92  		a, b := d[i], d[j]
    93  		if r := protocol.CompareRange(a.Range, b.Range); r != 0 {
    94  			return r < 0
    95  		}
    96  		if a.Source != b.Source {
    97  			return a.Source < b.Source
    98  		}
    99  		return a.Message < b.Message
   100  	})
   101  }
   102  
   103  func (s *server) diagnoseChangedViews(ctx context.Context, modID uint64, lastChange map[*cache.View][]protocol.DocumentURI, cause ModificationSource) {
   104  	// Collect views needing diagnosis.
   105  	s.modificationMu.Lock()
   106  	needsDiagnosis := maps.Keys(s.viewsToDiagnose)
   107  	s.modificationMu.Unlock()
   108  
   109  	// Diagnose views concurrently.
   110  	var wg sync.WaitGroup
   111  	for _, v := range needsDiagnosis {
   112  		v := v
   113  		snapshot, release, err := v.Snapshot()
   114  		if err != nil {
   115  			s.modificationMu.Lock()
    116  			// The View is shut down. Unlike below, there is no need to check
    117  			// s.viewsToDiagnose[v], since the view can never be diagnosed.
   118  			delete(s.viewsToDiagnose, v)
   119  			s.modificationMu.Unlock()
   120  			continue
   121  		}
   122  
   123  		// Collect uris for fast diagnosis. We only care about the most recent
   124  		// change here, because this is just an optimization for the case where the
   125  		// user is actively editing a single file.
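         		//
         		// For example (illustrative): while the user types in foo.go, only
         		// foo.go appears in lastChange[v], so the fast pass re-checks just the
         		// package containing foo.go before the full pass runs.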
   126  		uris := lastChange[v]
   127  		if snapshot.Options().DiagnosticsTrigger == settings.DiagnosticsOnSave && cause == FromDidChange {
   128  			// The user requested to update the diagnostics only on save.
   129  			// Do not diagnose yet.
   130  			release()
   131  			continue
   132  		}
   133  
   134  		wg.Add(1)
   135  		go func(snapshot *cache.Snapshot, uris []protocol.DocumentURI) {
   136  			defer release()
   137  			defer wg.Done()
   138  			s.diagnoseSnapshot(snapshot, uris, snapshot.Options().DiagnosticsDelay)
   139  			s.modificationMu.Lock()
   140  
    141  			// Only remove v from s.viewsToDiagnose if the snapshot is not cancelled.
    142  			// A cancelled snapshot may have been cloned before its state was fully
    143  			// evaluated, in which case a change that appeared irrelevant to the
    144  			// incomplete snapshot may still require re-diagnosis.
   145  			//
   146  			// See the documentation for s.viewsToDiagnose for details.
   147  			if snapshot.BackgroundContext().Err() == nil && s.viewsToDiagnose[v] <= modID {
   148  				delete(s.viewsToDiagnose, v)
   149  			}
   150  			s.modificationMu.Unlock()
   151  		}(snapshot, uris)
   152  	}
   153  
   154  	wg.Wait()
   155  
   156  	// Diagnose orphaned files for the session.
   157  	orphanedFileDiagnostics, err := s.session.OrphanedFileDiagnostics(ctx)
   158  	if err == nil {
   159  		err = s.updateOrphanedFileDiagnostics(ctx, modID, orphanedFileDiagnostics)
   160  	}
   161  	if err != nil {
   162  		if ctx.Err() == nil {
   163  			event.Error(ctx, "warning: while diagnosing orphaned files", err)
   164  		}
   165  	}
   166  }
   167  
   168  // diagnoseSnapshot computes and publishes diagnostics for the given snapshot.
   169  //
   170  // If delay is non-zero, computing diagnostics does not start until after this
   171  // delay has expired, to allow work to be cancelled by subsequent changes.
   172  //
    173  // If changedURIs is non-empty, it is a set of recently changed files that
    174  // should be diagnosed immediately.
   176  func (s *server) diagnoseSnapshot(snapshot *cache.Snapshot, changedURIs []protocol.DocumentURI, delay time.Duration) {
   177  	ctx := snapshot.BackgroundContext()
   178  	ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", snapshot.Labels()...)
   179  	defer done()
   180  
   181  	allViews := s.session.Views()
   182  	if delay > 0 {
   183  		// 2-phase diagnostics.
   184  		//
   185  		// The first phase just parses and type-checks (but
   186  		// does not analyze) packages directly affected by
   187  		// file modifications.
   188  		//
   189  		// The second phase runs after the delay, and does everything.
   190  		//
   191  		// We wait a brief delay before the first phase, to allow higher priority
   192  		// work such as autocompletion to acquire the type checking mutex (though
   193  		// typically both diagnosing changed files and performing autocompletion
   194  		// will be doing the same work: recomputing active packages).
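         		//
         		// For illustration (hypothetical timings): with DiagnosticsDelay set
         		// to 250ms, a change is followed roughly 20ms later by the fast pass
         		// over just the changed packages, and roughly 250ms after the change
         		// by the full pass below (including analysis), unless the snapshot is
         		// cancelled first.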
   195  		const minDelay = 20 * time.Millisecond
   196  		select {
   197  		case <-time.After(minDelay):
   198  		case <-ctx.Done():
   199  			return
   200  		}
   201  
   202  		if len(changedURIs) > 0 {
   203  			diagnostics, err := s.diagnoseChangedFiles(ctx, snapshot, changedURIs)
   204  			if err != nil {
   205  				if ctx.Err() == nil {
   206  					event.Error(ctx, "warning: while diagnosing changed files", err, snapshot.Labels()...)
   207  				}
   208  				return
   209  			}
   210  			s.updateDiagnostics(ctx, allViews, snapshot, diagnostics, false)
   211  		}
   212  
   213  		if delay < minDelay {
   214  			delay = 0
   215  		} else {
   216  			delay -= minDelay
   217  		}
   218  
   219  		select {
   220  		case <-time.After(delay):
   221  		case <-ctx.Done():
   222  			return
   223  		}
   224  	}
   225  
   226  	diagnostics, err := s.diagnose(ctx, snapshot)
   227  	if err != nil {
   228  		if ctx.Err() == nil {
   229  			event.Error(ctx, "warning: while diagnosing snapshot", err, snapshot.Labels()...)
   230  		}
   231  		return
   232  	}
   233  	s.updateDiagnostics(ctx, allViews, snapshot, diagnostics, true)
   234  }
   235  
   236  func (s *server) diagnoseChangedFiles(ctx context.Context, snapshot *cache.Snapshot, uris []protocol.DocumentURI) (diagMap, error) {
   237  	ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", snapshot.Labels()...)
   238  	defer done()
   239  
   240  	toDiagnose := make(map[metadata.PackageID]*metadata.Package)
   241  	for _, uri := range uris {
   242  		// If the file is not open, don't diagnose its package.
   243  		//
   244  		// We don't care about fast diagnostics for files that are no longer open,
   245  		// because the user isn't looking at them. Also, explicitly requesting a
   246  		// package can lead to "command-line-arguments" packages if the file isn't
   247  		// covered by the current View. By avoiding requesting packages for e.g.
   248  		// unrelated file movement, we can minimize these unnecessary packages.
   249  		if !snapshot.IsOpen(uri) {
   250  			continue
   251  		}
   252  		// If the file is not known to the snapshot (e.g., if it was deleted),
   253  		// don't diagnose it.
   254  		if snapshot.FindFile(uri) == nil {
   255  			continue
   256  		}
   257  
   258  		// Don't request type-checking for builtin.go: it's not a real package.
   259  		if snapshot.IsBuiltin(uri) {
   260  			continue
   261  		}
   262  
   263  		// Don't diagnose files that are ignored by `go list` (e.g. testdata).
   264  		if snapshot.IgnoredFile(uri) {
   265  			continue
   266  		}
   267  
   268  		// Find all packages that include this file and diagnose them in parallel.
   269  		meta, err := golang.NarrowestMetadataForFile(ctx, snapshot, uri)
   270  		if err != nil {
   271  			if ctx.Err() != nil {
   272  				return nil, ctx.Err()
   273  			}
   274  			// TODO(findleyr): we should probably do something with the error here,
   275  			// but as of now this can fail repeatedly if load fails, so can be too
   276  			// noisy to log (and we'll handle things later in the slow pass).
   277  			continue
   278  		}
   279  		// golang/go#65801: only diagnose changes to workspace packages. Otherwise,
   280  		// diagnostics will be unstable, as the slow-path diagnostics will erase
   281  		// them.
   282  		if snapshot.IsWorkspacePackage(ctx, meta.ID) {
   283  			toDiagnose[meta.ID] = meta
   284  		}
   285  	}
   286  	diags, err := snapshot.PackageDiagnostics(ctx, maps.Keys(toDiagnose)...)
   287  	if err != nil {
   288  		if ctx.Err() == nil {
   289  			event.Error(ctx, "warning: diagnostics failed", err, snapshot.Labels()...)
   290  		}
   291  		return nil, err
   292  	}
   293  	// golang/go#59587: guarantee that we compute type-checking diagnostics
    294  	// for every compiled package file; otherwise, diagnostics won't be quickly
   295  	// cleared following a fix.
   296  	for _, meta := range toDiagnose {
   297  		for _, uri := range meta.CompiledGoFiles {
   298  			if _, ok := diags[uri]; !ok {
   299  				diags[uri] = nil
   300  			}
   301  		}
   302  	}
   303  	return diags, nil
   304  }
   305  
   306  func (s *server) diagnose(ctx context.Context, snapshot *cache.Snapshot) (diagMap, error) {
   307  	ctx, done := event.Start(ctx, "Server.diagnose", snapshot.Labels()...)
   308  	defer done()
   309  
   310  	// Wait for a free diagnostics slot.
   311  	// TODO(adonovan): opt: shouldn't it be the analysis implementation's
   312  	// job to de-dup and limit resource consumption? In any case this
    313  	// function spends most of its time waiting for awaitLoaded, at
   314  	// least initially.
   315  	select {
   316  	case <-ctx.Done():
   317  		return nil, ctx.Err()
   318  	case s.diagnosticsSema <- struct{}{}:
   319  	}
   320  	defer func() {
   321  		<-s.diagnosticsSema
   322  	}()
   323  
   324  	var (
   325  		diagnosticsMu sync.Mutex
   326  		diagnostics   = make(diagMap)
   327  	)
   328  	// common code for dispatching diagnostics
   329  	store := func(operation string, diagsByFile diagMap, err error) {
   330  		if err != nil {
   331  			if ctx.Err() == nil {
   332  				event.Error(ctx, "warning: while "+operation, err, snapshot.Labels()...)
   333  			}
   334  			return
   335  		}
   336  		diagnosticsMu.Lock()
   337  		defer diagnosticsMu.Unlock()
   338  		for uri, diags := range diagsByFile {
   339  			diagnostics[uri] = append(diagnostics[uri], diags...)
   340  		}
   341  	}
   342  
   343  	// Diagnostics below are organized by increasing specificity:
   344  	//  go.work > mod > mod upgrade > mod vuln > package, etc.
   345  
   346  	// Diagnose go.work file.
   347  	workReports, workErr := work.Diagnostics(ctx, snapshot)
   348  	if ctx.Err() != nil {
   349  		return nil, ctx.Err()
   350  	}
   351  	store("diagnosing go.work file", workReports, workErr)
   352  
   353  	// Diagnose go.mod file.
   354  	modReports, modErr := mod.ParseDiagnostics(ctx, snapshot)
   355  	if ctx.Err() != nil {
   356  		return nil, ctx.Err()
   357  	}
   358  	store("diagnosing go.mod file", modReports, modErr)
   359  
   360  	// Diagnose go.mod upgrades.
   361  	upgradeReports, upgradeErr := mod.UpgradeDiagnostics(ctx, snapshot)
   362  	if ctx.Err() != nil {
   363  		return nil, ctx.Err()
   364  	}
   365  	store("diagnosing go.mod upgrades", upgradeReports, upgradeErr)
   366  
   367  	// Diagnose vulnerabilities.
   368  	vulnReports, vulnErr := mod.VulnerabilityDiagnostics(ctx, snapshot)
   369  	if ctx.Err() != nil {
   370  		return nil, ctx.Err()
   371  	}
   372  	store("diagnosing vulnerabilities", vulnReports, vulnErr)
   373  
   374  	workspacePkgs, err := snapshot.WorkspaceMetadata(ctx)
   375  	if s.shouldIgnoreError(snapshot, err) {
   376  		return diagnostics, ctx.Err()
   377  	}
   378  
   379  	initialErr := snapshot.InitializationError()
   380  	if ctx.Err() != nil {
   381  		// Don't update initialization status if the context is cancelled.
   382  		return nil, ctx.Err()
   383  	}
   384  
   385  	if initialErr != nil {
   386  		store("critical error", initialErr.Diagnostics, nil)
   387  	}
   388  
   389  	// Show the error as a progress error report so that it appears in the
   390  	// status bar. If a client doesn't support progress reports, the error
   391  	// will still be shown as a ShowMessage. If there is no error, any running
   392  	// error progress reports will be closed.
   393  	statusErr := initialErr
   394  	if len(snapshot.Overlays()) == 0 {
   395  		// Don't report a hanging status message if there are no open files at this
   396  		// snapshot.
   397  		statusErr = nil
   398  	}
   399  	s.updateCriticalErrorStatus(ctx, snapshot, statusErr)
   400  
   401  	// Diagnose template (.tmpl) files.
   402  	tmplReports := template.Diagnostics(snapshot)
   403  	// NOTE(rfindley): typeCheckSource is not accurate here.
   404  	// (but this will be gone soon anyway).
   405  	store("diagnosing templates", tmplReports, nil)
   406  
   407  	// If there are no workspace packages, there is nothing to diagnose and
   408  	// there are no orphaned files.
   409  	if len(workspacePkgs) == 0 {
   410  		return diagnostics, nil
   411  	}
   412  
   413  	var wg sync.WaitGroup // for potentially slow operations below
   414  
   415  	// Maybe run go mod tidy (if it has been invalidated).
   416  	//
    417  	// Since go mod tidy can be slow, we run it concurrently with the other diagnostics.
   418  	wg.Add(1)
   419  	go func() {
   420  		defer wg.Done()
   421  		modTidyReports, err := mod.TidyDiagnostics(ctx, snapshot)
   422  		store("running go mod tidy", modTidyReports, err)
   423  	}()
   424  
   425  	// Run type checking and go/analysis diagnosis of packages in parallel.
   426  	//
   427  	// For analysis, we use the *widest* package for each open file,
   428  	// for two reasons:
   429  	//
   430  	// - Correctness: some analyzers (e.g. unusedparam) depend
   431  	//   on it. If applied to a non-test package for which a
   432  	//   corresponding test package exists, they make assumptions
   433  	//   that are falsified in the test package, for example that
   434  	//   all references to unexported symbols are visible to the
   435  	//   analysis.
   436  	//
   437  	// - Efficiency: it may yield a smaller covering set of
   438  	//   PackageIDs for a given set of files. For example, {x.go,
   439  	//   x_test.go} is covered by the single package x_test using
   440  	//   "widest". (Using "narrowest", it would be covered only by
    441  	//   the pair of packages {x, x_test}. Originally we used all
   442  	//   covering packages, so {x.go} alone would be analyzed
   443  	//   twice.)
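         	//
         	// A concrete (illustrative) case of the eviction below: if workspacePkgs
         	// contains both example.com/p and its test variant
         	// "example.com/p [example.com/p.test]" (which additionally compiles p's
         	// in-package test files), and a file of p is open, the wider test variant
         	// is kept for analysis and the narrower package is evicted from toAnalyze.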
   444  	var (
   445  		toDiagnose = make(map[metadata.PackageID]*metadata.Package)
   446  		toAnalyze  = make(map[metadata.PackageID]*metadata.Package)
   447  
   448  		// secondary index, used to eliminate narrower packages.
   449  		toAnalyzeWidest = make(map[golang.PackagePath]*metadata.Package)
   450  	)
   451  	for _, mp := range workspacePkgs {
   452  		var hasNonIgnored, hasOpenFile bool
   453  		for _, uri := range mp.CompiledGoFiles {
   454  			if !hasNonIgnored && !snapshot.IgnoredFile(uri) {
   455  				hasNonIgnored = true
   456  			}
   457  			if !hasOpenFile && snapshot.IsOpen(uri) {
   458  				hasOpenFile = true
   459  			}
   460  		}
   461  		if hasNonIgnored {
   462  			toDiagnose[mp.ID] = mp
   463  			if hasOpenFile {
   464  				if prev, ok := toAnalyzeWidest[mp.PkgPath]; ok {
   465  					if len(prev.CompiledGoFiles) >= len(mp.CompiledGoFiles) {
   466  						// Previous entry is not narrower; keep it.
   467  						continue
   468  					}
   469  					// Evict previous (narrower) entry.
   470  					delete(toAnalyze, prev.ID)
   471  				}
   472  				toAnalyze[mp.ID] = mp
   473  				toAnalyzeWidest[mp.PkgPath] = mp
   474  			}
   475  		}
   476  	}
   477  
   478  	wg.Add(1)
   479  	go func() {
   480  		defer wg.Done()
   481  		gcDetailsReports, err := s.gcDetailsDiagnostics(ctx, snapshot, toDiagnose)
   482  		store("collecting gc_details", gcDetailsReports, err)
   483  	}()
   484  
   485  	// Package diagnostics and analysis diagnostics must both be computed and
   486  	// merged before they can be reported.
   487  	var pkgDiags, analysisDiags diagMap
   488  	// Collect package diagnostics.
   489  	wg.Add(1)
   490  	go func() {
   491  		defer wg.Done()
   492  		var err error
   493  		pkgDiags, err = snapshot.PackageDiagnostics(ctx, maps.Keys(toDiagnose)...)
   494  		if err != nil {
   495  			event.Error(ctx, "warning: diagnostics failed", err, snapshot.Labels()...)
   496  		}
   497  	}()
   498  
   499  	// Get diagnostics from analysis framework.
   500  	// This includes type-error analyzers, which suggest fixes to compiler errors.
   501  	wg.Add(1)
   502  	go func() {
   503  		defer wg.Done()
   504  		var err error
   505  		// TODO(rfindley): here and above, we should avoid using the first result
   506  		// if err is non-nil (though as of today it's OK).
   507  		analysisDiags, err = golang.Analyze(ctx, snapshot, toAnalyze, s.progress)
   508  		if err != nil {
   509  			event.Error(ctx, "warning: analyzing package", err, append(snapshot.Labels(), tag.Package.Of(keys.Join(maps.Keys(toDiagnose))))...)
   510  			return
   511  		}
   512  	}()
   513  
   514  	wg.Wait()
   515  
   516  	// Merge analysis diagnostics with package diagnostics, and store the
   517  	// resulting analysis diagnostics.
   518  	for uri, adiags := range analysisDiags {
   519  		tdiags := pkgDiags[uri]
   520  		var tdiags2, adiags2 []*cache.Diagnostic
   521  		combineDiagnostics(tdiags, adiags, &tdiags2, &adiags2)
   522  		pkgDiags[uri] = tdiags2
   523  		analysisDiags[uri] = adiags2
   524  	}
   525  	store("type checking", pkgDiags, nil)           // error reported above
   526  	store("analyzing packages", analysisDiags, nil) // error reported above
   527  
   528  	return diagnostics, nil
   529  }
   530  
   531  func (s *server) gcDetailsDiagnostics(ctx context.Context, snapshot *cache.Snapshot, toDiagnose map[metadata.PackageID]*metadata.Package) (diagMap, error) {
   532  	// Process requested gc_details diagnostics.
   533  	//
   534  	// TODO(rfindley): this could be improved:
   535  	//   1. This should memoize its results if the package has not changed.
   536  	//   2. This should not even run gc_details if the package contains unsaved
   537  	//      files.
   538  	//   3. See note below about using ReadFile.
   539  	// Consider that these points, in combination with the note below about
   540  	// races, suggest that gc_details should be tracked on the Snapshot.
   541  	var toGCDetail map[metadata.PackageID]*metadata.Package
   542  	for _, mp := range toDiagnose {
   543  		if snapshot.WantGCDetails(mp.ID) {
   544  			if toGCDetail == nil {
   545  				toGCDetail = make(map[metadata.PackageID]*metadata.Package)
   546  			}
   547  			toGCDetail[mp.ID] = mp
   548  		}
   549  	}
   550  
   551  	diagnostics := make(diagMap)
   552  	for _, mp := range toGCDetail {
   553  		gcReports, err := golang.GCOptimizationDetails(ctx, snapshot, mp)
   554  		if err != nil {
   555  			event.Error(ctx, "warning: gc details", err, append(snapshot.Labels(), tag.Package.Of(string(mp.ID)))...)
   556  			continue
   557  		}
   558  		for uri, diags := range gcReports {
   559  			// TODO(rfindley): reading here should not be necessary: if a file has
   560  			// been deleted we should be notified, and diagnostics will eventually
   561  			// become consistent.
   562  			fh, err := snapshot.ReadFile(ctx, uri)
   563  			if err != nil {
   564  				return nil, err
   565  			}
   566  			// Don't publish gc details for unsaved buffers, since the underlying
   567  			// logic operates on the file on disk.
   568  			if fh == nil || !fh.SameContentsOnDisk() {
   569  				continue
   570  			}
   571  			diagnostics[uri] = append(diagnostics[uri], diags...)
   572  		}
   573  	}
   574  	return diagnostics, nil
   575  }
   576  
   577  // combineDiagnostics combines and filters list/parse/type diagnostics from
   578  // tdiags with adiags, and appends the two lists to *outT and *outA,
   579  // respectively.
   580  //
   581  // Type-error analyzers produce diagnostics that are redundant
   582  // with type checker diagnostics, but more detailed (e.g. fixes).
   583  // Rather than report two diagnostics for the same problem,
   584  // we combine them by augmenting the type-checker diagnostic
   585  // and discarding the analyzer diagnostic.
   586  //
   587  // If an analysis diagnostic has the same range and message as
   588  // a list/parse/type diagnostic, the suggested fix information
   589  // (et al) of the latter is merged into a copy of the former.
   590  // This handles the case where a type-error analyzer suggests
   591  // a fix to a type error, and avoids duplication.
   592  //
   593  // The use of out-slices, though irregular, allows the caller to
   594  // easily choose whether to keep the results separate or combined.
   595  //
   596  // The arguments are not modified.
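         //
         // For example (illustrative values): given a type-checker diagnostic
         //
         //	{Range: r, Message: "undeclared name: x"}
         //
         // and an analysis diagnostic from a type-error analyzer
         //
         //	{Range: r, Message: "undeclared name: x", SuggestedFixes: fixes}
         //
         // *outT receives a copy of the type-checker diagnostic augmented with the
         // analyzer's SuggestedFixes and Tags, and nothing is appended to *outA for
         // that diagnostic.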
   597  func combineDiagnostics(tdiags []*cache.Diagnostic, adiags []*cache.Diagnostic, outT, outA *[]*cache.Diagnostic) {
   598  
   599  	// Build index of (list+parse+)type errors.
   600  	type key struct {
   601  		Range   protocol.Range
   602  		message string
   603  	}
   604  	index := make(map[key]int) // maps (Range,Message) to index in tdiags slice
   605  	for i, diag := range tdiags {
   606  		index[key{diag.Range, diag.Message}] = i
   607  	}
   608  
   609  	// Filter out analysis diagnostics that match type errors,
   610  	// retaining their suggested fix (etc) fields.
   611  	for _, diag := range adiags {
   612  		if i, ok := index[key{diag.Range, diag.Message}]; ok {
   613  			copy := *tdiags[i]
   614  			copy.SuggestedFixes = diag.SuggestedFixes
   615  			copy.Tags = diag.Tags
   616  			tdiags[i] = &copy
   617  			continue
   618  		}
   619  
   620  		*outA = append(*outA, diag)
   621  	}
   622  
   623  	*outT = append(*outT, tdiags...)
   624  }
   625  
   626  // mustPublishDiagnostics marks the uri as needing publication, independent of
   627  // whether the published contents have changed.
   628  //
   629  // This can be used for ensuring gopls publishes diagnostics after certain file
   630  // events.
   631  func (s *server) mustPublishDiagnostics(uri protocol.DocumentURI) {
   632  	s.diagnosticsMu.Lock()
   633  	defer s.diagnosticsMu.Unlock()
   634  
   635  	if s.diagnostics[uri] == nil {
   636  		s.diagnostics[uri] = new(fileDiagnostics)
   637  	}
   638  	s.diagnostics[uri].mustPublish = true
   639  }
   640  
   641  const WorkspaceLoadFailure = "Error loading workspace"
   642  
   643  // updateCriticalErrorStatus updates the critical error progress notification
   644  // based on err.
   645  //
   646  // If err is nil, or if there are no open files, it clears any existing error
   647  // progress report.
   648  func (s *server) updateCriticalErrorStatus(ctx context.Context, snapshot *cache.Snapshot, err *cache.InitializationError) {
   649  	s.criticalErrorStatusMu.Lock()
   650  	defer s.criticalErrorStatusMu.Unlock()
   651  
   652  	// Remove all newlines so that the error message can be formatted in a
   653  	// status bar.
   654  	var errMsg string
   655  	if err != nil {
   656  		errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ")
   657  	}
   658  
   659  	if s.criticalErrorStatus == nil {
   660  		if errMsg != "" {
   661  			event.Error(ctx, "errors loading workspace", err.MainError, snapshot.Labels()...)
   662  			s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil)
   663  		}
   664  		return
   665  	}
   666  
   667  	// If an error is already shown to the user, update it or mark it as
   668  	// resolved.
   669  	if errMsg == "" {
   670  		s.criticalErrorStatus.End(ctx, "Done.")
   671  		s.criticalErrorStatus = nil
   672  	} else {
   673  		s.criticalErrorStatus.Report(ctx, errMsg, 0)
   674  	}
   675  }
   676  
   677  // updateDiagnostics records the result of diagnosing a snapshot, and publishes
   678  // any diagnostics that need to be updated on the client.
   679  //
   680  // The allViews argument should be the current set of views present in the
   681  // session, for the purposes of trimming diagnostics produced by deleted views.
   682  func (s *server) updateDiagnostics(ctx context.Context, allViews []*cache.View, snapshot *cache.Snapshot, diagnostics diagMap, final bool) {
   683  	ctx, done := event.Start(ctx, "Server.publishDiagnostics")
   684  	defer done()
   685  
   686  	s.diagnosticsMu.Lock()
   687  	defer s.diagnosticsMu.Unlock()
   688  
   689  	// Before updating any diagnostics, check that the context (i.e. snapshot
   690  	// background context) is not cancelled.
   691  	//
    692  	// If it is not cancelled, we know that we haven't started diagnosing the next snapshot,
   693  	// because the previous snapshot is cancelled before the next snapshot is
   694  	// returned from Invalidate.
   695  	//
   696  	// Therefore, even if we publish stale diagnostics here, they should
   697  	// eventually be overwritten with accurate diagnostics.
   698  	//
   699  	// TODO(rfindley): refactor the API to force that snapshots are diagnosed
   700  	// after they are created.
   701  	if ctx.Err() != nil {
   702  		return
   703  	}
   704  
   705  	viewMap := make(viewSet)
   706  	for _, v := range allViews {
   707  		viewMap[v] = unit{}
   708  	}
   709  
   710  	// updateAndPublish updates diagnostics for a file, checking both the latest
   711  	// diagnostics for the current snapshot, as well as reconciling the set of
   712  	// views.
   713  	updateAndPublish := func(uri protocol.DocumentURI, f *fileDiagnostics, diags []*cache.Diagnostic) error {
   714  		current, ok := f.byView[snapshot.View()]
   715  		// Update the stored diagnostics if:
   716  		//  1. we've never seen diagnostics for this view,
   717  		//  2. diagnostics are for an older snapshot, or
   718  		//  3. we're overwriting with final diagnostics
   719  		//
   720  		// In other words, we shouldn't overwrite existing diagnostics for a
   721  		// snapshot with non-final diagnostics. This avoids the race described at
   722  		// https://github.com/golang/go/issues/64765#issuecomment-1890144575.
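         		//
         		// For example (illustrative): if a non-final (fast-pass) result for
         		// snapshot N arrives after final diagnostics for snapshot N have
         		// already been recorded, the condition below rejects it, so the final
         		// diagnostics are not clobbered.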
   723  		if !ok || current.snapshot < snapshot.SequenceID() || (current.snapshot == snapshot.SequenceID() && final) {
   724  			fh, err := snapshot.ReadFile(ctx, uri)
   725  			if err != nil {
   726  				return err
   727  			}
   728  			current = viewDiagnostics{
   729  				snapshot:    snapshot.SequenceID(),
   730  				version:     fh.Version(),
   731  				diagnostics: diags,
   732  			}
   733  			if f.byView == nil {
   734  				f.byView = make(map[*cache.View]viewDiagnostics)
   735  			}
   736  			f.byView[snapshot.View()] = current
   737  		}
   738  
   739  		return s.publishFileDiagnosticsLocked(ctx, viewMap, uri, current.version, f)
   740  	}
   741  
   742  	seen := make(map[protocol.DocumentURI]bool)
   743  	for uri, diags := range diagnostics {
   744  		f, ok := s.diagnostics[uri]
   745  		if !ok {
   746  			f = new(fileDiagnostics)
   747  			s.diagnostics[uri] = f
   748  		}
   749  		seen[uri] = true
   750  		if err := updateAndPublish(uri, f, diags); err != nil {
   751  			if ctx.Err() != nil {
   752  				return
   753  			} else {
   754  				event.Error(ctx, "updateDiagnostics: failed to deliver diagnostics", err, tag.URI.Of(uri))
   755  			}
   756  		}
   757  	}
   758  
   759  	// TODO(rfindley): perhaps we should clean up files that have no diagnostics.
   760  	// One could imagine a large operation generating diagnostics for a great
   761  	// number of files, after which gopls has to do more bookkeeping into the
   762  	// future.
   763  	if final {
   764  		for uri, f := range s.diagnostics {
   765  			if !seen[uri] {
   766  				if err := updateAndPublish(uri, f, nil); err != nil {
   767  					if ctx.Err() != nil {
   768  						return
   769  					} else {
   770  						event.Error(ctx, "updateDiagnostics: failed to deliver diagnostics", err, tag.URI.Of(uri))
   771  					}
   772  				}
   773  			}
   774  		}
   775  	}
   776  }
   777  
   778  // updateOrphanedFileDiagnostics records and publishes orphaned file
    779  // diagnostics at a given modification ID.
   780  func (s *server) updateOrphanedFileDiagnostics(ctx context.Context, modID uint64, diagnostics diagMap) error {
   781  	views := s.session.Views()
   782  	viewSet := make(viewSet)
   783  	for _, v := range views {
   784  		viewSet[v] = unit{}
   785  	}
   786  
   787  	s.diagnosticsMu.Lock()
   788  	defer s.diagnosticsMu.Unlock()
   789  
   790  	for uri, diags := range diagnostics {
   791  		f, ok := s.diagnostics[uri]
   792  		if !ok {
   793  			f = new(fileDiagnostics)
   794  			s.diagnostics[uri] = f
   795  		}
   796  		if f.orphanedAt > modID {
   797  			continue
   798  		}
   799  		f.orphanedAt = modID
   800  		f.orphanedFileDiagnostics = diags
   801  		// TODO(rfindley): the version of this file is potentially inaccurate;
   802  		// nevertheless, it should be eventually consistent, because all
   803  		// modifications are diagnosed.
   804  		fh, err := s.session.ReadFile(ctx, uri)
   805  		if err != nil {
   806  			return err
   807  		}
   808  		if err := s.publishFileDiagnosticsLocked(ctx, viewSet, uri, fh.Version(), f); err != nil {
   809  			return err
   810  		}
   811  	}
   812  
   813  	// Clear any stale orphaned file diagnostics.
   814  	for uri, f := range s.diagnostics {
   815  		if f.orphanedAt < modID {
   816  			f.orphanedFileDiagnostics = nil
   817  		}
   818  		fh, err := s.session.ReadFile(ctx, uri)
   819  		if err != nil {
   820  			return err
   821  		}
   822  		if err := s.publishFileDiagnosticsLocked(ctx, viewSet, uri, fh.Version(), f); err != nil {
   823  			return err
   824  		}
   825  	}
   826  	return nil
   827  }
   828  
   829  // publishFileDiagnosticsLocked publishes a fileDiagnostics value, while holding s.diagnosticsMu.
   830  //
   831  // If the publication succeeds, it updates f.publishedHash and f.mustPublish.
   832  func (s *server) publishFileDiagnosticsLocked(ctx context.Context, views viewSet, uri protocol.DocumentURI, version int32, f *fileDiagnostics) error {
   833  	// We add a disambiguating suffix (e.g. " [darwin,arm64]") to
   834  	// each diagnostic that doesn't occur in the default view;
   835  	// see golang/go#65496.
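         	//
         	// For example (illustrative): on a linux/amd64 host, a diagnostic that is
         	// reported only by a view configured with GOOS=windows is published with
         	// " [windows]" appended to its message; a diagnostic that also occurs in
         	// the default view is published once, without a suffix.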
   836  	type diagSuffix struct {
   837  		diag   *cache.Diagnostic
   838  		suffix string // "" for default build (or orphans)
   839  	}
   840  
   841  	// diagSuffixes records the set of view suffixes for a given diagnostic.
   842  	diagSuffixes := make(map[file.Hash][]diagSuffix)
   843  	add := func(diag *cache.Diagnostic, suffix string) {
   844  		h := hashDiagnostic(diag)
   845  		diagSuffixes[h] = append(diagSuffixes[h], diagSuffix{diag, suffix})
   846  	}
   847  
   848  	// Construct the inverse mapping, from diagnostic (hash) to its suffixes (views).
   849  	for _, diag := range f.orphanedFileDiagnostics {
   850  		add(diag, "")
   851  	}
   852  
   853  	var allViews []*cache.View
   854  	for view, viewDiags := range f.byView {
   855  		if _, ok := views[view]; !ok {
   856  			delete(f.byView, view) // view no longer exists
   857  			continue
   858  		}
   859  		if viewDiags.version != version {
   860  			continue // a payload of diagnostics applies to a specific file version
   861  		}
   862  		allViews = append(allViews, view)
   863  	}
   864  
   865  	// Only report diagnostics from the best views for a file. This avoids
   866  	// spurious import errors when a view has only a partial set of dependencies
   867  	// for a package (golang/go#66425).
   868  	//
   869  	// It's ok to use the session to derive the eligible views, because we
   870  	// publish diagnostics following any state change, so the set of best views
   871  	// is eventually consistent.
   872  	bestViews, err := cache.BestViews(ctx, s.session, uri, allViews)
   873  	if err != nil {
   874  		return err
   875  	}
   876  
   877  	if len(bestViews) == 0 {
   878  		// If we have no preferred diagnostics for a given file (i.e., the file is
   879  		// not naturally nested within a view), then all diagnostics should be
   880  		// considered valid.
   881  		//
   882  		// This could arise if the user jumps to definition outside the workspace.
   883  		// There is no view that owns the file, so its diagnostics are valid from
   884  		// any view.
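         		//
         		// For example (illustrative): jumping to the definition of a
         		// dependency opens a file in the module cache, outside every
         		// workspace folder; no view owns it, so whichever diagnostics a view
         		// computed for it are published as-is.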
   885  		bestViews = allViews
   886  	}
   887  
   888  	for _, view := range bestViews {
   889  		viewDiags := f.byView[view]
   890  		// Compute the view's suffix (e.g. " [darwin,arm64]").
   891  		var suffix string
   892  		{
   893  			var words []string
   894  			if view.GOOS() != runtime.GOOS {
   895  				words = append(words, view.GOOS())
   896  			}
   897  			if view.GOARCH() != runtime.GOARCH {
   898  				words = append(words, view.GOARCH())
   899  			}
   900  			if len(words) > 0 {
   901  				suffix = fmt.Sprintf(" [%s]", strings.Join(words, ","))
   902  			}
   903  		}
   904  
   905  		for _, diag := range viewDiags.diagnostics {
   906  			add(diag, suffix)
   907  		}
   908  	}
   909  
   910  	// De-dup diagnostics across views by hash, and sort.
   911  	var (
   912  		hash   file.Hash
   913  		unique []*cache.Diagnostic
   914  	)
   915  	for h, items := range diagSuffixes {
   916  		// Sort the items by ascending suffix, so that the
   917  		// default view (if present) is first.
   918  		// (The others are ordered arbitrarily.)
   919  		sort.Slice(items, func(i, j int) bool {
   920  			return items[i].suffix < items[j].suffix
   921  		})
   922  
   923  		// If the diagnostic was not present in
   924  		// the default view, add the view suffix.
   925  		first := items[0]
   926  		if first.suffix != "" {
   927  			diag2 := *first.diag // shallow copy
   928  			diag2.Message += first.suffix
   929  			first.diag = &diag2
   930  			h = hashDiagnostic(&diag2) // update the hash
   931  		}
   932  
   933  		hash.XORWith(h)
   934  		unique = append(unique, first.diag)
   935  	}
   936  	sortDiagnostics(unique)
   937  
   938  	// Publish, if necessary.
   939  	if hash != f.publishedHash || f.mustPublish {
   940  		if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
   941  			Diagnostics: toProtocolDiagnostics(unique),
   942  			URI:         uri,
   943  			Version:     version,
   944  		}); err != nil {
   945  			return err
   946  		}
   947  		f.publishedHash = hash
   948  		f.mustPublish = false
   949  	}
   950  	return nil
   951  }
   952  
   953  func toProtocolDiagnostics(diagnostics []*cache.Diagnostic) []protocol.Diagnostic {
   954  	reports := []protocol.Diagnostic{}
   955  	for _, diag := range diagnostics {
   956  		pdiag := protocol.Diagnostic{
   957  			// diag.Message might start with \n or \t
   958  			Message:            strings.TrimSpace(diag.Message),
   959  			Range:              diag.Range,
   960  			Severity:           diag.Severity,
   961  			Source:             string(diag.Source),
   962  			Tags:               protocol.NonNilSlice(diag.Tags),
   963  			RelatedInformation: diag.Related,
   964  			Data:               diag.BundledFixes,
   965  		}
   966  		if diag.Code != "" {
   967  			pdiag.Code = diag.Code
   968  		}
   969  		if diag.CodeHref != "" {
   970  			pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref}
   971  		}
   972  		reports = append(reports, pdiag)
   973  	}
   974  	return reports
   975  }
   976  
   977  func (s *server) shouldIgnoreError(snapshot *cache.Snapshot, err error) bool {
   978  	if err == nil { // if there is no error at all
   979  		return false
   980  	}
   981  	if errors.Is(err, context.Canceled) {
   982  		return true
   983  	}
   984  	// If the folder has no Go code in it, we shouldn't spam the user with a warning.
   985  	// TODO(rfindley): surely it is not correct to walk the folder here just to
   986  	// suppress diagnostics, every time we compute diagnostics.
   987  	var hasGo bool
   988  	_ = filepath.Walk(snapshot.Folder().Path(), func(_ string, info os.FileInfo, err error) error {
   989  		if err != nil {
   990  			return err
   991  		}
   992  		if !strings.HasSuffix(info.Name(), ".go") {
   993  			return nil
   994  		}
   995  		hasGo = true
   996  		return errors.New("done")
   997  	})
   998  	return !hasGo
   999  }