cuelang.org/go@v0.10.1/internal/golangorgx/gopls/server/diagnostics.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package server

import (
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"cuelang.org/go/internal/golangorgx/gopls/cache"
	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
	"cuelang.org/go/internal/golangorgx/gopls/file"
	"cuelang.org/go/internal/golangorgx/gopls/golang"
	"cuelang.org/go/internal/golangorgx/gopls/protocol"
	"cuelang.org/go/internal/golangorgx/gopls/settings"
	"cuelang.org/go/internal/golangorgx/gopls/template"
	"cuelang.org/go/internal/golangorgx/gopls/util/maps"
	"cuelang.org/go/internal/golangorgx/tools/event"
	"cuelang.org/go/internal/golangorgx/tools/event/tag"
)

// fileDiagnostics holds the current state of published diagnostics for a file.
type fileDiagnostics struct {
	publishedHash file.Hash // hash of the last set of diagnostics published for this URI
	mustPublish   bool      // if set, publish diagnostics even if they haven't changed

	// Orphaned file diagnostics are not necessarily associated with any *View
	// (since they are orphaned). Instead, keep track of the modification ID at
	// which they were orphaned (see server.lastModificationID).
	orphanedAt              uint64 // modification ID at which this file was orphaned.
	orphanedFileDiagnostics []*cache.Diagnostic

	// Files may have their diagnostics computed by multiple views, and so
	// diagnostics are organized by View. See the documentation for update for more
	// details about how the set of file diagnostics evolves over time.
	byView map[*cache.View]viewDiagnostics
}

// viewDiagnostics holds a set of file diagnostics computed from a given View.
type viewDiagnostics struct {
	snapshot    uint64 // snapshot sequence ID
	version     int32  // file version
	diagnostics []*cache.Diagnostic
}

// common types; for brevity
type (
	viewSet = map[*cache.View]unit
	diagMap = map[protocol.DocumentURI][]*cache.Diagnostic
)

// hashDiagnostic computes a hash to identify a diagnostic.
func hashDiagnostic(d *cache.Diagnostic) file.Hash {
	h := sha256.New()
	for _, t := range d.Tags {
		fmt.Fprintf(h, "tag: %s\n", t)
	}
	for _, r := range d.Related {
		fmt.Fprintf(h, "related: %s %s %s\n", r.Location.URI, r.Message, r.Location.Range)
	}
	fmt.Fprintf(h, "code: %s\n", d.Code)
	fmt.Fprintf(h, "codeHref: %s\n", d.CodeHref)
	fmt.Fprintf(h, "message: %s\n", d.Message)
	fmt.Fprintf(h, "range: %s\n", d.Range)
	fmt.Fprintf(h, "severity: %s\n", d.Severity)
	fmt.Fprintf(h, "source: %s\n", d.Source)
	if d.BundledFixes != nil {
		fmt.Fprintf(h, "fixes: %s\n", *d.BundledFixes)
	}
	var hash [sha256.Size]byte
	h.Sum(hash[:0])
	return hash
}

func sortDiagnostics(d []*cache.Diagnostic) {
	sort.Slice(d, func(i int, j int) bool {
		a, b := d[i], d[j]
		if r := protocol.CompareRange(a.Range, b.Range); r != 0 {
			return r < 0
		}
		if a.Source != b.Source {
			return a.Source < b.Source
		}
		return a.Message < b.Message
	})
}

func (s *server) diagnoseChangedViews(ctx context.Context, modID uint64, lastChange map[*cache.View][]protocol.DocumentURI, cause ModificationSource) {
	// Collect views needing diagnosis.
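	// (Take the set under modificationMu, then release the lock so that the
	// potentially slow per-view diagnosis below does not run while holding it.)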
	s.modificationMu.Lock()
	needsDiagnosis := maps.Keys(s.viewsToDiagnose)
	s.modificationMu.Unlock()

	// Diagnose views concurrently.
	var wg sync.WaitGroup
	for _, v := range needsDiagnosis {
		v := v
		snapshot, release, err := v.Snapshot()
		if err != nil {
			s.modificationMu.Lock()
			// The View is shut down. Unlike below, no need to check
			// s.viewsToDiagnose[v], since the view can never be diagnosed.
			delete(s.viewsToDiagnose, v)
			s.modificationMu.Unlock()
			continue
		}

		// Collect uris for fast diagnosis. We only care about the most recent
		// change here, because this is just an optimization for the case where the
		// user is actively editing a single file.
		uris := lastChange[v]
		if snapshot.Options().DiagnosticsTrigger == settings.DiagnosticsOnSave && cause == FromDidChange {
			// The user requested to update the diagnostics only on save.
			// Do not diagnose yet.
			release()
			continue
		}

		wg.Add(1)
		go func(snapshot *cache.Snapshot, uris []protocol.DocumentURI) {
			defer release()
			defer wg.Done()
			s.diagnoseSnapshot(snapshot, uris, snapshot.Options().DiagnosticsDelay)
			s.modificationMu.Lock()

			// Only remove v from s.viewsToDiagnose if the snapshot is not cancelled.
			// This ensures that the snapshot was not cloned before its state was
			// fully evaluated, and therefore avoids missing a change that was
			// irrelevant to an incomplete snapshot.
			//
			// See the documentation for s.viewsToDiagnose for details.
			if snapshot.BackgroundContext().Err() == nil && s.viewsToDiagnose[v] <= modID {
				delete(s.viewsToDiagnose, v)
			}
			s.modificationMu.Unlock()
		}(snapshot, uris)
	}

	wg.Wait()

	// Diagnose orphaned files for the session.
	orphanedFileDiagnostics, err := s.session.OrphanedFileDiagnostics(ctx)
	if err == nil {
		err = s.updateOrphanedFileDiagnostics(ctx, modID, orphanedFileDiagnostics)
	}
	if err != nil {
		if ctx.Err() == nil {
			event.Error(ctx, "warning: while diagnosing orphaned files", err)
		}
	}
}

// diagnoseSnapshot computes and publishes diagnostics for the given snapshot.
//
// If delay is non-zero, computing diagnostics does not start until after this
// delay has expired, to allow work to be cancelled by subsequent changes.
//
// If changedURIs is non-empty, it is a set of recently changed files that
// should be diagnosed immediately.
func (s *server) diagnoseSnapshot(snapshot *cache.Snapshot, changedURIs []protocol.DocumentURI, delay time.Duration) {
	ctx := snapshot.BackgroundContext()
	ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", snapshot.Labels()...)
	defer done()

	allViews := s.session.Views()
	if delay > 0 {
		// 2-phase diagnostics.
		//
		// The first phase just parses and type-checks (but
		// does not analyze) packages directly affected by
		// file modifications.
		//
		// The second phase runs after the delay, and does everything.
		//
		// We wait a brief delay before the first phase, to allow higher priority
		// work such as autocompletion to acquire the type checking mutex (though
		// typically both diagnosing changed files and performing autocompletion
		// will be doing the same work: recomputing active packages).
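		//
		// minDelay bounds that initial pause; whatever remains of the requested
		// delay is waited again before the second, full pass below.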
		const minDelay = 20 * time.Millisecond
		select {
		case <-time.After(minDelay):
		case <-ctx.Done():
			return
		}

		if len(changedURIs) > 0 {
			diagnostics, err := s.diagnoseChangedFiles(ctx, snapshot, changedURIs)
			if err != nil {
				if ctx.Err() == nil {
					event.Error(ctx, "warning: while diagnosing changed files", err, snapshot.Labels()...)
				}
				return
			}
			s.updateDiagnostics(ctx, allViews, snapshot, diagnostics, false)
		}

		if delay < minDelay {
			delay = 0
		} else {
			delay -= minDelay
		}

		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return
		}
	}

	diagnostics, err := s.diagnose(ctx, snapshot)
	if err != nil {
		if ctx.Err() == nil {
			event.Error(ctx, "warning: while diagnosing snapshot", err, snapshot.Labels()...)
		}
		return
	}
	s.updateDiagnostics(ctx, allViews, snapshot, diagnostics, true)
}

func (s *server) diagnoseChangedFiles(ctx context.Context, snapshot *cache.Snapshot, uris []protocol.DocumentURI) (diagMap, error) {
	ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", snapshot.Labels()...)
	defer done()

	toDiagnose := make(map[metadata.PackageID]*metadata.Package)
	for _, uri := range uris {
		// If the file is not open, don't diagnose its package.
		//
		// We don't care about fast diagnostics for files that are no longer open,
		// because the user isn't looking at them. Also, explicitly requesting a
		// package can lead to "command-line-arguments" packages if the file isn't
		// covered by the current View. By avoiding requesting packages for e.g.
		// unrelated file movement, we can minimize these unnecessary packages.
		if !snapshot.IsOpen(uri) {
			continue
		}
		// If the file is not known to the snapshot (e.g., if it was deleted),
		// don't diagnose it.
		if snapshot.FindFile(uri) == nil {
			continue
		}

		// Don't request type-checking for builtin.go: it's not a real package.
		if snapshot.IsBuiltin(uri) {
			continue
		}

		// Don't diagnose files that are ignored by `go list` (e.g. testdata).
		if snapshot.IgnoredFile(uri) {
			continue
		}

		// Find all packages that include this file and diagnose them in parallel.
		meta, err := golang.NarrowestMetadataForFile(ctx, snapshot, uri)
		if err != nil {
			if ctx.Err() != nil {
				return nil, ctx.Err()
			}
			// TODO(findleyr): we should probably do something with the error here,
			// but as of now this can fail repeatedly if load fails, so can be too
			// noisy to log (and we'll handle things later in the slow pass).
			continue
		}
		toDiagnose[meta.ID] = meta
	}
	diags, err := snapshot.PackageDiagnostics(ctx, maps.Keys(toDiagnose)...)
	if err != nil {
		if ctx.Err() == nil {
			event.Error(ctx, "warning: diagnostics failed", err, snapshot.Labels()...)
		}
		return nil, err
	}
	// golang/go#59587: guarantee that we compute type-checking diagnostics
	// for every compiled package file, otherwise diagnostics won't be quickly
	// cleared following a fix.
	for _, meta := range toDiagnose {
		for _, uri := range meta.CompiledGoFiles {
			if _, ok := diags[uri]; !ok {
				diags[uri] = nil
			}
		}
	}
	return diags, nil
}

func (s *server) diagnose(ctx context.Context, snapshot *cache.Snapshot) (diagMap, error) {
	ctx, done := event.Start(ctx, "Server.diagnose", snapshot.Labels()...)
	defer done()

	// Wait for a free diagnostics slot.
	// TODO(adonovan): opt: shouldn't it be the analysis implementation's
	// job to de-dup and limit resource consumption? In any case this
	// function spends most of its time waiting for awaitLoaded, at
	// least initially.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case s.diagnosticsSema <- struct{}{}:
	}
	defer func() {
		<-s.diagnosticsSema
	}()

	var (
		diagnosticsMu sync.Mutex
		diagnostics   = make(diagMap)
	)
	// common code for dispatching diagnostics
	store := func(operation string, diagsByFile diagMap, err error) {
		if err != nil {
			if ctx.Err() == nil {
				event.Error(ctx, "warning: while "+operation, err, snapshot.Labels()...)
			}
			return
		}
		diagnosticsMu.Lock()
		defer diagnosticsMu.Unlock()
		for uri, diags := range diagsByFile {
			diagnostics[uri] = append(diagnostics[uri], diags...)
		}
	}

	// Diagnostics below are organized by increasing specificity:
	//  go.work > mod > mod upgrade > mod vuln > package, etc.

	workspacePkgs, err := snapshot.WorkspaceMetadata(ctx)
	if s.shouldIgnoreError(snapshot, err) {
		return diagnostics, ctx.Err()
	}

	initialErr := snapshot.InitializationError()
	if ctx.Err() != nil {
		// Don't update initialization status if the context is cancelled.
		return nil, ctx.Err()
	}

	if initialErr != nil {
		store("critical error", initialErr.Diagnostics, nil)
	}

	// Show the error as a progress error report so that it appears in the
	// status bar. If a client doesn't support progress reports, the error
	// will still be shown as a ShowMessage. If there is no error, any running
	// error progress reports will be closed.
	statusErr := initialErr
	if len(snapshot.Overlays()) == 0 {
		// Don't report a hanging status message if there are no open files at this
		// snapshot.
		statusErr = nil
	}
	s.updateCriticalErrorStatus(ctx, snapshot, statusErr)

	// Diagnose template (.tmpl) files.
	tmplReports := template.Diagnostics(snapshot)
	// NOTE(rfindley): typeCheckSource is not accurate here.
	// (but this will be gone soon anyway).
	store("diagnosing templates", tmplReports, nil)

	// If there are no workspace packages, there is nothing to diagnose and
	// there are no orphaned files.
	if len(workspacePkgs) == 0 {
		return diagnostics, nil
	}

	var wg sync.WaitGroup // for potentially slow operations below

	// Run type checking and go/analysis diagnosis of packages in parallel.
	//
	// For analysis, we use the *widest* package for each open file,
	// for two reasons:
	//
	// - Correctness: some analyzers (e.g. unusedparam) depend
	//   on it. If applied to a non-test package for which a
	//   corresponding test package exists, they make assumptions
	//   that are falsified in the test package, for example that
	//   all references to unexported symbols are visible to the
	//   analysis.
	//
	// - Efficiency: it may yield a smaller covering set of
	//   PackageIDs for a given set of files. For example, {x.go,
	//   x_test.go} is covered by the single package x_test using
	//   "widest". (Using "narrowest", it would be covered only by
	//   the pair of packages {x, x_test}. Originally we used all
	//   covering packages, so {x.go} alone would be analyzed
	//   twice.)
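	//
	// toDiagnose collects every workspace package containing at least one
	// non-ignored file; toAnalyze additionally requires an open file and keeps
	// only the widest package per package path (tracked via toAnalyzeWidest).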
	var (
		toDiagnose = make(map[metadata.PackageID]*metadata.Package)
		toAnalyze  = make(map[metadata.PackageID]*metadata.Package)

		// secondary index, used to eliminate narrower packages.
		toAnalyzeWidest = make(map[golang.PackagePath]*metadata.Package)
	)
	for _, mp := range workspacePkgs {
		var hasNonIgnored, hasOpenFile bool
		for _, uri := range mp.CompiledGoFiles {
			if !hasNonIgnored && !snapshot.IgnoredFile(uri) {
				hasNonIgnored = true
			}
			if !hasOpenFile && snapshot.IsOpen(uri) {
				hasOpenFile = true
			}
		}
		if hasNonIgnored {
			toDiagnose[mp.ID] = mp
			if hasOpenFile {
				if prev, ok := toAnalyzeWidest[mp.PkgPath]; ok {
					if len(prev.CompiledGoFiles) >= len(mp.CompiledGoFiles) {
						// Previous entry is not narrower; keep it.
						continue
					}
					// Evict previous (narrower) entry.
					delete(toAnalyze, prev.ID)
				}
				toAnalyze[mp.ID] = mp
				toAnalyzeWidest[mp.PkgPath] = mp
			}
		}
	}

	// Package diagnostics and analysis diagnostics must both be computed and
	// merged before they can be reported.
	var pkgDiags, analysisDiags diagMap
	// Collect package diagnostics.
	wg.Add(1)
	go func() {
		defer wg.Done()
		var err error
		pkgDiags, err = snapshot.PackageDiagnostics(ctx, maps.Keys(toDiagnose)...)
		if err != nil {
			event.Error(ctx, "warning: diagnostics failed", err, snapshot.Labels()...)
		}
	}()

	wg.Wait()

	// Merge analysis diagnostics with package diagnostics, and store the
	// resulting analysis diagnostics.
	for uri, adiags := range analysisDiags {
		tdiags := pkgDiags[uri]
		var tdiags2, adiags2 []*cache.Diagnostic
		combineDiagnostics(tdiags, adiags, &tdiags2, &adiags2)
		pkgDiags[uri] = tdiags2
		analysisDiags[uri] = adiags2
	}
	store("type checking", pkgDiags, nil)           // error reported above
	store("analyzing packages", analysisDiags, nil) // error reported above

	return diagnostics, nil
}

// combineDiagnostics combines and filters list/parse/type diagnostics from
// tdiags with adiags, and appends the two lists to *outT and *outA,
// respectively.
//
// Type-error analyzers produce diagnostics that are redundant
// with type checker diagnostics, but more detailed (e.g. fixes).
// Rather than report two diagnostics for the same problem,
// we combine them by augmenting the type-checker diagnostic
// and discarding the analyzer diagnostic.
//
// If an analysis diagnostic has the same range and message as
// a list/parse/type diagnostic, the suggested fix information
// (et al) of the former is merged into a copy of the latter.
// This handles the case where a type-error analyzer suggests
// a fix to a type error, and avoids duplication.
//
// The use of out-slices, though irregular, allows the caller to
// easily choose whether to keep the results separate or combined.
//
// The arguments are not modified.
func combineDiagnostics(tdiags []*cache.Diagnostic, adiags []*cache.Diagnostic, outT, outA *[]*cache.Diagnostic) {

	// Build index of (list+parse+)type errors.
	type key struct {
		Range   protocol.Range
		message string
	}
	index := make(map[key]int) // maps (Range,Message) to index in tdiags slice
	for i, diag := range tdiags {
		index[key{diag.Range, diag.Message}] = i
	}

	// Filter out analysis diagnostics that match type errors,
	// retaining their suggested fix (etc) fields.
	for _, diag := range adiags {
		if i, ok := index[key{diag.Range, diag.Message}]; ok {
			copy := *tdiags[i]
			copy.SuggestedFixes = diag.SuggestedFixes
			copy.Tags = diag.Tags
			tdiags[i] = &copy
			continue
		}

		*outA = append(*outA, diag)
	}

	*outT = append(*outT, tdiags...)
}

// mustPublishDiagnostics marks the uri as needing publication, independent of
// whether the published contents have changed.
//
// This can be used for ensuring gopls publishes diagnostics after certain file
// events.
func (s *server) mustPublishDiagnostics(uri protocol.DocumentURI) {
	s.diagnosticsMu.Lock()
	defer s.diagnosticsMu.Unlock()

	if s.diagnostics[uri] == nil {
		s.diagnostics[uri] = new(fileDiagnostics)
	}
	s.diagnostics[uri].mustPublish = true
}

const WorkspaceLoadFailure = "Error loading workspace"

// updateCriticalErrorStatus updates the critical error progress notification
// based on err.
//
// If err is nil, or if there are no open files, it clears any existing error
// progress report.
func (s *server) updateCriticalErrorStatus(ctx context.Context, snapshot *cache.Snapshot, err *cache.InitializationError) {
	s.criticalErrorStatusMu.Lock()
	defer s.criticalErrorStatusMu.Unlock()

	// Remove all newlines so that the error message can be formatted in a
	// status bar.
	var errMsg string
	if err != nil {
		errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ")
	}

	if s.criticalErrorStatus == nil {
		if errMsg != "" {
			event.Error(ctx, "errors loading workspace", err.MainError, snapshot.Labels()...)
			s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil)
		}
		return
	}

	// If an error is already shown to the user, update it or mark it as
	// resolved.
	if errMsg == "" {
		s.criticalErrorStatus.End(ctx, "Done.")
		s.criticalErrorStatus = nil
	} else {
		s.criticalErrorStatus.Report(ctx, errMsg, 0)
	}
}

// updateDiagnostics records the result of diagnosing a snapshot, and publishes
// any diagnostics that need to be updated on the client.
//
// The allViews argument should be the current set of views present in the
// session, for the purposes of trimming diagnostics produced by deleted views.
func (s *server) updateDiagnostics(ctx context.Context, allViews []*cache.View, snapshot *cache.Snapshot, diagnostics diagMap, final bool) {
	ctx, done := event.Start(ctx, "Server.publishDiagnostics")
	defer done()

	s.diagnosticsMu.Lock()
	defer s.diagnosticsMu.Unlock()

	// Before updating any diagnostics, check that the context (i.e. snapshot
	// background context) is not cancelled.
	//
	// If not, then we know that we haven't started diagnosing the next snapshot,
	// because the previous snapshot is cancelled before the next snapshot is
	// returned from Invalidate.
	//
	// Therefore, even if we publish stale diagnostics here, they should
	// eventually be overwritten with accurate diagnostics.
	//
	// TODO(rfindley): refactor the API to force that snapshots are diagnosed
	// after they are created.
	if ctx.Err() != nil {
		return
	}

	viewMap := make(viewSet)
	for _, v := range allViews {
		viewMap[v] = unit{}
	}

	// updateAndPublish updates diagnostics for a file, checking both the latest
	// diagnostics for the current snapshot, as well as reconciling the set of
	// views.
	updateAndPublish := func(uri protocol.DocumentURI, f *fileDiagnostics, diags []*cache.Diagnostic) error {
		current, ok := f.byView[snapshot.View()]
		// Update the stored diagnostics if:
		//  1. we've never seen diagnostics for this view,
		//  2. diagnostics are for an older snapshot, or
		//  3. we're overwriting with final diagnostics
		//
		// In other words, we shouldn't overwrite existing diagnostics for a
		// snapshot with non-final diagnostics. This avoids the race described at
		// https://github.com/golang/go/issues/64765#issuecomment-1890144575.
		if !ok || current.snapshot < snapshot.SequenceID() || (current.snapshot == snapshot.SequenceID() && final) {
			fh, err := snapshot.ReadFile(ctx, uri)
			if err != nil {
				return err
			}
			current = viewDiagnostics{
				snapshot:    snapshot.SequenceID(),
				version:     fh.Version(),
				diagnostics: diags,
			}
			if f.byView == nil {
				f.byView = make(map[*cache.View]viewDiagnostics)
			}
			f.byView[snapshot.View()] = current
		}

		return s.publishFileDiagnosticsLocked(ctx, viewMap, uri, current.version, f)
	}

	seen := make(map[protocol.DocumentURI]bool)
	for uri, diags := range diagnostics {
		f, ok := s.diagnostics[uri]
		if !ok {
			f = new(fileDiagnostics)
			s.diagnostics[uri] = f
		}
		seen[uri] = true
		if err := updateAndPublish(uri, f, diags); err != nil {
			if ctx.Err() != nil {
				return
			} else {
				event.Error(ctx, "updateDiagnostics: failed to deliver diagnostics", err, tag.URI.Of(uri))
			}
		}
	}

	// TODO(rfindley): perhaps we should clean up files that have no diagnostics.
	// One could imagine a large operation generating diagnostics for a great
	// number of files, after which gopls has to do more bookkeeping into the
	// future.
	if final {
		for uri, f := range s.diagnostics {
			if !seen[uri] {
				if err := updateAndPublish(uri, f, nil); err != nil {
					if ctx.Err() != nil {
						return
					} else {
						event.Error(ctx, "updateDiagnostics: failed to deliver diagnostics", err, tag.URI.Of(uri))
					}
				}
			}
		}
	}
}

// updateOrphanedFileDiagnostics records and publishes orphaned file
// diagnostics at a given modification (identified by modID).
func (s *server) updateOrphanedFileDiagnostics(ctx context.Context, modID uint64, diagnostics diagMap) error {
	views := s.session.Views()
	viewSet := make(viewSet)
	for _, v := range views {
		viewSet[v] = unit{}
	}

	s.diagnosticsMu.Lock()
	defer s.diagnosticsMu.Unlock()

	for uri, diags := range diagnostics {
		f, ok := s.diagnostics[uri]
		if !ok {
			f = new(fileDiagnostics)
			s.diagnostics[uri] = f
		}
		if f.orphanedAt > modID {
			continue
		}
		f.orphanedAt = modID
		f.orphanedFileDiagnostics = diags
		// TODO(rfindley): the version of this file is potentially inaccurate;
		// nevertheless, it should be eventually consistent, because all
		// modifications are diagnosed.
		fh, err := s.session.ReadFile(ctx, uri)
		if err != nil {
			return err
		}
		if err := s.publishFileDiagnosticsLocked(ctx, viewSet, uri, fh.Version(), f); err != nil {
			return err
		}
	}

	// Clear any stale orphaned file diagnostics.
	for uri, f := range s.diagnostics {
		if f.orphanedAt < modID {
			f.orphanedFileDiagnostics = nil
		}
		fh, err := s.session.ReadFile(ctx, uri)
		if err != nil {
			return err
		}
		if err := s.publishFileDiagnosticsLocked(ctx, viewSet, uri, fh.Version(), f); err != nil {
			return err
		}
	}
	return nil
}

// publishFileDiagnosticsLocked publishes a fileDiagnostics value, while holding s.diagnosticsMu.
//
// If the publication succeeds, it updates f.publishedHash and f.mustPublish.
func (s *server) publishFileDiagnosticsLocked(ctx context.Context, views viewSet, uri protocol.DocumentURI, version int32, f *fileDiagnostics) error {
	// Check that the set of views is up-to-date, and de-dupe diagnostics
	// across views.
	var (
		diagHashes = make(map[file.Hash]unit) // unique diagnostic hashes
		hash       file.Hash                  // XOR of diagnostic hashes
		unique     []*cache.Diagnostic        // unique diagnostics
	)
	add := func(diag *cache.Diagnostic) {
		h := hashDiagnostic(diag)
		if _, ok := diagHashes[h]; !ok {
			diagHashes[h] = unit{}
			unique = append(unique, diag)
			hash.XORWith(h)
		}
	}
	for _, diag := range f.orphanedFileDiagnostics {
		add(diag)
	}
	for view, viewDiags := range f.byView {
		if _, ok := views[view]; !ok {
			delete(f.byView, view) // view no longer exists
			continue
		}
		if viewDiags.version != version {
			continue // a payload of diagnostics applies to a specific file version
		}
		for _, diag := range viewDiags.diagnostics {
			add(diag)
		}
	}
	sortDiagnostics(unique)

	// Publish, if necessary.
	if hash != f.publishedHash || f.mustPublish {
		if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
			Diagnostics: toProtocolDiagnostics(unique),
			URI:         uri,
			Version:     version,
		}); err != nil {
			return err
		}
		f.publishedHash = hash
		f.mustPublish = false
	}
	return nil
}

func toProtocolDiagnostics(diagnostics []*cache.Diagnostic) []protocol.Diagnostic {
	reports := []protocol.Diagnostic{}
	for _, diag := range diagnostics {
		pdiag := protocol.Diagnostic{
			// diag.Message might start with \n or \t
			Message:            strings.TrimSpace(diag.Message),
			Range:              diag.Range,
			Severity:           diag.Severity,
			Source:             string(diag.Source),
			Tags:               protocol.NonNilSlice(diag.Tags),
			RelatedInformation: diag.Related,
			Data:               diag.BundledFixes,
		}
		if diag.Code != "" {
			pdiag.Code = diag.Code
		}
		if diag.CodeHref != "" {
			pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref}
		}
		reports = append(reports, pdiag)
	}
	return reports
}

func (s *server) shouldIgnoreError(snapshot *cache.Snapshot, err error) bool {
	if err == nil { // if there is no error at all
		return false
	}
	if errors.Is(err, context.Canceled) {
		return true
	}
	// If the folder has no Go code in it, we shouldn't spam the user with a warning.
	// TODO(rfindley): surely it is not correct to walk the folder here just to
	// suppress diagnostics, every time we compute diagnostics.
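	//
	// Walk the folder, stopping at the first .go file found; the sentinel
	// error returned below exists only to terminate the walk early.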
	var hasGo bool
	_ = filepath.Walk(snapshot.Folder().Path(), func(_ string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !strings.HasSuffix(info.Name(), ".go") {
			return nil
		}
		hasGo = true
		return errors.New("done")
	})
	return !hasGo
}