// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package server

import (
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"cuelang.org/go/cue/build"
	"cuelang.org/go/internal/golangorgx/gopls/cache"
	"cuelang.org/go/internal/golangorgx/gopls/cache/metadata"
	"cuelang.org/go/internal/golangorgx/gopls/file"
	"cuelang.org/go/internal/golangorgx/gopls/protocol"
	"cuelang.org/go/internal/golangorgx/gopls/settings"
	"cuelang.org/go/internal/golangorgx/gopls/util/maps"
	"cuelang.org/go/internal/golangorgx/tools/event"
	"cuelang.org/go/internal/golangorgx/tools/event/tag"
)

// fileDiagnostics holds the current state of published diagnostics for a file.
type fileDiagnostics struct {
	publishedHash file.Hash // hash of the last set of diagnostics published for this URI
	mustPublish   bool      // if set, publish diagnostics even if they haven't changed

	// Orphaned file diagnostics are not necessarily associated with any *View
	// (since they are orphaned). Instead, keep track of the modification ID at
	// which they were orphaned (see server.lastModificationID).
	orphanedAt              uint64 // modification ID at which this file was orphaned.
	orphanedFileDiagnostics []*cache.Diagnostic

	// Files may have their diagnostics computed by multiple views, and so
	// diagnostics are organized by View. See the documentation for update for more
	// details about how the set of file diagnostics evolves over time.
	byView map[*cache.View]viewDiagnostics
}

// viewDiagnostics holds a set of file diagnostics computed from a given View.
type viewDiagnostics struct {
	snapshot    uint64 // snapshot sequence ID
	version     int32  // file version
	diagnostics []*cache.Diagnostic
}

// common types; for brevity
type (
	viewSet = map[*cache.View]unit
	diagMap = map[protocol.DocumentURI][]*cache.Diagnostic
)

// hashDiagnostic computes a hash to identify a diagnostic.
//
// The hash folds in every field that affects what the client would display
// (tags, related information, code, message, range, severity, source, and
// bundled fixes), so two diagnostics hash equally iff they are
// indistinguishable when published.
func hashDiagnostic(d *cache.Diagnostic) file.Hash {
	h := sha256.New()
	for _, t := range d.Tags {
		fmt.Fprintf(h, "tag: %s\n", t)
	}
	for _, r := range d.Related {
		fmt.Fprintf(h, "related: %s %s %s\n", r.Location.URI, r.Message, r.Location.Range)
	}
	fmt.Fprintf(h, "code: %s\n", d.Code)
	fmt.Fprintf(h, "codeHref: %s\n", d.CodeHref)
	fmt.Fprintf(h, "message: %s\n", d.Message)
	fmt.Fprintf(h, "range: %s\n", d.Range)
	fmt.Fprintf(h, "severity: %s\n", d.Severity)
	fmt.Fprintf(h, "source: %s\n", d.Source)
	if d.BundledFixes != nil {
		fmt.Fprintf(h, "fixes: %s\n", *d.BundledFixes)
	}
	var hash [sha256.Size]byte
	h.Sum(hash[:0])
	return hash
}

// sortDiagnostics sorts d in place by range, then source, then message,
// giving a deterministic publication order.
func sortDiagnostics(d []*cache.Diagnostic) {
	sort.Slice(d, func(i int, j int) bool {
		a, b := d[i], d[j]
		if r := protocol.CompareRange(a.Range, b.Range); r != 0 {
			return r < 0
		}
		if a.Source != b.Source {
			return a.Source < b.Source
		}
		return a.Message < b.Message
	})
}

// diagnoseChangedViews diagnoses, concurrently, every view recorded in
// s.viewsToDiagnose, then diagnoses orphaned files for the session.
//
// modID is the modification ID that triggered this call; lastChange records,
// per view, the most recently changed files (used for the fast diagnostic
// pass); cause identifies the kind of modification.
func (s *server) diagnoseChangedViews(ctx context.Context, modID uint64, lastChange map[*cache.View][]protocol.DocumentURI, cause ModificationSource) {
	// Collect views needing diagnosis.
	s.modificationMu.Lock()
	needsDiagnosis := maps.Keys(s.viewsToDiagnose)
	s.modificationMu.Unlock()

	// Diagnose views concurrently.
	var wg sync.WaitGroup
	for _, v := range needsDiagnosis {
		v := v
		snapshot, release, err := v.Snapshot()
		if err != nil {
			s.modificationMu.Lock()
			// The View is shut down. Unlike below, no need to check
			// s.needsDiagnosis[v], since the view can never be diagnosed.
			delete(s.viewsToDiagnose, v)
			s.modificationMu.Unlock()
			continue
		}

		// Collect uris for fast diagnosis. We only care about the most recent
		// change here, because this is just an optimization for the case where the
		// user is actively editing a single file.
		uris := lastChange[v]
		if snapshot.Options().DiagnosticsTrigger == settings.DiagnosticsOnSave && cause == FromDidChange {
			// The user requested to update the diagnostics only on save.
			// Do not diagnose yet.
			release()
			continue
		}

		wg.Add(1)
		go func(snapshot *cache.Snapshot, uris []protocol.DocumentURI) {
			defer release()
			defer wg.Done()
			s.diagnoseSnapshot(snapshot, uris, snapshot.Options().DiagnosticsDelay)
			s.modificationMu.Lock()

			// Only remove v from s.viewsToDiagnose if the snapshot is not cancelled.
			// This ensures that the snapshot was not cloned before its state was
			// fully evaluated, and therefore avoids missing a change that was
			// irrelevant to an incomplete snapshot.
			//
			// See the documentation for s.viewsToDiagnose for details.
			if snapshot.BackgroundContext().Err() == nil && s.viewsToDiagnose[v] <= modID {
				delete(s.viewsToDiagnose, v)
			}
			s.modificationMu.Unlock()
		}(snapshot, uris)
	}

	wg.Wait()

	// Diagnose orphaned files for the session.
	orphanedFileDiagnostics, err := s.session.OrphanedFileDiagnostics(ctx)
	if err == nil {
		err = s.updateOrphanedFileDiagnostics(ctx, modID, orphanedFileDiagnostics)
	}
	if err != nil {
		if ctx.Err() == nil {
			event.Error(ctx, "warning: while diagnosing orphaned files", err)
		}
	}
}

// diagnoseSnapshot computes and publishes diagnostics for the given snapshot.
//
// If delay is non-zero, computing diagnostics does not start until after this
// delay has expired, to allow work to be cancelled by subsequent changes.
165 // 166 // If changedURIs is non-empty, it is a set of recently changed files that 167 // should be diagnosed immediately, and onDisk reports whether these file 168 // changes came from a change to on-disk files. 169 func (s *server) diagnoseSnapshot(snapshot *cache.Snapshot, changedURIs []protocol.DocumentURI, delay time.Duration) { 170 ctx := snapshot.BackgroundContext() 171 ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", snapshot.Labels()...) 172 defer done() 173 174 allViews := s.session.Views() 175 if delay > 0 { 176 // 2-phase diagnostics. 177 // 178 // The first phase just parses and type-checks (but 179 // does not analyze) packages directly affected by 180 // file modifications. 181 // 182 // The second phase runs after the delay, and does everything. 183 // 184 // We wait a brief delay before the first phase, to allow higher priority 185 // work such as autocompletion to acquire the type checking mutex (though 186 // typically both diagnosing changed files and performing autocompletion 187 // will be doing the same work: recomputing active packages). 188 const minDelay = 20 * time.Millisecond 189 select { 190 case <-time.After(minDelay): 191 case <-ctx.Done(): 192 return 193 } 194 195 if len(changedURIs) > 0 { 196 diagnostics, err := s.diagnoseChangedFiles(ctx, snapshot, changedURIs) 197 if err != nil { 198 if ctx.Err() == nil { 199 event.Error(ctx, "warning: while diagnosing changed files", err, snapshot.Labels()...) 200 } 201 return 202 } 203 s.updateDiagnostics(ctx, allViews, snapshot, diagnostics, false) 204 } 205 206 if delay < minDelay { 207 delay = 0 208 } else { 209 delay -= minDelay 210 } 211 212 select { 213 case <-time.After(delay): 214 case <-ctx.Done(): 215 return 216 } 217 } 218 219 diagnostics, err := s.diagnose(ctx, snapshot) 220 if err != nil { 221 if ctx.Err() == nil { 222 event.Error(ctx, "warning: while diagnosing snapshot", err, snapshot.Labels()...) 
223 } 224 return 225 } 226 s.updateDiagnostics(ctx, allViews, snapshot, diagnostics, true) 227 } 228 229 func (s *server) diagnoseChangedFiles(ctx context.Context, snapshot *cache.Snapshot, uris []protocol.DocumentURI) (diagMap, error) { 230 ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", snapshot.Labels()...) 231 defer done() 232 233 toDiagnose := make(map[metadata.ImportPath]*build.Instance) 234 for _, uri := range uris { 235 // If the file is not open, don't diagnose its package. 236 // 237 // We don't care about fast diagnostics for files that are no longer open, 238 // because the user isn't looking at them. Also, explicitly requesting a 239 // package can lead to "command-line-arguments" packages if the file isn't 240 // covered by the current View. By avoiding requesting packages for e.g. 241 // unrelated file movement, we can minimize these unnecessary packages. 242 if !snapshot.IsOpen(uri) { 243 continue 244 } 245 // If the file is not known to the snapshot (e.g., if it was deleted), 246 // don't diagnose it. 247 if snapshot.FindFile(uri) == nil { 248 continue 249 } 250 251 insts, err := snapshot.MetadataForFile(ctx, uri) 252 if err != nil { 253 if ctx.Err() != nil { 254 return nil, ctx.Err() 255 } 256 // TODO(findleyr): we should probably do something with the error here, 257 // but as of now this can fail repeatedly if load fails, so can be too 258 // noisy to log (and we'll handle things later in the slow pass). 259 continue 260 } 261 if len(insts) > 0 { 262 // The results of snapshot.MetadataForFile are sorted, with 263 // the instance with the fewest BuildFiles first. We want the 264 // smallest/narrowest instance here. 265 inst := insts[0] 266 toDiagnose[metadata.ImportPath(inst.ImportPath)] = inst 267 } 268 } 269 diags, err := snapshot.PackageDiagnostics(ctx, maps.Keys(toDiagnose)...) 270 if err != nil { 271 if ctx.Err() == nil { 272 event.Error(ctx, "warning: diagnostics failed", err, snapshot.Labels()...) 
273 } 274 return nil, err 275 } 276 // golang/go#59587: guarantee that we compute type-checking diagnostics 277 // for every compiled package file, otherwise diagnostics won't be quickly 278 // cleared following a fix. 279 for _, inst := range toDiagnose { 280 for _, file := range inst.BuildFiles { 281 uri := protocol.URIFromPath(file.Filename) 282 if _, ok := diags[uri]; !ok { 283 diags[uri] = nil 284 } 285 } 286 } 287 return diags, nil 288 } 289 290 func (s *server) diagnose(ctx context.Context, snapshot *cache.Snapshot) (diagMap, error) { 291 ctx, done := event.Start(ctx, "Server.diagnose", snapshot.Labels()...) 292 defer done() 293 294 var ( 295 diagnostics = make(diagMap) 296 ) 297 298 return diagnostics, nil 299 } 300 301 // combineDiagnostics combines and filters list/parse/type diagnostics from 302 // tdiags with adiags, and appends the two lists to *outT and *outA, 303 // respectively. 304 // 305 // Type-error analyzers produce diagnostics that are redundant 306 // with type checker diagnostics, but more detailed (e.g. fixes). 307 // Rather than report two diagnostics for the same problem, 308 // we combine them by augmenting the type-checker diagnostic 309 // and discarding the analyzer diagnostic. 310 // 311 // If an analysis diagnostic has the same range and message as 312 // a list/parse/type diagnostic, the suggested fix information 313 // (et al) of the latter is merged into a copy of the former. 314 // This handles the case where a type-error analyzer suggests 315 // a fix to a type error, and avoids duplication. 316 // 317 // The use of out-slices, though irregular, allows the caller to 318 // easily choose whether to keep the results separate or combined. 319 // 320 // The arguments are not modified. 321 func combineDiagnostics(tdiags []*cache.Diagnostic, adiags []*cache.Diagnostic, outT, outA *[]*cache.Diagnostic) { 322 323 // Build index of (list+parse+)type errors. 
324 type key struct { 325 Range protocol.Range 326 message string 327 } 328 index := make(map[key]int) // maps (Range,Message) to index in tdiags slice 329 for i, diag := range tdiags { 330 index[key{diag.Range, diag.Message}] = i 331 } 332 333 // Filter out analysis diagnostics that match type errors, 334 // retaining their suggested fix (etc) fields. 335 for _, diag := range adiags { 336 if i, ok := index[key{diag.Range, diag.Message}]; ok { 337 copy := *tdiags[i] 338 copy.SuggestedFixes = diag.SuggestedFixes 339 copy.Tags = diag.Tags 340 tdiags[i] = © 341 continue 342 } 343 344 *outA = append(*outA, diag) 345 } 346 347 *outT = append(*outT, tdiags...) 348 } 349 350 // mustPublishDiagnostics marks the uri as needing publication, independent of 351 // whether the published contents have changed. 352 // 353 // This can be used for ensuring gopls publishes diagnostics after certain file 354 // events. 355 func (s *server) mustPublishDiagnostics(uri protocol.DocumentURI) { 356 s.diagnosticsMu.Lock() 357 defer s.diagnosticsMu.Unlock() 358 359 if s.diagnostics[uri] == nil { 360 s.diagnostics[uri] = new(fileDiagnostics) 361 } 362 s.diagnostics[uri].mustPublish = true 363 } 364 365 const WorkspaceLoadFailure = "Error loading workspace" 366 367 // updateCriticalErrorStatus updates the critical error progress notification 368 // based on err. 369 // 370 // If err is nil, or if there are no open files, it clears any existing error 371 // progress report. 372 func (s *server) updateCriticalErrorStatus(ctx context.Context, snapshot *cache.Snapshot, err *cache.InitializationError) { 373 s.criticalErrorStatusMu.Lock() 374 defer s.criticalErrorStatusMu.Unlock() 375 376 // Remove all newlines so that the error message can be formatted in a 377 // status bar. 
378 var errMsg string 379 if err != nil { 380 errMsg = strings.ReplaceAll(err.MainError.Error(), "\n", " ") 381 } 382 383 if s.criticalErrorStatus == nil { 384 if errMsg != "" { 385 event.Error(ctx, "errors loading workspace", err.MainError, snapshot.Labels()...) 386 s.criticalErrorStatus = s.progress.Start(ctx, WorkspaceLoadFailure, errMsg, nil, nil) 387 } 388 return 389 } 390 391 // If an error is already shown to the user, update it or mark it as 392 // resolved. 393 if errMsg == "" { 394 s.criticalErrorStatus.End(ctx, "Done.") 395 s.criticalErrorStatus = nil 396 } else { 397 s.criticalErrorStatus.Report(ctx, errMsg, 0) 398 } 399 } 400 401 // updateDiagnostics records the result of diagnosing a snapshot, and publishes 402 // any diagnostics that need to be updated on the client. 403 // 404 // The allViews argument should be the current set of views present in the 405 // session, for the purposes of trimming diagnostics produced by deleted views. 406 func (s *server) updateDiagnostics(ctx context.Context, allViews []*cache.View, snapshot *cache.Snapshot, diagnostics diagMap, final bool) { 407 ctx, done := event.Start(ctx, "Server.publishDiagnostics") 408 defer done() 409 410 s.diagnosticsMu.Lock() 411 defer s.diagnosticsMu.Unlock() 412 413 // Before updating any diagnostics, check that the context (i.e. snapshot 414 // background context) is not cancelled. 415 // 416 // If not, then we know that we haven't started diagnosing the next snapshot, 417 // because the previous snapshot is cancelled before the next snapshot is 418 // returned from Invalidate. 419 // 420 // Therefore, even if we publish stale diagnostics here, they should 421 // eventually be overwritten with accurate diagnostics. 422 // 423 // TODO(rfindley): refactor the API to force that snapshots are diagnosed 424 // after they are created. 
425 if ctx.Err() != nil { 426 return 427 } 428 429 viewMap := make(viewSet) 430 for _, v := range allViews { 431 viewMap[v] = unit{} 432 } 433 434 // updateAndPublish updates diagnostics for a file, checking both the latest 435 // diagnostics for the current snapshot, as well as reconciling the set of 436 // views. 437 updateAndPublish := func(uri protocol.DocumentURI, f *fileDiagnostics, diags []*cache.Diagnostic) error { 438 current, ok := f.byView[snapshot.View()] 439 // Update the stored diagnostics if: 440 // 1. we've never seen diagnostics for this view, 441 // 2. diagnostics are for an older snapshot, or 442 // 3. we're overwriting with final diagnostics 443 // 444 // In other words, we shouldn't overwrite existing diagnostics for a 445 // snapshot with non-final diagnostics. This avoids the race described at 446 // https://github.com/golang/go/issues/64765#issuecomment-1890144575. 447 if !ok || current.snapshot < snapshot.SequenceID() || (current.snapshot == snapshot.SequenceID() && final) { 448 fh, err := snapshot.ReadFile(ctx, uri) 449 if err != nil { 450 return err 451 } 452 current = viewDiagnostics{ 453 snapshot: snapshot.SequenceID(), 454 version: fh.Version(), 455 diagnostics: diags, 456 } 457 if f.byView == nil { 458 f.byView = make(map[*cache.View]viewDiagnostics) 459 } 460 f.byView[snapshot.View()] = current 461 } 462 463 return s.publishFileDiagnosticsLocked(ctx, viewMap, uri, current.version, f) 464 } 465 466 seen := make(map[protocol.DocumentURI]bool) 467 for uri, diags := range diagnostics { 468 f, ok := s.diagnostics[uri] 469 if !ok { 470 f = new(fileDiagnostics) 471 s.diagnostics[uri] = f 472 } 473 seen[uri] = true 474 if err := updateAndPublish(uri, f, diags); err != nil { 475 if ctx.Err() != nil { 476 return 477 } else { 478 event.Error(ctx, "updateDiagnostics: failed to deliver diagnostics", err, tag.URI.Of(uri)) 479 } 480 } 481 } 482 483 // TODO(rfindley): perhaps we should clean up files that have no diagnostics. 
484 // One could imagine a large operation generating diagnostics for a great 485 // number of files, after which gopls has to do more bookkeeping into the 486 // future. 487 if final { 488 for uri, f := range s.diagnostics { 489 if !seen[uri] { 490 if err := updateAndPublish(uri, f, nil); err != nil { 491 if ctx.Err() != nil { 492 return 493 } else { 494 event.Error(ctx, "updateDiagnostics: failed to deliver diagnostics", err, tag.URI.Of(uri)) 495 } 496 } 497 } 498 } 499 } 500 } 501 502 // updateOrphanedFileDiagnostics records and publishes orphaned file 503 // diagnostics as a given modification time. 504 func (s *server) updateOrphanedFileDiagnostics(ctx context.Context, modID uint64, diagnostics diagMap) error { 505 views := s.session.Views() 506 viewSet := make(viewSet) 507 for _, v := range views { 508 viewSet[v] = unit{} 509 } 510 511 s.diagnosticsMu.Lock() 512 defer s.diagnosticsMu.Unlock() 513 514 for uri, diags := range diagnostics { 515 f, ok := s.diagnostics[uri] 516 if !ok { 517 f = new(fileDiagnostics) 518 s.diagnostics[uri] = f 519 } 520 if f.orphanedAt > modID { 521 continue 522 } 523 f.orphanedAt = modID 524 f.orphanedFileDiagnostics = diags 525 // TODO(rfindley): the version of this file is potentially inaccurate; 526 // nevertheless, it should be eventually consistent, because all 527 // modifications are diagnosed. 528 fh, err := s.session.ReadFile(ctx, uri) 529 if err != nil { 530 return err 531 } 532 if err := s.publishFileDiagnosticsLocked(ctx, viewSet, uri, fh.Version(), f); err != nil { 533 return err 534 } 535 } 536 537 // Clear any stale orphaned file diagnostics. 
538 for uri, f := range s.diagnostics { 539 if f.orphanedAt < modID { 540 f.orphanedFileDiagnostics = nil 541 } 542 fh, err := s.session.ReadFile(ctx, uri) 543 if err != nil { 544 return err 545 } 546 if err := s.publishFileDiagnosticsLocked(ctx, viewSet, uri, fh.Version(), f); err != nil { 547 return err 548 } 549 } 550 return nil 551 } 552 553 // publishFileDiagnosticsLocked publishes a fileDiagnostics value, while holding s.diagnosticsMu. 554 // 555 // If the publication succeeds, it updates f.publishedHash and f.mustPublish. 556 func (s *server) publishFileDiagnosticsLocked(ctx context.Context, views viewSet, uri protocol.DocumentURI, version int32, f *fileDiagnostics) error { 557 // Check that the set of views is up-to-date, and de-dupe diagnostics 558 // across views. 559 var ( 560 diagHashes = make(map[file.Hash]unit) // unique diagnostic hashes 561 hash file.Hash // XOR of diagnostic hashes 562 unique []*cache.Diagnostic // unique diagnostics 563 ) 564 add := func(diag *cache.Diagnostic) { 565 h := hashDiagnostic(diag) 566 if _, ok := diagHashes[h]; !ok { 567 diagHashes[h] = unit{} 568 unique = append(unique, diag) 569 hash.XORWith(h) 570 } 571 } 572 for _, diag := range f.orphanedFileDiagnostics { 573 add(diag) 574 } 575 for view, viewDiags := range f.byView { 576 if _, ok := views[view]; !ok { 577 delete(f.byView, view) // view no longer exists 578 continue 579 } 580 if viewDiags.version != version { 581 continue // a payload of diagnostics applies to a specific file version 582 } 583 for _, diag := range viewDiags.diagnostics { 584 add(diag) 585 } 586 } 587 sortDiagnostics(unique) 588 589 // Publish, if necessary. 
590 if hash != f.publishedHash || f.mustPublish { 591 if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{ 592 Diagnostics: toProtocolDiagnostics(unique), 593 URI: uri, 594 Version: version, 595 }); err != nil { 596 return err 597 } 598 f.publishedHash = hash 599 f.mustPublish = false 600 } 601 return nil 602 } 603 604 func toProtocolDiagnostics(diagnostics []*cache.Diagnostic) []protocol.Diagnostic { 605 reports := []protocol.Diagnostic{} 606 for _, diag := range diagnostics { 607 pdiag := protocol.Diagnostic{ 608 // diag.Message might start with \n or \t 609 Message: strings.TrimSpace(diag.Message), 610 Range: diag.Range, 611 Severity: diag.Severity, 612 Source: string(diag.Source), 613 Tags: protocol.NonNilSlice(diag.Tags), 614 RelatedInformation: diag.Related, 615 Data: diag.BundledFixes, 616 } 617 if diag.Code != "" { 618 pdiag.Code = diag.Code 619 } 620 if diag.CodeHref != "" { 621 pdiag.CodeDescription = &protocol.CodeDescription{Href: diag.CodeHref} 622 } 623 reports = append(reports, pdiag) 624 } 625 return reports 626 } 627 628 func (s *server) shouldIgnoreError(snapshot *cache.Snapshot, err error) bool { 629 if err == nil { // if there is no error at all 630 return false 631 } 632 if errors.Is(err, context.Canceled) { 633 return true 634 } 635 // If the folder has no Go code in it, we shouldn't spam the user with a warning. 636 // TODO(rfindley): surely it is not correct to walk the folder here just to 637 // suppress diagnostics, every time we compute diagnostics. 638 var hasGo bool 639 _ = filepath.Walk(snapshot.Folder().Path(), func(_ string, info os.FileInfo, err error) error { 640 if err != nil { 641 return err 642 } 643 if !strings.HasSuffix(info.Name(), ".go") { 644 return nil 645 } 646 hasGo = true 647 return errors.New("done") 648 }) 649 return !hasGo 650 }