github.com/vanstinator/golangci-lint@v0.0.0-20240223191551-cc572f00d9d1/pkg/golinters/goanalysis/runners.go

package goanalysis

import (
	"fmt"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/packages"

	"github.com/vanstinator/golangci-lint/internal/pkgcache"
	"github.com/vanstinator/golangci-lint/pkg/lint/linter"
	"github.com/vanstinator/golangci-lint/pkg/logutils"
	"github.com/vanstinator/golangci-lint/pkg/result"
	"github.com/vanstinator/golangci-lint/pkg/timeutils"
)

// runAnalyzersConfig is the part of a go/analysis-based linter that
// runAnalyzers needs: its analyzers, load mode, and issue-reporting hooks.
type runAnalyzersConfig interface {
	getName() string
	getLinterNameForDiagnostic(*Diagnostic) string
	getAnalyzers() []*analysis.Analyzer
	useOriginalPackages() bool
	reportIssues(*linter.Context) []Issue
	getLoadMode() LoadMode
}

// runAnalyzers runs the configured analyzers over the context's packages.
// Issues already present in the package cache are reused; only the remaining
// packages are analyzed, and their results are saved back to the cache
// (unless there were compilation errors).
func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) {
	log := lintCtx.Log.Child(logutils.DebugKeyGoAnalysis)
	sw := timeutils.NewStopwatch("analyzers", log)

	const stagesToPrint = 10
	defer sw.PrintTopStages(stagesToPrint)

	runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw)

	pkgs := lintCtx.Packages
	if cfg.useOriginalPackages() {
		pkgs = lintCtx.OriginalPackages
	}

	// Analyze only the packages whose issues weren't found in the cache.
	issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers())
	var pkgsToAnalyze []*packages.Package
	for _, pkg := range pkgs {
		if !pkgsFromCache[pkg] {
			pkgsToAnalyze = append(pkgsToAnalyze, pkg)
		}
	}

	diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze)

	defer func() {
		if len(errs) == 0 {
			// Don't save issues to the cache when there are compilation errors:
			// if we did, those errors wouldn't be reported on repeated runs.
			saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers())
		}
	}()

	buildAllIssues := func() []result.Issue {
		var retIssues []result.Issue
		reportedIssues := cfg.reportIssues(lintCtx)
		for i := range reportedIssues {
			issue := &reportedIssues[i].Issue
			if issue.Pkg == nil {
				issue.Pkg = passToPkg[reportedIssues[i].Pass]
			}
			retIssues = append(retIssues, *issue)
		}
		retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...)
		return retIssues
	}

	errIssues, err := buildIssuesFromIllTypedError(errs, lintCtx)
	if err != nil {
		return nil, err
	}

	issues = append(issues, errIssues...)
	issues = append(issues, buildAllIssues()...)

	return issues, nil
}

// buildIssues converts analyzer diagnostics (and their related information)
// into result.Issue values, prefixing the text with the analyzer name when it
// differs from the reporting linter's name.
func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue {
	var issues []result.Issue
	for i := range diags {
		diag := &diags[i]
		linterName := linterNameBuilder(diag)

		var text string
		if diag.Analyzer.Name == linterName {
			text = diag.Message
		} else {
			text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message)
		}

		issues = append(issues, result.Issue{
			FromLinter: linterName,
			Text:       text,
			Pos:        diag.Position,
			Pkg:        diag.Pkg,
		})

		if len(diag.Related) > 0 {
			for _, info := range diag.Related {
				issues = append(issues, result.Issue{
					FromLinter: linterName,
					Text:       fmt.Sprintf("%s(related information): %s", diag.Analyzer.Name, info.Message),
					Pos:        diag.Pkg.Fset.Position(info.Pos),
					Pkg:        diag.Pkg,
				})
			}
		}
	}
	return issues
}

// getIssuesCacheKey returns the package cache key under which the issues
// produced by the given set of analyzers are stored.
func getIssuesCacheKey(analyzers []*analysis.Analyzer) string {
	return "lint/result:" + analyzersHashID(analyzers)
}

// saveIssuesToCache stores the issues of every freshly analyzed package in the
// package cache. Packages are encoded and saved concurrently by GOMAXPROCS
// workers; packages whose issues were loaded from the cache are skipped.
func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool,
	issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) {
	startedAt := time.Now()
	perPkgIssues := map[*packages.Package][]result.Issue{}
	for ind := range issues {
		i := &issues[ind]
		perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i)
	}

	savedIssuesCount := int32(0)
	lintResKey := getIssuesCacheKey(analyzers)

	workerCount := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	wg.Add(workerCount)

	pkgCh := make(chan *packages.Package, len(allPkgs))
	for i := 0; i < workerCount; i++ {
		go func() {
			defer wg.Done()
			for pkg := range pkgCh {
				pkgIssues := perPkgIssues[pkg]
				encodedIssues := make([]EncodingIssue, 0, len(pkgIssues))
				for ind := range pkgIssues {
					i := &pkgIssues[ind]
					encodedIssues = append(encodedIssues, EncodingIssue{
						FromLinter:           i.FromLinter,
						Text:                 i.Text,
						Pos:                  i.Pos,
						LineRange:            i.LineRange,
						Replacement:          i.Replacement,
						ExpectNoLint:         i.ExpectNoLint,
						ExpectedNoLintLinter: i.ExpectedNoLintLinter,
					})
				}

				atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues)))
				if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil {
					lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err)
				} else {
					issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues))
				}
			}
		}()
	}

	// Feed the workers only with packages that weren't served from the cache.
	for _, pkg := range allPkgs {
		if pkgsFromCache[pkg] {
			continue
		}

		pkgCh <- pkg
	}
	close(pkgCh)
	wg.Wait()

	issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt))
}

// loadIssuesFromCache tries to load previously saved issues for every package.
// It returns the loaded issues and the set of packages that were fully served
// from the cache (and therefore don't need to be re-analyzed).
//
//nolint:gocritic
func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
	analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) {
	startedAt := time.Now()

	lintResKey := getIssuesCacheKey(analyzers)
	type cacheRes struct {
		issues  []result.Issue
		loadErr error
	}
	pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs))
	for _, pkg := range pkgs {
		pkgToCacheRes[pkg] = &cacheRes{}
	}

	workerCount := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	wg.Add(workerCount)

	pkgCh := make(chan *packages.Package, len(pkgs))
	for i := 0; i < workerCount; i++ {
		go func() {
			defer wg.Done()
			for pkg := range pkgCh {
				var pkgIssues []EncodingIssue
				err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues)
				cacheRes := pkgToCacheRes[pkg]
				cacheRes.loadErr = err
				if err != nil {
					continue
				}
				if len(pkgIssues) == 0 {
					continue
				}

				issues := make([]result.Issue, 0, len(pkgIssues))
				for _, i := range pkgIssues {
					issues = append(issues, result.Issue{
						FromLinter:           i.FromLinter,
						Text:                 i.Text,
						Pos:                  i.Pos,
						LineRange:            i.LineRange,
						Replacement:          i.Replacement,
						Pkg:                  pkg,
						ExpectNoLint:         i.ExpectNoLint,
						ExpectedNoLintLinter: i.ExpectedNoLintLinter,
					})
				}
				cacheRes.issues = issues
			}
		}()
	}

	for _, pkg := range pkgs {
		pkgCh <- pkg
	}
	close(pkgCh)
	wg.Wait()

	loadedIssuesCount := 0
	var issues []result.Issue
	pkgsFromCache := map[*packages.Package]bool{}
	for pkg, cacheRes := range pkgToCacheRes {
		if cacheRes.loadErr == nil {
			loadedIssuesCount += len(cacheRes.issues)
			pkgsFromCache[pkg] = true
			issues = append(issues, cacheRes.issues...)
			issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues))
		} else {
			issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr)
		}
	}
	issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages",
		loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs))
	return issues, pkgsFromCache
}

// analyzersHashID returns a deterministic identifier for a set of analyzers:
// their names, sorted and comma-joined.
func analyzersHashID(analyzers []*analysis.Analyzer) string {
	names := make([]string, 0, len(analyzers))
	for _, a := range analyzers {
		names = append(names, a.Name)
	}

	sort.Strings(names)
	return strings.Join(names, ",")
}