github.com/elek/golangci-lint@v1.42.2-0.20211208090441-c05b7fcb3a9a/pkg/golinters/goanalysis/runners.go

package goanalysis

import (
	"fmt"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/packages"

	"github.com/elek/golangci-lint/internal/pkgcache"
	"github.com/elek/golangci-lint/pkg/lint/linter"
	"github.com/elek/golangci-lint/pkg/result"
	"github.com/elek/golangci-lint/pkg/timeutils"
)

type runAnalyzersConfig interface {
	getName() string
	getLinterNameForDiagnostic(*Diagnostic) string
	getAnalyzers() []*analysis.Analyzer
	useOriginalPackages() bool
	reportIssues(*linter.Context) []Issue
	getLoadMode() LoadMode
}

// runAnalyzers loads previously cached issues for the given packages, runs the
// configured analyzers on the packages that were not served from the cache,
// converts diagnostics and type errors into issues, and caches the new results.
func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) {
	log := lintCtx.Log.Child("goanalysis")
	sw := timeutils.NewStopwatch("analyzers", log)

	const stagesToPrint = 10
	defer sw.PrintTopStages(stagesToPrint)

	runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw)

	pkgs := lintCtx.Packages
	if cfg.useOriginalPackages() {
		pkgs = lintCtx.OriginalPackages
	}

	issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers())
	var pkgsToAnalyze []*packages.Package
	for _, pkg := range pkgs {
		if !pkgsFromCache[pkg] {
			pkgsToAnalyze = append(pkgsToAnalyze, pkg)
		}
	}

	diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze)

	defer func() {
		if len(errs) == 0 {
			// Don't save results to the cache when there are compilation errors;
			// otherwise they wouldn't be reported again on repeated runs.
			saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers())
		}
	}()

	buildAllIssues := func() []result.Issue {
		var retIssues []result.Issue
		reportedIssues := cfg.reportIssues(lintCtx)
		for i := range reportedIssues {
			issue := &reportedIssues[i].Issue
			if issue.Pkg == nil {
				issue.Pkg = passToPkg[reportedIssues[i].Pass]
			}
			retIssues = append(retIssues, *issue)
		}
		retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...)
		return retIssues
	}

	errIssues, err := buildIssuesFromIllTypedError(errs, lintCtx)
	if err != nil {
		return nil, err
	}

	issues = append(issues, errIssues...)
	issues = append(issues, buildAllIssues()...)

	return issues, nil
}

// buildIssues converts analyzer diagnostics (including their related
// information entries) into result.Issue values.
func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) string) []result.Issue {
	var issues []result.Issue
	for i := range diags {
		diag := &diags[i]
		linterName := linterNameBuilder(diag)

		var text string
		if diag.Analyzer.Name == linterName {
			text = diag.Message
		} else {
			text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message)
		}

		issues = append(issues, result.Issue{
			FromLinter: linterName,
			Text:       text,
			Pos:        diag.Position,
			Pkg:        diag.Pkg,
		})

		if len(diag.Related) > 0 {
			for _, info := range diag.Related {
				issues = append(issues, result.Issue{
					FromLinter: linterName,
					Text:       fmt.Sprintf("%s(related information): %s", diag.Analyzer.Name, info.Message),
					Pos:        diag.Pkg.Fset.Position(info.Pos),
					Pkg:        diag.Pkg,
				})
			}
		}
	}
	return issues
}

// getIssuesCacheKey builds the package-cache key under which lint results
// for the given set of analyzers are stored.
func getIssuesCacheKey(analyzers []*analysis.Analyzer) string {
	return "lint/result:" + analyzersHashID(analyzers)
}

// saveIssuesToCache stores the issues of every freshly analyzed package in the
// package cache, encoding and writing them with GOMAXPROCS worker goroutines.
func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool,
	issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) {
	startedAt := time.Now()
	perPkgIssues := map[*packages.Package][]result.Issue{}
	for ind := range issues {
		i := &issues[ind]
		perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i)
	}

	savedIssuesCount := int32(0)
	lintResKey := getIssuesCacheKey(analyzers)

	workerCount := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	wg.Add(workerCount)

	pkgCh := make(chan *packages.Package, len(allPkgs))
	for i := 0; i < workerCount; i++ {
		go func() {
			defer wg.Done()
			for pkg := range pkgCh {
				pkgIssues := perPkgIssues[pkg]
				encodedIssues := make([]EncodingIssue, 0, len(pkgIssues))
				for ind := range pkgIssues {
					i := &pkgIssues[ind]
					encodedIssues = append(encodedIssues, EncodingIssue{
						FromLinter:           i.FromLinter,
						Text:                 i.Text,
						Pos:                  i.Pos,
						LineRange:            i.LineRange,
						Replacement:          i.Replacement,
						ExpectNoLint:         i.ExpectNoLint,
						ExpectedNoLintLinter: i.ExpectedNoLintLinter,
					})
				}

				atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues)))
				if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil {
					lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err)
				} else {
					issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues))
				}
			}
		}()
	}

	for _, pkg := range allPkgs {
		if pkgsFromCache[pkg] {
			continue
		}

		pkgCh <- pkg
	}
	close(pkgCh)
	wg.Wait()

	issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt))
}

// loadIssuesFromCache reads cached issues for the given packages and reports
// which packages could be served entirely from the cache.
//nolint:gocritic
func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
	analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) {
	startedAt := time.Now()

	lintResKey := getIssuesCacheKey(analyzers)
	type cacheRes struct {
		issues  []result.Issue
		loadErr error
	}
	pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs))
	for _, pkg := range pkgs {
		pkgToCacheRes[pkg] = &cacheRes{}
	}

	workerCount := runtime.GOMAXPROCS(-1)
	var wg sync.WaitGroup
	wg.Add(workerCount)

	pkgCh := make(chan *packages.Package, len(pkgs))
	for i := 0; i < workerCount; i++ {
		go func() {
			defer wg.Done()
			for pkg := range pkgCh {
				var pkgIssues []EncodingIssue
				err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues)
				cacheRes := pkgToCacheRes[pkg]
				cacheRes.loadErr = err
				if err != nil {
					continue
				}
				if len(pkgIssues) == 0 {
					continue
				}

				issues := make([]result.Issue, 0, len(pkgIssues))
				for _, i := range pkgIssues {
					issues = append(issues, result.Issue{
						FromLinter:           i.FromLinter,
						Text:                 i.Text,
						Pos:                  i.Pos,
						LineRange:            i.LineRange,
						Replacement:          i.Replacement,
						Pkg:                  pkg,
						ExpectNoLint:         i.ExpectNoLint,
						ExpectedNoLintLinter: i.ExpectedNoLintLinter,
					})
				}
				cacheRes.issues = issues
			}
		}()
	}

	for _, pkg := range pkgs {
		pkgCh <- pkg
	}
	close(pkgCh)
	wg.Wait()

	loadedIssuesCount := 0
	var issues []result.Issue
	pkgsFromCache := map[*packages.Package]bool{}
	for pkg, cacheRes := range pkgToCacheRes {
		if cacheRes.loadErr == nil {
			loadedIssuesCount += len(cacheRes.issues)
			pkgsFromCache[pkg] = true
			issues = append(issues, cacheRes.issues...)
			issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues))
		} else {
			issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr)
		}
	}
	issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages",
		loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs))
	return issues, pkgsFromCache
}

// analyzersHashID returns a deterministic identifier built from the sorted analyzer names.
func analyzersHashID(analyzers []*analysis.Analyzer) string {
	names := make([]string, 0, len(analyzers))
	for _, a := range analyzers {
		names = append(names, a.Name)
	}

	sort.Strings(names)
	return strings.Join(names, ",")
}