Merge pull request #6 from golangci/feature/optimize-linters-scheduling
optimize linters scheduling: run slow linters first
Commit 3c2ca7b7c9
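The scheduling change in a nutshell: every linter config gains a Speed weight (a higher value means faster execution), and the runner enqueues linters sorted by ascending Speed, so the slowest ones are picked up by workers first and overlap with the cheaper ones. A minimal, self-contained sketch of that ordering; the task type and the sample weights here are illustrative placeholders, not the project's real types:

package main

import (
	"fmt"
	"sort"
)

// task is an illustrative stand-in for a linter config; speed mirrors the new
// LinterConfig.Speed field, where a higher value means faster execution.
type task struct {
	name  string
	speed int
}

func main() {
	tasks := []task{{"errcheck", 10}, {"govet", 4}, {"megacheck", 1}, {"dupl", 7}}

	// Sort ascending by speed so the slowest linters are scheduled first,
	// mirroring what getSortedLintersConfigs does in this change.
	sort.Slice(tasks, func(i, j int) bool { return tasks[i].speed < tasks[j].speed })

	for _, t := range tasks {
		fmt.Println(t.name) // megacheck, govet, dupl, errcheck
	}
}

With a fixed-size worker pool, starting the slowest work first reduces the chance that one long-running linter is left running alone at the end.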
@@ -202,7 +202,7 @@ func buildLintCtx(ctx context.Context, linters []pkg.Linter, cfg *config.Config)
}, nil
}

func (e *Executor) runAnalysis(ctx context.Context, args []string) (chan result.Issue, error) {
func (e *Executor) runAnalysis(ctx context.Context, args []string) (<-chan result.Issue, error) {
e.cfg.Run.Args = args

linters, err := pkg.GetEnabledLinters(e.cfg)
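A side note on the chan result.Issue to <-chan result.Issue changes that recur throughout this diff: returning a receive-only channel lets the compiler guarantee that callers can only read from the result stream, never send to it or close it. A tiny standalone illustration (the names here are hypothetical, not from the project):

package main

import "fmt"

// produce returns a receive-only channel: callers can range over it but
// cannot send to it or close it; only the producer goroutine closes it.
func produce(n int) <-chan int {
	out := make(chan int, n)
	go func() {
		defer close(out)
		for i := 0; i < n; i++ {
			out <- i
		}
	}()
	return out
}

func main() {
	for v := range produce(3) {
		fmt.Println(v)
	}
}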
@@ -38,6 +38,7 @@ type LinterConfig struct {
DoesFullImport bool
NeedsSSARepr bool
InPresets []string
Speed int // more value means faster execution of linter
}

func (lc LinterConfig) WithFullImport() LinterConfig {
@@ -56,6 +57,11 @@ func (lc LinterConfig) WithPresets(presets ...string) LinterConfig {
return lc
}

func (lc LinterConfig) WithSpeed(speed int) LinterConfig {
lc.Speed = speed
return lc
}

func newLinterConfig(linter Linter) LinterConfig {
return LinterConfig{
Linter: linter,
@@ -93,30 +99,31 @@ func enableLinterConfigs(lcs []LinterConfig, isEnabled func(lc *LinterConfig) bo

func GetAllSupportedLinterConfigs() []LinterConfig {
lcs := []LinterConfig{
newLinterConfig(golinters.Govet{}).WithPresets(PresetBugs),
newLinterConfig(golinters.Errcheck{}).WithFullImport().WithPresets(PresetBugs),
newLinterConfig(golinters.Golint{}).WithPresets(PresetStyle),
newLinterConfig(golinters.Govet{}).WithPresets(PresetBugs).WithSpeed(4),
newLinterConfig(golinters.Errcheck{}).WithFullImport().WithPresets(PresetBugs).WithSpeed(10),
newLinterConfig(golinters.Golint{}).WithPresets(PresetStyle).WithSpeed(3),

newLinterConfig(golinters.Megacheck{StaticcheckEnabled: true}).WithSSA().WithPresets(PresetBugs),
newLinterConfig(golinters.Megacheck{UnusedEnabled: true}).WithSSA().WithPresets(PresetUnused),
newLinterConfig(golinters.Megacheck{GosimpleEnabled: true}).WithSSA().WithPresets(PresetStyle),
newLinterConfig(golinters.Megacheck{StaticcheckEnabled: true}).WithSSA().
WithPresets(PresetBugs).WithSpeed(2),
newLinterConfig(golinters.Megacheck{UnusedEnabled: true}).WithSSA().WithPresets(PresetUnused).WithSpeed(5),
newLinterConfig(golinters.Megacheck{GosimpleEnabled: true}).WithSSA().WithPresets(PresetStyle).WithSpeed(5),

newLinterConfig(golinters.Gas{}).WithFullImport().WithPresets(PresetBugs),
newLinterConfig(golinters.Structcheck{}).WithFullImport().WithPresets(PresetUnused),
newLinterConfig(golinters.Varcheck{}).WithFullImport().WithPresets(PresetUnused),
newLinterConfig(golinters.Interfacer{}).WithSSA().WithPresets(PresetStyle),
newLinterConfig(golinters.Unconvert{}).WithFullImport().WithPresets(PresetStyle),
newLinterConfig(golinters.Ineffassign{}).WithPresets(PresetUnused),
newLinterConfig(golinters.Dupl{}).WithPresets(PresetStyle),
newLinterConfig(golinters.Goconst{}).WithPresets(PresetStyle),
newLinterConfig(golinters.Deadcode{}).WithFullImport().WithPresets(PresetUnused),
newLinterConfig(golinters.Gocyclo{}).WithPresets(PresetComplexity),
newLinterConfig(golinters.Gas{}).WithFullImport().WithPresets(PresetBugs).WithSpeed(8),
newLinterConfig(golinters.Structcheck{}).WithFullImport().WithPresets(PresetUnused).WithSpeed(10),
newLinterConfig(golinters.Varcheck{}).WithFullImport().WithPresets(PresetUnused).WithSpeed(10),
newLinterConfig(golinters.Interfacer{}).WithSSA().WithPresets(PresetStyle).WithSpeed(6),
newLinterConfig(golinters.Unconvert{}).WithFullImport().WithPresets(PresetStyle).WithSpeed(10),
newLinterConfig(golinters.Ineffassign{}).WithPresets(PresetUnused).WithSpeed(9),
newLinterConfig(golinters.Dupl{}).WithPresets(PresetStyle).WithSpeed(7),
newLinterConfig(golinters.Goconst{}).WithPresets(PresetStyle).WithSpeed(9),
newLinterConfig(golinters.Deadcode{}).WithFullImport().WithPresets(PresetUnused).WithSpeed(10),
newLinterConfig(golinters.Gocyclo{}).WithPresets(PresetComplexity).WithSpeed(8),

newLinterConfig(golinters.Gofmt{}).WithPresets(PresetFormatting),
newLinterConfig(golinters.Gofmt{UseGoimports: true}).WithPresets(PresetFormatting),
newLinterConfig(golinters.Maligned{}).WithFullImport().WithPresets(PresetPerformance),
newLinterConfig(golinters.Gofmt{}).WithPresets(PresetFormatting).WithSpeed(7),
newLinterConfig(golinters.Gofmt{UseGoimports: true}).WithPresets(PresetFormatting).WithSpeed(5),
newLinterConfig(golinters.Maligned{}).WithFullImport().WithPresets(PresetPerformance).WithSpeed(10),
newLinterConfig(golinters.Megacheck{GosimpleEnabled: true, UnusedEnabled: true, StaticcheckEnabled: true}).
WithSSA().WithPresets(PresetStyle, PresetBugs, PresetUnused),
WithSSA().WithPresets(PresetStyle, PresetBugs, PresetUnused).WithSpeed(1),
}

if os.Getenv("GOLANGCI_COM_RUN") == "1" {
@@ -13,7 +13,7 @@ func NewJSON() *JSON {
return &JSON{}
}

func (JSON) Print(issues chan result.Issue) (bool, error) {
func (JSON) Print(issues <-chan result.Issue) (bool, error) {
var allIssues []result.Issue
for i := range issues {
allIssues = append(allIssues, i)
@@ -3,5 +3,5 @@ package printers
import "github.com/golangci/golangci-lint/pkg/result"

type Printer interface {
Print(issues chan result.Issue) (bool, error)
Print(issues <-chan result.Issue) (bool, error)
}
@@ -58,7 +58,7 @@ func (p *Text) getFileLinesForIssue(i *result.Issue) (linesCache, error) {
return fc, nil
}

func (p *Text) Print(issues chan result.Issue) (bool, error) {
func (p *Text) Print(issues <-chan result.Issue) (bool, error) {
var issuedLineExtractingDuration time.Duration
defer func() {
logrus.Infof("Extracting issued lines took %s", issuedLineExtractingDuration)
pkg/runner.go (280 changed lines)
@@ -5,12 +5,15 @@ import (
"fmt"
"os"
"runtime/debug"
"sort"
"strings"
"sync"
"time"

"github.com/golangci/golangci-lint/pkg/golinters"
"github.com/golangci/golangci-lint/pkg/result"
"github.com/golangci/golangci-lint/pkg/result/processors"
"github.com/golangci/golangci-lint/pkg/timeutils"
"github.com/sirupsen/logrus"
)
@@ -24,81 +27,102 @@ type lintRes struct {
issues []result.Issue
}

func (r *SimpleRunner) runLinter(ctx context.Context, linter Linter, lintCtx *golinters.Context, i int) (res []result.Issue, err error) {
func runLinterSafe(ctx context.Context, lintCtx *golinters.Context, linter Linter) (ret []result.Issue, err error) {
defer func() {
if panicData := recover(); panicData != nil {
err = fmt.Errorf("panic occured: %s", panicData)
logrus.Infof("Panic stack trace: %s", debug.Stack())
}
}()
startedAt := time.Now()
res, err = linter.Run(ctx, lintCtx)

logrus.Infof("worker #%d: linter %s took %s and found %d issues (before processing them)", i, linter.Name(),
time.Since(startedAt), len(res))
return
return linter.Run(ctx, lintCtx)
}

func (r *SimpleRunner) runLinters(ctx context.Context, wg *sync.WaitGroup, tasksCh chan Linter, lintResultsCh chan lintRes, lintCtx *golinters.Context, workersCount int) {
for i := 0; i < workersCount; i++ {
func runWorker(ctx context.Context, lintCtx *golinters.Context, tasksCh <-chan Linter, lintResultsCh chan<- lintRes, name string) {
sw := timeutils.NewStopwatch(name)
defer sw.Print()

for {
select {
case <-ctx.Done():
return
case linter, ok := <-tasksCh:
if !ok {
return
}
if ctx.Err() != nil {
// XXX: if check it in only int a select
// it's possible to not enter to this case until tasksCh is empty.
return
}
var issues []result.Issue
var err error
sw.TrackStage(linter.Name(), func() {
issues, err = runLinterSafe(ctx, lintCtx, linter)
})
lintResultsCh <- lintRes{
linter: linter,
err: err,
issues: issues,
}
}
}
}

func logWorkersStat(workersFinishTimes []time.Time) {
lastFinishTime := workersFinishTimes[0]
for _, t := range workersFinishTimes {
if t.After(lastFinishTime) {
lastFinishTime = t
}
}

logStrings := []string{}
for i, t := range workersFinishTimes {
if t.Equal(lastFinishTime) {
continue
}

logStrings = append(logStrings, fmt.Sprintf("#%d: %s", i+1, lastFinishTime.Sub(t)))
}

logrus.Infof("Workers idle times: %s", strings.Join(logStrings, ", "))
}

func getSortedLintersConfigs(linters []Linter) []LinterConfig {
ret := make([]LinterConfig, 0, len(linters))
for _, linter := range linters {
ret = append(ret, *GetLinterConfig(linter.Name()))
}

sort.Slice(ret, func(i, j int) bool {
return ret[i].Speed < ret[j].Speed
})

return ret
}

func (r *SimpleRunner) runWorkers(ctx context.Context, lintCtx *golinters.Context, linters []Linter) <-chan lintRes {
tasksCh := make(chan Linter, len(linters))
lintResultsCh := make(chan lintRes, len(linters))
var wg sync.WaitGroup

savedStdout, savedStderr := setOutputToDevNull() // Don't allow linters to print anything
workersFinishTimes := make([]time.Time, lintCtx.Cfg.Run.Concurrency)

for i := 0; i < lintCtx.Cfg.Run.Concurrency; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
for {
select {
case <-ctx.Done():
return
case linter, ok := <-tasksCh:
if !ok {
return
}
if ctx.Err() != nil {
// XXX: if check it in only int a select
// it's possible to not enter to this case until tasksCh is empty.
return
}
issues, lerr := r.runLinter(ctx, linter, lintCtx, i)
lintResultsCh <- lintRes{
linter: linter,
err: lerr,
issues: issues,
}
}
}
}(i + 1)
}
}

func (r SimpleRunner) Run(ctx context.Context, linters []Linter, lintCtx *golinters.Context) chan result.Issue {
retIssues := make(chan result.Issue, 1024)
go func() {
defer close(retIssues)
if err := r.runGo(ctx, linters, lintCtx, retIssues); err != nil {
logrus.Warnf("error running linters: %s", err)
}
}()

return retIssues
}

func (r SimpleRunner) runGo(ctx context.Context, linters []Linter, lintCtx *golinters.Context, retIssues chan result.Issue) error {
savedStdout, savedStderr := os.Stdout, os.Stderr
devNull, err := os.Open(os.DevNull)
if err != nil {
return fmt.Errorf("can't open null device %q: %s", os.DevNull, err)
name := fmt.Sprintf("worker.%d", i+1)
runWorker(ctx, lintCtx, tasksCh, lintResultsCh, name)
workersFinishTimes[i] = time.Now()
}(i)
}

// Don't allow linters to print anything
os.Stdout, os.Stderr = devNull, devNull

lintResultsCh := make(chan lintRes, len(linters))
tasksCh := make(chan Linter, len(linters))
workersCount := lintCtx.Cfg.Run.Concurrency
var wg sync.WaitGroup
wg.Add(workersCount)
r.runLinters(ctx, &wg, tasksCh, lintResultsCh, lintCtx, workersCount)

for _, linter := range linters {
tasksCh <- linter
lcs := getSortedLintersConfigs(linters)
for _, lc := range lcs {
tasksCh <- lc.Linter
}
close(tasksCh)
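The runWorker loop above follows the usual bounded worker-pool shape: a fixed number of goroutines drain a buffered task channel and exit either when the channel is closed and empty or when the context is cancelled. A stripped-down sketch of that pattern under those assumptions, with placeholder task and result types rather than the real Linter and lintRes:

package main

import (
	"context"
	"fmt"
	"sync"
)

func worker(ctx context.Context, tasks <-chan string, results chan<- string) {
	for {
		select {
		case <-ctx.Done():
			return // deadline or cancellation: stop taking new tasks
		case t, ok := <-tasks:
			if !ok {
				return // channel closed and drained: no more work
			}
			if ctx.Err() != nil {
				// Re-check the context: with a non-empty tasks channel the
				// select above may keep picking tasks after cancellation.
				return
			}
			results <- "done: " + t
		}
	}
}

func main() {
	ctx := context.Background()
	tasks := make(chan string, 3)
	results := make(chan string, 3)

	var wg sync.WaitGroup
	for i := 0; i < 2; i++ { // fixed-size pool, like the Run.Concurrency workers
		wg.Add(1)
		go func() {
			defer wg.Done()
			worker(ctx, tasks, results)
		}()
	}

	for _, t := range []string{"slow", "medium", "fast"} {
		tasks <- t
	}
	close(tasks)

	go func() { wg.Wait(); close(results) }()
	for r := range results {
		fmt.Println(r)
	}
}

Closing the task channel is what lets every worker terminate naturally; the extra ctx.Err() check mirrors the XXX comment in the diff, since a select may keep choosing the non-empty tasks case after cancellation.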
@@ -106,53 +130,111 @@ func (r SimpleRunner) runGo(ctx context.Context, linters []Linter, lintCtx *goli
wg.Wait()
close(lintResultsCh)
os.Stdout, os.Stderr = savedStdout, savedStderr

logWorkersStat(workersFinishTimes)
}()

finishedN := 0
for res := range lintResultsCh {
if res.err != nil {
logrus.Infof("Can't run linter %s: %s", res.linter.Name(), res.err)
continue
}

finishedN++

if len(res.issues) != 0 {
res.issues = r.processIssues(ctx, res.issues)
}

for _, i := range res.issues {
retIssues <- i
}
}

// finalize processors: logging, clearing, no heavy work here
for _, p := range r.Processors {
p.Finish()
}

if ctx.Err() != nil {
return fmt.Errorf("%d/%d linters finished: deadline exceeded: try increase it by passing --deadline option",
finishedN, len(linters))
}

return nil
return lintResultsCh
}

func (r *SimpleRunner) processIssues(ctx context.Context, issues []result.Issue) []result.Issue {
for _, p := range r.Processors {
startedAt := time.Now()
newIssues, err := p.Process(issues)
elapsed := time.Since(startedAt)
if elapsed > 50*time.Millisecond {
logrus.Infof("Result processor %s took %s and transformed %d -> %d issues",
p.Name(), elapsed, len(issues), len(newIssues))
func (r SimpleRunner) processLintResults(ctx context.Context, inCh <-chan lintRes) <-chan lintRes {
outCh := make(chan lintRes, 64)

go func() {
sw := timeutils.NewStopwatch("processing")

defer close(outCh)

for res := range inCh {
if res.err != nil {
logrus.Infof("Can't run linter %s: %s", res.linter.Name(), res.err)
continue
}

if len(res.issues) != 0 {
res.issues = r.processIssues(ctx, res.issues, sw)
outCh <- res
}
}

// finalize processors: logging, clearing, no heavy work here

for _, p := range r.Processors {
sw.TrackStage(p.Name(), func() {
p.Finish()
})
}

sw.PrintStages()
}()

return outCh
}

func collectIssues(ctx context.Context, resCh <-chan lintRes) <-chan result.Issue {
retIssues := make(chan result.Issue, 1024)
go func() {
defer close(retIssues)

for res := range resCh {
if len(res.issues) == 0 {
continue
}

for _, i := range res.issues {
retIssues <- i
}
}
}()

return retIssues
}

func setOutputToDevNull() (savedStdout, savedStderr *os.File) {
savedStdout, savedStderr = os.Stdout, os.Stderr
devNull, err := os.Open(os.DevNull)
if err != nil {
logrus.Warnf("can't open null device %q: %s", os.DevNull, err)
return
}

os.Stdout, os.Stderr = devNull, devNull
return
}

func (r SimpleRunner) Run(ctx context.Context, linters []Linter, lintCtx *golinters.Context) <-chan result.Issue {
defer timeutils.NewStopwatch("runner").Print()

lintResultsCh := r.runWorkers(ctx, lintCtx, linters)
processedLintResultsCh := r.processLintResults(ctx, lintResultsCh)
if ctx.Err() != nil {
// XXX: always process issues, even if timeout occured
finishedLintersN := 0
for range processedLintResultsCh {
finishedLintersN++
}

logrus.Warnf("%d/%d linters finished: deadline exceeded: try increase it by passing --deadline option",
finishedLintersN, len(linters))
}

return collectIssues(ctx, processedLintResultsCh)
}

func (r *SimpleRunner) processIssues(ctx context.Context, issues []result.Issue, sw *timeutils.Stopwatch) []result.Issue {
for _, p := range r.Processors {
var newIssues []result.Issue
var err error
sw.TrackStage(p.Name(), func() {
newIssues, err = p.Process(issues)
})

if err != nil {
logrus.Warnf("Can't process result by %s processor: %s", p.Name(), err)
} else {
issues = newIssues
}

if issues == nil {
issues = []result.Issue{}
}
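After this refactoring, Run reads as a pipeline of channel stages: runWorkers emits lintRes values, processLintResults filters and transforms them, and collectIssues flattens them into the single issue channel returned to the caller. A minimal sketch of that staged-channel shape, using placeholder int values instead of the real lintRes and result.Issue types:

package main

import "fmt"

// stage 1: produce raw results
func produce(items []int) <-chan int {
	out := make(chan int, len(items))
	go func() {
		defer close(out)
		for _, it := range items {
			out <- it
		}
	}()
	return out
}

// stage 2: process results, dropping the ones that are filtered out
func process(in <-chan int) <-chan int {
	out := make(chan int, 64)
	go func() {
		defer close(out)
		for v := range in {
			if v%2 == 0 { // placeholder for the issue processors
				out <- v * 10
			}
		}
	}()
	return out
}

// stage 3: collect into the channel handed back to the caller
func collect(in <-chan int) <-chan int {
	out := make(chan int, 1024)
	go func() {
		defer close(out)
		for v := range in {
			out <- v
		}
	}()
	return out
}

func main() {
	for v := range collect(process(produce([]int{1, 2, 3, 4}))) {
		fmt.Println(v) // 20, 40
	}
}

Each stage closes its output when its input is drained, so the consumer's range loop ends on its own once all linters and processors have finished.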
pkg/timeutils/stopwatch.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package timeutils

import (
"fmt"
"sort"
"strings"
"sync"
"time"

"github.com/sirupsen/logrus"
)

type Stopwatch struct {
name string
startedAt time.Time
stages map[string]time.Duration

sync.Mutex
}

func NewStopwatch(name string) *Stopwatch {
return &Stopwatch{
name: name,
startedAt: time.Now(),
stages: map[string]time.Duration{},
}
}

type stageDuration struct {
name string
d time.Duration
}

func (s *Stopwatch) sprintStages() string {
stageDurations := []stageDuration{}
for n, d := range s.stages {
stageDurations = append(stageDurations, stageDuration{
name: n,
d: d,
})
}
sort.Slice(stageDurations, func(i, j int) bool {
return stageDurations[i].d > stageDurations[j].d
})
stagesStrings := []string{}
for _, s := range stageDurations {
stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d))
}

return fmt.Sprintf("stages: %s", strings.Join(stagesStrings, ", "))
}

func (s *Stopwatch) Print() {
p := fmt.Sprintf("%s took %s", s.name, time.Since(s.startedAt))
if len(s.stages) == 0 {
logrus.Info(p)
return
}

logrus.Infof("%s with %s", p, s.sprintStages())
}

func (s *Stopwatch) PrintStages() {
logrus.Infof("%s %s", s.name, s.sprintStages())
}

func (s *Stopwatch) TrackStage(name string, f func()) {
startedAt := time.Now()
f()

s.Lock()
s.stages[name] += time.Since(startedAt)
s.Unlock()
}
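For reference, a usage sketch of the Stopwatch added above, based only on the API visible in this diff (NewStopwatch, TrackStage, Print); the stage names and sleeps are made up for illustration:

package main

import (
	"time"

	"github.com/golangci/golangci-lint/pkg/timeutils"
)

func main() {
	sw := timeutils.NewStopwatch("demo")
	defer sw.Print() // logs total elapsed time plus per-stage durations

	sw.TrackStage("fast stage", func() { time.Sleep(10 * time.Millisecond) })
	sw.TrackStage("slow stage", func() { time.Sleep(50 * time.Millisecond) })
	// Durations accumulate per stage name, so repeated stages add up.
	sw.TrackStage("fast stage", func() { time.Sleep(10 * time.Millisecond) })
}

Print reports the total elapsed time plus a per-stage breakdown sorted from slowest to fastest stage, which is how the per-worker and per-processor timings in this change are logged.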