docs: fix grammar issues in comments (#2352)

This commit is contained in:
Oleksandr Redko 2021-11-11 06:56:36 +02:00 committed by GitHub
parent ecbb9c475f
commit 99c6516c6b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 30 additions and 30 deletions

View File

@ -257,7 +257,7 @@ const (
// and to reduce the amount of disk activity caused by using
// cache entries, used only updates the mtime if the current
// mtime is more than an hour old. This heuristic eliminates
// nearly all of the mtime updates that would otherwise happen,
// nearly all the mtime updates that would otherwise happen,
// while still keeping the mtimes useful for cache trimming.
func (c *Cache) used(file string) error {
info, err := os.Stat(file)
@ -311,7 +311,7 @@ func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
// Read all directory entries from subdir before removing
// any files, in case removing files invalidates the file offset
// in the directory scan. Also, ignore error from f.Readdirnames,
// because we don't care about reporting the error and we still
// because we don't care about reporting the error, and we still
// want to process any entries found before the error.
f, err := os.Open(subdir)
if err != nil {

View File

@ -233,7 +233,7 @@ func TestCacheTrim(t *testing.T) {
t.Fatalf("second trim did work: %q -> %q", data, data2)
}
// Fast forward and do another trim just before the 5 day cutoff.
// Fast-forward and do another trim just before the 5-day cutoff.
// Note that because of usedQuantum the cutoff is actually 5 days + 1 hour.
// We used c.Get(id) just now, so 5 days later it should still be kept.
// On the other hand almost a full day has gone by since we wrote dummyID(2)

View File

@ -26,7 +26,7 @@ const (
)
// Cache is a per-package data cache. A cached data is invalidated when
// package or it's dependencies change.
// package, or its dependencies change.
type Cache struct {
lowLevelCache *cache.Cache
pkgHashes sync.Map

View File

@ -53,7 +53,7 @@ func retry(f func() (err error, mayRetry bool)) error {
// rename is like os.Rename, but retries ephemeral errors.
//
// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
// On Windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
// MOVEFILE_REPLACE_EXISTING.
//
// Windows also provides a different system call, ReplaceFile,

View File

@ -243,7 +243,7 @@ func (e *Executor) initRunConfiguration(cmd *cobra.Command) {
func (e *Executor) getConfigForCommandLine() (*config.Config, error) {
// We use another pflag.FlagSet here to not set `changed` flag
// on cmd.Flags() options. Otherwise string slice options will be duplicated.
// on cmd.Flags() options. Otherwise, string slice options will be duplicated.
fs := pflag.NewFlagSet("config flag set", pflag.ContinueOnError)
var cfg config.Config
@ -259,7 +259,7 @@ func (e *Executor) getConfigForCommandLine() (*config.Config, error) {
// cfg vs e.cfg.
initRootFlagSet(fs, &cfg, true)
fs.Usage = func() {} // otherwise help text will be printed twice
fs.Usage = func() {} // otherwise, help text will be printed twice
if err := fs.Parse(os.Args); err != nil {
if err == pflag.ErrHelp {
return nil, err

View File

@ -185,7 +185,7 @@ func (r *runner) prepareAnalysis(pkgs []*packages.Package,
// and analysis-to-analysis (horizontal) dependencies.
// This place is memory-intensive: e.g. Istio project has 120k total actions.
// Therefore optimize it carefully.
// Therefore, optimize it carefully.
markedActions := make(map[actKey]struct{}, len(analyzers)*len(pkgs))
for _, a := range analyzers {
for _, pkg := range pkgs {

View File

@ -179,8 +179,8 @@ func (act *action) analyze() {
if act.pkg.IllTyped {
// It looks like there should be !pass.Analyzer.RunDespiteErrors
// but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here
// but it exit before it if packages.Load have failed.
// but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here,
// but it exits before it if packages.Load has failed.
act.err = errors.Wrap(&IllTypedError{Pkg: act.pkg}, "analysis skipped")
} else {
startedAt = time.Now()

View File

@ -61,7 +61,7 @@ func (lp *loadingPackage) analyze(loadMode LoadMode, loadSem chan struct{}) {
if err := lp.loadWithFacts(loadMode); err != nil {
werr := errors.Wrapf(err, "failed to load package %s", lp.pkg.Name)
// Don't need to write error to errCh, it will be extracted and reported on another layer.
// Unblock depending actions and propagate error.
// Unblock dependent actions and propagate the error.
for _, act := range lp.actions {
close(act.analysisDoneCh)
act.err = werr
@ -269,16 +269,16 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error
// Load package from export data
if loadMode >= LoadModeTypesInfo {
if err := lp.loadFromExportData(); err != nil {
// We asked Go to give us up to date export data, yet
// We asked Go to give us up-to-date export data, yet
// we can't load it. There must be something wrong.
//
// Attempt loading from source. This should fail (because
// otherwise there would be export data); we just want to
// get the compile errors. If loading from source succeeds
// we discard the result, anyway. Otherwise we'll fail
// we discard the result, anyway. Otherwise, we'll fail
// when trying to reload from export data later.
// Otherwise it panics because uses already existing (from exported data) types.
// Otherwise, it panics because it uses already-existing (from exported data) types.
pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
if srcErr := lp.loadFromSource(loadMode); srcErr != nil {
return srcErr
@ -311,7 +311,7 @@ func (lp *loadingPackage) loadImportedPackageWithFacts(loadMode LoadMode) error
// Cached facts loading failed: analyze later the action from source. To perform
// the analysis we need to load the package from source code.
// Otherwise it panics because uses already existing (from exported data) types.
// Otherwise, it panics because it uses already-existing (from exported data) types.
if loadMode >= LoadModeTypesInfo {
pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
}

View File

@ -10,9 +10,9 @@ import (
func NewGochecknoglobals() *goanalysis.Linter {
gochecknoglobals := checknoglobals.Analyzer()
// gochecknoglobals only lints test files if the `-t` flag is passed so we
// gochecknoglobals only lints test files if the `-t` flag is passed, so we
// pass the `t` flag as true to the analyzer before running it. This can be
// turned of by using the regular golangci-lint flags such as `--tests` or
// turned off by using the regular golangci-lint flags such as `--tests` or
// `--skip-files`.
linterConfig := map[string]map[string]interface{}{
gochecknoglobals.Name: {

View File

@ -51,11 +51,11 @@ func getLLLIssuesForFile(filename string, maxLineLen int, tabSpaces string) ([]r
// we can return this line as a long line instead of returning an error.
// The reason for this change is that this case might happen with autogenerated files
// The go-bindata tool for instance might generate a file with a very long line.
// In this case, as it's a auto generated file, the warning returned by lll will
// In this case, as it's an auto-generated file, the warning returned by lll will
// be ignored.
// But if we return a linter error here, and this error happens for an autogenerated
// file the error will be discarded (fine), but all the subsequent errors for lll will
// be discarded for other files and we'll miss legit error.
// be discarded for other files, and we'll miss legit error.
res = append(res, result.Issue{
Pos: token.Position{
Filename: filename,

View File

@ -199,7 +199,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
position: pos,
}
// check for, report and eliminate leading spaces so we can check for other issues
// check for, report and eliminate leading spaces, so we can check for other issues
if len(leadingSpace) > 0 {
removeWhitespace := &result.Replacement{
Inline: &result.InlineFix{
@ -281,7 +281,7 @@ func (l Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
if (l.needs&NeedsExplanation) != 0 && (explanation == "" || strings.TrimSpace(explanation) == "//") {
needsExplanation := len(linters) == 0 // if no linters are mentioned, we must have explanation
// otherwise, check if we are excluding all of the mentioned linters
// otherwise, check if we are excluding all the mentioned linters
for _, ll := range linters {
if !l.excludeByLinter[ll] { // if a linter does require explanation
needsExplanation = true

View File

@ -146,7 +146,7 @@ func reviveToIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
}
// This function mimics the GetConfig function of revive.
// This allow to get default values and right types.
// This allows us to get default values and right types.
// https://github.com/golangci/golangci-lint/issues/1745
// https://github.com/mgechev/revive/blob/389ba853b0b3587f0c3b71b5f0c61ea4e23928ec/config/config.go#L155
func getReviveConfig(cfg *config.ReviveSettings) (*lint.Config, error) {

View File

@ -84,7 +84,7 @@ func (cl *ContextLoader) buildArgs() []string {
if strings.HasPrefix(arg, ".") || filepath.IsAbs(arg) {
retArgs = append(retArgs, arg)
} else {
// go/packages doesn't work well if we don't have prefix ./ for local packages
// go/packages doesn't work well if we don't have the prefix ./ for local packages
retArgs = append(retArgs, fmt.Sprintf(".%c%s", filepath.Separator, arg))
}
}

View File

@ -123,7 +123,7 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context,
// which affects to the next analysis.
// To avoid this issue, we clear type information from the packages.
// See https://github.com/golangci/golangci-lint/pull/944.
// Currently DoesChangeTypes is true only for `unused`.
// Currently, DoesChangeTypes is true only for `unused`.
lintCtx.ClearTypesInPackages()
}

View File

@ -17,11 +17,11 @@ const (
// Debug messages, write to debug logs only by logutils.Debug.
LogLevelDebug LogLevel = 0
// Information messages, don't write too much messages,
// Information messages, don't write too many messages,
// only useful ones: they are shown when running with -v.
LogLevelInfo LogLevel = 1
// Hidden errors: non critical errors: work can be continued, no need to fail whole program;
// Hidden errors: non-critical errors: work can be continued, no need to fail whole program;
// tests will crash if any warning occurred.
LogLevelWarn LogLevel = 2

View File

@ -42,7 +42,7 @@ func ExtractErrors(pkg *packages.Package) []packages.Error {
continue
}
// change pos to local file to properly process it by processors (properly read line etc)
// change pos to local file to properly process it by processors (properly read line etc.)
uniqErrors[i].Msg = fmt.Sprintf("%s: %s", uniqErrors[i].Pos, uniqErrors[i].Msg)
uniqErrors[i].Pos = fmt.Sprintf("%s:1", pkg.GoFiles[0])
}
@ -65,7 +65,7 @@ func extractErrorsImpl(pkg *packages.Package, seenPackages map[*packages.Package
}
seenPackages[pkg] = true
if !pkg.IllTyped { // otherwise it may take hours to traverse all deps many times
if !pkg.IllTyped { // otherwise, it may take hours to traverse all deps many times
return nil
}

View File

@ -46,7 +46,7 @@ func (i *ignoredRange) doesMatch(issue *result.Issue) bool {
}
// handle possible unused nolint directives
// nolintlint generates potential issues for every nolint directive and they are filtered out here
// nolintlint generates potential issues for every nolint directive, and they are filtered out here
if issue.FromLinter == golinters.NolintlintName && issue.ExpectNoLint {
if issue.ExpectedNoLintLinter != "" {
return i.matchedIssueFromLinter[issue.ExpectedNoLintLinter]

View File

@ -9,7 +9,7 @@ import (
)
// Base propose of this functionality to sort results (issues)
// produced by various linters by analyzing code. We achieving this
// produced by various linters by analyzing code. We're achieving this
// by sorting results.Issues using processor step, and chain based
// rules that can compare different properties of the Issues struct.