aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/golangci
diff options
context:
space:
mode:
authorTaras Madan <tarasmadan@google.com>2024-11-11 11:41:38 +0100
committerTaras Madan <tarasmadan@google.com>2024-11-11 11:10:48 +0000
commit27e76fae2ee2d84dc7db63af1d9ed7358ba35b7a (patch)
treeed19c0e35e272b3c4cc5a2f2c595e035b2428337 /vendor/github.com/golangci
parent621e84e063b0e15b23e17780338627c509e1b9e8 (diff)
vendor: update
Diffstat (limited to 'vendor/github.com/golangci')
-rw-r--r--vendor/github.com/golangci/go-printf-func-name/LICENSE22
-rw-r--r--vendor/github.com/golangci/go-printf-func-name/pkg/analyzer/analyzer.go74
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/cache/cache.go631
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/cache/readme.md18
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/LICENSE27
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go663
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/cache_gcil.go12
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/default.go (renamed from vendor/github.com/golangci/golangci-lint/internal/cache/default.go)57
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/default_gcil.go6
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/hash.go (renamed from vendor/github.com/golangci/golangci-lint/internal/cache/hash.go)24
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/hash_gcil.go5
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/prog.go428
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/cache/readme.md51
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap.go31
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_other.go21
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_unix.go36
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_windows.go41
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/mmap/readme.md15
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/quoted/quoted.go129
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/quoted/readme.md13
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/robustio/readme.md (renamed from vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md)5
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio.go (renamed from vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go)0
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_darwin.go (renamed from vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go)0
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_flaky.go (renamed from vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go)0
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_other.go (renamed from vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go)0
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_windows.go (renamed from vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go)0
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go229
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md10
-rw-r--r--vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go93
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go1
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/commands/run.go5
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/config/config.go30
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go27
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/config/loader.go17
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go12
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go321
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go127
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go370
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_facts.go125
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go18
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go160
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go172
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go5
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go10
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname/goprintffuncname.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/iface/iface.go57
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/staticcheck_common.go11
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/mnd/mnd.go21
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret/nakedret.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go11
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go (renamed from vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery/execinquery.go)6
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go12
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv/tenv.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/lint/context.go6
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go2
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go4
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go45
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go28
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go9
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go12
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go7
-rw-r--r--vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go22
65 files changed, 2735 insertions, 1571 deletions
diff --git a/vendor/github.com/golangci/go-printf-func-name/LICENSE b/vendor/github.com/golangci/go-printf-func-name/LICENSE
new file mode 100644
index 000000000..4585140d1
--- /dev/null
+++ b/vendor/github.com/golangci/go-printf-func-name/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2024 Golangci-lint authors
+Copyright (c) 2020 Isaev Denis
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/golangci/go-printf-func-name/pkg/analyzer/analyzer.go b/vendor/github.com/golangci/go-printf-func-name/pkg/analyzer/analyzer.go
new file mode 100644
index 000000000..bce4b242e
--- /dev/null
+++ b/vendor/github.com/golangci/go-printf-func-name/pkg/analyzer/analyzer.go
@@ -0,0 +1,74 @@
+package analyzer
+
+import (
+ "go/ast"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/inspect"
+ "golang.org/x/tools/go/ast/inspector"
+)
+
+var Analyzer = &analysis.Analyzer{
+ Name: "goprintffuncname",
+ Doc: "Checks that printf-like functions are named with `f` at the end.",
+ Run: run,
+ Requires: []*analysis.Analyzer{inspect.Analyzer},
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ insp := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
+
+ nodeFilter := []ast.Node{
+ (*ast.FuncDecl)(nil),
+ }
+
+ insp.Preorder(nodeFilter, func(node ast.Node) {
+ funcDecl := node.(*ast.FuncDecl)
+
+ if res := funcDecl.Type.Results; res != nil && len(res.List) != 0 {
+ return
+ }
+
+ params := funcDecl.Type.Params.List
+ if len(params) < 2 { // [0] must be format (string), [1] must be args (...interface{})
+ return
+ }
+
+ formatParamType, ok := params[len(params)-2].Type.(*ast.Ident)
+ if !ok { // first param type isn't identificator so it can't be of type "string"
+ return
+ }
+
+ if formatParamType.Name != "string" { // first param (format) type is not string
+ return
+ }
+
+ if formatParamNames := params[len(params)-2].Names; len(formatParamNames) == 0 || formatParamNames[len(formatParamNames)-1].Name != "format" {
+ return
+ }
+
+ argsParamType, ok := params[len(params)-1].Type.(*ast.Ellipsis)
+ if !ok { // args are not ellipsis (...args)
+ return
+ }
+
+ elementType, ok := argsParamType.Elt.(*ast.InterfaceType)
+ if !ok { // args are not of interface type, but we need interface{}
+ return
+ }
+
+ if elementType.Methods != nil && len(elementType.Methods.List) != 0 {
+ return // has >= 1 method in interface, but we need an empty interface "interface{}"
+ }
+
+ if strings.HasSuffix(funcDecl.Name.Name, "f") {
+ return
+ }
+
+ pass.Reportf(node.Pos(), "printf-like formatting function '%s' should be named '%sf'",
+ funcDecl.Name.Name, funcDecl.Name.Name)
+ })
+
+ return nil, nil
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go
index 299fd5279..c249084e1 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go
@@ -1,525 +1,298 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cache implements a build artifact cache.
-//
-// This package is a slightly modified fork of Go's
-// cmd/go/internal/cache package.
package cache
import (
"bytes"
- "crypto/sha256"
+ "encoding/gob"
"encoding/hex"
"errors"
"fmt"
- "io"
- "os"
- "path/filepath"
- "strconv"
+ "runtime"
+ "slices"
"strings"
- "time"
+ "sync"
- "github.com/golangci/golangci-lint/internal/renameio"
- "github.com/golangci/golangci-lint/internal/robustio"
+ "golang.org/x/exp/maps"
+ "golang.org/x/tools/go/packages"
+
+ "github.com/golangci/golangci-lint/internal/go/cache"
+ "github.com/golangci/golangci-lint/pkg/logutils"
+ "github.com/golangci/golangci-lint/pkg/timeutils"
+)
+
+type HashMode int
+
+const (
+ HashModeNeedOnlySelf HashMode = iota
+ HashModeNeedDirectDeps
+ HashModeNeedAllDeps
)
-// An ActionID is a cache action key, the hash of a complete description of a
-// repeatable computation (command line, environment variables,
-// input file contents, executable contents).
-type ActionID [HashSize]byte
+var ErrMissing = errors.New("missing data")
-// An OutputID is a cache output key, the hash of an output of a computation.
-type OutputID [HashSize]byte
+type hashResults map[HashMode]string
-// A Cache is a package cache, backed by a file system directory tree.
+// Cache is a per-package data cache.
+// A cached data is invalidated when package,
+// or it's dependencies change.
type Cache struct {
- dir string
- now func() time.Time
+ lowLevelCache cache.Cache
+ pkgHashes sync.Map
+ sw *timeutils.Stopwatch
+ log logutils.Log
+ ioSem chan struct{} // semaphore limiting parallel IO
+}
+
+func NewCache(sw *timeutils.Stopwatch, log logutils.Log) (*Cache, error) {
+ return &Cache{
+ lowLevelCache: cache.Default(),
+ sw: sw,
+ log: log,
+ ioSem: make(chan struct{}, runtime.GOMAXPROCS(-1)),
+ }, nil
}
-// Open opens and returns the cache in the given directory.
-//
-// It is safe for multiple processes on a single machine to use the
-// same cache directory in a local file system simultaneously.
-// They will coordinate using operating system file locks and may
-// duplicate effort but will not corrupt the cache.
-//
-// However, it is NOT safe for multiple processes on different machines
-// to share a cache directory (for example, if the directory were stored
-// in a network file system). File locking is notoriously unreliable in
-// network file systems and may not suffice to protect the cache.
-func Open(dir string) (*Cache, error) {
- info, err := os.Stat(dir)
+func (c *Cache) Close() {
+ err := c.sw.TrackStageErr("close", c.lowLevelCache.Close)
if err != nil {
- return nil, err
+ c.log.Errorf("cache close: %v", err)
}
- if !info.IsDir() {
- return nil, &os.PathError{Op: "open", Path: dir, Err: errors.New("not a directory")}
- }
- for i := 0; i < 256; i++ {
- name := filepath.Join(dir, fmt.Sprintf("%02x", i))
- if err := os.MkdirAll(name, 0744); err != nil {
- return nil, err
- }
- }
- c := &Cache{
- dir: dir,
- now: time.Now,
- }
- return c, nil
}
-// fileName returns the name of the file corresponding to the given id.
-func (c *Cache) fileName(id [HashSize]byte, key string) string {
- return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
-}
+func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data any) error {
+ buf, err := c.encode(data)
+ if err != nil {
+ return err
+ }
-var errMissing = errors.New("cache entry not found")
+ actionID, err := c.buildKey(pkg, mode, key)
+ if err != nil {
+ return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err)
+ }
+
+ err = c.putBytes(actionID, buf)
+ if err != nil {
+ return fmt.Errorf("failed to save data to low-level cache by key %s for package %s: %w", key, pkg.Name, err)
+ }
-func IsErrMissing(err error) bool {
- return errors.Is(err, errMissing)
+ return nil
}
-const (
- // action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
- hexSize = HashSize * 2
- entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
-)
+func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data any) error {
+ actionID, err := c.buildKey(pkg, mode, key)
+ if err != nil {
+ return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err)
+ }
-// verify controls whether to run the cache in verify mode.
-// In verify mode, the cache always returns errMissing from Get
-// but then double-checks in Put that the data being written
-// exactly matches any existing entry. This provides an easy
-// way to detect program behavior that would have been different
-// had the cache entry been returned from Get.
-//
-// verify is enabled by setting the environment variable
-// GODEBUG=gocacheverify=1.
-var verify = false
-
-// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
-var DebugTest = false
-
-func init() { initEnv() }
-
-func initEnv() {
- verify = false
- debugHash = false
- debug := strings.Split(os.Getenv("GODEBUG"), ",")
- for _, f := range debug {
- if f == "gocacheverify=1" {
- verify = true
- }
- if f == "gocachehash=1" {
- debugHash = true
- }
- if f == "gocachetest=1" {
- DebugTest = true
+ cachedData, err := c.getBytes(actionID)
+ if err != nil {
+ if cache.IsErrMissing(err) {
+ return ErrMissing
}
+ return fmt.Errorf("failed to get data from low-level cache by key %s for package %s: %w", key, pkg.Name, err)
}
-}
-// Get looks up the action ID in the cache,
-// returning the corresponding output ID and file size, if any.
-// Note that finding an output ID does not guarantee that the
-// saved file for that output ID is still available.
-func (c *Cache) Get(id ActionID) (Entry, error) {
- if verify {
- return Entry{}, errMissing
- }
- return c.get(id)
+ return c.decode(cachedData, data)
}
-type Entry struct {
- OutputID OutputID
- Size int64
- Time time.Time
+func (c *Cache) buildKey(pkg *packages.Package, mode HashMode, key string) (cache.ActionID, error) {
+ return timeutils.TrackStage(c.sw, "key build", func() (cache.ActionID, error) {
+ actionID, err := c.pkgActionID(pkg, mode)
+ if err != nil {
+ return actionID, err
+ }
+
+ subkey, subkeyErr := cache.Subkey(actionID, key)
+ if subkeyErr != nil {
+ return actionID, fmt.Errorf("failed to build subkey: %w", subkeyErr)
+ }
+
+ return subkey, nil
+ })
}
-// get is Get but does not respect verify mode, so that Put can use it.
-func (c *Cache) get(id ActionID) (Entry, error) {
- missing := func() (Entry, error) {
- return Entry{}, errMissing
- }
- failed := func(err error) (Entry, error) {
- return Entry{}, err
- }
- fileName := c.fileName(id, "a")
- f, err := os.Open(fileName)
+func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) {
+ hash, err := c.packageHash(pkg, mode)
if err != nil {
- if os.IsNotExist(err) {
- return missing()
- }
- return failed(err)
- }
- defer f.Close()
- entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
- if n, readErr := io.ReadFull(f, entry); n != entrySize || readErr != io.ErrUnexpectedEOF {
- return failed(fmt.Errorf("read %d/%d bytes from %s with error %w", n, entrySize, fileName, readErr))
- }
- if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
- return failed(fmt.Errorf("bad data in %s", fileName))
- }
- eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
- eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
- esize, entry := entry[1:1+20], entry[1+20:]
- etime := entry[1 : 1+20]
- var buf [HashSize]byte
- if _, err = hex.Decode(buf[:], eid); err != nil || buf != id {
- return failed(fmt.Errorf("failed to hex decode eid data in %s: %w", fileName, err))
- }
- if _, err = hex.Decode(buf[:], eout); err != nil {
- return failed(fmt.Errorf("failed to hex decode eout data in %s: %w", fileName, err))
- }
- i := 0
- for i < len(esize) && esize[i] == ' ' {
- i++
- }
- size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
- if err != nil || size < 0 {
- return failed(fmt.Errorf("failed to parse esize int from %s with error %w", fileName, err))
- }
- i = 0
- for i < len(etime) && etime[i] == ' ' {
- i++
- }
- tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
- if err != nil || tm < 0 {
- return failed(fmt.Errorf("failed to parse etime int from %s with error %w", fileName, err))
+ return cache.ActionID{}, fmt.Errorf("failed to get package hash: %w", err)
}
- if err = c.used(fileName); err != nil {
- return failed(fmt.Errorf("failed to mark %s as used: %w", fileName, err))
+ key, err := cache.NewHash("action ID")
+ if err != nil {
+ return cache.ActionID{}, fmt.Errorf("failed to make a hash: %w", err)
}
- return Entry{buf, size, time.Unix(0, tm)}, nil
+ fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
+ fmt.Fprintf(key, "pkghash %s\n", hash)
+
+ return key.Sum(), nil
}
-// GetBytes looks up the action ID in the cache and returns
-// the corresponding output bytes.
-// GetBytes should only be used for data that can be expected to fit in memory.
-func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
- entry, err := c.Get(id)
- if err != nil {
- return nil, entry, err
- }
- outputFile, err := c.OutputFile(entry.OutputID)
- if err != nil {
- return nil, entry, err
+func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error) {
+ results, found := c.pkgHashes.Load(pkg)
+ if found {
+ hashRes := results.(hashResults)
+ if result, ok := hashRes[mode]; ok {
+ return result, nil
+ }
+
+ return "", fmt.Errorf("no mode %d in hash result", mode)
}
- data, err := robustio.ReadFile(outputFile)
+ hashRes, err := c.computePkgHash(pkg)
if err != nil {
- return nil, entry, err
+ return "", err
}
- if sha256.Sum256(data) != entry.OutputID {
- return nil, entry, errMissing
+ result, found := hashRes[mode]
+ if !found {
+ return "", fmt.Errorf("invalid mode %d", mode)
}
- return data, entry, nil
+
+ c.pkgHashes.Store(pkg, hashRes)
+
+ return result, nil
}
-// OutputFile returns the name of the cache file storing output with the given OutputID.
-func (c *Cache) OutputFile(out OutputID) (string, error) {
- file := c.fileName(out, "d")
- if err := c.used(file); err != nil {
- return "", err
+// computePkgHash computes a package's hash.
+// The hash is based on all Go files that make up the package,
+// as well as the hashes of imported packages.
+func (c *Cache) computePkgHash(pkg *packages.Package) (hashResults, error) {
+ key, err := cache.NewHash("package hash")
+ if err != nil {
+ return nil, fmt.Errorf("failed to make a hash: %w", err)
}
- return file, nil
-}
-// Time constants for cache expiration.
-//
-// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
-// to avoid causing many unnecessary inode updates. The mtimes therefore
-// roughly reflect "time of last use" but may in fact be older by at most an hour.
-//
-// We scan the cache for entries to delete at most once per trimInterval (1 day).
-//
-// When we do scan the cache, we delete entries that have not been used for
-// at least trimLimit (5 days). Statistics gathered from a month of usage by
-// Go developers found that essentially all reuse of cached entries happened
-// within 5 days of the previous reuse. See golang.org/issue/22990.
-const (
- mtimeInterval = 1 * time.Hour
- trimInterval = 24 * time.Hour
- trimLimit = 5 * 24 * time.Hour
-)
+ hashRes := hashResults{}
-// used makes a best-effort attempt to update mtime on file,
-// so that mtime reflects cache access time.
-//
-// Because the reflection only needs to be approximate,
-// and to reduce the amount of disk activity caused by using
-// cache entries, used only updates the mtime if the current
-// mtime is more than an hour old. This heuristic eliminates
-// nearly all the mtime updates that would otherwise happen,
-// while still keeping the mtimes useful for cache trimming.
-func (c *Cache) used(file string) error {
- info, err := os.Stat(file)
- if err != nil {
- if os.IsNotExist(err) {
- return errMissing
+ fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
+
+ for _, f := range pkg.CompiledGoFiles {
+ h, fErr := c.fileHash(f)
+ if fErr != nil {
+ return nil, fmt.Errorf("failed to calculate file %s hash: %w", f, fErr)
}
- return fmt.Errorf("failed to stat file %s: %w", file, err)
- }
- if c.now().Sub(info.ModTime()) < mtimeInterval {
- return nil
+ fmt.Fprintf(key, "file %s %x\n", f, h)
}
- if err := os.Chtimes(file, c.now(), c.now()); err != nil {
- return fmt.Errorf("failed to change time of file %s: %w", file, err)
- }
+ curSum := key.Sum()
+ hashRes[HashModeNeedOnlySelf] = hex.EncodeToString(curSum[:])
- return nil
-}
+ imps := maps.Values(pkg.Imports)
-// Trim removes old cache entries that are likely not to be reused.
-func (c *Cache) Trim() {
- now := c.now()
-
- // We maintain in dir/trim.txt the time of the last completed cache trim.
- // If the cache has been trimmed recently enough, do nothing.
- // This is the common case.
- data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt"))
- t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
- if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
- return
- }
+ slices.SortFunc(imps, func(a, b *packages.Package) int {
+ return strings.Compare(a.PkgPath, b.PkgPath)
+ })
- // Trim each of the 256 subdirectories.
- // We subtract an additional mtimeInterval
- // to account for the imprecision of our "last used" mtimes.
- cutoff := now.Add(-trimLimit - mtimeInterval)
- for i := 0; i < 256; i++ {
- subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
- c.trimSubdir(subdir, cutoff)
+ if err := c.computeDepsHash(HashModeNeedOnlySelf, imps, key); err != nil {
+ return nil, err
}
- // Ignore errors from here: if we don't write the complete timestamp, the
- // cache will appear older than it is, and we'll trim it again next time.
- _ = renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
-}
+ curSum = key.Sum()
+ hashRes[HashModeNeedDirectDeps] = hex.EncodeToString(curSum[:])
-// trimSubdir trims a single cache subdirectory.
-func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
- // Read all directory entries from subdir before removing
- // any files, in case removing files invalidates the file offset
- // in the directory scan. Also, ignore error from f.Readdirnames,
- // because we don't care about reporting the error, and we still
- // want to process any entries found before the error.
- f, err := os.Open(subdir)
- if err != nil {
- return
+ if err := c.computeDepsHash(HashModeNeedAllDeps, imps, key); err != nil {
+ return nil, err
}
- names, _ := f.Readdirnames(-1)
- f.Close()
- for _, name := range names {
- // Remove only cache entries (xxxx-a and xxxx-d).
- if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
+ curSum = key.Sum()
+ hashRes[HashModeNeedAllDeps] = hex.EncodeToString(curSum[:])
+
+ return hashRes, nil
+}
+
+func (c *Cache) computeDepsHash(depMode HashMode, imps []*packages.Package, key *cache.Hash) error {
+ for _, dep := range imps {
+ if dep.PkgPath == "unsafe" {
continue
}
- entry := filepath.Join(subdir, name)
- info, err := os.Stat(entry)
- if err == nil && info.ModTime().Before(cutoff) {
- os.Remove(entry)
+
+ depHash, err := c.packageHash(dep, depMode)
+ if err != nil {
+ return fmt.Errorf("failed to calculate hash for dependency %s with mode %d: %w", dep.Name, depMode, err)
}
+
+ fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash)
}
+
+ return nil
}
-// putIndexEntry adds an entry to the cache recording that executing the action
-// with the given id produces an output with the given output id (hash) and size.
-func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
- // Note: We expect that for one reason or another it may happen
- // that repeating an action produces a different output hash
- // (for example, if the output contains a time stamp or temp dir name).
- // While not ideal, this is also not a correctness problem, so we
- // don't make a big deal about it. In particular, we leave the action
- // cache entries writable specifically so that they can be overwritten.
- //
- // Setting GODEBUG=gocacheverify=1 does make a big deal:
- // in verify mode we are double-checking that the cache entries
- // are entirely reproducible. As just noted, this may be unrealistic
- // in some cases but the check is also useful for shaking out real bugs.
- entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
-
- if verify && allowVerify {
- old, err := c.get(id)
- if err == nil && (old.OutputID != out || old.Size != size) {
- // panic to show stack trace, so we can see what code is generating this cache entry.
- msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
- panic(msg)
- }
- }
- file := c.fileName(id, "a")
+func (c *Cache) putBytes(actionID cache.ActionID, buf *bytes.Buffer) error {
+ c.ioSem <- struct{}{}
+
+ err := c.sw.TrackStageErr("cache io", func() error {
+ return cache.PutBytes(c.lowLevelCache, actionID, buf.Bytes())
+ })
+
+ <-c.ioSem
- // Copy file to cache directory.
- mode := os.O_WRONLY | os.O_CREATE
- f, err := os.OpenFile(file, mode, 0666)
- if err != nil {
- return err
- }
- _, err = f.WriteString(entry)
- if err == nil {
- // Truncate the file only *after* writing it.
- // (This should be a no-op, but truncate just in case of previous corruption.)
- //
- // This differs from os.WriteFile, which truncates to 0 *before* writing
- // via os.O_TRUNC. Truncating only after writing ensures that a second write
- // of the same content to the same file is idempotent, and does not — even
- // temporarily! — undo the effect of the first write.
- err = f.Truncate(int64(len(entry)))
- }
- if closeErr := f.Close(); err == nil {
- err = closeErr
- }
if err != nil {
- // TODO(bcmills): This Remove potentially races with another go command writing to file.
- // Can we eliminate it?
- os.Remove(file)
return err
}
- if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests
- return fmt.Errorf("failed to change time of file %s: %w", file, err)
- }
return nil
}
-// Put stores the given output in the cache as the output for the action ID.
-// It may read file twice. The content of file must not change between the two passes.
-func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
- return c.put(id, file, true)
-}
+func (c *Cache) getBytes(actionID cache.ActionID) ([]byte, error) {
+ c.ioSem <- struct{}{}
-// PutNoVerify is like Put but disables the verify check
-// when GODEBUG=goverifycache=1 is set.
-// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
-// like test output containing times and the like.
-func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
- return c.put(id, file, false)
-}
+ cachedData, err := timeutils.TrackStage(c.sw, "cache io", func() ([]byte, error) {
+ b, _, errGB := cache.GetBytes(c.lowLevelCache, actionID)
+ return b, errGB
+ })
-func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
- // Compute output ID.
- h := sha256.New()
- if _, err := file.Seek(0, 0); err != nil {
- return OutputID{}, 0, err
- }
- size, err := io.Copy(h, file)
- if err != nil {
- return OutputID{}, 0, err
- }
- var out OutputID
- h.Sum(out[:0])
+ <-c.ioSem
- // Copy to cached output file (if not already present).
- if err := c.copyFile(file, out, size); err != nil {
- return out, size, err
+ if err != nil {
+ return nil, err
}
- // Add to cache index.
- return out, size, c.putIndexEntry(id, out, size, allowVerify)
+ return cachedData, nil
}
-// PutBytes stores the given bytes in the cache as the output for the action ID.
-func (c *Cache) PutBytes(id ActionID, data []byte) error {
- _, _, err := c.Put(id, bytes.NewReader(data))
- return err
-}
+func (c *Cache) fileHash(f string) ([cache.HashSize]byte, error) {
+ c.ioSem <- struct{}{}
-// copyFile copies file into the cache, expecting it to have the given
-// output ID and size, if that file is not present already.
-func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
- name := c.fileName(out, "d")
- info, err := os.Stat(name)
- if err == nil && info.Size() == size {
- // Check hash.
- if f, openErr := os.Open(name); openErr == nil {
- h := sha256.New()
- if _, copyErr := io.Copy(h, f); copyErr != nil {
- return fmt.Errorf("failed to copy to sha256: %w", copyErr)
- }
-
- f.Close()
- var out2 OutputID
- h.Sum(out2[:0])
- if out == out2 {
- return nil
- }
- }
- // Hash did not match. Fall through and rewrite file.
- }
+ h, err := cache.FileHash(f)
+
+ <-c.ioSem
- // Copy file to cache directory.
- mode := os.O_RDWR | os.O_CREATE
- if err == nil && info.Size() > size { // shouldn't happen but fix in case
- mode |= os.O_TRUNC
- }
- f, err := os.OpenFile(name, mode, 0666)
if err != nil {
- return err
- }
- defer f.Close()
- if size == 0 {
- // File now exists with correct size.
- // Only one possible zero-length file, so contents are OK too.
- // Early return here makes sure there's a "last byte" for code below.
- return nil
+ return [cache.HashSize]byte{}, err
}
- // From here on, if any of the I/O writing the file fails,
- // we make a best-effort attempt to truncate the file f
- // before returning, to avoid leaving bad bytes in the file.
+ return h, nil
+}
- // Copy file to f, but also into h to double-check hash.
- if _, err = file.Seek(0, 0); err != nil {
- _ = f.Truncate(0)
- return err
- }
- h := sha256.New()
- w := io.MultiWriter(f, h)
- if _, err = io.CopyN(w, file, size-1); err != nil {
- _ = f.Truncate(0)
- return err
- }
- // Check last byte before writing it; writing it will make the size match
- // what other processes expect to find and might cause them to start
- // using the file.
- buf := make([]byte, 1)
- if _, err = file.Read(buf); err != nil {
- _ = f.Truncate(0)
- return err
- }
- if n, wErr := h.Write(buf); n != len(buf) {
- return fmt.Errorf("wrote to hash %d/%d bytes with error %w", n, len(buf), wErr)
+func (c *Cache) encode(data any) (*bytes.Buffer, error) {
+ buf := &bytes.Buffer{}
+ err := c.sw.TrackStageErr("gob", func() error {
+ return gob.NewEncoder(buf).Encode(data)
+ })
+ if err != nil {
+ return nil, fmt.Errorf("failed to gob encode: %w", err)
}
- sum := h.Sum(nil)
- if !bytes.Equal(sum, out[:]) {
- _ = f.Truncate(0)
- return errors.New("file content changed underfoot")
- }
+ return buf, nil
+}
- // Commit cache file entry.
- if _, err = f.Write(buf); err != nil {
- _ = f.Truncate(0)
- return err
- }
- if err = f.Close(); err != nil {
- // Data might not have been written,
- // but file may look like it is the right size.
- // To be extra careful, remove cached file.
- os.Remove(name)
- return err
- }
- if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests
- return fmt.Errorf("failed to change time of file %s: %w", name, err)
+func (c *Cache) decode(b []byte, data any) error {
+ err := c.sw.TrackStageErr("gob", func() error {
+ return gob.NewDecoder(bytes.NewReader(b)).Decode(data)
+ })
+ if err != nil {
+ return fmt.Errorf("failed to gob decode: %w", err)
}
return nil
}
+
+func SetSalt(b *bytes.Buffer) {
+ cache.SetSalt(b.Bytes())
+}
+
+func DefaultDir() string {
+ cacheDir, _ := cache.DefaultDir()
+ return cacheDir
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md b/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md
deleted file mode 100644
index b469711ed..000000000
--- a/vendor/github.com/golangci/golangci-lint/internal/cache/readme.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# cache
-
-Extracted from go/src/cmd/go/internal/cache/
-I don't know what version of Go this package was pulled from.
-
-Adapted for golangci-lint:
-- https://github.com/golangci/golangci-lint/pull/699
-- https://github.com/golangci/golangci-lint/pull/779
-- https://github.com/golangci/golangci-lint/pull/788
-- https://github.com/golangci/golangci-lint/pull/808
-- https://github.com/golangci/golangci-lint/pull/1063
-- https://github.com/golangci/golangci-lint/pull/1070
-- https://github.com/golangci/golangci-lint/pull/1162
-- https://github.com/golangci/golangci-lint/pull/2318
-- https://github.com/golangci/golangci-lint/pull/2352
-- https://github.com/golangci/golangci-lint/pull/3012
-- https://github.com/golangci/golangci-lint/pull/3096
-- https://github.com/golangci/golangci-lint/pull/3204
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/LICENSE b/vendor/github.com/golangci/golangci-lint/internal/go/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go
new file mode 100644
index 000000000..7bf4f1d66
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache.go
@@ -0,0 +1,663 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cache implements a build artifact cache.
+//
+// This package is a slightly modified fork of Go's
+// cmd/go/internal/cache package.
+package cache
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golangci/golangci-lint/internal/go/mmap"
+ "github.com/golangci/golangci-lint/internal/go/robustio"
+ "github.com/rogpeppe/go-internal/lockedfile"
+)
+
+// An ActionID is a cache action key, the hash of a complete description of a
+// repeatable computation (command line, environment variables,
+// input file contents, executable contents).
+type ActionID [HashSize]byte
+
+// An OutputID is a cache output key, the hash of an output of a computation.
+type OutputID [HashSize]byte
+
+// Cache is the interface as used by the cmd/go.
+type Cache interface {
+ // Get returns the cache entry for the provided ActionID.
+ // On miss, the error type should be of type *entryNotFoundError.
+ //
+	// After a successful call to Get, OutputFile(Entry.OutputID) must
+	// exist on disk until Close is called (at the end of the process).
+ Get(ActionID) (Entry, error)
+
+ // Put adds an item to the cache.
+ //
+ // The seeker is only used to seek to the beginning. After a call to Put,
+ // the seek position is not guaranteed to be in any particular state.
+ //
+ // As a special case, if the ReadSeeker is of type noVerifyReadSeeker,
+ // the verification from GODEBUG=goverifycache=1 is skipped.
+ //
+	// After a successful call to Get, OutputFile(Entry.OutputID) must
+	// exist on disk until Close is called (at the end of the process).
+ Put(ActionID, io.ReadSeeker) (_ OutputID, size int64, _ error)
+
+ // Close is called at the end of the go process. Implementations can do
+ // cache cleanup work at this phase, or wait for and report any errors from
+	// background cleanup work started earlier. Any cache trimming done in one
+	// process should not cause the invariants of this interface to be
+	// violated in another process. Namely, a cache trim from one process should
+ // not delete an ObjectID from disk that was recently Get or Put from
+ // another process. As a rule of thumb, don't trim things used in the last
+ // day.
+ Close() error
+
+ // OutputFile returns the path on disk where OutputID is stored.
+ //
+ // It's only called after a successful get or put call so it doesn't need
+ // to return an error; it's assumed that if the previous get or put succeeded,
+ // it's already on disk.
+ OutputFile(OutputID) string
+
+ // FuzzDir returns where fuzz files are stored.
+ FuzzDir() string
+}
+
+// A Cache is a package cache, backed by a file system directory tree.
+type DiskCache struct {
+ dir string
+ now func() time.Time
+}
+
+// Open opens and returns the cache in the given directory.
+//
+// It is safe for multiple processes on a single machine to use the
+// same cache directory in a local file system simultaneously.
+// They will coordinate using operating system file locks and may
+// duplicate effort but will not corrupt the cache.
+//
+// However, it is NOT safe for multiple processes on different machines
+// to share a cache directory (for example, if the directory were stored
+// in a network file system). File locking is notoriously unreliable in
+// network file systems and may not suffice to protect the cache.
+func Open(dir string) (*DiskCache, error) {
+ info, err := os.Stat(dir)
+ if err != nil {
+ return nil, err
+ }
+ if !info.IsDir() {
+ return nil, &fs.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")}
+ }
+ for i := 0; i < 256; i++ {
+ name := filepath.Join(dir, fmt.Sprintf("%02x", i))
+ if err := os.MkdirAll(name, 0744); err != nil {
+ return nil, err
+ }
+ }
+ c := &DiskCache{
+ dir: dir,
+ now: time.Now,
+ }
+ return c, nil
+}
+
+// fileName returns the name of the file corresponding to the given id.
+func (c *DiskCache) fileName(id [HashSize]byte, key string) string {
+ return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
+}
+
+// An entryNotFoundError indicates that a cache entry was not found, with an
+// optional underlying reason.
+type entryNotFoundError struct {
+ Err error
+}
+
+func (e *entryNotFoundError) Error() string {
+ if e.Err == nil {
+ return "cache entry not found"
+ }
+ return fmt.Sprintf("cache entry not found: %v", e.Err)
+}
+
+func (e *entryNotFoundError) Unwrap() error {
+ return e.Err
+}
+
+const (
+ // action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
+ hexSize = HashSize * 2
+ entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
+)
+
+// verify controls whether to run the cache in verify mode.
+// In verify mode, the cache always returns errMissing from Get
+// but then double-checks in Put that the data being written
+// exactly matches any existing entry. This provides an easy
+// way to detect program behavior that would have been different
+// had the cache entry been returned from Get.
+//
+// verify is enabled by setting the environment variable
+// GODEBUG=gocacheverify=1.
+var verify = false
+
+var errVerifyMode = errors.New("gocacheverify=1")
+
+// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
+var DebugTest = false
+
+// func init() { initEnv() }
+
+// var (
+// gocacheverify = godebug.New("gocacheverify")
+// gocachehash = godebug.New("gocachehash")
+// gocachetest = godebug.New("gocachetest")
+// )
+
+// func initEnv() {
+// if gocacheverify.Value() == "1" {
+// gocacheverify.IncNonDefault()
+// verify = true
+// }
+// if gocachehash.Value() == "1" {
+// gocachehash.IncNonDefault()
+// debugHash = true
+// }
+// if gocachetest.Value() == "1" {
+// gocachetest.IncNonDefault()
+// DebugTest = true
+// }
+// }
+
+// Get looks up the action ID in the cache,
+// returning the corresponding output ID and file size, if any.
+// Note that finding an output ID does not guarantee that the
+// saved file for that output ID is still available.
+func (c *DiskCache) Get(id ActionID) (Entry, error) {
+ if verify {
+ return Entry{}, &entryNotFoundError{Err: errVerifyMode}
+ }
+ return c.get(id)
+}
+
+type Entry struct {
+ OutputID OutputID
+ Size int64
+ Time time.Time // when added to cache
+}
+
+// get is Get but does not respect verify mode, so that Put can use it.
+func (c *DiskCache) get(id ActionID) (Entry, error) {
+ missing := func(reason error) (Entry, error) {
+ return Entry{}, &entryNotFoundError{Err: reason}
+ }
+ f, err := os.Open(c.fileName(id, "a"))
+ if err != nil {
+ return missing(err)
+ }
+ defer f.Close()
+ entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
+ if n, err := io.ReadFull(f, entry); n > entrySize {
+ return missing(errors.New("too long"))
+ } else if err != io.ErrUnexpectedEOF {
+ if err == io.EOF {
+ return missing(errors.New("file is empty"))
+ }
+ return missing(err)
+ } else if n < entrySize {
+ return missing(errors.New("entry file incomplete"))
+ }
+ if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
+ return missing(errors.New("invalid header"))
+ }
+ eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
+ eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
+ esize, entry := entry[1:1+20], entry[1+20:]
+ etime, entry := entry[1:1+20], entry[1+20:]
+ var buf [HashSize]byte
+ if _, err := hex.Decode(buf[:], eid); err != nil {
+ return missing(fmt.Errorf("decoding ID: %v", err))
+ } else if buf != id {
+ return missing(errors.New("mismatched ID"))
+ }
+ if _, err := hex.Decode(buf[:], eout); err != nil {
+ return missing(fmt.Errorf("decoding output ID: %v", err))
+ }
+ i := 0
+ for i < len(esize) && esize[i] == ' ' {
+ i++
+ }
+ size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
+ if err != nil {
+ return missing(fmt.Errorf("parsing size: %v", err))
+ } else if size < 0 {
+ return missing(errors.New("negative size"))
+ }
+ i = 0
+ for i < len(etime) && etime[i] == ' ' {
+ i++
+ }
+ tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
+ if err != nil {
+ return missing(fmt.Errorf("parsing timestamp: %v", err))
+ } else if tm < 0 {
+ return missing(errors.New("negative timestamp"))
+ }
+
+ err = c.used(c.fileName(id, "a"))
+ if err != nil {
+ return Entry{}, fmt.Errorf("failed to mark %s as used: %w", c.fileName(id, "a"), err)
+ }
+
+ return Entry{buf, size, time.Unix(0, tm)}, nil
+}
+
+// GetFile looks up the action ID in the cache and returns
+// the name of the corresponding data file.
+func GetFile(c Cache, id ActionID) (file string, entry Entry, err error) {
+ entry, err = c.Get(id)
+ if err != nil {
+ return "", Entry{}, err
+ }
+ file = c.OutputFile(entry.OutputID)
+ info, err := os.Stat(file)
+ if err != nil {
+ return "", Entry{}, &entryNotFoundError{Err: err}
+ }
+ if info.Size() != entry.Size {
+ return "", Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")}
+ }
+ return file, entry, nil
+}
+
+// GetBytes looks up the action ID in the cache and returns
+// the corresponding output bytes.
+// GetBytes should only be used for data that can be expected to fit in memory.
+func GetBytes(c Cache, id ActionID) ([]byte, Entry, error) {
+ entry, err := c.Get(id)
+ if err != nil {
+ return nil, entry, err
+ }
+ data, err := robustio.ReadFile(c.OutputFile(entry.OutputID))
+ if err != nil {
+ return nil, entry, err
+ }
+ if sha256.Sum256(data) != entry.OutputID {
+ return nil, entry, &entryNotFoundError{Err: errors.New("bad checksum")}
+ }
+ return data, entry, nil
+}
+
+// GetMmap looks up the action ID in the cache and returns
+// the corresponding output bytes.
+// GetMmap should only be used for data that can be expected to fit in memory.
+func GetMmap(c Cache, id ActionID) ([]byte, Entry, error) {
+ entry, err := c.Get(id)
+ if err != nil {
+ return nil, entry, err
+ }
+ md, err := mmap.Mmap(c.OutputFile(entry.OutputID))
+ if err != nil {
+ return nil, Entry{}, err
+ }
+ if int64(len(md.Data)) != entry.Size {
+ return nil, Entry{}, &entryNotFoundError{Err: errors.New("file incomplete")}
+ }
+ return md.Data, entry, nil
+}
+
+// OutputFile returns the name of the cache file storing output with the given OutputID.
+func (c *DiskCache) OutputFile(out OutputID) string {
+ file := c.fileName(out, "d")
+ c.used(file)
+ return file
+}
+
+// Time constants for cache expiration.
+//
+// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
+// to avoid causing many unnecessary inode updates. The mtimes therefore
+// roughly reflect "time of last use" but may in fact be older by at most an hour.
+//
+// We scan the cache for entries to delete at most once per trimInterval (1 day).
+//
+// When we do scan the cache, we delete entries that have not been used for
+// at least trimLimit (5 days). Statistics gathered from a month of usage by
+// Go developers found that essentially all reuse of cached entries happened
+// within 5 days of the previous reuse. See golang.org/issue/22990.
+const (
+ mtimeInterval = 1 * time.Hour
+ trimInterval = 24 * time.Hour
+ trimLimit = 5 * 24 * time.Hour
+)
+
+// used makes a best-effort attempt to update mtime on file,
+// so that mtime reflects cache access time.
+//
+// Because the reflection only needs to be approximate,
+// and to reduce the amount of disk activity caused by using
+// cache entries, used only updates the mtime if the current
+// mtime is more than an hour old. This heuristic eliminates
+// nearly all of the mtime updates that would otherwise happen,
+// while still keeping the mtimes useful for cache trimming.
+func (c *DiskCache) used(file string) error {
+ info, err := os.Stat(file)
+ if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
+ return nil
+ }
+
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &entryNotFoundError{Err: err}
+ }
+ return &entryNotFoundError{Err: fmt.Errorf("failed to stat file %s: %w", file, err)}
+ }
+
+ err = os.Chtimes(file, c.now(), c.now())
+ if err != nil {
+ return fmt.Errorf("failed to change time of file %s: %w", file, err)
+ }
+
+ return nil
+}
+
+func (c *DiskCache) Close() error { return c.Trim() }
+
+// Trim removes old cache entries that are likely not to be reused.
+func (c *DiskCache) Trim() error {
+ now := c.now()
+
+ // We maintain in dir/trim.txt the time of the last completed cache trim.
+ // If the cache has been trimmed recently enough, do nothing.
+ // This is the common case.
+ // If the trim file is corrupt, detected if the file can't be parsed, or the
+ // trim time is too far in the future, attempt the trim anyway. It's possible that
+ // the cache was full when the corruption happened. Attempting a trim on
+ // an empty cache is cheap, so there wouldn't be a big performance hit in that case.
+ if data, err := lockedfile.Read(filepath.Join(c.dir, "trim.txt")); err == nil {
+ if t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64); err == nil {
+ lastTrim := time.Unix(t, 0)
+ if d := now.Sub(lastTrim); d < trimInterval && d > -mtimeInterval {
+ return nil
+ }
+ }
+ }
+
+ // Trim each of the 256 subdirectories.
+ // We subtract an additional mtimeInterval
+ // to account for the imprecision of our "last used" mtimes.
+ cutoff := now.Add(-trimLimit - mtimeInterval)
+ for i := 0; i < 256; i++ {
+ subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
+ c.trimSubdir(subdir, cutoff)
+ }
+
+ // Ignore errors from here: if we don't write the complete timestamp, the
+ // cache will appear older than it is, and we'll trim it again next time.
+ var b bytes.Buffer
+ fmt.Fprintf(&b, "%d", now.Unix())
+ if err := lockedfile.Write(filepath.Join(c.dir, "trim.txt"), &b, 0666); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// trimSubdir trims a single cache subdirectory.
+func (c *DiskCache) trimSubdir(subdir string, cutoff time.Time) {
+ // Read all directory entries from subdir before removing
+ // any files, in case removing files invalidates the file offset
+ // in the directory scan. Also, ignore error from f.Readdirnames,
+ // because we don't care about reporting the error and we still
+ // want to process any entries found before the error.
+ f, err := os.Open(subdir)
+ if err != nil {
+ return
+ }
+ names, _ := f.Readdirnames(-1)
+ f.Close()
+
+ for _, name := range names {
+ // Remove only cache entries (xxxx-a and xxxx-d).
+ if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
+ continue
+ }
+ entry := filepath.Join(subdir, name)
+ info, err := os.Stat(entry)
+ if err == nil && info.ModTime().Before(cutoff) {
+ os.Remove(entry)
+ }
+ }
+}
+
+// putIndexEntry adds an entry to the cache recording that executing the action
+// with the given id produces an output with the given output id (hash) and size.
+func (c *DiskCache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
+ // Note: We expect that for one reason or another it may happen
+ // that repeating an action produces a different output hash
+ // (for example, if the output contains a time stamp or temp dir name).
+ // While not ideal, this is also not a correctness problem, so we
+ // don't make a big deal about it. In particular, we leave the action
+ // cache entries writable specifically so that they can be overwritten.
+ //
+ // Setting GODEBUG=gocacheverify=1 does make a big deal:
+ // in verify mode we are double-checking that the cache entries
+ // are entirely reproducible. As just noted, this may be unrealistic
+ // in some cases but the check is also useful for shaking out real bugs.
+ entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
+ if verify && allowVerify {
+ old, err := c.get(id)
+ if err == nil && (old.OutputID != out || old.Size != size) {
+ // panic to show stack trace, so we can see what code is generating this cache entry.
+ msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
+ panic(msg)
+ }
+ }
+ file := c.fileName(id, "a")
+
+ // Copy file to cache directory.
+ mode := os.O_WRONLY | os.O_CREATE
+ f, err := os.OpenFile(file, mode, 0666)
+ if err != nil {
+ return err
+ }
+ _, err = f.WriteString(entry)
+ if err == nil {
+ // Truncate the file only *after* writing it.
+ // (This should be a no-op, but truncate just in case of previous corruption.)
+ //
+ // This differs from os.WriteFile, which truncates to 0 *before* writing
+ // via os.O_TRUNC. Truncating only after writing ensures that a second write
+ // of the same content to the same file is idempotent, and does not — even
+ // temporarily! — undo the effect of the first write.
+ err = f.Truncate(int64(len(entry)))
+ }
+ if closeErr := f.Close(); err == nil {
+ err = closeErr
+ }
+ if err != nil {
+ // TODO(bcmills): This Remove potentially races with another go command writing to file.
+ // Can we eliminate it?
+ os.Remove(file)
+ return err
+ }
+ err = os.Chtimes(file, c.now(), c.now()) // mainly for tests
+ if err != nil {
+ return fmt.Errorf("failed to change time of file %s: %w", file, err)
+ }
+
+ return nil
+}
+
+// noVerifyReadSeeker is an io.ReadSeeker wrapper sentinel type
+// that says that Cache.Put should skip the verify check
+// (from GODEBUG=goverifycache=1).
+type noVerifyReadSeeker struct {
+ io.ReadSeeker
+}
+
+// Put stores the given output in the cache as the output for the action ID.
+// It may read file twice. The content of file must not change between the two passes.
+func (c *DiskCache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
+ wrapper, isNoVerify := file.(noVerifyReadSeeker)
+ if isNoVerify {
+ file = wrapper.ReadSeeker
+ }
+ return c.put(id, file, !isNoVerify)
+}
+
+// PutNoVerify is like Put but disables the verify check
+// when GODEBUG=goverifycache=1 is set.
+// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
+// like test output containing times and the like.
+func PutNoVerify(c Cache, id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
+ return c.Put(id, noVerifyReadSeeker{file})
+}
+
+func (c *DiskCache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
+ // Compute output ID.
+ h := sha256.New()
+ if _, err := file.Seek(0, 0); err != nil {
+ return OutputID{}, 0, err
+ }
+ size, err := io.Copy(h, file)
+ if err != nil {
+ return OutputID{}, 0, err
+ }
+ var out OutputID
+ h.Sum(out[:0])
+
+ // Copy to cached output file (if not already present).
+ if err := c.copyFile(file, out, size); err != nil {
+ return out, size, err
+ }
+
+ // Add to cache index.
+ return out, size, c.putIndexEntry(id, out, size, allowVerify)
+}
+
+// PutBytes stores the given bytes in the cache as the output for the action ID.
+func PutBytes(c Cache, id ActionID, data []byte) error {
+ _, _, err := c.Put(id, bytes.NewReader(data))
+ return err
+}
+
+// copyFile copies file into the cache, expecting it to have the given
+// output ID and size, if that file is not present already.
+func (c *DiskCache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
+ name := c.fileName(out, "d")
+ info, err := os.Stat(name)
+ if err == nil && info.Size() == size {
+ // Check hash.
+ if f, err := os.Open(name); err == nil {
+ h := sha256.New()
+ _, copyErr := io.Copy(h, f)
+ if copyErr != nil {
+ return fmt.Errorf("failed to copy to sha256: %w", copyErr)
+ }
+
+ f.Close()
+ var out2 OutputID
+ h.Sum(out2[:0])
+ if out == out2 {
+ return nil
+ }
+ }
+ // Hash did not match. Fall through and rewrite file.
+ }
+
+ // Copy file to cache directory.
+ mode := os.O_RDWR | os.O_CREATE
+ if err == nil && info.Size() > size { // shouldn't happen but fix in case
+ mode |= os.O_TRUNC
+ }
+ f, err := os.OpenFile(name, mode, 0666)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if size == 0 {
+ // File now exists with correct size.
+ // Only one possible zero-length file, so contents are OK too.
+ // Early return here makes sure there's a "last byte" for code below.
+ return nil
+ }
+
+ // From here on, if any of the I/O writing the file fails,
+ // we make a best-effort attempt to truncate the file f
+ // before returning, to avoid leaving bad bytes in the file.
+
+ // Copy file to f, but also into h to double-check hash.
+ if _, err := file.Seek(0, 0); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ h := sha256.New()
+ w := io.MultiWriter(f, h)
+ if _, err := io.CopyN(w, file, size-1); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ // Check last byte before writing it; writing it will make the size match
+ // what other processes expect to find and might cause them to start
+ // using the file.
+ buf := make([]byte, 1)
+ if _, err := file.Read(buf); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ n, wErr := h.Write(buf)
+ if n != len(buf) {
+ return fmt.Errorf("wrote to hash %d/%d bytes with error %w", n, len(buf), wErr)
+ }
+
+ sum := h.Sum(nil)
+ if !bytes.Equal(sum, out[:]) {
+ f.Truncate(0)
+ return fmt.Errorf("file content changed underfoot")
+ }
+
+ // Commit cache file entry.
+ if _, err := f.Write(buf); err != nil {
+ f.Truncate(0)
+ return err
+ }
+ if err := f.Close(); err != nil {
+ // Data might not have been written,
+ // but file may look like it is the right size.
+ // To be extra careful, remove cached file.
+ os.Remove(name)
+ return err
+ }
+ err = os.Chtimes(name, c.now(), c.now()) // mainly for tests
+ if err != nil {
+ return fmt.Errorf("failed to change time of file %s: %w", name, err)
+ }
+
+ return nil
+}
+
+// FuzzDir returns a subdirectory within the cache for storing fuzzing data.
+// The subdirectory may not exist.
+//
+// This directory is managed by the internal/fuzz package. Files in this
+// directory aren't removed by the 'go clean -cache' command or by Trim.
+// They may be removed with 'go clean -fuzzcache'.
+//
+// TODO(#48526): make Trim remove unused files from this directory.
+func (c *DiskCache) FuzzDir() string {
+ return filepath.Join(c.dir, "fuzz")
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache_gcil.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache_gcil.go
new file mode 100644
index 000000000..b4f07738e
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/cache_gcil.go
@@ -0,0 +1,12 @@
+package cache
+
+import (
+ "errors"
+)
+
+// IsErrMissing allows to access to the internal error.
+// TODO(ldez) the handling of this error inside runner_action.go should be refactored.
+func IsErrMissing(err error) bool {
+ var errENF *entryNotFoundError
+ return errors.As(err, &errENF)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/default.go
index 399cc84cf..7232f1ef3 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/default.go
@@ -6,23 +6,22 @@ package cache
import (
"fmt"
- "log"
+ base "log"
"os"
"path/filepath"
"sync"
)
-const envGolangciLintCache = "GOLANGCI_LINT_CACHE"
-
// Default returns the default cache to use.
-func Default() (*Cache, error) {
+// It never returns nil.
+func Default() Cache {
defaultOnce.Do(initDefaultCache)
- return defaultCache, defaultDirErr
+ return defaultCache
}
var (
defaultOnce sync.Once
- defaultCache *Cache
+ defaultCache Cache
)
// cacheREADME is a message stored in a README in the cache directory.
@@ -34,32 +33,46 @@ const cacheREADME = `This directory holds cached build artifacts from golangci-l
// initDefaultCache does the work of finding the default cache
// the first time Default is called.
func initDefaultCache() {
- dir := DefaultDir()
+ dir, _ := DefaultDir()
+ if dir == "off" {
+ if defaultDirErr != nil {
+ base.Fatalf("build cache is required, but could not be located: %v", defaultDirErr)
+ }
+ base.Fatalf("build cache is disabled by %s=off, but required", envGolangciLintCache)
+ }
if err := os.MkdirAll(dir, 0744); err != nil {
- log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
+ base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
}
if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
// Best effort.
if wErr := os.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil {
- log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err)
+ base.Fatalf("Failed to write README file to cache dir %s: %s", dir, err)
}
}
- c, err := Open(dir)
+ diskCache, err := Open(dir)
if err != nil {
- log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
+ base.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
+ }
+
+ if v := os.Getenv(envGolangciLintCacheProg); v != "" {
+ defaultCache = startCacheProg(v, diskCache)
+ } else {
+ defaultCache = diskCache
}
- defaultCache = c
}
var (
- defaultDirOnce sync.Once
- defaultDir string
- defaultDirErr error
+ defaultDirOnce sync.Once
+ defaultDir string
+ defaultDirChanged bool // effective value differs from $GOLANGCI_LINT_CACHE
+ defaultDirErr error
)
// DefaultDir returns the effective GOLANGCI_LINT_CACHE setting.
-func DefaultDir() string {
+// It returns "off" if the cache is disabled,
+// and reports whether the effective value differs from GOLANGCI_LINT_CACHE.
+func DefaultDir() (string, bool) {
// Save the result of the first call to DefaultDir for later use in
// initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
// subprocesses will inherit it, but that means initDefaultCache can't
@@ -67,10 +80,12 @@ func DefaultDir() string {
defaultDirOnce.Do(func() {
defaultDir = os.Getenv(envGolangciLintCache)
- if filepath.IsAbs(defaultDir) {
- return
- }
if defaultDir != "" {
+ defaultDirChanged = true
+ if filepath.IsAbs(defaultDir) || defaultDir == "off" {
+ return
+ }
+ defaultDir = "off"
defaultDirErr = fmt.Errorf("%s is not an absolute path", envGolangciLintCache)
return
}
@@ -78,11 +93,13 @@ func DefaultDir() string {
// Compute default location.
dir, err := os.UserCacheDir()
if err != nil {
+ defaultDir = "off"
+ defaultDirChanged = true
defaultDirErr = fmt.Errorf("%s is not defined and %w", envGolangciLintCache, err)
return
}
defaultDir = filepath.Join(dir, "golangci-lint")
})
- return defaultDir
+ return defaultDir, defaultDirChanged
}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/cache/default_gcil.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/default_gcil.go
new file mode 100644
index 000000000..a801f67f4
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/default_gcil.go
@@ -0,0 +1,6 @@
+package cache
+
+const (
+ envGolangciLintCache = "GOLANGCI_LINT_CACHE"
+ envGolangciLintCacheProg = "GOLANGCI_LINT_CACHEPROG"
+)
diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/hash.go
index 4ce79e325..d5169dd4c 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/hash.go
@@ -11,6 +11,7 @@ import (
"hash"
"io"
"os"
+ "strings"
"sync"
)
@@ -36,22 +37,26 @@ type Hash struct {
// which are still addressed by unsalted SHA256.
var hashSalt []byte
-func SetSalt(b []byte) {
- hashSalt = b
+// stripExperiment strips any GOEXPERIMENT configuration from the Go
+// version string.
+func stripExperiment(version string) string {
+ if i := strings.Index(version, " X:"); i >= 0 {
+ return version[:i]
+ }
+ return version
}
// Subkey returns an action ID corresponding to mixing a parent
// action ID with a string description of the subkey.
func Subkey(parent ActionID, desc string) (ActionID, error) {
h := sha256.New()
- const subkeyPrefix = "subkey:"
- if n, err := h.Write([]byte(subkeyPrefix)); n != len(subkeyPrefix) {
- return ActionID{}, fmt.Errorf("wrote %d/%d bytes of subkey prefix with error %s", n, len(subkeyPrefix), err)
- }
- if n, err := h.Write(parent[:]); n != len(parent) {
+ h.Write([]byte(("subkey:")))
+ n, err := h.Write(parent[:])
+ if n != len(parent) {
return ActionID{}, fmt.Errorf("wrote %d/%d bytes of parent with error %s", n, len(parent), err)
}
- if n, err := h.Write([]byte(desc)); n != len(desc) {
+ n, err = h.Write([]byte(desc))
+ if n != len(desc) {
return ActionID{}, fmt.Errorf("wrote %d/%d bytes of desc with error %s", n, len(desc), err)
}
@@ -75,7 +80,8 @@ func NewHash(name string) (*Hash, error) {
if debugHash {
fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
}
- if n, err := h.Write(hashSalt); n != len(hashSalt) {
+ n, err := h.Write(hashSalt)
+ if n != len(hashSalt) {
return nil, fmt.Errorf("wrote %d/%d bytes of hash salt with error %s", n, len(hashSalt), err)
}
if verify {
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/cache/hash_gcil.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/hash_gcil.go
new file mode 100644
index 000000000..08749036b
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/hash_gcil.go
@@ -0,0 +1,5 @@
+package cache
+
+func SetSalt(b []byte) {
+ hashSalt = b
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/cache/prog.go b/vendor/github.com/golangci/golangci-lint/internal/go/cache/prog.go
new file mode 100644
index 000000000..a93740a3c
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/prog.go
@@ -0,0 +1,428 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cache
+
+import (
+ "bufio"
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ base "log"
+ "os"
+ "os/exec"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golangci/golangci-lint/internal/go/quoted"
+)
+
+// ProgCache implements Cache via JSON messages over stdin/stdout to a child
+// helper process which can then implement whatever caching policy/mechanism it
+// wants.
+//
+// See https://github.com/golang/go/issues/59719
+type ProgCache struct {
+ cmd *exec.Cmd
+ stdout io.ReadCloser // from the child process
+ stdin io.WriteCloser // to the child process
+ bw *bufio.Writer // to stdin
+ jenc *json.Encoder // to bw
+
+ // can are the commands that the child process declared that it supports.
+ // This is effectively the versioning mechanism.
+ can map[ProgCmd]bool
+
+ // fuzzDirCache is another Cache implementation to use for the FuzzDir
+ // method. In practice this is the default GOCACHE disk-based
+ // implementation.
+ //
+ // TODO(bradfitz): maybe this isn't ideal. But we'd need to extend the Cache
+ // interface and the fuzzing callers to be less disk-y to do more here.
+ fuzzDirCache Cache
+
+ closing atomic.Bool
+ ctx context.Context // valid until Close via ctxClose
+ ctxCancel context.CancelFunc // called on Close
+ readLoopDone chan struct{} // closed when readLoop returns
+
+ mu sync.Mutex // guards following fields
+ nextID int64
+ inFlight map[int64]chan<- *ProgResponse
+ outputFile map[OutputID]string // object => abs path on disk
+
+ // writeMu serializes writing to the child process.
+ // It must never be held at the same time as mu.
+ writeMu sync.Mutex
+}
+
+// ProgCmd is a command that can be issued to a child process.
+//
+// If the interface needs to grow, we can add new commands or new versioned
+// commands like "get2".
+type ProgCmd string
+
+const (
+ cmdGet = ProgCmd("get")
+ cmdPut = ProgCmd("put")
+ cmdClose = ProgCmd("close")
+)
+
+// ProgRequest is the JSON-encoded message that's sent from cmd/go to
+// the GOLANGCI_LINT_CACHEPROG child process over stdin. Each JSON object is on its
+// own line. A ProgRequest of Type "put" with BodySize > 0 will be followed
+// by a line containing a base64-encoded JSON string literal of the body.
+type ProgRequest struct {
+ // ID is a unique number per process across all requests.
+ // It must be echoed in the ProgResponse from the child.
+ ID int64
+
+ // Command is the type of request.
+ // The cmd/go tool will only send commands that were declared
+ // as supported by the child.
+ Command ProgCmd
+
+ // ActionID is non-nil for get and puts.
+ ActionID []byte `json:",omitempty"` // or nil if not used
+
+ // ObjectID is set for Type "put" and "output-file".
+ ObjectID []byte `json:",omitempty"` // or nil if not used
+
+ // Body is the body for "put" requests. It's sent after the JSON object
+ // as a base64-encoded JSON string when BodySize is non-zero.
+ // It's sent as a separate JSON value instead of being a struct field
+ // send in this JSON object so large values can be streamed in both directions.
+ // The base64 string body of a ProgRequest will always be written
+ // immediately after the JSON object and a newline.
+ Body io.Reader `json:"-"`
+
+ // BodySize is the number of bytes of Body. If zero, the body isn't written.
+ BodySize int64 `json:",omitempty"`
+}
+
+// ProgResponse is the JSON response from the child process to cmd/go.
+//
+// With the exception of the first protocol message that the child writes to its
+// stdout with ID==0 and KnownCommands populated, these are only sent in
+// response to a ProgRequest from cmd/go.
+//
+// ProgResponses can be sent in any order. The ID must match the request they're
+// replying to.
+type ProgResponse struct {
+ ID int64 // that corresponds to ProgRequest; they can be answered out of order
+ Err string `json:",omitempty"` // if non-empty, the error
+
+ // KnownCommands is included in the first message that cache helper program
+ // writes to stdout on startup (with ID==0). It includes the
+ // ProgRequest.Command types that are supported by the program.
+ //
+ // This lets us extend the protocol gracefully over time (adding "get2",
+ // etc), or fail gracefully when needed. It also lets us verify the program
+ // wants to be a cache helper.
+ KnownCommands []ProgCmd `json:",omitempty"`
+
+ // For Get requests.
+
+ Miss bool `json:",omitempty"` // cache miss
+ OutputID []byte `json:",omitempty"`
+ Size int64 `json:",omitempty"` // in bytes
+ Time *time.Time `json:",omitempty"` // an Entry.Time; when the object was added to the cache
+
+ // DiskPath is the absolute path on disk of the ObjectID corresponding
+ // a "get" request's ActionID (on cache hit) or a "put" request's
+ // provided ObjectID.
+ DiskPath string `json:",omitempty"`
+}
+
+// startCacheProg starts the prog binary (with optional space-separated flags)
+// and returns a Cache implementation that talks to it.
+//
+// It blocks a few seconds to wait for the child process to successfully start
+// and advertise its capabilities.
+func startCacheProg(progAndArgs string, fuzzDirCache Cache) Cache {
+ if fuzzDirCache == nil {
+ panic("missing fuzzDirCache")
+ }
+ args, err := quoted.Split(progAndArgs)
+ if err != nil {
+ base.Fatalf("%s args: %v", envGolangciLintCacheProg, err)
+ }
+ var prog string
+ if len(args) > 0 {
+ prog = args[0]
+ args = args[1:]
+ }
+
+ ctx, ctxCancel := context.WithCancel(context.Background())
+
+ cmd := exec.CommandContext(ctx, prog, args...)
+ out, err := cmd.StdoutPipe()
+ if err != nil {
+ base.Fatalf("StdoutPipe to %s: %v", envGolangciLintCacheProg, err)
+ }
+ in, err := cmd.StdinPipe()
+ if err != nil {
+ base.Fatalf("StdinPipe to %s: %v", envGolangciLintCacheProg, err)
+ }
+ cmd.Stderr = os.Stderr
+ cmd.Cancel = in.Close
+
+ if err := cmd.Start(); err != nil {
+ base.Fatalf("error starting %s program %q: %v", envGolangciLintCacheProg, prog, err)
+ }
+
+ pc := &ProgCache{
+ ctx: ctx,
+ ctxCancel: ctxCancel,
+ fuzzDirCache: fuzzDirCache,
+ cmd: cmd,
+ stdout: out,
+ stdin: in,
+ bw: bufio.NewWriter(in),
+ inFlight: make(map[int64]chan<- *ProgResponse),
+ outputFile: make(map[OutputID]string),
+ readLoopDone: make(chan struct{}),
+ }
+
+ // Register our interest in the initial protocol message from the child to
+ // us, saying what it can do.
+ capResc := make(chan *ProgResponse, 1)
+ pc.inFlight[0] = capResc
+
+ pc.jenc = json.NewEncoder(pc.bw)
+ go pc.readLoop(pc.readLoopDone)
+
+ // Give the child process a few seconds to report its capabilities. This
+ // should be instant and not require any slow work by the program.
+ timer := time.NewTicker(5 * time.Second)
+ defer timer.Stop()
+ for {
+ select {
+ case <-timer.C:
+ log.Printf("# still waiting for %s %v ...", envGolangciLintCacheProg, prog)
+ case capRes := <-capResc:
+ can := map[ProgCmd]bool{}
+ for _, cmd := range capRes.KnownCommands {
+ can[cmd] = true
+ }
+ if len(can) == 0 {
+ base.Fatalf("%s %v declared no supported commands", envGolangciLintCacheProg, prog)
+ }
+ pc.can = can
+ return pc
+ }
+ }
+}
+
+func (c *ProgCache) readLoop(readLoopDone chan<- struct{}) {
+ defer close(readLoopDone)
+ jd := json.NewDecoder(c.stdout)
+ for {
+ res := new(ProgResponse)
+ if err := jd.Decode(res); err != nil {
+ if c.closing.Load() {
+ return // quietly
+ }
+ if err == io.EOF {
+ c.mu.Lock()
+ inFlight := len(c.inFlight)
+ c.mu.Unlock()
+ base.Fatalf("%s exited pre-Close with %v pending requests", envGolangciLintCacheProg, inFlight)
+ }
+ base.Fatalf("error reading JSON from %s: %v", envGolangciLintCacheProg, err)
+ }
+ c.mu.Lock()
+ ch, ok := c.inFlight[res.ID]
+ delete(c.inFlight, res.ID)
+ c.mu.Unlock()
+ if ok {
+ ch <- res
+ } else {
+ base.Fatalf("%s sent response for unknown request ID %v", envGolangciLintCacheProg, res.ID)
+ }
+ }
+}
+
+func (c *ProgCache) send(ctx context.Context, req *ProgRequest) (*ProgResponse, error) {
+ resc := make(chan *ProgResponse, 1)
+ if err := c.writeToChild(req, resc); err != nil {
+ return nil, err
+ }
+ select {
+ case res := <-resc:
+ if res.Err != "" {
+ return nil, errors.New(res.Err)
+ }
+ return res, nil
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func (c *ProgCache) writeToChild(req *ProgRequest, resc chan<- *ProgResponse) (err error) {
+ c.mu.Lock()
+ c.nextID++
+ req.ID = c.nextID
+ c.inFlight[req.ID] = resc
+ c.mu.Unlock()
+
+ defer func() {
+ if err != nil {
+ c.mu.Lock()
+ delete(c.inFlight, req.ID)
+ c.mu.Unlock()
+ }
+ }()
+
+ c.writeMu.Lock()
+ defer c.writeMu.Unlock()
+
+ if err := c.jenc.Encode(req); err != nil {
+ return err
+ }
+ if err := c.bw.WriteByte('\n'); err != nil {
+ return err
+ }
+ if req.Body != nil && req.BodySize > 0 {
+ if err := c.bw.WriteByte('"'); err != nil {
+ return err
+ }
+ e := base64.NewEncoder(base64.StdEncoding, c.bw)
+ wrote, err := io.Copy(e, req.Body)
+ if err != nil {
+ return err
+ }
+ if err := e.Close(); err != nil {
+ return nil
+ }
+ if wrote != req.BodySize {
+ return fmt.Errorf("short write writing body to %s for action %x, object %x: wrote %v; expected %v",
+ envGolangciLintCacheProg, req.ActionID, req.ObjectID, wrote, req.BodySize)
+ }
+ if _, err := c.bw.WriteString("\"\n"); err != nil {
+ return err
+ }
+ }
+ if err := c.bw.Flush(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *ProgCache) Get(a ActionID) (Entry, error) {
+ if !c.can[cmdGet] {
+ // They can't do a "get". Maybe they're a write-only cache.
+ //
+ // TODO(bradfitz,bcmills): figure out the proper error type here. Maybe
+ // errors.ErrUnsupported? Is entryNotFoundError even appropriate? There
+ // might be places where we rely on the fact that a recent Put can be
+ // read through a corresponding Get. Audit callers and check, and document
+ // error types on the Cache interface.
+ return Entry{}, &entryNotFoundError{}
+ }
+ res, err := c.send(c.ctx, &ProgRequest{
+ Command: cmdGet,
+ ActionID: a[:],
+ })
+ if err != nil {
+ return Entry{}, err // TODO(bradfitz): or entryNotFoundError? Audit callers.
+ }
+ if res.Miss {
+ return Entry{}, &entryNotFoundError{}
+ }
+ e := Entry{
+ Size: res.Size,
+ }
+ if res.Time != nil {
+ e.Time = *res.Time
+ } else {
+ e.Time = time.Now()
+ }
+ if res.DiskPath == "" {
+ return Entry{}, &entryNotFoundError{fmt.Errorf("%s didn't populate DiskPath on get hit", envGolangciLintCacheProg)}
+ }
+ if copy(e.OutputID[:], res.OutputID) != len(res.OutputID) {
+ return Entry{}, &entryNotFoundError{errors.New("incomplete ProgResponse OutputID")}
+ }
+ c.noteOutputFile(e.OutputID, res.DiskPath)
+ return e, nil
+}
+
+func (c *ProgCache) noteOutputFile(o OutputID, diskPath string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.outputFile[o] = diskPath
+}
+
+func (c *ProgCache) OutputFile(o OutputID) string {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.outputFile[o]
+}
+
+func (c *ProgCache) Put(a ActionID, file io.ReadSeeker) (_ OutputID, size int64, _ error) {
+ // Compute output ID.
+ h := sha256.New()
+ if _, err := file.Seek(0, 0); err != nil {
+ return OutputID{}, 0, err
+ }
+ size, err := io.Copy(h, file)
+ if err != nil {
+ return OutputID{}, 0, err
+ }
+ var out OutputID
+ h.Sum(out[:0])
+
+ if _, err := file.Seek(0, 0); err != nil {
+ return OutputID{}, 0, err
+ }
+
+ if !c.can[cmdPut] {
+ // Child is a read-only cache. Do nothing.
+ return out, size, nil
+ }
+
+ res, err := c.send(c.ctx, &ProgRequest{
+ Command: cmdPut,
+ ActionID: a[:],
+ ObjectID: out[:],
+ Body: file,
+ BodySize: size,
+ })
+ if err != nil {
+ return OutputID{}, 0, err
+ }
+ if res.DiskPath == "" {
+ return OutputID{}, 0, fmt.Errorf("%s didn't return DiskPath in put response", envGolangciLintCacheProg)
+ }
+ c.noteOutputFile(out, res.DiskPath)
+ return out, size, err
+}
+
+func (c *ProgCache) Close() error {
+ c.closing.Store(true)
+ var err error
+
+ // First write a "close" message to the child so it can exit nicely
+ // and clean up if it wants. Only after that exchange do we cancel
+ // the context that kills the process.
+ if c.can[cmdClose] {
+ _, err = c.send(c.ctx, &ProgRequest{Command: cmdClose})
+ }
+ c.ctxCancel()
+ <-c.readLoopDone
+ return err
+}
+
+func (c *ProgCache) FuzzDir() string {
+ // TODO(bradfitz): figure out what to do here. For now just use the
+ // disk-based default.
+ return c.fuzzDirCache.FuzzDir()
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/cache/readme.md b/vendor/github.com/golangci/golangci-lint/internal/go/cache/readme.md
new file mode 100644
index 000000000..5be600e42
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/cache/readme.md
@@ -0,0 +1,51 @@
+# cache
+
+Extracted from `go/src/cmd/go/internal/cache/`.
+
+The main modifications are:
+- The errors management
+ - Some methods return error.
+ - Some errors are returned instead of being ignored.
+- The name of the env vars:
+ - `GOCACHE` -> `GOLANGCI_LINT_CACHE`
+ - `GOCACHEPROG` -> `GOLANGCI_LINT_CACHEPROG`
+
+## History
+
+- https://github.com/golangci/golangci-lint/pull/5100
+ - Move package from `internal/cache` to `internal/go/cache`
+- https://github.com/golangci/golangci-lint/pull/5098
+ - sync with go1.23.2
+ - sync with go1.22.8
+ - sync with go1.21.13
+ - sync with go1.20.14
+ - sync with go1.19.13
+ - sync with go1.18.10
+ - sync with go1.17.13
+ - sync with go1.16.15
+ - sync with go1.15.15
+ - sync with go1.14.15
+
+## Previous History
+
+Based on the initial PR/commit, the base is a mix between go1.12 and go1.13:
+- cache.go (go1.13)
+- cache_test.go (go1.12?)
+- default.go (go1.12?)
+- hash.go (go1.13 and go1.12 are identical)
+- hash_test.go (go1.12?)
+
+Adapted for golangci-lint:
+- https://github.com/golangci/golangci-lint/pull/699: initial code (contains modifications of the files)
+- https://github.com/golangci/golangci-lint/pull/779: just a nolint (`cache.go`)
+- https://github.com/golangci/golangci-lint/pull/788: only directory permissions changes (0777 -> 0744) (`cache.go`, `cache_test.go`, `default.go`)
+- https://github.com/golangci/golangci-lint/pull/808: mainly related to logs and errors (`cache.go`, `default.go`, `hash.go`, `hash_test.go`)
+- https://github.com/golangci/golangci-lint/pull/1063: `ioutil` -> `robustio` (`cache.go`)
+- https://github.com/golangci/golangci-lint/pull/1070: add `t.Parallel()` inside `cache_test.go`
+- https://github.com/golangci/golangci-lint/pull/1162: errors inside `cache.go`
+- https://github.com/golangci/golangci-lint/pull/2318: `ioutil` -> `os` (`cache.go`, `cache_test.go`, `default.go`, `hash_test.go`)
+- https://github.com/golangci/golangci-lint/pull/2352: Go doc typos
+- https://github.com/golangci/golangci-lint/pull/3012: errors inside `cache.go` (`cache.go`, `default.go`)
+- https://github.com/golangci/golangci-lint/pull/3196: constant for `GOLANGCI_LINT_CACHE` (`cache.go`)
+- https://github.com/golangci/golangci-lint/pull/3204: add this file and `%w` in `fmt.Errorf` (`cache.go`)
+- https://github.com/golangci/golangci-lint/pull/3604: remove `github.com/pkg/errors` (`cache.go`)
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap.go b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap.go
new file mode 100644
index 000000000..fcbd3e08c
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap.go
@@ -0,0 +1,31 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package is a lightly modified version of the mmap code
+// in github.com/google/codesearch/index.
+
+// The mmap package provides an abstraction for memory mapping files
+// on different platforms.
+package mmap
+
+import (
+ "os"
+)
+
+// Data is mmap'ed read-only data from a file.
+// The backing file is never closed, so Data
+// remains valid for the lifetime of the process.
+type Data struct {
+ f *os.File
+ Data []byte
+}
+
+// Mmap maps the given file into memory.
+func Mmap(file string) (Data, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return Data{}, err
+ }
+ return mmapFile(f)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_other.go b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_other.go
new file mode 100644
index 000000000..4d2844fc3
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_other.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build (js && wasm) || wasip1 || plan9
+
+package mmap
+
+import (
+ "io"
+ "os"
+)
+
+// mmapFile on other systems doesn't mmap the file. It just reads everything.
+func mmapFile(f *os.File) (Data, error) {
+ b, err := io.ReadAll(f)
+ if err != nil {
+ return Data{}, err
+ }
+ return Data{f, b}, nil
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_unix.go b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_unix.go
new file mode 100644
index 000000000..5dce87236
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_unix.go
@@ -0,0 +1,36 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package mmap
+
+import (
+ "fmt"
+ "io/fs"
+ "os"
+ "syscall"
+)
+
+func mmapFile(f *os.File) (Data, error) {
+ st, err := f.Stat()
+ if err != nil {
+ return Data{}, err
+ }
+ size := st.Size()
+ pagesize := int64(os.Getpagesize())
+ if int64(int(size+(pagesize-1))) != size+(pagesize-1) {
+ return Data{}, fmt.Errorf("%s: too large for mmap", f.Name())
+ }
+ n := int(size)
+ if n == 0 {
+ return Data{f, nil}, nil
+ }
+ mmapLength := int(((size + pagesize - 1) / pagesize) * pagesize) // round up to page size
+ data, err := syscall.Mmap(int(f.Fd()), 0, mmapLength, syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return Data{}, &fs.PathError{Op: "mmap", Path: f.Name(), Err: err}
+ }
+ return Data{f, data[:n]}, nil
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_windows.go b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_windows.go
new file mode 100644
index 000000000..479ee3075
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/mmap_windows.go
@@ -0,0 +1,41 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+func mmapFile(f *os.File) (Data, error) {
+ st, err := f.Stat()
+ if err != nil {
+ return Data{}, err
+ }
+ size := st.Size()
+ if size == 0 {
+ return Data{f, nil}, nil
+ }
+ h, err := syscall.CreateFileMapping(syscall.Handle(f.Fd()), nil, syscall.PAGE_READONLY, 0, 0, nil)
+ if err != nil {
+ return Data{}, fmt.Errorf("CreateFileMapping %s: %w", f.Name(), err)
+ }
+
+ addr, err := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
+ if err != nil {
+ return Data{}, fmt.Errorf("MapViewOfFile %s: %w", f.Name(), err)
+ }
+ var info windows.MemoryBasicInformation
+ err = windows.VirtualQuery(addr, &info, unsafe.Sizeof(info))
+ if err != nil {
+ return Data{}, fmt.Errorf("VirtualQuery %s: %w", f.Name(), err)
+ }
+ data := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.RegionSize))
+ return Data{f, data}, nil
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/mmap/readme.md b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/readme.md
new file mode 100644
index 000000000..f68aef097
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/mmap/readme.md
@@ -0,0 +1,15 @@
+# mmap
+
+Extracted from `go/src/cmd/go/internal/mmap/` (related to `cache`).
+This is just a copy of the Go code without any changes.
+
+## History
+
+- https://github.com/golangci/golangci-lint/pull/5100
+ - Move package from `internal/mmap` to `internal/go/mmap`
+- https://github.com/golangci/golangci-lint/pull/5098
+ - sync with go1.23.2
+ - sync with go1.22.8
+ - sync with go1.21.13
+ - sync with go1.20.14
+ - sync with go1.19.13
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/quoted/quoted.go b/vendor/github.com/golangci/golangci-lint/internal/go/quoted/quoted.go
new file mode 100644
index 000000000..a81227507
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/quoted/quoted.go
@@ -0,0 +1,129 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package quoted provides string manipulation utilities.
+package quoted
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+ "unicode"
+)
+
+func isSpaceByte(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+// Split splits s into a list of fields,
+// allowing single or double quotes around elements.
+// There is no unescaping or other processing within
+// quoted fields.
+//
+// Keep in sync with cmd/dist/quoted.go
+func Split(s string) ([]string, error) {
+ // Split fields allowing '' or "" around elements.
+ // Quotes further inside the string do not count.
+ var f []string
+ for len(s) > 0 {
+ for len(s) > 0 && isSpaceByte(s[0]) {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ break
+ }
+ // Accepted quoted string. No unescaping inside.
+ if s[0] == '"' || s[0] == '\'' {
+ quote := s[0]
+ s = s[1:]
+ i := 0
+ for i < len(s) && s[i] != quote {
+ i++
+ }
+ if i >= len(s) {
+ return nil, fmt.Errorf("unterminated %c string", quote)
+ }
+ f = append(f, s[:i])
+ s = s[i+1:]
+ continue
+ }
+ i := 0
+ for i < len(s) && !isSpaceByte(s[i]) {
+ i++
+ }
+ f = append(f, s[:i])
+ s = s[i:]
+ }
+ return f, nil
+}
+
+// Join joins a list of arguments into a string that can be parsed
+// with Split. Arguments are quoted only if necessary; arguments
+// without spaces or quotes are kept as-is. No argument may contain both
+// single and double quotes.
+func Join(args []string) (string, error) {
+ var buf []byte
+ for i, arg := range args {
+ if i > 0 {
+ buf = append(buf, ' ')
+ }
+ var sawSpace, sawSingleQuote, sawDoubleQuote bool
+ for _, c := range arg {
+ switch {
+ case c > unicode.MaxASCII:
+ continue
+ case isSpaceByte(byte(c)):
+ sawSpace = true
+ case c == '\'':
+ sawSingleQuote = true
+ case c == '"':
+ sawDoubleQuote = true
+ }
+ }
+ switch {
+ case !sawSpace && !sawSingleQuote && !sawDoubleQuote:
+ buf = append(buf, arg...)
+
+ case !sawSingleQuote:
+ buf = append(buf, '\'')
+ buf = append(buf, arg...)
+ buf = append(buf, '\'')
+
+ case !sawDoubleQuote:
+ buf = append(buf, '"')
+ buf = append(buf, arg...)
+ buf = append(buf, '"')
+
+ default:
+ return "", fmt.Errorf("argument %q contains both single and double quotes and cannot be quoted", arg)
+ }
+ }
+ return string(buf), nil
+}
+
+// A Flag parses a list of string arguments encoded with Join.
+// It is useful for flags like cmd/link's -extldflags.
+type Flag []string
+
+var _ flag.Value = (*Flag)(nil)
+
+func (f *Flag) Set(v string) error {
+ fs, err := Split(v)
+ if err != nil {
+ return err
+ }
+ *f = fs[:len(fs):len(fs)]
+ return nil
+}
+
+func (f *Flag) String() string {
+ if f == nil {
+ return ""
+ }
+ s, err := Join(*f)
+ if err != nil {
+ return strings.Join(*f, " ")
+ }
+ return s
+}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/go/quoted/readme.md b/vendor/github.com/golangci/golangci-lint/internal/go/quoted/readme.md
new file mode 100644
index 000000000..a5e4c4bb3
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/quoted/readme.md
@@ -0,0 +1,13 @@
+# quoted
+
+Extracted from `go/src/cmd/internal/quoted/` (related to `cache`).
+This is just a copy of the Go code without any changes.
+
+## History
+
+- https://github.com/golangci/golangci-lint/pull/5100
+ - Move package from `internal/quoted` to `internal/go/quoted`
+- https://github.com/golangci/golangci-lint/pull/5098
+ - sync go1.23.2
+ - sync go1.22.8
+ - sync go1.21.13
diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/readme.md
index 7c7ba0483..f4dbc1626 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/robustio/readme.md
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/readme.md
@@ -4,3 +4,8 @@ Extracted from go1.19.1/src/cmd/go/internal/robustio
There is only one modification:
- ERROR_SHARING_VIOLATION extracted from go1.19.1/src/internal/syscall/windows/syscall_windows.go to remove the dependencies to `internal/syscall/windows`
+
+## History
+
+- https://github.com/golangci/golangci-lint/pull/5100
+ - Move package from `internal/robustio` to `internal/go/robustio`
diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio.go
index 15b33773c..15b33773c 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio.go
diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_darwin.go
index 99fd8ebc2..99fd8ebc2 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_darwin.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_darwin.go
diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_flaky.go
index c56e36ca6..c56e36ca6 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_flaky.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_flaky.go
diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_other.go
index da9a46e4f..da9a46e4f 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_other.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_other.go
diff --git a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_windows.go
index fe1728954..fe1728954 100644
--- a/vendor/github.com/golangci/golangci-lint/internal/robustio/robustio_windows.go
+++ b/vendor/github.com/golangci/golangci-lint/internal/go/robustio/robustio_windows.go
diff --git a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go
deleted file mode 100644
index 3b3422eb7..000000000
--- a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package pkgcache
-
-import (
- "bytes"
- "encoding/gob"
- "encoding/hex"
- "errors"
- "fmt"
- "runtime"
- "sort"
- "sync"
-
- "golang.org/x/tools/go/packages"
-
- "github.com/golangci/golangci-lint/internal/cache"
- "github.com/golangci/golangci-lint/pkg/logutils"
- "github.com/golangci/golangci-lint/pkg/timeutils"
-)
-
-type HashMode int
-
-const (
- HashModeNeedOnlySelf HashMode = iota
- HashModeNeedDirectDeps
- HashModeNeedAllDeps
-)
-
-// Cache is a per-package data cache. A cached data is invalidated when
-// package, or it's dependencies change.
-type Cache struct {
- lowLevelCache *cache.Cache
- pkgHashes sync.Map
- sw *timeutils.Stopwatch
- log logutils.Log // not used now, but may be needed for future debugging purposes
- ioSem chan struct{} // semaphore limiting parallel IO
-}
-
-func NewCache(sw *timeutils.Stopwatch, log logutils.Log) (*Cache, error) {
- c, err := cache.Default()
- if err != nil {
- return nil, err
- }
- return &Cache{
- lowLevelCache: c,
- sw: sw,
- log: log,
- ioSem: make(chan struct{}, runtime.GOMAXPROCS(-1)),
- }, nil
-}
-
-func (c *Cache) Trim() {
- c.sw.TrackStage("trim", func() {
- c.lowLevelCache.Trim()
- })
-}
-
-func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data any) error {
- var err error
- buf := &bytes.Buffer{}
- c.sw.TrackStage("gob", func() {
- err = gob.NewEncoder(buf).Encode(data)
- })
- if err != nil {
- return fmt.Errorf("failed to gob encode: %w", err)
- }
-
- var aID cache.ActionID
-
- c.sw.TrackStage("key build", func() {
- aID, err = c.pkgActionID(pkg, mode)
- if err == nil {
- subkey, subkeyErr := cache.Subkey(aID, key)
- if subkeyErr != nil {
- err = fmt.Errorf("failed to build subkey: %w", subkeyErr)
- }
- aID = subkey
- }
- })
- if err != nil {
- return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err)
- }
- c.ioSem <- struct{}{}
- c.sw.TrackStage("cache io", func() {
- err = c.lowLevelCache.PutBytes(aID, buf.Bytes())
- })
- <-c.ioSem
- if err != nil {
- return fmt.Errorf("failed to save data to low-level cache by key %s for package %s: %w", key, pkg.Name, err)
- }
-
- return nil
-}
-
-var ErrMissing = errors.New("missing data")
-
-func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data any) error {
- var aID cache.ActionID
- var err error
- c.sw.TrackStage("key build", func() {
- aID, err = c.pkgActionID(pkg, mode)
- if err == nil {
- subkey, subkeyErr := cache.Subkey(aID, key)
- if subkeyErr != nil {
- err = fmt.Errorf("failed to build subkey: %w", subkeyErr)
- }
- aID = subkey
- }
- })
- if err != nil {
- return fmt.Errorf("failed to calculate package %s action id: %w", pkg.Name, err)
- }
-
- var b []byte
- c.ioSem <- struct{}{}
- c.sw.TrackStage("cache io", func() {
- b, _, err = c.lowLevelCache.GetBytes(aID)
- })
- <-c.ioSem
- if err != nil {
- if cache.IsErrMissing(err) {
- return ErrMissing
- }
- return fmt.Errorf("failed to get data from low-level cache by key %s for package %s: %w", key, pkg.Name, err)
- }
-
- c.sw.TrackStage("gob", func() {
- err = gob.NewDecoder(bytes.NewReader(b)).Decode(data)
- })
- if err != nil {
- return fmt.Errorf("failed to gob decode: %w", err)
- }
-
- return nil
-}
-
-func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) {
- hash, err := c.packageHash(pkg, mode)
- if err != nil {
- return cache.ActionID{}, fmt.Errorf("failed to get package hash: %w", err)
- }
-
- key, err := cache.NewHash("action ID")
- if err != nil {
- return cache.ActionID{}, fmt.Errorf("failed to make a hash: %w", err)
- }
- fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
- fmt.Fprintf(key, "pkghash %s\n", hash)
-
- return key.Sum(), nil
-}
-
-// packageHash computes a package's hash. The hash is based on all Go
-// files that make up the package, as well as the hashes of imported
-// packages.
-func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error) {
- type hashResults map[HashMode]string
- hashResI, ok := c.pkgHashes.Load(pkg)
- if ok {
- hashRes := hashResI.(hashResults)
- if _, ok := hashRes[mode]; !ok {
- return "", fmt.Errorf("no mode %d in hash result", mode)
- }
- return hashRes[mode], nil
- }
-
- hashRes := hashResults{}
-
- key, err := cache.NewHash("package hash")
- if err != nil {
- return "", fmt.Errorf("failed to make a hash: %w", err)
- }
-
- fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
- for _, f := range pkg.CompiledGoFiles {
- c.ioSem <- struct{}{}
- h, fErr := cache.FileHash(f)
- <-c.ioSem
- if fErr != nil {
- return "", fmt.Errorf("failed to calculate file %s hash: %w", f, fErr)
- }
- fmt.Fprintf(key, "file %s %x\n", f, h)
- }
- curSum := key.Sum()
- hashRes[HashModeNeedOnlySelf] = hex.EncodeToString(curSum[:])
-
- imps := make([]*packages.Package, 0, len(pkg.Imports))
- for _, imp := range pkg.Imports {
- imps = append(imps, imp)
- }
- sort.Slice(imps, func(i, j int) bool {
- return imps[i].PkgPath < imps[j].PkgPath
- })
-
- calcDepsHash := func(depMode HashMode) error {
- for _, dep := range imps {
- if dep.PkgPath == "unsafe" {
- continue
- }
-
- depHash, depErr := c.packageHash(dep, depMode)
- if depErr != nil {
- return fmt.Errorf("failed to calculate hash for dependency %s with mode %d: %w", dep.Name, depMode, depErr)
- }
-
- fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash)
- }
- return nil
- }
-
- if err := calcDepsHash(HashModeNeedOnlySelf); err != nil {
- return "", err
- }
-
- curSum = key.Sum()
- hashRes[HashModeNeedDirectDeps] = hex.EncodeToString(curSum[:])
-
- if err := calcDepsHash(HashModeNeedAllDeps); err != nil {
- return "", err
- }
- curSum = key.Sum()
- hashRes[HashModeNeedAllDeps] = hex.EncodeToString(curSum[:])
-
- if _, ok := hashRes[mode]; !ok {
- return "", fmt.Errorf("invalid mode %d", mode)
- }
-
- c.pkgHashes.Store(pkg, hashRes)
- return hashRes[mode], nil
-}
diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md b/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md
deleted file mode 100644
index 36ec6ed49..000000000
--- a/vendor/github.com/golangci/golangci-lint/internal/renameio/readme.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# renameio
-
-Extracted from go/src/cmd/go/internal/renameio/
-I don't know what version of Go this package was pulled from.
-
-Adapted for golangci-lint:
-- https://github.com/golangci/golangci-lint/pull/699
-- https://github.com/golangci/golangci-lint/pull/808
-- https://github.com/golangci/golangci-lint/pull/1063
-- https://github.com/golangci/golangci-lint/pull/3204
diff --git a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go b/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go
deleted file mode 100644
index 2f88f4f7c..000000000
--- a/vendor/github.com/golangci/golangci-lint/internal/renameio/renameio.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package renameio writes files atomically by renaming temporary files.
-package renameio
-
-import (
- "bytes"
- "io"
- "math/rand"
- "os"
- "path/filepath"
- "strconv"
-
- "github.com/golangci/golangci-lint/internal/robustio"
-)
-
-const patternSuffix = ".tmp"
-
-// Pattern returns a glob pattern that matches the unrenamed temporary files
-// created when writing to filename.
-func Pattern(filename string) string {
- return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
-}
-
-// WriteFile is like os.WriteFile, but first writes data to an arbitrary
-// file in the same directory as filename, then renames it atomically to the
-// final name.
-//
-// That ensures that the final location, if it exists, is always a complete file.
-func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
- return WriteToFile(filename, bytes.NewReader(data), perm)
-}
-
-// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
-// instead of a slice.
-func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) {
- f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm)
- if err != nil {
- return err
- }
- defer func() {
- // Only call os.Remove on f.Name() if we failed to rename it: otherwise,
- // some other process may have created a new file with the same name after
- // that.
- if err != nil {
- f.Close()
- os.Remove(f.Name())
- }
- }()
-
- if _, err := io.Copy(f, data); err != nil {
- return err
- }
- // Sync the file before renaming it: otherwise, after a crash the reader may
- // observe a 0-length file instead of the actual contents.
- // See https://golang.org/issue/22397#issuecomment-380831736.
- if err := f.Sync(); err != nil {
- return err
- }
- if err := f.Close(); err != nil {
- return err
- }
-
- return robustio.Rename(f.Name(), filename)
-}
-
-// tempFile creates a new temporary file with given permission bits.
-func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) {
- for i := 0; i < 10000; i++ {
- name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix)
- f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
- if os.IsExist(err) {
- continue
- }
- break
- }
- return
-}
-
-// ReadFile is like os.ReadFile, but on Windows retries spurious errors that
-// may occur if the file is concurrently replaced.
-//
-// Errors are classified heuristically and retries are bounded, so even this
-// function may occasionally return a spurious error on Windows.
-// If so, the error will likely wrap one of:
-// - syscall.ERROR_ACCESS_DENIED
-// - syscall.ERROR_FILE_NOT_FOUND
-// - internal/syscall/windows.ERROR_SHARING_VIOLATION
-func ReadFile(filename string) ([]byte, error) {
- return robustio.ReadFile(filename)
-}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go
index cc6c0eacd..4f2c812dc 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go
@@ -62,6 +62,7 @@ func (*cacheCommand) executeClean(_ *cobra.Command, _ []string) error {
func (*cacheCommand) executeStatus(_ *cobra.Command, _ []string) {
cacheDir := cache.DefaultDir()
+
_, _ = fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir)
cacheSizeBytes, err := dirSizeBytes(cacheDir)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
index f289bfdd7..ff7c5e467 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go
@@ -28,7 +28,6 @@ import (
"gopkg.in/yaml.v3"
"github.com/golangci/golangci-lint/internal/cache"
- "github.com/golangci/golangci-lint/internal/pkgcache"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/exitcodes"
"github.com/golangci/golangci-lint/pkg/fsutils"
@@ -209,7 +208,7 @@ func (c *runCommand) preRunE(_ *cobra.Command, args []string) error {
sw := timeutils.NewStopwatch("pkgcache", c.log.Child(logutils.DebugKeyStopwatch))
- pkgCache, err := pkgcache.NewCache(sw, c.log.Child(logutils.DebugKeyPkgCache))
+ pkgCache, err := cache.NewCache(sw, c.log.Child(logutils.DebugKeyPkgCache))
if err != nil {
return fmt.Errorf("failed to build packages cache: %w", err)
}
@@ -640,7 +639,7 @@ func initHashSalt(version string, cfg *config.Config) error {
b := bytes.NewBuffer(binSalt)
b.Write(configSalt)
- cache.SetSalt(b.Bytes())
+ cache.SetSalt(b)
return nil
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
index 93b331bec..b863b329f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go
@@ -75,10 +75,9 @@ func IsGoGreaterThanOrEqual(current, limit string) bool {
}
func detectGoVersion() string {
- file, _ := gomoddirectives.GetModuleFile()
-
- if file != nil && file.Go != nil && file.Go.Version != "" {
- return file.Go.Version
+ goVersion := detectGoVersionFromGoMod()
+ if goVersion != "" {
+ return goVersion
}
v := os.Getenv("GOVERSION")
@@ -88,3 +87,26 @@ func detectGoVersion() string {
return "1.17"
}
+
+// detectGoVersionFromGoMod tries to get Go version from go.mod.
+// It returns `toolchain` version if present,
+// else it returns `go` version if present,
+// else it returns empty.
+func detectGoVersionFromGoMod() string {
+ file, _ := gomoddirectives.GetModuleFile()
+ if file == nil {
+ return ""
+ }
+
+ // The toolchain exists only if 'toolchain' version > 'go' version.
+ // If 'toolchain' version <= 'go' version, `go mod tidy` will remove 'toolchain' version from go.mod.
+ if file.Toolchain != nil && file.Toolchain.Name != "" {
+ return strings.TrimPrefix(file.Toolchain.Name, "go")
+ }
+
+ if file.Go != nil && file.Go.Version != "" {
+ return file.Go.Version
+ }
+
+ return ""
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
index 109de4243..b182d1e0f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/linters_settings.go
@@ -47,6 +47,9 @@ var defaultLintersSettings = LintersSettings{
Sections: []string{"standard", "default"},
SkipGenerated: true,
},
+ GoChecksumType: GoChecksumTypeSettings{
+ DefaultSignifiesExhaustive: true,
+ },
Gocognit: GocognitSettings{
MinComplexity: 30,
},
@@ -216,6 +219,7 @@ type LintersSettings struct {
Gci GciSettings
GinkgoLinter GinkgoLinterSettings
Gocognit GocognitSettings
+ GoChecksumType GoChecksumTypeSettings
Goconst GoConstSettings
Gocritic GoCriticSettings
Gocyclo GoCycloSettings
@@ -225,7 +229,6 @@ type LintersSettings struct {
Gofumpt GofumptSettings
Goheader GoHeaderSettings
Goimports GoImportsSettings
- Gomnd GoMndSettings
GoModDirectives GoModDirectivesSettings
Gomodguard GoModGuardSettings
Gosec GoSecSettings
@@ -233,6 +236,7 @@ type LintersSettings struct {
Gosmopolitan GosmopolitanSettings
Govet GovetSettings
Grouper GrouperSettings
+ Iface IfaceSettings
ImportAs ImportAsSettings
Inamedparam INamedParamSettings
InterfaceBloat InterfaceBloatSettings
@@ -483,6 +487,11 @@ type GinkgoLinterSettings struct {
ForceExpectTo bool `mapstructure:"force-expect-to"`
ValidateAsyncIntervals bool `mapstructure:"validate-async-intervals"`
ForbidSpecPollution bool `mapstructure:"forbid-spec-pollution"`
+ ForceSucceedForFuncs bool `mapstructure:"force-succeed"`
+}
+
+type GoChecksumTypeSettings struct {
+ DefaultSignifiesExhaustive bool `mapstructure:"default-signifies-exhaustive"`
}
type GocognitSettings struct {
@@ -560,14 +569,6 @@ type GoImportsSettings struct {
LocalPrefixes string `mapstructure:"local-prefixes"`
}
-// Deprecated: use MndSettings.
-type GoMndSettings struct {
- MndSettings `mapstructure:",squash"`
-
- // Deprecated: use root level settings instead.
- Settings map[string]map[string]any
-}
-
type GoModDirectivesSettings struct {
ReplaceAllowList []string `mapstructure:"replace-allow-list"`
ReplaceLocal bool `mapstructure:"replace-local"`
@@ -648,6 +649,11 @@ type GrouperSettings struct {
VarRequireGrouping bool `mapstructure:"var-require-grouping"`
}
+type IfaceSettings struct {
+ Enable []string `mapstructure:"enable"`
+ Settings map[string]map[string]any `mapstructure:"settings"`
+}
+
type ImportAsSettings struct {
Alias []ImportAsAlias
NoUnaliased bool `mapstructure:"no-unaliased"`
@@ -725,7 +731,8 @@ type NestifSettings struct {
}
type NilNilSettings struct {
- CheckedTypes []string `mapstructure:"checked-types"`
+ DetectOpposite bool `mapstructure:"detect-opposite"`
+ CheckedTypes []string `mapstructure:"checked-types"`
}
type NlreturnSettings struct {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
index efdbfce1f..efeed3ca4 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/config/loader.go
@@ -302,17 +302,6 @@ func (l *Loader) handleGoVersion() {
l.cfg.LintersSettings.Gocritic.Go = trimmedGoVersion
- // staticcheck related linters.
- if l.cfg.LintersSettings.Staticcheck.GoVersion == "" {
- l.cfg.LintersSettings.Staticcheck.GoVersion = trimmedGoVersion
- }
- if l.cfg.LintersSettings.Gosimple.GoVersion == "" {
- l.cfg.LintersSettings.Gosimple.GoVersion = trimmedGoVersion
- }
- if l.cfg.LintersSettings.Stylecheck.GoVersion == "" {
- l.cfg.LintersSettings.Stylecheck.GoVersion = trimmedGoVersion
- }
-
os.Setenv("GOSECGOVERSION", l.cfg.Run.Go)
}
@@ -413,12 +402,6 @@ func (l *Loader) handleLinterOptionDeprecations() {
l.log.Warnf("The configuration option `linters.godot.check-all` is deprecated, please use `linters.godot.scope: all`.")
}
- // Deprecated since v1.44.0.
- if len(l.cfg.LintersSettings.Gomnd.Settings) > 0 {
- l.log.Warnf("The configuration option `linters.gomnd.settings` is deprecated. Please use the options " +
- "`linters.gomnd.checks`,`linters.gomnd.ignored-numbers`,`linters.gomnd.ignored-files`,`linters.gomnd.ignored-functions`.")
- }
-
// Deprecated since v1.47.0
if l.cfg.LintersSettings.Gofumpt.LangVersion != "" {
l.log.Warnf("The configuration option `linters.gofumpt.lang-version` is deprecated, please use global `run.go`.")
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
index c1274ec09..ac03c71ec 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner.go
@@ -1,8 +1,3 @@
-// checker is a partial copy of https://github.com/golang/tools/blob/master/go/analysis/internal/checker
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
// Package goanalysis defines the implementation of the checker commands.
// The same code drives the multi-analysis driver, the single-analysis
// driver that is conventionally provided for convenience along with
@@ -21,8 +16,8 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
+ "github.com/golangci/golangci-lint/internal/cache"
"github.com/golangci/golangci-lint/internal/errorutil"
- "github.com/golangci/golangci-lint/internal/pkgcache"
"github.com/golangci/golangci-lint/pkg/goanalysis/load"
"github.com/golangci/golangci-lint/pkg/logutils"
"github.com/golangci/golangci-lint/pkg/timeutils"
@@ -52,7 +47,7 @@ type Diagnostic struct {
type runner struct {
log logutils.Log
prefix string // ensure unique analyzer names
- pkgCache *pkgcache.Cache
+ pkgCache *cache.Cache
loadGuard *load.Guard
loadMode LoadMode
passToPkg map[*analysis.Pass]*packages.Package
@@ -60,7 +55,7 @@ type runner struct {
sw *timeutils.Stopwatch
}
-func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard,
+func newRunner(prefix string, logger logutils.Log, pkgCache *cache.Cache, loadGuard *load.Guard,
loadMode LoadMode, sw *timeutils.Stopwatch,
) *runner {
return &runner{
@@ -84,7 +79,6 @@ func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages
[]error, map[*analysis.Pass]*packages.Package,
) {
debugf("Analyzing %d packages on load mode %s", len(initialPackages), r.loadMode)
- defer r.pkgCache.Trim()
roots := r.analyze(initialPackages, analyzers)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
index 58ea297ea..152cab181 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action.go
@@ -1,21 +1,10 @@
package goanalysis
import (
- "errors"
"fmt"
- "go/types"
- "io"
- "reflect"
"runtime/debug"
- "time"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/go/types/objectpath"
"github.com/golangci/golangci-lint/internal/errorutil"
- "github.com/golangci/golangci-lint/internal/pkgcache"
- "github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors"
)
type actionAllocator struct {
@@ -39,54 +28,6 @@ func (actAlloc *actionAllocator) alloc() *action {
return act
}
-// An action represents one unit of analysis work: the application of
-// one analysis to one package. Actions form a DAG, both within a
-// package (as different analyzers are applied, either in sequence or
-// parallel), and across packages (as dependencies are analyzed).
-type action struct {
- a *analysis.Analyzer
- pkg *packages.Package
- pass *analysis.Pass
- deps []*action
- objectFacts map[objectFactKey]analysis.Fact
- packageFacts map[packageFactKey]analysis.Fact
- result any
- diagnostics []analysis.Diagnostic
- err error
- r *runner
- analysisDoneCh chan struct{}
- loadCachedFactsDone bool
- loadCachedFactsOk bool
- isroot bool
- isInitialPkg bool
- needAnalyzeSource bool
-}
-
-func (act *action) String() string {
- return fmt.Sprintf("%s@%s", act.a, act.pkg)
-}
-
-func (act *action) loadCachedFacts() bool {
- if act.loadCachedFactsDone { // can't be set in parallel
- return act.loadCachedFactsOk
- }
-
- res := func() bool {
- if act.isInitialPkg {
- return true // load cached facts only for non-initial packages
- }
-
- if len(act.a.FactTypes) == 0 {
- return true // no need to load facts
- }
-
- return act.loadPersistedFacts()
- }()
- act.loadCachedFactsDone = true
- act.loadCachedFactsOk = res
- return res
-}
-
func (act *action) waitUntilDependingAnalyzersWorked() {
for _, dep := range act.deps {
if dep.pkg == act.pkg {
@@ -109,268 +50,8 @@ func (act *action) analyzeSafe() {
act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack())
}
}()
- act.r.sw.TrackStage(act.a.Name, func() {
- act.analyze()
- })
-}
-
-func (act *action) analyze() {
- defer close(act.analysisDoneCh) // unblock actions depending on this action
-
- if !act.needAnalyzeSource {
- return
- }
-
- defer func(now time.Time) {
- analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now))
- }(time.Now())
-
- // Report an error if any dependency failures.
- var depErrors error
- for _, dep := range act.deps {
- if dep.err == nil {
- continue
- }
-
- depErrors = errors.Join(depErrors, errors.Unwrap(dep.err))
- }
- if depErrors != nil {
- act.err = fmt.Errorf("failed prerequisites: %w", depErrors)
- return
- }
-
- // Plumb the output values of the dependencies
- // into the inputs of this action. Also facts.
- inputs := make(map[*analysis.Analyzer]any)
- startedAt := time.Now()
- for _, dep := range act.deps {
- if dep.pkg == act.pkg {
- // Same package, different analysis (horizontal edge):
- // in-memory outputs of prerequisite analyzers
- // become inputs to this analysis pass.
- inputs[dep.a] = dep.result
- } else if dep.a == act.a { // (always true)
- // Same analysis, different package (vertical edge):
- // serialized facts produced by prerequisite analysis
- // become available to this analysis pass.
- inheritFacts(act, dep)
- }
- }
- factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt))
-
- // Run the analysis.
- pass := &analysis.Pass{
- Analyzer: act.a,
- Fset: act.pkg.Fset,
- Files: act.pkg.Syntax,
- OtherFiles: act.pkg.OtherFiles,
- Pkg: act.pkg.Types,
- TypesInfo: act.pkg.TypesInfo,
- TypesSizes: act.pkg.TypesSizes,
- ResultOf: inputs,
- Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
- ImportObjectFact: act.importObjectFact,
- ExportObjectFact: act.exportObjectFact,
- ImportPackageFact: act.importPackageFact,
- ExportPackageFact: act.exportPackageFact,
- AllObjectFacts: act.allObjectFacts,
- AllPackageFacts: act.allPackageFacts,
- }
- act.pass = pass
- act.r.passToPkgGuard.Lock()
- act.r.passToPkg[pass] = act.pkg
- act.r.passToPkgGuard.Unlock()
-
- if act.pkg.IllTyped {
- // It looks like there should be !pass.Analyzer.RunDespiteErrors
- // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here,
- // but it exits before it if packages.Load have failed.
- act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg})
- } else {
- startedAt = time.Now()
- act.result, act.err = pass.Analyzer.Run(pass)
- analyzedIn := time.Since(startedAt)
- if analyzedIn > time.Millisecond*10 {
- debugf("%s: run analyzer in %s", act, analyzedIn)
- }
- }
-
- // disallow calls after Run
- pass.ExportObjectFact = nil
- pass.ExportPackageFact = nil
-
- if err := act.persistFactsToCache(); err != nil {
- act.r.log.Warnf("Failed to persist facts to cache: %s", err)
- }
-}
-
-// importObjectFact implements Pass.ImportObjectFact.
-// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
-// importObjectFact copies the fact value to *ptr.
-func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
- if obj == nil {
- panic("nil object")
- }
- key := objectFactKey{obj, act.factType(ptr)}
- if v, ok := act.objectFacts[key]; ok {
- reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
- return true
- }
- return false
-}
-
-// exportObjectFact implements Pass.ExportObjectFact.
-func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
- if obj.Pkg() != act.pkg.Types {
- act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
- act.a, act.pkg, obj, fact)
- }
-
- key := objectFactKey{obj, act.factType(fact)}
- act.objectFacts[key] = fact // clobber any existing entry
- if isFactsExportDebug {
- objstr := types.ObjectString(obj, (*types.Package).Name)
- factsExportDebugf("%s: object %s has fact %s\n",
- act.pkg.Fset.Position(obj.Pos()), objstr, fact)
- }
-}
-
-func (act *action) allObjectFacts() []analysis.ObjectFact {
- out := make([]analysis.ObjectFact, 0, len(act.objectFacts))
- for key, fact := range act.objectFacts {
- out = append(out, analysis.ObjectFact{
- Object: key.obj,
- Fact: fact,
- })
- }
- return out
-}
-
-// importPackageFact implements Pass.ImportPackageFact.
-// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
-// fact copies the fact value to *ptr.
-func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
- if pkg == nil {
- panic("nil package")
- }
- key := packageFactKey{pkg, act.factType(ptr)}
- if v, ok := act.packageFacts[key]; ok {
- reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
- return true
- }
- return false
-}
-
-// exportPackageFact implements Pass.ExportPackageFact.
-func (act *action) exportPackageFact(fact analysis.Fact) {
- key := packageFactKey{act.pass.Pkg, act.factType(fact)}
- act.packageFacts[key] = fact // clobber any existing entry
- factsDebugf("%s: package %s has fact %s\n",
- act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
-}
-
-func (act *action) allPackageFacts() []analysis.PackageFact {
- out := make([]analysis.PackageFact, 0, len(act.packageFacts))
- for key, fact := range act.packageFacts {
- out = append(out, analysis.PackageFact{
- Package: key.pkg,
- Fact: fact,
- })
- }
- return out
-}
-
-func (act *action) factType(fact analysis.Fact) reflect.Type {
- t := reflect.TypeOf(fact)
- if t.Kind() != reflect.Ptr {
- act.r.log.Fatalf("invalid Fact type: got %T, want pointer", t)
- }
- return t
-}
-
-func (act *action) persistFactsToCache() error {
- analyzer := act.a
- if len(analyzer.FactTypes) == 0 {
- return nil
- }
-
- // Merge new facts into the package and persist them.
- var facts []Fact
- for key, fact := range act.packageFacts {
- if key.pkg != act.pkg.Types {
- // The fact is from inherited facts from another package
- continue
- }
- facts = append(facts, Fact{
- Path: "",
- Fact: fact,
- })
- }
- for key, fact := range act.objectFacts {
- obj := key.obj
- if obj.Pkg() != act.pkg.Types {
- // The fact is from inherited facts from another package
- continue
- }
-
- path, err := objectpath.For(obj)
- if err != nil {
- // The object is not globally addressable
- continue
- }
-
- facts = append(facts, Fact{
- Path: string(path),
- Fact: fact,
- })
- }
-
- factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
-
- key := fmt.Sprintf("%s/facts", analyzer.Name)
- return act.r.pkgCache.Put(act.pkg, pkgcache.HashModeNeedAllDeps, key, facts)
-}
-
-func (act *action) loadPersistedFacts() bool {
- var facts []Fact
- key := fmt.Sprintf("%s/facts", act.a.Name)
- if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil {
- if !errors.Is(err, pkgcache.ErrMissing) && !errors.Is(err, io.EOF) {
- act.r.log.Warnf("Failed to get persisted facts: %s", err)
- }
-
- factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name)
- return false
- }
-
- factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
-
- for _, f := range facts {
- if f.Path == "" { // this is a package fact
- key := packageFactKey{act.pkg.Types, act.factType(f.Fact)}
- act.packageFacts[key] = f.Fact
- continue
- }
- obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path))
- if err != nil {
- // Be lenient about these errors. For example, when
- // analyzing io/ioutil from source, we may get a fact
- // for methods on the devNull type, and objectpath
- // will happily create a path for them. However, when
- // we later load io/ioutil from export data, the path
- // no longer resolves.
- //
- // If an exported type embeds the unexported type,
- // then (part of) the unexported type will become part
- // of the type information and our path will resolve
- // again.
- continue
- }
- factKey := objectFactKey{obj, act.factType(f.Fact)}
- act.objectFacts[factKey] = f.Fact
- }
- return true
+ act.r.sw.TrackStage(act.a.Name, act.analyze)
}
func (act *action) markDepsForAnalyzingSource() {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
new file mode 100644
index 000000000..fbc2f82fa
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_action_cache.go
@@ -0,0 +1,127 @@
+package goanalysis
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/types/objectpath"
+
+ "github.com/golangci/golangci-lint/internal/cache"
+)
+
+type Fact struct {
+ Path string // non-empty only for object facts
+ Fact analysis.Fact
+}
+
+func (act *action) loadCachedFacts() bool {
+ if act.loadCachedFactsDone { // can't be set in parallel
+ return act.loadCachedFactsOk
+ }
+
+ res := func() bool {
+ if act.isInitialPkg {
+ return true // load cached facts only for non-initial packages
+ }
+
+ if len(act.a.FactTypes) == 0 {
+ return true // no need to load facts
+ }
+
+ return act.loadPersistedFacts()
+ }()
+ act.loadCachedFactsDone = true
+ act.loadCachedFactsOk = res
+ return res
+}
+
+func (act *action) persistFactsToCache() error {
+ analyzer := act.a
+ if len(analyzer.FactTypes) == 0 {
+ return nil
+ }
+
+ // Merge new facts into the package and persist them.
+ var facts []Fact
+ for key, fact := range act.packageFacts {
+ if key.pkg != act.pkg.Types {
+ // The fact is from inherited facts from another package
+ continue
+ }
+ facts = append(facts, Fact{
+ Path: "",
+ Fact: fact,
+ })
+ }
+ for key, fact := range act.objectFacts {
+ obj := key.obj
+ if obj.Pkg() != act.pkg.Types {
+ // The fact is from inherited facts from another package
+ continue
+ }
+
+ path, err := objectpath.For(obj)
+ if err != nil {
+ // The object is not globally addressable
+ continue
+ }
+
+ facts = append(facts, Fact{
+ Path: string(path),
+ Fact: fact,
+ })
+ }
+
+ factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
+
+ return act.r.pkgCache.Put(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(analyzer), facts)
+}
+
+func (act *action) loadPersistedFacts() bool {
+ var facts []Fact
+
+ err := act.r.pkgCache.Get(act.pkg, cache.HashModeNeedAllDeps, factCacheKey(act.a), &facts)
+ if err != nil {
+ if !errors.Is(err, cache.ErrMissing) && !errors.Is(err, io.EOF) {
+ act.r.log.Warnf("Failed to get persisted facts: %s", err)
+ }
+
+ factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name)
+ return false
+ }
+
+ factsCacheDebugf("Loaded %d cached facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name)
+
+ for _, f := range facts {
+ if f.Path == "" { // this is a package fact
+ key := packageFactKey{act.pkg.Types, act.factType(f.Fact)}
+ act.packageFacts[key] = f.Fact
+ continue
+ }
+ obj, err := objectpath.Object(act.pkg.Types, objectpath.Path(f.Path))
+ if err != nil {
+ // Be lenient about these errors. For example, when
+ // analyzing io/ioutil from source, we may get a fact
+ // for methods on the devNull type, and objectpath
+ // will happily create a path for them. However, when
+ // we later load io/ioutil from export data, the path
+ // no longer resolves.
+ //
+ // If an exported type embeds the unexported type,
+ // then (part of) the unexported type will become part
+ // of the type information and our path will resolve
+ // again.
+ continue
+ }
+ factKey := objectFactKey{obj, act.factType(f.Fact)}
+ act.objectFacts[factKey] = f.Fact
+ }
+
+ return true
+}
+
+func factCacheKey(a *analysis.Analyzer) string {
+ return fmt.Sprintf("%s/facts", a.Name)
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
new file mode 100644
index 000000000..d868f8f5d
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_base.go
@@ -0,0 +1,370 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Partial copy of https://github.com/golang/tools/blob/dba5486c2a1d03519930812112b23ed2c45c04fc/go/analysis/internal/checker/checker.go
+
+package goanalysis
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "go/types"
+ "reflect"
+ "time"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/packages"
+
+ "github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors"
+)
+
+// NOTE(ldez) altered: custom fields; remove 'once' and 'duration'.
+// An action represents one unit of analysis work: the application of
+// one analysis to one package. Actions form a DAG, both within a
+// package (as different analyzers are applied, either in sequence or
+// parallel), and across packages (as dependencies are analyzed).
+type action struct {
+ a *analysis.Analyzer
+ pkg *packages.Package
+ pass *analysis.Pass
+ isroot bool
+ deps []*action
+ objectFacts map[objectFactKey]analysis.Fact
+ packageFacts map[packageFactKey]analysis.Fact
+ result any
+ diagnostics []analysis.Diagnostic
+ err error
+
+ // NOTE(ldez) custom fields.
+ r *runner
+ analysisDoneCh chan struct{}
+ loadCachedFactsDone bool
+ loadCachedFactsOk bool
+ isInitialPkg bool
+ needAnalyzeSource bool
+}
+
+// NOTE(ldez) no alteration.
+type objectFactKey struct {
+ obj types.Object
+ typ reflect.Type
+}
+
+// NOTE(ldez) no alteration.
+type packageFactKey struct {
+ pkg *types.Package
+ typ reflect.Type
+}
+
+// NOTE(ldez) no alteration.
+func (act *action) String() string {
+ return fmt.Sprintf("%s@%s", act.a, act.pkg)
+}
+
+// NOTE(ldez) altered version of `func (act *action) execOnce()`.
+func (act *action) analyze() {
+ defer close(act.analysisDoneCh) // unblock actions depending on this action
+
+ if !act.needAnalyzeSource {
+ return
+ }
+
+ defer func(now time.Time) {
+ analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now))
+ }(time.Now())
+
+ // Report an error if any dependency failures.
+ var depErrors error
+ for _, dep := range act.deps {
+ if dep.err != nil {
+ depErrors = errors.Join(depErrors, errors.Unwrap(dep.err))
+ }
+ }
+
+ if depErrors != nil {
+ act.err = fmt.Errorf("failed prerequisites: %w", depErrors)
+ return
+ }
+
+ // Plumb the output values of the dependencies
+ // into the inputs of this action. Also facts.
+ inputs := make(map[*analysis.Analyzer]any)
+ act.objectFacts = make(map[objectFactKey]analysis.Fact)
+ act.packageFacts = make(map[packageFactKey]analysis.Fact)
+ startedAt := time.Now()
+
+ for _, dep := range act.deps {
+ if dep.pkg == act.pkg {
+ // Same package, different analysis (horizontal edge):
+ // in-memory outputs of prerequisite analyzers
+ // become inputs to this analysis pass.
+ inputs[dep.a] = dep.result
+ } else if dep.a == act.a { // (always true)
+ // Same analysis, different package (vertical edge):
+ // serialized facts produced by prerequisite analysis
+ // become available to this analysis pass.
+ inheritFacts(act, dep)
+ }
+ }
+
+ factsDebugf("%s: Inherited facts in %s", act, time.Since(startedAt))
+
+ module := &analysis.Module{} // possibly empty (non nil) in go/analysis drivers.
+ if mod := act.pkg.Module; mod != nil {
+ module.Path = mod.Path
+ module.Version = mod.Version
+ module.GoVersion = mod.GoVersion
+ }
+
+ // Run the analysis.
+ pass := &analysis.Pass{
+ Analyzer: act.a,
+ Fset: act.pkg.Fset,
+ Files: act.pkg.Syntax,
+ OtherFiles: act.pkg.OtherFiles,
+ IgnoredFiles: act.pkg.IgnoredFiles,
+ Pkg: act.pkg.Types,
+ TypesInfo: act.pkg.TypesInfo,
+ TypesSizes: act.pkg.TypesSizes,
+ TypeErrors: act.pkg.TypeErrors,
+ Module: module,
+
+ ResultOf: inputs,
+ Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
+ ImportObjectFact: act.importObjectFact,
+ ExportObjectFact: act.exportObjectFact,
+ ImportPackageFact: act.importPackageFact,
+ ExportPackageFact: act.exportPackageFact,
+ AllObjectFacts: act.allObjectFacts,
+ AllPackageFacts: act.allPackageFacts,
+ }
+
+ act.pass = pass
+ act.r.passToPkgGuard.Lock()
+ act.r.passToPkg[pass] = act.pkg
+ act.r.passToPkgGuard.Unlock()
+
+ if act.pkg.IllTyped {
+ // It looks like there should be !pass.Analyzer.RunDespiteErrors
+ // but govet's cgocall crashes on it. Govet itself contains !pass.Analyzer.RunDespiteErrors condition here,
+ // but it exits before it if packages.Load have failed.
+ act.err = fmt.Errorf("analysis skipped: %w", &pkgerrors.IllTypedError{Pkg: act.pkg})
+ } else {
+ startedAt = time.Now()
+
+ act.result, act.err = pass.Analyzer.Run(pass)
+
+ analyzedIn := time.Since(startedAt)
+ if analyzedIn > time.Millisecond*10 {
+ debugf("%s: run analyzer in %s", act, analyzedIn)
+ }
+ }
+
+ // disallow calls after Run
+ pass.ExportObjectFact = nil
+ pass.ExportPackageFact = nil
+
+ err := act.persistFactsToCache()
+ if err != nil {
+ act.r.log.Warnf("Failed to persist facts to cache: %s", err)
+ }
+}
+
+// NOTE(ldez) altered: logger; serialize.
+// inheritFacts populates act.facts with
+// those it obtains from its dependency, dep.
+func inheritFacts(act, dep *action) {
+ const serialize = false
+
+ for key, fact := range dep.objectFacts {
+ // Filter out facts related to objects
+ // that are irrelevant downstream
+ // (equivalently: not in the compiler export data).
+ if !exportedFrom(key.obj, dep.pkg.Types) {
+ factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
+ continue
+ }
+
+ // Optionally serialize/deserialize fact
+ // to verify that it works across address spaces.
+ if serialize {
+ encodedFact, err := codeFact(fact)
+ if err != nil {
+ act.r.log.Panicf("internal error: encoding of %T fact failed in %v: %v", fact, act, err)
+ }
+ fact = encodedFact
+ }
+
+ factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
+
+ act.objectFacts[key] = fact
+ }
+
+ for key, fact := range dep.packageFacts {
+ // TODO: filter out facts that belong to
+ // packages not mentioned in the export data
+ // to prevent side channels.
+
+ // Optionally serialize/deserialize fact
+ // to verify that it works across address spaces
+ // and is deterministic.
+ if serialize {
+ encodedFact, err := codeFact(fact)
+ if err != nil {
+ act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
+ }
+ fact = encodedFact
+ }
+
+ factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
+
+ act.packageFacts[key] = fact
+ }
+}
+
+// NOTE(ldez) no alteration.
+// codeFact encodes then decodes a fact,
+// just to exercise that logic.
+func codeFact(fact analysis.Fact) (analysis.Fact, error) {
+ // We encode facts one at a time.
+ // A real modular driver would emit all facts
+ // into one encoder to improve gob efficiency.
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
+ return nil, err
+ }
+
+ // Encode it twice and assert that we get the same bits.
+ // This helps detect nondeterministic Gob encoding (e.g. of maps).
+ var buf2 bytes.Buffer
+ if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
+ return nil, err
+ }
+ if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
+ return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
+ }
+
+ newFact := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
+ if err := gob.NewDecoder(&buf).Decode(newFact); err != nil {
+ return nil, err
+ }
+ return newFact, nil
+}
+
+// NOTE(ldez) no alteration.
+// exportedFrom reports whether obj may be visible to a package that imports pkg.
+// This includes not just the exported members of pkg, but also unexported
+// constants, types, fields, and methods, perhaps belonging to other packages,
+// that find their way into the API.
+// This is an over-approximation of the more accurate approach used by
+// gc export data, which walks the type graph, but it's much simpler.
+//
+// TODO(adonovan): do more accurate filtering by walking the type graph.
+func exportedFrom(obj types.Object, pkg *types.Package) bool {
+ switch obj := obj.(type) {
+ case *types.Func:
+ return obj.Exported() && obj.Pkg() == pkg ||
+ obj.Type().(*types.Signature).Recv() != nil
+ case *types.Var:
+ if obj.IsField() {
+ return true
+ }
+ // we can't filter more aggressively than this because we need
+ // to consider function parameters exported, but have no way
+ // of telling apart function parameters from local variables.
+ return obj.Pkg() == pkg
+ case *types.TypeName, *types.Const:
+ return true
+ }
+ return false // Nil, Builtin, Label, or PkgName
+}
+
+// NOTE(ldez) altered: logger; `act.factType`
+// importObjectFact implements Pass.ImportObjectFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// importObjectFact copies the fact value to *ptr.
+func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
+ if obj == nil {
+ panic("nil object")
+ }
+ key := objectFactKey{obj, act.factType(ptr)}
+ if v, ok := act.objectFacts[key]; ok {
+ reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+ return true
+ }
+ return false
+}
+
+// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`.
+// exportObjectFact implements Pass.ExportObjectFact.
+func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
+ if obj.Pkg() != act.pkg.Types {
+ act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package",
+ act.a, act.pkg, obj, fact)
+ }
+
+ key := objectFactKey{obj, act.factType(fact)}
+ act.objectFacts[key] = fact // clobber any existing entry
+ if isFactsExportDebug {
+ objstr := types.ObjectString(obj, (*types.Package).Name)
+
+ factsExportDebugf("%s: object %s has fact %s\n",
+ act.pkg.Fset.Position(obj.Pos()), objstr, fact)
+ }
+}
+
+// NOTE(ldez) no alteration.
+func (act *action) allObjectFacts() []analysis.ObjectFact {
+ facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
+ for k := range act.objectFacts {
+ facts = append(facts, analysis.ObjectFact{Object: k.obj, Fact: act.objectFacts[k]})
+ }
+ return facts
+}
+
+// NOTE(ldez) altered: `act.factType`
+// importPackageFact implements Pass.ImportPackageFact.
+// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
+// fact copies the fact value to *ptr.
+func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
+ if pkg == nil {
+ panic("nil package")
+ }
+ key := packageFactKey{pkg, act.factType(ptr)}
+ if v, ok := act.packageFacts[key]; ok {
+ reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
+ return true
+ }
+ return false
+}
+
+// NOTE(ldez) altered: removes code related to `act.pass.ExportPackageFact`; logger; `act.factType`.
+// exportPackageFact implements Pass.ExportPackageFact.
+func (act *action) exportPackageFact(fact analysis.Fact) {
+ key := packageFactKey{act.pass.Pkg, act.factType(fact)}
+ act.packageFacts[key] = fact // clobber any existing entry
+
+ factsDebugf("%s: package %s has fact %s\n",
+ act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
+}
+
+// NOTE(ldez) altered: add receiver to handle logs.
+func (act *action) factType(fact analysis.Fact) reflect.Type {
+ t := reflect.TypeOf(fact)
+ if t.Kind() != reflect.Ptr {
+ act.r.log.Fatalf("invalid Fact type: got %T, want pointer", fact)
+ }
+ return t
+}
+
+// NOTE(ldez) no alteration.
+func (act *action) allPackageFacts() []analysis.PackageFact {
+ facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
+ for k := range act.packageFacts {
+ facts = append(facts, analysis.PackageFact{Package: k.pkg, Fact: act.packageFacts[k]})
+ }
+ return facts
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_facts.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_facts.go
deleted file mode 100644
index 1d0fb974e..000000000
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_facts.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package goanalysis
-
-import (
- "bytes"
- "encoding/gob"
- "fmt"
- "go/types"
- "reflect"
-
- "golang.org/x/tools/go/analysis"
-)
-
-type objectFactKey struct {
- obj types.Object
- typ reflect.Type
-}
-
-type packageFactKey struct {
- pkg *types.Package
- typ reflect.Type
-}
-
-type Fact struct {
- Path string // non-empty only for object facts
- Fact analysis.Fact
-}
-
-// inheritFacts populates act.facts with
-// those it obtains from its dependency, dep.
-func inheritFacts(act, dep *action) {
- serialize := false
-
- for key, fact := range dep.objectFacts {
- // Filter out facts related to objects
- // that are irrelevant downstream
- // (equivalently: not in the compiler export data).
- if !exportedFrom(key.obj, dep.pkg.Types) {
- factsInheritDebugf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
- continue
- }
-
- // Optionally serialize/deserialize fact
- // to verify that it works across address spaces.
- if serialize {
- var err error
- fact, err = codeFact(fact)
- if err != nil {
- act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
- }
- }
-
- factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
- act.objectFacts[key] = fact
- }
-
- for key, fact := range dep.packageFacts {
- // TODO: filter out facts that belong to
- // packages not mentioned in the export data
- // to prevent side channels.
-
- // Optionally serialize/deserialize fact
- // to verify that it works across address spaces
- // and is deterministic.
- if serialize {
- var err error
- fact, err = codeFact(fact)
- if err != nil {
- act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
- }
- }
-
- factsInheritDebugf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
- act.packageFacts[key] = fact
- }
-}
-
-// codeFact encodes then decodes a fact,
-// just to exercise that logic.
-func codeFact(fact analysis.Fact) (analysis.Fact, error) {
- // We encode facts one at a time.
- // A real modular driver would emit all facts
- // into one encoder to improve gob efficiency.
- var buf bytes.Buffer
- if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
- return nil, err
- }
-
- // Encode it twice and assert that we get the same bits.
- // This helps detect nondeterministic Gob encoding (e.g. of maps).
- var buf2 bytes.Buffer
- if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
- return nil, err
- }
- if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
- return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
- }
-
- newFact := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
- if err := gob.NewDecoder(&buf).Decode(newFact); err != nil {
- return nil, err
- }
- return newFact, nil
-}
-
-// exportedFrom reports whether obj may be visible to a package that imports pkg.
-// This includes not just the exported members of pkg, but also unexported
-// constants, types, fields, and methods, perhaps belonging to other packages,
-// that find there way into the API.
-// This is an over-approximation of the more accurate approach used by
-// gc export data, which walks the type graph, but it's much simpler.
-//
-// TODO(adonovan): do more accurate filtering by walking the type graph.
-func exportedFrom(obj types.Object, pkg *types.Package) bool {
- switch obj := obj.(type) {
- case *types.Func:
- return obj.Exported() && obj.Pkg() == pkg ||
- obj.Type().(*types.Signature).Recv() != nil
- case *types.Var:
- return obj.Exported() && obj.Pkg() == pkg ||
- obj.IsField()
- case *types.TypeName, *types.Const:
- return true
- }
- return false // Nil, Builtin, Label, or PkgName
-}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
index 8abe2b6c1..44d676958 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runner_loadingpackage.go
@@ -4,11 +4,13 @@ import (
"errors"
"fmt"
"go/ast"
+ "go/build"
"go/parser"
"go/scanner"
"go/types"
"os"
"reflect"
+ "strings"
"sync"
"sync/atomic"
@@ -152,10 +154,15 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error {
return imp.Types, nil
}
- // TODO(ldez) temporary workaround
- rv, err := goutil.CleanRuntimeVersion()
- if err != nil {
- return err
+ var goVersion string
+ if pkg.Module != nil && pkg.Module.GoVersion != "" {
+ goVersion = "go" + strings.TrimPrefix(pkg.Module.GoVersion, "go")
+ } else {
+ var err error
+ goVersion, err = goutil.CleanRuntimeVersion()
+ if err != nil {
+ return err
+ }
}
tc := &types.Config{
@@ -163,7 +170,8 @@ func (lp *loadingPackage) loadFromSource(loadMode LoadMode) error {
Error: func(err error) {
pkg.Errors = append(pkg.Errors, lp.convertError(err)...)
},
- GoVersion: rv, // TODO(ldez) temporary workaround
+ GoVersion: goVersion,
+ Sizes: types.SizesFor(build.Default.Compiler, build.Default.GOARCH),
}
_ = types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
index 79e52f52a..a9aee03a2 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners.go
@@ -2,17 +2,10 @@ package goanalysis
import (
"fmt"
- "runtime"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "time"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
- "github.com/golangci/golangci-lint/internal/pkgcache"
"github.com/golangci/golangci-lint/pkg/goanalysis/pkgerrors"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/logutils"
@@ -119,156 +112,3 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st
}
return issues
}
-
-func getIssuesCacheKey(analyzers []*analysis.Analyzer) string {
- return "lint/result:" + analyzersHashID(analyzers)
-}
-
-func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool,
- issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer,
-) {
- startedAt := time.Now()
- perPkgIssues := map[*packages.Package][]result.Issue{}
- for ind := range issues {
- i := &issues[ind]
- perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i)
- }
-
- var savedIssuesCount int64 = 0
- lintResKey := getIssuesCacheKey(analyzers)
-
- workerCount := runtime.GOMAXPROCS(-1)
- var wg sync.WaitGroup
- wg.Add(workerCount)
-
- pkgCh := make(chan *packages.Package, len(allPkgs))
- for i := 0; i < workerCount; i++ {
- go func() {
- defer wg.Done()
- for pkg := range pkgCh {
- pkgIssues := perPkgIssues[pkg]
- encodedIssues := make([]EncodingIssue, 0, len(pkgIssues))
- for ind := range pkgIssues {
- i := &pkgIssues[ind]
- encodedIssues = append(encodedIssues, EncodingIssue{
- FromLinter: i.FromLinter,
- Text: i.Text,
- Severity: i.Severity,
- Pos: i.Pos,
- LineRange: i.LineRange,
- Replacement: i.Replacement,
- ExpectNoLint: i.ExpectNoLint,
- ExpectedNoLintLinter: i.ExpectedNoLintLinter,
- })
- }
-
- atomic.AddInt64(&savedIssuesCount, int64(len(encodedIssues)))
- if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil {
- lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err)
- } else {
- issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues))
- }
- }
- }()
- }
-
- for _, pkg := range allPkgs {
- if pkgsFromCache[pkg] {
- continue
- }
-
- pkgCh <- pkg
- }
- close(pkgCh)
- wg.Wait()
-
- issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt))
-}
-
-func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
- analyzers []*analysis.Analyzer,
-) (issuesFromCache []result.Issue, pkgsFromCache map[*packages.Package]bool) {
- startedAt := time.Now()
-
- lintResKey := getIssuesCacheKey(analyzers)
- type cacheRes struct {
- issues []result.Issue
- loadErr error
- }
- pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs))
- for _, pkg := range pkgs {
- pkgToCacheRes[pkg] = &cacheRes{}
- }
-
- workerCount := runtime.GOMAXPROCS(-1)
- var wg sync.WaitGroup
- wg.Add(workerCount)
-
- pkgCh := make(chan *packages.Package, len(pkgs))
- for range workerCount {
- go func() {
- defer wg.Done()
- for pkg := range pkgCh {
- var pkgIssues []EncodingIssue
- err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues)
- cacheRes := pkgToCacheRes[pkg]
- cacheRes.loadErr = err
- if err != nil {
- continue
- }
- if len(pkgIssues) == 0 {
- continue
- }
-
- issues := make([]result.Issue, 0, len(pkgIssues))
- for i := range pkgIssues {
- issue := &pkgIssues[i]
- issues = append(issues, result.Issue{
- FromLinter: issue.FromLinter,
- Text: issue.Text,
- Severity: issue.Severity,
- Pos: issue.Pos,
- LineRange: issue.LineRange,
- Replacement: issue.Replacement,
- Pkg: pkg,
- ExpectNoLint: issue.ExpectNoLint,
- ExpectedNoLintLinter: issue.ExpectedNoLintLinter,
- })
- }
- cacheRes.issues = issues
- }
- }()
- }
-
- for _, pkg := range pkgs {
- pkgCh <- pkg
- }
- close(pkgCh)
- wg.Wait()
-
- loadedIssuesCount := 0
- pkgsFromCache = map[*packages.Package]bool{}
- for pkg, cacheRes := range pkgToCacheRes {
- if cacheRes.loadErr == nil {
- loadedIssuesCount += len(cacheRes.issues)
- pkgsFromCache[pkg] = true
- issuesFromCache = append(issuesFromCache, cacheRes.issues...)
- issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues))
- } else {
- issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr)
- }
- }
- issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages",
- loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs))
- return issuesFromCache, pkgsFromCache
-}
-
-func analyzersHashID(analyzers []*analysis.Analyzer) string {
- names := make([]string, 0, len(analyzers))
- for _, a := range analyzers {
- names = append(names, a.Name)
- }
-
- sort.Strings(names)
- return strings.Join(names, ",")
-}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
new file mode 100644
index 000000000..8c244688b
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/goanalysis/runners_cache.go
@@ -0,0 +1,172 @@
+package goanalysis
+
+import (
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/packages"
+
+ "github.com/golangci/golangci-lint/internal/cache"
+ "github.com/golangci/golangci-lint/pkg/lint/linter"
+ "github.com/golangci/golangci-lint/pkg/result"
+)
+
+func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool,
+ issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer,
+) {
+ startedAt := time.Now()
+ perPkgIssues := map[*packages.Package][]result.Issue{}
+ for ind := range issues {
+ i := &issues[ind]
+ perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i)
+ }
+
+ var savedIssuesCount int64 = 0
+ lintResKey := getIssuesCacheKey(analyzers)
+
+ workerCount := runtime.GOMAXPROCS(-1)
+ var wg sync.WaitGroup
+ wg.Add(workerCount)
+
+ pkgCh := make(chan *packages.Package, len(allPkgs))
+ for i := 0; i < workerCount; i++ {
+ go func() {
+ defer wg.Done()
+ for pkg := range pkgCh {
+ pkgIssues := perPkgIssues[pkg]
+ encodedIssues := make([]EncodingIssue, 0, len(pkgIssues))
+ for ind := range pkgIssues {
+ i := &pkgIssues[ind]
+ encodedIssues = append(encodedIssues, EncodingIssue{
+ FromLinter: i.FromLinter,
+ Text: i.Text,
+ Severity: i.Severity,
+ Pos: i.Pos,
+ LineRange: i.LineRange,
+ Replacement: i.Replacement,
+ ExpectNoLint: i.ExpectNoLint,
+ ExpectedNoLintLinter: i.ExpectedNoLintLinter,
+ })
+ }
+
+ atomic.AddInt64(&savedIssuesCount, int64(len(encodedIssues)))
+ if err := lintCtx.PkgCache.Put(pkg, cache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil {
+ lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err)
+ } else {
+ issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues))
+ }
+ }
+ }()
+ }
+
+ for _, pkg := range allPkgs {
+ if pkgsFromCache[pkg] {
+ continue
+ }
+
+ pkgCh <- pkg
+ }
+ close(pkgCh)
+ wg.Wait()
+
+ lintCtx.PkgCache.Close()
+
+ issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt))
+}
+
+func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context,
+ analyzers []*analysis.Analyzer,
+) (issuesFromCache []result.Issue, pkgsFromCache map[*packages.Package]bool) {
+ startedAt := time.Now()
+
+ lintResKey := getIssuesCacheKey(analyzers)
+ type cacheRes struct {
+ issues []result.Issue
+ loadErr error
+ }
+ pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs))
+ for _, pkg := range pkgs {
+ pkgToCacheRes[pkg] = &cacheRes{}
+ }
+
+ workerCount := runtime.GOMAXPROCS(-1)
+ var wg sync.WaitGroup
+ wg.Add(workerCount)
+
+ pkgCh := make(chan *packages.Package, len(pkgs))
+ for range workerCount {
+ go func() {
+ defer wg.Done()
+ for pkg := range pkgCh {
+ var pkgIssues []EncodingIssue
+ err := lintCtx.PkgCache.Get(pkg, cache.HashModeNeedAllDeps, lintResKey, &pkgIssues)
+ cacheRes := pkgToCacheRes[pkg]
+ cacheRes.loadErr = err
+ if err != nil {
+ continue
+ }
+ if len(pkgIssues) == 0 {
+ continue
+ }
+
+ issues := make([]result.Issue, 0, len(pkgIssues))
+ for i := range pkgIssues {
+ issue := &pkgIssues[i]
+ issues = append(issues, result.Issue{
+ FromLinter: issue.FromLinter,
+ Text: issue.Text,
+ Severity: issue.Severity,
+ Pos: issue.Pos,
+ LineRange: issue.LineRange,
+ Replacement: issue.Replacement,
+ Pkg: pkg,
+ ExpectNoLint: issue.ExpectNoLint,
+ ExpectedNoLintLinter: issue.ExpectedNoLintLinter,
+ })
+ }
+ cacheRes.issues = issues
+ }
+ }()
+ }
+
+ for _, pkg := range pkgs {
+ pkgCh <- pkg
+ }
+ close(pkgCh)
+ wg.Wait()
+
+ loadedIssuesCount := 0
+ pkgsFromCache = map[*packages.Package]bool{}
+ for pkg, cacheRes := range pkgToCacheRes {
+ if cacheRes.loadErr == nil {
+ loadedIssuesCount += len(cacheRes.issues)
+ pkgsFromCache[pkg] = true
+ issuesFromCache = append(issuesFromCache, cacheRes.issues...)
+ issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues))
+ } else {
+ issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr)
+ }
+ }
+ issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages",
+ loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs))
+ return issuesFromCache, pkgsFromCache
+}
+
+func getIssuesCacheKey(analyzers []*analysis.Analyzer) string {
+ return "lint/result:" + analyzersHashID(analyzers)
+}
+
+func analyzersHashID(analyzers []*analysis.Analyzer) string {
+ names := make([]string, 0, len(analyzers))
+ for _, a := range analyzers {
+ names = append(names, a.Name)
+ }
+
+ sort.Strings(names)
+ return strings.Join(names, ",")
+}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go
index eb8c0577a..13baba5a6 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/cyclop/cyclop.go
@@ -30,7 +30,7 @@ func New(settings *config.Cyclop) *goanalysis.Linter {
return goanalysis.NewLinter(
a.Name,
- "checks function and package cyclomatic complexity",
+ a.Doc,
[]*analysis.Analyzer{a},
cfg,
).WithLoadMode(goanalysis.LoadModeTypesInfo)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
index 54d207257..9873c9ba4 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ginkgolinter/ginkgolinter.go
@@ -23,8 +23,9 @@ func New(settings *config.GinkgoLinterSettings) *goanalysis.Linter {
SuppressTypeCompare: types.Boolean(settings.SuppressTypeCompareWarning),
AllowHaveLen0: types.Boolean(settings.AllowHaveLenZero),
ForceExpectTo: types.Boolean(settings.ForceExpectTo),
- ValidateAsyncIntervals: types.Boolean(settings.ForbidSpecPollution),
- ForbidSpecPollution: types.Boolean(settings.ValidateAsyncIntervals),
+ ValidateAsyncIntervals: types.Boolean(settings.ValidateAsyncIntervals),
+ ForbidSpecPollution: types.Boolean(settings.ForbidSpecPollution),
+ ForceSucceedForFuncs: types.Boolean(settings.ForceSucceedForFuncs),
}
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
index 446f0e564..7aab0efeb 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecksumtype/gochecksumtype.go
@@ -8,6 +8,7 @@ import (
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/packages"
+ "github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/goanalysis"
"github.com/golangci/golangci-lint/pkg/lint/linter"
"github.com/golangci/golangci-lint/pkg/result"
@@ -15,7 +16,7 @@ import (
const linterName = "gochecksumtype"
-func New() *goanalysis.Linter {
+func New(settings *config.GoChecksumTypeSettings) *goanalysis.Linter {
var mu sync.Mutex
var resIssues []goanalysis.Issue
@@ -23,7 +24,7 @@ func New() *goanalysis.Linter {
Name: linterName,
Doc: goanalysis.TheOnlyanalyzerDoc,
Run: func(pass *analysis.Pass) (any, error) {
- issues, err := runGoCheckSumType(pass)
+ issues, err := runGoCheckSumType(pass, settings)
if err != nil {
return nil, err
}
@@ -50,7 +51,7 @@ func New() *goanalysis.Linter {
}).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
-func runGoCheckSumType(pass *analysis.Pass) ([]goanalysis.Issue, error) {
+func runGoCheckSumType(pass *analysis.Pass, settings *config.GoChecksumTypeSettings) ([]goanalysis.Issue, error) {
var resIssues []goanalysis.Issue
pkg := &packages.Package{
@@ -61,7 +62,8 @@ func runGoCheckSumType(pass *analysis.Pass) ([]goanalysis.Issue, error) {
}
var unknownError error
- errors := gochecksumtype.Run([]*packages.Package{pkg})
+ errors := gochecksumtype.Run([]*packages.Package{pkg},
+ gochecksumtype.Config{DefaultSignifiesExhaustive: settings.DefaultSignifiesExhaustive})
for _, err := range errors {
err, ok := err.(gochecksumtype.Error)
if !ok {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
index 68cc338e4..194ea3535 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic/gocritic.go
@@ -8,6 +8,7 @@ import (
"path/filepath"
"reflect"
"runtime"
+ "slices"
"sort"
"strings"
"sync"
@@ -16,7 +17,6 @@ import (
gocriticlinter "github.com/go-critic/go-critic/linter"
_ "github.com/quasilyte/go-ruleguard/dsl"
"golang.org/x/exp/maps"
- "golang.org/x/exp/slices"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/config"
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
index 14d517fb3..c6b1aae6b 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goheader/goheader.go
@@ -51,7 +51,7 @@ func New(settings *config.GoHeaderSettings) *goanalysis.Linter {
return goanalysis.NewLinter(
linterName,
- "Checks is file header matches to pattern",
+ "Checks if file header matches to pattern",
[]*analysis.Analyzer{analyzer},
nil,
).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname/goprintffuncname.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname/goprintffuncname.go
index 85154a9b3..c206ffaa3 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname/goprintffuncname.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goprintffuncname/goprintffuncname.go
@@ -1,7 +1,7 @@
package goprintffuncname
import (
- "github.com/jirfag/go-printf-func-name/pkg/analyzer"
+ "github.com/golangci/go-printf-func-name/pkg/analyzer"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/goanalysis"
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/iface/iface.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/iface/iface.go
new file mode 100644
index 000000000..31f88160e
--- /dev/null
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/iface/iface.go
@@ -0,0 +1,57 @@
+package iface
+
+import (
+ "slices"
+
+ "github.com/uudashr/iface/identical"
+ "github.com/uudashr/iface/opaque"
+ "github.com/uudashr/iface/unused"
+ "golang.org/x/tools/go/analysis"
+
+ "github.com/golangci/golangci-lint/pkg/config"
+ "github.com/golangci/golangci-lint/pkg/goanalysis"
+)
+
+func New(settings *config.IfaceSettings) *goanalysis.Linter {
+ var conf map[string]map[string]any
+ if settings != nil {
+ conf = settings.Settings
+ }
+
+ return goanalysis.NewLinter(
+ "iface",
+ "Detect the incorrect use of interfaces, helping developers avoid interface pollution.",
+ analyzersFromSettings(settings),
+ conf,
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
+}
+
+func analyzersFromSettings(settings *config.IfaceSettings) []*analysis.Analyzer {
+ allAnalyzers := map[string]*analysis.Analyzer{
+ "identical": identical.Analyzer,
+ "unused": unused.Analyzer,
+ "opaque": opaque.Analyzer,
+ }
+
+ if settings == nil || len(settings.Enable) == 0 {
+ // Default enable `identical` analyzer only
+ return []*analysis.Analyzer{identical.Analyzer}
+ }
+
+ var analyzers []*analysis.Analyzer
+ for _, name := range uniqueNames(settings.Enable) {
+ if _, ok := allAnalyzers[name]; !ok {
+ // skip unknown analyzer
+ continue
+ }
+
+ analyzers = append(analyzers, allAnalyzers[name])
+ }
+
+ return analyzers
+}
+
// uniqueNames returns a sorted copy of names with duplicates removed.
//
// The input is cloned before sorting so the caller's slice — here the
// user-supplied `enable` list from the configuration — is never mutated
// as a side effect.
func uniqueNames(names []string) []string {
	names = slices.Clone(names)
	slices.Sort(names)
	return slices.Compact(names)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/staticcheck_common.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/staticcheck_common.go
index 958013d0d..e5a0e33b7 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/staticcheck_common.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/internal/staticcheck_common.go
@@ -9,11 +9,8 @@ import (
scconfig "honnef.co/go/tools/config"
"github.com/golangci/golangci-lint/pkg/config"
- "github.com/golangci/golangci-lint/pkg/logutils"
)
-var debugf = logutils.Debug(logutils.DebugKeyMegacheck)
-
func SetupStaticCheckAnalyzers(src []*lint.Analyzer, checks []string) []*analysis.Analyzer {
var names []string
for _, a := range src {
@@ -32,14 +29,6 @@ func SetupStaticCheckAnalyzers(src []*lint.Analyzer, checks []string) []*analysi
return ret
}
-func SetAnalyzerGoVersion(a *analysis.Analyzer, goVersion string) {
- if v := a.Flags.Lookup("go"); v != nil {
- if err := v.Value.Set(goVersion); err != nil {
- debugf("Failed to set go version: %s", err)
- }
- }
-}
-
func StaticCheckConfig(settings *config.StaticCheckSettings) *scconfig.Config {
var cfg *scconfig.Config
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mnd/mnd.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mnd/mnd.go
index 9aa8692ff..fe64653b9 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/mnd/mnd.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/mnd/mnd.go
@@ -12,27 +12,6 @@ func New(settings *config.MndSettings) *goanalysis.Linter {
return newMND(mnd.Analyzer, settings, nil)
}
-func NewGoMND(settings *config.GoMndSettings) *goanalysis.Linter {
- // shallow copy because mnd.Analyzer is a global variable.
- a := new(analysis.Analyzer)
- *a = *mnd.Analyzer
-
- // Used to force the analyzer name to use the same name as the linter.
- // This is required to avoid displaying the analyzer name inside the issue text.
- a.Name = "gomnd"
-
- var linterCfg map[string]map[string]any
-
- if settings != nil && len(settings.Settings) > 0 {
- // Convert deprecated setting.
- linterCfg = map[string]map[string]any{
- a.Name: settings.Settings["mnd"],
- }
- }
-
- return newMND(a, &settings.MndSettings, linterCfg)
-}
-
func newMND(a *analysis.Analyzer, settings *config.MndSettings, linterCfg map[string]map[string]any) *goanalysis.Linter {
if len(linterCfg) == 0 && settings != nil {
cfg := make(map[string]any)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret/nakedret.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret/nakedret.go
index beabf2cd8..e69fa5e9f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret/nakedret.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret/nakedret.go
@@ -14,7 +14,7 @@ func New(settings *config.NakedretSettings) *goanalysis.Linter {
maxLines = settings.MaxFuncLines
}
- a := nakedret.NakedReturnAnalyzer(maxLines)
+ a := nakedret.NakedReturnAnalyzer(maxLines, false)
return goanalysis.NewLinter(
a.Name,
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go
index c9237035d..d8d677d99 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nilnil/nilnil.go
@@ -1,8 +1,6 @@
package nilnil
import (
- "strings"
-
"github.com/Antonboom/nilnil/pkg/analyzer"
"golang.org/x/tools/go/analysis"
@@ -10,13 +8,16 @@ import (
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
-func New(cfg *config.NilNilSettings) *goanalysis.Linter {
+func New(settings *config.NilNilSettings) *goanalysis.Linter {
a := analyzer.New()
cfgMap := make(map[string]map[string]any)
- if cfg != nil && len(cfg.CheckedTypes) != 0 {
+ if settings != nil {
cfgMap[a.Name] = map[string]any{
- "checked-types": strings.Join(cfg.CheckedTypes, ","),
+ "detect-opposite": settings.DetectOpposite,
+ }
+ if len(settings.CheckedTypes) != 0 {
+ cfgMap[a.Name]["checked-types"] = settings.CheckedTypes
}
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery/execinquery.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
index 3832873c6..8b030f15d 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/execinquery/execinquery.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/recvcheck/recvcheck.go
@@ -1,14 +1,14 @@
-package execinquery
+package recvcheck
import (
- "github.com/lufeee/execinquery"
+ "github.com/raeperd/recvcheck"
"golang.org/x/tools/go/analysis"
"github.com/golangci/golangci-lint/pkg/goanalysis"
)
func New() *goanalysis.Linter {
- a := execinquery.Analyzer
+ a := recvcheck.Analyzer
return goanalysis.NewLinter(
a.Name,
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
index 90ce15db6..056a258e0 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/revive/revive.go
@@ -184,8 +184,8 @@ func toIssue(pass *analysis.Pass, object *jsonObject) goanalysis.Issue {
// This function mimics the GetConfig function of revive.
// This allows to get default values and right types.
// https://github.com/golangci/golangci-lint/issues/1745
-// https://github.com/mgechev/revive/blob/v1.3.7/config/config.go#L217
-// https://github.com/mgechev/revive/blob/v1.3.7/config/config.go#L169-L174
+// https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L220
+// https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L172-L178
func getConfig(cfg *config.ReviveSettings) (*lint.Config, error) {
conf := defaultConfig()
@@ -284,7 +284,7 @@ func safeTomlSlice(r []any) []any {
}
// This element is not exported by revive, so we need copy the code.
-// Extracted from https://github.com/mgechev/revive/blob/v1.3.9/config/config.go#L15
+// Extracted from https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L16
var defaultRules = []lint.Rule{
&rule.VarDeclarationsRule{},
&rule.PackageCommentsRule{},
@@ -368,12 +368,14 @@ var allRules = append([]lint.Rule{
&rule.EnforceSliceStyleRule{},
&rule.MaxControlNestingRule{},
&rule.CommentsDensityRule{},
+ &rule.FileLengthLimitRule{},
+ &rule.FilenameFormatRule{},
}, defaultRules...)
const defaultConfidence = 0.8
// This element is not exported by revive, so we need copy the code.
-// Extracted from https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L145
+// Extracted from https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L183
func normalizeConfig(cfg *lint.Config) {
// NOTE(ldez): this custom section for golangci-lint should be kept.
// ---
@@ -419,7 +421,7 @@ func normalizeConfig(cfg *lint.Config) {
}
// This element is not exported by revive, so we need copy the code.
-// Extracted from https://github.com/mgechev/revive/blob/v1.1.4/config/config.go#L214
+// Extracted from https://github.com/mgechev/revive/blob/v1.5.0/config/config.go#L252
func defaultConfig() *lint.Config {
defaultConfig := lint.Config{
Confidence: defaultConfidence,
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv/tenv.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv/tenv.go
index b80a783b6..2fc247fab 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv/tenv.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/tenv/tenv.go
@@ -25,5 +25,5 @@ func New(settings *config.TenvSettings) *goanalysis.Linter {
a.Doc,
[]*analysis.Analyzer{a},
cfg,
- ).WithLoadMode(goanalysis.LoadModeSyntax)
+ ).WithLoadMode(goanalysis.LoadModeTypesInfo)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go
index 160620338..d04a11b81 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/context.go
@@ -4,7 +4,7 @@ import (
"context"
"fmt"
- "github.com/golangci/golangci-lint/internal/pkgcache"
+ "github.com/golangci/golangci-lint/internal/cache"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/exitcodes"
"github.com/golangci/golangci-lint/pkg/fsutils"
@@ -19,13 +19,13 @@ type ContextBuilder struct {
pkgLoader *PackageLoader
fileCache *fsutils.FileCache
- pkgCache *pkgcache.Cache
+ pkgCache *cache.Cache
loadGuard *load.Guard
}
func NewContextBuilder(cfg *config.Config, pkgLoader *PackageLoader,
- fileCache *fsutils.FileCache, pkgCache *pkgcache.Cache, loadGuard *load.Guard,
+ fileCache *fsutils.FileCache, pkgCache *cache.Cache, loadGuard *load.Guard,
) *ContextBuilder {
return &ContextBuilder{
cfg: cfg,
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
index 57c51fa75..6d6d4b17e 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/config.go
@@ -81,7 +81,7 @@ func (lc *Config) IsSlowLinter() bool {
}
func (lc *Config) WithLoadFiles() *Config {
- lc.LoadMode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles
+ lc.LoadMode |= packages.NeedName | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedModule
return lc
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go
index 5c03630b2..9f29b5c4c 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/linter/context.go
@@ -5,7 +5,7 @@ import (
"golang.org/x/tools/go/packages"
- "github.com/golangci/golangci-lint/internal/pkgcache"
+ "github.com/golangci/golangci-lint/internal/cache"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/fsutils"
"github.com/golangci/golangci-lint/pkg/goanalysis/load"
@@ -24,7 +24,7 @@ type Context struct {
FileCache *fsutils.FileCache
Log logutils.Log
- PkgCache *pkgcache.Cache
+ PkgCache *cache.Cache
LoadGuard *load.Guard
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go
index c06cd9a03..d2a2dc3d0 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/builder_linter.go
@@ -23,7 +23,6 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/errchkjson"
"github.com/golangci/golangci-lint/pkg/golinters/errname"
"github.com/golangci/golangci-lint/pkg/golinters/errorlint"
- "github.com/golangci/golangci-lint/pkg/golinters/execinquery"
"github.com/golangci/golangci-lint/pkg/golinters/exhaustive"
"github.com/golangci/golangci-lint/pkg/golinters/exhaustruct"
"github.com/golangci/golangci-lint/pkg/golinters/exportloopref"
@@ -55,6 +54,7 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/gosmopolitan"
"github.com/golangci/golangci-lint/pkg/golinters/govet"
"github.com/golangci/golangci-lint/pkg/golinters/grouper"
+ "github.com/golangci/golangci-lint/pkg/golinters/iface"
"github.com/golangci/golangci-lint/pkg/golinters/importas"
"github.com/golangci/golangci-lint/pkg/golinters/inamedparam"
"github.com/golangci/golangci-lint/pkg/golinters/ineffassign"
@@ -85,6 +85,7 @@ import (
"github.com/golangci/golangci-lint/pkg/golinters/promlinter"
"github.com/golangci/golangci-lint/pkg/golinters/protogetter"
"github.com/golangci/golangci-lint/pkg/golinters/reassign"
+ "github.com/golangci/golangci-lint/pkg/golinters/recvcheck"
"github.com/golangci/golangci-lint/pkg/golinters/revive"
"github.com/golangci/golangci-lint/pkg/golinters/rowserrcheck"
"github.com/golangci/golangci-lint/pkg/golinters/sloglint"
@@ -134,7 +135,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
// When a new linter is added the version in `WithSince(...)` must be the next minor version of golangci-lint.
return []*linter.Config{
linter.NewConfig(asasalint.New(&cfg.LintersSettings.Asasalint)).
- WithSince("1.47.0").
+ WithSince("v1.47.0").
WithPresets(linter.PresetBugs).
WithLoadForGoAnalysis().
WithURL("https://github.com/alingse/asasalint"),
@@ -145,7 +146,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/tdakkota/asciicheck"),
linter.NewConfig(bidichk.New(&cfg.LintersSettings.BiDiChk)).
- WithSince("1.43.0").
+ WithSince("v1.43.0").
WithPresets(linter.PresetBugs).
WithURL("https://github.com/breml/bidichk"),
@@ -162,7 +163,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/lasiar/canonicalHeader"),
linter.NewConfig(containedctx.New()).
- WithSince("1.44.0").
+ WithSince("v1.44.0").
WithLoadForGoAnalysis().
WithPresets(linter.PresetStyle).
WithURL("https://github.com/sivchari/containedctx"),
@@ -213,7 +214,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/mibk/dupl"),
linter.NewConfig(dupword.New(&cfg.LintersSettings.DupWord)).
- WithSince("1.50.0").
+ WithSince("v1.50.0").
WithPresets(linter.PresetComment).
WithURL("https://github.com/Abirdcfly/dupword"),
@@ -231,7 +232,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/kisielk/errcheck"),
linter.NewConfig(errchkjson.New(&cfg.LintersSettings.ErrChkJSON)).
- WithSince("1.44.0").
+ WithSince("v1.44.0").
WithPresets(linter.PresetBugs).
WithLoadForGoAnalysis().
WithURL("https://github.com/breml/errchkjson"),
@@ -248,12 +249,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithLoadForGoAnalysis().
WithURL("https://github.com/polyfloyd/go-errorlint"),
- linter.NewConfig(execinquery.New()).
+ linter.NewConfig(linter.NewNoopDeprecated("execinquery", cfg, linter.DeprecationError)).
WithSince("v1.46.0").
WithPresets(linter.PresetSQL).
WithLoadForGoAnalysis().
WithURL("https://github.com/1uf3/execinquery").
- DeprecatedWarning("The repository of the linter has been archived by the owner.", "v1.58.0", ""),
+ DeprecatedError("The repository of the linter has been archived by the owner.", "v1.58.0", ""),
linter.NewConfig(exhaustive.New(&cfg.LintersSettings.Exhaustive)).
WithSince(" v1.28.0").
@@ -297,7 +298,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/gostaticanalysis/forcetypeassert"),
linter.NewConfig(fatcontext.New()).
- WithSince("1.58.0").
+ WithSince("v1.58.0").
WithPresets(linter.PresetPerformance).
WithLoadForGoAnalysis().
WithURL("https://github.com/Crocmagnon/fatcontext"),
@@ -334,7 +335,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.12.0").
WithPresets(linter.PresetStyle),
- linter.NewConfig(gochecksumtype.New()).
+ linter.NewConfig(gochecksumtype.New(&cfg.LintersSettings.GoChecksumType)).
WithSince("v1.55.0").
WithPresets(linter.PresetBugs).
WithLoadForGoAnalysis().
@@ -416,11 +417,11 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithPresets(linter.PresetStyle).
WithURL("https://github.com/tommy-muehle/go-mnd"),
- linter.NewConfig(mnd.NewGoMND(&cfg.LintersSettings.Gomnd)).
+ linter.NewConfig(linter.NewNoopDeprecated("gomnd", cfg, linter.DeprecationError)).
WithSince("v1.22.0").
WithPresets(linter.PresetStyle).
WithURL("https://github.com/tommy-muehle/go-mnd").
- DeprecatedWarning("The linter has been renamed.", "v1.58.0", "mnd"),
+ DeprecatedError("The linter has been renamed.", "v1.58.0", "mnd"),
linter.NewConfig(gomoddirectives.New(&cfg.LintersSettings.GoModDirectives)).
WithSince("v1.39.0").
@@ -435,7 +436,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
linter.NewConfig(goprintffuncname.New()).
WithSince("v1.23.0").
WithPresets(linter.PresetStyle).
- WithURL("https://github.com/jirfag/go-printf-func-name"),
+ WithURL("https://github.com/golangci/go-printf-func-name"),
linter.NewConfig(gosec.New(&cfg.LintersSettings.Gosec)).
WithSince("v1.0.0").
@@ -477,6 +478,12 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/esimonov/ifshort").
DeprecatedError("The repository of the linter has been deprecated by the owner.", "v1.48.0", ""),
+ linter.NewConfig(iface.New(&cfg.LintersSettings.Iface)).
+ WithSince("v1.62.0").
+ WithLoadForGoAnalysis().
+ WithPresets(linter.PresetStyle).
+ WithURL("https://github.com/uudashr/iface"),
+
linter.NewConfig(importas.New(&cfg.LintersSettings.ImportAs)).
WithSince("v1.38.0").
WithPresets(linter.PresetStyle).
@@ -652,11 +659,17 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithURL("https://github.com/ghostiam/protogetter"),
linter.NewConfig(reassign.New(&cfg.LintersSettings.Reassign)).
- WithSince("1.49.0").
+ WithSince("v1.49.0").
WithPresets(linter.PresetBugs).
WithLoadForGoAnalysis().
WithURL("https://github.com/curioswitch/go-reassign"),
+ linter.NewConfig(recvcheck.New()).
+ WithSince("v1.62.0").
+ WithPresets(linter.PresetBugs).
+ WithLoadForGoAnalysis().
+ WithURL("https://github.com/raeperd/recvcheck"),
+
linter.NewConfig(revive.New(&cfg.LintersSettings.Revive)).
WithSince("v1.37.0").
WithPresets(linter.PresetStyle, linter.PresetMetaLinter).
@@ -699,7 +712,7 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithLoadForGoAnalysis().
WithPresets(linter.PresetBugs, linter.PresetMetaLinter).
WithAlternativeNames(megacheckName).
- WithURL("https://staticcheck.io/"),
+ WithURL("https://staticcheck.dev/"),
linter.NewConfig(linter.NewNoopDeprecated("structcheck", cfg, linter.DeprecationError)).
WithSince("v1.0.0").
@@ -838,6 +851,6 @@ func (LinterBuilder) Build(cfg *config.Config) ([]*linter.Config, error) {
WithSince("v1.26.0").
WithPresets(linter.PresetStyle).
WithAutoFix().
- WithURL("https://github.com/golangci/golangci-lint/blob/master/pkg/golinters/nolintlint/README.md"),
+ WithURL("https://github.com/golangci/golangci-lint/tree/master/pkg/golinters/nolintlint/internal"),
}, nil
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
index c3b983ff6..2c47c7166 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go
@@ -115,17 +115,17 @@ func (r *Runner) Run(ctx context.Context, linters []*linter.Config) ([]result.Is
)
for _, lc := range linters {
- sw.TrackStage(lc.Name(), func() {
- linterIssues, err := r.runLinterSafe(ctx, r.lintCtx, lc)
- if err != nil {
- lintErrors = errors.Join(lintErrors, fmt.Errorf("can't run linter %s", lc.Linter.Name()), err)
- r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err)
+ linterIssues, err := timeutils.TrackStage(sw, lc.Name(), func() ([]result.Issue, error) {
+ return r.runLinterSafe(ctx, r.lintCtx, lc)
+ })
+ if err != nil {
+ lintErrors = errors.Join(lintErrors, fmt.Errorf("can't run linter %s", lc.Linter.Name()), err)
+ r.Log.Warnf("Can't run linter %s: %v", lc.Linter.Name(), err)
- return
- }
+ continue
+ }
- issues = append(issues, linterIssues...)
- })
+ issues = append(issues, linterIssues...)
}
return r.processLintResults(issues), lintErrors
@@ -188,9 +188,7 @@ func (r *Runner) processLintResults(inIssues []result.Issue) []result.Issue {
// finalize processors: logging, clearing, no heavy work here
for _, p := range r.Processors {
- sw.TrackStage(p.Name(), func() {
- p.Finish()
- })
+ sw.TrackStage(p.Name(), p.Finish)
}
if issuesBefore != issuesAfter {
@@ -216,10 +214,8 @@ func (r *Runner) printPerProcessorStat(stat map[string]processorStat) {
func (r *Runner) processIssues(issues []result.Issue, sw *timeutils.Stopwatch, statPerProcessor map[string]processorStat) []result.Issue {
for _, p := range r.Processors {
- var newIssues []result.Issue
- var err error
- sw.TrackStage(p.Name(), func() {
- newIssues, err = p.Process(issues)
+ newIssues, err := timeutils.TrackStage(sw, p.Name(), func() ([]result.Issue, error) {
+ return p.Process(issues)
})
if err != nil {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go
index e4bb98109..3c27e2557 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/logutils.go
@@ -60,11 +60,10 @@ const (
)
const (
- DebugKeyGoCritic = "gocritic" // Debugs `go-critic` linter.
- DebugKeyGovet = "govet" // Debugs `govet` linter.
- DebugKeyMegacheck = "megacheck" // Debugs `staticcheck` related linters.
- DebugKeyNolint = "nolint" // Debugs a filter excluding issues by `//nolint` comments.
- DebugKeyRevive = "revive" // Debugs `revive` linter.
+ DebugKeyGoCritic = "gocritic" // Debugs `go-critic` linter.
+ DebugKeyGovet = "govet" // Debugs `govet` linter.
+ DebugKeyNolint = "nolint" // Debugs a filter excluding issues by `//nolint` comments.
+ DebugKeyRevive = "revive" // Debugs `revive` linter.
)
func getEnabledDebugs() map[string]bool {
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
index 50d6dcff3..b65339682 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go
@@ -12,9 +12,10 @@ const defaultCodeClimateSeverity = "critical"
// CodeClimateIssue is a subset of the Code Climate spec.
// https://github.com/codeclimate/platform/blob/master/spec/analyzers/SPEC.md#data-types
// It is just enough to support GitLab CI Code Quality.
-// https://docs.gitlab.com/ee/user/project/merge_requests/code_quality.html
+// https://docs.gitlab.com/ee/ci/testing/code_quality.html#implement-a-custom-tool
type CodeClimateIssue struct {
Description string `json:"description"`
+ CheckName string `json:"check_name"`
Severity string `json:"severity,omitempty"`
Fingerprint string `json:"fingerprint"`
Location struct {
@@ -35,10 +36,13 @@ func NewCodeClimate(w io.Writer) *CodeClimate {
func (p CodeClimate) Print(issues []result.Issue) error {
codeClimateIssues := make([]CodeClimateIssue, 0, len(issues))
+
for i := range issues {
issue := &issues[i]
+
codeClimateIssue := CodeClimateIssue{}
codeClimateIssue.Description = issue.Description()
+ codeClimateIssue.CheckName = issue.FromLinter
codeClimateIssue.Location.Path = issue.Pos.Filename
codeClimateIssue.Location.Lines.Begin = issue.Pos.Line
codeClimateIssue.Fingerprint = issue.Fingerprint()
@@ -51,9 +55,5 @@ func (p CodeClimate) Print(issues []result.Issue) error {
codeClimateIssues = append(codeClimateIssues, codeClimateIssue)
}
- err := json.NewEncoder(p.w).Encode(codeClimateIssues)
- if err != nil {
- return err
- }
- return nil
+ return json.NewEncoder(p.w).Encode(codeClimateIssues)
}
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
index 4915dc479..764af5a92 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go
@@ -8,7 +8,7 @@ import (
"sort"
"strings"
- "github.com/golangci/golangci-lint/internal/robustio"
+ "github.com/golangci/golangci-lint/internal/go/robustio"
"github.com/golangci/golangci-lint/pkg/config"
"github.com/golangci/golangci-lint/pkg/fsutils"
"github.com/golangci/golangci-lint/pkg/logutils"
@@ -56,9 +56,8 @@ func (p Fixer) Process(issues []result.Issue) ([]result.Issue, error) {
}
for file, issuesToFix := range issuesToFixPerFile {
- var err error
- p.sw.TrackStage("all", func() {
- err = p.fixIssuesInFile(file, issuesToFix)
+ err := p.sw.TrackStageErr("all", func() error {
+ return p.fixIssuesInFile(file, issuesToFix)
})
if err != nil {
p.log.Errorf("Failed to fix issues in file %s: %s", file, err)
diff --git a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go
index d944dea2e..95b16de9f 100644
--- a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go
+++ b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go
@@ -114,3 +114,25 @@ func (s *Stopwatch) TrackStage(name string, f func()) {
s.stages[name] += time.Since(startedAt)
s.mu.Unlock()
}
+
+func (s *Stopwatch) TrackStageErr(name string, f func() error) error {
+ startedAt := time.Now()
+ err := f()
+
+ s.mu.Lock()
+ s.stages[name] += time.Since(startedAt)
+ s.mu.Unlock()
+
+ return err
+}
+
+func TrackStage[T any](s *Stopwatch, name string, f func() (T, error)) (T, error) {
+ var result T
+ var err error
+
+ s.TrackStage(name, func() {
+ result, err = f()
+ })
+
+ return result, err
+}