aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/quasilyte
diff options
context:
space:
mode:
authorDmitry Vyukov <dvyukov@google.com>2020-09-15 18:05:35 +0200
committerDmitry Vyukov <dvyukov@google.com>2020-09-15 19:34:30 +0200
commit712de1c63d9db97c81af68cd0dc4372c53d2e57a (patch)
treeae1761fec52c3ae4ddd003a4130ddbda8d0a2d69 /vendor/github.com/quasilyte
parent298a69c38dd5c8a9bbd7a022e88f4ddbcf885e16 (diff)
vendor/github.com/golangci/golangci-lint: update to v1.31
Diffstat (limited to 'vendor/github.com/quasilyte')
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/LICENSE29
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go3
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go53
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes2
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE27
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md55
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go61
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go72
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go332
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go1108
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go452
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go261
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go63
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go9
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go40
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go36
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go24
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go159
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go669
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go45
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go194
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go340
-rw-r--r--vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go205
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/LICENSE21
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/README.md29
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/ast.go64
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/errors.go27
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/go.mod3
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/lexer.go454
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/operation.go195
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/operation_string.go59
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/parser.go503
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/pos.go10
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go59
-rw-r--r--vendor/github.com/quasilyte/regex/syntax/utils.go30
35 files changed, 5693 insertions, 0 deletions
diff --git a/vendor/github.com/quasilyte/go-ruleguard/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/LICENSE
new file mode 100644
index 000000000..f0381fb49
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/LICENSE
@@ -0,0 +1,29 @@
+BSD 3-Clause License
+
+Copyright (c) 2019, Iskander (Alex) Sharipov / quasilyte
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go
new file mode 100644
index 000000000..3ba584d3f
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dsl_sources.go
@@ -0,0 +1,3 @@
+package dslgen
+
+var Fluent = []byte("package fluent\n\n// Matcher is a main API group-level entry point.\n// It's used to define and configure the group rules.\n// It also represents a map of all rule-local variables.\ntype Matcher map[string]Var\n\n// Import loads given package path into a rule group imports table.\n//\n// That table is used during the rules compilation.\n//\n// The table has the following effect on the rules:\n//\t* For type expressions, it's used to resolve the\n//\t full package paths of qualified types, like `foo.Bar`.\n//\t If Import(`a/b/foo`) is called, `foo.Bar` will match\n//\t `a/b/foo.Bar` type during the pattern execution.\nfunc (m Matcher) Import(pkgPath string) {}\n\n// Match specifies a set of patterns that match a rule being defined.\n// Pattern matching succeeds if at least 1 pattern matches.\n//\n// If none of the given patterns matched, rule execution stops.\nfunc (m Matcher) Match(pattern string, alternatives ...string) Matcher {\n\treturn m\n}\n\n// Where applies additional constraint to a match.\n// If a given cond is not satisfied, a match is rejected and\n// rule execution stops.\nfunc (m Matcher) Where(cond bool) Matcher {\n\treturn m\n}\n\n// Report prints a message if associated rule match is successful.\n//\n// A message is a string that can contain interpolated expressions.\n// For every matched variable it's possible to interpolate\n// their printed representation into the message text with $<name>.\n// An entire match can be addressed with $$.\nfunc (m Matcher) Report(message string) Matcher {\n\treturn m\n}\n\n// Suggest assigns a quickfix suggestion for the matched code.\nfunc (m Matcher) Suggest(suggestion string) Matcher {\n\treturn m\n}\n\n// At binds the reported node to a named submatch.\n// If no explicit location is given, the outermost node ($$) is used.\nfunc (m Matcher) At(v Var) Matcher {\n\treturn m\n}\n\n// Var is a pattern variable that describes a named submatch.\ntype Var struct {\n\t// Pure reports whether expr 
matched by var is side-effect-free.\n\tPure bool\n\n\t// Const reports whether expr matched by var is a constant value.\n\tConst bool\n\n\t// Addressable reports whether the corresponding expression is addressable.\n\t// See https://golang.org/ref/spec#Address_operators.\n\tAddressable bool\n\n\t// Type is a type of a matched expr.\n\tType ExprType\n\n\t// Test is a captured node text as in the source code.\n\tText MatchedText\n}\n\n// ExprType describes a type of a matcher expr.\ntype ExprType struct {\n\t// Size represents expression type size in bytes.\n\tSize int\n}\n\n// AssignableTo reports whether a type is assign-compatible with a given type.\n// See https://golang.org/pkg/go/types/#AssignableTo.\nfunc (ExprType) AssignableTo(typ string) bool { return boolResult }\n\n// ConvertibleTo reports whether a type is conversible to a given type.\n// See https://golang.org/pkg/go/types/#ConvertibleTo.\nfunc (ExprType) ConvertibleTo(typ string) bool { return boolResult }\n\n// Implements reports whether a type implements a given interface.\n// See https://golang.org/pkg/go/types/#Implements.\nfunc (ExprType) Implements(typ string) bool { return boolResult }\n\n// Is reports whether a type is identical to a given type.\nfunc (ExprType) Is(typ string) bool { return boolResult }\n\n// MatchedText represents a source text associated with a matched node.\ntype MatchedText string\n\n// Matches reports whether the text matches the given regexp pattern.\nfunc (MatchedText) Matches(pattern string) bool { return boolResult }\n\n\n\nvar boolResult bool\n\n")
diff --git a/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go
new file mode 100644
index 000000000..a2269b2ed
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/dslgen/dslgen.go
@@ -0,0 +1,53 @@
+// +build generate
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+func main() {
+ // See #23.
+
+ data, err := dirToBytes("../dsl/fluent")
+ if err != nil {
+ panic(err)
+ }
+
+ f, err := os.Create("./dsl_sources.go")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+
+ fmt.Fprintf(f, `package dslgen
+
+var Fluent = []byte(%q)
+`, string(data))
+}
+
+func dirToBytes(dir string) ([]byte, error) {
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ for i, f := range files {
+ data, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
+ if err != nil {
+ return nil, err
+ }
+ if i != 0 {
+ newline := bytes.IndexByte(data, '\n')
+ data = data[newline:]
+ }
+ buf.Write(data)
+ buf.WriteByte('\n')
+ }
+ return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes
new file mode 100644
index 000000000..6f9522992
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/.gitattributes
@@ -0,0 +1,2 @@
+# To prevent CRLF breakages on Windows for fragile files, like testdata.
+* -text
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
new file mode 100644
index 000000000..a06c5ebfc
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017, Daniel Martí. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of the copyright holder nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md
new file mode 100644
index 000000000..12cb0fdc4
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/README.md
@@ -0,0 +1,55 @@
+# gogrep
+
+ go get mvdan.cc/gogrep
+
+Search for Go code using syntax trees. Work in progress.
+
+ gogrep -x 'if $x != nil { return $x, $*_ }'
+
+### Instructions
+
+ usage: gogrep commands [packages]
+
+A command is of the form "-A pattern", where -A is one of:
+
+ -x find all nodes matching a pattern
+ -g discard nodes not matching a pattern
+ -v discard nodes matching a pattern
+ -a filter nodes by certain attributes
+ -s substitute with a given syntax tree
+ -w write source back to disk or stdout
+
+A pattern is a piece of Go code which may include wildcards. It can be:
+
+ a statement (many if split by semicolonss)
+ an expression (many if split by commas)
+ a type expression
+ a top-level declaration (var, func, const)
+ an entire file
+
+Wildcards consist of `$` and a name. All wildcards with the same name
+within an expression must match the same node, excluding "_". Example:
+
+ $x.$_ = $x // assignment of self to a field in self
+
+If `*` is before the name, it will match any number of nodes. Example:
+
+ fmt.Fprintf(os.Stdout, $*_) // all Fprintfs on stdout
+
+`*` can also be used to match optional nodes, like:
+
+ for $*_ { $*_ } // will match all for loops
+ if $*_; $b { $*_ } // will match all ifs with condition $b
+
+Regexes can also be used to match certain identifier names only. The
+`.*` pattern can be used to match all identifiers. Example:
+
+ fmt.$(_ /Fprint.*/)(os.Stdout, $*_) // all Fprint* on stdout
+
+The nodes resulting from applying the commands will be printed line by
+line to standard output.
+
+Here are two simple examples of the -a operand:
+
+ gogrep -x '$x + $y' // will match both numerical and string "+" operations
+ gogrep -x '$x + $y' -a 'type(string)' // matches only string concatenations
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go
new file mode 100644
index 000000000..f366af84f
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/kludge.go
@@ -0,0 +1,61 @@
+package gogrep
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// This is an ugly way to use gogrep as a library.
+// It can go away when there will be another option.
+
+// Parse creates a gogrep pattern out of a given string expression.
+func Parse(fset *token.FileSet, expr string) (*Pattern, error) {
+ m := matcher{
+ fset: fset,
+ Info: &types.Info{},
+ }
+ node, err := m.parseExpr(expr)
+ if err != nil {
+ return nil, err
+ }
+ return &Pattern{m: &m, Expr: node}, nil
+}
+
+// Pattern is a compiled gogrep pattern.
+type Pattern struct {
+ Expr ast.Node
+ m *matcher
+}
+
+// MatchData describes a successful pattern match.
+type MatchData struct {
+ Node ast.Node
+ Values map[string]ast.Node
+}
+
+// MatchNode calls cb if n matches a pattern.
+func (p *Pattern) MatchNode(n ast.Node, cb func(MatchData)) {
+ p.m.values = map[string]ast.Node{}
+ if p.m.node(p.Expr, n) {
+ cb(MatchData{
+ Values: p.m.values,
+ Node: n,
+ })
+ }
+}
+
+// Match calls cb for any pattern match found in n.
+func (p *Pattern) Match(n ast.Node, cb func(MatchData)) {
+ cmd := exprCmd{name: "x", value: p.Expr}
+ matches := p.m.cmdRange(cmd, []submatch{{
+ values: map[string]ast.Node{},
+ node: n,
+ }})
+ for _, match := range matches {
+ cb(MatchData{
+ Values: match.values,
+ Node: match.node,
+ })
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go
new file mode 100644
index 000000000..09ab3fd01
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/load.go
@@ -0,0 +1,72 @@
+// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+func (m *matcher) load(wd string, args ...string) ([]*packages.Package, error) {
+ mode := packages.NeedName | packages.NeedImports | packages.NeedSyntax |
+ packages.NeedTypes | packages.NeedTypesInfo
+ if m.recursive { // need the syntax trees for the dependencies too
+ mode |= packages.NeedDeps
+ }
+ cfg := &packages.Config{
+ Mode: mode,
+ Dir: wd,
+ Fset: m.fset,
+ Tests: m.tests,
+ }
+ pkgs, err := packages.Load(cfg, args...)
+ if err != nil {
+ return nil, err
+ }
+ jointErr := ""
+ packages.Visit(pkgs, nil, func(pkg *packages.Package) {
+ for _, err := range pkg.Errors {
+ jointErr += err.Error() + "\n"
+ }
+ })
+ if jointErr != "" {
+ return nil, fmt.Errorf("%s", jointErr)
+ }
+
+ // Make a sorted list of the packages, including transitive dependencies
+ // if recurse is true.
+ byPath := make(map[string]*packages.Package)
+ var addDeps func(*packages.Package)
+ addDeps = func(pkg *packages.Package) {
+ if strings.HasSuffix(pkg.PkgPath, ".test") {
+ // don't add recursive test deps
+ return
+ }
+ for _, imp := range pkg.Imports {
+ if _, ok := byPath[imp.PkgPath]; ok {
+ continue // seen; avoid recursive call
+ }
+ byPath[imp.PkgPath] = imp
+ addDeps(imp)
+ }
+ }
+ for _, pkg := range pkgs {
+ byPath[pkg.PkgPath] = pkg
+ if m.recursive {
+ // add all dependencies once
+ addDeps(pkg)
+ }
+ }
+ pkgs = pkgs[:0]
+ for _, pkg := range byPath {
+ pkgs = append(pkgs, pkg)
+ }
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].PkgPath < pkgs[j].PkgPath
+ })
+ return pkgs, nil
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go
new file mode 100644
index 000000000..004cb32e9
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/main.go
@@ -0,0 +1,332 @@
+// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var usage = func() {
+ fmt.Fprint(os.Stderr, `usage: gogrep commands [packages]
+
+gogrep performs a query on the given Go packages.
+
+ -r search dependencies recursively too
+ -tests search test files too (and direct test deps, with -r)
+
+A command is one of the following:
+
+ -x pattern find all nodes matching a pattern
+ -g pattern discard nodes not matching a pattern
+ -v pattern discard nodes matching a pattern
+ -a attribute discard nodes without an attribute
+ -s pattern substitute with a given syntax tree
+ -p number navigate up a number of node parents
+ -w write the entire source code back
+
+A pattern is a piece of Go code which may include dollar expressions. It can be
+a number of statements, a number of expressions, a declaration, or an entire
+file.
+
+A dollar expression consist of '$' and a name. Dollar expressions with the same
+name within a query always match the same node, excluding "_". Example:
+
+ -x '$x.$_ = $x' # assignment of self to a field in self
+
+If '*' is before the name, it will match any number of nodes. Example:
+
+ -x 'fmt.Fprintf(os.Stdout, $*_)' # all Fprintfs on stdout
+
+By default, the resulting nodes will be printed one per line to standard output.
+To update the input files, use -w.
+`)
+}
+
+func main() {
+ m := matcher{
+ out: os.Stdout,
+ ctx: &build.Default,
+ }
+ err := m.fromArgs(".", os.Args[1:])
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+}
+
+type matcher struct {
+ out io.Writer
+ ctx *build.Context
+
+ fset *token.FileSet
+
+ parents map[ast.Node]ast.Node
+
+ recursive, tests bool
+ aggressive bool
+
+ // information about variables (wildcards), by id (which is an
+ // integer starting at 0)
+ vars []varInfo
+
+ // node values recorded by name, excluding "_" (used only by the
+ // actual matching phase)
+ values map[string]ast.Node
+ scope *types.Scope
+
+ *types.Info
+ stdImporter types.Importer
+}
+
+type varInfo struct {
+ name string
+ any bool
+}
+
+func (m *matcher) info(id int) varInfo {
+ if id < 0 {
+ return varInfo{}
+ }
+ return m.vars[id]
+}
+
+type exprCmd struct {
+ name string
+ src string
+ value interface{}
+}
+
+type strCmdFlag struct {
+ name string
+ cmds *[]exprCmd
+}
+
+func (o *strCmdFlag) String() string { return "" }
+func (o *strCmdFlag) Set(val string) error {
+ *o.cmds = append(*o.cmds, exprCmd{name: o.name, src: val})
+ return nil
+}
+
+type boolCmdFlag struct {
+ name string
+ cmds *[]exprCmd
+}
+
+func (o *boolCmdFlag) String() string { return "" }
+func (o *boolCmdFlag) Set(val string) error {
+ if val != "true" {
+ return fmt.Errorf("flag can only be true")
+ }
+ *o.cmds = append(*o.cmds, exprCmd{name: o.name})
+ return nil
+}
+func (o *boolCmdFlag) IsBoolFlag() bool { return true }
+
+func (m *matcher) fromArgs(wd string, args []string) error {
+ m.fset = token.NewFileSet()
+ cmds, args, err := m.parseCmds(args)
+ if err != nil {
+ return err
+ }
+ pkgs, err := m.load(wd, args...)
+ if err != nil {
+ return err
+ }
+ var all []ast.Node
+ for _, pkg := range pkgs {
+ m.Info = pkg.TypesInfo
+ nodes := make([]ast.Node, len(pkg.Syntax))
+ for i, f := range pkg.Syntax {
+ nodes[i] = f
+ }
+ all = append(all, m.matches(cmds, nodes)...)
+ }
+ for _, n := range all {
+ fpos := m.fset.Position(n.Pos())
+ if strings.HasPrefix(fpos.Filename, wd) {
+ fpos.Filename = fpos.Filename[len(wd)+1:]
+ }
+ fmt.Fprintf(m.out, "%v: %s\n", fpos, singleLinePrint(n))
+ }
+ return nil
+}
+
+func (m *matcher) parseCmds(args []string) ([]exprCmd, []string, error) {
+ flagSet := flag.NewFlagSet("gogrep", flag.ExitOnError)
+ flagSet.Usage = usage
+ flagSet.BoolVar(&m.recursive, "r", false, "search dependencies recursively too")
+ flagSet.BoolVar(&m.tests, "tests", false, "search test files too (and direct test deps, with -r)")
+
+ var cmds []exprCmd
+ flagSet.Var(&strCmdFlag{
+ name: "x",
+ cmds: &cmds,
+ }, "x", "")
+ flagSet.Var(&strCmdFlag{
+ name: "g",
+ cmds: &cmds,
+ }, "g", "")
+ flagSet.Var(&strCmdFlag{
+ name: "v",
+ cmds: &cmds,
+ }, "v", "")
+ flagSet.Var(&strCmdFlag{
+ name: "a",
+ cmds: &cmds,
+ }, "a", "")
+ flagSet.Var(&strCmdFlag{
+ name: "s",
+ cmds: &cmds,
+ }, "s", "")
+ flagSet.Var(&strCmdFlag{
+ name: "p",
+ cmds: &cmds,
+ }, "p", "")
+ flagSet.Var(&boolCmdFlag{
+ name: "w",
+ cmds: &cmds,
+ }, "w", "")
+ flagSet.Parse(args)
+ paths := flagSet.Args()
+
+ if len(cmds) < 1 {
+ return nil, nil, fmt.Errorf("need at least one command")
+ }
+ for i, cmd := range cmds {
+ switch cmd.name {
+ case "w":
+ continue // no expr
+ case "p":
+ n, err := strconv.Atoi(cmd.src)
+ if err != nil {
+ return nil, nil, err
+ }
+ cmds[i].value = n
+ case "a":
+ m, err := m.parseAttrs(cmd.src)
+ if err != nil {
+ return nil, nil, fmt.Errorf("cannot parse mods: %v", err)
+ }
+ cmds[i].value = m
+ default:
+ node, err := m.parseExpr(cmd.src)
+ if err != nil {
+ return nil, nil, err
+ }
+ cmds[i].value = node
+ }
+ }
+ return cmds, paths, nil
+}
+
+type bufferJoinLines struct {
+ bytes.Buffer
+ last string
+}
+
+var rxNeedSemicolon = regexp.MustCompile(`([])}a-zA-Z0-9"'` + "`" + `]|\+\+|--)$`)
+
+func (b *bufferJoinLines) Write(p []byte) (n int, err error) {
+ if string(p) == "\n" {
+ if b.last == "\n" {
+ return 1, nil
+ }
+ if rxNeedSemicolon.MatchString(b.last) {
+ b.Buffer.WriteByte(';')
+ }
+ b.Buffer.WriteByte(' ')
+ b.last = "\n"
+ return 1, nil
+ }
+ p = bytes.Trim(p, "\t")
+ n, err = b.Buffer.Write(p)
+ b.last = string(p)
+ return
+}
+
+func (b *bufferJoinLines) String() string {
+ return strings.TrimSuffix(b.Buffer.String(), "; ")
+}
+
+// inspect is like ast.Inspect, but it supports our extra nodeList Node
+// type (only at the top level).
+func inspect(node ast.Node, fn func(ast.Node) bool) {
+ // ast.Walk barfs on ast.Node types it doesn't know, so
+ // do the first level manually here
+ list, ok := node.(nodeList)
+ if !ok {
+ ast.Inspect(node, fn)
+ return
+ }
+ if !fn(list) {
+ return
+ }
+ for i := 0; i < list.len(); i++ {
+ ast.Inspect(list.at(i), fn)
+ }
+ fn(nil)
+}
+
+var emptyFset = token.NewFileSet()
+
+func singleLinePrint(node ast.Node) string {
+ var buf bufferJoinLines
+ inspect(node, func(node ast.Node) bool {
+ bl, ok := node.(*ast.BasicLit)
+ if !ok || bl.Kind != token.STRING {
+ return true
+ }
+ if !strings.HasPrefix(bl.Value, "`") {
+ return true
+ }
+ if !strings.Contains(bl.Value, "\n") {
+ return true
+ }
+ bl.Value = strconv.Quote(bl.Value[1 : len(bl.Value)-1])
+ return true
+ })
+ printNode(&buf, emptyFset, node)
+ return buf.String()
+}
+
+func printNode(w io.Writer, fset *token.FileSet, node ast.Node) {
+ switch x := node.(type) {
+ case exprList:
+ if len(x) == 0 {
+ return
+ }
+ printNode(w, fset, x[0])
+ for _, n := range x[1:] {
+ fmt.Fprintf(w, ", ")
+ printNode(w, fset, n)
+ }
+ case stmtList:
+ if len(x) == 0 {
+ return
+ }
+ printNode(w, fset, x[0])
+ for _, n := range x[1:] {
+ fmt.Fprintf(w, "; ")
+ printNode(w, fset, n)
+ }
+ default:
+ err := printer.Fprint(w, fset, node)
+ if err != nil && strings.Contains(err.Error(), "go/printer: unsupported node type") {
+ // Should never happen, but make it obvious when it does.
+ panic(fmt.Errorf("cannot print node %T: %v", node, err))
+ }
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go
new file mode 100644
index 000000000..08b53d87d
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/match.go
@@ -0,0 +1,1108 @@
+// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "go/ast"
+ "go/importer"
+ "go/token"
+ "go/types"
+ "regexp"
+ "strconv"
+)
+
+func (m *matcher) matches(cmds []exprCmd, nodes []ast.Node) []ast.Node {
+ m.parents = make(map[ast.Node]ast.Node)
+ m.fillParents(nodes...)
+ initial := make([]submatch, len(nodes))
+ for i, node := range nodes {
+ initial[i].node = node
+ initial[i].values = make(map[string]ast.Node)
+ }
+ final := m.submatches(cmds, initial)
+ finalNodes := make([]ast.Node, len(final))
+ for i := range finalNodes {
+ finalNodes[i] = final[i].node
+ }
+ return finalNodes
+}
+
+func (m *matcher) fillParents(nodes ...ast.Node) {
+ stack := make([]ast.Node, 1, 32)
+ for _, node := range nodes {
+ inspect(node, func(node ast.Node) bool {
+ if node == nil {
+ stack = stack[:len(stack)-1]
+ return true
+ }
+ if _, ok := node.(nodeList); !ok {
+ m.parents[node] = stack[len(stack)-1]
+ }
+ stack = append(stack, node)
+ return true
+ })
+ }
+}
+
+type submatch struct {
+ node ast.Node
+ values map[string]ast.Node
+}
+
+func valsCopy(values map[string]ast.Node) map[string]ast.Node {
+ v2 := make(map[string]ast.Node, len(values))
+ for k, v := range values {
+ v2[k] = v
+ }
+ return v2
+}
+
+func (m *matcher) submatches(cmds []exprCmd, subs []submatch) []submatch {
+ if len(cmds) == 0 {
+ return subs
+ }
+ cmd := cmds[0]
+ var fn func(exprCmd, []submatch) []submatch
+ switch cmd.name {
+ case "x":
+ fn = m.cmdRange
+ case "g":
+ fn = m.cmdFilter(true)
+ case "v":
+ fn = m.cmdFilter(false)
+ case "s":
+ fn = m.cmdSubst
+ case "a":
+ fn = m.cmdAttr
+ case "p":
+ fn = m.cmdParents
+ case "w":
+ if len(cmds) > 1 {
+ panic("-w must be the last command")
+ }
+ fn = m.cmdWrite
+ default:
+ panic(fmt.Sprintf("unknown command: %q", cmd.name))
+ }
+ return m.submatches(cmds[1:], fn(cmd, subs))
+}
+
+func (m *matcher) cmdRange(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ seen := map[nodePosHash]bool{}
+
+ // The values context for each new submatch must be a new copy
+ // from its parent submatch. If we don't do this copy, all the
+ // submatches would share the same map and have side effects.
+ var startValues map[string]ast.Node
+
+ match := func(exprNode, node ast.Node) {
+ if node == nil {
+ return
+ }
+ m.values = valsCopy(startValues)
+ found := m.topNode(exprNode, node)
+ if found == nil {
+ return
+ }
+ hash := posHash(found)
+ if !seen[hash] {
+ matches = append(matches, submatch{
+ node: found,
+ values: m.values,
+ })
+ seen[hash] = true
+ }
+ }
+ for _, sub := range subs {
+ startValues = valsCopy(sub.values)
+ m.walkWithLists(cmd.value.(ast.Node), sub.node, match)
+ }
+ return matches
+}
+
+func (m *matcher) cmdFilter(wantAny bool) func(exprCmd, []submatch) []submatch {
+ return func(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ any := false
+ match := func(exprNode, node ast.Node) {
+ if node == nil {
+ return
+ }
+ found := m.topNode(exprNode, node)
+ if found != nil {
+ any = true
+ }
+ }
+ for _, sub := range subs {
+ any = false
+ m.values = sub.values
+ m.walkWithLists(cmd.value.(ast.Node), sub.node, match)
+ if any == wantAny {
+ matches = append(matches, sub)
+ }
+ }
+ return matches
+ }
+}
+
+func (m *matcher) cmdAttr(cmd exprCmd, subs []submatch) []submatch {
+ var matches []submatch
+ for _, sub := range subs {
+ m.values = sub.values
+ if m.attrApplies(sub.node, cmd.value.(attribute)) {
+ matches = append(matches, sub)
+ }
+ }
+ return matches
+}
+
+func (m *matcher) cmdParents(cmd exprCmd, subs []submatch) []submatch {
+ for i := range subs {
+ sub := &subs[i]
+ reps := cmd.value.(int)
+ for j := 0; j < reps; j++ {
+ sub.node = m.parentOf(sub.node)
+ }
+ }
+ return subs
+}
+
+func (m *matcher) attrApplies(node ast.Node, attr interface{}) bool {
+ if rx, ok := attr.(*regexp.Regexp); ok {
+ if exprStmt, ok := node.(*ast.ExprStmt); ok {
+ // since we prefer matching entire statements, get the
+ // ident from the ExprStmt
+ node = exprStmt.X
+ }
+ ident, ok := node.(*ast.Ident)
+ return ok && rx.MatchString(ident.Name)
+ }
+ expr, _ := node.(ast.Expr)
+ if expr == nil {
+ return false // only exprs have types
+ }
+ t := m.Info.TypeOf(expr)
+ if t == nil {
+ return false // an expr, but no type?
+ }
+ tv := m.Info.Types[expr]
+ switch x := attr.(type) {
+ case typeCheck:
+ want := m.resolveType(m.scope, x.expr)
+ switch {
+ case x.op == "type" && !types.Identical(t, want):
+ return false
+ case x.op == "asgn" && !types.AssignableTo(t, want):
+ return false
+ case x.op == "conv" && !types.ConvertibleTo(t, want):
+ return false
+ }
+ case typProperty:
+ switch {
+ case x == "comp" && !types.Comparable(t):
+ return false
+ case x == "addr" && !tv.Addressable():
+ return false
+ }
+ case typUnderlying:
+ u := t.Underlying()
+ uok := true
+ switch x {
+ case "basic":
+ _, uok = u.(*types.Basic)
+ case "array":
+ _, uok = u.(*types.Array)
+ case "slice":
+ _, uok = u.(*types.Slice)
+ case "struct":
+ _, uok = u.(*types.Struct)
+ case "interface":
+ _, uok = u.(*types.Interface)
+ case "pointer":
+ _, uok = u.(*types.Pointer)
+ case "func":
+ _, uok = u.(*types.Signature)
+ case "map":
+ _, uok = u.(*types.Map)
+ case "chan":
+ _, uok = u.(*types.Chan)
+ }
+ if !uok {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *matcher) walkWithLists(exprNode, node ast.Node, fn func(exprNode, node ast.Node)) {
+ visit := func(node ast.Node) bool {
+ fn(exprNode, node)
+ for _, list := range nodeLists(node) {
+ fn(exprNode, list)
+ if id := m.wildAnyIdent(exprNode); id != nil {
+ // so that "$*a" will match "a, b"
+ fn(exprList([]ast.Expr{id}), list)
+ // so that "$*a" will match "a; b"
+ fn(toStmtList(id), list)
+ }
+ }
+ return true
+ }
+ inspect(node, visit)
+}
+
+// topNode matches exprNode against node at the top level of a search.
+// When both sides are statement lists, a partial match is allowed and the
+// matched sub-list is returned; otherwise node is returned on a full match,
+// or nil when there is no match.
+func (m *matcher) topNode(exprNode, node ast.Node) ast.Node {
+ sts1, ok1 := exprNode.(stmtList)
+ sts2, ok2 := node.(stmtList)
+ if ok1 && ok2 {
+ // allow a partial match at the top level
+ return m.nodes(sts1, sts2, true)
+ }
+ if m.node(exprNode, node) {
+ return node
+ }
+ return nil
+}
+
+// optNode is like node, but for those nodes that can be nil and are not
+// part of a list. For example, init and post statements in a for loop.
+// An "any" wildcard ($*x) on the pattern side is first tried as a
+// statement list, so it can also match an absent optional node.
+func (m *matcher) optNode(expr, node ast.Node) bool {
+ if ident := m.wildAnyIdent(expr); ident != nil {
+ if m.node(toStmtList(ident), toStmtList(node)) {
+ return true
+ }
+ }
+ return m.node(expr, node)
+}
+
+func (m *matcher) node(expr, node ast.Node) bool {
+ switch node.(type) {
+ case *ast.File, *ast.FuncType, *ast.BlockStmt, *ast.IfStmt,
+ *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.CaseClause,
+ *ast.CommClause, *ast.ForStmt, *ast.RangeStmt:
+ if scope := m.Info.Scopes[node]; scope != nil {
+ m.scope = scope
+ }
+ }
+ if !m.aggressive {
+ if expr == nil || node == nil {
+ return expr == node
+ }
+ } else {
+ if expr == nil && node == nil {
+ return true
+ }
+ if node == nil {
+ expr, node = node, expr
+ }
+ }
+ switch x := expr.(type) {
+ case nil: // only in aggressive mode
+ y, ok := node.(*ast.Ident)
+ return ok && y.Name == "_"
+
+ case *ast.File:
+ y, ok := node.(*ast.File)
+ if !ok || !m.node(x.Name, y.Name) || len(x.Decls) != len(y.Decls) ||
+ len(x.Imports) != len(y.Imports) {
+ return false
+ }
+ for i, decl := range x.Decls {
+ if !m.node(decl, y.Decls[i]) {
+ return false
+ }
+ }
+ for i, imp := range x.Imports {
+ if !m.node(imp, y.Imports[i]) {
+ return false
+ }
+ }
+ return true
+
+ case *ast.Ident:
+ y, yok := node.(*ast.Ident)
+ if !isWildName(x.Name) {
+ // not a wildcard
+ return yok && x.Name == y.Name
+ }
+ if _, ok := node.(ast.Node); !ok {
+ return false // to not include our extra node types
+ }
+ id := fromWildName(x.Name)
+ info := m.info(id)
+ if info.any {
+ return false
+ }
+ if info.name == "_" {
+ // values are discarded, matches anything
+ return true
+ }
+ prev, ok := m.values[info.name]
+ if !ok {
+ // first occurrence, record value
+ m.values[info.name] = node
+ return true
+ }
+ // multiple uses must match
+ return m.node(prev, node)
+
+ // lists (ys are generated by us while walking)
+ case exprList:
+ y, ok := node.(exprList)
+ return ok && m.exprs(x, y)
+ case stmtList:
+ y, ok := node.(stmtList)
+ return ok && m.stmts(x, y)
+
+ // lits
+ case *ast.BasicLit:
+ y, ok := node.(*ast.BasicLit)
+ return ok && x.Kind == y.Kind && x.Value == y.Value
+ case *ast.CompositeLit:
+ y, ok := node.(*ast.CompositeLit)
+ return ok && m.node(x.Type, y.Type) && m.exprs(x.Elts, y.Elts)
+ case *ast.FuncLit:
+ y, ok := node.(*ast.FuncLit)
+ return ok && m.node(x.Type, y.Type) && m.node(x.Body, y.Body)
+
+ // types
+ case *ast.ArrayType:
+ y, ok := node.(*ast.ArrayType)
+ return ok && m.node(x.Len, y.Len) && m.node(x.Elt, y.Elt)
+ case *ast.MapType:
+ y, ok := node.(*ast.MapType)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value)
+ case *ast.StructType:
+ y, ok := node.(*ast.StructType)
+ return ok && m.fields(x.Fields, y.Fields)
+ case *ast.Field:
+ // TODO: tags?
+ y, ok := node.(*ast.Field)
+ if !ok {
+ return false
+ }
+ if len(x.Names) == 0 && x.Tag == nil && m.node(x.Type, y) {
+ // Allow $var to match a field.
+ return true
+ }
+ return m.idents(x.Names, y.Names) && m.node(x.Type, y.Type)
+ case *ast.FuncType:
+ y, ok := node.(*ast.FuncType)
+ return ok && m.fields(x.Params, y.Params) &&
+ m.fields(x.Results, y.Results)
+ case *ast.InterfaceType:
+ y, ok := node.(*ast.InterfaceType)
+ return ok && m.fields(x.Methods, y.Methods)
+ case *ast.ChanType:
+ y, ok := node.(*ast.ChanType)
+ return ok && x.Dir == y.Dir && m.node(x.Value, y.Value)
+
+ // other exprs
+ case *ast.Ellipsis:
+ y, ok := node.(*ast.Ellipsis)
+ return ok && m.node(x.Elt, y.Elt)
+ case *ast.ParenExpr:
+ y, ok := node.(*ast.ParenExpr)
+ return ok && m.node(x.X, y.X)
+ case *ast.UnaryExpr:
+ y, ok := node.(*ast.UnaryExpr)
+ return ok && x.Op == y.Op && m.node(x.X, y.X)
+ case *ast.BinaryExpr:
+ y, ok := node.(*ast.BinaryExpr)
+ return ok && x.Op == y.Op && m.node(x.X, y.X) && m.node(x.Y, y.Y)
+ case *ast.CallExpr:
+ y, ok := node.(*ast.CallExpr)
+ return ok && m.node(x.Fun, y.Fun) && m.exprs(x.Args, y.Args) &&
+ bothValid(x.Ellipsis, y.Ellipsis)
+ case *ast.KeyValueExpr:
+ y, ok := node.(*ast.KeyValueExpr)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value)
+ case *ast.StarExpr:
+ y, ok := node.(*ast.StarExpr)
+ return ok && m.node(x.X, y.X)
+ case *ast.SelectorExpr:
+ y, ok := node.(*ast.SelectorExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Sel, y.Sel)
+ case *ast.IndexExpr:
+ y, ok := node.(*ast.IndexExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Index, y.Index)
+ case *ast.SliceExpr:
+ y, ok := node.(*ast.SliceExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Low, y.Low) &&
+ m.node(x.High, y.High) && m.node(x.Max, y.Max)
+ case *ast.TypeAssertExpr:
+ y, ok := node.(*ast.TypeAssertExpr)
+ return ok && m.node(x.X, y.X) && m.node(x.Type, y.Type)
+
+ // decls
+ case *ast.GenDecl:
+ y, ok := node.(*ast.GenDecl)
+ return ok && x.Tok == y.Tok && m.specs(x.Specs, y.Specs)
+ case *ast.FuncDecl:
+ y, ok := node.(*ast.FuncDecl)
+ return ok && m.fields(x.Recv, y.Recv) && m.node(x.Name, y.Name) &&
+ m.node(x.Type, y.Type) && m.node(x.Body, y.Body)
+
+ // specs
+ case *ast.ValueSpec:
+ y, ok := node.(*ast.ValueSpec)
+ if !ok || !m.node(x.Type, y.Type) {
+ return false
+ }
+ if m.aggressive && len(x.Names) == 1 {
+ for i := range y.Names {
+ if m.node(x.Names[i], y.Names[i]) &&
+ (x.Values == nil || m.node(x.Values[i], y.Values[i])) {
+ return true
+ }
+ }
+ }
+ return m.idents(x.Names, y.Names) && m.exprs(x.Values, y.Values)
+
+ // stmt bridge nodes
+ case *ast.ExprStmt:
+ if id, ok := x.X.(*ast.Ident); ok && isWildName(id.Name) {
+ // prefer matching $x as a statement, as it's
+ // the parent
+ return m.node(id, node)
+ }
+ y, ok := node.(*ast.ExprStmt)
+ return ok && m.node(x.X, y.X)
+ case *ast.DeclStmt:
+ y, ok := node.(*ast.DeclStmt)
+ return ok && m.node(x.Decl, y.Decl)
+
+ // stmts
+ case *ast.EmptyStmt:
+ _, ok := node.(*ast.EmptyStmt)
+ return ok
+ case *ast.LabeledStmt:
+ y, ok := node.(*ast.LabeledStmt)
+ return ok && m.node(x.Label, y.Label) && m.node(x.Stmt, y.Stmt)
+ case *ast.SendStmt:
+ y, ok := node.(*ast.SendStmt)
+ return ok && m.node(x.Chan, y.Chan) && m.node(x.Value, y.Value)
+ case *ast.IncDecStmt:
+ y, ok := node.(*ast.IncDecStmt)
+ return ok && x.Tok == y.Tok && m.node(x.X, y.X)
+ case *ast.AssignStmt:
+ y, ok := node.(*ast.AssignStmt)
+ if !m.aggressive {
+ return ok && x.Tok == y.Tok &&
+ m.exprs(x.Lhs, y.Lhs) && m.exprs(x.Rhs, y.Rhs)
+ }
+ if ok {
+ return m.exprs(x.Lhs, y.Lhs) && m.exprs(x.Rhs, y.Rhs)
+ }
+ vs, ok := node.(*ast.ValueSpec)
+ return ok && m.nodesMatch(exprList(x.Lhs), identList(vs.Names)) &&
+ m.exprs(x.Rhs, vs.Values)
+ case *ast.GoStmt:
+ y, ok := node.(*ast.GoStmt)
+ return ok && m.node(x.Call, y.Call)
+ case *ast.DeferStmt:
+ y, ok := node.(*ast.DeferStmt)
+ return ok && m.node(x.Call, y.Call)
+ case *ast.ReturnStmt:
+ y, ok := node.(*ast.ReturnStmt)
+ return ok && m.exprs(x.Results, y.Results)
+ case *ast.BranchStmt:
+ y, ok := node.(*ast.BranchStmt)
+ return ok && x.Tok == y.Tok && m.node(maybeNilIdent(x.Label), maybeNilIdent(y.Label))
+ case *ast.BlockStmt:
+ if m.aggressive && m.node(stmtList(x.List), node) {
+ return true
+ }
+ y, ok := node.(*ast.BlockStmt)
+ if !ok {
+ return false
+ }
+ if x == nil || y == nil {
+ return x == y
+ }
+ return m.cases(x.List, y.List) || m.stmts(x.List, y.List)
+ case *ast.IfStmt:
+ y, ok := node.(*ast.IfStmt)
+ if !ok {
+ return false
+ }
+ condAny := m.wildAnyIdent(x.Cond)
+ if condAny != nil && x.Init == nil {
+ // if $*x { ... } on the left
+ left := toStmtList(condAny)
+ return m.node(left, toStmtList(y.Init, y.Cond)) &&
+ m.node(x.Body, y.Body) && m.optNode(x.Else, y.Else)
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Cond, y.Cond) &&
+ m.node(x.Body, y.Body) && m.node(x.Else, y.Else)
+ case *ast.CaseClause:
+ y, ok := node.(*ast.CaseClause)
+ return ok && m.exprs(x.List, y.List) && m.stmts(x.Body, y.Body)
+ case *ast.SwitchStmt:
+ y, ok := node.(*ast.SwitchStmt)
+ if !ok {
+ return false
+ }
+ tagAny := m.wildAnyIdent(x.Tag)
+ if tagAny != nil && x.Init == nil {
+ // switch $*x { ... } on the left
+ left := toStmtList(tagAny)
+ return m.node(left, toStmtList(y.Init, y.Tag)) &&
+ m.node(x.Body, y.Body)
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Tag, y.Tag) && m.node(x.Body, y.Body)
+ case *ast.TypeSwitchStmt:
+ y, ok := node.(*ast.TypeSwitchStmt)
+ return ok && m.optNode(x.Init, y.Init) && m.node(x.Assign, y.Assign) && m.node(x.Body, y.Body)
+ case *ast.CommClause:
+ y, ok := node.(*ast.CommClause)
+ return ok && m.node(x.Comm, y.Comm) && m.stmts(x.Body, y.Body)
+ case *ast.SelectStmt:
+ y, ok := node.(*ast.SelectStmt)
+ return ok && m.node(x.Body, y.Body)
+ case *ast.ForStmt:
+ condIdent := m.wildAnyIdent(x.Cond)
+ if condIdent != nil && x.Init == nil && x.Post == nil {
+ // "for $*x { ... }" on the left
+ left := toStmtList(condIdent)
+ // also accept RangeStmt on the right
+ switch y := node.(type) {
+ case *ast.ForStmt:
+ return m.node(left, toStmtList(y.Init, y.Cond, y.Post)) &&
+ m.node(x.Body, y.Body)
+ case *ast.RangeStmt:
+ return m.node(left, toStmtList(y.Key, y.Value, y.X)) &&
+ m.node(x.Body, y.Body)
+ default:
+ return false
+ }
+ }
+ y, ok := node.(*ast.ForStmt)
+ if !ok {
+ return false
+ }
+ return m.optNode(x.Init, y.Init) && m.node(x.Cond, y.Cond) &&
+ m.optNode(x.Post, y.Post) && m.node(x.Body, y.Body)
+ case *ast.RangeStmt:
+ y, ok := node.(*ast.RangeStmt)
+ return ok && m.node(x.Key, y.Key) && m.node(x.Value, y.Value) &&
+ m.node(x.X, y.X) && m.node(x.Body, y.Body)
+
+ case *ast.TypeSpec:
+ y, ok := node.(*ast.TypeSpec)
+ return ok && m.node(x.Name, y.Name) && m.node(x.Type, y.Type)
+
+ case *ast.FieldList:
+ // we ignore these, for now
+ return false
+ default:
+ panic(fmt.Sprintf("unexpected node: %T", x))
+ }
+}
+
+// wildAnyIdent returns the ident for node if it is a lone "any" wildcard
+// ($*x), unwrapping an ExprStmt if necessary; it returns nil otherwise.
+func (m *matcher) wildAnyIdent(node ast.Node) *ast.Ident {
+ switch x := node.(type) {
+ case *ast.ExprStmt:
+ return m.wildAnyIdent(x.X)
+ case *ast.Ident:
+ if !isWildName(x.Name) {
+ return nil
+ }
+ if !m.info(fromWildName(x.Name)).any {
+ return nil
+ }
+ return x
+ }
+ return nil
+}
+
+// resolveType resolves a type expression from a given scope.
+// Supported forms are plain idents, arrays/slices, pointers, channels and
+// package-qualified selectors; anything else panics with a TODO message.
+func (m *matcher) resolveType(scope *types.Scope, expr ast.Expr) types.Type {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ _, obj := scope.LookupParent(x.Name, token.NoPos)
+ if obj == nil {
+ // TODO: error if all resolveType calls on a type
+ // expression fail? or perhaps resolve type expressions
+ // across the entire program?
+ return nil
+ }
+ return obj.Type()
+ case *ast.ArrayType:
+ elt := m.resolveType(scope, x.Elt)
+ if x.Len == nil {
+ // no length expression means a slice type
+ return types.NewSlice(elt)
+ }
+ // only integer literal lengths are supported for arrays
+ bl, ok := x.Len.(*ast.BasicLit)
+ if !ok || bl.Kind != token.INT {
+ panic(fmt.Sprintf("TODO: %T", x))
+ }
+ len, _ := strconv.ParseInt(bl.Value, 0, 0)
+ return types.NewArray(elt, len)
+ case *ast.StarExpr:
+ return types.NewPointer(m.resolveType(scope, x.X))
+ case *ast.ChanType:
+ dir := types.SendRecv
+ switch x.Dir {
+ case ast.SEND:
+ dir = types.SendOnly
+ case ast.RECV:
+ dir = types.RecvOnly
+ }
+ return types.NewChan(dir, m.resolveType(scope, x.Value))
+ case *ast.SelectorExpr:
+ // e.g. pkg.Type: resolve pkg to its scope, then Type within it
+ scope = m.findScope(scope, x.X)
+ return m.resolveType(scope, x.Sel)
+ default:
+ panic(fmt.Sprintf("resolveType TODO: %T", x))
+ }
+}
+
+// findScope resolves expr (a package ident) to that package's scope.
+// If the name is not an imported package in scope, it falls back to
+// importing it as a standard-library package, expanding short names via
+// stdImportFixes (e.g. "http" -> "net/http"); failures panic.
+func (m *matcher) findScope(scope *types.Scope, expr ast.Expr) *types.Scope {
+ switch x := expr.(type) {
+ case *ast.Ident:
+ _, obj := scope.LookupParent(x.Name, token.NoPos)
+ if pkg, ok := obj.(*types.PkgName); ok {
+ return pkg.Imported().Scope()
+ }
+ // try to fall back to std
+ if m.stdImporter == nil {
+ m.stdImporter = importer.Default()
+ }
+ path := x.Name
+ if longer, ok := stdImportFixes[path]; ok {
+ path = longer
+ }
+ pkg, err := m.stdImporter.Import(path)
+ if err != nil {
+ panic(fmt.Sprintf("findScope err: %v", err))
+ }
+ return pkg.Scope()
+ default:
+ panic(fmt.Sprintf("findScope TODO: %T", x))
+ }
+}
+
+var stdImportFixes = map[string]string{
+ // go list std | grep -vE 'vendor|internal' | grep '/' | sed -r 's@^(.*)/([^/]*)$@"\2": "\1/\2",@' | sort
+ // (after commenting out the less likely duplicates)
+ "adler32": "hash/adler32",
+ "aes": "crypto/aes",
+ "ascii85": "encoding/ascii85",
+ "asn1": "encoding/asn1",
+ "ast": "go/ast",
+ "atomic": "sync/atomic",
+ "base32": "encoding/base32",
+ "base64": "encoding/base64",
+ "big": "math/big",
+ "binary": "encoding/binary",
+ "bits": "math/bits",
+ "build": "go/build",
+ "bzip2": "compress/bzip2",
+ "cgi": "net/http/cgi",
+ "cgo": "runtime/cgo",
+ "cipher": "crypto/cipher",
+ "cmplx": "math/cmplx",
+ "color": "image/color",
+ "constant": "go/constant",
+ "cookiejar": "net/http/cookiejar",
+ "crc32": "hash/crc32",
+ "crc64": "hash/crc64",
+ "csv": "encoding/csv",
+ "debug": "runtime/debug",
+ "des": "crypto/des",
+ "doc": "go/doc",
+ "draw": "image/draw",
+ "driver": "database/sql/driver",
+ "dsa": "crypto/dsa",
+ "dwarf": "debug/dwarf",
+ "ecdsa": "crypto/ecdsa",
+ "elf": "debug/elf",
+ "elliptic": "crypto/elliptic",
+ "exec": "os/exec",
+ "fcgi": "net/http/fcgi",
+ "filepath": "path/filepath",
+ "flate": "compress/flate",
+ "fnv": "hash/fnv",
+ "format": "go/format",
+ "gif": "image/gif",
+ "gob": "encoding/gob",
+ "gosym": "debug/gosym",
+ "gzip": "compress/gzip",
+ "heap": "container/heap",
+ "hex": "encoding/hex",
+ "hmac": "crypto/hmac",
+ "http": "net/http",
+ "httptest": "net/http/httptest",
+ "httptrace": "net/http/httptrace",
+ "httputil": "net/http/httputil",
+ "importer": "go/importer",
+ "iotest": "testing/iotest",
+ "ioutil": "io/ioutil",
+ "jpeg": "image/jpeg",
+ "json": "encoding/json",
+ "jsonrpc": "net/rpc/jsonrpc",
+ "list": "container/list",
+ "lzw": "compress/lzw",
+ "macho": "debug/macho",
+ "mail": "net/mail",
+ "md5": "crypto/md5",
+ "multipart": "mime/multipart",
+ "palette": "image/color/palette",
+ "parser": "go/parser",
+ "parse": "text/template/parse",
+ "pe": "debug/pe",
+ "pem": "encoding/pem",
+ "pkix": "crypto/x509/pkix",
+ "plan9obj": "debug/plan9obj",
+ "png": "image/png",
+ //"pprof": "net/http/pprof",
+ "pprof": "runtime/pprof",
+ "printer": "go/printer",
+ "quick": "testing/quick",
+ "quotedprintable": "mime/quotedprintable",
+ "race": "runtime/race",
+ //"rand": "crypto/rand",
+ "rand": "math/rand",
+ "rc4": "crypto/rc4",
+ "ring": "container/ring",
+ "rpc": "net/rpc",
+ "rsa": "crypto/rsa",
+ //"scanner": "go/scanner",
+ "scanner": "text/scanner",
+ "sha1": "crypto/sha1",
+ "sha256": "crypto/sha256",
+ "sha512": "crypto/sha512",
+ "signal": "os/signal",
+ "smtp": "net/smtp",
+ "sql": "database/sql",
+ "subtle": "crypto/subtle",
+ "suffixarray": "index/suffixarray",
+ "syntax": "regexp/syntax",
+ "syslog": "log/syslog",
+ "tabwriter": "text/tabwriter",
+ "tar": "archive/tar",
+ //"template": "html/template",
+ "template": "text/template",
+ "textproto": "net/textproto",
+ "tls": "crypto/tls",
+ "token": "go/token",
+ "trace": "runtime/trace",
+ "types": "go/types",
+ "url": "net/url",
+ "user": "os/user",
+ "utf16": "unicode/utf16",
+ "utf8": "unicode/utf8",
+ "x509": "crypto/x509",
+ "xml": "encoding/xml",
+ "zip": "archive/zip",
+ "zlib": "compress/zlib",
+}
+
+// maybeNilIdent converts a possibly-nil *ast.Ident to ast.Node, mapping a
+// nil pointer to an untyped nil interface so nil comparisons in node work.
+func maybeNilIdent(x *ast.Ident) ast.Node {
+ if x == nil {
+ return nil
+ }
+ return x
+}
+
+// bothValid reports whether p1 and p2 are either both valid or both
+// invalid positions (used to compare optional tokens such as "...").
+func bothValid(p1, p2 token.Pos) bool {
+ return p1.IsValid() == p2.IsValid()
+}
+
+// nodeList is a generalized, sliceable list of AST nodes that is itself an
+// ast.Node; implemented by exprList, identList, stmtList, specList and
+// fieldList below.
+type nodeList interface {
+ at(i int) ast.Node
+ len() int
+ slice(from, to int) nodeList
+ ast.Node
+}
+
+// nodes matches two lists of nodes. It uses a common algorithm to match
+// wildcard patterns with any number of nodes without recursion.
+func (m *matcher) nodes(ns1, ns2 nodeList, partial bool) ast.Node {
+ ns1len, ns2len := ns1.len(), ns2.len()
+ if ns1len == 0 {
+ if ns2len == 0 {
+ return ns2
+ }
+ return nil
+ }
+ partialStart, partialEnd := 0, ns2len
+ i1, i2 := 0, 0
+ next1, next2 := 0, 0
+
+ // We need to keep a copy of m.values so that we can restart
+ // with a different "any of" match while discarding any matches
+ // we found while trying it.
+ type restart struct {
+ matches map[string]ast.Node
+ next1, next2 int
+ }
+ // We need to stack these because otherwise some edge cases
+ // would not match properly. Since we have various kinds of
+ // wildcards (nodes containing them, $_, and $*_), in some cases
+ // we may have to go back and do multiple restarts to get to the
+ // right starting position.
+ var stack []restart
+ push := func(n1, n2 int) {
+ if n2 > ns2len {
+ return // would be discarded anyway
+ }
+ stack = append(stack, restart{valsCopy(m.values), n1, n2})
+ next1, next2 = n1, n2
+ }
+ pop := func() {
+ i1, i2 = next1, next2
+ m.values = stack[len(stack)-1].matches
+ stack = stack[:len(stack)-1]
+ next1, next2 = 0, 0
+ if len(stack) > 0 {
+ next1 = stack[len(stack)-1].next1
+ next2 = stack[len(stack)-1].next2
+ }
+ }
+ wildName := ""
+ wildStart := 0
+
+ // wouldMatch returns whether the current wildcard - if any -
+ // matches the nodes we are currently trying it on.
+ wouldMatch := func() bool {
+ switch wildName {
+ case "", "_":
+ return true
+ }
+ list := ns2.slice(wildStart, i2)
+ // check that it matches any nodes found elsewhere
+ prev, ok := m.values[wildName]
+ if ok && !m.node(prev, list) {
+ return false
+ }
+ m.values[wildName] = list
+ return true
+ }
+ for i1 < ns1len || i2 < ns2len {
+ if i1 < ns1len {
+ n1 := ns1.at(i1)
+ id := fromWildNode(n1)
+ info := m.info(id)
+ if info.any {
+ // keep track of where this wildcard
+ // started (if info.name == wildName,
+ // we're trying the same wildcard
+ // matching one more node)
+ if info.name != wildName {
+ wildStart = i2
+ wildName = info.name
+ }
+ // try to match zero or more at i2,
+ // restarting at i2+1 if it fails
+ push(i1, i2+1)
+ i1++
+ continue
+ }
+ if partial && i1 == 0 {
+ // let "b; c" match "a; b; c"
+ // (simulates a $*_ at the beginning)
+ partialStart = i2
+ push(i1, i2+1)
+ }
+ if i2 < ns2len && wouldMatch() && m.node(n1, ns2.at(i2)) {
+ wildName = ""
+ // ordinary match
+ i1++
+ i2++
+ continue
+ }
+ }
+ if partial && i1 == ns1len && wildName == "" {
+ partialEnd = i2
+ break // let "b; c" match "b; c; d"
+ }
+ // mismatch, try to restart
+ if 0 < next2 && next2 <= ns2len && (i1 != next1 || i2 != next2) {
+ pop()
+ continue
+ }
+ return nil
+ }
+ if !wouldMatch() {
+ return nil
+ }
+ return ns2.slice(partialStart, partialEnd)
+}
+
+// nodesMatch reports whether the two lists fully match (no partial match).
+func (m *matcher) nodesMatch(list1, list2 nodeList) bool {
+ return m.nodes(list1, list2, false) != nil
+}
+
+// exprs matches two expression lists via nodesMatch.
+func (m *matcher) exprs(exprs1, exprs2 []ast.Expr) bool {
+ return m.nodesMatch(exprList(exprs1), exprList(exprs2))
+}
+
+// idents matches two identifier lists via nodesMatch.
+func (m *matcher) idents(ids1, ids2 []*ast.Ident) bool {
+ return m.nodesMatch(identList(ids1), identList(ids2))
+}
+
+// toStmtList converts the given nodes into a stmtList, wrapping bare
+// expressions in ExprStmt and dropping nil nodes; other node kinds panic.
+func toStmtList(nodes ...ast.Node) stmtList {
+ var stmts []ast.Stmt
+ for _, node := range nodes {
+ switch x := node.(type) {
+ case nil:
+ case ast.Stmt:
+ stmts = append(stmts, x)
+ case ast.Expr:
+ stmts = append(stmts, &ast.ExprStmt{X: x})
+ default:
+ panic(fmt.Sprintf("unexpected node type: %T", x))
+ }
+ }
+ return stmtList(stmts)
+}
+
+// cases matches a pattern switch/select body (stmts1) against a concrete
+// one (stmts2). Each pattern clause must be a single wildcard expression
+// whose body is the synthesized "gogrep_body" ident (see tokenize); the
+// collected wildcards are then matched against the concrete clauses.
+func (m *matcher) cases(stmts1, stmts2 []ast.Stmt) bool {
+ // the right-hand side must consist solely of case/comm clauses
+ for _, stmt := range stmts2 {
+ switch stmt.(type) {
+ case *ast.CaseClause, *ast.CommClause:
+ default:
+ return false
+ }
+ }
+ var left []*ast.Ident
+ for _, stmt := range stmts1 {
+ var expr ast.Expr
+ var bstmt ast.Stmt
+ switch x := stmt.(type) {
+ case *ast.CaseClause:
+ if len(x.List) != 1 || len(x.Body) != 1 {
+ return false
+ }
+ expr, bstmt = x.List[0], x.Body[0]
+ case *ast.CommClause:
+ if x.Comm == nil || len(x.Body) != 1 {
+ return false
+ }
+ if commExpr, ok := x.Comm.(*ast.ExprStmt); ok {
+ expr = commExpr.X
+ }
+ bstmt = x.Body[0]
+ default:
+ return false
+ }
+ // the clause body must be exactly the placeholder ident
+ xs, ok := bstmt.(*ast.ExprStmt)
+ if !ok {
+ return false
+ }
+ bodyIdent, ok := xs.X.(*ast.Ident)
+ if !ok || bodyIdent.Name != "gogrep_body" {
+ return false
+ }
+ id, ok := expr.(*ast.Ident)
+ if !ok || !isWildName(id.Name) {
+ return false
+ }
+ left = append(left, id)
+ }
+ return m.nodesMatch(identList(left), stmtList(stmts2))
+}
+
+// stmts matches two statement lists via nodesMatch.
+func (m *matcher) stmts(stmts1, stmts2 []ast.Stmt) bool {
+ return m.nodesMatch(stmtList(stmts1), stmtList(stmts2))
+}
+
+// specs matches two declaration spec lists via nodesMatch.
+func (m *matcher) specs(specs1, specs2 []ast.Spec) bool {
+ return m.nodesMatch(specList(specs1), specList(specs2))
+}
+
+// fields matches two field lists; a nil list only matches another nil list.
+func (m *matcher) fields(fields1, fields2 *ast.FieldList) bool {
+ if fields1 == nil || fields2 == nil {
+ return fields1 == fields2
+ }
+ return m.nodesMatch(fieldList(fields1.List), fieldList(fields2.List))
+}
+
+// fromWildNode returns the wildcard variable id encoded in node, looking
+// through ExprStmt wrappers and anonymous fields; -1 if node is not a
+// wildcard.
+func fromWildNode(node ast.Node) int {
+ switch node := node.(type) {
+ case *ast.Ident:
+ return fromWildName(node.Name)
+ case *ast.ExprStmt:
+ return fromWildNode(node.X)
+ case *ast.Field:
+ // Allow $var to represent an entire field; the lone identifier
+ // gets picked up as an anonymous field.
+ if len(node.Names) == 0 && node.Tag == nil {
+ return fromWildNode(node.Type)
+ }
+ }
+ return -1
+}
+
+// nodeLists returns the non-empty node lists directly contained in n
+// (call args, assignment sides, block bodies, etc.), so that wildcards
+// can be matched against them as units.
+func nodeLists(n ast.Node) []nodeList {
+ var lists []nodeList
+ addList := func(list nodeList) {
+ if list.len() > 0 {
+ lists = append(lists, list)
+ }
+ }
+ switch x := n.(type) {
+ case nodeList:
+ addList(x)
+ case *ast.CompositeLit:
+ addList(exprList(x.Elts))
+ case *ast.CallExpr:
+ addList(exprList(x.Args))
+ case *ast.AssignStmt:
+ addList(exprList(x.Lhs))
+ addList(exprList(x.Rhs))
+ case *ast.ReturnStmt:
+ addList(exprList(x.Results))
+ case *ast.ValueSpec:
+ addList(exprList(x.Values))
+ case *ast.BlockStmt:
+ addList(stmtList(x.List))
+ case *ast.CaseClause:
+ addList(exprList(x.List))
+ addList(stmtList(x.Body))
+ case *ast.CommClause:
+ addList(stmtList(x.Body))
+ }
+ return lists
+}
+
+// The concrete nodeList implementations. Each wraps a plain AST slice and
+// delegates Pos/End to its first/last element, so a list can be used
+// wherever an ast.Node is expected.
+type exprList []ast.Expr
+type identList []*ast.Ident
+type stmtList []ast.Stmt
+type specList []ast.Spec
+type fieldList []*ast.Field
+
+func (l exprList) len() int { return len(l) }
+func (l identList) len() int { return len(l) }
+func (l stmtList) len() int { return len(l) }
+func (l specList) len() int { return len(l) }
+func (l fieldList) len() int { return len(l) }
+
+func (l exprList) at(i int) ast.Node { return l[i] }
+func (l identList) at(i int) ast.Node { return l[i] }
+func (l stmtList) at(i int) ast.Node { return l[i] }
+func (l specList) at(i int) ast.Node { return l[i] }
+func (l fieldList) at(i int) ast.Node { return l[i] }
+
+func (l exprList) slice(i, j int) nodeList { return l[i:j] }
+func (l identList) slice(i, j int) nodeList { return l[i:j] }
+func (l stmtList) slice(i, j int) nodeList { return l[i:j] }
+func (l specList) slice(i, j int) nodeList { return l[i:j] }
+func (l fieldList) slice(i, j int) nodeList { return l[i:j] }
+
+func (l exprList) Pos() token.Pos { return l[0].Pos() }
+func (l identList) Pos() token.Pos { return l[0].Pos() }
+func (l stmtList) Pos() token.Pos { return l[0].Pos() }
+func (l specList) Pos() token.Pos { return l[0].Pos() }
+func (l fieldList) Pos() token.Pos { return l[0].Pos() }
+
+func (l exprList) End() token.Pos { return l[len(l)-1].End() }
+func (l identList) End() token.Pos { return l[len(l)-1].End() }
+func (l stmtList) End() token.Pos { return l[len(l)-1].End() }
+func (l specList) End() token.Pos { return l[len(l)-1].End() }
+func (l fieldList) End() token.Pos { return l[len(l)-1].End() }
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go
new file mode 100644
index 000000000..b46e64393
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/parse.go
@@ -0,0 +1,452 @@
+// Copyright (c) 2017, Daniel Martí <mvdan@mvdan.cc>
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/scanner"
+ "go/token"
+ "regexp"
+ "strconv"
+ "strings"
+ "text/template"
+)
+
+func (m *matcher) transformSource(expr string) (string, []posOffset, error) {
+ toks, err := m.tokenize([]byte(expr))
+ if err != nil {
+ return "", nil, fmt.Errorf("cannot tokenize expr: %v", err)
+ }
+ var offs []posOffset
+ lbuf := lineColBuffer{line: 1, col: 1}
+ addOffset := func(length int) {
+ lbuf.offs -= length
+ offs = append(offs, posOffset{
+ atLine: lbuf.line,
+ atCol: lbuf.col,
+ offset: length,
+ })
+ }
+ if len(toks) > 0 && toks[0].tok == tokAggressive {
+ toks = toks[1:]
+ m.aggressive = true
+ }
+ lastLit := false
+ for _, t := range toks {
+ if lbuf.offs >= t.pos.Offset && lastLit && t.lit != "" {
+ lbuf.WriteString(" ")
+ }
+ for lbuf.offs < t.pos.Offset {
+ lbuf.WriteString(" ")
+ }
+ if t.lit == "" {
+ lbuf.WriteString(t.tok.String())
+ lastLit = false
+ continue
+ }
+ if isWildName(t.lit) {
+ // to correct the position offsets for the extra
+ // info attached to ident name strings
+ addOffset(len(wildPrefix) - 1)
+ }
+ lbuf.WriteString(t.lit)
+ lastLit = strings.TrimSpace(t.lit) != ""
+ }
+ // trailing newlines can cause issues with commas
+ return strings.TrimSpace(lbuf.String()), offs, nil
+}
+
+// parseExpr transforms a pattern source string (expanding $wildcards) and
+// parses it into whichever AST node kind it best fits; parse errors are
+// reported with positions adjusted back to the user's original input.
+func (m *matcher) parseExpr(expr string) (ast.Node, error) {
+ exprStr, offs, err := m.transformSource(expr)
+ if err != nil {
+ return nil, err
+ }
+ node, _, err := parseDetectingNode(m.fset, exprStr)
+ if err != nil {
+ err = subPosOffsets(err, offs...)
+ return nil, fmt.Errorf("cannot parse expr: %v", err)
+ }
+ return node, nil
+}
+
+// lineColBuffer is a bytes.Buffer that additionally tracks the current
+// line, column and byte offset of what has been written so far.
+type lineColBuffer struct {
+ bytes.Buffer
+ line, col, offs int
+}
+
+// WriteString writes s to the buffer, updating line/col/offs per rune.
+func (l *lineColBuffer) WriteString(s string) (n int, err error) {
+ for _, r := range s {
+ if r == '\n' {
+ l.line++
+ l.col = 1
+ } else {
+ l.col++
+ }
+ l.offs++
+ }
+ return l.Buffer.WriteString(s)
+}
+
+// Templates that wrap a pattern snippet into a parseable Go file, one per
+// node kind that parseDetectingNode tries (decl, expr list, stmts, type,
+// value spec).
+var tmplDecl = template.Must(template.New("").Parse(`` +
+ `package p; {{ . }}`))
+
+var tmplExprs = template.Must(template.New("").Parse(`` +
+ `package p; var _ = []interface{}{ {{ . }}, }`))
+
+var tmplStmts = template.Must(template.New("").Parse(`` +
+ `package p; func _() { {{ . }} }`))
+
+var tmplType = template.Must(template.New("").Parse(`` +
+ `package p; var _ {{ . }}`))
+
+var tmplValSpec = template.Must(template.New("").Parse(`` +
+ `package p; var {{ . }}`))
+
+// execTmpl renders src through one of the wrapper templates above; the
+// templates are static, so an execution error is a programmer bug (panic).
+func execTmpl(tmpl *template.Template, src string) string {
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, src); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+// noBadNodes reports whether the parsed tree contains no BadExpr/BadDecl
+// nodes, i.e. whether the parse fully succeeded.
+func noBadNodes(node ast.Node) bool {
+ any := false
+ ast.Inspect(node, func(n ast.Node) bool {
+ if any {
+ return false
+ }
+ switch n.(type) {
+ case *ast.BadExpr, *ast.BadDecl:
+ any = true
+ }
+ return true
+ })
+ return !any
+}
+
+// parseType parses src as a type expression by embedding it in a
+// "package p; var _ <src>" file; the 17-column offset undoes that prefix
+// in reported error positions.
+func parseType(fset *token.FileSet, src string) (ast.Expr, *ast.File, error) {
+ asType := execTmpl(tmplType, src)
+ f, err := parser.ParseFile(fset, "", asType, 0)
+ if err != nil {
+ err = subPosOffsets(err, posOffset{1, 1, 17})
+ return nil, nil, err
+ }
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ return vs.Type, f, nil
+}
+
+// parseDetectingNode tries its best to parse the ast.Node contained in src, as
+// one of: *ast.File, ast.Decl, ast.Expr, ast.Stmt, *ast.ValueSpec.
+// It also returns the *ast.File used for the parsing, so that the returned node
+// can be easily type-checked.
+func parseDetectingNode(fset *token.FileSet, src string) (ast.Node, *ast.File, error) {
+ file := fset.AddFile("", fset.Base(), len(src))
+ scan := scanner.Scanner{}
+ scan.Init(file, []byte(src), nil, 0)
+ if _, tok, _ := scan.Scan(); tok == token.EOF {
+ return nil, nil, fmt.Errorf("empty source code")
+ }
+ var mainErr error
+
+ // first try as a whole file
+ if f, err := parser.ParseFile(fset, "", src, 0); err == nil && noBadNodes(f) {
+ return f, f, nil
+ }
+
+ // then as a single declaration, or many
+ asDecl := execTmpl(tmplDecl, src)
+ if f, err := parser.ParseFile(fset, "", asDecl, 0); err == nil && noBadNodes(f) {
+ if len(f.Decls) == 1 {
+ return f.Decls[0], f, nil
+ }
+ return f, f, nil
+ }
+
+ // then as value expressions
+ asExprs := execTmpl(tmplExprs, src)
+ if f, err := parser.ParseFile(fset, "", asExprs, 0); err == nil && noBadNodes(f) {
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ cl := vs.Values[0].(*ast.CompositeLit)
+ if len(cl.Elts) == 1 {
+ return cl.Elts[0], f, nil
+ }
+ return exprList(cl.Elts), f, nil
+ }
+
+ // then try as statements
+ asStmts := execTmpl(tmplStmts, src)
+ if f, err := parser.ParseFile(fset, "", asStmts, 0); err == nil && noBadNodes(f) {
+ bl := f.Decls[0].(*ast.FuncDecl).Body
+ if len(bl.List) == 1 {
+ return bl.List[0], f, nil
+ }
+ return stmtList(bl.List), f, nil
+ } else {
+ // Statements is what covers most cases, so it will give
+ // the best overall error message. Show positions
+ // relative to where the user's code is put in the
+ // template.
+ mainErr = subPosOffsets(err, posOffset{1, 1, 22})
+ }
+
+ // type expressions not yet picked up, for e.g. chans and interfaces
+ if typ, f, err := parseType(fset, src); err == nil && noBadNodes(f) {
+ return typ, f, nil
+ }
+
+ // value specs
+ asValSpec := execTmpl(tmplValSpec, src)
+ if f, err := parser.ParseFile(fset, "", asValSpec, 0); err == nil && noBadNodes(f) {
+ vs := f.Decls[0].(*ast.GenDecl).Specs[0].(*ast.ValueSpec)
+ return vs, f, nil
+ }
+ return nil, nil, mainErr
+}
+
+// posOffset records a column offset to subtract from error positions at or
+// after (atLine, atCol), undoing template/wildcard expansion shifts.
+type posOffset struct {
+ atLine, atCol int
+ offset int
+}
+
+// subPosOffsets applies the given offsets to every error in a
+// scanner.ErrorList, so positions refer to the user's original source;
+// other error types are returned unchanged.
+func subPosOffsets(err error, offs ...posOffset) error {
+ list, ok := err.(scanner.ErrorList)
+ if !ok {
+ return err
+ }
+ for i, err := range list {
+ for _, off := range offs {
+ if err.Pos.Line != off.atLine {
+ continue
+ }
+ if err.Pos.Column < off.atCol {
+ continue
+ }
+ err.Pos.Column -= off.offset
+ }
+ list[i] = err
+ }
+ return list
+}
+
+// Custom tokens use negative values so they cannot collide with the
+// standard token.Token constants; tokAggressive is emitted for "~".
+const (
+ _ token.Token = -iota
+ tokAggressive
+)
+
+// fullToken bundles a scanned token with its resolved position and literal.
+type fullToken struct {
+ pos token.Position
+ tok token.Token
+ lit string
+}
+
+// caseStatus tracks where tokenize is relative to a switch/select body, so
+// it can synthesize "case $x: gogrep_body" around bare wildcards there.
+type caseStatus uint
+
+const (
+ caseNone caseStatus = iota
+ caseNeedBlock
+ caseHere
+)
+
+func (m *matcher) tokenize(src []byte) ([]fullToken, error) {
+ var s scanner.Scanner
+ fset := token.NewFileSet()
+ file := fset.AddFile("", fset.Base(), len(src))
+
+ var err error
+ onError := func(pos token.Position, msg string) {
+ switch msg { // allow certain extra chars
+ case `illegal character U+0024 '$'`:
+ case `illegal character U+007E '~'`:
+ default:
+ err = fmt.Errorf("%v: %s", pos, msg)
+ }
+ }
+
+ // we will modify the input source under the scanner's nose to
+ // enable some features such as regexes.
+ s.Init(file, src, onError, scanner.ScanComments)
+
+ next := func() fullToken {
+ pos, tok, lit := s.Scan()
+ return fullToken{fset.Position(pos), tok, lit}
+ }
+
+ caseStat := caseNone
+
+ var toks []fullToken
+ for t := next(); t.tok != token.EOF; t = next() {
+ switch t.lit {
+ case "$": // continues below
+ case "~":
+ toks = append(toks, fullToken{t.pos, tokAggressive, ""})
+ continue
+ case "switch", "select", "case":
+ if t.lit == "case" {
+ caseStat = caseNone
+ } else {
+ caseStat = caseNeedBlock
+ }
+ fallthrough
+ default: // regular Go code
+ if t.tok == token.LBRACE && caseStat == caseNeedBlock {
+ caseStat = caseHere
+ }
+ toks = append(toks, t)
+ continue
+ }
+ wt, err := m.wildcard(t.pos, next)
+ if err != nil {
+ return nil, err
+ }
+ if caseStat == caseHere {
+ toks = append(toks, fullToken{wt.pos, token.IDENT, "case"})
+ }
+ toks = append(toks, wt)
+ if caseStat == caseHere {
+ toks = append(toks, fullToken{wt.pos, token.COLON, ""})
+ toks = append(toks, fullToken{wt.pos, token.IDENT, "gogrep_body"})
+ }
+ }
+ return toks, err
+}
+
+// wildcard consumes the tokens following a "$" ("$name" or "$*name"),
+// records a varInfo for the variable, and returns an IDENT token named
+// wildPrefix plus the variable's numeric id (see fromWildName).
+func (m *matcher) wildcard(pos token.Position, next func() fullToken) (fullToken, error) {
+ wt := fullToken{pos, token.IDENT, wildPrefix}
+ t := next()
+ var info varInfo
+ if t.tok == token.MUL {
+ // "$*name": an "any" wildcard that can match several nodes
+ t = next()
+ info.any = true
+ }
+ if t.tok != token.IDENT {
+ return wt, fmt.Errorf("%v: $ must be followed by ident, got %v",
+ t.pos, t.tok)
+ }
+ id := len(m.vars)
+ wt.lit += strconv.Itoa(id)
+ info.name = t.lit
+ m.vars = append(m.vars, info)
+ return wt, nil
+}
+
+// typeCheck is an attribute comparing an expression's type against expr
+// with one of the ops below (see parseAttrs).
+type typeCheck struct {
+ op string // "type", "asgn", "conv"
+ expr ast.Expr
+}
+
+// attribute is any parsed wildcard attribute (typeCheck, typProperty,
+// typUnderlying, or a *regexp.Regexp for "rx").
+type attribute interface{}
+
+// typProperty is a no-argument type property: "comp" or "addr".
+type typProperty string
+
+// typUnderlying names a required underlying type kind ("basic", "slice", ...).
+type typUnderlying string
+
+func (m *matcher) parseAttrs(src string) (attribute, error) {
+ toks, err := m.tokenize([]byte(src))
+ if err != nil {
+ return nil, err
+ }
+ i := -1
+ var t fullToken
+ next := func() fullToken {
+ if i++; i < len(toks) {
+ return toks[i]
+ }
+ return fullToken{tok: token.EOF, pos: t.pos}
+ }
+ t = next()
+ op := t.lit
+ switch op { // the ones that don't take args
+ case "comp", "addr":
+ if t = next(); t.tok != token.SEMICOLON {
+ return nil, fmt.Errorf("%v: wanted EOF, got %v", t.pos, t.tok)
+ }
+ return typProperty(op), nil
+ }
+ opPos := t.pos
+ if t = next(); t.tok != token.LPAREN {
+ return nil, fmt.Errorf("%v: wanted (", t.pos)
+ }
+ var attr attribute
+ switch op {
+ case "rx":
+ t = next()
+ rxStr, err := strconv.Unquote(t.lit)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %v", t.pos, err)
+ }
+ if !strings.HasPrefix(rxStr, "^") {
+ rxStr = "^" + rxStr
+ }
+ if !strings.HasSuffix(rxStr, "$") {
+ rxStr = rxStr + "$"
+ }
+ rx, err := regexp.Compile(rxStr)
+ if err != nil {
+ return nil, fmt.Errorf("%v: %v", t.pos, err)
+ }
+ attr = rx
+ case "type", "asgn", "conv":
+ t = next()
+ start := t.pos.Offset
+ for open := 1; open > 0; t = next() {
+ switch t.tok {
+ case token.LPAREN:
+ open++
+ case token.RPAREN:
+ open--
+ case token.EOF:
+ return nil, fmt.Errorf("%v: expected ) to close (", t.pos)
+ }
+ }
+ end := t.pos.Offset - 1
+ typeStr := strings.TrimSpace(string(src[start:end]))
+ fset := token.NewFileSet()
+ typeExpr, _, err := parseType(fset, typeStr)
+ if err != nil {
+ return nil, err
+ }
+ attr = typeCheck{op, typeExpr}
+ i -= 2 // since we went past RPAREN above
+ case "is":
+ switch t = next(); t.lit {
+ case "basic", "array", "slice", "struct", "interface",
+ "pointer", "func", "map", "chan":
+ default:
+ return nil, fmt.Errorf("%v: unknown type: %q", t.pos,
+ t.lit)
+ }
+ attr = typUnderlying(t.lit)
+ default:
+ return nil, fmt.Errorf("%v: unknown op %q", opPos, op)
+ }
+ if t = next(); t.tok != token.RPAREN {
+ return nil, fmt.Errorf("%v: wanted ), got %v", t.pos, t.tok)
+ }
+ if t = next(); t.tok != token.SEMICOLON {
+ return nil, fmt.Errorf("%v: wanted EOF, got %v", t.pos, t.tok)
+ }
+ return attr, nil
+}
+
+// Wildcards are encoded as ordinary identifiers "gogrep_<id>" so they can
+// travel through the standard Go parser; using a prefix is good enough for
+// now.
+const wildPrefix = "gogrep_"
+
+// isWildName reports whether name encodes a wildcard variable.
+func isWildName(name string) bool {
+ return strings.HasPrefix(name, wildPrefix)
+}
+
+// fromWildName decodes the numeric wildcard id from s, or -1 if s is not a
+// well-formed wildcard name.
+func fromWildName(s string) int {
+ if !isWildName(s) {
+ return -1
+ }
+ n, err := strconv.Atoi(s[len(wildPrefix):])
+ if err != nil {
+ return -1
+ }
+ return n
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go
new file mode 100644
index 000000000..8870858ed
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/subst.go
@@ -0,0 +1,261 @@
+// Copyright (c) 2018, Daniel Martí <mvdan@mvdan.cc>
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "reflect"
+)
+
+func (m *matcher) cmdSubst(cmd exprCmd, subs []submatch) []submatch {
+ for i := range subs {
+ sub := &subs[i]
+ nodeCopy, _ := m.parseExpr(cmd.src)
+ // since we'll want to set positions within the file's
+ // FileSet
+ scrubPositions(nodeCopy)
+
+ m.fillParents(nodeCopy)
+ nodeCopy = m.fillValues(nodeCopy, sub.values)
+ m.substNode(sub.node, nodeCopy)
+ sub.node = nodeCopy
+ }
+ return subs
+}
+
+type topNode struct {
+ Node ast.Node
+}
+
+func (t topNode) Pos() token.Pos { return t.Node.Pos() }
+func (t topNode) End() token.Pos { return t.Node.End() }
+
+func (m *matcher) fillValues(node ast.Node, values map[string]ast.Node) ast.Node {
+ // node might not have a parent, in which case we need to set an
+ // artificial one. Its pointer interface is a copy, so we must also
+ // return it.
+ top := &topNode{node}
+ m.setParentOf(node, top)
+
+ inspect(node, func(node ast.Node) bool {
+ id := fromWildNode(node)
+ info := m.info(id)
+ if info.name == "" {
+ return true
+ }
+ prev := values[info.name]
+ switch prev.(type) {
+ case exprList:
+ node = exprList([]ast.Expr{
+ node.(*ast.Ident),
+ })
+ case stmtList:
+ if ident, ok := node.(*ast.Ident); ok {
+ node = &ast.ExprStmt{X: ident}
+ }
+ node = stmtList([]ast.Stmt{
+ node.(*ast.ExprStmt),
+ })
+ }
+ m.substNode(node, prev)
+ return true
+ })
+ m.setParentOf(node, nil)
+ return top.Node
+}
+
+func (m *matcher) substNode(oldNode, newNode ast.Node) {
+ parent := m.parentOf(oldNode)
+ m.setParentOf(newNode, parent)
+
+ ptr := m.nodePtr(oldNode)
+ switch x := ptr.(type) {
+ case **ast.Ident:
+ *x = newNode.(*ast.Ident)
+ case *ast.Node:
+ *x = newNode
+ case *ast.Expr:
+ *x = newNode.(ast.Expr)
+ case *ast.Stmt:
+ switch y := newNode.(type) {
+ case ast.Expr:
+ stmt := &ast.ExprStmt{X: y}
+ m.setParentOf(stmt, parent)
+ *x = stmt
+ case ast.Stmt:
+ *x = y
+ default:
+ panic(fmt.Sprintf("cannot replace stmt with %T", y))
+ }
+ case *[]ast.Expr:
+ oldList := oldNode.(exprList)
+ var first, last []ast.Expr
+ for i, expr := range *x {
+ if expr == oldList[0] {
+ first = (*x)[:i]
+ last = (*x)[i+len(oldList):]
+ break
+ }
+ }
+ switch y := newNode.(type) {
+ case ast.Expr:
+ *x = append(first, y)
+ case exprList:
+ *x = append(first, y...)
+ default:
+ panic(fmt.Sprintf("cannot replace exprs with %T", y))
+ }
+ *x = append(*x, last...)
+ case *[]ast.Stmt:
+ oldList := oldNode.(stmtList)
+ var first, last []ast.Stmt
+ for i, stmt := range *x {
+ if stmt == oldList[0] {
+ first = (*x)[:i]
+ last = (*x)[i+len(oldList):]
+ break
+ }
+ }
+ switch y := newNode.(type) {
+ case ast.Expr:
+ stmt := &ast.ExprStmt{X: y}
+ m.setParentOf(stmt, parent)
+ *x = append(first, stmt)
+ case ast.Stmt:
+ *x = append(first, y)
+ case stmtList:
+ *x = append(first, y...)
+ default:
+ panic(fmt.Sprintf("cannot replace stmts with %T", y))
+ }
+ *x = append(*x, last...)
+ case nil:
+ return
+ default:
+ panic(fmt.Sprintf("unsupported substitution: %T", x))
+ }
+ // the new nodes have scrubbed positions, so try our best to use
+ // sensible ones
+ fixPositions(parent)
+}
+
+func (m *matcher) parentOf(node ast.Node) ast.Node {
+ list, ok := node.(nodeList)
+ if ok {
+ node = list.at(0)
+ }
+ return m.parents[node]
+}
+
+func (m *matcher) setParentOf(node, parent ast.Node) {
+ list, ok := node.(nodeList)
+ if ok {
+ if list.len() == 0 {
+ return
+ }
+ node = list.at(0)
+ }
+ m.parents[node] = parent
+}
+
+func (m *matcher) nodePtr(node ast.Node) interface{} {
+ list, wantSlice := node.(nodeList)
+ if wantSlice {
+ node = list.at(0)
+ }
+ parent := m.parentOf(node)
+ if parent == nil {
+ return nil
+ }
+ v := reflect.ValueOf(parent).Elem()
+ for i := 0; i < v.NumField(); i++ {
+ fld := v.Field(i)
+ switch fld.Type().Kind() {
+ case reflect.Slice:
+ for i := 0; i < fld.Len(); i++ {
+ ifld := fld.Index(i)
+ if ifld.Interface() != node {
+ continue
+ }
+ if wantSlice {
+ return fld.Addr().Interface()
+ }
+ return ifld.Addr().Interface()
+ }
+ case reflect.Interface:
+ if fld.Interface() == node {
+ return fld.Addr().Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// nodePosHash is an ast.Node that can always be used as a key in maps,
+// even for nodes that are slices like nodeList.
+type nodePosHash struct {
+ pos, end token.Pos
+}
+
+func (n nodePosHash) Pos() token.Pos { return n.pos }
+func (n nodePosHash) End() token.Pos { return n.end }
+
+func posHash(node ast.Node) nodePosHash {
+ return nodePosHash{pos: node.Pos(), end: node.End()}
+}
+
+var posType = reflect.TypeOf(token.NoPos)
+
+func scrubPositions(node ast.Node) {
+ inspect(node, func(node ast.Node) bool {
+ v := reflect.ValueOf(node)
+ if v.Kind() != reflect.Ptr {
+ return true
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return true
+ }
+ for i := 0; i < v.NumField(); i++ {
+ fld := v.Field(i)
+ if fld.Type() == posType {
+ fld.SetInt(0)
+ }
+ }
+ return true
+ })
+}
+
+// fixPositions tries to fix common syntax errors caused from syntax rewrites.
+func fixPositions(node ast.Node) {
+ if top, ok := node.(*topNode); ok {
+ node = top.Node
+ }
+ // fallback sets pos to the 'to' position if not valid.
+ fallback := func(pos *token.Pos, to token.Pos) {
+ if !pos.IsValid() {
+ *pos = to
+ }
+ }
+ ast.Inspect(node, func(node ast.Node) bool {
+ // TODO: many more node types
+ switch x := node.(type) {
+ case *ast.GoStmt:
+ fallback(&x.Go, x.Call.Pos())
+ case *ast.ReturnStmt:
+ if len(x.Results) == 0 {
+ break
+ }
+ // Ensure that there's no newline before the returned
+ // values, as otherwise we have a naked return. See
+ // https://github.com/golang/go/issues/32854.
+ if pos := x.Results[0].Pos(); pos > x.Return {
+ x.Return = pos
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go
new file mode 100644
index 000000000..b4796a896
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep/write.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2018, Daniel Martí <mvdan@mvdan.cc>
+// See LICENSE for licensing information
+
+package gogrep
+
+import (
+ "go/ast"
+ "go/printer"
+ "os"
+)
+
+func (m *matcher) cmdWrite(cmd exprCmd, subs []submatch) []submatch {
+ seenRoot := make(map[nodePosHash]bool)
+ filePaths := make(map[*ast.File]string)
+ var next []submatch
+ for _, sub := range subs {
+ root := m.nodeRoot(sub.node)
+ hash := posHash(root)
+ if seenRoot[hash] {
+ continue // avoid dups
+ }
+ seenRoot[hash] = true
+ file, ok := root.(*ast.File)
+ if ok {
+ path := m.fset.Position(file.Package).Filename
+ if path != "" {
+ // write to disk
+ filePaths[file] = path
+ continue
+ }
+ }
+ // pass it on, to print to stdout
+ next = append(next, submatch{node: root})
+ }
+ for file, path := range filePaths {
+ f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC, 0)
+ if err != nil {
+ // TODO: return errors instead
+ panic(err)
+ }
+ if err := printConfig.Fprint(f, m.fset, file); err != nil {
+ // TODO: return errors instead
+ panic(err)
+ }
+ }
+ return next
+}
+
+var printConfig = printer.Config{
+ Mode: printer.UseSpaces | printer.TabIndent,
+ Tabwidth: 8,
+}
+
+func (m *matcher) nodeRoot(node ast.Node) ast.Node {
+ parent := m.parentOf(node)
+ if parent == nil {
+ return node
+ }
+ if _, ok := parent.(nodeList); ok {
+ return parent
+ }
+ return m.nodeRoot(parent)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go
new file mode 100644
index 000000000..6e9550c1a
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/bool3.go
@@ -0,0 +1,9 @@
+package ruleguard
+
+type bool3 int
+
+const (
+ bool3unset bool3 = iota
+ bool3false
+ bool3true
+)
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go
new file mode 100644
index 000000000..c566578d3
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/dsl_importer.go
@@ -0,0 +1,40 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+
+ "github.com/quasilyte/go-ruleguard/dslgen"
+)
+
+type dslImporter struct {
+ fallback types.Importer
+}
+
+func newDSLImporter() *dslImporter {
+ return &dslImporter{fallback: importer.Default()}
+}
+
+func (i *dslImporter) Import(path string) (*types.Package, error) {
+ switch path {
+ case "github.com/quasilyte/go-ruleguard/dsl/fluent":
+ return i.importDSL(path, dslgen.Fluent)
+
+ default:
+ return i.fallback.Import(path)
+ }
+}
+
+func (i *dslImporter) importDSL(path string, src []byte) (*types.Package, error) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "dsl.go", src, 0)
+ if err != nil {
+ return nil, err
+ }
+ var typecheker types.Config
+ var info types.Info
+ return typecheker.Check(path, fset, []*ast.File{f}, &info)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go
new file mode 100644
index 000000000..1192d8492
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/gorule.go
@@ -0,0 +1,36 @@
+package ruleguard
+
+import (
+ "go/types"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+)
+
+type scopedGoRuleSet struct {
+ uncategorized []goRule
+ categorizedNum int
+ rulesByCategory [nodeCategoriesCount][]goRule
+}
+
+type goRule struct {
+ filename string
+ severity string
+ pat *gogrep.Pattern
+ msg string
+ location string
+ suggestion string
+ filters map[string]submatchFilter
+}
+
+type submatchFilter struct {
+ typePred func(typeQuery) bool
+ textPred func(string) bool
+ pure bool3
+ constant bool3
+ addressable bool3
+}
+
+type typeQuery struct {
+ x types.Type
+ ctx *Context
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go
new file mode 100644
index 000000000..e494930ab
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/merge.go
@@ -0,0 +1,24 @@
+package ruleguard
+
+func mergeRuleSets(toMerge []*GoRuleSet) *GoRuleSet {
+ out := &GoRuleSet{
+ local: &scopedGoRuleSet{},
+ universal: &scopedGoRuleSet{},
+ }
+
+ for _, x := range toMerge {
+ out.local = appendScopedRuleSet(out.local, x.local)
+ out.universal = appendScopedRuleSet(out.universal, x.universal)
+ }
+
+ return out
+}
+
+func appendScopedRuleSet(dst, src *scopedGoRuleSet) *scopedGoRuleSet {
+ dst.uncategorized = append(dst.uncategorized, src.uncategorized...)
+ for cat, rules := range src.rulesByCategory {
+ dst.rulesByCategory[cat] = append(dst.rulesByCategory[cat], rules...)
+ dst.categorizedNum += len(rules)
+ }
+ return dst
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go
new file mode 100644
index 000000000..859ed39a4
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/node_category.go
@@ -0,0 +1,159 @@
+package ruleguard
+
+import (
+ "go/ast"
+)
+
+type nodeCategory int
+
+const (
+ nodeUnknown nodeCategory = iota
+
+ nodeArrayType
+ nodeAssignStmt
+ nodeBasicLit
+ nodeBinaryExpr
+ nodeBlockStmt
+ nodeBranchStmt
+ nodeCallExpr
+ nodeCaseClause
+ nodeChanType
+ nodeCommClause
+ nodeCompositeLit
+ nodeDeclStmt
+ nodeDeferStmt
+ nodeEllipsis
+ nodeEmptyStmt
+ nodeExprStmt
+ nodeForStmt
+ nodeFuncDecl
+ nodeFuncLit
+ nodeFuncType
+ nodeGenDecl
+ nodeGoStmt
+ nodeIdent
+ nodeIfStmt
+ nodeImportSpec
+ nodeIncDecStmt
+ nodeIndexExpr
+ nodeInterfaceType
+ nodeKeyValueExpr
+ nodeLabeledStmt
+ nodeMapType
+ nodeParenExpr
+ nodeRangeStmt
+ nodeReturnStmt
+ nodeSelectStmt
+ nodeSelectorExpr
+ nodeSendStmt
+ nodeSliceExpr
+ nodeStarExpr
+ nodeStructType
+ nodeSwitchStmt
+ nodeTypeAssertExpr
+ nodeTypeSpec
+ nodeTypeSwitchStmt
+ nodeUnaryExpr
+ nodeValueSpec
+
+ nodeCategoriesCount
+)
+
+func categorizeNode(n ast.Node) nodeCategory {
+ switch n.(type) {
+ case *ast.ArrayType:
+ return nodeArrayType
+ case *ast.AssignStmt:
+ return nodeAssignStmt
+ case *ast.BasicLit:
+ return nodeBasicLit
+ case *ast.BinaryExpr:
+ return nodeBinaryExpr
+ case *ast.BlockStmt:
+ return nodeBlockStmt
+ case *ast.BranchStmt:
+ return nodeBranchStmt
+ case *ast.CallExpr:
+ return nodeCallExpr
+ case *ast.CaseClause:
+ return nodeCaseClause
+ case *ast.ChanType:
+ return nodeChanType
+ case *ast.CommClause:
+ return nodeCommClause
+ case *ast.CompositeLit:
+ return nodeCompositeLit
+ case *ast.DeclStmt:
+ return nodeDeclStmt
+ case *ast.DeferStmt:
+ return nodeDeferStmt
+ case *ast.Ellipsis:
+ return nodeEllipsis
+ case *ast.EmptyStmt:
+ return nodeEmptyStmt
+ case *ast.ExprStmt:
+ return nodeExprStmt
+ case *ast.ForStmt:
+ return nodeForStmt
+ case *ast.FuncDecl:
+ return nodeFuncDecl
+ case *ast.FuncLit:
+ return nodeFuncLit
+ case *ast.FuncType:
+ return nodeFuncType
+ case *ast.GenDecl:
+ return nodeGenDecl
+ case *ast.GoStmt:
+ return nodeGoStmt
+ case *ast.Ident:
+ return nodeIdent
+ case *ast.IfStmt:
+ return nodeIfStmt
+ case *ast.ImportSpec:
+ return nodeImportSpec
+ case *ast.IncDecStmt:
+ return nodeIncDecStmt
+ case *ast.IndexExpr:
+ return nodeIndexExpr
+ case *ast.InterfaceType:
+ return nodeInterfaceType
+ case *ast.KeyValueExpr:
+ return nodeKeyValueExpr
+ case *ast.LabeledStmt:
+ return nodeLabeledStmt
+ case *ast.MapType:
+ return nodeMapType
+ case *ast.ParenExpr:
+ return nodeParenExpr
+ case *ast.RangeStmt:
+ return nodeRangeStmt
+ case *ast.ReturnStmt:
+ return nodeReturnStmt
+ case *ast.SelectStmt:
+ return nodeSelectStmt
+ case *ast.SelectorExpr:
+ return nodeSelectorExpr
+ case *ast.SendStmt:
+ return nodeSendStmt
+ case *ast.SliceExpr:
+ return nodeSliceExpr
+ case *ast.StarExpr:
+ return nodeStarExpr
+ case *ast.StructType:
+ return nodeStructType
+ case *ast.SwitchStmt:
+ return nodeSwitchStmt
+ case *ast.TypeAssertExpr:
+ return nodeTypeAssertExpr
+ case *ast.TypeSpec:
+ return nodeTypeSpec
+ case *ast.TypeSwitchStmt:
+ return nodeTypeSwitchStmt
+ case *ast.UnaryExpr:
+ return nodeUnaryExpr
+ case *ast.ValueSpec:
+ return nodeValueSpec
+ default:
+ return nodeUnknown
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go
new file mode 100644
index 000000000..98fcd20df
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/parser.go
@@ -0,0 +1,669 @@
+package ruleguard
+
+import (
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/importer"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "io"
+ "path"
+ "regexp"
+ "strconv"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+ "github.com/quasilyte/go-ruleguard/ruleguard/typematch"
+)
+
+type rulesParser struct {
+ filename string
+ fset *token.FileSet
+ res *GoRuleSet
+ types *types.Info
+
+ itab *typematch.ImportsTab
+ dslImporter types.Importer
+ stdImporter types.Importer // TODO(quasilyte): share importer with gogrep?
+ srcImporter types.Importer
+}
+
+func newRulesParser() *rulesParser {
+ var stdlib = map[string]string{
+ "adler32": "hash/adler32",
+ "aes": "crypto/aes",
+ "ascii85": "encoding/ascii85",
+ "asn1": "encoding/asn1",
+ "ast": "go/ast",
+ "atomic": "sync/atomic",
+ "base32": "encoding/base32",
+ "base64": "encoding/base64",
+ "big": "math/big",
+ "binary": "encoding/binary",
+ "bits": "math/bits",
+ "bufio": "bufio",
+ "build": "go/build",
+ "bytes": "bytes",
+ "bzip2": "compress/bzip2",
+ "cgi": "net/http/cgi",
+ "cgo": "runtime/cgo",
+ "cipher": "crypto/cipher",
+ "cmplx": "math/cmplx",
+ "color": "image/color",
+ "constant": "go/constant",
+ "context": "context",
+ "cookiejar": "net/http/cookiejar",
+ "crc32": "hash/crc32",
+ "crc64": "hash/crc64",
+ "crypto": "crypto",
+ "csv": "encoding/csv",
+ "debug": "runtime/debug",
+ "des": "crypto/des",
+ "doc": "go/doc",
+ "draw": "image/draw",
+ "driver": "database/sql/driver",
+ "dsa": "crypto/dsa",
+ "dwarf": "debug/dwarf",
+ "ecdsa": "crypto/ecdsa",
+ "ed25519": "crypto/ed25519",
+ "elf": "debug/elf",
+ "elliptic": "crypto/elliptic",
+ "encoding": "encoding",
+ "errors": "errors",
+ "exec": "os/exec",
+ "expvar": "expvar",
+ "fcgi": "net/http/fcgi",
+ "filepath": "path/filepath",
+ "flag": "flag",
+ "flate": "compress/flate",
+ "fmt": "fmt",
+ "fnv": "hash/fnv",
+ "format": "go/format",
+ "gif": "image/gif",
+ "gob": "encoding/gob",
+ "gosym": "debug/gosym",
+ "gzip": "compress/gzip",
+ "hash": "hash",
+ "heap": "container/heap",
+ "hex": "encoding/hex",
+ "hmac": "crypto/hmac",
+ "html": "html",
+ "http": "net/http",
+ "httptest": "net/http/httptest",
+ "httptrace": "net/http/httptrace",
+ "httputil": "net/http/httputil",
+ "image": "image",
+ "importer": "go/importer",
+ "io": "io",
+ "iotest": "testing/iotest",
+ "ioutil": "io/ioutil",
+ "jpeg": "image/jpeg",
+ "json": "encoding/json",
+ "jsonrpc": "net/rpc/jsonrpc",
+ "list": "container/list",
+ "log": "log",
+ "lzw": "compress/lzw",
+ "macho": "debug/macho",
+ "mail": "net/mail",
+ "math": "math",
+ "md5": "crypto/md5",
+ "mime": "mime",
+ "multipart": "mime/multipart",
+ "net": "net",
+ "os": "os",
+ "palette": "image/color/palette",
+ "parse": "text/template/parse",
+ "parser": "go/parser",
+ "path": "path",
+ "pe": "debug/pe",
+ "pem": "encoding/pem",
+ "pkix": "crypto/x509/pkix",
+ "plan9obj": "debug/plan9obj",
+ "plugin": "plugin",
+ "png": "image/png",
+ "pprof": "runtime/pprof",
+ "printer": "go/printer",
+ "quick": "testing/quick",
+ "quotedprintable": "mime/quotedprintable",
+ "race": "runtime/race",
+ "rand": "math/rand",
+ "rc4": "crypto/rc4",
+ "reflect": "reflect",
+ "regexp": "regexp",
+ "ring": "container/ring",
+ "rpc": "net/rpc",
+ "rsa": "crypto/rsa",
+ "runtime": "runtime",
+ "scanner": "text/scanner",
+ "sha1": "crypto/sha1",
+ "sha256": "crypto/sha256",
+ "sha512": "crypto/sha512",
+ "signal": "os/signal",
+ "smtp": "net/smtp",
+ "sort": "sort",
+ "sql": "database/sql",
+ "strconv": "strconv",
+ "strings": "strings",
+ "subtle": "crypto/subtle",
+ "suffixarray": "index/suffixarray",
+ "sync": "sync",
+ "syntax": "regexp/syntax",
+ "syscall": "syscall",
+ "syslog": "log/syslog",
+ "tabwriter": "text/tabwriter",
+ "tar": "archive/tar",
+ "template": "text/template",
+ "testing": "testing",
+ "textproto": "net/textproto",
+ "time": "time",
+ "tls": "crypto/tls",
+ "token": "go/token",
+ "trace": "runtime/trace",
+ "types": "go/types",
+ "unicode": "unicode",
+ "unsafe": "unsafe",
+ "url": "net/url",
+ "user": "os/user",
+ "utf16": "unicode/utf16",
+ "utf8": "unicode/utf8",
+ "x509": "crypto/x509",
+ "xml": "encoding/xml",
+ "zip": "archive/zip",
+ "zlib": "compress/zlib",
+ }
+
+ // TODO(quasilyte): do we need to pass the fileset here?
+ fset := token.NewFileSet()
+ return &rulesParser{
+ itab: typematch.NewImportsTab(stdlib),
+ stdImporter: importer.Default(),
+ srcImporter: importer.ForCompiler(fset, "source", nil),
+ dslImporter: newDSLImporter(),
+ }
+}
+
+func (p *rulesParser) ParseFile(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
+ p.filename = filename
+ p.fset = fset
+ p.res = &GoRuleSet{
+ local: &scopedGoRuleSet{},
+ universal: &scopedGoRuleSet{},
+ }
+
+ parserFlags := parser.Mode(0)
+ f, err := parser.ParseFile(fset, filename, r, parserFlags)
+ if err != nil {
+ return nil, fmt.Errorf("parser error: %v", err)
+ }
+
+ if f.Name.Name != "gorules" {
+ return nil, fmt.Errorf("expected a gorules package name, found %s", f.Name.Name)
+ }
+
+ typechecker := types.Config{Importer: p.dslImporter}
+ p.types = &types.Info{Types: map[ast.Expr]types.TypeAndValue{}}
+ _, err = typechecker.Check("gorules", fset, []*ast.File{f}, p.types)
+ if err != nil {
+ return nil, fmt.Errorf("typechecker error: %v", err)
+ }
+
+ for _, decl := range f.Decls {
+ decl, ok := decl.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if err := p.parseRuleGroup(decl); err != nil {
+ return nil, err
+ }
+ }
+
+ return p.res, nil
+}
+
+func (p *rulesParser) parseRuleGroup(f *ast.FuncDecl) error {
+ if f.Body == nil {
+ return p.errorf(f, "unexpected empty function body")
+ }
+ if f.Type.Results != nil {
+ return p.errorf(f.Type.Results, "rule group function should not return anything")
+ }
+ params := f.Type.Params.List
+ if len(params) != 1 || len(params[0].Names) != 1 {
+ return p.errorf(f.Type.Params, "rule group function should accept exactly 1 Matcher param")
+ }
+ // TODO(quasilyte): do an actual matcher param type check?
+ matcher := params[0].Names[0].Name
+
+ p.itab.EnterScope()
+ defer p.itab.LeaveScope()
+
+ for _, stmt := range f.Body.List {
+ if _, ok := stmt.(*ast.DeclStmt); ok {
+ continue
+ }
+ stmtExpr, ok := stmt.(*ast.ExprStmt)
+ if !ok {
+ return p.errorf(stmt, "expected a %s method call, found %s", matcher, sprintNode(p.fset, stmt))
+ }
+ call, ok := stmtExpr.X.(*ast.CallExpr)
+ if !ok {
+ return p.errorf(stmt, "expected a %s method call, found %s", matcher, sprintNode(p.fset, stmt))
+ }
+ if err := p.parseCall(matcher, call); err != nil {
+ return err
+ }
+
+ }
+
+ return nil
+}
+
+func (p *rulesParser) parseCall(matcher string, call *ast.CallExpr) error {
+ f := call.Fun.(*ast.SelectorExpr)
+ x, ok := f.X.(*ast.Ident)
+ if ok && x.Name == matcher {
+ return p.parseStmt(f.Sel, call.Args)
+ }
+
+ return p.parseRule(matcher, call)
+}
+
+func (p *rulesParser) parseStmt(fn *ast.Ident, args []ast.Expr) error {
+ switch fn.Name {
+ case "Import":
+ pkgPath, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ pkgName := path.Base(pkgPath)
+ p.itab.Load(pkgName, pkgPath)
+ return nil
+ default:
+ return p.errorf(fn, "unexpected %s method", fn.Name)
+ }
+}
+
+func (p *rulesParser) parseRule(matcher string, call *ast.CallExpr) error {
+ origCall := call
+ var (
+ matchArgs *[]ast.Expr
+ whereArgs *[]ast.Expr
+ suggestArgs *[]ast.Expr
+ reportArgs *[]ast.Expr
+ atArgs *[]ast.Expr
+ )
+ for {
+ chain, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ switch chain.Sel.Name {
+ case "Match":
+ matchArgs = &call.Args
+ case "Where":
+ whereArgs = &call.Args
+ case "Suggest":
+ suggestArgs = &call.Args
+ case "Report":
+ reportArgs = &call.Args
+ case "At":
+ atArgs = &call.Args
+ default:
+ return p.errorf(chain.Sel, "unexpected %s method", chain.Sel.Name)
+ }
+ call, ok = chain.X.(*ast.CallExpr)
+ if !ok {
+ break
+ }
+ }
+
+ dst := p.res.universal
+ filters := map[string]submatchFilter{}
+ proto := goRule{
+ filename: p.filename,
+ filters: filters,
+ }
+ var alternatives []string
+
+ if matchArgs == nil {
+ return p.errorf(origCall, "missing Match() call")
+ }
+ for _, arg := range *matchArgs {
+ alt, ok := p.toStringValue(arg)
+ if !ok {
+ return p.errorf(arg, "expected a string literal argument")
+ }
+ alternatives = append(alternatives, alt)
+ }
+
+ if whereArgs != nil {
+ if err := p.walkFilter(filters, (*whereArgs)[0], false); err != nil {
+ return err
+ }
+ }
+
+ if suggestArgs != nil {
+ s, ok := p.toStringValue((*suggestArgs)[0])
+ if !ok {
+ return p.errorf((*suggestArgs)[0], "expected string literal argument")
+ }
+ proto.suggestion = s
+ }
+
+ if reportArgs == nil {
+ if suggestArgs == nil {
+ return p.errorf(origCall, "missing Report() or Suggest() call")
+ }
+ proto.msg = "suggestion: " + proto.suggestion
+ } else {
+ message, ok := p.toStringValue((*reportArgs)[0])
+ if !ok {
+ return p.errorf((*reportArgs)[0], "expected string literal argument")
+ }
+ proto.msg = message
+ }
+
+ if atArgs != nil {
+ index, ok := (*atArgs)[0].(*ast.IndexExpr)
+ if !ok {
+ return p.errorf((*atArgs)[0], "expected %s[`varname`] expression", matcher)
+ }
+ arg, ok := p.toStringValue(index.Index)
+ if !ok {
+ return p.errorf(index.Index, "expected a string literal index")
+ }
+ proto.location = arg
+ }
+
+ for i, alt := range alternatives {
+ rule := proto
+ pat, err := gogrep.Parse(p.fset, alt)
+ if err != nil {
+ return p.errorf((*matchArgs)[i], "gogrep parse: %v", err)
+ }
+ rule.pat = pat
+ cat := categorizeNode(pat.Expr)
+ if cat == nodeUnknown {
+ dst.uncategorized = append(dst.uncategorized, rule)
+ } else {
+ dst.categorizedNum++
+ dst.rulesByCategory[cat] = append(dst.rulesByCategory[cat], rule)
+ }
+ }
+
+ return nil
+}
+
+func (p *rulesParser) walkFilter(dst map[string]submatchFilter, e ast.Expr, negate bool) error {
+ typeAnd := func(x, y func(typeQuery) bool) func(typeQuery) bool {
+ if x == nil {
+ return y
+ }
+ return func(q typeQuery) bool {
+ return x(q) && y(q)
+ }
+ }
+ textAnd := func(x, y func(string) bool) func(string) bool {
+ if x == nil {
+ return y
+ }
+ return func(s string) bool {
+ return x(s) && y(s)
+ }
+ }
+
+ switch e := e.(type) {
+ case *ast.UnaryExpr:
+ if e.Op == token.NOT {
+ return p.walkFilter(dst, e.X, !negate)
+ }
+ case *ast.BinaryExpr:
+ switch e.Op {
+ case token.LAND:
+ err := p.walkFilter(dst, e.X, negate)
+ if err != nil {
+ return err
+ }
+ return p.walkFilter(dst, e.Y, negate)
+ case token.GEQ, token.LEQ, token.LSS, token.GTR, token.EQL, token.NEQ:
+ operand := p.toFilterOperand(e.X)
+ y := p.types.Types[e.Y].Value
+ expectedResult := !negate
+ if operand.path == "Type.Size" && y != nil {
+ filter := dst[operand.varName]
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ x := constant.MakeInt64(q.ctx.Sizes.Sizeof(q.x))
+ return expectedResult == constant.Compare(x, e.Op, y)
+ })
+ dst[operand.varName] = filter
+ return nil
+ }
+ if operand.path == "Text" && y != nil {
+ filter := dst[operand.varName]
+ filter.textPred = textAnd(filter.textPred, func(s string) bool {
+ x := constant.MakeString(s)
+ return expectedResult == constant.Compare(x, e.Op, y)
+ })
+ dst[operand.varName] = filter
+ return nil
+ }
+ }
+ }
+
+ // TODO(quasilyte): refactor and extend.
+ operand := p.toFilterOperand(e)
+ args := operand.args
+ filter := dst[operand.varName]
+ switch operand.path {
+ default:
+ return p.errorf(e, "%s is not a valid filter expression", sprintNode(p.fset, e))
+ case "Pure":
+ if negate {
+ filter.pure = bool3false
+ } else {
+ filter.pure = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Const":
+ if negate {
+ filter.constant = bool3false
+ } else {
+ filter.constant = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Addressable":
+ if negate {
+ filter.addressable = bool3false
+ } else {
+ filter.addressable = bool3true
+ }
+ dst[operand.varName] = filter
+ case "Text.Matches":
+ patternString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ re, err := regexp.Compile(patternString)
+ if err != nil {
+ return p.errorf(args[0], "parse regexp: %v", err)
+ }
+ wantMatched := !negate
+ filter.textPred = textAnd(filter.textPred, func(s string) bool {
+ return wantMatched == re.MatchString(s)
+ })
+ dst[operand.varName] = filter
+ case "Type.Is":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ ctx := typematch.Context{Itab: p.itab}
+ pat, err := typematch.Parse(&ctx, typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ wantIdentical := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantIdentical == pat.MatchIdentical(q.x)
+ })
+ dst[operand.varName] = filter
+ case "Type.ConvertibleTo":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ y, err := typeFromString(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ if y == nil {
+ return p.errorf(args[0], "can't convert %s into a type constraint yet", typeString)
+ }
+ wantConvertible := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantConvertible == types.ConvertibleTo(q.x, y)
+ })
+ dst[operand.varName] = filter
+ case "Type.AssignableTo":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ y, err := typeFromString(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ if y == nil {
+ return p.errorf(args[0], "can't convert %s into a type constraint yet", typeString)
+ }
+ wantAssignable := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantAssignable == types.AssignableTo(q.x, y)
+ })
+ dst[operand.varName] = filter
+ case "Type.Implements":
+ typeString, ok := p.toStringValue(args[0])
+ if !ok {
+ return p.errorf(args[0], "expected a string literal argument")
+ }
+ n, err := parser.ParseExpr(typeString)
+ if err != nil {
+ return p.errorf(args[0], "parse type expr: %v", err)
+ }
+ e, ok := n.(*ast.SelectorExpr)
+ if !ok {
+ return p.errorf(args[0], "only qualified names are supported")
+ }
+ pkgName, ok := e.X.(*ast.Ident)
+ if !ok {
+ return p.errorf(e.X, "invalid package name")
+ }
+ pkgPath, ok := p.itab.Lookup(pkgName.Name)
+ if !ok {
+ return p.errorf(e.X, "package %s is not imported", pkgName.Name)
+ }
+ pkg, err := p.stdImporter.Import(pkgPath)
+ if err != nil {
+ pkg, err = p.srcImporter.Import(pkgPath)
+ if err != nil {
+ return p.errorf(e, "can't load %s: %v", pkgPath, err)
+ }
+ }
+ obj := pkg.Scope().Lookup(e.Sel.Name)
+ if obj == nil {
+ return p.errorf(e, "%s is not found in %s", e.Sel.Name, pkgPath)
+ }
+ iface, ok := obj.Type().Underlying().(*types.Interface)
+ if !ok {
+ return p.errorf(e, "%s is not an interface type", e.Sel.Name)
+ }
+ wantImplemented := !negate
+ filter.typePred = typeAnd(filter.typePred, func(q typeQuery) bool {
+ return wantImplemented == types.Implements(q.x, iface)
+ })
+ dst[operand.varName] = filter
+ }
+
+ return nil
+}
+
+func (p *rulesParser) toIntValue(x ast.Node) (int64, bool) {
+ lit, ok := x.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return 0, false
+ }
+ v, err := strconv.ParseInt(lit.Value, 10, 64)
+ return v, err == nil
+}
+
+func (p *rulesParser) toStringValue(x ast.Node) (string, bool) {
+ switch x := x.(type) {
+ case *ast.BasicLit:
+ if x.Kind != token.STRING {
+ return "", false
+ }
+ return unquoteNode(x), true
+ case ast.Expr:
+ typ, ok := p.types.Types[x]
+ if !ok || typ.Type.String() != "string" {
+ return "", false
+ }
+ str := typ.Value.ExactString()
+ str = str[1 : len(str)-1] // remove quotes
+ return str, true
+ }
+ return "", false
+}
+
+func (p *rulesParser) toFilterOperand(e ast.Expr) filterOperand {
+ var o filterOperand
+
+ if call, ok := e.(*ast.CallExpr); ok {
+ o.args = call.Args
+ e = call.Fun
+ }
+ var path string
+ for {
+ selector, ok := e.(*ast.SelectorExpr)
+ if !ok {
+ break
+ }
+ if path == "" {
+ path = selector.Sel.Name
+ } else {
+ path = selector.Sel.Name + "." + path
+ }
+ e = selector.X
+ }
+ indexing, ok := e.(*ast.IndexExpr)
+ if !ok {
+ return o
+ }
+ mapIdent, ok := indexing.X.(*ast.Ident)
+ if !ok {
+ return o
+ }
+ indexString, ok := p.toStringValue(indexing.Index)
+ if !ok {
+ return o
+ }
+
+ o.mapName = mapIdent.Name
+ o.varName = indexString
+ o.path = path
+ return o
+}
+
+func (p *rulesParser) errorf(n ast.Node, format string, args ...interface{}) error {
+ loc := p.fset.Position(n.Pos())
+ return fmt.Errorf("%s:%d: %s",
+ loc.Filename, loc.Line, fmt.Sprintf(format, args...))
+}
+
+type filterOperand struct {
+ mapName string
+ varName string
+ path string
+ args []ast.Expr
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go
new file mode 100644
index 000000000..f6032c862
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/ruleguard.go
@@ -0,0 +1,45 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+ "io"
+)
+
+type Context struct {
+ Types *types.Info
+ Sizes types.Sizes
+ Fset *token.FileSet
+ Report func(rule GoRuleInfo, n ast.Node, msg string, s *Suggestion)
+ Pkg *types.Package
+}
+
+type Suggestion struct {
+ From token.Pos
+ To token.Pos
+ Replacement []byte
+}
+
+func ParseRules(filename string, fset *token.FileSet, r io.Reader) (*GoRuleSet, error) {
+ p := newRulesParser()
+ return p.ParseFile(filename, fset, r)
+}
+
+func RunRules(ctx *Context, f *ast.File, rules *GoRuleSet) error {
+ return newRulesRunner(ctx, rules).run(f)
+}
+
+type GoRuleInfo struct {
+ // Filename is a file that defined this rule.
+ Filename string
+}
+
+type GoRuleSet struct {
+ universal *scopedGoRuleSet
+ local *scopedGoRuleSet
+}
+
+func MergeRuleSets(toMerge []*GoRuleSet) *GoRuleSet {
+ return mergeRuleSets(toMerge)
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go
new file mode 100644
index 000000000..971e92aed
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/runner.go
@@ -0,0 +1,194 @@
+package ruleguard
+
+import (
+ "bytes"
+ "go/ast"
+ "go/printer"
+ "io/ioutil"
+ "strings"
+
+ "github.com/quasilyte/go-ruleguard/internal/mvdan.cc/gogrep"
+)
+
+type rulesRunner struct {
+ ctx *Context
+ rules *GoRuleSet
+
+ filename string
+ src []byte
+}
+
+func newRulesRunner(ctx *Context, rules *GoRuleSet) *rulesRunner {
+ return &rulesRunner{
+ ctx: ctx,
+ rules: rules,
+ }
+}
+
+func (rr *rulesRunner) nodeText(n ast.Node) []byte {
+ from := rr.ctx.Fset.Position(n.Pos()).Offset
+ to := rr.ctx.Fset.Position(n.End()).Offset
+ src := rr.fileBytes()
+ if (from >= 0 && int(from) < len(src)) && (to >= 0 && int(to) < len(src)) {
+ return src[from:to]
+ }
+ // Fallback to the printer.
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, rr.ctx.Fset, n); err != nil {
+ panic(err)
+ }
+ return buf.Bytes()
+}
+
+func (rr *rulesRunner) fileBytes() []byte {
+ if rr.src != nil {
+ return rr.src
+ }
+
+ // TODO(quasilyte): re-use src slice?
+ src, err := ioutil.ReadFile(rr.filename)
+ if err != nil || src == nil {
+ // Assign a zero-length slice so rr.src
+ // is never nil during the second fileBytes call.
+ rr.src = make([]byte, 0)
+ } else {
+ rr.src = src
+ }
+ return rr.src
+}
+
+func (rr *rulesRunner) run(f *ast.File) error {
+ // TODO(quasilyte): run local rules as well.
+
+ rr.filename = rr.ctx.Fset.Position(f.Pos()).Filename
+
+ for _, rule := range rr.rules.universal.uncategorized {
+ rule.pat.Match(f, func(m gogrep.MatchData) {
+ rr.handleMatch(rule, m)
+ })
+ }
+
+ if rr.rules.universal.categorizedNum != 0 {
+ ast.Inspect(f, func(n ast.Node) bool {
+ cat := categorizeNode(n)
+ for _, rule := range rr.rules.universal.rulesByCategory[cat] {
+ matched := false
+ rule.pat.MatchNode(n, func(m gogrep.MatchData) {
+ matched = rr.handleMatch(rule, m)
+ })
+ if matched {
+ break
+ }
+ }
+ return true
+ })
+ }
+
+ return nil
+}
+
+func (rr *rulesRunner) handleMatch(rule goRule, m gogrep.MatchData) bool {
+ for name, node := range m.Values {
+ expr, ok := node.(ast.Expr)
+ if !ok {
+ continue
+ }
+ filter, ok := rule.filters[name]
+ if !ok {
+ continue
+ }
+ if filter.typePred != nil {
+ typ := rr.ctx.Types.TypeOf(expr)
+ q := typeQuery{x: typ, ctx: rr.ctx}
+ if !filter.typePred(q) {
+ return false
+ }
+ }
+ if filter.textPred != nil {
+ if !filter.textPred(string(rr.nodeText(expr))) {
+ return false
+ }
+ }
+ switch filter.addressable {
+ case bool3true:
+ if !isAddressable(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isAddressable(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ switch filter.pure {
+ case bool3true:
+ if !isPure(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isPure(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ switch filter.constant {
+ case bool3true:
+ if !isConstant(rr.ctx.Types, expr) {
+ return false
+ }
+ case bool3false:
+ if isConstant(rr.ctx.Types, expr) {
+ return false
+ }
+ }
+ }
+
+ prefix := ""
+ if rule.severity != "" {
+ prefix = rule.severity + ": "
+ }
+ message := prefix + rr.renderMessage(rule.msg, m.Node, m.Values, true)
+ node := m.Node
+ if rule.location != "" {
+ node = m.Values[rule.location]
+ }
+ var suggestion *Suggestion
+ if rule.suggestion != "" {
+ suggestion = &Suggestion{
+ Replacement: []byte(rr.renderMessage(rule.suggestion, m.Node, m.Values, false)),
+ From: node.Pos(),
+ To: node.End(),
+ }
+ }
+ info := GoRuleInfo{
+ Filename: rule.filename,
+ }
+ rr.ctx.Report(info, node, message, suggestion)
+ return true
+}
+
+func (rr *rulesRunner) renderMessage(msg string, n ast.Node, nodes map[string]ast.Node, truncate bool) string {
+ var buf strings.Builder
+ if strings.Contains(msg, "$$") {
+ buf.Write(rr.nodeText(n))
+ msg = strings.ReplaceAll(msg, "$$", buf.String())
+ }
+ if len(nodes) == 0 {
+ return msg
+ }
+ for name, n := range nodes {
+ key := "$" + name
+ if !strings.Contains(msg, key) {
+ continue
+ }
+ buf.Reset()
+ buf.Write(rr.nodeText(n))
+ // Don't interpolate strings that are too long.
+ var replacement string
+ if truncate && buf.Len() > 60 {
+ replacement = key
+ } else {
+ replacement = buf.String()
+ }
+ msg = strings.ReplaceAll(msg, key, replacement)
+ }
+ return msg
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go
new file mode 100644
index 000000000..5e14880cd
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/typematch/typematch.go
@@ -0,0 +1,340 @@
+package typematch
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+type patternOp int
+
+const (
+ opBuiltinType patternOp = iota
+ opPointer
+ opVar
+ opSlice
+ opArray
+ opMap
+ opChan
+ opNamed
+)
+
+type Pattern struct {
+ typeMatches map[string]types.Type
+ int64Matches map[string]int64
+
+ root *pattern
+}
+
+type pattern struct {
+ value interface{}
+ op patternOp
+ subs []*pattern
+}
+
+type ImportsTab struct {
+ imports []map[string]string
+}
+
+func NewImportsTab(initial map[string]string) *ImportsTab {
+ return &ImportsTab{imports: []map[string]string{initial}}
+}
+
+func (itab *ImportsTab) Lookup(pkgName string) (string, bool) {
+ for i := len(itab.imports) - 1; i >= 0; i-- {
+ pkgPath, ok := itab.imports[i][pkgName]
+ if ok {
+ return pkgPath, true
+ }
+ }
+ return "", false
+}
+
+func (itab *ImportsTab) Load(pkgName, pkgPath string) {
+ itab.imports[len(itab.imports)-1][pkgName] = pkgPath
+}
+
+func (itab *ImportsTab) EnterScope() {
+ itab.imports = append(itab.imports, map[string]string{})
+}
+
+func (itab *ImportsTab) LeaveScope() {
+ itab.imports = itab.imports[:len(itab.imports)-1]
+}
+
+type Context struct {
+ Itab *ImportsTab
+}
+
+func Parse(ctx *Context, s string) (*Pattern, error) {
+ noDollars := strings.ReplaceAll(s, "$", "__")
+ n, err := parser.ParseExpr(noDollars)
+ if err != nil {
+ return nil, err
+ }
+ root := parseExpr(ctx, n)
+ if root == nil {
+ return nil, fmt.Errorf("can't convert %s type expression", s)
+ }
+ p := &Pattern{
+ typeMatches: map[string]types.Type{},
+ int64Matches: map[string]int64{},
+ root: root,
+ }
+ return p, nil
+}
+
+var (
+ builtinTypeByName = map[string]types.Type{
+ "bool": types.Typ[types.Bool],
+ "int": types.Typ[types.Int],
+ "int8": types.Typ[types.Int8],
+ "int16": types.Typ[types.Int16],
+ "int32": types.Typ[types.Int32],
+ "int64": types.Typ[types.Int64],
+ "uint": types.Typ[types.Uint],
+ "uint8": types.Typ[types.Uint8],
+ "uint16": types.Typ[types.Uint16],
+ "uint32": types.Typ[types.Uint32],
+ "uint64": types.Typ[types.Uint64],
+ "uintptr": types.Typ[types.Uintptr],
+ "float32": types.Typ[types.Float32],
+ "float64": types.Typ[types.Float64],
+ "complex64": types.Typ[types.Complex64],
+ "complex128": types.Typ[types.Complex128],
+ "string": types.Typ[types.String],
+
+ "error": types.Universe.Lookup("error").Type(),
+
+ // Aliases.
+ "byte": types.Typ[types.Uint8],
+ "rune": types.Typ[types.Int32],
+ }
+
+ efaceType = types.NewInterfaceType(nil, nil)
+)
+
+func parseExpr(ctx *Context, e ast.Expr) *pattern {
+ switch e := e.(type) {
+ case *ast.Ident:
+ basic, ok := builtinTypeByName[e.Name]
+ if ok {
+ return &pattern{op: opBuiltinType, value: basic}
+ }
+ if strings.HasPrefix(e.Name, "__") {
+ name := strings.TrimPrefix(e.Name, "__")
+ return &pattern{op: opVar, value: name}
+ }
+
+ case *ast.SelectorExpr:
+ pkg, ok := e.X.(*ast.Ident)
+ if !ok {
+ return nil
+ }
+ pkgPath, ok := ctx.Itab.Lookup(pkg.Name)
+ if !ok {
+ return nil
+ }
+ return &pattern{op: opNamed, value: [2]string{pkgPath, e.Sel.Name}}
+
+ case *ast.StarExpr:
+ elem := parseExpr(ctx, e.X)
+ if elem == nil {
+ return nil
+ }
+ return &pattern{op: opPointer, subs: []*pattern{elem}}
+
+ case *ast.ArrayType:
+ elem := parseExpr(ctx, e.Elt)
+ if elem == nil {
+ return nil
+ }
+ if e.Len == nil {
+ return &pattern{
+ op: opSlice,
+ subs: []*pattern{elem},
+ }
+ }
+ if id, ok := e.Len.(*ast.Ident); ok && strings.HasPrefix(id.Name, "__") {
+ name := strings.TrimPrefix(id.Name, "__")
+ return &pattern{
+ op: opArray,
+ value: name,
+ subs: []*pattern{elem},
+ }
+ }
+ lit, ok := e.Len.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return nil
+ }
+ length, err := strconv.ParseInt(lit.Value, 10, 64)
+ if err != nil {
+ return nil
+ }
+ return &pattern{
+ op: opArray,
+ value: length,
+ subs: []*pattern{elem},
+ }
+
+ case *ast.MapType:
+ keyType := parseExpr(ctx, e.Key)
+ if keyType == nil {
+ return nil
+ }
+ valType := parseExpr(ctx, e.Value)
+ if valType == nil {
+ return nil
+ }
+ return &pattern{
+ op: opMap,
+ subs: []*pattern{keyType, valType},
+ }
+
+ case *ast.ChanType:
+ valType := parseExpr(ctx, e.Value)
+ if valType == nil {
+ return nil
+ }
+ var dir types.ChanDir
+ switch {
+ case e.Dir&ast.SEND != 0 && e.Dir&ast.RECV != 0:
+ dir = types.SendRecv
+ case e.Dir&ast.SEND != 0:
+ dir = types.SendOnly
+ case e.Dir&ast.RECV != 0:
+ dir = types.RecvOnly
+ default:
+ return nil
+ }
+ return &pattern{
+ op: opChan,
+ value: dir,
+ subs: []*pattern{valType},
+ }
+
+ case *ast.ParenExpr:
+ return parseExpr(ctx, e.X)
+
+ case *ast.InterfaceType:
+ if len(e.Methods.List) == 0 {
+ return &pattern{op: opBuiltinType, value: efaceType}
+ }
+ }
+
+ return nil
+}
+
+func (p *Pattern) MatchIdentical(typ types.Type) bool {
+ p.reset()
+ return p.matchIdentical(p.root, typ)
+}
+
+func (p *Pattern) reset() {
+ if len(p.int64Matches) != 0 {
+ p.int64Matches = map[string]int64{}
+ }
+ if len(p.typeMatches) != 0 {
+ p.typeMatches = map[string]types.Type{}
+ }
+}
+
+func (p *Pattern) matchIdentical(sub *pattern, typ types.Type) bool {
+ switch sub.op {
+ case opVar:
+ name := sub.value.(string)
+ if name == "_" {
+ return true
+ }
+ y, ok := p.typeMatches[name]
+ if !ok {
+ p.typeMatches[name] = typ
+ return true
+ }
+ if y == nil {
+ return typ == nil
+ }
+ return types.Identical(typ, y)
+
+ case opBuiltinType:
+ return types.Identical(typ, sub.value.(types.Type))
+
+ case opPointer:
+ typ, ok := typ.(*types.Pointer)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opSlice:
+ typ, ok := typ.(*types.Slice)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opArray:
+ typ, ok := typ.(*types.Array)
+ if !ok {
+ return false
+ }
+ var wantLen int64
+ switch v := sub.value.(type) {
+ case string:
+ if v == "_" {
+ wantLen = typ.Len()
+ break
+ }
+ length, ok := p.int64Matches[v]
+ if ok {
+ wantLen = length
+ } else {
+ p.int64Matches[v] = typ.Len()
+ wantLen = typ.Len()
+ }
+ case int64:
+ wantLen = v
+ }
+ return wantLen == typ.Len() && p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opMap:
+ typ, ok := typ.(*types.Map)
+ if !ok {
+ return false
+ }
+ return p.matchIdentical(sub.subs[0], typ.Key()) &&
+ p.matchIdentical(sub.subs[1], typ.Elem())
+
+ case opChan:
+ typ, ok := typ.(*types.Chan)
+ if !ok {
+ return false
+ }
+ dir := sub.value.(types.ChanDir)
+ return dir == typ.Dir() && p.matchIdentical(sub.subs[0], typ.Elem())
+
+ case opNamed:
+ typ, ok := typ.(*types.Named)
+ if !ok {
+ return false
+ }
+ obj := typ.Obj()
+ pkg := obj.Pkg()
+ // pkg can be nil for builtin named types.
+ // There is no point in checking anything else as we never
+ // generate the opNamed for such types.
+ if pkg == nil {
+ return false
+ }
+ pkgPath := sub.value.([2]string)[0]
+ typeName := sub.value.([2]string)[1]
+ return obj.Pkg().Path() == pkgPath && typeName == obj.Name()
+
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go
new file mode 100644
index 000000000..c17dc2431
--- /dev/null
+++ b/vendor/github.com/quasilyte/go-ruleguard/ruleguard/utils.go
@@ -0,0 +1,205 @@
+package ruleguard
+
+import (
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "go/types"
+ "strconv"
+ "strings"
+)
+
+func unquoteNode(lit *ast.BasicLit) string {
+ return lit.Value[1 : len(lit.Value)-1]
+}
+
+func sprintNode(fset *token.FileSet, n ast.Node) string {
+ if fset == nil {
+ fset = token.NewFileSet()
+ }
+ var buf strings.Builder
+ if err := printer.Fprint(&buf, fset, n); err != nil {
+ return ""
+ }
+ return buf.String()
+}
+
+var basicTypeByName = map[string]types.Type{
+ "bool": types.Typ[types.Bool],
+ "int": types.Typ[types.Int],
+ "int8": types.Typ[types.Int8],
+ "int16": types.Typ[types.Int16],
+ "int32": types.Typ[types.Int32],
+ "int64": types.Typ[types.Int64],
+ "uint": types.Typ[types.Uint],
+ "uint8": types.Typ[types.Uint8],
+ "uint16": types.Typ[types.Uint16],
+ "uint32": types.Typ[types.Uint32],
+ "uint64": types.Typ[types.Uint64],
+ "uintptr": types.Typ[types.Uintptr],
+ "float32": types.Typ[types.Float32],
+ "float64": types.Typ[types.Float64],
+ "complex64": types.Typ[types.Complex64],
+ "complex128": types.Typ[types.Complex128],
+ "string": types.Typ[types.String],
+}
+
+func typeFromString(s string) (types.Type, error) {
+ s = strings.ReplaceAll(s, "?", "__any")
+
+ n, err := parser.ParseExpr(s)
+ if err != nil {
+ return nil, err
+ }
+ return typeFromNode(n), nil
+}
+
+func typeFromNode(e ast.Expr) types.Type {
+ switch e := e.(type) {
+ case *ast.Ident:
+ basic, ok := basicTypeByName[e.Name]
+ if ok {
+ return basic
+ }
+
+ case *ast.ArrayType:
+ elem := typeFromNode(e.Elt)
+ if elem == nil {
+ return nil
+ }
+ if e.Len == nil {
+ return types.NewSlice(elem)
+ }
+ lit, ok := e.Len.(*ast.BasicLit)
+ if !ok || lit.Kind != token.INT {
+ return nil
+ }
+ length, err := strconv.Atoi(lit.Value)
+ if err != nil {
+ return nil
+ }
+ types.NewArray(elem, int64(length))
+
+ case *ast.MapType:
+ keyType := typeFromNode(e.Key)
+ if keyType == nil {
+ return nil
+ }
+ valType := typeFromNode(e.Value)
+ if valType == nil {
+ return nil
+ }
+ return types.NewMap(keyType, valType)
+
+ case *ast.StarExpr:
+ typ := typeFromNode(e.X)
+ if typ != nil {
+ return types.NewPointer(typ)
+ }
+
+ case *ast.ParenExpr:
+ return typeFromNode(e.X)
+
+ case *ast.InterfaceType:
+ if len(e.Methods.List) == 0 {
+ return types.NewInterfaceType(nil, nil)
+ }
+ }
+
+ return nil
+}
+
+// isPure reports whether expr is a softly safe expression and contains
+// no significant side-effects. As opposed to strictly safe expressions,
+// soft safe expressions permit some forms of side-effects, like
+// panic possibility during indexing or nil pointer dereference.
+//
+// Uses types info to determine type conversion expressions that
+// are the only permitted kinds of call expressions.
+// Note that it does not check whether the called function really
+// has any side effects. The analysis is very conservative.
+func isPure(info *types.Info, expr ast.Expr) bool {
+ // This list switch is not comprehensive and uses
+ // whitelist to be on the conservative side.
+ // Can be extended as needed.
+
+ switch expr := expr.(type) {
+ case *ast.StarExpr:
+ return isPure(info, expr.X)
+ case *ast.BinaryExpr:
+ return isPure(info, expr.X) &&
+ isPure(info, expr.Y)
+ case *ast.UnaryExpr:
+ return expr.Op != token.ARROW &&
+ isPure(info, expr.X)
+ case *ast.BasicLit, *ast.Ident:
+ return true
+ case *ast.IndexExpr:
+ return isPure(info, expr.X) &&
+ isPure(info, expr.Index)
+ case *ast.SelectorExpr:
+ return isPure(info, expr.X)
+ case *ast.ParenExpr:
+ return isPure(info, expr.X)
+ case *ast.CompositeLit:
+ return isPureList(info, expr.Elts)
+ case *ast.CallExpr:
+ return isTypeExpr(info, expr.Fun) && isPureList(info, expr.Args)
+
+ default:
+ return false
+ }
+}
+
+// isPureList reports whether every expr in list is safe.
+//
+// See isPure.
+func isPureList(info *types.Info, list []ast.Expr) bool {
+ for _, expr := range list {
+ if !isPure(info, expr) {
+ return false
+ }
+ }
+ return true
+}
+
+func isAddressable(info *types.Info, expr ast.Expr) bool {
+ tv, ok := info.Types[expr]
+ return ok && tv.Addressable()
+}
+
+func isConstant(info *types.Info, expr ast.Expr) bool {
+ tv, ok := info.Types[expr]
+ return ok && tv.Value != nil
+}
+
+// isTypeExpr reports whether x represents a type expression.
+//
+// Type expression does not evaluate to any run time value,
+// but rather describes a type that is used inside Go expression.
+//
+// For example, (*T)(v) is a CallExpr that "calls" (*T).
+// (*T) is a type expression that tells Go compiler type v should be converted to.
+func isTypeExpr(info *types.Info, x ast.Expr) bool {
+ switch x := x.(type) {
+ case *ast.StarExpr:
+ return isTypeExpr(info, x.X)
+ case *ast.ParenExpr:
+ return isTypeExpr(info, x.X)
+ case *ast.SelectorExpr:
+ return isTypeExpr(info, x.Sel)
+
+ case *ast.Ident:
+		// Identifier may be a type expression if the object
+		// it refers to is a type name.
+ _, ok := info.ObjectOf(x).(*types.TypeName)
+ return ok
+
+ case *ast.FuncType, *ast.StructType, *ast.InterfaceType, *ast.ArrayType, *ast.MapType, *ast.ChanType:
+ return true
+
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/LICENSE b/vendor/github.com/quasilyte/regex/syntax/LICENSE
new file mode 100644
index 000000000..f0c81282b
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Iskander (Alex) Sharipov / quasilyte
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/quasilyte/regex/syntax/README.md b/vendor/github.com/quasilyte/regex/syntax/README.md
new file mode 100644
index 000000000..b70e25ad9
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/README.md
@@ -0,0 +1,29 @@
+# Package `regex/syntax`
+
+Package `syntax` provides regular expressions parser as well as AST definitions.
+
+## Rationale
+
+The advantages of this package over stdlib [regexp/syntax](https://golang.org/pkg/regexp/syntax/):
+
+1. Does no transformations/optimizations during the parsing.
+   The produced parse tree is lossless.
+
+2. Simpler AST representation.
+
+3. Can parse most PCRE operations in addition to [re2](https://github.com/google/re2/wiki/Syntax) syntax.
+ It can also handle PHP/Perl style patterns with delimiters.
+
+4. This package is easier to extend than something from the standard library.
+
+This package makes almost no assumptions about how the generated AST is going to be used
+so it preserves as much syntax information as possible.
+
+It's easy to write another intermediate representation on top of it. The main
+function of this package is to convert a textual regexp pattern into a more
+structured form that can be processed more easily.
+
+## Users
+
+* [go-critic](https://github.com/go-critic/go-critic) - Go static analyzer
+* [NoVerify](https://github.com/VKCOM/noverify) - PHP static analyzer
diff --git a/vendor/github.com/quasilyte/regex/syntax/ast.go b/vendor/github.com/quasilyte/regex/syntax/ast.go
new file mode 100644
index 000000000..4d21a9432
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/ast.go
@@ -0,0 +1,64 @@
+package syntax
+
+import (
+ "strings"
+)
+
+type Regexp struct {
+ Pattern string
+ Expr Expr
+}
+
+type RegexpPCRE struct {
+ Pattern string
+ Expr Expr
+
+ Source string
+ Modifiers string
+ Delim [2]byte
+}
+
+func (re *RegexpPCRE) HasModifier(mod byte) bool {
+ return strings.IndexByte(re.Modifiers, mod) >= 0
+}
+
+type Expr struct {
+ // The operations that this expression performs. See `operation.go`.
+ Op Operation
+
+ Form Form
+
+ _ [2]byte // Reserved
+
+ // Pos describes a source location inside regexp pattern.
+ Pos Position
+
+ // Args is a list of sub-expressions of this expression.
+ //
+ // See Operation constants documentation to learn how to
+ // interpret the particular expression args.
+ Args []Expr
+
+ // Value holds expression textual value.
+ //
+ // Usually, that value is identical to src[Begin():End()],
+ // but this is not true for programmatically generated objects.
+ Value string
+}
+
+// Begin returns expression leftmost offset.
+func (e Expr) Begin() uint16 { return e.Pos.Begin }
+
+// End returns expression rightmost offset.
+func (e Expr) End() uint16 { return e.Pos.End }
+
+// LastArg returns expression last argument.
+//
+// Should not be called on expressions that may have 0 arguments.
+func (e Expr) LastArg() Expr {
+ return e.Args[len(e.Args)-1]
+}
+
+type Operation byte
+
+type Form byte
diff --git a/vendor/github.com/quasilyte/regex/syntax/errors.go b/vendor/github.com/quasilyte/regex/syntax/errors.go
new file mode 100644
index 000000000..beefba5f9
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/errors.go
@@ -0,0 +1,27 @@
+package syntax
+
+type ParseError struct {
+ Pos Position
+ Message string
+}
+
+func (e ParseError) Error() string { return e.Message }
+
+func throw(pos Position, message string) {
+ panic(ParseError{Pos: pos, Message: message})
+}
+
+func throwExpectedFound(pos Position, expected, found string) {
+ throw(pos, "expected '"+expected+"', found '"+found+"'")
+}
+
+func throwUnexpectedToken(pos Position, token string) {
+ throw(pos, "unexpected token: "+token)
+}
+
+func newPos(begin, end int) Position {
+ return Position{
+ Begin: uint16(begin),
+ End: uint16(end),
+ }
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/go.mod b/vendor/github.com/quasilyte/regex/syntax/go.mod
new file mode 100644
index 000000000..2a4e1f33b
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/go.mod
@@ -0,0 +1,3 @@
+module github.com/quasilyte/regex/syntax
+
+go 1.14
diff --git a/vendor/github.com/quasilyte/regex/syntax/lexer.go b/vendor/github.com/quasilyte/regex/syntax/lexer.go
new file mode 100644
index 000000000..aae146c2e
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/lexer.go
@@ -0,0 +1,454 @@
+package syntax
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ kind tokenKind
+ pos Position
+}
+
+func (tok token) String() string {
+ return tok.kind.String()
+}
+
+type tokenKind byte
+
+//go:generate stringer -type=tokenKind -trimprefix=tok -linecomment=true
+const (
+ tokNone tokenKind = iota
+
+ tokChar
+ tokGroupFlags
+ tokPosixClass
+ tokConcat
+ tokRepeat
+ tokEscapeChar
+ tokEscapeMeta
+ tokEscapeOctal
+ tokEscapeUni
+ tokEscapeUniFull
+ tokEscapeHex
+ tokEscapeHexFull
+ tokComment
+
+ tokQ // \Q
+ tokMinus // -
+ tokLbracket // [
+ tokLbracketCaret // [^
+ tokRbracket // ]
+ tokDollar // $
+ tokCaret // ^
+ tokQuestion // ?
+ tokDot // .
+ tokPlus // +
+ tokStar // *
+ tokPipe // |
+ tokLparen // (
+ tokLparenName // (?P<name>
+ tokLparenNameAngle // (?<name>
+ tokLparenNameQuote // (?'name'
+ tokLparenFlags // (?flags
+ tokLparenAtomic // (?>
+ tokLparenPositiveLookahead // (?=
+ tokLparenPositiveLookbehind // (?<=
+ tokLparenNegativeLookahead // (?!
+ tokLparenNegativeLookbehind // (?<!
+ tokRparen // )
+)
+
+// reMetachar is a table of meta chars outside of a char class.
+var reMetachar = [256]bool{
+ '\\': true,
+ '|': true,
+ '*': true,
+ '+': true,
+ '?': true,
+ '.': true,
+ '[': true,
+ ']': true,
+ '^': true,
+ '$': true,
+ '(': true,
+ ')': true,
+}
+
+// charClassMetachar is a table of meta chars inside char class.
+var charClassMetachar = [256]bool{
+ '-': true,
+ ']': true,
+}
+
+type lexer struct {
+ tokens []token
+ pos int
+ input string
+}
+
+func (l *lexer) HasMoreTokens() bool {
+ return l.pos < len(l.tokens)
+}
+
+func (l *lexer) NextToken() token {
+ if l.pos < len(l.tokens) {
+ tok := l.tokens[l.pos]
+ l.pos++
+ return tok
+ }
+ return token{}
+}
+
+func (l *lexer) Peek() token {
+ if l.pos < len(l.tokens) {
+ return l.tokens[l.pos]
+ }
+ return token{}
+}
+
+func (l *lexer) scan() {
+ for l.pos < len(l.input) {
+ ch := l.input[l.pos]
+ if ch >= utf8.RuneSelf {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pushTok(tokChar, size)
+ l.maybeInsertConcat()
+ continue
+ }
+ switch ch {
+ case '\\':
+ l.scanEscape(false)
+ case '.':
+ l.pushTok(tokDot, 1)
+ case '+':
+ l.pushTok(tokPlus, 1)
+ case '*':
+ l.pushTok(tokStar, 1)
+ case '^':
+ l.pushTok(tokCaret, 1)
+ case '$':
+ l.pushTok(tokDollar, 1)
+ case '?':
+ l.pushTok(tokQuestion, 1)
+ case ')':
+ l.pushTok(tokRparen, 1)
+ case '|':
+ l.pushTok(tokPipe, 1)
+ case '[':
+ if l.byteAt(l.pos+1) == '^' {
+ l.pushTok(tokLbracketCaret, 2)
+ } else {
+ l.pushTok(tokLbracket, 1)
+ }
+ l.scanCharClass()
+ case '(':
+ if l.byteAt(l.pos+1) == '?' {
+ switch {
+ case l.byteAt(l.pos+2) == '>':
+ l.pushTok(tokLparenAtomic, len("(?>"))
+ case l.byteAt(l.pos+2) == '=':
+ l.pushTok(tokLparenPositiveLookahead, len("(?="))
+ case l.byteAt(l.pos+2) == '!':
+ l.pushTok(tokLparenNegativeLookahead, len("(?!"))
+ case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '=':
+ l.pushTok(tokLparenPositiveLookbehind, len("(?<="))
+ case l.byteAt(l.pos+2) == '<' && l.byteAt(l.pos+3) == '!':
+ l.pushTok(tokLparenNegativeLookbehind, len("(?<!"))
+ default:
+ if l.tryScanComment(l.pos + 2) {
+ } else if l.tryScanGroupName(l.pos + 2) {
+ } else if l.tryScanGroupFlags(l.pos + 2) {
+ } else {
+ throw(newPos(l.pos, l.pos+1), "group token is incomplete")
+ }
+ }
+ } else {
+ l.pushTok(tokLparen, 1)
+ }
+ case '{':
+ if j := l.repeatWidth(l.pos + 1); j >= 0 {
+ l.pushTok(tokRepeat, len("{")+j)
+ } else {
+ l.pushTok(tokChar, 1)
+ }
+ default:
+ l.pushTok(tokChar, 1)
+ }
+ l.maybeInsertConcat()
+ }
+}
+
+func (l *lexer) scanCharClass() {
+ l.maybeInsertConcat()
+
+ // We need to handle first `]` in a special way. See #3.
+ if l.byteAt(l.pos) == ']' {
+ l.pushTok(tokChar, 1)
+ }
+
+ for l.pos < len(l.input) {
+ ch := l.input[l.pos]
+ if ch >= utf8.RuneSelf {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos:])
+ l.pushTok(tokChar, size)
+ continue
+ }
+ switch ch {
+ case '\\':
+ l.scanEscape(true)
+ case '[':
+ isPosixClass := false
+ if l.byteAt(l.pos+1) == ':' {
+ j := l.stringIndex(l.pos+2, ":]")
+ if j >= 0 {
+ isPosixClass = true
+ l.pushTok(tokPosixClass, j+len("[::]"))
+ }
+ }
+ if !isPosixClass {
+ l.pushTok(tokChar, 1)
+ }
+ case '-':
+ l.pushTok(tokMinus, 1)
+ case ']':
+ l.pushTok(tokRbracket, 1)
+ return // Stop scanning in the char context
+ default:
+ l.pushTok(tokChar, 1)
+ }
+ }
+}
+
+func (l *lexer) scanEscape(insideCharClass bool) {
+ s := l.input
+ if l.pos+1 >= len(s) {
+ throw(newPos(l.pos, l.pos+1), `unexpected end of pattern: trailing '\'`)
+ }
+ switch {
+ case s[l.pos+1] == 'p' || s[l.pos+1] == 'P':
+ if l.pos+2 >= len(s) {
+ throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected uni-class-short or '{'")
+ }
+ if s[l.pos+2] == '{' {
+ j := strings.IndexByte(s[l.pos+2:], '}')
+ if j < 0 {
+ throw(newPos(l.pos, l.pos+2), "can't find closing '}'")
+ }
+ l.pushTok(tokEscapeUniFull, len(`\p{`)+j)
+ } else {
+ l.pushTok(tokEscapeUni, len(`\pL`))
+ }
+ case s[l.pos+1] == 'x':
+ if l.pos+2 >= len(s) {
+ throw(newPos(l.pos, l.pos+2), "unexpected end of pattern: expected hex-digit or '{'")
+ }
+ if s[l.pos+2] == '{' {
+ j := strings.IndexByte(s[l.pos+2:], '}')
+ if j < 0 {
+ throw(newPos(l.pos, l.pos+2), "can't find closing '}'")
+ }
+ l.pushTok(tokEscapeHexFull, len(`\x{`)+j)
+ } else {
+ if isHexDigit(l.byteAt(l.pos + 3)) {
+ l.pushTok(tokEscapeHex, len(`\xFF`))
+ } else {
+ l.pushTok(tokEscapeHex, len(`\xF`))
+ }
+ }
+ case isOctalDigit(s[l.pos+1]):
+ digits := 1
+ if isOctalDigit(l.byteAt(l.pos + 2)) {
+ if isOctalDigit(l.byteAt(l.pos + 3)) {
+ digits = 3
+ } else {
+ digits = 2
+ }
+ }
+ l.pushTok(tokEscapeOctal, len(`\`)+digits)
+ case s[l.pos+1] == 'Q':
+ size := len(s) - l.pos // Until the pattern ends
+ j := l.stringIndex(l.pos+2, `\E`)
+ if j >= 0 {
+ size = j + len(`\Q\E`)
+ }
+ l.pushTok(tokQ, size)
+
+ default:
+ ch := l.byteAt(l.pos + 1)
+ if ch >= utf8.RuneSelf {
+ _, size := utf8.DecodeRuneInString(l.input[l.pos+1:])
+ l.pushTok(tokEscapeChar, len(`\`)+size)
+ return
+ }
+ kind := tokEscapeChar
+ if insideCharClass {
+ if charClassMetachar[ch] {
+ kind = tokEscapeMeta
+ }
+ } else {
+ if reMetachar[ch] {
+ kind = tokEscapeMeta
+ }
+ }
+ l.pushTok(kind, 2)
+ }
+}
+
+func (l *lexer) maybeInsertConcat() {
+ if l.isConcatPos() {
+ last := len(l.tokens) - 1
+ tok := l.tokens[last]
+ l.tokens[last].kind = tokConcat
+ l.tokens = append(l.tokens, tok)
+ }
+}
+
+func (l *lexer) Init(s string) {
+ l.pos = 0
+ l.tokens = l.tokens[:0]
+ l.input = s
+
+ l.scan()
+
+ l.pos = 0
+}
+
+func (l *lexer) tryScanGroupName(pos int) bool {
+ tok := tokLparenName
+ endCh := byte('>')
+ offset := 1
+ switch l.byteAt(pos) {
+ case '\'':
+ endCh = '\''
+ tok = tokLparenNameQuote
+ case '<':
+ tok = tokLparenNameAngle
+ case 'P':
+ offset = 2
+ default:
+ return false
+ }
+ if pos+offset >= len(l.input) {
+ return false
+ }
+ end := strings.IndexByte(l.input[pos+offset:], endCh)
+ if end < 0 {
+ return false
+ }
+ l.pushTok(tok, len("(?")+offset+end+1)
+ return true
+}
+
+func (l *lexer) tryScanGroupFlags(pos int) bool {
+ colonPos := strings.IndexByte(l.input[pos:], ':')
+ parenPos := strings.IndexByte(l.input[pos:], ')')
+ if parenPos < 0 {
+ return false
+ }
+ end := parenPos
+ if colonPos >= 0 && colonPos < parenPos {
+ end = colonPos + len(":")
+ }
+ l.pushTok(tokLparenFlags, len("(?")+end)
+ return true
+}
+
+func (l *lexer) tryScanComment(pos int) bool {
+ if l.byteAt(pos) != '#' {
+ return false
+ }
+ parenPos := strings.IndexByte(l.input[pos:], ')')
+ if parenPos < 0 {
+ return false
+ }
+ l.pushTok(tokComment, len("(?")+parenPos+len(")"))
+ return true
+}
+
+func (l *lexer) repeatWidth(pos int) int {
+ j := pos
+ for isDigit(l.byteAt(j)) {
+ j++
+ }
+ if j == pos {
+ return -1
+ }
+ if l.byteAt(j) == '}' {
+ return (j + len("}")) - pos // {min}
+ }
+ if l.byteAt(j) != ',' {
+ return -1
+ }
+ j += len(",")
+ for isDigit(l.byteAt(j)) {
+ j++
+ }
+ if l.byteAt(j) == '}' {
+ return (j + len("}")) - pos // {min,} or {min,max}
+ }
+ return -1
+}
+
+func (l *lexer) stringIndex(offset int, s string) int {
+ if offset < len(l.input) {
+ return strings.Index(l.input[offset:], s)
+ }
+ return -1
+}
+
+func (l *lexer) byteAt(pos int) byte {
+ if pos >= 0 && pos < len(l.input) {
+ return l.input[pos]
+ }
+ return 0
+}
+
+func (l *lexer) pushTok(kind tokenKind, size int) {
+ l.tokens = append(l.tokens, token{
+ kind: kind,
+ pos: Position{Begin: uint16(l.pos), End: uint16(l.pos + size)},
+ })
+ l.pos += size
+}
+
+func (l *lexer) isConcatPos() bool {
+ if len(l.tokens) < 2 {
+ return false
+ }
+ x := l.tokens[len(l.tokens)-2].kind
+ if concatTable[x]&concatX != 0 {
+ return false
+ }
+ y := l.tokens[len(l.tokens)-1].kind
+ return concatTable[y]&concatY == 0
+}
+
+const (
+ concatX byte = 1 << iota
+ concatY
+)
+
+var concatTable = [256]byte{
+ tokPipe: concatX | concatY,
+
+ tokLparen: concatX,
+ tokLparenFlags: concatX,
+ tokLparenName: concatX,
+ tokLparenNameAngle: concatX,
+ tokLparenNameQuote: concatX,
+ tokLparenAtomic: concatX,
+ tokLbracket: concatX,
+ tokLbracketCaret: concatX,
+ tokLparenPositiveLookahead: concatX,
+ tokLparenPositiveLookbehind: concatX,
+ tokLparenNegativeLookahead: concatX,
+ tokLparenNegativeLookbehind: concatX,
+
+ tokRparen: concatY,
+ tokRbracket: concatY,
+ tokPlus: concatY,
+ tokStar: concatY,
+ tokQuestion: concatY,
+ tokRepeat: concatY,
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/operation.go b/vendor/github.com/quasilyte/regex/syntax/operation.go
new file mode 100644
index 000000000..0fc8fc521
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/operation.go
@@ -0,0 +1,195 @@
+package syntax
+
+//go:generate stringer -type=Operation -trimprefix=Op
+const (
+ OpNone Operation = iota
+
+ // OpConcat is a concatenation of ops.
+ // Examples: `xy` `abc\d` ``
+ // Args - concatenated ops
+ //
+ // As a special case, OpConcat with 0 Args is used for "empty"
+ // set of operations.
+ OpConcat
+
+ // OpDot is a '.' wildcard.
+ OpDot
+
+ // OpAlt is x|y alternation of ops.
+ // Examples: `a|bc` `x(.*?)|y(.*?)`
+ // Args - union-connected regexp branches
+ OpAlt
+
+ // OpStar is a shorthand for {0,} repetition.
+ // Examples: `x*`
+ // Args[0] - repeated expression
+ OpStar
+
+ // OpPlus is a shorthand for {1,} repetition.
+ // Examples: `x+`
+ // Args[0] - repeated expression
+ OpPlus
+
+ // OpQuestion is a shorthand for {0,1} repetition.
+ // Examples: `x?`
+ // Args[0] - repeated expression
+ OpQuestion
+
+ // OpNonGreedy makes its operand quantifier non-greedy.
+ // Examples: `x??` `x*?` `x+?`
+ // Args[0] - quantified expression
+ OpNonGreedy
+
+ // OpPossessive makes its operand quantifier possessive.
+ // Examples: `x?+` `x*+` `x++`
+ // Args[0] - quantified expression
+ OpPossessive
+
+ // OpCaret is ^ anchor.
+ OpCaret
+
+ // OpDollar is $ anchor.
+ OpDollar
+
+ // OpLiteral is a collection of consecutive chars.
+ // Examples: `ab` `10x`
+ // Args - enclosed characters (OpChar)
+ OpLiteral
+
+ // OpChar is a single literal pattern character.
+ // Examples: `a` `6` `ф`
+ OpChar
+
+ // OpString is an artificial element that is used in other expressions.
+ OpString
+
+ // OpQuote is a \Q...\E enclosed literal.
+ // Examples: `\Q.?\E` `\Q?q[]=1`
+ // FormQuoteUnclosed: `\Qabc`
+ // Args[0] - literal value (OpString)
+ OpQuote
+
+ // OpEscapeChar is a single char escape.
+ // Examples: `\d` `\a` `\n`
+ // Args[0] - escaped value (OpString)
+ OpEscapeChar
+
+ // OpEscapeMeta is an escaped meta char.
+ // Examples: `\(` `\[` `\+`
+ // Args[0] - escaped value (OpString)
+ OpEscapeMeta
+
+ // OpEscapeOctal is an octal char code escape (up to 3 digits).
+ // Examples: `\123` `\12`
+ // Args[0] - escaped value (OpString)
+ OpEscapeOctal
+
+ // OpEscapeHex is a hex char code escape.
+ // Examples: `\x7F` `\xF7`
+ // FormEscapeHexFull examples: `\x{10FFFF}` `\x{F}`.
+ // Args[0] - escaped value (OpString)
+ OpEscapeHex
+
+ // OpEscapeUni is a Unicode char class escape.
+ // Examples: `\pS` `\pL` `\PL`
+ // FormEscapeUniFull examples: `\p{Greek}` `\p{Symbol}` `\p{^L}`
+ // Args[0] - escaped value (OpString)
+ OpEscapeUni
+
+ // OpCharClass is a char class enclosed in [].
+ // Examples: `[abc]` `[a-z0-9\]]`
+ // Args - char class elements (can include OpCharRange and OpPosixClass)
+ OpCharClass
+
+ // OpNegCharClass is a negated char class enclosed in [].
+ // Examples: `[^abc]` `[^a-z0-9\]]`
+ // Args - char class elements (can include OpCharRange and OpPosixClass)
+ OpNegCharClass
+
+ // OpCharRange is an inclusive char range inside a char class.
+ // Examples: `0-9` `A-Z`
+ // Args[0] - range lower bound
+ // Args[1] - range upper bound
+ OpCharRange
+
+ // OpPosixClass is a named ASCII char set inside a char class.
+ // Examples: `[:alpha:]` `[:blank:]`
+ OpPosixClass
+
+ // OpRepeat is a {min,max} repetition quantifier.
+ // Examples: `x{5}` `x{min,max}` `x{min,}`
+ // Args[0] - repeated expression
+ // Args[1] - repeat count (OpString)
+ OpRepeat
+
+ // OpCapture is `(re)` capturing group.
+ // Examples: `(abc)` `(x|y)`
+ // Args[0] - enclosed expression
+ OpCapture
+
+ // OpNamedCapture is `(?P<name>re)` capturing group.
+ // Examples: `(?P<foo>abc)` `(?P<name>x|y)`
+ // FormNamedCaptureAngle examples: `(?<foo>abc)` `(?<name>x|y)`
+ // FormNamedCaptureQuote examples: `(?'foo'abc)` `(?'name'x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ // Args[1] - group name (OpString)
+ OpNamedCapture
+
+ // OpGroup is `(?:re)` non-capturing group.
+ // Examples: `(?:abc)` `(?:x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpGroup
+
+ // OpGroupWithFlags is `(?flags:re)` non-capturing group.
+ // Examples: `(?i:abc)` `(?i:x|y)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ // Args[1] - flags (OpString)
+ OpGroupWithFlags
+
+ // OpAtomicGroup is `(?>re)` non-capturing group without backtracking.
+ // Examples: `(?>foo)` `(?>)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpAtomicGroup
+
+ // OpPositiveLookahead is `(?=re)` asserts that following text matches re.
+ // Examples: `(?=foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpPositiveLookahead
+
+ // OpNegativeLookahead is `(?!re)` asserts that following text doesn't match re.
+ // Examples: `(?!foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpNegativeLookahead
+
+ // OpPositiveLookbehind is `(?<=re)` asserts that preceding text matches re.
+ // Examples: `(?<=foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpPositiveLookbehind
+
+	// OpNegativeLookbehind is `(?<!re)` asserts that preceding text doesn't match re.
+ // Examples: `(?<!foo)`
+ // Args[0] - enclosed expression (OpConcat with 0 args for empty group)
+ OpNegativeLookbehind
+
+ // OpFlagOnlyGroup is `(?flags)` form that affects current group flags.
+ // Examples: `(?i)` `(?i-m)` `(?-im)`
+ // Args[0] - flags (OpString)
+ OpFlagOnlyGroup
+
+ // OpComment is a group-like regexp comment expression.
+ // Examples: `(?#text)` `(?#)`
+ OpComment
+
+ // OpNone2 is a sentinel value that is never part of the AST.
+ // OpNone and OpNone2 can be used to cover all ops in a range.
+ OpNone2
+)
+
+const (
+ FormDefault Form = iota
+ FormEscapeHexFull
+ FormEscapeUniFull
+ FormNamedCaptureAngle
+ FormNamedCaptureQuote
+ FormQuoteUnclosed
+)
diff --git a/vendor/github.com/quasilyte/regex/syntax/operation_string.go b/vendor/github.com/quasilyte/regex/syntax/operation_string.go
new file mode 100644
index 000000000..b78e9ac5d
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/operation_string.go
@@ -0,0 +1,59 @@
+// Code generated by "stringer -type=Operation -trimprefix=Op"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[OpNone-0]
+ _ = x[OpConcat-1]
+ _ = x[OpDot-2]
+ _ = x[OpAlt-3]
+ _ = x[OpStar-4]
+ _ = x[OpPlus-5]
+ _ = x[OpQuestion-6]
+ _ = x[OpNonGreedy-7]
+ _ = x[OpPossessive-8]
+ _ = x[OpCaret-9]
+ _ = x[OpDollar-10]
+ _ = x[OpLiteral-11]
+ _ = x[OpChar-12]
+ _ = x[OpString-13]
+ _ = x[OpQuote-14]
+ _ = x[OpEscapeChar-15]
+ _ = x[OpEscapeMeta-16]
+ _ = x[OpEscapeOctal-17]
+ _ = x[OpEscapeHex-18]
+ _ = x[OpEscapeUni-19]
+ _ = x[OpCharClass-20]
+ _ = x[OpNegCharClass-21]
+ _ = x[OpCharRange-22]
+ _ = x[OpPosixClass-23]
+ _ = x[OpRepeat-24]
+ _ = x[OpCapture-25]
+ _ = x[OpNamedCapture-26]
+ _ = x[OpGroup-27]
+ _ = x[OpGroupWithFlags-28]
+ _ = x[OpAtomicGroup-29]
+ _ = x[OpPositiveLookahead-30]
+ _ = x[OpNegativeLookahead-31]
+ _ = x[OpPositiveLookbehind-32]
+ _ = x[OpNegativeLookbehind-33]
+ _ = x[OpFlagOnlyGroup-34]
+ _ = x[OpComment-35]
+ _ = x[OpNone2-36]
+}
+
+const _Operation_name = "NoneConcatDotAltStarPlusQuestionNonGreedyPossessiveCaretDollarLiteralCharStringQuoteEscapeCharEscapeMetaEscapeOctalEscapeHexEscapeUniCharClassNegCharClassCharRangePosixClassRepeatCaptureNamedCaptureGroupGroupWithFlagsAtomicGroupPositiveLookaheadNegativeLookaheadPositiveLookbehindNegativeLookbehindFlagOnlyGroupCommentNone2"
+
+var _Operation_index = [...]uint16{0, 4, 10, 13, 16, 20, 24, 32, 41, 51, 56, 62, 69, 73, 79, 84, 94, 104, 115, 124, 133, 142, 154, 163, 173, 179, 186, 198, 203, 217, 228, 245, 262, 280, 298, 311, 318, 323}
+
+func (i Operation) String() string {
+ if i >= Operation(len(_Operation_index)-1) {
+ return "Operation(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Operation_name[_Operation_index[i]:_Operation_index[i+1]]
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/parser.go b/vendor/github.com/quasilyte/regex/syntax/parser.go
new file mode 100644
index 000000000..c540ac593
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/parser.go
@@ -0,0 +1,503 @@
+package syntax
+
+import (
+ "errors"
+ "strings"
+)
+
+type ParserOptions struct {
+ // NoLiterals disables OpChar merging into OpLiteral.
+ NoLiterals bool
+}
+
+func NewParser(opts *ParserOptions) *Parser {
+ return newParser(opts)
+}
+
+type Parser struct {
+ out Regexp
+ lexer lexer
+ exprPool []Expr
+
+ prefixParselets [256]prefixParselet
+ infixParselets [256]infixParselet
+
+ charClass []Expr
+ allocated uint
+
+ opts ParserOptions
+}
+
+// ParsePCRE parses PHP-style pattern with delimiters.
+// An example of such pattern is `/foo/i`.
+func (p *Parser) ParsePCRE(pattern string) (*RegexpPCRE, error) {
+ pcre, err := p.newPCRE(pattern)
+ if err != nil {
+ return nil, err
+ }
+ if pcre.HasModifier('x') {
+ return nil, errors.New("'x' modifier is not supported")
+ }
+ re, err := p.Parse(pcre.Pattern)
+ if re != nil {
+ pcre.Expr = re.Expr
+ }
+ return pcre, err
+}
+
+func (p *Parser) Parse(pattern string) (result *Regexp, err error) {
+ defer func() {
+ r := recover()
+ if r == nil {
+ return
+ }
+ if err2, ok := r.(ParseError); ok {
+ err = err2
+ return
+ }
+ panic(r)
+ }()
+
+ p.lexer.Init(pattern)
+ p.allocated = 0
+ p.out.Pattern = pattern
+ if pattern == "" {
+ p.out.Expr = *p.newExpr(OpConcat, Position{})
+ } else {
+ p.out.Expr = *p.parseExpr(0)
+ }
+
+ if !p.opts.NoLiterals {
+ p.mergeChars(&p.out.Expr)
+ }
+ p.setValues(&p.out.Expr)
+
+ return &p.out, nil
+}
+
+type prefixParselet func(token) *Expr
+
+type infixParselet func(*Expr, token) *Expr
+
+func newParser(opts *ParserOptions) *Parser {
+ var p Parser
+
+ if opts != nil {
+ p.opts = *opts
+ }
+ p.exprPool = make([]Expr, 256)
+
+ for tok, op := range tok2op {
+ if op != 0 {
+ p.prefixParselets[tokenKind(tok)] = p.parsePrefixElementary
+ }
+ }
+
+ p.prefixParselets[tokQ] = func(tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(`\Q`))
+ form := FormQuoteUnclosed
+ if strings.HasSuffix(p.tokenValue(tok), `\E`) {
+ litPos.End -= uint16(len(`\E`))
+ form = FormDefault
+ }
+ lit := p.newExpr(OpString, litPos)
+ return p.newExprForm(OpQuote, form, tok.pos, lit)
+ }
+
+ p.prefixParselets[tokEscapeHexFull] = func(tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(`\x{`))
+ litPos.End -= uint16(len(`}`))
+ lit := p.newExpr(OpString, litPos)
+ return p.newExprForm(OpEscapeHex, FormEscapeHexFull, tok.pos, lit)
+ }
+ p.prefixParselets[tokEscapeUniFull] = func(tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(`\p{`))
+ litPos.End -= uint16(len(`}`))
+ lit := p.newExpr(OpString, litPos)
+ return p.newExprForm(OpEscapeUni, FormEscapeUniFull, tok.pos, lit)
+ }
+
+ p.prefixParselets[tokEscapeHex] = func(tok token) *Expr { return p.parseEscape(OpEscapeHex, `\x`, tok) }
+ p.prefixParselets[tokEscapeOctal] = func(tok token) *Expr { return p.parseEscape(OpEscapeOctal, `\`, tok) }
+ p.prefixParselets[tokEscapeChar] = func(tok token) *Expr { return p.parseEscape(OpEscapeChar, `\`, tok) }
+ p.prefixParselets[tokEscapeMeta] = func(tok token) *Expr { return p.parseEscape(OpEscapeMeta, `\`, tok) }
+ p.prefixParselets[tokEscapeUni] = func(tok token) *Expr { return p.parseEscape(OpEscapeUni, `\p`, tok) }
+
+ p.prefixParselets[tokLparen] = func(tok token) *Expr { return p.parseGroup(OpCapture, tok) }
+ p.prefixParselets[tokLparenAtomic] = func(tok token) *Expr { return p.parseGroup(OpAtomicGroup, tok) }
+ p.prefixParselets[tokLparenPositiveLookahead] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookahead, tok) }
+ p.prefixParselets[tokLparenNegativeLookahead] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookahead, tok) }
+ p.prefixParselets[tokLparenPositiveLookbehind] = func(tok token) *Expr { return p.parseGroup(OpPositiveLookbehind, tok) }
+ p.prefixParselets[tokLparenNegativeLookbehind] = func(tok token) *Expr { return p.parseGroup(OpNegativeLookbehind, tok) }
+
+ p.prefixParselets[tokLparenName] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormDefault, tok)
+ }
+ p.prefixParselets[tokLparenNameAngle] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormNamedCaptureAngle, tok)
+ }
+ p.prefixParselets[tokLparenNameQuote] = func(tok token) *Expr {
+ return p.parseNamedCapture(FormNamedCaptureQuote, tok)
+ }
+
+ p.prefixParselets[tokLparenFlags] = p.parseGroupWithFlags
+
+ p.prefixParselets[tokPipe] = func(tok token) *Expr {
+ // We need prefix pipe parselet to handle `(|x)` syntax.
+ right := p.parseExpr(1)
+ return p.newExpr(OpAlt, tok.pos, p.newEmpty(tok.pos), right)
+ }
+ p.prefixParselets[tokLbracket] = func(tok token) *Expr {
+ return p.parseCharClass(OpCharClass, tok)
+ }
+ p.prefixParselets[tokLbracketCaret] = func(tok token) *Expr {
+ return p.parseCharClass(OpNegCharClass, tok)
+ }
+
+ p.infixParselets[tokRepeat] = func(left *Expr, tok token) *Expr {
+ repeatLit := p.newExpr(OpString, tok.pos)
+ return p.newExpr(OpRepeat, combinePos(left.Pos, tok.pos), left, repeatLit)
+ }
+ p.infixParselets[tokStar] = func(left *Expr, tok token) *Expr {
+ return p.newExpr(OpStar, combinePos(left.Pos, tok.pos), left)
+ }
+ p.infixParselets[tokConcat] = func(left *Expr, tok token) *Expr {
+ right := p.parseExpr(2)
+ if left.Op == OpConcat {
+ left.Args = append(left.Args, *right)
+ left.Pos.End = right.End()
+ return left
+ }
+ return p.newExpr(OpConcat, combinePos(left.Pos, right.Pos), left, right)
+ }
+ p.infixParselets[tokPipe] = p.parseAlt
+ p.infixParselets[tokMinus] = p.parseMinus
+ p.infixParselets[tokPlus] = p.parsePlus
+ p.infixParselets[tokQuestion] = p.parseQuestion
+
+ return &p
+}
+
+func (p *Parser) setValues(e *Expr) {
+ for i := range e.Args {
+ p.setValues(&e.Args[i])
+ }
+ e.Value = p.exprValue(e)
+}
+
+func (p *Parser) tokenValue(tok token) string {
+ return p.out.Pattern[tok.pos.Begin:tok.pos.End]
+}
+
+func (p *Parser) exprValue(e *Expr) string {
+ return p.out.Pattern[e.Begin():e.End()]
+}
+
+func (p *Parser) mergeChars(e *Expr) {
+ for i := range e.Args {
+ p.mergeChars(&e.Args[i])
+ }
+ if e.Op != OpConcat || len(e.Args) < 2 {
+ return
+ }
+
+ args := e.Args[:0]
+ i := 0
+ for i < len(e.Args) {
+ first := i
+ chars := 0
+ for j := i; j < len(e.Args) && e.Args[j].Op == OpChar; j++ {
+ chars++
+ }
+ if chars > 1 {
+ c1 := e.Args[first]
+ c2 := e.Args[first+chars-1]
+ lit := p.newExpr(OpLiteral, combinePos(c1.Pos, c2.Pos))
+ for j := 0; j < chars; j++ {
+ lit.Args = append(lit.Args, e.Args[first+j])
+ }
+ args = append(args, *lit)
+ i += chars
+ } else {
+ args = append(args, e.Args[i])
+ i++
+ }
+ }
+ if len(args) == 1 {
+ *e = args[0] // Turn OpConcat into OpLiteral
+ } else {
+ e.Args = args
+ }
+}
+
+func (p *Parser) newEmpty(pos Position) *Expr {
+ return p.newExpr(OpConcat, pos)
+}
+
+func (p *Parser) newExprForm(op Operation, form Form, pos Position, args ...*Expr) *Expr {
+ e := p.newExpr(op, pos, args...)
+ e.Form = form
+ return e
+}
+
+func (p *Parser) newExpr(op Operation, pos Position, args ...*Expr) *Expr {
+ e := p.allocExpr()
+ *e = Expr{
+ Op: op,
+ Pos: pos,
+ Args: e.Args[:0],
+ }
+ for _, arg := range args {
+ e.Args = append(e.Args, *arg)
+ }
+ return e
+}
+
+func (p *Parser) allocExpr() *Expr {
+ i := p.allocated
+ if i < uint(len(p.exprPool)) {
+ p.allocated++
+ return &p.exprPool[i]
+ }
+ return &Expr{}
+}
+
+func (p *Parser) expect(kind tokenKind) Position {
+ tok := p.lexer.NextToken()
+ if tok.kind != kind {
+ throwExpectedFound(tok.pos, kind.String(), tok.kind.String())
+ }
+ return tok.pos
+}
+
+func (p *Parser) parseExpr(precedence int) *Expr {
+ tok := p.lexer.NextToken()
+ prefix := p.prefixParselets[tok.kind]
+ if prefix == nil {
+ throwUnexpectedToken(tok.pos, tok.String())
+ }
+ left := prefix(tok)
+
+ for precedence < p.precedenceOf(p.lexer.Peek()) {
+ tok := p.lexer.NextToken()
+ infix := p.infixParselets[tok.kind]
+ left = infix(left, tok)
+ }
+
+ return left
+}
+
+func (p *Parser) parsePrefixElementary(tok token) *Expr {
+ return p.newExpr(tok2op[tok.kind], tok.pos)
+}
+
+func (p *Parser) parseCharClass(op Operation, tok token) *Expr {
+ var endPos Position
+ p.charClass = p.charClass[:0]
+ for {
+ p.charClass = append(p.charClass, *p.parseExpr(0))
+ next := p.lexer.Peek()
+ if next.kind == tokRbracket {
+ endPos = next.pos
+ p.lexer.NextToken()
+ break
+ }
+ if next.kind == tokNone {
+ throw(tok.pos, "unterminated '['")
+ }
+ }
+
+ result := p.newExpr(op, combinePos(tok.pos, endPos))
+ result.Args = append(result.Args, p.charClass...)
+ return result
+}
+
+func (p *Parser) parseMinus(left *Expr, tok token) *Expr {
+ if p.isValidCharRangeOperand(left) {
+ if p.lexer.Peek().kind != tokRbracket {
+ right := p.parseExpr(2)
+ return p.newExpr(OpCharRange, combinePos(left.Pos, right.Pos), left, right)
+ }
+ }
+ p.charClass = append(p.charClass, *left)
+ return p.newExpr(OpChar, tok.pos)
+}
+
+func (p *Parser) isValidCharRangeOperand(e *Expr) bool {
+ switch e.Op {
+ case OpEscapeHex, OpEscapeOctal, OpEscapeMeta, OpChar:
+ return true
+ case OpEscapeChar:
+ switch p.exprValue(e) {
+ case `\\`, `\|`, `\*`, `\+`, `\?`, `\.`, `\[`, `\^`, `\$`, `\(`, `\)`:
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) parsePlus(left *Expr, tok token) *Expr {
+ op := OpPlus
+ switch left.Op {
+ case OpPlus, OpStar, OpQuestion, OpRepeat:
+ op = OpPossessive
+ }
+ return p.newExpr(op, combinePos(left.Pos, tok.pos), left)
+}
+
+func (p *Parser) parseQuestion(left *Expr, tok token) *Expr {
+ op := OpQuestion
+ switch left.Op {
+ case OpPlus, OpStar, OpQuestion, OpRepeat:
+ op = OpNonGreedy
+ }
+ return p.newExpr(op, combinePos(left.Pos, tok.pos), left)
+}
+
+func (p *Parser) parseAlt(left *Expr, tok token) *Expr {
+ var right *Expr
+ switch p.lexer.Peek().kind {
+ case tokRparen, tokNone:
+ // This is needed to handle `(x|)` syntax.
+ right = p.newEmpty(tok.pos)
+ default:
+ right = p.parseExpr(1)
+ }
+ if left.Op == OpAlt {
+ left.Args = append(left.Args, *right)
+ left.Pos.End = right.End()
+ return left
+ }
+ return p.newExpr(OpAlt, combinePos(left.Pos, right.Pos), left, right)
+}
+
+func (p *Parser) parseGroupItem(tok token) *Expr {
+ if p.lexer.Peek().kind == tokRparen {
+	// This is needed to handle `()` syntax.
+ return p.newEmpty(tok.pos)
+ }
+ return p.parseExpr(0)
+}
+
+func (p *Parser) parseGroup(op Operation, tok token) *Expr {
+ x := p.parseGroupItem(tok)
+ result := p.newExpr(op, tok.pos, x)
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseNamedCapture(form Form, tok token) *Expr {
+ prefixLen := len("(?<")
+ if form == FormDefault {
+ prefixLen = len("(?P<")
+ }
+ name := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(prefixLen),
+ End: tok.pos.End - uint16(len(">")),
+ })
+ x := p.parseGroupItem(tok)
+ result := p.newExprForm(OpNamedCapture, form, tok.pos, x, name)
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseGroupWithFlags(tok token) *Expr {
+ var result *Expr
+ val := p.out.Pattern[tok.pos.Begin+1 : tok.pos.End]
+ switch {
+ case !strings.HasSuffix(val, ":"):
+ flags := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(len("(?")),
+ End: tok.pos.End,
+ })
+ result = p.newExpr(OpFlagOnlyGroup, tok.pos, flags)
+ case val == "?:":
+ x := p.parseGroupItem(tok)
+ result = p.newExpr(OpGroup, tok.pos, x)
+ default:
+ flags := p.newExpr(OpString, Position{
+ Begin: tok.pos.Begin + uint16(len("(?")),
+ End: tok.pos.End - uint16(len(":")),
+ })
+ x := p.parseGroupItem(tok)
+ result = p.newExpr(OpGroupWithFlags, tok.pos, x, flags)
+ }
+ result.Pos.End = p.expect(tokRparen).End
+ return result
+}
+
+func (p *Parser) parseEscape(op Operation, prefix string, tok token) *Expr {
+ litPos := tok.pos
+ litPos.Begin += uint16(len(prefix))
+ lit := p.newExpr(OpString, litPos)
+ return p.newExpr(op, tok.pos, lit)
+}
+
+func (p *Parser) precedenceOf(tok token) int {
+ switch tok.kind {
+ case tokPipe:
+ return 1
+ case tokConcat, tokMinus:
+ return 2
+ case tokPlus, tokStar, tokQuestion, tokRepeat:
+ return 3
+ default:
+ return 0
+ }
+}
+
+func (p *Parser) newPCRE(source string) (*RegexpPCRE, error) {
+ if source == "" {
+ return nil, errors.New("empty pattern: can't find delimiters")
+ }
+
+ delim := source[0]
+ endDelim := delim
+ switch delim {
+ case '(':
+ endDelim = ')'
+ case '{':
+ endDelim = '}'
+ case '[':
+ endDelim = ']'
+ case '<':
+ endDelim = '>'
+ case '\\':
+ return nil, errors.New("'\\' is not a valid delimiter")
+ default:
+ if isSpace(delim) {
+ return nil, errors.New("whitespace is not a valid delimiter")
+ }
+ if isAlphanumeric(delim) {
+ return nil, errors.New("'" + string(delim) + "' is not a valid delimiter")
+ }
+ }
+
+ const delimLen = 1
+ j := strings.LastIndexByte(source[delimLen:], endDelim)
+ if j == -1 {
+ return nil, errors.New("can't find '" + string(endDelim) + "' ending delimiter")
+ }
+ j += delimLen
+
+ pcre := &RegexpPCRE{
+ Pattern: source[1:j],
+ Source: source,
+ Delim: [2]byte{delim, endDelim},
+ Modifiers: source[j+1:],
+ }
+ return pcre, nil
+}
+
+var tok2op = [256]Operation{
+ tokDollar: OpDollar,
+ tokCaret: OpCaret,
+ tokDot: OpDot,
+ tokChar: OpChar,
+ tokMinus: OpChar,
+ tokPosixClass: OpPosixClass,
+ tokComment: OpComment,
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/pos.go b/vendor/github.com/quasilyte/regex/syntax/pos.go
new file mode 100644
index 000000000..51bdbf87a
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/pos.go
@@ -0,0 +1,10 @@
+package syntax
+
+type Position struct {
+ Begin uint16
+ End uint16
+}
+
+func combinePos(begin, end Position) Position {
+ return Position{Begin: begin.Begin, End: end.End}
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go
new file mode 100644
index 000000000..8800436bc
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/tokenkind_string.go
@@ -0,0 +1,59 @@
+// Code generated by "stringer -type=tokenKind -trimprefix=tok -linecomment=true"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[tokNone-0]
+ _ = x[tokChar-1]
+ _ = x[tokGroupFlags-2]
+ _ = x[tokPosixClass-3]
+ _ = x[tokConcat-4]
+ _ = x[tokRepeat-5]
+ _ = x[tokEscapeChar-6]
+ _ = x[tokEscapeMeta-7]
+ _ = x[tokEscapeOctal-8]
+ _ = x[tokEscapeUni-9]
+ _ = x[tokEscapeUniFull-10]
+ _ = x[tokEscapeHex-11]
+ _ = x[tokEscapeHexFull-12]
+ _ = x[tokComment-13]
+ _ = x[tokQ-14]
+ _ = x[tokMinus-15]
+ _ = x[tokLbracket-16]
+ _ = x[tokLbracketCaret-17]
+ _ = x[tokRbracket-18]
+ _ = x[tokDollar-19]
+ _ = x[tokCaret-20]
+ _ = x[tokQuestion-21]
+ _ = x[tokDot-22]
+ _ = x[tokPlus-23]
+ _ = x[tokStar-24]
+ _ = x[tokPipe-25]
+ _ = x[tokLparen-26]
+ _ = x[tokLparenName-27]
+ _ = x[tokLparenNameAngle-28]
+ _ = x[tokLparenNameQuote-29]
+ _ = x[tokLparenFlags-30]
+ _ = x[tokLparenAtomic-31]
+ _ = x[tokLparenPositiveLookahead-32]
+ _ = x[tokLparenPositiveLookbehind-33]
+ _ = x[tokLparenNegativeLookahead-34]
+ _ = x[tokLparenNegativeLookbehind-35]
+ _ = x[tokRparen-36]
+}
+
+const _tokenKind_name = "NoneCharGroupFlagsPosixClassConcatRepeatEscapeCharEscapeMetaEscapeOctalEscapeUniEscapeUniFullEscapeHexEscapeHexFullComment\\Q-[[^]$^?.+*|((?P<name>(?<name>(?'name'(?flags(?>(?=(?<=(?!(?<!)"
+
+var _tokenKind_index = [...]uint8{0, 4, 8, 18, 28, 34, 40, 50, 60, 71, 80, 93, 102, 115, 122, 124, 125, 126, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 146, 154, 162, 169, 172, 175, 179, 182, 186, 187}
+
+func (i tokenKind) String() string {
+ if i >= tokenKind(len(_tokenKind_index)-1) {
+ return "tokenKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _tokenKind_name[_tokenKind_index[i]:_tokenKind_index[i+1]]
+}
diff --git a/vendor/github.com/quasilyte/regex/syntax/utils.go b/vendor/github.com/quasilyte/regex/syntax/utils.go
new file mode 100644
index 000000000..e5b654825
--- /dev/null
+++ b/vendor/github.com/quasilyte/regex/syntax/utils.go
@@ -0,0 +1,30 @@
+package syntax
+
+func isSpace(ch byte) bool {
+ switch ch {
+ case '\r', '\n', '\t', '\f', '\v', ' ':
+ return true
+ default:
+ return false
+ }
+}
+
+func isAlphanumeric(ch byte) bool {
+ return (ch >= 'a' && ch <= 'z') ||
+ (ch >= 'A' && ch <= 'Z') ||
+ (ch >= '0' && ch <= '9')
+}
+
+func isDigit(ch byte) bool {
+ return ch >= '0' && ch <= '9'
+}
+
+func isOctalDigit(ch byte) bool {
+ return ch >= '0' && ch <= '7'
+}
+
+func isHexDigit(ch byte) bool {
+ return (ch >= '0' && ch <= '9') ||
+ (ch >= 'a' && ch <= 'f') ||
+ (ch >= 'A' && ch <= 'F')
+}