aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/hexops/gotextdiff
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/hexops/gotextdiff')
-rw-r--r--vendor/github.com/hexops/gotextdiff/LICENSE27
-rw-r--r--vendor/github.com/hexops/gotextdiff/README.md54
-rw-r--r--vendor/github.com/hexops/gotextdiff/diff.go159
-rw-r--r--vendor/github.com/hexops/gotextdiff/go.mod3
-rw-r--r--vendor/github.com/hexops/gotextdiff/myers/diff.go205
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/parse.go100
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/span.go285
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/token.go194
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/token111.go39
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/token112.go16
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/uri.go169
-rw-r--r--vendor/github.com/hexops/gotextdiff/span/utf16.go91
-rw-r--r--vendor/github.com/hexops/gotextdiff/unified.go210
13 files changed, 1552 insertions, 0 deletions
diff --git a/vendor/github.com/hexops/gotextdiff/LICENSE b/vendor/github.com/hexops/gotextdiff/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/hexops/gotextdiff/README.md b/vendor/github.com/hexops/gotextdiff/README.md
new file mode 100644
index 000000000..bfd49a0c9
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/README.md
@@ -0,0 +1,54 @@
+# gotextdiff - unified text diffing in Go <a href="https://hexops.com"><img align="right" alt="Hexops logo" src="https://raw.githubusercontent.com/hexops/media/master/readme.svg"></img></a>
+
+This is a copy of the Go text diffing packages that [the official Go language server gopls uses internally](https://github.com/golang/tools/tree/master/internal/lsp/diff) to generate unified diffs.
+
+If you've previously tried to generate unified text diffs in Go (like the ones you see in Git and on GitHub), you may have found [github.com/sergi/go-diff](https://github.com/sergi/go-diff) which is a Go port of Neil Fraser's google-diff-match-patch code - however it [does not support unified diffs](https://github.com/sergi/go-diff/issues/57).
+
+This is arguably one of the best (and most maintained) unified text diffing packages in Go as of at least 2020.
+
+(All credit goes to [the Go authors](http://tip.golang.org/AUTHORS), I am merely re-publishing their work so others can use it.)
+
+## Example usage
+
+Import the packages:
+
+```Go
+import (
+ "github.com/hexops/gotextdiff"
+ "github.com/hexops/gotextdiff/myers"
+)
+```
+
+Assuming you want to diff `a.txt` and `b.txt`, whose contents are stored in `aString` and `bString` then:
+
+```Go
+edits := myers.ComputeEdits(span.URIFromPath("a.txt"), aString, bString)
+diff := fmt.Sprint(gotextdiff.ToUnified("a.txt", "b.txt", aString, edits))
+```
+
+`diff` will be a string like:
+
+```diff
+--- a.txt
++++ b.txt
+@@ -1,13 +1,28 @@
+-foo
++bar
+```
+
+## API compatability
+
+We will publish a new major version anytime the API changes in a backwards-incompatible way. Because the upstream is not being developed with this being a public package in mind, API breakages may occur more often than in other Go packages (but you can always continue using the old version thanks to Go modules.)
+
+## Alternatives
+
+- [github.com/andreyvit/diff](https://github.com/andreyvit/diff): Quick'n'easy string diffing functions for Golang based on github.com/sergi/go-diff.
+- [github.com/kylelemons/godebug/diff](https://github.com/kylelemons/godebug/tree/master/diff): implements a linewise diff algorithm ([inactive](https://github.com/kylelemons/godebug/issues/22#issuecomment-524573477)).
+
+## Contributing
+
+We will only accept changes made [upstream](https://github.com/golang/tools/tree/master/internal/lsp/diff), please send any contributions to the upstream instead! Compared to the upstream, only import paths will be modified (to be non-`internal` so they are importable.) The only thing we add here is this README.
+
+## License
+
+See https://github.com/golang/tools/blob/master/LICENSE
diff --git a/vendor/github.com/hexops/gotextdiff/diff.go b/vendor/github.com/hexops/gotextdiff/diff.go
new file mode 100644
index 000000000..53e499bc0
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/diff.go
@@ -0,0 +1,159 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package gotextdiff supports a pluggable diff algorithm.
+package gotextdiff
+
+import (
+ "sort"
+ "strings"
+
+ "github.com/hexops/gotextdiff/span"
+)
+
+// TextEdit represents a change to a section of a document.
+// The text within the specified span should be replaced by the supplied new text.
+type TextEdit struct {
+ Span span.Span
+ NewText string
+}
+
+// ComputeEdits is the type for a function that produces a set of edits that
+// convert from the before content to the after content.
+type ComputeEdits func(uri span.URI, before, after string) []TextEdit
+
+// SortTextEdits attempts to order all edits by their starting points.
+// The sort is stable so that edits with the same starting point will not
+// be reordered.
+func SortTextEdits(d []TextEdit) {
+ // Use a stable sort to maintain the order of edits inserted at the same position.
+ sort.SliceStable(d, func(i int, j int) bool {
+ return span.Compare(d[i].Span, d[j].Span) < 0
+ })
+}
+
+// ApplyEdits applies the set of edits to the before and returns the resulting
+// content.
+// It may panic or produce garbage if the edits are not valid for the provided
+// before content.
+func ApplyEdits(before string, edits []TextEdit) string {
+ // Preconditions:
+ // - all of the edits apply to before
+ // - and all the spans for each TextEdit have the same URI
+ if len(edits) == 0 {
+ return before
+ }
+ _, edits, _ = prepareEdits(before, edits)
+ after := strings.Builder{}
+ last := 0
+ for _, edit := range edits {
+ start := edit.Span.Start().Offset()
+ if start > last {
+ after.WriteString(before[last:start])
+ last = start
+ }
+ after.WriteString(edit.NewText)
+ last = edit.Span.End().Offset()
+ }
+ if last < len(before) {
+ after.WriteString(before[last:])
+ }
+ return after.String()
+}
+
+// LineEdits takes a set of edits and expands and merges them as necessary
+// to ensure that there are only full line edits left when it is done.
+func LineEdits(before string, edits []TextEdit) []TextEdit {
+ if len(edits) == 0 {
+ return nil
+ }
+ c, edits, partial := prepareEdits(before, edits)
+ if partial {
+ edits = lineEdits(before, c, edits)
+ }
+ return edits
+}
+
+// prepareEdits returns a sorted copy of the edits
+func prepareEdits(before string, edits []TextEdit) (*span.TokenConverter, []TextEdit, bool) {
+ partial := false
+ c := span.NewContentConverter("", []byte(before))
+ copied := make([]TextEdit, len(edits))
+ for i, edit := range edits {
+ edit.Span, _ = edit.Span.WithAll(c)
+ copied[i] = edit
+ partial = partial ||
+ edit.Span.Start().Offset() >= len(before) ||
+ edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1
+ }
+ SortTextEdits(copied)
+ return c, copied, partial
+}
+
+// lineEdits rewrites the edits to always be full line edits
+func lineEdits(before string, c *span.TokenConverter, edits []TextEdit) []TextEdit {
+ adjusted := make([]TextEdit, 0, len(edits))
+ current := TextEdit{Span: span.Invalid}
+ for _, edit := range edits {
+ if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() {
+ // overlaps with the current edit, need to combine
+ // first get the gap from the previous edit
+ gap := before[current.Span.End().Offset():edit.Span.Start().Offset()]
+ // now add the text of this edit
+ current.NewText += gap + edit.NewText
+ // and then adjust the end position
+ current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End())
+ } else {
+ // does not overlap, add previous run (if there is one)
+ adjusted = addEdit(before, adjusted, current)
+ // and then remember this edit as the start of the next run
+ current = edit
+ }
+ }
+ // add the current pending run if there is one
+ return addEdit(before, adjusted, current)
+}
+
+func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit {
+ if !edit.Span.IsValid() {
+ return edits
+ }
+ // if edit is partial, expand it to full line now
+ start := edit.Span.Start()
+ end := edit.Span.End()
+ if start.Column() > 1 {
+ // prepend the text and adjust to start of line
+ delta := start.Column() - 1
+ start = span.NewPoint(start.Line(), 1, start.Offset()-delta)
+ edit.Span = span.New(edit.Span.URI(), start, end)
+ edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
+ }
+ if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' {
+ // after end of file that does not end in eol, so join to last line of file
+ // to do this we need to know where the start of the last line was
+ eol := strings.LastIndex(before, "\n")
+ if eol < 0 {
+ // file is one non terminated line
+ eol = 0
+ }
+ delta := len(before) - eol
+ start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta)
+ edit.Span = span.New(edit.Span.URI(), start, end)
+ edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
+ }
+ if end.Column() > 1 {
+ remains := before[end.Offset():]
+ eol := strings.IndexRune(remains, '\n')
+ if eol < 0 {
+ eol = len(remains)
+ } else {
+ eol++
+ }
+ end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol)
+ edit.Span = span.New(edit.Span.URI(), start, end)
+ edit.NewText = edit.NewText + remains[:eol]
+ }
+ edits = append(edits, edit)
+ return edits
+}
diff --git a/vendor/github.com/hexops/gotextdiff/go.mod b/vendor/github.com/hexops/gotextdiff/go.mod
new file mode 100644
index 000000000..e8a357256
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/go.mod
@@ -0,0 +1,3 @@
+module github.com/hexops/gotextdiff
+
+go 1.16
diff --git a/vendor/github.com/hexops/gotextdiff/myers/diff.go b/vendor/github.com/hexops/gotextdiff/myers/diff.go
new file mode 100644
index 000000000..5e3e92364
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/myers/diff.go
@@ -0,0 +1,205 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package myers implements the Myers diff algorithm.
+package myers
+
+import (
+ "strings"
+
+ diff "github.com/hexops/gotextdiff"
+ "github.com/hexops/gotextdiff/span"
+)
+
+// Sources:
+// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/
+// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2
+
+func ComputeEdits(uri span.URI, before, after string) []diff.TextEdit {
+ ops := operations(splitLines(before), splitLines(after))
+ edits := make([]diff.TextEdit, 0, len(ops))
+ for _, op := range ops {
+ s := span.New(uri, span.NewPoint(op.I1+1, 1, 0), span.NewPoint(op.I2+1, 1, 0))
+ switch op.Kind {
+ case diff.Delete:
+ // Delete: unformatted[i1:i2] is deleted.
+ edits = append(edits, diff.TextEdit{Span: s})
+ case diff.Insert:
+ // Insert: formatted[j1:j2] is inserted at unformatted[i1:i1].
+ if content := strings.Join(op.Content, ""); content != "" {
+ edits = append(edits, diff.TextEdit{Span: s, NewText: content})
+ }
+ }
+ }
+ return edits
+}
+
+type operation struct {
+ Kind diff.OpKind
+ Content []string // content from b
+ I1, I2 int // indices of the line in a
+ J1 int // indices of the line in b, J2 implied by len(Content)
+}
+
+// operations returns the list of operations to convert a into b, consolidating
+// operations for multiple lines and not including equal lines.
+func operations(a, b []string) []*operation {
+ if len(a) == 0 && len(b) == 0 {
+ return nil
+ }
+
+ trace, offset := shortestEditSequence(a, b)
+ snakes := backtrack(trace, len(a), len(b), offset)
+
+ M, N := len(a), len(b)
+
+ var i int
+ solution := make([]*operation, len(a)+len(b))
+
+ add := func(op *operation, i2, j2 int) {
+ if op == nil {
+ return
+ }
+ op.I2 = i2
+ if op.Kind == diff.Insert {
+ op.Content = b[op.J1:j2]
+ }
+ solution[i] = op
+ i++
+ }
+ x, y := 0, 0
+ for _, snake := range snakes {
+ if len(snake) < 2 {
+ continue
+ }
+ var op *operation
+ // delete (horizontal)
+ for snake[0]-snake[1] > x-y {
+ if op == nil {
+ op = &operation{
+ Kind: diff.Delete,
+ I1: x,
+ J1: y,
+ }
+ }
+ x++
+ if x == M {
+ break
+ }
+ }
+ add(op, x, y)
+ op = nil
+ // insert (vertical)
+ for snake[0]-snake[1] < x-y {
+ if op == nil {
+ op = &operation{
+ Kind: diff.Insert,
+ I1: x,
+ J1: y,
+ }
+ }
+ y++
+ }
+ add(op, x, y)
+ op = nil
+ // equal (diagonal)
+ for x < snake[0] {
+ x++
+ y++
+ }
+ if x >= M && y >= N {
+ break
+ }
+ }
+ return solution[:i]
+}
+
+// backtrack uses the trace for the edit sequence computation and returns the
+// "snakes" that make up the solution. A "snake" is a single deletion or
+// insertion followed by zero or diagonals.
+func backtrack(trace [][]int, x, y, offset int) [][]int {
+ snakes := make([][]int, len(trace))
+ d := len(trace) - 1
+ for ; x > 0 && y > 0 && d > 0; d-- {
+ V := trace[d]
+ if len(V) == 0 {
+ continue
+ }
+ snakes[d] = []int{x, y}
+
+ k := x - y
+
+ var kPrev int
+ if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
+ kPrev = k + 1
+ } else {
+ kPrev = k - 1
+ }
+
+ x = V[kPrev+offset]
+ y = x - kPrev
+ }
+ if x < 0 || y < 0 {
+ return snakes
+ }
+ snakes[d] = []int{x, y}
+ return snakes
+}
+
+// shortestEditSequence returns the shortest edit sequence that converts a into b.
+func shortestEditSequence(a, b []string) ([][]int, int) {
+ M, N := len(a), len(b)
+ V := make([]int, 2*(N+M)+1)
+ offset := N + M
+ trace := make([][]int, N+M+1)
+
+ // Iterate through the maximum possible length of the SES (N+M).
+ for d := 0; d <= N+M; d++ {
+ copyV := make([]int, len(V))
+ // k lines are represented by the equation y = x - k. We move in
+ // increments of 2 because end points for even d are on even k lines.
+ for k := -d; k <= d; k += 2 {
+ // At each point, we either go down or to the right. We go down if
+ // k == -d, and we go to the right if k == d. We also prioritize
+ // the maximum x value, because we prefer deletions to insertions.
+ var x int
+ if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
+ x = V[k+1+offset] // down
+ } else {
+ x = V[k-1+offset] + 1 // right
+ }
+
+ y := x - k
+
+ // Diagonal moves while we have equal contents.
+ for x < M && y < N && a[x] == b[y] {
+ x++
+ y++
+ }
+
+ V[k+offset] = x
+
+ // Return if we've exceeded the maximum values.
+ if x == M && y == N {
+ // Makes sure to save the state of the array before returning.
+ copy(copyV, V)
+ trace[d] = copyV
+ return trace, offset
+ }
+ }
+
+ // Save the state of the array.
+ copy(copyV, V)
+ trace[d] = copyV
+ }
+ return nil, 0
+}
+
+func splitLines(text string) []string {
+ lines := strings.SplitAfter(text, "\n")
+ if lines[len(lines)-1] == "" {
+ lines = lines[:len(lines)-1]
+ }
+ return lines
+}
diff --git a/vendor/github.com/hexops/gotextdiff/span/parse.go b/vendor/github.com/hexops/gotextdiff/span/parse.go
new file mode 100644
index 000000000..aa17c84ec
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/parse.go
@@ -0,0 +1,100 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// Parse returns the location represented by the input.
+// Only file paths are accepted, not URIs.
+// The returned span will be normalized, and thus if printed may produce a
+// different string.
+func Parse(input string) Span {
+ // :0:0#0-0:0#0
+ valid := input
+ var hold, offset int
+ hadCol := false
+ suf := rstripSuffix(input)
+ if suf.sep == "#" {
+ offset = suf.num
+ suf = rstripSuffix(suf.remains)
+ }
+ if suf.sep == ":" {
+ valid = suf.remains
+ hold = suf.num
+ hadCol = true
+ suf = rstripSuffix(suf.remains)
+ }
+ switch {
+ case suf.sep == ":":
+ return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), Point{})
+ case suf.sep == "-":
+ // we have a span, fall out of the case to continue
+ default:
+ // separator not valid, rewind to either the : or the start
+ return New(URIFromPath(valid), NewPoint(hold, 0, offset), Point{})
+ }
+ // only the span form can get here
+ // at this point we still don't know what the numbers we have mean
+ // if have not yet seen a : then we might have either a line or a column depending
+ // on whether start has a column or not
+ // we build an end point and will fix it later if needed
+ end := NewPoint(suf.num, hold, offset)
+ hold, offset = 0, 0
+ suf = rstripSuffix(suf.remains)
+ if suf.sep == "#" {
+ offset = suf.num
+ suf = rstripSuffix(suf.remains)
+ }
+ if suf.sep != ":" {
+ // turns out we don't have a span after all, rewind
+ return New(URIFromPath(valid), end, Point{})
+ }
+ valid = suf.remains
+ hold = suf.num
+ suf = rstripSuffix(suf.remains)
+ if suf.sep != ":" {
+ // line#offset only
+ return New(URIFromPath(valid), NewPoint(hold, 0, offset), end)
+ }
+ // we have a column, so if end only had one number, it is also the column
+ if !hadCol {
+ end = NewPoint(suf.num, end.v.Line, end.v.Offset)
+ }
+ return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), end)
+}
+
+type suffix struct {
+ remains string
+ sep string
+ num int
+}
+
+func rstripSuffix(input string) suffix {
+ if len(input) == 0 {
+ return suffix{"", "", -1}
+ }
+ remains := input
+ num := -1
+ // first see if we have a number at the end
+ last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
+ if last >= 0 && last < len(remains)-1 {
+ number, err := strconv.ParseInt(remains[last+1:], 10, 64)
+ if err == nil {
+ num = int(number)
+ remains = remains[:last+1]
+ }
+ }
+ // now see if we have a trailing separator
+ r, w := utf8.DecodeLastRuneInString(remains)
+ if r != ':' && r != '#' {
+ return suffix{input, "", -1}
+ }
+ remains = remains[:len(remains)-w]
+ return suffix{remains, string(r), num}
+}
diff --git a/vendor/github.com/hexops/gotextdiff/span/span.go b/vendor/github.com/hexops/gotextdiff/span/span.go
new file mode 100644
index 000000000..4d2ad0986
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/span.go
@@ -0,0 +1,285 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package span contains support for representing with positions and ranges in
+// text files.
+package span
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+)
+
+// Span represents a source code range in standardized form.
+type Span struct {
+ v span
+}
+
+// Point represents a single point within a file.
+// In general this should only be used as part of a Span, as on its own it
+// does not carry enough information.
+type Point struct {
+ v point
+}
+
+type span struct {
+ URI URI `json:"uri"`
+ Start point `json:"start"`
+ End point `json:"end"`
+}
+
+type point struct {
+ Line int `json:"line"`
+ Column int `json:"column"`
+ Offset int `json:"offset"`
+}
+
+// Invalid is a span that reports false from IsValid
+var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
+
+var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
+
+// Converter is the interface to an object that can convert between line:column
+// and offset forms for a single file.
+type Converter interface {
+ //ToPosition converts from an offset to a line:column pair.
+ ToPosition(offset int) (int, int, error)
+ //ToOffset converts from a line:column pair to an offset.
+ ToOffset(line, col int) (int, error)
+}
+
+func New(uri URI, start Point, end Point) Span {
+ s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
+ s.v.clean()
+ return s
+}
+
+func NewPoint(line, col, offset int) Point {
+ p := Point{v: point{Line: line, Column: col, Offset: offset}}
+ p.v.clean()
+ return p
+}
+
+func Compare(a, b Span) int {
+ if r := CompareURI(a.URI(), b.URI()); r != 0 {
+ return r
+ }
+ if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
+ return r
+ }
+ return comparePoint(a.v.End, b.v.End)
+}
+
+func ComparePoint(a, b Point) int {
+ return comparePoint(a.v, b.v)
+}
+
+func comparePoint(a, b point) int {
+ if !a.hasPosition() {
+ if a.Offset < b.Offset {
+ return -1
+ }
+ if a.Offset > b.Offset {
+ return 1
+ }
+ return 0
+ }
+ if a.Line < b.Line {
+ return -1
+ }
+ if a.Line > b.Line {
+ return 1
+ }
+ if a.Column < b.Column {
+ return -1
+ }
+ if a.Column > b.Column {
+ return 1
+ }
+ return 0
+}
+
+func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
+func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
+func (s Span) IsValid() bool { return s.v.Start.isValid() }
+func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
+func (s Span) URI() URI { return s.v.URI }
+func (s Span) Start() Point { return Point{s.v.Start} }
+func (s Span) End() Point { return Point{s.v.End} }
+func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
+func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
+
+func (p Point) HasPosition() bool { return p.v.hasPosition() }
+func (p Point) HasOffset() bool { return p.v.hasOffset() }
+func (p Point) IsValid() bool { return p.v.isValid() }
+func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
+func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
+func (p Point) Line() int {
+ if !p.v.hasPosition() {
+ panic(fmt.Errorf("position not set in %v", p.v))
+ }
+ return p.v.Line
+}
+func (p Point) Column() int {
+ if !p.v.hasPosition() {
+ panic(fmt.Errorf("position not set in %v", p.v))
+ }
+ return p.v.Column
+}
+func (p Point) Offset() int {
+ if !p.v.hasOffset() {
+ panic(fmt.Errorf("offset not set in %v", p.v))
+ }
+ return p.v.Offset
+}
+
+func (p point) hasPosition() bool { return p.Line > 0 }
+func (p point) hasOffset() bool { return p.Offset >= 0 }
+func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
+func (p point) isZero() bool {
+ return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
+}
+
+func (s *span) clean() {
+ //this presumes the points are already clean
+ if !s.End.isValid() || (s.End == point{}) {
+ s.End = s.Start
+ }
+}
+
+func (p *point) clean() {
+ if p.Line < 0 {
+ p.Line = 0
+ }
+ if p.Column <= 0 {
+ if p.Line > 0 {
+ p.Column = 1
+ } else {
+ p.Column = 0
+ }
+ }
+ if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
+ p.Offset = -1
+ }
+}
+
+// Format implements fmt.Formatter to print the Location in a standard form.
+// The format produced is one that can be read back in using Parse.
+func (s Span) Format(f fmt.State, c rune) {
+ fullForm := f.Flag('+')
+ preferOffset := f.Flag('#')
+ // we should always have a uri, simplify if it is file format
+ //TODO: make sure the end of the uri is unambiguous
+ uri := string(s.v.URI)
+ if c == 'f' {
+ uri = path.Base(uri)
+ } else if !fullForm {
+ uri = s.v.URI.Filename()
+ }
+ fmt.Fprint(f, uri)
+ if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
+ return
+ }
+ // see which bits of start to write
+ printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
+ printLine := s.HasPosition() && (fullForm || !printOffset)
+ printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
+ fmt.Fprint(f, ":")
+ if printLine {
+ fmt.Fprintf(f, "%d", s.v.Start.Line)
+ }
+ if printColumn {
+ fmt.Fprintf(f, ":%d", s.v.Start.Column)
+ }
+ if printOffset {
+ fmt.Fprintf(f, "#%d", s.v.Start.Offset)
+ }
+ // start is written, do we need end?
+ if s.IsPoint() {
+ return
+ }
+ // we don't print the line if it did not change
+ printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
+ fmt.Fprint(f, "-")
+ if printLine {
+ fmt.Fprintf(f, "%d", s.v.End.Line)
+ }
+ if printColumn {
+ if printLine {
+ fmt.Fprint(f, ":")
+ }
+ fmt.Fprintf(f, "%d", s.v.End.Column)
+ }
+ if printOffset {
+ fmt.Fprintf(f, "#%d", s.v.End.Offset)
+ }
+}
+
+func (s Span) WithPosition(c Converter) (Span, error) {
+ if err := s.update(c, true, false); err != nil {
+ return Span{}, err
+ }
+ return s, nil
+}
+
+func (s Span) WithOffset(c Converter) (Span, error) {
+ if err := s.update(c, false, true); err != nil {
+ return Span{}, err
+ }
+ return s, nil
+}
+
+func (s Span) WithAll(c Converter) (Span, error) {
+ if err := s.update(c, true, true); err != nil {
+ return Span{}, err
+ }
+ return s, nil
+}
+
+func (s *Span) update(c Converter, withPos, withOffset bool) error {
+ if !s.IsValid() {
+ return fmt.Errorf("cannot add information to an invalid span")
+ }
+ if withPos && !s.HasPosition() {
+ if err := s.v.Start.updatePosition(c); err != nil {
+ return err
+ }
+ if s.v.End.Offset == s.v.Start.Offset {
+ s.v.End = s.v.Start
+ } else if err := s.v.End.updatePosition(c); err != nil {
+ return err
+ }
+ }
+ if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
+ if err := s.v.Start.updateOffset(c); err != nil {
+ return err
+ }
+ if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
+ s.v.End.Offset = s.v.Start.Offset
+ } else if err := s.v.End.updateOffset(c); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *point) updatePosition(c Converter) error {
+ line, col, err := c.ToPosition(p.Offset)
+ if err != nil {
+ return err
+ }
+ p.Line = line
+ p.Column = col
+ return nil
+}
+
+func (p *point) updateOffset(c Converter) error {
+ offset, err := c.ToOffset(p.Line, p.Column)
+ if err != nil {
+ return err
+ }
+ p.Offset = offset
+ return nil
+}
diff --git a/vendor/github.com/hexops/gotextdiff/span/token.go b/vendor/github.com/hexops/gotextdiff/span/token.go
new file mode 100644
index 000000000..6f8b9b570
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/token.go
@@ -0,0 +1,194 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+ "fmt"
+ "go/token"
+)
+
+// Range represents a source code range in token.Pos form.
+// It also carries the FileSet that produced the positions, so that it is
+// self contained.
+type Range struct {
+ FileSet *token.FileSet
+ Start token.Pos
+ End token.Pos
+ Converter Converter
+}
+
+type FileConverter struct {
+ file *token.File
+}
+
+// TokenConverter is a Converter backed by a token file set and file.
+// It uses the file set methods to work out the conversions, which
+// makes it fast and does not require the file contents.
+type TokenConverter struct {
+ FileConverter
+ fset *token.FileSet
+}
+
+// NewRange creates a new Range from a FileSet and two positions.
+// To represent a point pass a 0 as the end pos.
+func NewRange(fset *token.FileSet, start, end token.Pos) Range {
+ return Range{
+ FileSet: fset,
+ Start: start,
+ End: end,
+ }
+}
+
+// NewTokenConverter returns an implementation of Converter backed by a
+// token.File.
+func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
+ return &TokenConverter{fset: fset, FileConverter: FileConverter{file: f}}
+}
+
+// NewContentConverter returns an implementation of Converter for the
+// given file content.
+func NewContentConverter(filename string, content []byte) *TokenConverter {
+ fset := token.NewFileSet()
+ f := fset.AddFile(filename, -1, len(content))
+ f.SetLinesForContent(content)
+ return NewTokenConverter(fset, f)
+}
+
+// IsPoint returns true if the range represents a single point.
+func (r Range) IsPoint() bool {
+ return r.Start == r.End
+}
+
+// Span converts a Range to a Span that represents the Range.
+// It will fill in all the members of the Span, calculating the line and column
+// information.
+func (r Range) Span() (Span, error) {
+ if !r.Start.IsValid() {
+ return Span{}, fmt.Errorf("start pos is not valid")
+ }
+ f := r.FileSet.File(r.Start)
+ if f == nil {
+ return Span{}, fmt.Errorf("file not found in FileSet")
+ }
+ return FileSpan(f, r.Converter, r.Start, r.End)
+}
+
+// FileSpan returns a span within tok, using converter to translate between
+// offsets and positions.
+func FileSpan(tok *token.File, converter Converter, start, end token.Pos) (Span, error) {
+ var s Span
+ var err error
+ var startFilename string
+ startFilename, s.v.Start.Line, s.v.Start.Column, err = position(tok, start)
+ if err != nil {
+ return Span{}, err
+ }
+ s.v.URI = URIFromPath(startFilename)
+ if end.IsValid() {
+ var endFilename string
+ endFilename, s.v.End.Line, s.v.End.Column, err = position(tok, end)
+ if err != nil {
+ return Span{}, err
+ }
+ // In the presence of line directives, a single File can have sections from
+ // multiple file names.
+ if endFilename != startFilename {
+ return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename)
+ }
+ }
+ s.v.Start.clean()
+ s.v.End.clean()
+ s.v.clean()
+ if converter != nil {
+ return s.WithOffset(converter)
+ }
+ if startFilename != tok.Name() {
+ return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", tok.Name(), startFilename)
+ }
+ return s.WithOffset(&FileConverter{tok})
+}
+
+func position(f *token.File, pos token.Pos) (string, int, int, error) {
+ off, err := offset(f, pos)
+ if err != nil {
+ return "", 0, 0, err
+ }
+ return positionFromOffset(f, off)
+}
+
+func positionFromOffset(f *token.File, offset int) (string, int, int, error) {
+ if offset > f.Size() {
+ return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size())
+ }
+ pos := f.Pos(offset)
+ p := f.Position(pos)
+ // TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if
+ // the file's last character is not a newline.
+ if offset == f.Size() {
+ return p.Filename, p.Line + 1, 1, nil
+ }
+ return p.Filename, p.Line, p.Column, nil
+}
+
+// offset is a copy of the Offset function in go/token, but with the adjustment
+// that it does not panic on invalid positions.
+func offset(f *token.File, pos token.Pos) (int, error) {
+ if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
+ return 0, fmt.Errorf("invalid pos")
+ }
+ return int(pos) - f.Base(), nil
+}
+
+// Range converts a Span to a Range that represents the Span for the supplied
+// File.
+func (s Span) Range(converter *TokenConverter) (Range, error) {
+ s, err := s.WithOffset(converter)
+ if err != nil {
+ return Range{}, err
+ }
+ // go/token will panic if the offset is larger than the file's size,
+ // so check here to avoid panicking.
+ if s.Start().Offset() > converter.file.Size() {
+ return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
+ }
+ if s.End().Offset() > converter.file.Size() {
+ return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
+ }
+ return Range{
+ FileSet: converter.fset,
+ Start: converter.file.Pos(s.Start().Offset()),
+ End: converter.file.Pos(s.End().Offset()),
+ Converter: converter,
+ }, nil
+}
+
+func (l *FileConverter) ToPosition(offset int) (int, int, error) {
+ _, line, col, err := positionFromOffset(l.file, offset)
+ return line, col, err
+}
+
+func (l *FileConverter) ToOffset(line, col int) (int, error) {
+ if line < 0 {
+ return -1, fmt.Errorf("line is not valid")
+ }
+ lineMax := l.file.LineCount() + 1
+ if line > lineMax {
+ return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
+ } else if line == lineMax {
+ if col > 1 {
+ return -1, fmt.Errorf("column is beyond end of file")
+ }
+ // at the end of the file, allowing for a trailing eol
+ return l.file.Size(), nil
+ }
+ pos := lineStart(l.file, line)
+ if !pos.IsValid() {
+ return -1, fmt.Errorf("line is not in file")
+ }
+ // we assume that column is in bytes here, and that the first byte of a
+ // line is at column 1
+ pos += token.Pos(col - 1)
+ return offset(l.file, pos)
+}
diff --git a/vendor/github.com/hexops/gotextdiff/span/token111.go b/vendor/github.com/hexops/gotextdiff/span/token111.go
new file mode 100644
index 000000000..bf7a5406b
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/token111.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.12
+
+package span
+
+import (
+ "go/token"
+)
+
+// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
+// versions <= 1.11, we borrow logic from the analysisutil package.
+// TODO(rstambler): Delete this file when we no longer support Go 1.11.
+func lineStart(f *token.File, line int) token.Pos {
+ // Use binary search to find the start offset of this line.
+
+ min := 0 // inclusive
+ max := f.Size() // exclusive
+ for {
+ offset := (min + max) / 2
+ pos := f.Pos(offset)
+ posn := f.Position(pos)
+ if posn.Line == line {
+ return pos - (token.Pos(posn.Column) - 1)
+ }
+
+ if min+1 >= max {
+ return token.NoPos
+ }
+
+ if posn.Line < line {
+ min = offset
+ } else {
+ max = offset
+ }
+ }
+}
diff --git a/vendor/github.com/hexops/gotextdiff/span/token112.go b/vendor/github.com/hexops/gotextdiff/span/token112.go
new file mode 100644
index 000000000..017aec9c1
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/token112.go
@@ -0,0 +1,16 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.12
+
+package span
+
+import (
+ "go/token"
+)
+
// lineStart returns the position of the first byte of the given 1-based
// line, delegating to (*token.File).LineStart, which exists since Go 1.12.
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
	return f.LineStart(line)
}
diff --git a/vendor/github.com/hexops/gotextdiff/span/uri.go b/vendor/github.com/hexops/gotextdiff/span/uri.go
new file mode 100644
index 000000000..250492135
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/uri.go
@@ -0,0 +1,169 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "unicode"
+)
+
// fileScheme is the only URI scheme this package supports: files on disk.
const fileScheme = "file"

// URI represents the full URI for a file.
type URI string
+
+func (uri URI) IsFile() bool {
+ return strings.HasPrefix(string(uri), "file://")
+}
+
+// Filename returns the file path for the given URI.
+// It is an error to call this on a URI that is not a valid filename.
+func (uri URI) Filename() string {
+ filename, err := filename(uri)
+ if err != nil {
+ panic(err)
+ }
+ return filepath.FromSlash(filename)
+}
+
// filename converts a file URI to its slash-separated file path, returning
// an error for URIs that do not use the file scheme. The empty URI maps to
// the empty path.
func filename(uri URI) (string, error) {
	if uri == "" {
		return "", nil
	}
	u, err := url.ParseRequestURI(string(uri))
	if err != nil {
		return "", err
	}
	if u.Scheme != fileScheme {
		return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
	}
	// If the URI is a Windows URI, we trim the leading "/" and uppercase
	// the drive letter, which will never be case sensitive.
	if isWindowsDriveURIPath(u.Path) {
		u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:]
	}
	return u.Path, nil
}
+
// URIFromURI canonicalizes a string already expected to be a URI.
// Non-file URIs are returned unchanged; file URIs are normalized (slash
// count, percent-escaping, Windows drive-letter case) so that equal files
// yield equal URIs.
func URIFromURI(s string) URI {
	if !strings.HasPrefix(s, "file://") {
		return URI(s)
	}

	if !strings.HasPrefix(s, "file:///") {
		// VS Code sends URLs with only two slashes, which are invalid. golang/go#39789.
		s = "file:///" + s[len("file://"):]
	}
	// Even though the input is a URI, it may not be in canonical form. VS Code
	// in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize.
	path, err := url.PathUnescape(s[len("file://"):])
	if err != nil {
		// The input claimed to be a URI but its escapes do not decode;
		// treat this as a programmer error.
		panic(err)
	}

	// File URIs from Windows may have lowercase drive letters.
	// Since drive letters are guaranteed to be case insensitive,
	// we change them to uppercase to remain consistent.
	// For example, file:///c:/x/y/z becomes file:///C:/x/y/z.
	if isWindowsDriveURIPath(path) {
		path = path[:1] + strings.ToUpper(string(path[1])) + path[2:]
	}
	u := url.URL{Scheme: fileScheme, Path: path}
	return URI(u.String())
}
+
+func CompareURI(a, b URI) int {
+ if equalURI(a, b) {
+ return 0
+ }
+ if a < b {
+ return -1
+ }
+ return 1
+}
+
+func equalURI(a, b URI) bool {
+ if a == b {
+ return true
+ }
+ // If we have the same URI basename, we may still have the same file URIs.
+ if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
+ return false
+ }
+ fa, err := filename(a)
+ if err != nil {
+ return false
+ }
+ fb, err := filename(b)
+ if err != nil {
+ return false
+ }
+ // Stat the files to check if they are equal.
+ infoa, err := os.Stat(filepath.FromSlash(fa))
+ if err != nil {
+ return false
+ }
+ infob, err := os.Stat(filepath.FromSlash(fb))
+ if err != nil {
+ return false
+ }
+ return os.SameFile(infoa, infob)
+}
+
// URIFromPath returns a span URI for the supplied file path.
// It will always have the file scheme.
// NOTE(review): the parameter shadows the imported "path" package; only
// "filepath" is used in this function, so the shadowing is harmless.
func URIFromPath(path string) URI {
	if path == "" {
		return ""
	}
	// Handle standard library paths that contain the literal "$GOROOT".
	// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
	const prefix = "$GOROOT"
	if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
		suffix := path[len(prefix):]
		path = runtime.GOROOT() + suffix
	}
	// Windows drive-letter paths (e.g. C:/x) are already absolute and are
	// deliberately not passed through filepath.Abs.
	if !isWindowsDrivePath(path) {
		if abs, err := filepath.Abs(path); err == nil {
			path = abs
		}
	}
	// Check the file path again, in case it became absolute.
	if isWindowsDrivePath(path) {
		// Produce the canonical "/C:/..." URI path form: leading slash,
		// uppercase drive letter.
		path = "/" + strings.ToUpper(string(path[0])) + path[1:]
	}
	path = filepath.ToSlash(path)
	u := url.URL{
		Scheme: fileScheme,
		Path:   path,
	}
	return URI(u.String())
}
+
// isWindowsDrivePath reports whether the file path is of the form used by
// Windows: a drive letter followed by ":", e.g. C:/x/y/z. Paths shorter
// than three bytes never qualify.
func isWindowsDrivePath(path string) bool {
	return len(path) >= 3 && unicode.IsLetter(rune(path[0])) && path[1] == ':'
}
+
// isWindowsDriveURIPath reports whether the URI path carries a Windows
// drive prefix such as "/C:". url.Parse does not specially handle Windows
// paths (see golang/go#6027), so the raw path is inspected directly.
func isWindowsDriveURIPath(uri string) bool {
	if len(uri) < 4 {
		return false
	}
	return uri[0] == '/' && uri[2] == ':' && unicode.IsLetter(rune(uri[1]))
}
diff --git a/vendor/github.com/hexops/gotextdiff/span/utf16.go b/vendor/github.com/hexops/gotextdiff/span/utf16.go
new file mode 100644
index 000000000..f06a2468b
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/span/utf16.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+ "fmt"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
// ToUTF16Column calculates the utf16 column expressed by the point given the
// supplied file contents.
// This is used to convert from the native (always in bytes) column
// representation and the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
	if !p.HasPosition() {
		return -1, fmt.Errorf("ToUTF16Column: point is missing position")
	}
	if !p.HasOffset() {
		return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
	}
	offset := p.Offset()      // 0-based
	colZero := p.Column() - 1 // 0-based
	if colZero == 0 {
		// 0-based column 0, so it must be chr 1
		return 1, nil
	} else if colZero < 0 {
		return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
	}
	// work out the offset at the start of the line using the column
	lineOffset := offset - colZero
	if lineOffset < 0 || offset > len(content) {
		return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
	}
	// Use the offset to pick out the line start.
	// This cannot panic: the guard above established lineOffset >= 0 and
	// offset <= len(content), and lineOffset < offset since colZero > 0.
	start := content[lineOffset:]

	// Now, truncate down to the supplied column.
	start = start[:colZero]

	// and count the number of utf16 characters
	// in theory we could do this by hand more efficiently...
	return len(utf16.Encode([]rune(string(start)))) + 1, nil
}
+
// FromUTF16Column advances the point by the utf16 character offset given the
// supplied line contents.
// This is used to convert from the utf16 counts used by some editors to the
// native (always in bytes) column representation.
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
	if !p.HasOffset() {
		return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
	}
	// if chr is 1 then no adjustment needed
	if chr <= 1 {
		return p, nil
	}
	if p.Offset() >= len(content) {
		return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
	}
	remains := content[p.Offset():]
	// scan forward the specified number of characters
	for count := 1; count < chr; count++ {
		if len(remains) <= 0 {
			return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
		}
		r, w := utf8.DecodeRune(remains)
		if r == '\n' {
			// Per the LSP spec:
			//
			// > If the character value is greater than the line length it
			// > defaults back to the line length.
			break
		}
		remains = remains[w:]
		if r >= 0x10000 {
			// a two point rune: it occupies two UTF-16 code units
			count++
			// if we finished in a two point rune, do not advance past the first
			if count >= chr {
				break
			}
		}
		// advance the point by the rune's byte width
		p.v.Column += w
		p.v.Offset += w
	}
	return p, nil
}
diff --git a/vendor/github.com/hexops/gotextdiff/unified.go b/vendor/github.com/hexops/gotextdiff/unified.go
new file mode 100644
index 000000000..b7d85cfcc
--- /dev/null
+++ b/vendor/github.com/hexops/gotextdiff/unified.go
@@ -0,0 +1,210 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gotextdiff
+
+import (
+ "fmt"
+ "strings"
+)
+
// Unified represents a set of edits as a unified diff. It implements
// fmt.Formatter (via its Format method), so it can be printed with the
// fmt package to obtain standard unified-diff text.
type Unified struct {
	// From is the name of the original file.
	From string
	// To is the name of the modified file.
	To string
	// Hunks is the set of edit hunks needed to transform the file content.
	Hunks []*Hunk
}
+
// Hunk represents a contiguous set of line edits to apply.
type Hunk struct {
	// The line in the original source where the hunk starts.
	FromLine int
	// The line in the modified source where the hunk starts.
	// (ToUnified assigns the new-file line counter here, and Format prints
	// it after "+" in the @@ header.)
	ToLine int
	// The set of line based edits to apply.
	Lines []Line
}
+
// Line represents a single line operation to apply as part of a Hunk.
type Line struct {
	// Kind is the type of line this represents, deletion, insertion or copy.
	Kind OpKind
	// Content is the content of this line.
	// For deletion it is the line being removed, for all others it is the line
	// to put in the output. It retains its trailing newline when the source
	// line had one.
	Content string
}
+
// OpKind is used to denote the type of operation a line represents.
type OpKind int

const (
	// Delete is the operation kind for a line that is present in the input
	// but not in the output.
	Delete OpKind = iota
	// Insert is the operation kind for a line that is new in the output.
	Insert
	// Equal is the operation kind for a line that is the same in the input and
	// output, often used to provide context around edited lines.
	Equal
)

// String returns a human readable representation of an OpKind. It is not
// intended for machine processing.
func (k OpKind) String() string {
	switch k {
	case Delete:
		return "delete"
	case Insert:
		return "insert"
	case Equal:
		return "equal"
	}
	panic("unknown operation kind")
}
+
const (
	// edge is the number of unchanged context lines kept on each side of a
	// hunk.
	edge = 3
	// gap is the largest run of unchanged lines tolerated inside a single
	// hunk; a larger run starts a new hunk.
	gap = edge * 2
)
+
// ToUnified takes a file contents and a sequence of edits, and calculates
// a unified diff that represents those edits.
func ToUnified(from, to string, content string, edits []TextEdit) Unified {
	u := Unified{
		From: from,
		To:   to,
	}
	if len(edits) == 0 {
		return u
	}
	// Canonicalize the edits; if any do not cover whole lines, expand them
	// to full-line edits so the diff can be expressed line-by-line.
	c, edits, partial := prepareEdits(content, edits)
	if partial {
		edits = lineEdits(content, c, edits)
	}
	lines := splitLines(content)
	var h *Hunk   // hunk currently being built; nil until the first edit
	last := 0     // 0-based original line just past the last emitted edit
	toLine := 0   // running start-line offset in the modified file
	for _, edit := range edits {
		// 0-based [start, end) line range of this edit in the original.
		start := edit.Span.Start().Line() - 1
		end := edit.Span.End().Line() - 1
		switch {
		case h != nil && start == last:
			//direct extension
		case h != nil && start <= last+gap:
			//within range of previous lines, add the joiners
			addEqualLines(h, lines, last, start)
		default:
			//need to start a new hunk
			if h != nil {
				// add the edge to the previous hunk
				addEqualLines(h, lines, last, last+edge)
				u.Hunks = append(u.Hunks, h)
			}
			toLine += start - last
			h = &Hunk{
				FromLine: start + 1,
				ToLine:   toLine + 1,
			}
			// add the edge to the new hunk
			delta := addEqualLines(h, lines, start-edge, start)
			// Rewind both start lines by the context actually added (the
			// file boundary may have truncated it below edge).
			h.FromLine -= delta
			h.ToLine -= delta
		}
		last = start
		// Deleted lines come straight from the original content.
		for i := start; i < end; i++ {
			h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]})
			last++
		}
		// Inserted lines come from the edit's replacement text.
		if edit.NewText != "" {
			for _, line := range splitLines(edit.NewText) {
				h.Lines = append(h.Lines, Line{Kind: Insert, Content: line})
				toLine++
			}
		}
	}
	if h != nil {
		// add the edge to the final hunk
		addEqualLines(h, lines, last, last+edge)
		u.Hunks = append(u.Hunks, h)
	}
	return u
}
+
// splitLines splits text into lines that keep their trailing "\n",
// dropping the empty remainder that SplitAfter produces when the text
// ends in a newline.
func splitLines(text string) []string {
	lines := strings.SplitAfter(text, "\n")
	if n := len(lines); lines[n-1] == "" {
		lines = lines[:n-1]
	}
	return lines
}
+
+func addEqualLines(h *Hunk, lines []string, start, end int) int {
+ delta := 0
+ for i := start; i < end; i++ {
+ if i < 0 {
+ continue
+ }
+ if i >= len(lines) {
+ return delta
+ }
+ h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]})
+ delta++
+ }
+ return delta
+}
+
// Format converts a unified diff to the standard textual form for that diff.
// The output of this function can be passed to tools like patch.
func (u Unified) Format(f fmt.State, r rune) {
	if len(u.Hunks) == 0 {
		return
	}
	fmt.Fprintf(f, "--- %s\n", u.From)
	fmt.Fprintf(f, "+++ %s\n", u.To)
	for _, hunk := range u.Hunks {
		// Count lines on each side: deletions and equals belong to the
		// original file, insertions and equals to the modified file.
		fromCount, toCount := 0, 0
		for _, l := range hunk.Lines {
			switch l.Kind {
			case Delete:
				fromCount++
			case Insert:
				toCount++
			default:
				fromCount++
				toCount++
			}
		}
		// Emit the "@@ -from,count +to,count @@" header; a count of one is
		// omitted, per the unified diff convention.
		fmt.Fprint(f, "@@")
		if fromCount > 1 {
			fmt.Fprintf(f, " -%d,%d", hunk.FromLine, fromCount)
		} else {
			fmt.Fprintf(f, " -%d", hunk.FromLine)
		}
		if toCount > 1 {
			fmt.Fprintf(f, " +%d,%d", hunk.ToLine, toCount)
		} else {
			fmt.Fprintf(f, " +%d", hunk.ToLine)
		}
		fmt.Fprint(f, " @@\n")
		for _, l := range hunk.Lines {
			switch l.Kind {
			case Delete:
				fmt.Fprintf(f, "-%s", l.Content)
			case Insert:
				fmt.Fprintf(f, "+%s", l.Content)
			default:
				fmt.Fprintf(f, " %s", l.Content)
			}
			// Line content normally carries its own "\n"; when it does not,
			// the line was at EOF without a newline, which unified diff
			// marks explicitly.
			if !strings.HasSuffix(l.Content, "\n") {
				fmt.Fprintf(f, "\n\\ No newline at end of file\n")
			}
		}
	}
}