aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--go.mod1
-rw-r--r--go.sum2
-rw-r--r--vendor/github.com/google/flatbuffers/LICENSE202
-rw-r--r--vendor/github.com/google/flatbuffers/go/BUILD.bazel23
-rw-r--r--vendor/github.com/google/flatbuffers/go/builder.go856
-rw-r--r--vendor/github.com/google/flatbuffers/go/doc.go3
-rw-r--r--vendor/github.com/google/flatbuffers/go/encode.go238
-rw-r--r--vendor/github.com/google/flatbuffers/go/grpc.go38
-rw-r--r--vendor/github.com/google/flatbuffers/go/lib.go50
-rw-r--r--vendor/github.com/google/flatbuffers/go/sizes.go55
-rw-r--r--vendor/github.com/google/flatbuffers/go/struct.go8
-rw-r--r--vendor/github.com/google/flatbuffers/go/table.go505
-rw-r--r--vendor/modules.txt3
13 files changed, 1984 insertions, 0 deletions
diff --git a/go.mod b/go.mod
index d8d386225..c854e4cda 100644
--- a/go.mod
+++ b/go.mod
@@ -11,6 +11,7 @@ require (
github.com/bsm/histogram/v3 v3.0.2
github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6
github.com/golangci/golangci-lint v1.57.2
+ github.com/google/flatbuffers v24.3.25+incompatible
github.com/google/go-cmp v0.6.0
github.com/gorilla/handlers v1.5.2
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab
diff --git a/go.sum b/go.sum
index c998b2d2b..823bcd8e9 100644
--- a/go.sum
+++ b/go.sum
@@ -298,6 +298,8 @@ github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI=
+github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
diff --git a/vendor/github.com/google/flatbuffers/LICENSE b/vendor/github.com/google/flatbuffers/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/flatbuffers/go/BUILD.bazel b/vendor/github.com/google/flatbuffers/go/BUILD.bazel
new file mode 100644
index 000000000..78bd8d81a
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/BUILD.bazel
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+alias(
+ name = "go_default_library",
+ actual = ":go",
+ visibility = ["//visibility:public"],
+)
+
+go_library(
+ name = "go",
+ srcs = [
+ "builder.go",
+ "doc.go",
+ "encode.go",
+ "grpc.go",
+ "lib.go",
+ "sizes.go",
+ "struct.go",
+ "table.go",
+ ],
+ importpath = "github.com/google/flatbuffers/go",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/google/flatbuffers/go/builder.go b/vendor/github.com/google/flatbuffers/go/builder.go
new file mode 100644
index 000000000..5d90e8ef9
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/builder.go
@@ -0,0 +1,856 @@
+package flatbuffers
+
+import "sort"
+
+// Builder is a state machine for creating FlatBuffer objects.
+// Use a Builder to construct object(s) starting from leaf nodes.
+//
+// A Builder constructs byte buffers in a last-first manner for simplicity and
+// performance.
+type Builder struct {
+ // `Bytes` gives raw access to the buffer. Most users will want to use
+ // FinishedBytes() instead.
+ Bytes []byte
+
+ minalign int
+ vtable []UOffsetT
+ objectEnd UOffsetT
+ vtables []UOffsetT
+ head UOffsetT
+ nested bool
+ finished bool
+
+ sharedStrings map[string]UOffsetT
+}
+
+const fileIdentifierLength = 4
+const sizePrefixLength = 4
+
+// NewBuilder initializes a Builder of size `initial_size`.
+// The internal buffer is grown as needed.
+func NewBuilder(initialSize int) *Builder {
+ if initialSize <= 0 {
+ initialSize = 0
+ }
+
+ b := &Builder{}
+ b.Bytes = make([]byte, initialSize)
+ b.head = UOffsetT(initialSize)
+ b.minalign = 1
+ b.vtables = make([]UOffsetT, 0, 16) // sensible default capacity
+ return b
+}
+
+// Reset truncates the underlying Builder buffer, facilitating alloc-free
+// reuse of a Builder. It also resets bookkeeping data.
+func (b *Builder) Reset() {
+ if b.Bytes != nil {
+ b.Bytes = b.Bytes[:cap(b.Bytes)]
+ }
+
+ if b.vtables != nil {
+ b.vtables = b.vtables[:0]
+ }
+
+ if b.vtable != nil {
+ b.vtable = b.vtable[:0]
+ }
+
+ if b.sharedStrings != nil {
+ for key := range b.sharedStrings {
+ delete(b.sharedStrings, key)
+ }
+ }
+
+ b.head = UOffsetT(len(b.Bytes))
+ b.minalign = 1
+ b.nested = false
+ b.finished = false
+}
+
+// FinishedBytes returns a pointer to the written data in the byte buffer.
+// Panics if the builder is not in a finished state (which is caused by calling
+// `Finish()`).
+func (b *Builder) FinishedBytes() []byte {
+ b.assertFinished()
+ return b.Bytes[b.Head():]
+}
+
+// StartObject initializes bookkeeping for writing a new object.
+func (b *Builder) StartObject(numfields int) {
+ b.assertNotNested()
+ b.nested = true
+
+ // use 32-bit offsets so that arithmetic doesn't overflow.
+ if cap(b.vtable) < numfields || b.vtable == nil {
+ b.vtable = make([]UOffsetT, numfields)
+ } else {
+ b.vtable = b.vtable[:numfields]
+ for i := 0; i < len(b.vtable); i++ {
+ b.vtable[i] = 0
+ }
+ }
+
+ b.objectEnd = b.Offset()
+}
+
+// WriteVtable serializes the vtable for the current object, if applicable.
+//
+// Before writing out the vtable, this checks pre-existing vtables for equality
+// to this one. If an equal vtable is found, point the object to the existing
+// vtable and return.
+//
+// Because vtable values are sensitive to alignment of object data, not all
+// logically-equal vtables will be deduplicated.
+//
+// A vtable has the following format:
+// <VOffsetT: size of the vtable in bytes, including this value>
+// <VOffsetT: size of the object in bytes, including the vtable offset>
+// <VOffsetT: offset for a field> * N, where N is the number of fields in
+// the schema for this type. Includes deprecated fields.
+// Thus, a vtable is made of 2 + N elements, each SizeVOffsetT bytes wide.
+//
+// An object has the following format:
+// <SOffsetT: offset to this object's vtable (may be negative)>
+// <byte: data>+
+func (b *Builder) WriteVtable() (n UOffsetT) {
+ // Prepend a zero scalar to the object. Later in this function we'll
+ // write an offset here that points to the object's vtable:
+ b.PrependSOffsetT(0)
+
+ objectOffset := b.Offset()
+ existingVtable := UOffsetT(0)
+
+ // Trim vtable of trailing zeroes.
+ i := len(b.vtable) - 1
+ for ; i >= 0 && b.vtable[i] == 0; i-- {
+ }
+ b.vtable = b.vtable[:i+1]
+
+ // Search backwards through existing vtables, because similar vtables
+ // are likely to have been recently appended. See
+ // BenchmarkVtableDeduplication for a case in which this heuristic
+ // saves about 30% of the time used in writing objects with duplicate
+ // tables.
+ for i := len(b.vtables) - 1; i >= 0; i-- {
+ // Find the other vtable, which is associated with `i`:
+ vt2Offset := b.vtables[i]
+ vt2Start := len(b.Bytes) - int(vt2Offset)
+ vt2Len := GetVOffsetT(b.Bytes[vt2Start:])
+
+ metadata := VtableMetadataFields * SizeVOffsetT
+ vt2End := vt2Start + int(vt2Len)
+ vt2 := b.Bytes[vt2Start+metadata : vt2End]
+
+ // Compare the other vtable to the one under consideration.
+ // If they are equal, store the offset and break:
+ if vtableEqual(b.vtable, objectOffset, vt2) {
+ existingVtable = vt2Offset
+ break
+ }
+ }
+
+ if existingVtable == 0 {
+ // Did not find a vtable, so write this one to the buffer.
+
+ // Write out the current vtable in reverse , because
+ // serialization occurs in last-first order:
+ for i := len(b.vtable) - 1; i >= 0; i-- {
+ var off UOffsetT
+ if b.vtable[i] != 0 {
+ // Forward reference to field;
+ // use 32bit number to assert no overflow:
+ off = objectOffset - b.vtable[i]
+ }
+
+ b.PrependVOffsetT(VOffsetT(off))
+ }
+
+ // The two metadata fields are written last.
+
+ // First, store the object bytesize:
+ objectSize := objectOffset - b.objectEnd
+ b.PrependVOffsetT(VOffsetT(objectSize))
+
+ // Second, store the vtable bytesize:
+ vBytes := (len(b.vtable) + VtableMetadataFields) * SizeVOffsetT
+ b.PrependVOffsetT(VOffsetT(vBytes))
+
+ // Next, write the offset to the new vtable in the
+ // already-allocated SOffsetT at the beginning of this object:
+ objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset)
+ WriteSOffsetT(b.Bytes[objectStart:],
+ SOffsetT(b.Offset())-SOffsetT(objectOffset))
+
+ // Finally, store this vtable in memory for future
+ // deduplication:
+ b.vtables = append(b.vtables, b.Offset())
+ } else {
+ // Found a duplicate vtable.
+
+ objectStart := SOffsetT(len(b.Bytes)) - SOffsetT(objectOffset)
+ b.head = UOffsetT(objectStart)
+
+ // Write the offset to the found vtable in the
+ // already-allocated SOffsetT at the beginning of this object:
+ WriteSOffsetT(b.Bytes[b.head:],
+ SOffsetT(existingVtable)-SOffsetT(objectOffset))
+ }
+
+ b.vtable = b.vtable[:0]
+ return objectOffset
+}
+
+// EndObject writes data necessary to finish object construction.
+func (b *Builder) EndObject() UOffsetT {
+ b.assertNested()
+ n := b.WriteVtable()
+ b.nested = false
+ return n
+}
+
+// Doubles the size of the byteslice, and copies the old data towards the
+// end of the new byteslice (since we build the buffer backwards).
+func (b *Builder) growByteBuffer() {
+ if (int64(len(b.Bytes)) & int64(0xC0000000)) != 0 {
+ panic("cannot grow buffer beyond 2 gigabytes")
+ }
+ newLen := len(b.Bytes) * 2
+ if newLen == 0 {
+ newLen = 1
+ }
+
+ if cap(b.Bytes) >= newLen {
+ b.Bytes = b.Bytes[:newLen]
+ } else {
+ extension := make([]byte, newLen-len(b.Bytes))
+ b.Bytes = append(b.Bytes, extension...)
+ }
+
+ middle := newLen / 2
+ copy(b.Bytes[middle:], b.Bytes[:middle])
+}
+
+// Head gives the start of useful data in the underlying byte buffer.
+// Note: unlike other functions, this value is interpreted as from the left.
+func (b *Builder) Head() UOffsetT {
+ return b.head
+}
+
+// Offset relative to the end of the buffer.
+func (b *Builder) Offset() UOffsetT {
+ return UOffsetT(len(b.Bytes)) - b.head
+}
+
+// Pad places zeros at the current offset.
+func (b *Builder) Pad(n int) {
+ for i := 0; i < n; i++ {
+ b.PlaceByte(0)
+ }
+}
+
+// Prep prepares to write an element of `size` after `additional_bytes`
+// have been written, e.g. if you write a string, you need to align such
+// the int length field is aligned to SizeInt32, and the string data follows it
+// directly.
+// If all you need to do is align, `additionalBytes` will be 0.
+func (b *Builder) Prep(size, additionalBytes int) {
+ // Track the biggest thing we've ever aligned to.
+ if size > b.minalign {
+ b.minalign = size
+ }
+ // Find the amount of alignment needed such that `size` is properly
+ // aligned after `additionalBytes`:
+ alignSize := (^(len(b.Bytes) - int(b.Head()) + additionalBytes)) + 1
+ alignSize &= (size - 1)
+
+ // Reallocate the buffer if needed:
+ for int(b.head) <= alignSize+size+additionalBytes {
+ oldBufSize := len(b.Bytes)
+ b.growByteBuffer()
+ b.head += UOffsetT(len(b.Bytes) - oldBufSize)
+ }
+ b.Pad(alignSize)
+}
+
+// PrependSOffsetT prepends an SOffsetT, relative to where it will be written.
+func (b *Builder) PrependSOffsetT(off SOffsetT) {
+ b.Prep(SizeSOffsetT, 0) // Ensure alignment is already done.
+ if !(UOffsetT(off) <= b.Offset()) {
+ panic("unreachable: off <= b.Offset()")
+ }
+ off2 := SOffsetT(b.Offset()) - off + SOffsetT(SizeSOffsetT)
+ b.PlaceSOffsetT(off2)
+}
+
+// PrependUOffsetT prepends an UOffsetT, relative to where it will be written.
+func (b *Builder) PrependUOffsetT(off UOffsetT) {
+ b.Prep(SizeUOffsetT, 0) // Ensure alignment is already done.
+ if !(off <= b.Offset()) {
+ panic("unreachable: off <= b.Offset()")
+ }
+ off2 := b.Offset() - off + UOffsetT(SizeUOffsetT)
+ b.PlaceUOffsetT(off2)
+}
+
+// StartVector initializes bookkeeping for writing a new vector.
+//
+// A vector has the following format:
+// <UOffsetT: number of elements in this vector>
+// <T: data>+, where T is the type of elements of this vector.
+func (b *Builder) StartVector(elemSize, numElems, alignment int) UOffsetT {
+ b.assertNotNested()
+ b.nested = true
+ b.Prep(SizeUint32, elemSize*numElems)
+ b.Prep(alignment, elemSize*numElems) // Just in case alignment > int.
+ return b.Offset()
+}
+
+// EndVector writes data necessary to finish vector construction.
+func (b *Builder) EndVector(vectorNumElems int) UOffsetT {
+ b.assertNested()
+
+ // we already made space for this, so write without PrependUint32
+ b.PlaceUOffsetT(UOffsetT(vectorNumElems))
+
+ b.nested = false
+ return b.Offset()
+}
+
+// CreateVectorOfTables serializes slice of table offsets into a vector.
+func (b *Builder) CreateVectorOfTables(offsets []UOffsetT) UOffsetT {
+ b.assertNotNested()
+ b.StartVector(4, len(offsets), 4)
+ for i := len(offsets) - 1; i >= 0; i-- {
+ b.PrependUOffsetT(offsets[i])
+ }
+ return b.EndVector(len(offsets))
+}
+
+type KeyCompare func(o1, o2 UOffsetT, buf []byte) bool
+
+func (b *Builder) CreateVectorOfSortedTables(offsets []UOffsetT, keyCompare KeyCompare) UOffsetT {
+ sort.Slice(offsets, func(i, j int) bool {
+ return keyCompare(offsets[i], offsets[j], b.Bytes)
+ })
+ return b.CreateVectorOfTables(offsets)
+}
+
+// CreateSharedString Checks if the string is already written
+// to the buffer before calling CreateString
+func (b *Builder) CreateSharedString(s string) UOffsetT {
+ if b.sharedStrings == nil {
+ b.sharedStrings = make(map[string]UOffsetT)
+ }
+ if v, ok := b.sharedStrings[s]; ok {
+ return v
+ }
+ off := b.CreateString(s)
+ b.sharedStrings[s] = off
+ return off
+}
+
+// CreateString writes a null-terminated string as a vector.
+func (b *Builder) CreateString(s string) UOffsetT {
+ b.assertNotNested()
+ b.nested = true
+
+ b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte)
+ b.PlaceByte(0)
+
+ l := UOffsetT(len(s))
+
+ b.head -= l
+ copy(b.Bytes[b.head:b.head+l], s)
+
+ return b.EndVector(len(s))
+}
+
+// CreateByteString writes a byte slice as a string (null-terminated).
+func (b *Builder) CreateByteString(s []byte) UOffsetT {
+ b.assertNotNested()
+ b.nested = true
+
+ b.Prep(int(SizeUOffsetT), (len(s)+1)*SizeByte)
+ b.PlaceByte(0)
+
+ l := UOffsetT(len(s))
+
+ b.head -= l
+ copy(b.Bytes[b.head:b.head+l], s)
+
+ return b.EndVector(len(s))
+}
+
+// CreateByteVector writes a ubyte vector
+func (b *Builder) CreateByteVector(v []byte) UOffsetT {
+ b.assertNotNested()
+ b.nested = true
+
+ b.Prep(int(SizeUOffsetT), len(v)*SizeByte)
+
+ l := UOffsetT(len(v))
+
+ b.head -= l
+ copy(b.Bytes[b.head:b.head+l], v)
+
+ return b.EndVector(len(v))
+}
+
+func (b *Builder) assertNested() {
+ // If you get this assert, you're in an object while trying to write
+ // data that belongs outside of an object.
+ // To fix this, write non-inline data (like vectors) before creating
+ // objects.
+ if !b.nested {
+ panic("Incorrect creation order: must be inside object.")
+ }
+}
+
+func (b *Builder) assertNotNested() {
+ // If you hit this, you're trying to construct a Table/Vector/String
+ // during the construction of its parent table (between the MyTableBuilder
+ // and builder.Finish()).
+ // Move the creation of these sub-objects to above the MyTableBuilder to
+ // not get this assert.
+ // Ignoring this assert may appear to work in simple cases, but the reason
+ // it is here is that storing objects in-line may cause vtable offsets
+ // to not fit anymore. It also leads to vtable duplication.
+ if b.nested {
+ panic("Incorrect creation order: object must not be nested.")
+ }
+}
+
+func (b *Builder) assertFinished() {
+ // If you get this assert, you're attempting to get access a buffer
+ // which hasn't been finished yet. Be sure to call builder.Finish()
+ // with your root table.
+ // If you really need to access an unfinished buffer, use the Bytes
+ // buffer directly.
+ if !b.finished {
+ panic("Incorrect use of FinishedBytes(): must call 'Finish' first.")
+ }
+}
+
+// PrependBoolSlot prepends a bool onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependBoolSlot(o int, x, d bool) {
+ val := byte(0)
+ if x {
+ val = 1
+ }
+ def := byte(0)
+ if d {
+ def = 1
+ }
+ b.PrependByteSlot(o, val, def)
+}
+
+// PrependByteSlot prepends a byte onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependByteSlot(o int, x, d byte) {
+ if x != d {
+ b.PrependByte(x)
+ b.Slot(o)
+ }
+}
+
+// PrependUint8Slot prepends a uint8 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint8Slot(o int, x, d uint8) {
+ if x != d {
+ b.PrependUint8(x)
+ b.Slot(o)
+ }
+}
+
+// PrependUint16Slot prepends a uint16 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint16Slot(o int, x, d uint16) {
+ if x != d {
+ b.PrependUint16(x)
+ b.Slot(o)
+ }
+}
+
+// PrependUint32Slot prepends a uint32 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint32Slot(o int, x, d uint32) {
+ if x != d {
+ b.PrependUint32(x)
+ b.Slot(o)
+ }
+}
+
+// PrependUint64Slot prepends a uint64 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUint64Slot(o int, x, d uint64) {
+ if x != d {
+ b.PrependUint64(x)
+ b.Slot(o)
+ }
+}
+
+// PrependInt8Slot prepends a int8 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt8Slot(o int, x, d int8) {
+ if x != d {
+ b.PrependInt8(x)
+ b.Slot(o)
+ }
+}
+
+// PrependInt16Slot prepends a int16 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt16Slot(o int, x, d int16) {
+ if x != d {
+ b.PrependInt16(x)
+ b.Slot(o)
+ }
+}
+
+// PrependInt32Slot prepends a int32 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt32Slot(o int, x, d int32) {
+ if x != d {
+ b.PrependInt32(x)
+ b.Slot(o)
+ }
+}
+
+// PrependInt64Slot prepends a int64 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependInt64Slot(o int, x, d int64) {
+ if x != d {
+ b.PrependInt64(x)
+ b.Slot(o)
+ }
+}
+
+// PrependFloat32Slot prepends a float32 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependFloat32Slot(o int, x, d float32) {
+ if x != d {
+ b.PrependFloat32(x)
+ b.Slot(o)
+ }
+}
+
+// PrependFloat64Slot prepends a float64 onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependFloat64Slot(o int, x, d float64) {
+ if x != d {
+ b.PrependFloat64(x)
+ b.Slot(o)
+ }
+}
+
+// PrependUOffsetTSlot prepends an UOffsetT onto the object at vtable slot `o`.
+// If value `x` equals default `d`, then the slot will be set to zero and no
+// other data will be written.
+func (b *Builder) PrependUOffsetTSlot(o int, x, d UOffsetT) {
+ if x != d {
+ b.PrependUOffsetT(x)
+ b.Slot(o)
+ }
+}
+
+// PrependStructSlot prepends a struct onto the object at vtable slot `o`.
+// Structs are stored inline, so nothing additional is being added.
+// In generated code, `d` is always 0.
+func (b *Builder) PrependStructSlot(voffset int, x, d UOffsetT) {
+ if x != d {
+ b.assertNested()
+ if x != b.Offset() {
+ panic("inline data write outside of object")
+ }
+ b.Slot(voffset)
+ }
+}
+
+// Slot sets the vtable key `voffset` to the current location in the buffer.
+func (b *Builder) Slot(slotnum int) {
+ b.vtable[slotnum] = UOffsetT(b.Offset())
+}
+
+// FinishWithFileIdentifier finalizes a buffer, pointing to the given `rootTable`.
+// as well as applys a file identifier
+func (b *Builder) FinishWithFileIdentifier(rootTable UOffsetT, fid []byte) {
+ if fid == nil || len(fid) != fileIdentifierLength {
+ panic("incorrect file identifier length")
+ }
+ // In order to add a file identifier to the flatbuffer message, we need
+ // to prepare an alignment and file identifier length
+ b.Prep(b.minalign, SizeInt32+fileIdentifierLength)
+ for i := fileIdentifierLength - 1; i >= 0; i-- {
+ // place the file identifier
+ b.PlaceByte(fid[i])
+ }
+ // finish
+ b.Finish(rootTable)
+}
+
+// FinishSizePrefixed finalizes a buffer, pointing to the given `rootTable`.
+// The buffer is prefixed with the size of the buffer, excluding the size
+// of the prefix itself.
+func (b *Builder) FinishSizePrefixed(rootTable UOffsetT) {
+ b.finish(rootTable, true)
+}
+
+// FinishSizePrefixedWithFileIdentifier finalizes a buffer, pointing to the given `rootTable`
+// and applies a file identifier. The buffer is prefixed with the size of the buffer,
+// excluding the size of the prefix itself.
+func (b *Builder) FinishSizePrefixedWithFileIdentifier(rootTable UOffsetT, fid []byte) {
+ if fid == nil || len(fid) != fileIdentifierLength {
+ panic("incorrect file identifier length")
+ }
+ // In order to add a file identifier and size prefix to the flatbuffer message,
+ // we need to prepare an alignment, a size prefix length, and file identifier length
+ b.Prep(b.minalign, SizeInt32+fileIdentifierLength+sizePrefixLength)
+ for i := fileIdentifierLength - 1; i >= 0; i-- {
+ // place the file identifier
+ b.PlaceByte(fid[i])
+ }
+ // finish
+ b.finish(rootTable, true)
+}
+
+// Finish finalizes a buffer, pointing to the given `rootTable`.
+func (b *Builder) Finish(rootTable UOffsetT) {
+ b.finish(rootTable, false)
+}
+
+// finish finalizes a buffer, pointing to the given `rootTable`
+// with an optional size prefix.
+func (b *Builder) finish(rootTable UOffsetT, sizePrefix bool) {
+ b.assertNotNested()
+
+ if sizePrefix {
+ b.Prep(b.minalign, SizeUOffsetT+sizePrefixLength)
+ } else {
+ b.Prep(b.minalign, SizeUOffsetT)
+ }
+
+ b.PrependUOffsetT(rootTable)
+
+ if sizePrefix {
+ b.PlaceUint32(uint32(b.Offset()))
+ }
+
+ b.finished = true
+}
+
+// vtableEqual compares an unwritten vtable to a written vtable.
+func vtableEqual(a []UOffsetT, objectStart UOffsetT, b []byte) bool {
+ if len(a)*SizeVOffsetT != len(b) {
+ return false
+ }
+
+ for i := 0; i < len(a); i++ {
+ x := GetVOffsetT(b[i*SizeVOffsetT : (i+1)*SizeVOffsetT])
+
+ // Skip vtable entries that indicate a default value.
+ if x == 0 && a[i] == 0 {
+ continue
+ }
+
+ y := SOffsetT(objectStart) - SOffsetT(a[i])
+ if SOffsetT(x) != y {
+ return false
+ }
+ }
+ return true
+}
+
+// PrependBool prepends a bool to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependBool(x bool) {
+ b.Prep(SizeBool, 0)
+ b.PlaceBool(x)
+}
+
+// PrependUint8 prepends a uint8 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint8(x uint8) {
+ b.Prep(SizeUint8, 0)
+ b.PlaceUint8(x)
+}
+
+// PrependUint16 prepends a uint16 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint16(x uint16) {
+ b.Prep(SizeUint16, 0)
+ b.PlaceUint16(x)
+}
+
+// PrependUint32 prepends a uint32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint32(x uint32) {
+ b.Prep(SizeUint32, 0)
+ b.PlaceUint32(x)
+}
+
+// PrependUint64 prepends a uint64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependUint64(x uint64) {
+ b.Prep(SizeUint64, 0)
+ b.PlaceUint64(x)
+}
+
+// PrependInt8 prepends a int8 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt8(x int8) {
+ b.Prep(SizeInt8, 0)
+ b.PlaceInt8(x)
+}
+
+// PrependInt16 prepends a int16 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt16(x int16) {
+ b.Prep(SizeInt16, 0)
+ b.PlaceInt16(x)
+}
+
+// PrependInt32 prepends a int32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt32(x int32) {
+ b.Prep(SizeInt32, 0)
+ b.PlaceInt32(x)
+}
+
+// PrependInt64 prepends a int64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependInt64(x int64) {
+ b.Prep(SizeInt64, 0)
+ b.PlaceInt64(x)
+}
+
+// PrependFloat32 prepends a float32 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependFloat32(x float32) {
+ b.Prep(SizeFloat32, 0)
+ b.PlaceFloat32(x)
+}
+
+// PrependFloat64 prepends a float64 to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependFloat64(x float64) {
+ b.Prep(SizeFloat64, 0)
+ b.PlaceFloat64(x)
+}
+
+// PrependByte prepends a byte to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependByte(x byte) {
+ b.Prep(SizeByte, 0)
+ b.PlaceByte(x)
+}
+
+// PrependVOffsetT prepends a VOffsetT to the Builder buffer.
+// Aligns and checks for space.
+func (b *Builder) PrependVOffsetT(x VOffsetT) {
+ b.Prep(SizeVOffsetT, 0)
+ b.PlaceVOffsetT(x)
+}
+
+// PlaceBool prepends a bool to the Builder, without checking for space.
+func (b *Builder) PlaceBool(x bool) {
+ b.head -= UOffsetT(SizeBool)
+ WriteBool(b.Bytes[b.head:], x)
+}
+
+// PlaceUint8 prepends a uint8 to the Builder, without checking for space.
+func (b *Builder) PlaceUint8(x uint8) {
+ b.head -= UOffsetT(SizeUint8)
+ WriteUint8(b.Bytes[b.head:], x)
+}
+
+// PlaceUint16 prepends a uint16 to the Builder, without checking for space.
+func (b *Builder) PlaceUint16(x uint16) {
+ b.head -= UOffsetT(SizeUint16)
+ WriteUint16(b.Bytes[b.head:], x)
+}
+
+// PlaceUint32 prepends a uint32 to the Builder, without checking for space.
+func (b *Builder) PlaceUint32(x uint32) {
+ b.head -= UOffsetT(SizeUint32)
+ WriteUint32(b.Bytes[b.head:], x)
+}
+
+// PlaceUint64 prepends a uint64 to the Builder, without checking for space.
+func (b *Builder) PlaceUint64(x uint64) {
+ b.head -= UOffsetT(SizeUint64)
+ WriteUint64(b.Bytes[b.head:], x)
+}
+
+// PlaceInt8 prepends a int8 to the Builder, without checking for space.
+func (b *Builder) PlaceInt8(x int8) {
+ b.head -= UOffsetT(SizeInt8)
+ WriteInt8(b.Bytes[b.head:], x)
+}
+
+// PlaceInt16 prepends a int16 to the Builder, without checking for space.
+func (b *Builder) PlaceInt16(x int16) {
+ b.head -= UOffsetT(SizeInt16)
+ WriteInt16(b.Bytes[b.head:], x)
+}
+
+// PlaceInt32 prepends a int32 to the Builder, without checking for space.
+func (b *Builder) PlaceInt32(x int32) {
+ b.head -= UOffsetT(SizeInt32)
+ WriteInt32(b.Bytes[b.head:], x)
+}
+
+// PlaceInt64 prepends a int64 to the Builder, without checking for space.
+func (b *Builder) PlaceInt64(x int64) {
+ b.head -= UOffsetT(SizeInt64)
+ WriteInt64(b.Bytes[b.head:], x)
+}
+
+// PlaceFloat32 prepends a float32 to the Builder, without checking for space.
+func (b *Builder) PlaceFloat32(x float32) {
+ b.head -= UOffsetT(SizeFloat32)
+ WriteFloat32(b.Bytes[b.head:], x)
+}
+
+// PlaceFloat64 prepends a float64 to the Builder, without checking for space.
+func (b *Builder) PlaceFloat64(x float64) {
+ b.head -= UOffsetT(SizeFloat64)
+ WriteFloat64(b.Bytes[b.head:], x)
+}
+
+// PlaceByte prepends a byte to the Builder, without checking for space.
+func (b *Builder) PlaceByte(x byte) {
+ b.head -= UOffsetT(SizeByte)
+ WriteByte(b.Bytes[b.head:], x)
+}
+
+// PlaceVOffsetT prepends a VOffsetT to the Builder, without checking for space.
+func (b *Builder) PlaceVOffsetT(x VOffsetT) {
+ b.head -= UOffsetT(SizeVOffsetT)
+ WriteVOffsetT(b.Bytes[b.head:], x)
+}
+
+// PlaceSOffsetT prepends a SOffsetT to the Builder, without checking for space.
+func (b *Builder) PlaceSOffsetT(x SOffsetT) {
+ b.head -= UOffsetT(SizeSOffsetT)
+ WriteSOffsetT(b.Bytes[b.head:], x)
+}
+
+// PlaceUOffsetT prepends a UOffsetT to the Builder, without checking for space.
+func (b *Builder) PlaceUOffsetT(x UOffsetT) {
+ b.head -= UOffsetT(SizeUOffsetT)
+ WriteUOffsetT(b.Bytes[b.head:], x)
+}
diff --git a/vendor/github.com/google/flatbuffers/go/doc.go b/vendor/github.com/google/flatbuffers/go/doc.go
new file mode 100644
index 000000000..694edc763
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/doc.go
@@ -0,0 +1,3 @@
+// Package flatbuffers provides facilities to read and write flatbuffers
+// objects.
+package flatbuffers
diff --git a/vendor/github.com/google/flatbuffers/go/encode.go b/vendor/github.com/google/flatbuffers/go/encode.go
new file mode 100644
index 000000000..a2a579812
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/encode.go
@@ -0,0 +1,238 @@
+package flatbuffers
+
+import (
+ "math"
+)
+
+type (
+ // A SOffsetT stores a signed offset into arbitrary data.
+ SOffsetT int32
+ // A UOffsetT stores an unsigned offset into vector data.
+ UOffsetT uint32
+ // A VOffsetT stores an unsigned offset in a vtable.
+ VOffsetT uint16
+)
+
+const (
+ // VtableMetadataFields is the count of metadata fields in each vtable.
+ VtableMetadataFields = 2
+)
+
+// GetByte decodes a little-endian byte from a byte slice.
+func GetByte(buf []byte) byte {
+ return byte(GetUint8(buf))
+}
+
+// GetBool decodes a little-endian bool from a byte slice.
+func GetBool(buf []byte) bool {
+ return buf[0] == 1
+}
+
+// GetUint8 decodes a little-endian uint8 from a byte slice.
+func GetUint8(buf []byte) (n uint8) {
+ n = uint8(buf[0])
+ return
+}
+
+// GetUint16 decodes a little-endian uint16 from a byte slice.
+func GetUint16(buf []byte) (n uint16) {
+ _ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+ n |= uint16(buf[0])
+ n |= uint16(buf[1]) << 8
+ return
+}
+
+// GetUint32 decodes a little-endian uint32 from a byte slice.
+func GetUint32(buf []byte) (n uint32) {
+ _ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+ n |= uint32(buf[0])
+ n |= uint32(buf[1]) << 8
+ n |= uint32(buf[2]) << 16
+ n |= uint32(buf[3]) << 24
+ return
+}
+
+// GetUint64 decodes a little-endian uint64 from a byte slice.
+func GetUint64(buf []byte) (n uint64) {
+ _ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+ n |= uint64(buf[0])
+ n |= uint64(buf[1]) << 8
+ n |= uint64(buf[2]) << 16
+ n |= uint64(buf[3]) << 24
+ n |= uint64(buf[4]) << 32
+ n |= uint64(buf[5]) << 40
+ n |= uint64(buf[6]) << 48
+ n |= uint64(buf[7]) << 56
+ return
+}
+
+// GetInt8 decodes a little-endian int8 from a byte slice.
+func GetInt8(buf []byte) (n int8) {
+ n = int8(buf[0])
+ return
+}
+
+// GetInt16 decodes a little-endian int16 from a byte slice.
+func GetInt16(buf []byte) (n int16) {
+ _ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+ n |= int16(buf[0])
+ n |= int16(buf[1]) << 8
+ return
+}
+
+// GetInt32 decodes a little-endian int32 from a byte slice.
+func GetInt32(buf []byte) (n int32) {
+ _ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+ n |= int32(buf[0])
+ n |= int32(buf[1]) << 8
+ n |= int32(buf[2]) << 16
+ n |= int32(buf[3]) << 24
+ return
+}
+
+// GetInt64 decodes a little-endian int64 from a byte slice.
+func GetInt64(buf []byte) (n int64) {
+ _ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+ n |= int64(buf[0])
+ n |= int64(buf[1]) << 8
+ n |= int64(buf[2]) << 16
+ n |= int64(buf[3]) << 24
+ n |= int64(buf[4]) << 32
+ n |= int64(buf[5]) << 40
+ n |= int64(buf[6]) << 48
+ n |= int64(buf[7]) << 56
+ return
+}
+
+// GetFloat32 decodes a little-endian float32 from a byte slice.
+func GetFloat32(buf []byte) float32 {
+ x := GetUint32(buf)
+ return math.Float32frombits(x)
+}
+
+// GetFloat64 decodes a little-endian float64 from a byte slice.
+func GetFloat64(buf []byte) float64 {
+ x := GetUint64(buf)
+ return math.Float64frombits(x)
+}
+
+// GetUOffsetT decodes a little-endian UOffsetT from a byte slice.
+func GetUOffsetT(buf []byte) UOffsetT {
+ return UOffsetT(GetUint32(buf))
+}
+
+// GetSOffsetT decodes a little-endian SOffsetT from a byte slice.
+func GetSOffsetT(buf []byte) SOffsetT {
+ return SOffsetT(GetInt32(buf))
+}
+
+// GetVOffsetT decodes a little-endian VOffsetT from a byte slice.
+func GetVOffsetT(buf []byte) VOffsetT {
+ return VOffsetT(GetUint16(buf))
+}
+
+// WriteByte encodes a little-endian uint8 into a byte slice.
+func WriteByte(buf []byte, n byte) {
+ WriteUint8(buf, uint8(n))
+}
+
+// WriteBool encodes a little-endian bool into a byte slice.
+func WriteBool(buf []byte, b bool) {
+ buf[0] = 0
+ if b {
+ buf[0] = 1
+ }
+}
+
+// WriteUint8 encodes a little-endian uint8 into a byte slice.
+func WriteUint8(buf []byte, n uint8) {
+ buf[0] = byte(n)
+}
+
+// WriteUint16 encodes a little-endian uint16 into a byte slice.
+func WriteUint16(buf []byte, n uint16) {
+ _ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+ buf[0] = byte(n)
+ buf[1] = byte(n >> 8)
+}
+
+// WriteUint32 encodes a little-endian uint32 into a byte slice.
+func WriteUint32(buf []byte, n uint32) {
+ _ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+ buf[0] = byte(n)
+ buf[1] = byte(n >> 8)
+ buf[2] = byte(n >> 16)
+ buf[3] = byte(n >> 24)
+}
+
+// WriteUint64 encodes a little-endian uint64 into a byte slice.
+func WriteUint64(buf []byte, n uint64) {
+ _ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+ buf[0] = byte(n)
+ buf[1] = byte(n >> 8)
+ buf[2] = byte(n >> 16)
+ buf[3] = byte(n >> 24)
+ buf[4] = byte(n >> 32)
+ buf[5] = byte(n >> 40)
+ buf[6] = byte(n >> 48)
+ buf[7] = byte(n >> 56)
+}
+
+// WriteInt8 encodes a little-endian int8 into a byte slice.
+func WriteInt8(buf []byte, n int8) {
+ buf[0] = byte(n)
+}
+
+// WriteInt16 encodes a little-endian int16 into a byte slice.
+func WriteInt16(buf []byte, n int16) {
+ _ = buf[1] // Force one bounds check. See: golang.org/issue/14808
+ buf[0] = byte(n)
+ buf[1] = byte(n >> 8)
+}
+
+// WriteInt32 encodes a little-endian int32 into a byte slice.
+func WriteInt32(buf []byte, n int32) {
+ _ = buf[3] // Force one bounds check. See: golang.org/issue/14808
+ buf[0] = byte(n)
+ buf[1] = byte(n >> 8)
+ buf[2] = byte(n >> 16)
+ buf[3] = byte(n >> 24)
+}
+
+// WriteInt64 encodes a little-endian int64 into a byte slice.
+func WriteInt64(buf []byte, n int64) {
+ _ = buf[7] // Force one bounds check. See: golang.org/issue/14808
+ buf[0] = byte(n)
+ buf[1] = byte(n >> 8)
+ buf[2] = byte(n >> 16)
+ buf[3] = byte(n >> 24)
+ buf[4] = byte(n >> 32)
+ buf[5] = byte(n >> 40)
+ buf[6] = byte(n >> 48)
+ buf[7] = byte(n >> 56)
+}
+
+// WriteFloat32 encodes a little-endian float32 into a byte slice.
+func WriteFloat32(buf []byte, n float32) {
+ WriteUint32(buf, math.Float32bits(n))
+}
+
+// WriteFloat64 encodes a little-endian float64 into a byte slice.
+func WriteFloat64(buf []byte, n float64) {
+ WriteUint64(buf, math.Float64bits(n))
+}
+
+// WriteVOffsetT encodes a little-endian VOffsetT into a byte slice.
+func WriteVOffsetT(buf []byte, n VOffsetT) {
+ WriteUint16(buf, uint16(n))
+}
+
+// WriteSOffsetT encodes a little-endian SOffsetT into a byte slice.
+func WriteSOffsetT(buf []byte, n SOffsetT) {
+ WriteInt32(buf, int32(n))
+}
+
+// WriteUOffsetT encodes a little-endian UOffsetT into a byte slice.
+func WriteUOffsetT(buf []byte, n UOffsetT) {
+ WriteUint32(buf, uint32(n))
+}
diff --git a/vendor/github.com/google/flatbuffers/go/grpc.go b/vendor/github.com/google/flatbuffers/go/grpc.go
new file mode 100644
index 000000000..15f1a510d
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/grpc.go
@@ -0,0 +1,38 @@
+package flatbuffers
+
+// Codec implements gRPC-go Codec which is used to encode and decode messages.
+var Codec = "flatbuffers"
+
+// FlatbuffersCodec defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type FlatbuffersCodec struct{}
+
+// Marshal returns the wire format of v.
+func (FlatbuffersCodec) Marshal(v interface{}) ([]byte, error) {
+ return v.(*Builder).FinishedBytes(), nil
+}
+
+// Unmarshal parses the wire format into v.
+func (FlatbuffersCodec) Unmarshal(data []byte, v interface{}) error {
+ v.(flatbuffersInit).Init(data, GetUOffsetT(data))
+ return nil
+}
+
+// String old gRPC Codec interface func
+func (FlatbuffersCodec) String() string {
+ return Codec
+}
+
+// Name returns the name of the Codec implementation. The returned string
+// will be used as part of content type in transmission. The result must be
+// static; the result cannot change between calls.
+//
+// add Name() for ForceCodec interface
+func (FlatbuffersCodec) Name() string {
+ return Codec
+}
+
+type flatbuffersInit interface {
+ Init(data []byte, i UOffsetT)
+}
diff --git a/vendor/github.com/google/flatbuffers/go/lib.go b/vendor/github.com/google/flatbuffers/go/lib.go
new file mode 100644
index 000000000..a4e99de10
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/lib.go
@@ -0,0 +1,50 @@
+package flatbuffers
+
+// FlatBuffer is the interface that represents a flatbuffer.
+type FlatBuffer interface {
+ Table() Table
+ Init(buf []byte, i UOffsetT)
+}
+
+// GetRootAs is a generic helper to initialize a FlatBuffer with the provided buffer bytes and its data offset.
+func GetRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) {
+ n := GetUOffsetT(buf[offset:])
+ fb.Init(buf, n+offset)
+}
+
+// GetSizePrefixedRootAs is a generic helper to initialize a FlatBuffer with the provided size-prefixed buffer
+// bytes and its data offset
+func GetSizePrefixedRootAs(buf []byte, offset UOffsetT, fb FlatBuffer) {
+ n := GetUOffsetT(buf[offset+sizePrefixLength:])
+ fb.Init(buf, n+offset+sizePrefixLength)
+}
+
+// GetSizePrefix reads the size from a size-prefixed flatbuffer
+func GetSizePrefix(buf []byte, offset UOffsetT) uint32 {
+ return GetUint32(buf[offset:])
+}
+
+// GetIndirectOffset retrieves the relative offset in the provided buffer stored at `offset`.
+func GetIndirectOffset(buf []byte, offset UOffsetT) UOffsetT {
+ return offset + GetUOffsetT(buf[offset:])
+}
+
+// GetBufferIdentifier returns the file identifier as string
+func GetBufferIdentifier(buf []byte) string {
+ return string(buf[SizeUOffsetT:][:fileIdentifierLength])
+}
+
+// GetSizePrefixedBufferIdentifier returns the file identifier as string for a size-prefixed buffer
+func GetSizePrefixedBufferIdentifier(buf []byte) string {
+ return string(buf[SizeUOffsetT+sizePrefixLength:][:fileIdentifierLength])
+}
+
+// BufferHasIdentifier checks if the identifier in a buffer has the expected value
+func BufferHasIdentifier(buf []byte, identifier string) bool {
+ return GetBufferIdentifier(buf) == identifier
+}
+
+// SizePrefixedBufferHasIdentifier checks if the identifier in a buffer has the expected value for a size-prefixed buffer
+func SizePrefixedBufferHasIdentifier(buf []byte, identifier string) bool {
+ return GetSizePrefixedBufferIdentifier(buf) == identifier
+}
diff --git a/vendor/github.com/google/flatbuffers/go/sizes.go b/vendor/github.com/google/flatbuffers/go/sizes.go
new file mode 100644
index 000000000..ba2216984
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/sizes.go
@@ -0,0 +1,55 @@
+package flatbuffers
+
+import (
+ "unsafe"
+)
+
+const (
+ // See http://golang.org/ref/spec#Numeric_types
+
+ // SizeUint8 is the byte size of a uint8.
+ SizeUint8 = 1
+ // SizeUint16 is the byte size of a uint16.
+ SizeUint16 = 2
+ // SizeUint32 is the byte size of a uint32.
+ SizeUint32 = 4
+ // SizeUint64 is the byte size of a uint64.
+ SizeUint64 = 8
+
+ // SizeInt8 is the byte size of a int8.
+ SizeInt8 = 1
+ // SizeInt16 is the byte size of a int16.
+ SizeInt16 = 2
+ // SizeInt32 is the byte size of a int32.
+ SizeInt32 = 4
+ // SizeInt64 is the byte size of a int64.
+ SizeInt64 = 8
+
+ // SizeFloat32 is the byte size of a float32.
+ SizeFloat32 = 4
+ // SizeFloat64 is the byte size of a float64.
+ SizeFloat64 = 8
+
+ // SizeByte is the byte size of a byte.
+ // The `byte` type is aliased (by Go definition) to uint8.
+ SizeByte = 1
+
+ // SizeBool is the byte size of a bool.
+ // The `bool` type is aliased (by flatbuffers convention) to uint8.
+ SizeBool = 1
+
+ // SizeSOffsetT is the byte size of an SOffsetT.
+ // The `SOffsetT` type is aliased (by flatbuffers convention) to int32.
+ SizeSOffsetT = 4
+ // SizeUOffsetT is the byte size of an UOffsetT.
+ // The `UOffsetT` type is aliased (by flatbuffers convention) to uint32.
+ SizeUOffsetT = 4
+ // SizeVOffsetT is the byte size of an VOffsetT.
+ // The `VOffsetT` type is aliased (by flatbuffers convention) to uint16.
+ SizeVOffsetT = 2
+)
+
+// byteSliceToString converts a []byte to string without a heap allocation.
+func byteSliceToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
diff --git a/vendor/github.com/google/flatbuffers/go/struct.go b/vendor/github.com/google/flatbuffers/go/struct.go
new file mode 100644
index 000000000..11258f715
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/struct.go
@@ -0,0 +1,8 @@
+package flatbuffers
+
+// Struct wraps a byte slice and provides read access to its data.
+//
+// Structs do not have a vtable.
+type Struct struct {
+ Table
+}
diff --git a/vendor/github.com/google/flatbuffers/go/table.go b/vendor/github.com/google/flatbuffers/go/table.go
new file mode 100644
index 000000000..b273146fa
--- /dev/null
+++ b/vendor/github.com/google/flatbuffers/go/table.go
@@ -0,0 +1,505 @@
+package flatbuffers
+
+// Table wraps a byte slice and provides read access to its data.
+//
+// The variable `Pos` indicates the root of the FlatBuffers object therein.
+type Table struct {
+ Bytes []byte
+ Pos UOffsetT // Always < 1<<31.
+}
+
+// Offset provides access into the Table's vtable.
+//
+// Fields which are deprecated are ignored by checking against the vtable's length.
+func (t *Table) Offset(vtableOffset VOffsetT) VOffsetT {
+ vtable := UOffsetT(SOffsetT(t.Pos) - t.GetSOffsetT(t.Pos))
+ if vtableOffset < t.GetVOffsetT(vtable) {
+ return t.GetVOffsetT(vtable + UOffsetT(vtableOffset))
+ }
+ return 0
+}
+
+// Indirect retrieves the relative offset stored at `offset`.
+func (t *Table) Indirect(off UOffsetT) UOffsetT {
+ return off + GetUOffsetT(t.Bytes[off:])
+}
+
+// String gets a string from data stored inside the flatbuffer.
+func (t *Table) String(off UOffsetT) string {
+ b := t.ByteVector(off)
+ return byteSliceToString(b)
+}
+
+// ByteVector gets a byte slice from data stored inside the flatbuffer.
+func (t *Table) ByteVector(off UOffsetT) []byte {
+ off += GetUOffsetT(t.Bytes[off:])
+ start := off + UOffsetT(SizeUOffsetT)
+ length := GetUOffsetT(t.Bytes[off:])
+ return t.Bytes[start : start+length]
+}
+
+// VectorLen retrieves the length of the vector whose offset is stored at
+// "off" in this object.
+func (t *Table) VectorLen(off UOffsetT) int {
+ off += t.Pos
+ off += GetUOffsetT(t.Bytes[off:])
+ return int(GetUOffsetT(t.Bytes[off:]))
+}
+
+// Vector retrieves the start of data of the vector whose offset is stored
+// at "off" in this object.
+func (t *Table) Vector(off UOffsetT) UOffsetT {
+ off += t.Pos
+ x := off + GetUOffsetT(t.Bytes[off:])
+ // data starts after metadata containing the vector length
+ x += UOffsetT(SizeUOffsetT)
+ return x
+}
+
+// Union initializes any Table-derived type to point to the union at the given
+// offset.
+func (t *Table) Union(t2 *Table, off UOffsetT) {
+ off += t.Pos
+ t2.Pos = off + t.GetUOffsetT(off)
+ t2.Bytes = t.Bytes
+}
+
+// GetBool retrieves a bool at the given offset.
+func (t *Table) GetBool(off UOffsetT) bool {
+ return GetBool(t.Bytes[off:])
+}
+
+// GetByte retrieves a byte at the given offset.
+func (t *Table) GetByte(off UOffsetT) byte {
+ return GetByte(t.Bytes[off:])
+}
+
+// GetUint8 retrieves a uint8 at the given offset.
+func (t *Table) GetUint8(off UOffsetT) uint8 {
+ return GetUint8(t.Bytes[off:])
+}
+
+// GetUint16 retrieves a uint16 at the given offset.
+func (t *Table) GetUint16(off UOffsetT) uint16 {
+ return GetUint16(t.Bytes[off:])
+}
+
+// GetUint32 retrieves a uint32 at the given offset.
+func (t *Table) GetUint32(off UOffsetT) uint32 {
+ return GetUint32(t.Bytes[off:])
+}
+
+// GetUint64 retrieves a uint64 at the given offset.
+func (t *Table) GetUint64(off UOffsetT) uint64 {
+ return GetUint64(t.Bytes[off:])
+}
+
+// GetInt8 retrieves a int8 at the given offset.
+func (t *Table) GetInt8(off UOffsetT) int8 {
+ return GetInt8(t.Bytes[off:])
+}
+
+// GetInt16 retrieves a int16 at the given offset.
+func (t *Table) GetInt16(off UOffsetT) int16 {
+ return GetInt16(t.Bytes[off:])
+}
+
+// GetInt32 retrieves a int32 at the given offset.
+func (t *Table) GetInt32(off UOffsetT) int32 {
+ return GetInt32(t.Bytes[off:])
+}
+
+// GetInt64 retrieves a int64 at the given offset.
+func (t *Table) GetInt64(off UOffsetT) int64 {
+ return GetInt64(t.Bytes[off:])
+}
+
+// GetFloat32 retrieves a float32 at the given offset.
+func (t *Table) GetFloat32(off UOffsetT) float32 {
+ return GetFloat32(t.Bytes[off:])
+}
+
+// GetFloat64 retrieves a float64 at the given offset.
+func (t *Table) GetFloat64(off UOffsetT) float64 {
+ return GetFloat64(t.Bytes[off:])
+}
+
+// GetUOffsetT retrieves a UOffsetT at the given offset.
+func (t *Table) GetUOffsetT(off UOffsetT) UOffsetT {
+ return GetUOffsetT(t.Bytes[off:])
+}
+
+// GetVOffsetT retrieves a VOffsetT at the given offset.
+func (t *Table) GetVOffsetT(off UOffsetT) VOffsetT {
+ return GetVOffsetT(t.Bytes[off:])
+}
+
+// GetSOffsetT retrieves a SOffsetT at the given offset.
+func (t *Table) GetSOffsetT(off UOffsetT) SOffsetT {
+ return GetSOffsetT(t.Bytes[off:])
+}
+
+// GetBoolSlot retrieves the bool that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetBoolSlot(slot VOffsetT, d bool) bool {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetBool(t.Pos + UOffsetT(off))
+}
+
+// GetByteSlot retrieves the byte that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetByteSlot(slot VOffsetT, d byte) byte {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetByte(t.Pos + UOffsetT(off))
+}
+
+// GetInt8Slot retrieves the int8 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt8Slot(slot VOffsetT, d int8) int8 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetInt8(t.Pos + UOffsetT(off))
+}
+
+// GetUint8Slot retrieves the uint8 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint8Slot(slot VOffsetT, d uint8) uint8 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetUint8(t.Pos + UOffsetT(off))
+}
+
+// GetInt16Slot retrieves the int16 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt16Slot(slot VOffsetT, d int16) int16 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetInt16(t.Pos + UOffsetT(off))
+}
+
+// GetUint16Slot retrieves the uint16 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint16Slot(slot VOffsetT, d uint16) uint16 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetUint16(t.Pos + UOffsetT(off))
+}
+
+// GetInt32Slot retrieves the int32 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt32Slot(slot VOffsetT, d int32) int32 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetInt32(t.Pos + UOffsetT(off))
+}
+
+// GetUint32Slot retrieves the uint32 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint32Slot(slot VOffsetT, d uint32) uint32 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetUint32(t.Pos + UOffsetT(off))
+}
+
+// GetInt64Slot retrieves the int64 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetInt64Slot(slot VOffsetT, d int64) int64 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetInt64(t.Pos + UOffsetT(off))
+}
+
+// GetUint64Slot retrieves the uint64 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetUint64Slot(slot VOffsetT, d uint64) uint64 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetUint64(t.Pos + UOffsetT(off))
+}
+
+// GetFloat32Slot retrieves the float32 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetFloat32Slot(slot VOffsetT, d float32) float32 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetFloat32(t.Pos + UOffsetT(off))
+}
+
+// GetFloat64Slot retrieves the float64 that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetFloat64Slot(slot VOffsetT, d float64) float64 {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+
+ return t.GetFloat64(t.Pos + UOffsetT(off))
+}
+
+// GetVOffsetTSlot retrieves the VOffsetT that the given vtable location
+// points to. If the vtable value is zero, the default value `d`
+// will be returned.
+func (t *Table) GetVOffsetTSlot(slot VOffsetT, d VOffsetT) VOffsetT {
+ off := t.Offset(slot)
+ if off == 0 {
+ return d
+ }
+ return VOffsetT(off)
+}
+
+// MutateBool updates a bool at the given offset.
+func (t *Table) MutateBool(off UOffsetT, n bool) bool {
+ WriteBool(t.Bytes[off:], n)
+ return true
+}
+
+// MutateByte updates a Byte at the given offset.
+func (t *Table) MutateByte(off UOffsetT, n byte) bool {
+ WriteByte(t.Bytes[off:], n)
+ return true
+}
+
+// MutateUint8 updates a Uint8 at the given offset.
+func (t *Table) MutateUint8(off UOffsetT, n uint8) bool {
+ WriteUint8(t.Bytes[off:], n)
+ return true
+}
+
+// MutateUint16 updates a Uint16 at the given offset.
+func (t *Table) MutateUint16(off UOffsetT, n uint16) bool {
+ WriteUint16(t.Bytes[off:], n)
+ return true
+}
+
+// MutateUint32 updates a Uint32 at the given offset.
+func (t *Table) MutateUint32(off UOffsetT, n uint32) bool {
+ WriteUint32(t.Bytes[off:], n)
+ return true
+}
+
+// MutateUint64 updates a Uint64 at the given offset.
+func (t *Table) MutateUint64(off UOffsetT, n uint64) bool {
+ WriteUint64(t.Bytes[off:], n)
+ return true
+}
+
+// MutateInt8 updates a Int8 at the given offset.
+func (t *Table) MutateInt8(off UOffsetT, n int8) bool {
+ WriteInt8(t.Bytes[off:], n)
+ return true
+}
+
+// MutateInt16 updates a Int16 at the given offset.
+func (t *Table) MutateInt16(off UOffsetT, n int16) bool {
+ WriteInt16(t.Bytes[off:], n)
+ return true
+}
+
+// MutateInt32 updates a Int32 at the given offset.
+func (t *Table) MutateInt32(off UOffsetT, n int32) bool {
+ WriteInt32(t.Bytes[off:], n)
+ return true
+}
+
+// MutateInt64 updates a Int64 at the given offset.
+func (t *Table) MutateInt64(off UOffsetT, n int64) bool {
+ WriteInt64(t.Bytes[off:], n)
+ return true
+}
+
+// MutateFloat32 updates a Float32 at the given offset.
+func (t *Table) MutateFloat32(off UOffsetT, n float32) bool {
+ WriteFloat32(t.Bytes[off:], n)
+ return true
+}
+
+// MutateFloat64 updates a Float64 at the given offset.
+func (t *Table) MutateFloat64(off UOffsetT, n float64) bool {
+ WriteFloat64(t.Bytes[off:], n)
+ return true
+}
+
+// MutateUOffsetT updates a UOffsetT at the given offset.
+func (t *Table) MutateUOffsetT(off UOffsetT, n UOffsetT) bool {
+ WriteUOffsetT(t.Bytes[off:], n)
+ return true
+}
+
+// MutateVOffsetT updates a VOffsetT at the given offset.
+func (t *Table) MutateVOffsetT(off UOffsetT, n VOffsetT) bool {
+ WriteVOffsetT(t.Bytes[off:], n)
+ return true
+}
+
+// MutateSOffsetT updates a SOffsetT at the given offset.
+func (t *Table) MutateSOffsetT(off UOffsetT, n SOffsetT) bool {
+ WriteSOffsetT(t.Bytes[off:], n)
+ return true
+}
+
+// MutateBoolSlot updates the bool at given vtable location
+func (t *Table) MutateBoolSlot(slot VOffsetT, n bool) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateBool(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateByteSlot updates the byte at given vtable location
+func (t *Table) MutateByteSlot(slot VOffsetT, n byte) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateByte(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateInt8Slot updates the int8 at given vtable location
+func (t *Table) MutateInt8Slot(slot VOffsetT, n int8) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateInt8(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateUint8Slot updates the uint8 at given vtable location
+func (t *Table) MutateUint8Slot(slot VOffsetT, n uint8) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateUint8(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateInt16Slot updates the int16 at given vtable location
+func (t *Table) MutateInt16Slot(slot VOffsetT, n int16) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateInt16(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateUint16Slot updates the uint16 at given vtable location
+func (t *Table) MutateUint16Slot(slot VOffsetT, n uint16) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateUint16(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateInt32Slot updates the int32 at given vtable location
+func (t *Table) MutateInt32Slot(slot VOffsetT, n int32) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateInt32(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateUint32Slot updates the uint32 at given vtable location
+func (t *Table) MutateUint32Slot(slot VOffsetT, n uint32) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateUint32(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateInt64Slot updates the int64 at given vtable location
+func (t *Table) MutateInt64Slot(slot VOffsetT, n int64) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateInt64(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateUint64Slot updates the uint64 at given vtable location
+func (t *Table) MutateUint64Slot(slot VOffsetT, n uint64) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateUint64(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateFloat32Slot updates the float32 at given vtable location
+func (t *Table) MutateFloat32Slot(slot VOffsetT, n float32) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateFloat32(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
+
+// MutateFloat64Slot updates the float64 at given vtable location
+func (t *Table) MutateFloat64Slot(slot VOffsetT, n float64) bool {
+ if off := t.Offset(slot); off != 0 {
+ t.MutateFloat64(t.Pos+UOffsetT(off), n)
+ return true
+ }
+
+ return false
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 4ba73a06c..39cb8eb38 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -341,6 +341,9 @@ github.com/golangci/revgrep
# github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed
## explicit; go 1.20
github.com/golangci/unconvert
+# github.com/google/flatbuffers v24.3.25+incompatible
+## explicit
+github.com/google/flatbuffers/go
# github.com/google/go-cmp v0.6.0
## explicit; go 1.13
github.com/google/go-cmp/cmp